diff --git a/.ci/linux-build.sh b/.ci/linux-build.sh
index c526e61..2b0782a 100755
--- a/.ci/linux-build.sh
+++ b/.ci/linux-build.sh
@@ -3,15 +3,15 @@
 set -o errexit
 set -x
 
-CFLAGS=""
+COMMON_CFLAGS=""
 OVN_CFLAGS=""
-SPARSE_FLAGS=""
 EXTRA_OPTS="--enable-Werror"
 
 function configure_ovs()
 {
     pushd ovs
-    ./boot.sh && ./configure $* || { cat config.log; exit 1; }
+    ./boot.sh && ./configure CFLAGS="${COMMON_CFLAGS}" $* || \
+    { cat config.log; exit 1; }
     make -j4 || { cat config.log; exit 1; }
     popd
 }
@@ -19,40 +19,43 @@ function configure_ovs()
 function configure_ovn()
 {
     configure_ovs $*
-
-    export OVS_CFLAGS="${OVS_CFLAGS} ${OVN_CFLAGS}"
-    ./boot.sh && ./configure $* || \
+    ./boot.sh && ./configure CFLAGS="${COMMON_CFLAGS} ${OVN_CFLAGS}" $* || \
     { cat config.log; exit 1; }
 }
 
 save_OPTS="${OPTS} $*"
 OPTS="${EXTRA_OPTS} ${save_OPTS}"
 
-# If AddressSanitizer is requested, enable it, but only for OVN, not for OVS.
-# However, disable some optimizations for OVS, to make AddressSanitizer
-# reports user friendly.
-if [ "$ASAN" ]; then
-    CFLAGS="-fno-omit-frame-pointer -fno-common"
-    OVN_CFLAGS="-fsanitize=address"
+# If AddressSanitizer and UndefinedBehaviorSanitizer are requested, enable them,
+# but only for OVN, not for OVS.  However, disable some optimizations for
+# OVS, to make sanitizer reports user-friendly.
+if [ "$SANITIZERS" ]; then
+    # Use the default options configured in tests/atlocal.in, in UBSAN_OPTIONS.
+    COMMON_CFLAGS="${COMMON_CFLAGS} -O1 -fno-omit-frame-pointer -fno-common -g"
+    OVN_CFLAGS="${OVN_CFLAGS} -fsanitize=address,undefined"
 fi
 
 if [ "$CC" = "clang" ]; then
-    export OVS_CFLAGS="$CFLAGS -Wno-error=unused-command-line-argument"
+    COMMON_CFLAGS="${COMMON_CFLAGS} -Wno-error=unused-command-line-argument"
 elif [ "$M32" ]; then
     # Not using sparse for 32bit builds on 64bit machine.
-    # Adding m32 flag directly to CC to avoid any posiible issues with API/ABI
+    # Adding m32 flag directly to CC to avoid any possible issues with API/ABI
     # difference on 'configure' and 'make' stages.
     export CC="$CC -m32"
 else
     OPTS="$OPTS --enable-sparse"
-    export OVS_CFLAGS="$CFLAGS $SPARSE_FLAGS"
 fi
 
 if [ "$TESTSUITE" ]; then
+    TESTSUITEFLAGS=""
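+    # If TESTSUITE_KW is set, limit the Autotest run to the matching tests
+    # by passing it via the testsuite's '-k' keyword option.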
+    if [[ ! -z $TESTSUITE_KW ]]; then
+        TESTSUITEFLAGS="-k $TESTSUITE_KW"
+    fi
+
     if [ "$TESTSUITE" = "system-test" ]; then
         configure_ovn $OPTS
         make -j4 || { cat config.log; exit 1; }
-        if ! sudo make -j4 check-kernel RECHECK=yes; then
+        if ! sudo make -j4 check-kernel TESTSUITEFLAGS="$TESTSUITEFLAGS" RECHECK=yes; then
             # system-kmod-testsuite.log is necessary for debugging.
             cat tests/system-kmod-testsuite.log
             exit 1
@@ -63,7 +66,9 @@ if [ "$TESTSUITE" ]; then
         configure_ovn
 
         export DISTCHECK_CONFIGURE_FLAGS="$OPTS"
-        if ! make distcheck -j4 TESTSUITEFLAGS="-j4" RECHECK=yes; then
+        if ! make distcheck CFLAGS="${COMMON_CFLAGS} ${OVN_CFLAGS}" -j4 \
+            TESTSUITEFLAGS="$TESTSUITEFLAGS -j4" RECHECK=yes
+        then
             # testsuite.log is necessary for debugging.
             cat */_build/sub/tests/testsuite.log
             exit 1
diff --git a/.ci/ovn-kubernetes/Dockerfile b/.ci/ovn-kubernetes/Dockerfile
index 1966288..e74b620 100644
--- a/.ci/ovn-kubernetes/Dockerfile
+++ b/.ci/ovn-kubernetes/Dockerfile
@@ -37,7 +37,7 @@ RUN rm rpm/rpmbuild/RPMS/x86_64/*debug*
 RUN rm rpm/rpmbuild/RPMS/x86_64/*docker*
 
 # Build ovn-kubernetes
-FROM golang:1.17 as ovnkubebuilder
+FROM golang:1.18 as ovnkubebuilder
 ARG OVNKUBE_COMMIT
 ARG LIBOVSDB_COMMIT
 
diff --git a/.github/workflows/ovn-kubernetes.yml b/.github/workflows/ovn-kubernetes.yml
index c05bbd3..ba6b291 100644
--- a/.github/workflows/ovn-kubernetes.yml
+++ b/.github/workflows/ovn-kubernetes.yml
@@ -8,11 +8,15 @@ on:
   # Run Sunday at midnight
   - cron: '0 0 * * 0'
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }}
+  cancel-in-progress: true
+
 env:
-  GO_VERSION: "1.17.6"
-  K8S_VERSION: v1.23.3
+  GO_VERSION: "1.18.4"
+  K8S_VERSION: v1.24.0
   OVNKUBE_COMMIT: "master"
-  LIBOVSDB_COMMIT: "8081fe24e48f"
+  LIBOVSDB_COMMIT: "98c0bad3cff1"
   KIND_CLUSTER_NAME: ovn
   KIND_INSTALL_INGRESS: true
   KIND_ALLOW_SYSTEM_WRITES: true
@@ -51,44 +55,31 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        target:
-          - shard: shard-conformance
-            hybrid-overlay: false
-            multicast-enable: false
-            emptylb-enable: false
-          - shard: control-plane
-            hybrid-overlay: true
-            multicast-enable: true
-            emptylb-enable: true
-        ipfamily:
-          - ip: ipv4
-            name: "IPv4"
-            ipv4: true
-            ipv6: false
-          - ip: ipv6
-            name: "IPv6"
-            ipv4: false
-            ipv6: true
-          - ip: dualstack
-            name: "Dualstack"
-            ipv4: true
-            ipv6: true
-        # Example of how to exclude a fully qualified test:
-        # - {"ipfamily": {"ip": ipv4}, "ha": {"enabled": "false"}, "gateway-mode": shared, "target": {"shard": shard-n-other}}
-        exclude:
-         # Not currently supported but needs to be.
-         - {"ipfamily": {"ip": dualstack}, "target": {"shard": control-plane}}
-         - {"ipfamily": {"ip": ipv6}, "target": {"shard": control-plane}}
+        # Valid options are:
+        # target: ["shard-conformance", "control-plane"]
+        #         shard-conformance: hybrid-overlay = multicast-enable = emptylb-enable = false
+        #         control-plane: hybrid-overlay = multicast-enable = emptylb-enable = true
+        # gateway-mode: ["local", "shared"]
+        # ipfamily: ["ipv4", "ipv6", "dualstack"]
+        # disable-snat-multiple-gws: ["noSnatGW", "snatGW"]
+        include:
+          - {"target": "shard-conformance", "ha": "HA",   "gateway-mode": "local",  "ipfamily": "ipv6",      "disable-snat-multiple-gws": "snatGW"}
+          - {"target": "shard-conformance", "ha": "HA",   "gateway-mode": "local",  "ipfamily": "dualstack", "disable-snat-multiple-gws": "snatGW"}
+          - {"target": "shard-conformance", "ha": "HA",   "gateway-mode": "shared", "ipfamily": "ipv4",      "disable-snat-multiple-gws": "snatGW"}
+          - {"target": "shard-conformance", "ha": "HA",   "gateway-mode": "shared", "ipfamily": "ipv6",      "disable-snat-multiple-gws": "snatGW"}
+          - {"target": "control-plane",     "ha": "HA",   "gateway-mode": "shared", "ipfamily": "ipv4",      "disable-snat-multiple-gws": "noSnatGW"}
+          - {"target": "control-plane",     "ha": "HA",   "gateway-mode": "shared", "ipfamily": "ipv4",      "disable-snat-multiple-gws": "snatGW"}
     needs: [build]
     env:
-      JOB_NAME: "${{ matrix.target.shard }}-${{ matrix.ipfamily.name }}"
+      JOB_NAME: "${{ matrix.target }}-${{ matrix.ha }}-${{ matrix.gateway-mode }}-${{ matrix.ipfamily }}-${{ matrix.disable-snat-multiple-gws }}-${{ matrix.second-bridge }}"
+      OVN_HYBRID_OVERLAY_ENABLE: "${{ matrix.target == 'control-plane' }}"
+      OVN_MULTICAST_ENABLE:  "${{ matrix.target == 'control-plane' }}"
+      OVN_EMPTY_LB_EVENTS: "${{ matrix.target == 'control-plane' }}"
       OVN_HA: "true"
-      KIND_IPV4_SUPPORT: "${{ matrix.ipfamily.ipv4 }}"
-      KIND_IPV6_SUPPORT: "${{ matrix.ipfamily.ipv6 }}"
-      OVN_HYBRID_OVERLAY_ENABLE: "${{ matrix.target.hybrid-overlay }}"
-      OVN_GATEWAY_MODE: "shared"
-      OVN_MULTICAST_ENABLE:  "${{ matrix.target.multicast-enable }}"
-      OVN_EMPTY_LB_EVENTS: "${{ matrix.target.emptylb-enable }}"
+      OVN_DISABLE_SNAT_MULTIPLE_GWS: "${{ matrix.disable-snat-multiple-gws == 'noSnatGW' }}"
+      OVN_GATEWAY_MODE: "${{ matrix.gateway-mode }}"
+      KIND_IPV4_SUPPORT: "${{ matrix.ipfamily == 'IPv4' || matrix.ipfamily == 'dualstack' }}"
+      KIND_IPV6_SUPPORT: "${{ matrix.ipfamily == 'IPv6' || matrix.ipfamily == 'dualstack' }}"
     steps:
 
     - name: Free up disk space
@@ -134,7 +125,7 @@ jobs:
 
     - name: Run Tests
       run: |
-        make -C test ${{ matrix.target.shard }}
+        make -C test ${{ matrix.target }}
       working-directory: src/github.com/ovn-org/ovn-kubernetes
 
     - name: Upload Junit Reports
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 7b36481..7a59cd4 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -7,6 +7,10 @@ on:
     # Run Sunday at midnight
     - cron: '0 0 * * 0'
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }}
+  cancel-in-progress: true
+
 jobs:
   build-linux:
     env:
@@ -15,43 +19,49 @@ jobs:
         libssl-dev llvm-dev libelf-dev libnuma-dev libpcap-dev  \
         selinux-policy-dev ncat python3-scapy isc-dhcp-server
       m32_dependecies: gcc-multilib
-      CC:          ${{ matrix.compiler }}
-      LIBS:        ${{ matrix.libs }}
-      M32:         ${{ matrix.m32 }}
-      OPTS:        ${{ matrix.opts }}
-      TESTSUITE:   ${{ matrix.testsuite }}
-      ASAN:        ${{ matrix.asan }}
-
-    name: linux ${{ join(matrix.*, ' ') }}
+      CC:          ${{ matrix.cfg.compiler }}
+      LIBS:        ${{ matrix.cfg.libs }}
+      M32:         ${{ matrix.cfg.m32 }}
+      OPTS:        ${{ matrix.cfg.opts }}
+      TESTSUITE:   ${{ matrix.cfg.testsuite }}
+      TESTSUITE_KW:   ${{ matrix.cfg.testsuite_kw }}
+      SANITIZERS:  ${{ matrix.cfg.sanitizers }}
+
+    name: linux ${{ join(matrix.cfg.*, ' ') }}
     runs-on: ubuntu-20.04
 
     strategy:
       fail-fast: false
       matrix:
-        include:
-          - compiler:     gcc
-            opts:         --disable-ssl
-          - compiler:     clang
-            opts:         --disable-ssl
-
-          - compiler:     gcc
-            testsuite:    test
-          - compiler:     gcc
-            testsuite:    system-test
-          - compiler:     clang
-            testsuite:    test
-            asan:         asan
-
-          - compiler:     gcc
-            testsuite:    test
-            libs:         -ljemalloc
-          - compiler:     clang
-            testsuite:    test
-            libs:         -ljemalloc
-
-          - compiler:     gcc
-            m32:          m32
-            opts:         --disable-ssl
+        cfg:
+        - { compiler: gcc, opts: --disable-ssl }
+        - { compiler: clang, opts: --disable-ssl }
+        - { compiler: gcc, testsuite: test, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" }
+        - { compiler: gcc, testsuite: test, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" }
+        - { compiler: gcc, testsuite: test, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" }
+        - { compiler: gcc, testsuite: test, testsuite_kw: "parallelization=no,ovn_monitor_all=no" }
+        - { compiler: gcc, testsuite: test, testsuite_kw: "!ovn-northd" }
+        - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" }
+        - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" }
+        - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" }
+        - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: "parallelization=no,ovn_monitor_all=no" }
+        - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: "!ovn-northd" }
+        - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" }
+        - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" }
+        - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" }
+        - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=no,ovn_monitor_all=no" }
+        - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "!ovn-northd" }
+        - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" }
+        - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" }
+        - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" }
+        - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=no,ovn_monitor_all=no" }
+        - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "!ovn-northd" }
+        - { compiler: gcc, testsuite: system-test, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" }
+        - { compiler: gcc, testsuite: system-test, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" }
+        - { compiler: gcc, testsuite: system-test, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" }
+        - { compiler: gcc, testsuite: system-test, testsuite_kw: "parallelization=no,ovn_monitor_all=no" }
+        - { compiler: gcc, testsuite: system-test, testsuite_kw: "!ovn-northd" }
+        - { compiler: gcc, m32: m32, opts: --disable-ssl }
 
     steps:
     - name: checkout
@@ -85,11 +95,11 @@ jobs:
       run:  sudo apt install -y ${{ env.dependencies }}
 
     - name: install libunbound libunwind
-      if:   matrix.m32 == ''
+      if:   matrix.cfg.m32 == ''
       run:  sudo apt install -y libunbound-dev libunwind-dev
 
     - name: install 32-bit dependencies
-      if:   matrix.m32 != ''
+      if:   matrix.cfg.m32 != ''
       run:  sudo apt install -y ${{ env.m32_dependecies }}
 
     - name: update PATH
@@ -127,7 +137,7 @@ jobs:
       if: failure() || cancelled()
       uses: actions/upload-artifact@v2
       with:
-        name: logs-linux-${{ join(matrix.*, '-') }}
+        name: logs-linux-${{ join(matrix.cfg.*, '-') }}
         path: logs.tgz
 
   build-osx:
diff --git a/AUTHORS.rst b/AUTHORS.rst
index d3747f8..af13515 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -176,6 +176,7 @@ Hui Kang                           kangh@us.ibm.com
 Hyong Youb Kim                     hyonkim@cisco.com
 Ian Campbell                       Ian.Campbell@citrix.com
 Ian Stokes                         ian.stokes@intel.com
+Igor Zhukov                        ivzhukov@sbercloud.ru
 Ihar Hrachyshka                    ihrachys@redhat.com
 Ilya Maximets                      i.maximets@samsung.com
 Iman Tabrizian                     tabrizian@outlook.com
@@ -202,6 +203,7 @@ Jian Li                            lijian@ooclab.com
 Jianbo Liu                         jianbol@mellanox.com
 Jing Ai                            jinga@google.com
 Jiri Benc                          jbenc@redhat.com
+Jochen Friedrich                   jochen@scram.de
 Joe Perches                        joe@perches.com
 Joe Stringer                       joe@ovn.org
 Jonathan Vestin                    jonavest@kau.se
@@ -395,6 +397,7 @@ Vishal Deep Ajmera                 vishal.deep.ajmera@ericsson.com
 Vivien Bernet-Rollande             vbr@soprive.net
 Vladislav Odintsov                 odivlad@gmail.com
 wangqianyu                         wang.qianyu@zte.com.cn
+wangchuanlei                       wangchuanlei@inspur.com
 Wang Sheng-Hui                     shhuiw@gmail.com
 Wang Zhike                         wangzhike@jd.com
 Wei Li                             liw@dtdream.com
diff --git a/Documentation/conf.py b/Documentation/conf.py
index d89c64e..f7eceae 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -58,7 +58,7 @@ author = u'The Open Virtual Network (OVN) Development Community'
 # The full version, including alpha/beta/rc tags.
 release = None
 filename = "../configure.ac"
-with open(filename, 'rU') as f:
+with open(filename, 'r') as f:
     for line in f:
         if 'AC_INIT' in line:
             # Parse "AC_INIT(openvswitch, 2.7.90, bugs@openvswitch.org)":
diff --git a/Documentation/internals/release-process.rst b/Documentation/internals/release-process.rst
index 9db6e74..13a22fa 100644
--- a/Documentation/internals/release-process.rst
+++ b/Documentation/internals/release-process.rst
@@ -34,33 +34,33 @@ contributors, obtained through public discussion on, e.g., ovs-dev or the
 Release Strategy
 ----------------
 
-OVN feature development takes place on the "master" branch. Ordinarily, new
-features are rebased against master and applied directly.  For features that
+OVN feature development takes place on the "main" branch. Ordinarily, new
+features are rebased against main and applied directly.  For features that
 take significant development, sometimes it is more appropriate to merge a
-separate branch into master; please discuss this on ovs-dev in advance.
+separate branch into main; please discuss this on ovs-dev in advance.
 
 The process of making a release has the following stages.  See `Release
 Scheduling`_ for the timing of each stage:
 
-1. "Soft freeze" of the master branch.
+1. "Soft freeze" of the main branch.
 
    During the freeze, we ask committers to refrain from applying patches that
    add new features unless those patches were already being publicly discussed
    and reviewed before the freeze began.  Bug fixes are welcome at any time.
    Please propose and discuss exceptions on ovs-dev.
  
-2. Fork a release branch from master, named for the expected release number,
+2. Fork a release branch from main, named for the expected release number,
    e.g. "branch-2019.10" for the branch that will yield OVN 2019.10.x.
 
    Release branches are intended for testing and stabilization.  At this stage
    and in later stages, they should receive only bug fixes, not new features.
    Bug fixes applied to release branches should be backports of corresponding
-   bug fixes to the master branch, except for bugs present only on release
+   bug fixes to the main branch, except for bugs present only on release
    branches (which are rare in practice).
 
    At this stage, sometimes there can be exceptions to the rule that a release
    branch receives only bug fixes.  Like bug fixes, new features on release
-   branches should be backports of the corresponding commits on the master
+   branches should be backports of the corresponding commits on the main
    branch.  Features to be added to release branches should be limited in scope
    and risk and discussed on ovs-dev before creating the branch.
 
@@ -97,10 +97,10 @@ one year of critical bug fixes and security fixes.
 Release Numbering
 -----------------
 
-The version number on master should normally end in .90.  This indicates that
+The version number on main should normally end in .90.  This indicates that
 the OVN version is "almost" the next version to branch.
 
-Forking master into branch-x.y requires two commits to master.  The first is
+Forking main into branch-x.y requires two commits to main.  The first is
 titled "Prepare for x.y.0" and increments the version number to x.y.  This is
 the initial commit on branch-x.y.  The second is titled "Prepare for post-x.y.0
 (x.y.90)" and increments the version number to x.y.90.
@@ -124,9 +124,9 @@ approximate:
 +---------------+---------------------+--------------------------------------+
 | T             | Dec 1, Mar 1, ...   | Begin x.y release cycle              |
 +---------------+---------------------+--------------------------------------+
-| T + 2         | Feb 1, May 1, ...   | "Soft freeze" master for x.y release |
+| T + 2         | Feb 1, May 1, ...   | "Soft freeze" main for x.y release   |
 +---------------+---------------------+--------------------------------------+
-| T + 2.5       | Feb 15, May 15, ... | Fork branch-x.y from master          |
+| T + 2.5       | Feb 15, May 15, ... | Fork branch-x.y from main            |
 +---------------+---------------------+--------------------------------------+
 | T + 3         | Mar 1, Jun 1, ...   | Release version x.y.0                |
 +---------------+---------------------+--------------------------------------+
@@ -134,34 +134,34 @@ approximate:
 Release Calendar
 ----------------
 
-The 2021 timetable is shown below. Note that these dates are not set in stone.
+The 2022 timetable is shown below. Note that these dates are not set in stone.
 If extenuating circumstances arise, a release may be delayed from its target
 date.
 
 +---------+-------------+-----------------+---------+
 | Release | Soft Freeze | Branch Creation | Release |
 +---------+-------------+-----------------+---------+
-| 21.03.0 | Feb 5       | Feb 19          | Mar 5   |
+| 22.03.0 | Feb 4       | Feb 18          | Mar 4   |
 +---------+-------------+-----------------+---------+
-| 21.06.0 | May 7       | May 21          | Jun 4   |
+| 22.06.0 | May 6       | May 20          | Jun 3   |
 +---------+-------------+-----------------+---------+
-| 21.09.0 | Aug 6       | Aug 20          | Sep 3   |
+| 22.09.0 | Aug 5       | Aug 19          | Sep 2   |
 +---------+-------------+-----------------+---------+
-| 21.12.0 | Nov 5       | Nov 19          | Dec 3   |
+| 22.12.0 | Nov 4       | Nov 18          | Dec 2   |
 +---------+-------------+-----------------+---------+
 
-Below is the 2022 timetable
+Below is the 2023 timetable
 
 +---------+-------------+-----------------+---------+
 | Release | Soft Freeze | Branch Creation | Release |
 +---------+-------------+-----------------+---------+
-| 22.03.0 | Feb 4       | Feb 18          | Mar 4   |
+| 23.03.0 | Feb 3       | Feb 17          | Mar 3   |
 +---------+-------------+-----------------+---------+
-| 22.06.0 | May 6       | May 20          | Jun 3   |
+| 23.06.0 | May 5       | May 19          | Jun 2   |
 +---------+-------------+-----------------+---------+
-| 22.09.0 | Aug 5       | Aug 19          | Sep 2   |
+| 23.09.0 | Aug 4       | Aug 18          | Sep 1   |
 +---------+-------------+-----------------+---------+
-| 22.12.0 | Nov 4       | Nov 18          | Dec 2   |
+| 23.12.0 | Nov 3       | Nov 17          | Dec 1   |
 +---------+-------------+-----------------+---------+
 
 Contact
diff --git a/Documentation/tutorials/ovn-ipsec.rst b/Documentation/tutorials/ovn-ipsec.rst
index 305dd56..aea7aa3 100644
--- a/Documentation/tutorials/ovn-ipsec.rst
+++ b/Documentation/tutorials/ovn-ipsec.rst
@@ -93,6 +93,29 @@ database to false::
        # systemctl enable firewalld
        # firewall-cmd --permanent --add-service ipsec
 
+Enforcing IPsec NAT-T UDP encapsulation
+---------------------------------------
+
+In specific situations it may be necessary to enforce NAT-T (RFC3948) UDP
+encapsulation unconditionally, bypassing the normal NAT detection mechanism.
+This can be required, for example, in environments where firewalls drop ESP
+traffic but NAT-T detection (RFC3947) fails because the packets are not
+otherwise subject to NAT.
+In such scenarios, UDP encapsulation can be enforced as follows.
+
+For libreswan backends::
+
+    $ ovn-nbctl set nb_global . options:ipsec_encapsulation=true
+
+For strongswan backends::
+
+    $ ovn-nbctl set nb_global . options:ipsec_forceencaps=true
+
+.. note::
+
+   Support for this feature is only available when OVN is used together with
+   OVS releases that accept IPsec custom tunnel options.
+
 Troubleshooting
 ---------------
 
@@ -119,6 +142,7 @@ For example::
    Remote name:    host_2
    CA cert:        /path/to/cacert.pem
    PSK:            None
+   Custom Options: {'encapsulation': 'yes'} <--- Whether NAT-T is enforced
    Ofport:         2          <--- Whether ovs-vswitchd has assigned Ofport
                                    number to this Tunnel Port
    CFM state:      Disabled     <--- Whether CFM declared this tunnel healthy
diff --git a/NEWS b/NEWS
index 1664707..ef6a99f 100644
--- a/NEWS
+++ b/NEWS
@@ -1,8 +1,56 @@
-OVN v22.03.1 - 03 Jun 2022
+OVN v22.09.0 - 16 Sep 2022
 --------------------------
-   - Bug fixes
+  - ovn-controller: Add a configuration knob, through the OVS external-id
+    "ovn-encap-df_default", to enable or disable the tunnel DF flag.
+  - Add the LSP option "localnet_learn_fdb", which allows localnet
+    ports to learn MAC addresses and store them in the FDB table.
+  - northd: Introduce the capability to automatically deploy a load balancer
+    on each logical switch connected to a logical router on which the CMS has
+    installed the load balancer.  To enable the feature, the CMS has to set
+    install_ls_lb_from_router to true in the options column of the NB_Global
+    table.
+  - Added nb_global IPsec options ipsec_encapsulation=true (libreswan)
+    and ipsec_forceencaps=true (strongswan) to unconditionally enforce
+    NAT-T UDP encapsulation. Requires OVS support for IPsec custom tunnel
+    options (which is available in OVS 3.0).
+  - Removed possibility of disabling logical datapath groups.
+  - Removed the copying of SB's Chassis other_config into external_ids.
+  - Added a MAC binding aging mechanism, which is disabled by default.
+    It can be enabled per logical router with the option
+    "mac_binding_age_threshold".
+  - When creating a Load Balancer on an LR with a VIP that matches any of the
+    LR's LRP IPs, there is no longer any need to create an SNAT entry.  Such
+    traffic destined to the LRP IP is no longer dropped.
+  - Bump the Python version required for building OVN to 3.6.
+
+OVN v22.06.0 - 03 Jun 2022
+--------------------------
+  - Support IGMP and MLD snooping on transit logical switches that connect
+    different OVN Interconnection availability zones.
   - Replaced the usage of masked ct_label by ct_mark in most cases to work
     better with hardware-offloading.
+  - Support NAT for logical routers with multiple distributed gateway ports.
+  - Add global option (NB_Global.options:default_acl_drop) to enable
+    implicit drop behavior on logical switches with ACLs applied.
+  - Support LSP.options:qos_min_rate to guarantee a minimum available
+    bandwidth for a logical port.
+  - Add NB.Load_Balancer.options:neighbor_responder to allow the CMS to
+    explicitly request routers to reply to any ARP/ND request for a VIP
+    (when set to "all") and only for reachable VIPs (when set to "reachable"
+    or by default).
+  - Changed the way to enable northd parallelization.
+    Removed support for:
+    - use_parallel_build in NBDB.
+    - --dummy-numa in northd cmdline.
+    Added support for:
+    - --n-threads=<N> in northd cmdline.
+    - set-n-threads/get-n-threads unixctls.
+    - --ovn-northd-n-threads command line argument in ovn-ctl.
+  - Added support for setting the "next server" IP in the DHCP header
+    using private DHCP option 253 in the native OVN DHCPv4 responder.
+  - Support list of chassis for Logical_Switch_Port:options:requested-chassis.
+  - Support Logical_Switch_Port:options:activation-strategy for live migration
+    scenarios.
 
 OVN v22.03.0 - 11 Mar 2022
 --------------------------
diff --git a/TODO.rst b/TODO.rst
index 618ea48..1273828 100644
--- a/TODO.rst
+++ b/TODO.rst
@@ -139,10 +139,6 @@ OVN To-do List
 
   * Packaging for RHEL, Debian, etc.
 
-* ovn-controller: Stop copying the local OVS configuration into the
-  Chassis external_ids column (same for the "is-remote" configuration from
-  ovn-ic) a few releases after the 20.06 version (21.06 maybe ?).
-
 * ovn-controller: Remove backwards compatibility for Southbound DB Port_Group
   names in expr.c a few releases after the 20.09 version. Right now
   ovn-controller maintains backwards compatibility when connecting to a
@@ -170,3 +166,16 @@ OVN To-do List
   * physical.c has a global simap -localvif_to_ofport which stores the
     local OVS interfaces and the ofport numbers. Move this to the engine data
     of the engine data node - ed_type_pflow_output.
+
+* ovn-northd parallel logical flow processing
+
+  * Multi-threaded logical flow computation was optimized for the case
+    when datapath groups are disabled.  Datapath groups are always enabled
+    now, so northd parallel processing should be revisited.
+
+* ovn-controller daemon module
+
+  * Dumitru Ceara: Add a new module, e.g. ovn/lib/daemon-ovn.c, that wraps
+    OVS' daemonize_start() call and initializes the additional things, like
+    the unixctl commands.  Or, we should move APIs such as
+    daemon_started_recently() to OVS's lib/daemon.
diff --git a/configure.ac b/configure.ac
index 70f86e1..765aacb 100644
--- a/configure.ac
+++ b/configure.ac
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 AC_PREREQ(2.63)
-AC_INIT(ovn, 22.03.1, bugs@openvswitch.org)
+AC_INIT(ovn, 22.09.0, bugs@openvswitch.org)
 AC_CONFIG_MACRO_DIR([m4])
 AC_CONFIG_AUX_DIR([build-aux])
 AC_CONFIG_HEADERS([config.h])
diff --git a/controller-vtep/binding.c b/controller-vtep/binding.c
index 1ee52b5..0d0c1ab 100644
--- a/controller-vtep/binding.c
+++ b/controller-vtep/binding.c
@@ -207,8 +207,8 @@ binding_run(struct controller_vtep_ctx *ctx)
         }
     }
 
-    struct shash_node *iter, *next;
-    SHASH_FOR_EACH_SAFE (iter, next, &ps_map) {
+    struct shash_node *iter;
+    SHASH_FOR_EACH_SAFE (iter, &ps_map) {
         struct ps *ps = iter->data;
         struct shash_node *node;
 
diff --git a/controller-vtep/gateway.c b/controller-vtep/gateway.c
index 9fbfc03..2a8714e 100644
--- a/controller-vtep/gateway.c
+++ b/controller-vtep/gateway.c
@@ -135,11 +135,11 @@ revalidate_gateway(struct controller_vtep_ctx *ctx)
         simap_put(&gw_chassis_map, pswitch->name, gw_reval_seq);
     }
 
-    struct simap_node *iter, *next;
+    struct simap_node *iter;
     /* For 'gw_node' in 'gw_chassis_map' whose data is not
      * 'gw_reval_seq', it means the corresponding physical switch no
      * longer exist.  So, garbage collects them. */
-    SIMAP_FOR_EACH_SAFE (iter, next, &gw_chassis_map) {
+    SIMAP_FOR_EACH_SAFE (iter, &gw_chassis_map) {
         if (iter->data != gw_reval_seq) {
             const struct sbrec_chassis *chassis_rec;
 
diff --git a/controller-vtep/vtep.c b/controller-vtep/vtep.c
index ecca00b..7f5e1d6 100644
--- a/controller-vtep/vtep.c
+++ b/controller-vtep/vtep.c
@@ -311,6 +311,9 @@ vtep_macs_run(struct ovsdb_idl_txn *vtep_idl_txn, struct shash *ucast_macs_rmts,
                     hash_uint64((uint64_t) vtep_ls->tunnel_key[0]));
     }
 
+    const char *dp, *peer;
+    const struct sbrec_port_binding *lrp_pb, *peer_pb;
+
     SHASH_FOR_EACH (node, non_vtep_pbs) {
         const struct sbrec_port_binding *port_binding_rec = node->data;
         const struct sbrec_chassis *chassis_rec;
@@ -324,7 +327,27 @@ vtep_macs_run(struct ovsdb_idl_txn *vtep_idl_txn, struct shash *ucast_macs_rmts,
             continue;
         }
 
-        tnl_key = port_binding_rec->datapath->tunnel_key;
+        if (!strcmp(port_binding_rec->type, "chassisredirect")) {
+            dp = smap_get(&port_binding_rec->options, "distributed-port");
+            lrp_pb = shash_find_data(non_vtep_pbs, dp);
+            if (!lrp_pb) {
+                continue;
+            }
+
+            peer = smap_get(&lrp_pb->options, "peer");
+            if (!peer) {
+                continue;
+            }
+
+            peer_pb = shash_find_data(non_vtep_pbs, peer);
+            if (!peer_pb) {
+                continue;
+            }
+            tnl_key = peer_pb->datapath->tunnel_key;
+        } else {
+            tnl_key = port_binding_rec->datapath->tunnel_key;
+        }
+
         HMAP_FOR_EACH_WITH_HASH (ls_node, hmap_node,
                                  hash_uint64((uint64_t) tnl_key),
                                  &ls_map) {
@@ -426,8 +449,8 @@ vtep_macs_run(struct ovsdb_idl_txn *vtep_idl_txn, struct shash *ucast_macs_rmts,
     SHASH_FOR_EACH (node, ucast_macs_rmts) {
         vteprec_ucast_macs_remote_delete(node->data);
     }
-    struct ls_hash_node *iter, *next;
-    HMAP_FOR_EACH_SAFE (iter, next, hmap_node, &ls_map) {
+    struct ls_hash_node *iter;
+    HMAP_FOR_EACH_SAFE (iter, hmap_node, &ls_map) {
         struct vtep_rec_physical_locator_list_entry *ploc_entry;
         vtep_update_mmr(vtep_idl_txn, &iter->locators_list,
                         iter->vtep_ls, iter->mmr_ext);
@@ -454,7 +477,7 @@ vtep_macs_run(struct ovsdb_idl_txn *vtep_idl_txn, struct shash *ucast_macs_rmts,
 static bool
 vtep_lswitch_cleanup(struct ovsdb_idl *vtep_idl)
 {
-   const struct vteprec_logical_switch *vtep_ls;
+    const struct vteprec_logical_switch *vtep_ls;
     bool done = true;
 
     VTEPREC_LOGICAL_SWITCH_FOR_EACH (vtep_ls, vtep_idl) {
@@ -572,9 +595,11 @@ vtep_run(struct controller_vtep_ctx *ctx)
     /* Collects and classifies 'Port_Binding's. */
     SBREC_PORT_BINDING_FOR_EACH(port_binding_rec, ctx->ovnsb_idl) {
         struct shash *target =
-            !strcmp(port_binding_rec->type, "vtep") ? &vtep_pbs : &non_vtep_pbs;
+            !strcmp(port_binding_rec->type, "vtep") ? &vtep_pbs
+                                                    : &non_vtep_pbs;
 
-        if (!port_binding_rec->chassis) {
+        if (!port_binding_rec->chassis &&
+            strcmp(port_binding_rec->type, "patch")) {
             continue;
         }
         shash_add(target, port_binding_rec->logical_port, port_binding_rec);
diff --git a/controller/binding.c b/controller/binding.c
index 9eaaddb..8f6b4b1 100644
--- a/controller/binding.c
+++ b/controller/binding.c
@@ -48,9 +48,71 @@ VLOG_DEFINE_THIS_MODULE(binding);
 
 #define OVN_QOS_TYPE "linux-htb"
 
+#define CLAIM_TIME_THRESHOLD_MS 500
+
+struct claimed_port {
+    long long int last_claimed;
+};
+
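+/* Map of logical port name to 'struct claimed_port', recording when this
+ * chassis last claimed the port.  Together with '_postponed_ports' it is
+ * used to postpone claims that follow a previous claim within
+ * CLAIM_TIME_THRESHOLD_MS. */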
+static struct shash _claimed_ports = SHASH_INITIALIZER(&_claimed_ports);
+static struct sset _postponed_ports = SSET_INITIALIZER(&_postponed_ports);
+
+struct sset *
+get_postponed_ports(void)
+{
+    return &_postponed_ports;
+}
+
+static long long int
+get_claim_timestamp(const char *port_name)
+{
+    struct claimed_port *cp = shash_find_data(&_claimed_ports, port_name);
+    return cp ? cp->last_claimed : 0;
+}
+
+static void
+register_claim_timestamp(const char *port_name, long long int t)
+{
+    struct claimed_port *cp = shash_find_data(&_claimed_ports, port_name);
+    if (!cp) {
+        cp = xzalloc(sizeof *cp);
+        shash_add(&_claimed_ports, port_name, cp);
+    }
+    cp->last_claimed = t;
+}
+
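+/* Forgets claim timestamps that are at least 5 * CLAIM_TIME_THRESHOLD_MS
+ * old; such entries can no longer cause a claim to be postponed. */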
+static void
+cleanup_claimed_port_timestamps(void)
+{
+    long long int now = time_msec();
+    struct shash_node *node;
+    SHASH_FOR_EACH_SAFE (node, &_claimed_ports) {
+        struct claimed_port *cp = (struct claimed_port *) node->data;
+        if (now - cp->last_claimed >= 5 * CLAIM_TIME_THRESHOLD_MS) {
+            free(cp);
+            shash_delete(&_claimed_ports, node);
+        }
+    }
+}
+
+/* Schedule any pending binding work.  Runs within the main ovn-controller
+ * thread context. */
+void
+binding_wait(void)
+{
+    const char *port_name;
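+    /* Wake up the poll loop once the postpone interval of any postponed
+     * port expires, so that its claim can be retried. */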
+    SSET_FOR_EACH (port_name, &_postponed_ports) {
+        long long int t = get_claim_timestamp(port_name);
+        if (t) {
+            poll_timer_wait_until(t + CLAIM_TIME_THRESHOLD_MS);
+        }
+    }
+}
+
 struct qos_queue {
     struct hmap_node node;
     uint32_t queue_id;
+    uint32_t min_rate;
     uint32_t max_rate;
     uint32_t burst;
 };
@@ -88,17 +150,19 @@ static void update_lport_tracking(const struct sbrec_port_binding *pb,
 static void
 get_qos_params(const struct sbrec_port_binding *pb, struct hmap *queue_map)
 {
+    uint32_t min_rate = smap_get_int(&pb->options, "qos_min_rate", 0);
     uint32_t max_rate = smap_get_int(&pb->options, "qos_max_rate", 0);
     uint32_t burst = smap_get_int(&pb->options, "qos_burst", 0);
     uint32_t queue_id = smap_get_int(&pb->options, "qdisc_queue_id", 0);
 
-    if ((!max_rate && !burst) || !queue_id) {
+    if ((!min_rate && !max_rate && !burst) || !queue_id) {
         /* Qos is not configured for this port. */
         return;
     }
 
     struct qos_queue *node = xzalloc(sizeof *node);
     hmap_insert(queue_map, &node->node, hash_int(queue_id, 0));
+    node->min_rate = min_rate;
     node->max_rate = max_rate;
     node->burst = burst;
     node->queue_id = queue_id;
@@ -238,9 +302,12 @@ setup_qos(const char *egress_iface, struct hmap *queue_map)
         HMAP_FOR_EACH_WITH_HASH (sb_info, node, hash_int(queue_id, 0),
                                  queue_map) {
             is_queue_needed = true;
-            if (sb_info->max_rate ==
+            if (sb_info->min_rate ==
+                smap_get_int(&queue_details, "min-rate", 0)
+                && sb_info->max_rate ==
                 smap_get_int(&queue_details, "max-rate", 0)
-                && sb_info->burst == smap_get_int(&queue_details, "burst", 0)) {
+                && sb_info->burst ==
+                smap_get_int(&queue_details, "burst", 0)) {
                 /* This queue is consistent. */
                 hmap_insert(&consistent_queues, &sb_info->node,
                             hash_int(queue_id, 0));
@@ -265,6 +332,7 @@ setup_qos(const char *egress_iface, struct hmap *queue_map)
         }
 
         smap_clear(&queue_details);
+        smap_add_format(&queue_details, "min-rate", "%d", sb_info->min_rate);
         smap_add_format(&queue_details, "max-rate", "%d", sb_info->max_rate);
         smap_add_format(&queue_details, "burst", "%d", sb_info->burst);
         error = netdev_set_queue(netdev_phy, sb_info->queue_id,
@@ -380,6 +448,23 @@ update_ld_external_ports(const struct sbrec_port_binding *binding_rec,
 }
 
 static void
+update_ld_multichassis_ports(const struct sbrec_port_binding *binding_rec,
+                             struct hmap *local_datapaths)
+{
+    struct local_datapath *ld = get_local_datapath(
+        local_datapaths, binding_rec->datapath->tunnel_key);
+    if (!ld) {
+        return;
+    }
+    if (binding_rec->additional_chassis) {
+        add_local_datapath_multichassis_port(ld, binding_rec->logical_port,
+                                             binding_rec);
+    } else {
+        remove_local_datapath_multichassis_port(ld, binding_rec->logical_port);
+    }
+}
+
+static void
 update_ld_localnet_port(const struct sbrec_port_binding *binding_rec,
                         struct shash *bridge_mappings,
                         struct sset *egress_ifaces,
@@ -481,60 +566,60 @@ remove_related_lport(const struct sbrec_port_binding *pb,
     }
 }
 
+/*
+ * Update local_datapath peers when the port type changes
+ * and remove ports that are no longer relevant from the peers list.
+ */
+static void
+update_ld_peers(const struct sbrec_port_binding *pb,
+                 struct hmap *local_datapaths)
+{
+    struct local_datapath *ld =
+        get_local_datapath(local_datapaths, pb->datapath->tunnel_key);
+
+    if (!ld) {
+        return;
+    }
+
+    /*
+     * This handles cases where the pb type was explicitly
+     * changed from router type to any other port type and
+     * removes it from the ld peers list.
+     */
+    enum en_lport_type type = get_lport_type(pb);
+    int num_peers = ld->n_peer_ports;
+    if (type != LP_PATCH) {
+        remove_local_datapath_peer_port(pb, ld, local_datapaths);
+        if (num_peers != ld->n_peer_ports) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+            VLOG_DBG_RL(&rl,
+                        "removing lport %s from the ld peers list",
+                        pb->logical_port);
+        }
+    }
+}
+
 static void
 delete_active_pb_ras_pd(const struct sbrec_port_binding *pb,
                         struct shash *ras_pd_map)
 {
-    struct pb_ld_binding *ras_pd =
-        shash_find_and_delete(ras_pd_map, pb->logical_port);
-
-    free(ras_pd);
+    shash_find_and_delete(ras_pd_map, pb->logical_port);
 }
 
 static void
 update_active_pb_ras_pd(const struct sbrec_port_binding *pb,
-                        struct hmap *local_datapaths,
                         struct shash *map, const char *conf)
 {
     bool ras_pd_conf = smap_get_bool(&pb->options, conf, false);
     struct shash_node *iter = shash_find(map, pb->logical_port);
-    struct pb_ld_binding *ras_pd = iter ? iter->data : NULL;
 
-    if (iter && !ras_pd_conf) {
+    if (!ras_pd_conf && iter) {
         shash_delete(map, iter);
-        free(ras_pd);
-        return;
-    }
-    if (ras_pd_conf) {
-        if (!ras_pd) {
-            ras_pd = xzalloc(sizeof *ras_pd);
-            ras_pd->pb = pb;
-            shash_add(map, pb->logical_port, ras_pd);
-        }
-        ovs_assert(ras_pd);
-        ras_pd->ld = get_local_datapath(local_datapaths,
-                                        pb->datapath->tunnel_key);
+    } else if (ras_pd_conf && !iter) {
+        shash_add(map, pb->logical_port, pb);
     }
 }
 
-/* This structure represents a logical port (or port binding)
- * which is associated with 'struct local_binding'.
- *
- * An instance of 'struct binding_lport' is created for a logical port
- *  - If the OVS interface's iface-id corresponds to the logical port.
- *  - If it is a container or virtual logical port and its parent
- *    has a 'local binding'.
- *
- */
-struct binding_lport {
-    struct ovs_list list_node; /* Node in local_binding.binding_lports. */
-
-    char *name;
-    const struct sbrec_port_binding *pb;
-    struct local_binding *lbinding;
-    enum en_lport_type type;
-};
-
 static struct local_binding *local_binding_create(
     const char *name, const struct ovsrec_interface *);
 static void local_binding_add(struct shash *local_bindings,
@@ -577,6 +662,11 @@ static const struct sbrec_port_binding *binding_lport_get_parent_pb(
     struct binding_lport *b_lprt);
 static struct binding_lport *binding_lport_check_and_cleanup(
     struct binding_lport *, struct shash *b_lports);
+static bool binding_lport_has_port_sec_changed(
+    struct binding_lport *, const struct sbrec_port_binding *);
+static void binding_lport_clear_port_sec(struct binding_lport *);
+static bool binding_lport_update_port_sec(
+    struct binding_lport *, const struct sbrec_port_binding *);
 
 static char *get_lport_type_str(enum en_lport_type lport_type);
 static bool ovs_iface_matches_lport_iface_id_ver(
@@ -607,15 +697,15 @@ local_binding_data_init(struct local_binding_data *lbinding_data)
 void
 local_binding_data_destroy(struct local_binding_data *lbinding_data)
 {
-    struct shash_node *node, *next;
+    struct shash_node *node;
 
-    SHASH_FOR_EACH_SAFE (node, next, &lbinding_data->lports) {
+    SHASH_FOR_EACH_SAFE (node, &lbinding_data->lports) {
         struct binding_lport *b_lport = node->data;
         binding_lport_destroy(b_lport);
         shash_delete(&lbinding_data->lports, node);
     }
 
-    SHASH_FOR_EACH_SAFE (node, next, &lbinding_data->bindings) {
+    SHASH_FOR_EACH_SAFE (node, &lbinding_data->bindings) {
         struct local_binding *lbinding = node->data;
         local_binding_destroy(lbinding, &lbinding_data->lports);
         shash_delete(&lbinding_data->bindings, node);
@@ -650,11 +740,17 @@ local_binding_get_lport_ofport(const struct shash *local_bindings,
 }
 
 bool
-local_binding_is_up(struct shash *local_bindings, const char *pb_name)
+local_binding_is_up(struct shash *local_bindings, const char *pb_name,
+                    const struct sbrec_chassis *chassis_rec)
 {
     struct local_binding *lbinding =
         local_binding_find(local_bindings, pb_name);
     struct binding_lport *b_lport = local_binding_get_primary_lport(lbinding);
+
+    if (b_lport && b_lport->pb->chassis != chassis_rec) {
+        return false;
+    }
+
     if (lbinding && b_lport && lbinding->iface) {
         if (b_lport->pb->n_up && !b_lport->pb->up[0]) {
             return false;
@@ -666,13 +762,23 @@ local_binding_is_up(struct shash *local_bindings, const char *pb_name)
 }
 
 bool
-local_binding_is_down(struct shash *local_bindings, const char *pb_name)
+local_binding_is_down(struct shash *local_bindings, const char *pb_name,
+                      const struct sbrec_chassis *chassis_rec)
 {
     struct local_binding *lbinding =
         local_binding_find(local_bindings, pb_name);
 
     struct binding_lport *b_lport = local_binding_get_primary_lport(lbinding);
 
+    if (b_lport) {
+        if (b_lport->pb->chassis == chassis_rec) {
+            return false;
+        } else if (b_lport->pb->chassis) {
+            VLOG_DBG("lport %s already claimed by other chassis",
+                     b_lport->pb->logical_port);
+        }
+    }
+
     if (!lbinding) {
         return true;
     }
@@ -691,6 +797,7 @@ local_binding_is_down(struct shash *local_bindings, const char *pb_name)
 
 void
 local_binding_set_up(struct shash *local_bindings, const char *pb_name,
+                     const struct sbrec_chassis *chassis_rec,
                      const char *ts_now_str, bool sb_readonly,
                      bool ovs_readonly)
 {
@@ -710,8 +817,8 @@ local_binding_set_up(struct shash *local_bindings, const char *pb_name,
                                                     ts_now_str);
     }
 
-    if (!sb_readonly && lbinding && b_lport && b_lport->pb->n_up
-            && !b_lport->pb->up[0]) {
+    if (!sb_readonly && lbinding && b_lport && b_lport->pb->n_up &&
+            !b_lport->pb->up[0] && b_lport->pb->chassis == chassis_rec) {
         VLOG_INFO("Setting lport %s up in Southbound", pb_name);
         binding_lport_set_up(b_lport, sb_readonly);
         LIST_FOR_EACH (b_lport, list_node, &lbinding->binding_lports) {
@@ -722,6 +829,7 @@ local_binding_set_up(struct shash *local_bindings, const char *pb_name,
 
 void
 local_binding_set_down(struct shash *local_bindings, const char *pb_name,
+                       const struct sbrec_chassis *chassis_rec,
                        bool sb_readonly, bool ovs_readonly)
 {
     struct local_binding *lbinding =
@@ -736,7 +844,8 @@ local_binding_set_down(struct shash *local_bindings, const char *pb_name,
                                                     OVN_INSTALLED_EXT_ID);
     }
 
-    if (!sb_readonly && b_lport && b_lport->pb->n_up && b_lport->pb->up[0]) {
+    if (!sb_readonly && b_lport && b_lport->pb->n_up && b_lport->pb->up[0] &&
+            (!b_lport->pb->chassis || b_lport->pb->chassis == chassis_rec)) {
         VLOG_INFO("Setting lport %s down in Southbound", pb_name);
         binding_lport_set_down(b_lport, sb_readonly);
         LIST_FOR_EACH (b_lport, list_node, &lbinding->binding_lports) {
@@ -867,7 +976,7 @@ get_lport_type_str(enum en_lport_type lport_type)
     case LP_CHASSISREDIRECT:
         return "CHASSISREDIRECT";
     case LP_L3GATEWAY:
-        return "L3GATEWAT";
+        return "L3GATEWAY";
     case LP_LOCALNET:
         return "PATCH";
     case LP_LOCALPORT:
@@ -887,37 +996,187 @@ get_lport_type_str(enum en_lport_type lport_type)
     OVS_NOT_REACHED();
 }
 
-/* For newly claimed ports, if 'notify_up' is 'false':
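+/* Sets ('is_set' == true) or clears ('is_set' == false) 'chassis_rec' as the
+ * chassis of 'pb' in the Southbound database, logging ownership changes. */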
+void
+set_pb_chassis_in_sbrec(const struct sbrec_port_binding *pb,
+                        const struct sbrec_chassis *chassis_rec,
+                        bool is_set)
+{
+    if (pb->chassis != chassis_rec) {
+        if (is_set) {
+            if (pb->chassis) {
+                VLOG_INFO("Changing chassis for lport %s from %s to %s.",
+                          pb->logical_port, pb->chassis->name,
+                          chassis_rec->name);
+            } else {
+                VLOG_INFO("Claiming lport %s for this chassis.",
+                          pb->logical_port);
+            }
+            for (int i = 0; i < pb->n_mac; i++) {
+                VLOG_INFO("%s: Claiming %s", pb->logical_port, pb->mac[i]);
+            }
+            sbrec_port_binding_set_chassis(pb, chassis_rec);
+        }
+    } else if (!is_set) {
+        sbrec_port_binding_set_chassis(pb, NULL);
+    }
+}
+
+bool
+local_bindings_pb_chassis_is_set(struct shash *local_bindings,
+                                 const char *pb_name,
+                                 const struct sbrec_chassis *chassis_rec)
+{
+    struct local_binding *lbinding =
+        local_binding_find(local_bindings, pb_name);
+    struct binding_lport *b_lport = local_binding_get_primary_lport(lbinding);
+
+    if (b_lport && b_lport->pb && b_lport->pb->chassis == chassis_rec) {
+        return true;
+    }
+    return false;
+}
+
+void
+local_binding_set_pb(struct shash *local_bindings, const char *pb_name,
+                     const struct sbrec_chassis *chassis_rec,
+                     struct hmap *tracked_datapaths, bool is_set)
+{
+    struct local_binding *lbinding =
+        local_binding_find(local_bindings, pb_name);
+    struct binding_lport *b_lport = local_binding_get_primary_lport(lbinding);
+
+    if (b_lport) {
+        set_pb_chassis_in_sbrec(b_lport->pb, chassis_rec, is_set);
+        if (tracked_datapaths) {
+            update_lport_tracking(b_lport->pb, tracked_datapaths, true);
+        }
+    }
+}
+
+/* For newly claimed ports:
  * - set the 'pb.up' field to true if 'pb' has no 'parent_pb'.
  * - set the 'pb.up' field to true if 'parent_pb.up' is 'true' (e.g., for
  *   container and virtual ports).
- * Otherwise request a notification to be sent when the OVS flows
- * corresponding to 'pb' have been installed.
+ *
+ * Returns false if lport is not claimed due to 'sb_readonly'.
+ * Returns true otherwise.
  *
  * Note:
- *   Updates (directly or through a notification) the 'pb->up' field only if
- *   it's explicitly set to 'false'.
+ *   Updates the 'pb->up' field only if it's explicitly set to 'false'.
  *   This is to ensure compatibility with older versions of ovn-northd.
  */
-static void
+static bool
 claimed_lport_set_up(const struct sbrec_port_binding *pb,
                      const struct sbrec_port_binding *parent_pb,
-                     const struct sbrec_chassis *chassis_rec,
-                     bool notify_up, struct if_status_mgr *if_mgr)
+                     bool sb_readonly)
 {
-    if (!notify_up) {
-        bool up = true;
-        if (!parent_pb || (parent_pb->n_up && parent_pb->up[0])) {
+    /* When notify_up is false in claim_lport(), no state is created
+     * by if_status_mgr.  In such cases, return false (i.e., trigger a
+     * recompute) if we can't update the SB database because it is read-only.
+     */
+    bool up = true;
+    if (!parent_pb || (parent_pb->n_up && parent_pb->up[0])) {
+        if (!sb_readonly) {
             if (pb->n_up) {
                 sbrec_port_binding_set_up(pb, &up, 1);
             }
+        } else if (pb->n_up && !pb->up[0]) {
+            return false;
         }
-        return;
     }
+    return true;
+}
+
+typedef void (*set_func)(const struct sbrec_port_binding *pb,
+                         const struct sbrec_encap *);
 
-    if (pb->chassis != chassis_rec || (pb->n_up && !pb->up[0])) {
-        if_status_mgr_claim_iface(if_mgr, pb->logical_port);
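+/* Keeps the port binding's 'encap' column in sync with the encap that
+ * matches this chassis and interface.  Returns false if an update is needed
+ * but the SB database is read-only. */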
+static bool
+update_port_encap_if_needed(const struct sbrec_port_binding *pb,
+                            const struct sbrec_chassis *chassis_rec,
+                            const struct ovsrec_interface *iface_rec,
+                            bool sb_readonly)
+{
+    const struct sbrec_encap *encap_rec =
+        sbrec_get_port_encap(chassis_rec, iface_rec);
+    if ((encap_rec && pb->encap != encap_rec) ||
+        (!encap_rec && pb->encap)) {
+        if (sb_readonly) {
+            return false;
+        }
+        sbrec_port_binding_set_encap(pb, encap_rec);
+    }
+    return true;
+}
+
+static void
+remove_additional_encap_for_chassis(const struct sbrec_port_binding *pb,
+                                    const struct sbrec_chassis *chassis_rec)
+{
+    for (size_t i = 0; i < pb->n_additional_encap; i++) {
+        if (!strcmp(pb->additional_encap[i]->chassis_name,
+                    chassis_rec->name)) {
+            sbrec_port_binding_update_additional_encap_delvalue(
+                pb, pb->additional_encap[i]);
+        }
+    }
+}
+
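+/* Adds the encap matching this chassis and interface to the port binding's
+ * 'additional_encap' column if it is not already there.  Returns false if an
+ * update is needed but the SB database is read-only. */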
+static bool
+update_port_additional_encap_if_needed(
+    const struct sbrec_port_binding *pb,
+    const struct sbrec_chassis *chassis_rec,
+    const struct ovsrec_interface *iface_rec,
+    bool sb_readonly)
+{
+    const struct sbrec_encap *encap_rec =
+        sbrec_get_port_encap(chassis_rec, iface_rec);
+    if (encap_rec) {
+        for (size_t i = 0; i < pb->n_additional_encap; i++) {
+            if (pb->additional_encap[i] == encap_rec) {
+                return true;
+            }
+        }
+        if (sb_readonly) {
+            return false;
+        }
+        sbrec_port_binding_update_additional_encap_addvalue(pb, encap_rec);
     }
+    return true;
+}
+
+bool
+is_additional_chassis(const struct sbrec_port_binding *pb,
+                      const struct sbrec_chassis *chassis_rec)
+{
+    for (size_t i = 0; i < pb->n_additional_chassis; i++) {
+        if (pb->additional_chassis[i] == chassis_rec) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static void
+remove_additional_chassis(const struct sbrec_port_binding *pb,
+                          const struct sbrec_chassis *chassis_rec)
+{
+    sbrec_port_binding_update_additional_chassis_delvalue(pb, chassis_rec);
+    remove_additional_encap_for_chassis(pb, chassis_rec);
+}
+
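+/* If the last claim of 'port_name' happened less than CLAIM_TIME_THRESHOLD_MS
+ * ago, adds it to 'postponed_ports' and returns true so that the caller
+ * retries the claim later.  Returns false otherwise. */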
+static bool
+lport_maybe_postpone(const char *port_name, long long int now,
+                     struct sset *postponed_ports)
+{
+    long long int last_claimed = get_claim_timestamp(port_name);
+    if (now - last_claimed >= CLAIM_TIME_THRESHOLD_MS) {
+        return false;
+    }
+
+    sset_add(postponed_ports, port_name);
+    VLOG_DBG("Postponed claim on logical port %s.", port_name);
+
+    return true;
 }
 
 /* Returns false if lport is not claimed due to 'sb_readonly'.
@@ -930,46 +1189,90 @@ claim_lport(const struct sbrec_port_binding *pb,
             const struct ovsrec_interface *iface_rec,
             bool sb_readonly, bool notify_up,
             struct hmap *tracked_datapaths,
-            struct if_status_mgr *if_mgr)
-{
-    if (!sb_readonly) {
-        claimed_lport_set_up(pb, parent_pb, chassis_rec, notify_up, if_mgr);
-    }
-
-    if (pb->chassis != chassis_rec) {
-        if (sb_readonly) {
-            return false;
-        }
+            struct if_status_mgr *if_mgr,
+            struct sset *postponed_ports)
+{
+    enum can_bind can_bind = lport_can_bind_on_this_chassis(chassis_rec, pb);
+    bool update_tracked = false;
+
+    if (can_bind == CAN_BIND_AS_MAIN) {
+        if (pb->chassis != chassis_rec) {
+            long long int now = time_msec();
+            if (pb->chassis) {
+                if (lport_maybe_postpone(pb->logical_port, now,
+                                         postponed_ports)) {
+                    return true;
+                }
+            }
+            if (is_additional_chassis(pb, chassis_rec)) {
+                if (sb_readonly) {
+                    return false;
+                }
+                remove_additional_chassis(pb, chassis_rec);
+            }
+            update_tracked = true;
 
-        if (pb->chassis) {
-            VLOG_INFO("Changing chassis for lport %s from %s to %s.",
-                    pb->logical_port, pb->chassis->name,
-                    chassis_rec->name);
+            if (!notify_up) {
+                if (!claimed_lport_set_up(pb, parent_pb, sb_readonly)) {
+                    return false;
+                }
+                if (sb_readonly) {
+                    return false;
+                }
+                set_pb_chassis_in_sbrec(pb, chassis_rec, true);
+            } else {
+                if_status_mgr_claim_iface(if_mgr, pb, chassis_rec,
+                                          sb_readonly);
+            }
+            register_claim_timestamp(pb->logical_port, now);
+            sset_find_and_delete(postponed_ports, pb->logical_port);
         } else {
-            VLOG_INFO("Claiming lport %s for this chassis.", pb->logical_port);
-        }
-        for (int i = 0; i < pb->n_mac; i++) {
-            VLOG_INFO("%s: Claiming %s", pb->logical_port, pb->mac[i]);
+            if (!notify_up) {
+                if (!claimed_lport_set_up(pb, parent_pb, sb_readonly)) {
+                    return false;
+                }
+            } else {
+                if (pb->n_up && !pb->up[0]) {
+                    if_status_mgr_claim_iface(if_mgr, pb, chassis_rec,
+                                              sb_readonly);
+                }
+            }
         }
+    } else if (can_bind == CAN_BIND_AS_ADDITIONAL) {
+        if (!is_additional_chassis(pb, chassis_rec)) {
+            if (sb_readonly) {
+                return false;
+            }
 
-        sbrec_port_binding_set_chassis(pb, chassis_rec);
+            VLOG_INFO("Claiming lport %s for this additional chassis.",
+                      pb->logical_port);
+            for (size_t i = 0; i < pb->n_mac; i++) {
+                VLOG_INFO("%s: Claiming %s", pb->logical_port, pb->mac[i]);
+            }
 
+            sbrec_port_binding_update_additional_chassis_addvalue(pb,
+                                                                  chassis_rec);
+            if (pb->chassis == chassis_rec) {
+                sbrec_port_binding_set_chassis(pb, NULL);
+            }
+            update_tracked = true;
+        }
+    }
+
+    if (update_tracked) {
         if (tracked_datapaths) {
             update_lport_tracking(pb, tracked_datapaths, true);
         }
     }
 
     /* Check if the port encap binding, if any, has changed */
-    struct sbrec_encap *encap_rec =
-        sbrec_get_port_encap(chassis_rec, iface_rec);
-    if ((encap_rec && pb->encap != encap_rec) ||
-        (!encap_rec && pb->encap)) {
-        if (sb_readonly) {
-            return false;
-        }
-        sbrec_port_binding_set_encap(pb, encap_rec);
+    if (can_bind == CAN_BIND_AS_MAIN) {
+        return update_port_encap_if_needed(
+            pb, chassis_rec, iface_rec, sb_readonly);
+    } else if (can_bind == CAN_BIND_AS_ADDITIONAL) {
+        return update_port_additional_encap_if_needed(
+            pb, chassis_rec, iface_rec, sb_readonly);
     }
-
     return true;
 }
 
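The claim_lport() rework above distinguishes binding as the main chassis from binding as an additional (multi-homed) chassis. The is_additional_chassis() helper it relies on is only declared later, in binding.h; a minimal sketch of the check it presumably performs, based on the pb->additional_chassis / pb->n_additional_chassis columns referenced elsewhere in this patch:

bool
is_additional_chassis(const struct sbrec_port_binding *pb,
                      const struct sbrec_chassis *chassis_rec)
{
    for (size_t i = 0; i < pb->n_additional_chassis; i++) {
        if (pb->additional_chassis[i] == chassis_rec) {
            return true;
        }
    }
    return false;
}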
@@ -981,7 +1284,9 @@ claim_lport(const struct sbrec_port_binding *pb,
  * Caller should make sure that this is the case.
  */
 static bool
-release_lport_(const struct sbrec_port_binding *pb, bool sb_readonly)
+release_lport_main_chassis(const struct sbrec_port_binding *pb,
+                           bool sb_readonly,
+                           struct if_status_mgr *if_mgr)
 {
     if (pb->encap) {
         if (sb_readonly) {
@@ -990,11 +1295,14 @@ release_lport_(const struct sbrec_port_binding *pb, bool sb_readonly)
         sbrec_port_binding_set_encap(pb, NULL);
     }
 
+    /* If the SB is read-only, pb->chassis is instead unset through the
+     * if-status module, provided the interface is still tracked there. */
+
     if (pb->chassis) {
-        if (sb_readonly) {
+        if (!sb_readonly) {
+            sbrec_port_binding_set_chassis(pb, NULL);
+        } else if (!if_status_mgr_iface_is_present(if_mgr, pb->logical_port)) {
             return false;
         }
-        sbrec_port_binding_set_chassis(pb, NULL);
     }
 
     if (pb->virtual_parent) {
@@ -1004,16 +1312,49 @@ release_lport_(const struct sbrec_port_binding *pb, bool sb_readonly)
         sbrec_port_binding_set_virtual_parent(pb, NULL);
     }
 
-    VLOG_INFO("Releasing lport %s from this chassis.", pb->logical_port);
+    VLOG_INFO("Releasing lport %s from this chassis (sb_readonly=%d)",
+              pb->logical_port, sb_readonly);
+
     return true;
 }
 
 static bool
-release_lport(const struct sbrec_port_binding *pb, bool sb_readonly,
+release_lport_additional_chassis(const struct sbrec_port_binding *pb,
+                                 const struct sbrec_chassis *chassis_rec,
+                                 bool sb_readonly)
+{
+    if (pb->additional_encap) {
+        if (sb_readonly) {
+            return false;
+        }
+        remove_additional_encap_for_chassis(pb, chassis_rec);
+    }
+
+    if (is_additional_chassis(pb, chassis_rec)) {
+        if (sb_readonly) {
+            return false;
+        }
+        remove_additional_chassis(pb, chassis_rec);
+    }
+
+    VLOG_INFO("Releasing lport %s from this additional chassis.",
+              pb->logical_port);
+    return true;
+}
+
+static bool
+release_lport(const struct sbrec_port_binding *pb,
+              const struct sbrec_chassis *chassis_rec, bool sb_readonly,
               struct hmap *tracked_datapaths, struct if_status_mgr *if_mgr)
 {
-    if (!release_lport_(pb, sb_readonly)) {
-        return false;
+    if (pb->chassis == chassis_rec) {
+        if (!release_lport_main_chassis(pb, sb_readonly, if_mgr)) {
+            return false;
+        }
+    } else if (is_additional_chassis(pb, chassis_rec)) {
+        if (!release_lport_additional_chassis(pb, chassis_rec, sb_readonly)) {
+            return false;
+        }
     }
 
     update_lport_tracking(pb, tracked_datapaths, false);
@@ -1032,7 +1373,8 @@ is_binding_lport_this_chassis(struct binding_lport *b_lport,
                               const struct sbrec_chassis *chassis)
 {
     return (b_lport && b_lport->pb && chassis &&
-            b_lport->pb->chassis == chassis);
+            (b_lport->pb->chassis == chassis
+             || is_additional_chassis(b_lport->pb, chassis)));
 }
 
 /* Returns 'true' if the 'lbinding' has binding lports of type LP_CONTAINER,
@@ -1057,7 +1399,7 @@ release_binding_lport(const struct sbrec_chassis *chassis_rec,
 {
     if (is_binding_lport_this_chassis(b_lport, chassis_rec)) {
         remove_related_lport(b_lport->pb, b_ctx_out);
-        if (!release_lport(b_lport->pb, sb_readonly,
+        if (!release_lport(b_lport->pb, chassis_rec, sb_readonly,
                            b_ctx_out->tracked_dp_bindings,
                            b_ctx_out->if_mgr)) {
             return false;
@@ -1088,7 +1430,8 @@ consider_vif_lport_(const struct sbrec_port_binding *pb,
                              b_lport->lbinding->iface,
                              !b_ctx_in->ovnsb_idl_txn,
                              !parent_pb, b_ctx_out->tracked_dp_bindings,
-                             b_ctx_out->if_mgr)){
+                             b_ctx_out->if_mgr,
+                             b_ctx_out->postponed_ports)) {
                 return false;
             }
 
@@ -1100,28 +1443,32 @@ consider_vif_lport_(const struct sbrec_port_binding *pb,
                                b_ctx_out->tracked_dp_bindings);
             update_related_lport(pb, b_ctx_out);
             update_local_lports(pb->logical_port, b_ctx_out);
+            if (binding_lport_update_port_sec(b_lport, pb) &&
+                    b_ctx_out->tracked_dp_bindings) {
+                tracked_datapath_lport_add(pb, TRACKED_RESOURCE_UPDATED,
+                                           b_ctx_out->tracked_dp_bindings);
+            }
             if (b_lport->lbinding->iface && qos_map && b_ctx_in->ovs_idl_txn) {
                 get_qos_params(pb, qos_map);
             }
         } else {
             /* We could, but can't claim the lport. */
             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
-                VLOG_INFO_RL(&rl,
-                             "Not claiming lport %s, chassis %s "
-                             "requested-chassis %s",
-                             pb->logical_port,
-                             b_ctx_in->chassis_rec->name,
-                             pb->requested_chassis ?
-                             pb->requested_chassis->name : "(option points at "
-                                                           "non-existent "
-                                                           "chassis)");
+            const char *requested_chassis_option = smap_get(
+                &pb->options, "requested-chassis");
+            VLOG_INFO_RL(&rl,
+                "Not claiming lport %s, chassis %s requested-chassis %s",
+                pb->logical_port, b_ctx_in->chassis_rec->name,
+                requested_chassis_option ? requested_chassis_option : "[]");
         }
     }
 
-    if (pb->chassis == b_ctx_in->chassis_rec) {
+    if (pb->chassis == b_ctx_in->chassis_rec
+            || is_additional_chassis(pb, b_ctx_in->chassis_rec)) {
         /* Release the lport if there is no lbinding. */
         if (!lbinding_set || !can_bind) {
-            return release_lport(pb, !b_ctx_in->ovnsb_idl_txn,
+            return release_lport(pb, b_ctx_in->chassis_rec,
+                                 !b_ctx_in->ovnsb_idl_txn,
                                  b_ctx_out->tracked_dp_bindings,
                                  b_ctx_out->if_mgr);
         }
@@ -1243,7 +1590,8 @@ consider_container_lport(const struct sbrec_port_binding *pb,
          * if it was bound earlier. */
         if (is_binding_lport_this_chassis(container_b_lport,
                                           b_ctx_in->chassis_rec)) {
-            return release_lport(pb, !b_ctx_in->ovnsb_idl_txn,
+            return release_lport(pb, b_ctx_in->chassis_rec,
+                                 !b_ctx_in->ovnsb_idl_txn,
                                  b_ctx_out->tracked_dp_bindings,
                                  b_ctx_out->if_mgr);
         }
@@ -1336,11 +1684,21 @@ consider_localport(const struct sbrec_port_binding *pb,
 
     /* If the port binding is claimed, then release it as localport is claimed
      * by any ovn-controller. */
-    if (pb->chassis == b_ctx_in->chassis_rec) {
-        if (!release_lport_(pb, !b_ctx_in->ovnsb_idl_txn)) {
+    enum can_bind can_bind = lport_can_bind_on_this_chassis(
+        b_ctx_in->chassis_rec, pb);
+    if (can_bind == CAN_BIND_AS_MAIN) {
+        if (!release_lport_main_chassis(pb, !b_ctx_in->ovnsb_idl_txn,
+            b_ctx_out->if_mgr)) {
             return false;
         }
+    } else if (can_bind == CAN_BIND_AS_ADDITIONAL) {
+        if (!release_lport_additional_chassis(pb, b_ctx_in->chassis_rec,
+                                              !b_ctx_in->ovnsb_idl_txn)) {
+            return false;
+        }
+    }
 
+    if (can_bind) {
         remove_related_lport(pb, b_ctx_out);
     }
 
@@ -1370,9 +1728,14 @@ consider_nonvif_lport_(const struct sbrec_port_binding *pb,
         return claim_lport(pb, NULL, b_ctx_in->chassis_rec, NULL,
                            !b_ctx_in->ovnsb_idl_txn, false,
                            b_ctx_out->tracked_dp_bindings,
-                           b_ctx_out->if_mgr);
-    } else if (pb->chassis == b_ctx_in->chassis_rec) {
-        return release_lport(pb, !b_ctx_in->ovnsb_idl_txn,
+                           b_ctx_out->if_mgr,
+                           b_ctx_out->postponed_ports);
+    }
+
+    if (pb->chassis == b_ctx_in->chassis_rec ||
+            is_additional_chassis(pb, b_ctx_in->chassis_rec)) {
+        return release_lport(pb, b_ctx_in->chassis_rec,
+                             !b_ctx_in->ovnsb_idl_txn,
                              b_ctx_out->tracked_dp_bindings,
                              b_ctx_out->if_mgr);
     }
@@ -1551,6 +1914,8 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
 
     struct ovs_list localnet_lports = OVS_LIST_INITIALIZER(&localnet_lports);
     struct ovs_list external_lports = OVS_LIST_INITIALIZER(&external_lports);
+    struct ovs_list multichassis_ports = OVS_LIST_INITIALIZER(
+                                                        &multichassis_ports);
 
     struct lport {
         struct ovs_list list_node;
@@ -1565,11 +1930,9 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
     const struct sbrec_port_binding *pb;
     SBREC_PORT_BINDING_TABLE_FOR_EACH (pb,
                                        b_ctx_in->port_binding_table) {
-        update_active_pb_ras_pd(pb, b_ctx_out->local_datapaths,
-                                b_ctx_out->local_active_ports_ipv6_pd,
+        update_active_pb_ras_pd(pb, b_ctx_out->local_active_ports_ipv6_pd,
                                 "ipv6_prefix_delegation");
-        update_active_pb_ras_pd(pb, b_ctx_out->local_datapaths,
-                                b_ctx_out->local_active_ports_ras,
+        update_active_pb_ras_pd(pb, b_ctx_out->local_active_ports_ras,
                                 "ipv6_ra_send_periodic");
 
         enum en_lport_type lport_type = get_lport_type(pb);
@@ -1586,6 +1949,13 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
 
         case LP_VIF:
             consider_vif_lport(pb, b_ctx_in, b_ctx_out, NULL, qos_map_ptr);
+            if (pb->additional_chassis) {
+                struct lport *multichassis_lport = xmalloc(
+                    sizeof *multichassis_lport);
+                multichassis_lport->pb = pb;
+                ovs_list_push_back(&multichassis_ports,
+                                   &multichassis_lport->list_node);
+            }
             break;
 
         case LP_CONTAINER:
@@ -1661,6 +2031,16 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
         free(ext_lport);
     }
 
+    /* Run through the multichassis lport list to see if these are ports
+     * on local datapaths discovered in the loop above, and update the
+     * corresponding local datapaths accordingly. */
+    struct lport *multichassis_lport;
+    LIST_FOR_EACH_POP (multichassis_lport, list_node, &multichassis_ports) {
+        update_ld_multichassis_ports(multichassis_lport->pb,
+                                     b_ctx_out->local_datapaths);
+        free(multichassis_lport);
+    }
+
     shash_destroy(&bridge_mappings);
 
     if (!sset_is_empty(b_ctx_out->egress_ifaces)
@@ -1673,6 +2053,8 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
     }
 
     destroy_qos_map(&qos_map);
+
+    cleanup_claimed_port_timestamps();
 }
 
 /* Returns true if the database is all cleaned up, false if more work is
@@ -1699,6 +2081,10 @@ binding_cleanup(struct ovsdb_idl_txn *ovnsb_idl_txn,
             sbrec_port_binding_set_chassis(binding_rec, NULL);
             any_changes = true;
         }
+        if (is_additional_chassis(binding_rec, chassis_rec)) {
+            remove_additional_chassis(binding_rec, chassis_rec);
+            any_changes = true;
+        }
     }
 
     if (any_changes) {
@@ -1728,6 +2114,7 @@ remove_pb_from_local_datapath(const struct sbrec_port_binding *pb,
     } else if (!strcmp(pb->type, "external")) {
         remove_local_datapath_external_port(ld, pb->logical_port);
     }
+    remove_local_datapath_multichassis_port(ld, pb->logical_port);
 }
 
 static void
@@ -2308,6 +2695,128 @@ consider_patch_port_for_local_datapaths(const struct sbrec_port_binding *pb,
     }
 }
 
+static bool
+handle_updated_port(struct binding_ctx_in *b_ctx_in,
+                    struct binding_ctx_out *b_ctx_out,
+                    const struct sbrec_port_binding *pb,
+                    struct hmap *qos_map_ptr)
+{
+    update_active_pb_ras_pd(pb, b_ctx_out->local_active_ports_ipv6_pd,
+                            "ipv6_prefix_delegation");
+
+    update_active_pb_ras_pd(pb, b_ctx_out->local_active_ports_ras,
+                            "ipv6_ra_send_periodic");
+
+    enum en_lport_type lport_type = get_lport_type(pb);
+
+    struct binding_lport *b_lport =
+        binding_lport_find(&b_ctx_out->lbinding_data->lports,
+                           pb->logical_port);
+    if (b_lport) {
+        ovs_assert(b_lport->pb == pb);
+
+        if (b_lport->type != lport_type) {
+            b_lport->type = lport_type;
+        }
+
+        if (b_lport->lbinding) {
+            if (!local_binding_handle_stale_binding_lports(
+                    b_lport->lbinding, b_ctx_in, b_ctx_out, qos_map_ptr)) {
+                return false;
+            }
+        }
+    }
+
+    bool handled = true;
+
+    switch (lport_type) {
+    case LP_VIF:
+    case LP_CONTAINER:
+    case LP_VIRTUAL:
+        update_ld_multichassis_ports(pb, b_ctx_out->local_datapaths);
+        handled = handle_updated_vif_lport(pb, lport_type, b_ctx_in,
+                                           b_ctx_out, qos_map_ptr);
+        break;
+
+    case LP_LOCALPORT:
+        handled = consider_localport(pb, b_ctx_in, b_ctx_out);
+        break;
+
+    case LP_PATCH:
+        update_related_lport(pb, b_ctx_out);
+        consider_patch_port_for_local_datapaths(pb, b_ctx_in, b_ctx_out);
+        break;
+
+    case LP_VTEP:
+        update_related_lport(pb, b_ctx_out);
+        /* VTEP lports are claimed/released by ovn-controller-vteps.
+         * We are not sure what changed. */
+        b_ctx_out->non_vif_ports_changed = true;
+        break;
+
+    case LP_L2GATEWAY:
+        handled = consider_l2gw_lport(pb, b_ctx_in, b_ctx_out);
+        break;
+
+    case LP_L3GATEWAY:
+        handled = consider_l3gw_lport(pb, b_ctx_in, b_ctx_out);
+        break;
+
+    case LP_CHASSISREDIRECT:
+        handled = consider_cr_lport(pb, b_ctx_in, b_ctx_out);
+        if (!handled) {
+            break;
+        }
+        const char *distributed_port = smap_get(&pb->options,
+                                                "distributed-port");
+        if (!distributed_port) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+            VLOG_WARN_RL(&rl, "No distributed-port option set for "
+                         "chassisredirect port %s", pb->logical_port);
+            break;
+        }
+        const struct sbrec_port_binding *distributed_pb
+            = lport_lookup_by_name(b_ctx_in->sbrec_port_binding_by_name,
+                                   distributed_port);
+        if (!distributed_pb) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+            VLOG_WARN_RL(&rl, "No port binding record for distributed "
+                         "port %s referred by chassisredirect port %s",
+                         distributed_port, pb->logical_port);
+            break;
+        }
+        consider_patch_port_for_local_datapaths(distributed_pb, b_ctx_in,
+                                                b_ctx_out);
+        break;
+
+    case LP_EXTERNAL:
+        handled = consider_external_lport(pb, b_ctx_in, b_ctx_out);
+        update_ld_external_ports(pb, b_ctx_out->local_datapaths);
+        break;
+
+    case LP_LOCALNET: {
+        consider_localnet_lport(pb, b_ctx_in, b_ctx_out, qos_map_ptr);
+
+        struct shash bridge_mappings =
+            SHASH_INITIALIZER(&bridge_mappings);
+        add_ovs_bridge_mappings(b_ctx_in->ovs_table,
+                                b_ctx_in->bridge_table,
+                                &bridge_mappings);
+        update_ld_localnet_port(pb, &bridge_mappings,
+                                b_ctx_out->egress_ifaces,
+                                b_ctx_out->local_datapaths);
+        shash_destroy(&bridge_mappings);
+        break;
+    }
+
+    case LP_REMOTE:
+    case LP_UNKNOWN:
+        break;
+    }
+
+    return handled;
+}
+
 /* Returns true if the port binding changes resulted in local binding
  * updates, false otherwise.
  */
@@ -2377,8 +2886,7 @@ binding_handle_port_binding_changes(struct binding_ctx_in *b_ctx_in,
     }
 
     struct shash_node *node;
-    struct shash_node *node_next;
-    SHASH_FOR_EACH_SAFE (node, node_next, &deleted_container_pbs) {
+    SHASH_FOR_EACH_SAFE (node, &deleted_container_pbs) {
         handled = handle_deleted_vif_lport(node->data, LP_CONTAINER, b_ctx_in,
                                            b_ctx_out);
         shash_delete(&deleted_container_pbs, node);
@@ -2387,7 +2895,7 @@ binding_handle_port_binding_changes(struct binding_ctx_in *b_ctx_in,
         }
     }
 
-    SHASH_FOR_EACH_SAFE (node, node_next, &deleted_virtual_pbs) {
+    SHASH_FOR_EACH_SAFE (node, &deleted_virtual_pbs) {
         handled = handle_deleted_vif_lport(node->data, LP_VIRTUAL, b_ctx_in,
                                            b_ctx_out);
         shash_delete(&deleted_virtual_pbs, node);
@@ -2396,7 +2904,7 @@ binding_handle_port_binding_changes(struct binding_ctx_in *b_ctx_in,
         }
     }
 
-    SHASH_FOR_EACH_SAFE (node, node_next, &deleted_vif_pbs) {
+    SHASH_FOR_EACH_SAFE (node, &deleted_vif_pbs) {
         handled = handle_deleted_vif_lport(node->data, LP_VIF, b_ctx_in,
                                            b_ctx_out);
         shash_delete(&deleted_vif_pbs, node);
@@ -2405,13 +2913,13 @@ binding_handle_port_binding_changes(struct binding_ctx_in *b_ctx_in,
         }
     }
 
-    SHASH_FOR_EACH_SAFE (node, node_next, &deleted_localport_pbs) {
+    SHASH_FOR_EACH_SAFE (node, &deleted_localport_pbs) {
         handle_deleted_vif_lport(node->data, LP_LOCALPORT, b_ctx_in,
                                  b_ctx_out);
         shash_delete(&deleted_localport_pbs, node);
     }
 
-    SHASH_FOR_EACH_SAFE (node, node_next, &deleted_other_pbs) {
+    SHASH_FOR_EACH_SAFE (node, &deleted_other_pbs) {
         handle_deleted_lport(node->data, b_ctx_in, b_ctx_out);
         shash_delete(&deleted_other_pbs, node);
     }
@@ -2438,124 +2946,72 @@ delete_done:
             continue;
         }
 
-        update_active_pb_ras_pd(pb, b_ctx_out->local_datapaths,
-                                b_ctx_out->local_active_ports_ipv6_pd,
-                                "ipv6_prefix_delegation");
-
-        update_active_pb_ras_pd(pb, b_ctx_out->local_datapaths,
-                                b_ctx_out->local_active_ports_ras,
-                                "ipv6_ra_send_periodic");
-
-        enum en_lport_type lport_type = get_lport_type(pb);
-
-        struct binding_lport *b_lport =
-            binding_lport_find(&b_ctx_out->lbinding_data->lports,
-                               pb->logical_port);
-        if (b_lport) {
-            ovs_assert(b_lport->pb == pb);
-
-            if (b_lport->type != lport_type) {
-                b_lport->type = lport_type;
-            }
-
-            if (b_lport->lbinding) {
-                handled = local_binding_handle_stale_binding_lports(
-                    b_lport->lbinding, b_ctx_in, b_ctx_out, qos_map_ptr);
-                if (!handled) {
-                    /* Backout from the handling. */
-                    break;
-                }
-            }
+        if (sbrec_port_binding_is_updated(pb, SBREC_PORT_BINDING_COL_TYPE)) {
+            update_ld_peers(pb, b_ctx_out->local_datapaths);
         }
 
-        switch (lport_type) {
-        case LP_VIF:
-        case LP_CONTAINER:
-        case LP_VIRTUAL:
-            handled = handle_updated_vif_lport(pb, lport_type, b_ctx_in,
-                                               b_ctx_out, qos_map_ptr);
-            break;
-
-        case LP_LOCALPORT:
-            handled = consider_localport(pb, b_ctx_in, b_ctx_out);
-            break;
-
-        case LP_PATCH:
-            update_related_lport(pb, b_ctx_out);
-            consider_patch_port_for_local_datapaths(pb, b_ctx_in, b_ctx_out);
-            break;
-
-        case LP_VTEP:
-            update_related_lport(pb, b_ctx_out);
-            /* VTEP lports are claimed/released by ovn-controller-vteps.
-             * We are not sure what changed. */
-            b_ctx_out->non_vif_ports_changed = true;
-            break;
-
-        case LP_L2GATEWAY:
-            handled = consider_l2gw_lport(pb, b_ctx_in, b_ctx_out);
-            break;
-
-        case LP_L3GATEWAY:
-            handled = consider_l3gw_lport(pb, b_ctx_in, b_ctx_out);
-            break;
-
-        case LP_CHASSISREDIRECT:
-            handled = consider_cr_lport(pb, b_ctx_in, b_ctx_out);
-            if (!handled) {
-                break;
-            }
-            const char *distributed_port = smap_get(&pb->options,
-                                                    "distributed-port");
-            if (!distributed_port) {
-                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
-                VLOG_WARN_RL(&rl, "No distributed-port option set for "
-                             "chassisredirect port %s", pb->logical_port);
-                break;
-            }
-            const struct sbrec_port_binding *distributed_pb
-                = lport_lookup_by_name(b_ctx_in->sbrec_port_binding_by_name,
-                                       distributed_port);
-            if (!distributed_pb) {
-                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
-                VLOG_WARN_RL(&rl, "No port binding record for distributed "
-                             "port %s referred by chassisredirect port %s",
-                             distributed_port, pb->logical_port);
-                break;
-            }
-            consider_patch_port_for_local_datapaths(distributed_pb, b_ctx_in,
-                                                    b_ctx_out);
-            break;
-
-        case LP_EXTERNAL:
-            handled = consider_external_lport(pb, b_ctx_in, b_ctx_out);
-            update_ld_external_ports(pb, b_ctx_out->local_datapaths);
-            break;
-
-        case LP_LOCALNET: {
-            consider_localnet_lport(pb, b_ctx_in, b_ctx_out, qos_map_ptr);
-
-            struct shash bridge_mappings =
-                SHASH_INITIALIZER(&bridge_mappings);
-            add_ovs_bridge_mappings(b_ctx_in->ovs_table,
-                                    b_ctx_in->bridge_table,
-                                    &bridge_mappings);
-            update_ld_localnet_port(pb, &bridge_mappings,
-                                    b_ctx_out->egress_ifaces,
-                                    b_ctx_out->local_datapaths);
-            shash_destroy(&bridge_mappings);
+        handled = handle_updated_port(b_ctx_in, b_ctx_out, pb, qos_map_ptr);
+        if (!handled) {
             break;
         }
+    }
 
-        case LP_REMOTE:
-        case LP_UNKNOWN:
-            break;
+    /* Also handle any postponed (throttled) ports. */
+    const char *port_name;
+    struct sset postponed_ports = SSET_INITIALIZER(&postponed_ports);
+    sset_clone(&postponed_ports, b_ctx_out->postponed_ports);
+    SSET_FOR_EACH (port_name, &postponed_ports) {
+        pb = lport_lookup_by_name(b_ctx_in->sbrec_port_binding_by_name,
+                                  port_name);
+        if (!pb) {
+            sset_find_and_delete(b_ctx_out->postponed_ports, port_name);
+            continue;
         }
-
+        handled = handle_updated_port(b_ctx_in, b_ctx_out, pb, qos_map_ptr);
         if (!handled) {
             break;
         }
     }
+    sset_destroy(&postponed_ports);
+    cleanup_claimed_port_timestamps();
+
+    if (handled) {
+        /* There may be new local datapaths added by the above handling, so go
+         * through each port_binding of newly added local datapaths to update
+         * related local_datapaths if needed. */
+        struct shash bridge_mappings =
+            SHASH_INITIALIZER(&bridge_mappings);
+        add_ovs_bridge_mappings(b_ctx_in->ovs_table,
+                                b_ctx_in->bridge_table,
+                                &bridge_mappings);
+        struct tracked_datapath *t_dp;
+        HMAP_FOR_EACH (t_dp, node, b_ctx_out->tracked_dp_bindings) {
+            if (t_dp->tracked_type != TRACKED_RESOURCE_NEW) {
+                continue;
+            }
+            struct sbrec_port_binding *target =
+                sbrec_port_binding_index_init_row(
+                    b_ctx_in->sbrec_port_binding_by_datapath);
+            sbrec_port_binding_index_set_datapath(target, t_dp->dp);
+
+            SBREC_PORT_BINDING_FOR_EACH_EQUAL (pb, target,
+                b_ctx_in->sbrec_port_binding_by_datapath) {
+                enum en_lport_type lport_type = get_lport_type(pb);
+                if (lport_type == LP_LOCALNET) {
+                    update_ld_localnet_port(pb, &bridge_mappings,
+                                            b_ctx_out->egress_ifaces,
+                                            b_ctx_out->local_datapaths);
+                } else if (lport_type == LP_EXTERNAL) {
+                    update_ld_external_ports(pb, b_ctx_out->local_datapaths);
+                } else if (pb->n_additional_chassis) {
+                    update_ld_multichassis_ports(pb,
+                                                 b_ctx_out->local_datapaths);
+                }
+            }
+            sbrec_port_binding_index_destroy_row(target);
+        }
+        shash_destroy(&bridge_mappings);
+    }
 
     if (handled && qos_map_ptr && set_noop_qos(b_ctx_in->ovs_idl_txn,
                                                b_ctx_in->port_table,
@@ -2727,9 +3183,9 @@ local_binding_handle_stale_binding_lports(struct local_binding *lbinding,
     }
 
     bool handled = true;
-    struct binding_lport *b_lport, *next;
+    struct binding_lport *b_lport;
     const struct sbrec_port_binding *pb;
-    LIST_FOR_EACH_SAFE (b_lport, next, list_node, &lbinding->binding_lports) {
+    LIST_FOR_EACH_SAFE (b_lport, list_node, &lbinding->binding_lports) {
         /* Get the lport type again from the pb.  Its possible that the
          * pb type has changed. */
         enum en_lport_type pb_lport_type = get_lport_type(b_lport->pb);
@@ -2802,6 +3258,7 @@ binding_lport_destroy(struct binding_lport *b_lport)
         ovs_list_remove(&b_lport->list_node);
     }
 
+    binding_lport_clear_port_sec(b_lport);
     free(b_lport->name);
     free(b_lport);
 }
@@ -2929,6 +3386,55 @@ cleanup:
 
 
 static bool
+binding_lport_has_port_sec_changed(struct binding_lport *b_lport,
+                                   const struct sbrec_port_binding *pb)
+{
+    if (b_lport->n_port_security != pb->n_port_security) {
+        return true;
+    }
+
+    for (size_t i = 0; i < b_lport->n_port_security; i++) {
+        if (strcmp(b_lport->port_security[i], pb->port_security[i])) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static void
+binding_lport_clear_port_sec(struct binding_lport *b_lport)
+{
+    for (size_t i = 0; i < b_lport->n_port_security; i++) {
+        free(b_lport->port_security[i]);
+    }
+    free(b_lport->port_security);
+    b_lport->n_port_security = 0;
+}
+
+static bool
+binding_lport_update_port_sec(struct binding_lport *b_lport,
+                              const struct sbrec_port_binding *pb)
+{
+    if (binding_lport_has_port_sec_changed(b_lport, pb)) {
+        binding_lport_clear_port_sec(b_lport);
+        b_lport->port_security =
+            pb->n_port_security ?
+            xmalloc(pb->n_port_security * sizeof *b_lport->port_security) :
+            NULL;
+
+        b_lport->n_port_security = pb->n_port_security;
+        for (size_t i = 0; i < pb->n_port_security; i++) {
+            b_lport->port_security[i] = xstrdup(pb->port_security[i]);
+        }
+
+        return true;
+    }
+
+    return false;
+}
+
+static bool
 ovs_iface_matches_lport_iface_id_ver(const struct ovsrec_interface *iface,
                                      const struct sbrec_port_binding *pb)
 {
@@ -2949,3 +3455,10 @@ ovs_iface_matches_lport_iface_id_ver(const struct ovsrec_interface *iface,
 
     return true;
 }
+
+void
+binding_destroy(void)
+{
+    shash_destroy_free_data(&_claimed_ports);
+    sset_clear(&_postponed_ports);
+}
diff --git a/controller/binding.h b/controller/binding.h
index e49e1eb..ad959a9 100644
--- a/controller/binding.h
+++ b/controller/binding.h
@@ -103,6 +103,8 @@ struct binding_ctx_out {
     struct hmap *tracked_dp_bindings;
 
     struct if_status_mgr *if_mgr;
+
+    struct sset *postponed_ports;
 };
 
 /* Local bindings. binding.c module binds the logical port (represented by
@@ -135,7 +137,6 @@ struct local_binding {
     struct ovs_list binding_lports;
 };
 
-
 struct local_binding_data {
     struct shash bindings;
     struct shash lports;
@@ -152,13 +153,25 @@ const struct sbrec_port_binding *local_binding_get_primary_pb(
 ofp_port_t local_binding_get_lport_ofport(const struct shash *local_bindings,
                                           const char *pb_name);
 
-bool local_binding_is_up(struct shash *local_bindings, const char *pb_name);
-bool local_binding_is_down(struct shash *local_bindings, const char *pb_name);
+bool local_binding_is_up(struct shash *local_bindings, const char *pb_name,
+                         const struct sbrec_chassis *);
+bool local_binding_is_down(struct shash *local_bindings, const char *pb_name,
+                           const struct sbrec_chassis *);
+
 void local_binding_set_up(struct shash *local_bindings, const char *pb_name,
+                          const struct sbrec_chassis *chassis_rec,
                           const char *ts_now_str, bool sb_readonly,
                           bool ovs_readonly);
 void local_binding_set_down(struct shash *local_bindings, const char *pb_name,
+                            const struct sbrec_chassis *chassis_rec,
                             bool sb_readonly, bool ovs_readonly);
+void local_binding_set_pb(struct shash *local_bindings, const char *pb_name,
+                          const struct sbrec_chassis *chassis_rec,
+                          struct hmap *tracked_datapaths,
+                          bool is_set);
+bool local_bindings_pb_chassis_is_set(struct shash *local_bindings,
+                                      const char *pb_name,
+                                      const struct sbrec_chassis *chassis_rec);
 
 void binding_register_ovs_idl(struct ovsdb_idl *);
 void binding_run(struct binding_ctx_in *, struct binding_ctx_out *);
@@ -174,6 +187,13 @@ void binding_tracked_dp_destroy(struct hmap *tracked_datapaths);
 
 void binding_dump_local_bindings(struct local_binding_data *, struct ds *);
 
+bool is_additional_chassis(const struct sbrec_port_binding *pb,
+                           const struct sbrec_chassis *chassis_rec);
+
+void set_pb_chassis_in_sbrec(const struct sbrec_port_binding *pb,
+                             const struct sbrec_chassis *chassis_rec,
+                             bool is_set);
+
 /* Corresponds to each Port_Binding.type. */
 enum en_lport_type {
     LP_UNKNOWN,
@@ -193,4 +213,34 @@ enum en_lport_type {
 
 enum en_lport_type get_lport_type(const struct sbrec_port_binding *);
 
+/* This structure represents a logical port (or port binding)
+ * which is associated with 'struct local_binding'.
+ *
+ * An instance of 'struct binding_lport' is created for a logical port
+ *  - If the OVS interface's iface-id corresponds to the logical port.
+ *  - If it is a container or virtual logical port and its parent
+ *    has a 'local binding'.
+ */
+struct binding_lport {
+    struct ovs_list list_node; /* Node in local_binding.binding_lports. */
+
+    char *name;
+    const struct sbrec_port_binding *pb;
+    struct local_binding *lbinding;
+    enum en_lport_type type;
+
+    /* Cached port security. */
+    char **port_security;
+    size_t n_port_security;
+};
+
+struct sset *get_postponed_ports(void);
+
+/* Schedule any pending binding work. */
+void binding_wait(void);
+
+/* Clean up module state. */
+void binding_destroy(void);
+
 #endif /* controller/binding.h */
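get_postponed_ports() and binding_wait() hook the claim throttling into the controller's main loop. Their bodies are not part of this excerpt; a minimal sketch, assuming binding_wait() simply requests a timer wakeup so postponed claims are retried once the threshold elapses:

struct sset *
get_postponed_ports(void)
{
    return &_postponed_ports;
}

void
binding_wait(void)
{
    /* Wake up again if there are postponed port claims to retry. */
    if (!sset_is_empty(&_postponed_ports)) {
        poll_timer_wait(CLAIM_TIME_THRESHOLD_MS);
    }
}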
diff --git a/controller/chassis.c b/controller/chassis.c
index 92850fc..685d9b2 100644
--- a/controller/chassis.c
+++ b/controller/chassis.c
@@ -351,6 +351,7 @@ chassis_build_other_config(const struct ovs_chassis_cfg *ovs_cfg,
                  ovs_cfg->is_interconn ? "true" : "false");
     smap_replace(config, OVN_FEATURE_PORT_UP_NOTIF, "true");
     smap_replace(config, OVN_FEATURE_CT_NO_MASKED_LABEL, "true");
+    smap_replace(config, OVN_FEATURE_MAC_BINDING_TIMESTAMP, "true");
 }
 
 /*
@@ -462,6 +463,12 @@ chassis_other_config_changed(const struct ovs_chassis_cfg *ovs_cfg,
         return true;
     }
 
+    if (!smap_get_bool(&chassis_rec->other_config,
+                       OVN_FEATURE_MAC_BINDING_TIMESTAMP,
+                       false)) {
+        return true;
+    }
+
     return false;
 }
 
@@ -581,7 +588,7 @@ chassis_get_record(struct ovsdb_idl_txn *ovnsb_idl_txn,
                    const struct sbrec_chassis **chassis_rec)
 {
     const struct sbrec_chassis *chassis =
-        chassis = chassis_lookup_by_name(sbrec_chassis_by_name, chassis_id);
+        chassis_lookup_by_name(sbrec_chassis_by_name, chassis_id);
 
     if (!chassis && ovnsb_idl_txn) {
         /* Create the chassis record. */
@@ -623,10 +630,6 @@ chassis_update(const struct sbrec_chassis *chassis_rec,
         chassis_build_other_config(ovs_cfg, &other_config);
         sbrec_chassis_verify_other_config(chassis_rec);
         sbrec_chassis_set_other_config(chassis_rec, &other_config);
-        /* TODO(lucasagomes): Continue writing the configuration to the
-         * external_ids column for backward compatibility with the current
-         * systems, this behavior should be removed in the future. */
-        sbrec_chassis_set_external_ids(chassis_rec, &other_config);
         smap_destroy(&other_config);
         updated = true;
     }
diff --git a/controller/encaps.c b/controller/encaps.c
index 8e6d290..9647ba5 100644
--- a/controller/encaps.c
+++ b/controller/encaps.c
@@ -190,6 +190,14 @@ tunnel_add(struct tunnel_ctx *tc, const struct sbrec_sb_global *sbg,
             smap_add(&options, "tos", encap_tos);
         }
 
+        /* If the df_default option is configured, get it */
+        const char *encap_df = smap_get(&cfg->external_ids,
+                                        "ovn-encap-df_default");
+        if (encap_df) {
+            smap_add(&options, "df_default", encap_df);
+        }
+
         /* If ovn-set-local-ip option is configured, get it */
         set_local_ip = smap_get_bool(&cfg->external_ids, "ovn-set-local-ip",
                                      false);
@@ -199,6 +207,21 @@ tunnel_add(struct tunnel_ctx *tc, const struct sbrec_sb_global *sbg,
     if (sbg->ipsec) {
         set_local_ip = true;
         smap_add(&options, "remote_name", new_chassis_id);
+
+        /* Force NAT-T traversal via configuration.  Two IPsec backends are
+         * supported: libreswan and strongswan; libreswan uses the
+         * "encapsulation" parameter, strongswan uses "forceencaps". */
+        bool encapsulation;
+        bool forceencaps;
+        encapsulation = smap_get_bool(&sbg->options, "ipsec_encapsulation",
+                                      false);
+        forceencaps = smap_get_bool(&sbg->options, "ipsec_forceencaps", false);
+        if (encapsulation) {
+            smap_add(&options, "ipsec_encapsulation", "yes");
+        }
+        if (forceencaps) {
+            smap_add(&options, "ipsec_forceencaps", "yes");
+        }
     }
 
     if (set_local_ip) {
@@ -442,8 +465,8 @@ encaps_run(struct ovsdb_idl_txn *ovs_idl_txn,
     }
 
     /* Delete any existing OVN tunnels that were not still around. */
-    struct shash_node *node, *next_node;
-    SHASH_FOR_EACH_SAFE (node, next_node, &tc.chassis) {
+    struct shash_node *node;
+    SHASH_FOR_EACH_SAFE (node, &tc.chassis) {
         struct chassis_node *chassis = node->data;
         ovsrec_bridge_update_ports_delvalue(chassis->bridge, chassis->port);
         shash_delete(&tc.chassis, node);
diff --git a/controller/if-status.c b/controller/if-status.c
index fa4c8bd..d1c14ac 100644
--- a/controller/if-status.c
+++ b/controller/if-status.c
@@ -24,6 +24,7 @@
 #include "lib/util.h"
 #include "timeval.h"
 #include "openvswitch/vlog.h"
+#include "lib/ovn-sb-idl.h"
 
 VLOG_DEFINE_THIS_MODULE(if_status);
 
@@ -53,9 +54,12 @@ VLOG_DEFINE_THIS_MODULE(if_status);
  */
 
 enum if_state {
-    OIF_CLAIMED,       /* Newly claimed interface. */
-    OIF_INSTALL_FLOWS, /* Already claimed interface for which flows are still
-                        * being installed.
+    OIF_CLAIMED,       /* Newly claimed interface. pb->chassis update not yet
+                          initiated. */
+    OIF_INSTALL_FLOWS, /* Claimed interface with pb->chassis update sent to
+                        * SB (but update notification not confirmed, so the
+                        * update may be resent in any of the following states)
+                        * and for which flows are still being installed.
                         */
     OIF_MARK_UP,       /* Interface with flows successfully installed in OVS
                         * but not yet marked "up" in the binding module (in
@@ -78,6 +82,63 @@ static const char *if_state_names[] = {
     [OIF_INSTALLED]     = "INSTALLED",
 };
 
+/*
+ *       +----------------------+
+ * +---> |                      |
+ * | +-> |         NULL         | <--------------------------------------+++-+
+ * | |   +----------------------+                                            |
+ * | |     ^ release_iface   | claim_iface()                                 |
+ * | |     |                 V - sbrec_update_chassis(if sb is rw)           |
+ * | |   +----------------------+                                            |
+ * | |   |                      | <----------------------------------------+ |
+ * | |   |       CLAIMED        | <--------------------------------------+ | |
+ * | |   +----------------------+                                        | | |
+ * | |                 |  V  ^                                           | | |
+ * | |                 |  |  | handle_claims()                           | | |
+ * | |                 |  |  | - sbrec_update_chassis(if sb is rw)       | | |
+ * | |                 |  +--+                                           | | |
+ * | |                 |                                                 | | |
+ * | |                 | mgr_update(when sb is rw i.e. pb->chassis)      | | |
+ * | |                 |            has been updated                     | | |
+ * | | release_iface   | - request seqno                                 | | |
+ * | |                 |                                                 | | |
+ * | |                 V                                                 | | |
+ * | |   +----------------------+                                        | | |
+ * | +-- |                      |  mgr_run(seqno not rcvd)               | | |
+ * |     |    INSTALL_FLOWS     |   - set port down in sb                | | |
+ * |     |                      |   - remove ovn-installed from ovsdb    | | |
+ * |     |                      |  mgr_update()                          | | |
+ * |     +----------------------+   - sbrec_update_chassis if needed     | | |
+ * |                    |                                                | | |
+ * |                    |  mgr_run(seqno rcvd)                           | | |
+ * |                    |  - set port up in sb                           | | |
+ * | release_iface      |  - set ovn-installed in ovs                    | | |
+ * |                    V                                                | | |
+ * |   +----------------------+                                          | | |
+ * |   |                      |  mgr_run()                               | | |
+ * +-- |       MARK_UP        |  - set port up in sb                     | | |
+ *     |                      |  - set ovn-installed in ovs              | | |
+ *     |                      |  mgr_update()                            | | |
+ *     +----------------------+  - sbrec_update_chassis if needed        | | |
+ *              |                                                        | | |
+ *              | mgr_update(rcvd port up / ovn_installed & chassis set) | | |
+ *              V                                                        | | |
+ *     +----------------------+                                          | | |
+ *     |      INSTALLED       | ------------> claim_iface ---------------+ | |
+ *     +----------------------+                                            | |
+ *              |                                                          | |
+ *              | release_iface                                            | |
+ *              V                                                          | |
+ *     +----------------------+                                            | |
+ *     |                      | ------------> claim_iface -----------------+ |
+ *     |      MARK_DOWN       | ------> mgr_update(rcvd port down) ----------+
+ *     |                      | mgr_run()
+ *     |                      | - set port down in sb
+ *     |                      | mgr_update()
+ *     +----------------------+ - sbrec_update_chassis(NULL)
+ */
+
 struct ovs_iface {
     char *id;               /* Extracted from OVS external_ids.iface_id. */
     enum if_state state;    /* State of the interface in the state machine. */
@@ -115,6 +176,7 @@ static void ovs_iface_set_state(struct if_status_mgr *, struct ovs_iface *,
 
 static void if_status_mgr_update_bindings(
     struct if_status_mgr *mgr, struct local_binding_data *binding_data,
+    const struct sbrec_chassis *,
     bool sb_readonly, bool ovs_readonly);
 
 struct if_status_mgr *
@@ -133,10 +195,9 @@ if_status_mgr_create(void)
 void
 if_status_mgr_clear(struct if_status_mgr *mgr)
 {
-    struct shash_node *node_next;
     struct shash_node *node;
 
-    SHASH_FOR_EACH_SAFE (node, node_next, &mgr->ifaces) {
+    SHASH_FOR_EACH_SAFE (node, &mgr->ifaces) {
         ovs_iface_destroy(mgr, node->data);
     }
     ovs_assert(shash_is_empty(&mgr->ifaces));
@@ -158,14 +219,22 @@ if_status_mgr_destroy(struct if_status_mgr *mgr)
 }
 
 void
-if_status_mgr_claim_iface(struct if_status_mgr *mgr, const char *iface_id)
+if_status_mgr_claim_iface(struct if_status_mgr *mgr,
+                          const struct sbrec_port_binding *pb,
+                          const struct sbrec_chassis *chassis_rec,
+                          bool sb_readonly)
 {
+    const char *iface_id = pb->logical_port;
     struct ovs_iface *iface = shash_find_data(&mgr->ifaces, iface_id);
 
     if (!iface) {
         iface = ovs_iface_create(mgr, iface_id, OIF_CLAIMED);
     }
 
+    if (!sb_readonly) {
+        set_pb_chassis_in_sbrec(pb, chassis_rec, true);
+    }
+
     switch (iface->state) {
     case OIF_CLAIMED:
     case OIF_INSTALL_FLOWS:
@@ -182,6 +251,12 @@ if_status_mgr_claim_iface(struct if_status_mgr *mgr, const char *iface_id)
     }
 }
 
+bool
+if_status_mgr_iface_is_present(struct if_status_mgr *mgr, const char *iface_id)
+{
+    return !!shash_find_data(&mgr->ifaces, iface_id);
+}
+
 void
 if_status_mgr_release_iface(struct if_status_mgr *mgr, const char *iface_id)
 {
@@ -246,26 +321,65 @@ if_status_mgr_delete_iface(struct if_status_mgr *mgr, const char *iface_id)
     }
 }
 
+bool
+if_status_handle_claims(struct if_status_mgr *mgr,
+                        struct local_binding_data *binding_data,
+                        const struct sbrec_chassis *chassis_rec,
+                        struct hmap *tracked_datapath,
+                        bool sb_readonly)
+{
+    if (!binding_data || sb_readonly) {
+        return false;
+    }
+
+    struct shash *bindings = &binding_data->bindings;
+    struct hmapx_node *node;
+
+    bool rc = false;
+    HMAPX_FOR_EACH (node, &mgr->ifaces_per_state[OIF_CLAIMED]) {
+        struct ovs_iface *iface = node->data;
+        VLOG_INFO("if_status_handle_claims for %s", iface->id);
+        local_binding_set_pb(bindings, iface->id, chassis_rec,
+                             tracked_datapath, true);
+        rc = true;
+    }
+    return rc;
+}
+
 void
 if_status_mgr_update(struct if_status_mgr *mgr,
-                     struct local_binding_data *binding_data)
+                     struct local_binding_data *binding_data,
+                     const struct sbrec_chassis *chassis_rec,
+                     bool sb_readonly)
 {
     if (!binding_data) {
         return;
     }
 
     struct shash *bindings = &binding_data->bindings;
-    struct hmapx_node *node_next;
     struct hmapx_node *node;
 
+    /* Interfaces in OIF_MARK_UP/INSTALL_FLOWS state have already set their
+     * pb->chassis. However, the update might still be in flight (confirmation
+     * not received yet) or pb->chassis was overwritten by another chassis.
+     */
+
     /* Move all interfaces that have been confirmed "up" by the binding module,
      * from OIF_MARK_UP to OIF_INSTALLED.
      */
-    HMAPX_FOR_EACH_SAFE (node, node_next,
-                         &mgr->ifaces_per_state[OIF_MARK_UP]) {
+    HMAPX_FOR_EACH_SAFE (node, &mgr->ifaces_per_state[OIF_MARK_UP]) {
         struct ovs_iface *iface = node->data;
 
-        if (local_binding_is_up(bindings, iface->id)) {
+        if (!local_bindings_pb_chassis_is_set(bindings, iface->id,
+            chassis_rec)) {
+            if (!sb_readonly) {
+                local_binding_set_pb(bindings, iface->id, chassis_rec,
+                                     NULL, true);
+            } else {
+                continue;
+            }
+        }
+        if (local_binding_is_up(bindings, iface->id, chassis_rec)) {
             ovs_iface_set_state(mgr, iface, OIF_INSTALLED);
         }
     }
@@ -273,31 +387,59 @@ if_status_mgr_update(struct if_status_mgr *mgr,
     /* Cleanup all interfaces that have been confirmed "down" by the binding
      * module.
      */
-    HMAPX_FOR_EACH_SAFE (node, node_next,
-                         &mgr->ifaces_per_state[OIF_MARK_DOWN]) {
+    HMAPX_FOR_EACH_SAFE (node, &mgr->ifaces_per_state[OIF_MARK_DOWN]) {
         struct ovs_iface *iface = node->data;
 
-        if (local_binding_is_down(bindings, iface->id)) {
+        if (!sb_readonly) {
+            local_binding_set_pb(bindings, iface->id, chassis_rec,
+                                 NULL, false);
+        }
+        if (local_binding_is_down(bindings, iface->id, chassis_rec)) {
             ovs_iface_destroy(mgr, iface);
         }
     }
 
-    /* Register for a notification about flows being installed in OVS for all
-     * newly claimed interfaces.
-     *
-     * Move them from OIF_CLAIMED to OIF_INSTALL_FLOWS.
+    /* Update pb->chassis in case it's not set (previous update still in
+     * flight or pb->chassis was overwritten by another chassis).
      */
-    bool new_ifaces = false;
-    HMAPX_FOR_EACH_SAFE (node, node_next,
-                         &mgr->ifaces_per_state[OIF_CLAIMED]) {
-        struct ovs_iface *iface = node->data;
+    if (!sb_readonly) {
+        HMAPX_FOR_EACH_SAFE (node, &mgr->ifaces_per_state[OIF_INSTALL_FLOWS]) {
+            struct ovs_iface *iface = node->data;
+
+            if (!local_bindings_pb_chassis_is_set(bindings, iface->id,
+                chassis_rec)) {
+                local_binding_set_pb(bindings, iface->id, chassis_rec,
+                                     NULL, true);
+            }
+        }
+    }
 
-        ovs_iface_set_state(mgr, iface, OIF_INSTALL_FLOWS);
-        iface->install_seqno = mgr->iface_seqno + 1;
-        new_ifaces = true;
+    /* Move newly claimed interfaces from OIF_CLAIMED to OIF_INSTALL_FLOWS.
+     */
+    bool new_ifaces = false;
+    if (!sb_readonly) {
+        HMAPX_FOR_EACH_SAFE (node, &mgr->ifaces_per_state[OIF_CLAIMED]) {
+            struct ovs_iface *iface = node->data;
+            /* No need to update pb->chassis as this is already done
+             * in if_status_handle_claims or if_status_mgr_claim_iface.
+             */
+            ovs_iface_set_state(mgr, iface, OIF_INSTALL_FLOWS);
+            iface->install_seqno = mgr->iface_seqno + 1;
+            new_ifaces = true;
+        }
+    } else {
+        HMAPX_FOR_EACH_SAFE (node, &mgr->ifaces_per_state[OIF_CLAIMED]) {
+            struct ovs_iface *iface = node->data;
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_INFO_RL(&rl,
+                         "Not updating pb chassis for %s now as "
+                         "sb is readonly", iface->id);
+        }
     }
 
-    /* Request a seqno update when the flows for new interfaces have been
+    /* Register for a notification about flows being installed in OVS for all
+     * newly claimed interfaces for which pb->chassis has been updated.
+     * Request a seqno update when the flows for new interfaces have been
      * installed in OVS.
      */
     if (new_ifaces) {
@@ -311,19 +453,18 @@ if_status_mgr_update(struct if_status_mgr *mgr,
 void
 if_status_mgr_run(struct if_status_mgr *mgr,
                   struct local_binding_data *binding_data,
+                  const struct sbrec_chassis *chassis_rec,
                   bool sb_readonly, bool ovs_readonly)
 {
     struct ofctrl_acked_seqnos *acked_seqnos =
             ofctrl_acked_seqnos_get(mgr->iface_seq_type_pb_cfg);
-    struct hmapx_node *node_next;
     struct hmapx_node *node;
 
     /* Move interfaces from state OIF_INSTALL_FLOWS to OIF_MARK_UP if a
      * notification has been received aabout their flows being installed
      * in OVS.
      */
-    HMAPX_FOR_EACH_SAFE (node, node_next,
-                         &mgr->ifaces_per_state[OIF_INSTALL_FLOWS]) {
+    HMAPX_FOR_EACH_SAFE (node, &mgr->ifaces_per_state[OIF_INSTALL_FLOWS]) {
         struct ovs_iface *iface = node->data;
 
         if (!ofctrl_acked_seqnos_contains(acked_seqnos,
@@ -335,8 +476,8 @@ if_status_mgr_run(struct if_status_mgr *mgr,
     ofctrl_acked_seqnos_destroy(acked_seqnos);
 
     /* Update binding states. */
-    if_status_mgr_update_bindings(mgr, binding_data, sb_readonly,
-                                  ovs_readonly);
+    if_status_mgr_update_bindings(mgr, binding_data, chassis_rec,
+                                  sb_readonly, ovs_readonly);
 }
 
 static void
@@ -397,6 +538,7 @@ ovs_iface_set_state(struct if_status_mgr *mgr, struct ovs_iface *iface,
 static void
 if_status_mgr_update_bindings(struct if_status_mgr *mgr,
                               struct local_binding_data *binding_data,
+                              const struct sbrec_chassis *chassis_rec,
                               bool sb_readonly, bool ovs_readonly)
 {
     if (!binding_data) {
@@ -407,12 +549,13 @@ if_status_mgr_update_bindings(struct if_status_mgr *mgr,
     struct hmapx_node *node;
 
     /* Notify the binding module to set "down" all bindings that are still
-     * in the process of being installed in OVS, i.e., are not yet instsalled.
+     * in the process of being installed in OVS, i.e., are not yet installed.
      */
     HMAPX_FOR_EACH (node, &mgr->ifaces_per_state[OIF_INSTALL_FLOWS]) {
         struct ovs_iface *iface = node->data;
 
-        local_binding_set_down(bindings, iface->id, sb_readonly, ovs_readonly);
+        local_binding_set_down(bindings, iface->id, chassis_rec,
+                               sb_readonly, ovs_readonly);
     }
 
     /* Notifiy the binding module to set "up" all bindings that have had
@@ -423,7 +566,7 @@ if_status_mgr_update_bindings(struct if_status_mgr *mgr,
     HMAPX_FOR_EACH (node, &mgr->ifaces_per_state[OIF_MARK_UP]) {
         struct ovs_iface *iface = node->data;
 
-        local_binding_set_up(bindings, iface->id, ts_now_str,
+        local_binding_set_up(bindings, iface->id, chassis_rec, ts_now_str,
                              sb_readonly, ovs_readonly);
     }
     free(ts_now_str);
@@ -434,7 +577,8 @@ if_status_mgr_update_bindings(struct if_status_mgr *mgr,
     HMAPX_FOR_EACH (node, &mgr->ifaces_per_state[OIF_MARK_DOWN]) {
         struct ovs_iface *iface = node->data;
 
-        local_binding_set_down(bindings, iface->id, sb_readonly, ovs_readonly);
+        local_binding_set_down(bindings, iface->id, chassis_rec,
+                               sb_readonly, ovs_readonly);
     }
 }
 
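The if-status module above now reads and writes pb->chassis only through binding.c helpers (local_binding_set_pb(), local_bindings_pb_chassis_is_set()), whose definitions fall outside this excerpt. A sketch of the contract assumed here, built on the existing local_binding_get_primary_pb() accessor:

bool
local_bindings_pb_chassis_is_set(struct shash *local_bindings,
                                 const char *pb_name,
                                 const struct sbrec_chassis *chassis_rec)
{
    const struct sbrec_port_binding *pb =
        local_binding_get_primary_pb(local_bindings, pb_name);

    return pb && pb->chassis == chassis_rec;
}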
diff --git a/controller/if-status.h b/controller/if-status.h
index ff4aa76..5bd187a 100644
--- a/controller/if-status.h
+++ b/controller/if-status.h
@@ -26,15 +26,27 @@ struct simap;
 struct if_status_mgr *if_status_mgr_create(void);
 void if_status_mgr_clear(struct if_status_mgr *);
 void if_status_mgr_destroy(struct if_status_mgr *);
-
-void if_status_mgr_claim_iface(struct if_status_mgr *, const char *iface_id);
+void if_status_mgr_claim_iface(struct if_status_mgr *,
+                               const struct sbrec_port_binding *pb,
+                               const struct sbrec_chassis *chassis_rec,
+                               bool sb_readonly);
 void if_status_mgr_release_iface(struct if_status_mgr *, const char *iface_id);
 void if_status_mgr_delete_iface(struct if_status_mgr *, const char *iface_id);
 
-void if_status_mgr_update(struct if_status_mgr *, struct local_binding_data *);
+void if_status_mgr_update(struct if_status_mgr *, struct local_binding_data *,
+                          const struct sbrec_chassis *chassis,
+                          bool sb_readonly);
 void if_status_mgr_run(struct if_status_mgr *mgr, struct local_binding_data *,
+                       const struct sbrec_chassis *,
                        bool sb_readonly, bool ovs_readonly);
 void if_status_mgr_get_memory_usage(struct if_status_mgr *mgr,
                                     struct simap *usage);
+bool if_status_mgr_iface_is_present(struct if_status_mgr *mgr,
+                                    const char *iface_id);
+bool if_status_handle_claims(struct if_status_mgr *mgr,
+                             struct local_binding_data *binding_data,
+                             const struct sbrec_chassis *chassis_rec,
+                             struct hmap *tracked_datapath,
+                             bool sb_readonly);
 
 # endif /* controller/if-status.h */
diff --git a/controller/ip-mcast.c b/controller/ip-mcast.c
index 9b0b446..a870fb2 100644
--- a/controller/ip-mcast.c
+++ b/controller/ip-mcast.c
@@ -16,6 +16,7 @@
 #include <config.h>
 
 #include "ip-mcast.h"
+#include "ip-mcast-index.h"
 #include "lport.h"
 #include "lib/ovn-sb-idl.h"
 
@@ -27,6 +28,18 @@ struct igmp_group_port {
     const struct sbrec_port_binding *port;
 };
 
+static const struct sbrec_igmp_group *
+igmp_group_lookup_(struct ovsdb_idl_index *igmp_groups,
+                   const char *addr_str,
+                   const struct sbrec_datapath_binding *datapath,
+                   const struct sbrec_chassis *chassis);
+
+static struct sbrec_igmp_group *
+igmp_group_create_(struct ovsdb_idl_txn *idl_txn,
+                   const char *addr_str,
+                   const struct sbrec_datapath_binding *datapath,
+                   const struct sbrec_chassis *chassis);
+
 struct ovsdb_idl_index *
 igmp_group_index_create(struct ovsdb_idl *idl)
 {
@@ -54,17 +67,16 @@ igmp_group_lookup(struct ovsdb_idl_index *igmp_groups,
         return NULL;
     }
 
-    struct sbrec_igmp_group *target =
-        sbrec_igmp_group_index_init_row(igmp_groups);
-
-    sbrec_igmp_group_index_set_address(target, addr_str);
-    sbrec_igmp_group_index_set_datapath(target, datapath);
-    sbrec_igmp_group_index_set_chassis(target, chassis);
+    return igmp_group_lookup_(igmp_groups, addr_str, datapath, chassis);
+}
 
-    const struct sbrec_igmp_group *g =
-        sbrec_igmp_group_index_find(igmp_groups, target);
-    sbrec_igmp_group_index_destroy_row(target);
-    return g;
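+/* Multicast routers are tracked in the Southbound IGMP_Group table as a
+ * special group keyed by the reserved OVN_IGMP_GROUP_MROUTERS address, so
+ * the mrouter lookup reuses the generic igmp_group_lookup_() helper. */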
+const struct sbrec_igmp_group *
+igmp_mrouter_lookup(struct ovsdb_idl_index *igmp_groups,
+                    const struct sbrec_datapath_binding *datapath,
+                    const struct sbrec_chassis *chassis)
+{
+    return igmp_group_lookup_(igmp_groups, OVN_IGMP_GROUP_MROUTERS,
+                              datapath, chassis);
 }
 
 /* Creates and returns a new IGMP group based on an IPv4 (mapped in IPv6) or
@@ -82,13 +94,16 @@ igmp_group_create(struct ovsdb_idl_txn *idl_txn,
         return NULL;
     }
 
-    struct sbrec_igmp_group *g = sbrec_igmp_group_insert(idl_txn);
-
-    sbrec_igmp_group_set_address(g, addr_str);
-    sbrec_igmp_group_set_datapath(g, datapath);
-    sbrec_igmp_group_set_chassis(g, chassis);
+    return igmp_group_create_(idl_txn, addr_str, datapath, chassis);
+}
 
-    return g;
+struct sbrec_igmp_group *
+igmp_mrouter_create(struct ovsdb_idl_txn *idl_txn,
+                    const struct sbrec_datapath_binding *datapath,
+                    const struct sbrec_chassis *chassis)
+{
+    return igmp_group_create_(idl_txn, OVN_IGMP_GROUP_MROUTERS, datapath,
+                              chassis);
 }
 
 void
@@ -141,6 +156,54 @@ igmp_group_update_ports(const struct sbrec_igmp_group *g,
 }
 
 void
+igmp_mrouter_update_ports(const struct sbrec_igmp_group *g,
+                          struct ovsdb_idl_index *datapaths,
+                          struct ovsdb_idl_index *port_bindings,
+                          const struct mcast_snooping *ms)
+    OVS_REQ_RDLOCK(ms->rwlock)
+{
+    struct igmp_group_port *old_ports_storage =
+        (g->n_ports ? xmalloc(g->n_ports * sizeof *old_ports_storage) : NULL);
+
+    struct hmap old_ports = HMAP_INITIALIZER(&old_ports);
+
+    for (size_t i = 0; i < g->n_ports; i++) {
+        struct igmp_group_port *old_port = &old_ports_storage[i];
+
+        old_port->port = g->ports[i];
+        hmap_insert(&old_ports, &old_port->hmap_node,
+                    old_port->port->tunnel_key);
+    }
+
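+    /* Walk the snooping module's mrouter list: add any Southbound port not
+     * yet in the group and, afterwards, remove whatever is left in
+     * 'old_ports' (ports that are no longer multicast routers). */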
+    struct mcast_mrouter_bundle *bundle;
+    uint64_t dp_key = g->datapath->tunnel_key;
+
+    LIST_FOR_EACH (bundle, mrouter_node, &ms->mrouter_lru) {
+        uint32_t port_key = (uintptr_t)bundle->port;
+        const struct sbrec_port_binding *sbrec_port =
+            lport_lookup_by_key(datapaths, port_bindings, dp_key, port_key);
+        if (!sbrec_port) {
+            continue;
+        }
+
+        struct hmap_node *node = hmap_first_with_hash(&old_ports, port_key);
+        if (!node) {
+            sbrec_igmp_group_update_ports_addvalue(g, sbrec_port);
+        } else {
+            hmap_remove(&old_ports, node);
+        }
+    }
+
+    struct igmp_group_port *igmp_port;
+    HMAP_FOR_EACH_POP (igmp_port, hmap_node, &old_ports) {
+        sbrec_igmp_group_update_ports_delvalue(g, igmp_port->port);
+    }
+
+    free(old_ports_storage);
+    hmap_destroy(&old_ports);
+}
+
+void
 igmp_group_delete(const struct sbrec_igmp_group *g)
 {
     sbrec_igmp_group_delete(g);
@@ -162,3 +225,37 @@ igmp_group_cleanup(struct ovsdb_idl_txn *ovnsb_idl_txn,
 
     return true;
 }
+
+static const struct sbrec_igmp_group *
+igmp_group_lookup_(struct ovsdb_idl_index *igmp_groups,
+                   const char *addr_str,
+                   const struct sbrec_datapath_binding *datapath,
+                   const struct sbrec_chassis *chassis)
+{
+    struct sbrec_igmp_group *target =
+        sbrec_igmp_group_index_init_row(igmp_groups);
+
+    sbrec_igmp_group_index_set_address(target, addr_str);
+    sbrec_igmp_group_index_set_datapath(target, datapath);
+    sbrec_igmp_group_index_set_chassis(target, chassis);
+
+    const struct sbrec_igmp_group *g =
+        sbrec_igmp_group_index_find(igmp_groups, target);
+    sbrec_igmp_group_index_destroy_row(target);
+    return g;
+}
+
+static struct sbrec_igmp_group *
+igmp_group_create_(struct ovsdb_idl_txn *idl_txn,
+                   const char *addr_str,
+                   const struct sbrec_datapath_binding *datapath,
+                   const struct sbrec_chassis *chassis)
+{
+    struct sbrec_igmp_group *g = sbrec_igmp_group_insert(idl_txn);
+
+    sbrec_igmp_group_set_address(g, addr_str);
+    sbrec_igmp_group_set_datapath(g, datapath);
+    sbrec_igmp_group_set_chassis(g, chassis);
+
+    return g;
+}
diff --git a/controller/ip-mcast.h b/controller/ip-mcast.h
index b3447d4..326f39d 100644
--- a/controller/ip-mcast.h
+++ b/controller/ip-mcast.h
@@ -30,12 +30,20 @@ const struct sbrec_igmp_group *igmp_group_lookup(
     const struct in6_addr *address,
     const struct sbrec_datapath_binding *datapath,
     const struct sbrec_chassis *chassis);
+const struct sbrec_igmp_group *igmp_mrouter_lookup(
+    struct ovsdb_idl_index *igmp_groups,
+    const struct sbrec_datapath_binding *datapath,
+    const struct sbrec_chassis *chassis);
 
 struct sbrec_igmp_group *igmp_group_create(
     struct ovsdb_idl_txn *idl_txn,
     const struct in6_addr *address,
     const struct sbrec_datapath_binding *datapath,
     const struct sbrec_chassis *chassis);
+struct sbrec_igmp_group *igmp_mrouter_create(
+    struct ovsdb_idl_txn *idl_txn,
+    const struct sbrec_datapath_binding *datapath,
+    const struct sbrec_chassis *chassis);
 
 void igmp_group_update_ports(const struct sbrec_igmp_group *g,
                              struct ovsdb_idl_index *datapaths,
@@ -43,6 +51,12 @@ void igmp_group_update_ports(const struct sbrec_igmp_group *g,
                              const struct mcast_snooping *ms,
                              const struct mcast_group *mc_group)
     OVS_REQ_RDLOCK(ms->rwlock);
+void
+igmp_mrouter_update_ports(const struct sbrec_igmp_group *g,
+                          struct ovsdb_idl_index *datapaths,
+                          struct ovsdb_idl_index *port_bindings,
+                          const struct mcast_snooping *ms)
+    OVS_REQ_RDLOCK(ms->rwlock);
 
 void igmp_group_delete(const struct sbrec_igmp_group *g);
 
diff --git a/controller/lflow-cache.c b/controller/lflow-cache.c
index 9c3db06..9fca2d7 100644
--- a/controller/lflow-cache.c
+++ b/controller/lflow-cache.c
@@ -108,9 +108,8 @@ lflow_cache_flush(struct lflow_cache *lc)
     COVERAGE_INC(lflow_cache_flush);
     for (size_t i = 0; i < LCACHE_T_MAX; i++) {
         struct lflow_cache_entry *lce;
-        struct lflow_cache_entry *lce_next;
 
-        HMAP_FOR_EACH_SAFE (lce, lce_next, node, &lc->entries[i]) {
+        HMAP_FOR_EACH_SAFE (lce, node, &lc->entries[i]) {
             lflow_cache_delete__(lc, lce);
         }
     }
diff --git a/controller/lflow-conj-ids.c b/controller/lflow-conj-ids.c
index 70c6187..79bd08c 100644
--- a/controller/lflow-conj-ids.c
+++ b/controller/lflow-conj-ids.c
@@ -220,8 +220,8 @@ lflow_conj_ids_free(struct conj_ids *conj_ids, const struct uuid *lflow_uuid)
     if (!ltd) {
         return;
     }
-    struct lflow_conj_node *lflow_conj, *next;
-    LIST_FOR_EACH_SAFE (lflow_conj, next, list_node, &ltd->dps) {
+    struct lflow_conj_node *lflow_conj;
+    LIST_FOR_EACH_SAFE (lflow_conj, list_node, &ltd->dps) {
         lflow_conj_ids_free_(conj_ids, lflow_conj);
     }
     hmap_remove(&conj_ids->lflow_to_dps, &ltd->hmap_node);
@@ -238,26 +238,24 @@ lflow_conj_ids_init(struct conj_ids *conj_ids)
 
 void
 lflow_conj_ids_destroy(struct conj_ids *conj_ids) {
-    struct conj_id_node *conj_id_node, *next;
-    HMAP_FOR_EACH_SAFE (conj_id_node, next, hmap_node,
+    struct conj_id_node *conj_id_node;
+    HMAP_FOR_EACH_SAFE (conj_id_node, hmap_node,
                         &conj_ids->conj_id_allocations) {
         hmap_remove(&conj_ids->conj_id_allocations, &conj_id_node->hmap_node);
         free(conj_id_node);
     }
     hmap_destroy(&conj_ids->conj_id_allocations);
 
-    struct lflow_conj_node *lflow_conj, *l_c_next;
-    HMAP_FOR_EACH_SAFE (lflow_conj, l_c_next, hmap_node,
-                        &conj_ids->lflow_conj_ids) {
+    struct lflow_conj_node *lflow_conj;
+    HMAP_FOR_EACH_SAFE (lflow_conj, hmap_node, &conj_ids->lflow_conj_ids) {
         hmap_remove(&conj_ids->lflow_conj_ids, &lflow_conj->hmap_node);
         ovs_list_remove(&lflow_conj->list_node);
         free(lflow_conj);
     }
     hmap_destroy(&conj_ids->lflow_conj_ids);
 
-    struct lflow_to_dps_node *ltd, *ltd_next;
-    HMAP_FOR_EACH_SAFE (ltd, ltd_next, hmap_node,
-                        &conj_ids->lflow_to_dps) {
+    struct lflow_to_dps_node *ltd;
+    HMAP_FOR_EACH_SAFE (ltd, hmap_node, &conj_ids->lflow_to_dps) {
         hmap_remove(&conj_ids->lflow_to_dps, &ltd->hmap_node);
         free(ltd);
     }
diff --git a/controller/lflow.c b/controller/lflow.c
index a988290..eef4438 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -14,6 +14,7 @@
  */
 
 #include <config.h>
+#include "binding.h"
 #include "lflow.h"
 #include "coverage.h"
 #include "ha-chassis.h"
@@ -114,6 +115,11 @@ static void ref_lflow_node_destroy(struct ref_lflow_node *);
 static void lflow_resource_destroy_lflow(struct lflow_resource_ref *,
                                          const struct uuid *lflow_uuid);
 
+static void add_port_sec_flows(const struct shash *binding_lports,
+                               const struct sbrec_chassis *,
+                               struct ovn_desired_flow_table *);
+static void consider_port_sec_flows(const struct sbrec_port_binding *pb,
+                                    struct ovn_desired_flow_table *);
 
 static bool
 lookup_port_cb(const void *aux_, const char *port_name, unsigned int *portp)
@@ -220,10 +226,10 @@ lflow_resource_init(struct lflow_resource_ref *lfrr)
 void
 lflow_resource_destroy(struct lflow_resource_ref *lfrr)
 {
-    struct ref_lflow_node *rlfn, *rlfn_next;
-    HMAP_FOR_EACH_SAFE (rlfn, rlfn_next, node, &lfrr->ref_lflow_table) {
-        struct lflow_ref_list_node *lrln, *next;
-        HMAP_FOR_EACH_SAFE (lrln, next, hmap_node, &rlfn->lflow_uuids) {
+    struct ref_lflow_node *rlfn;
+    HMAP_FOR_EACH_SAFE (rlfn, node, &lfrr->ref_lflow_table) {
+        struct lflow_ref_list_node *lrln;
+        HMAP_FOR_EACH_SAFE (lrln, hmap_node, &rlfn->lflow_uuids) {
             ovs_list_remove(&lrln->list_node);
             hmap_remove(&rlfn->lflow_uuids, &lrln->hmap_node);
             free(lrln);
@@ -233,8 +239,8 @@ lflow_resource_destroy(struct lflow_resource_ref *lfrr)
     }
     hmap_destroy(&lfrr->ref_lflow_table);
 
-    struct lflow_ref_node *lfrn, *lfrn_next;
-    HMAP_FOR_EACH_SAFE (lfrn, lfrn_next, node, &lfrr->lflow_ref_table) {
+    struct lflow_ref_node *lfrn;
+    HMAP_FOR_EACH_SAFE (lfrn, node, &lfrr->lflow_ref_table) {
         hmap_remove(&lfrr->lflow_ref_table, &lfrn->node);
         free(lfrn);
     }
@@ -342,8 +348,8 @@ lflow_resource_destroy_lflow(struct lflow_resource_ref *lfrr,
     }
 
     hmap_remove(&lfrr->lflow_ref_table, &lfrn->node);
-    struct lflow_ref_list_node *lrln, *next;
-    LIST_FOR_EACH_SAFE (lrln, next, list_node, &lfrn->lflow_ref_head) {
+    struct lflow_ref_list_node *lrln;
+    LIST_FOR_EACH_SAFE (lrln, list_node, &lfrn->lflow_ref_head) {
         ovs_list_remove(&lrln->list_node);
         hmap_remove(&lrln->rlfn->lflow_uuids, &lrln->hmap_node);
 
@@ -435,7 +441,7 @@ lflow_handle_changed_flows(struct lflow_ctx_in *l_ctx_in,
      * lflow_add_flows_for_datapath() may have been called before calling
      * this function. */
     struct hmap flood_remove_nodes = HMAP_INITIALIZER(&flood_remove_nodes);
-    struct ofctrl_flood_remove_node *ofrn, *next;
+    struct ofctrl_flood_remove_node *ofrn;
     SBREC_LOGICAL_FLOW_TABLE_FOR_EACH_TRACKED (lflow,
                                                l_ctx_in->logical_flow_table) {
         if (lflows_processed_find(l_ctx_out->lflows_processed,
@@ -485,7 +491,7 @@ lflow_handle_changed_flows(struct lflow_ctx_in *l_ctx_in,
                                   l_ctx_in, l_ctx_out);
         }
     }
-    HMAP_FOR_EACH_SAFE (ofrn, next, hmap_node, &flood_remove_nodes) {
+    HMAP_FOR_EACH_SAFE (ofrn, hmap_node, &flood_remove_nodes) {
         hmap_remove(&flood_remove_nodes, &ofrn->hmap_node);
         free(ofrn);
     }
@@ -690,8 +696,8 @@ consider_lflow_for_added_as_ips__(
 
     /* Discard the matches unrelated to the added addresses in the AS
      * 'as_name'. */
-    struct expr_match *m, *m_next;
-    HMAP_FOR_EACH_SAFE (m, m_next, hmap_node, &matches) {
+    struct expr_match *m;
+    HMAP_FOR_EACH_SAFE (m, hmap_node, &matches) {
         if (!m->as_name || strcmp(m->as_name, as_name) ||
             (has_dummy_ip && !memcmp(&m->as_ip, &dummy_ip, sizeof dummy_ip))) {
             hmap_remove(&matches, &m->hmap_node);
@@ -985,7 +991,7 @@ lflow_handle_changed_ref(enum ref_type ref_type, const char *ref_name,
 
     struct ovs_list lflows_todo = OVS_LIST_INITIALIZER(&lflows_todo);
 
-    struct lflow_ref_list_node *lrln, *lrln_uuid, *lrln_uuid_next;
+    struct lflow_ref_list_node *lrln, *lrln_uuid;
     HMAP_FOR_EACH (lrln, hmap_node, &rlfn->lflow_uuids) {
         if (lflows_processed_find(l_ctx_out->lflows_processed,
                                   &lrln->lflow_uuid)) {
@@ -1027,7 +1033,7 @@ lflow_handle_changed_ref(enum ref_type ref_type, const char *ref_name,
     /* Re-parse the related lflows. */
     /* Firstly, flood remove the flows from desired flow table. */
     struct hmap flood_remove_nodes = HMAP_INITIALIZER(&flood_remove_nodes);
-    LIST_FOR_EACH_SAFE (lrln_uuid, lrln_uuid_next, list_node, &lflows_todo) {
+    LIST_FOR_EACH_SAFE (lrln_uuid, list_node, &lflows_todo) {
         VLOG_DBG("Reprocess lflow "UUID_FMT" for resource type: %d,"
                  " name: %s.",
                  UUID_ARGS(&lrln_uuid->lflow_uuid),
@@ -1039,7 +1045,7 @@ lflow_handle_changed_ref(enum ref_type ref_type, const char *ref_name,
     ofctrl_flood_remove_flows(l_ctx_out->flow_table, &flood_remove_nodes);
 
     /* Secondly, for each lflow that is actually removed, reprocess it. */
-    struct ofctrl_flood_remove_node *ofrn, *ofrn_next;
+    struct ofctrl_flood_remove_node *ofrn;
     HMAP_FOR_EACH (ofrn, hmap_node, &flood_remove_nodes) {
         lflow_resource_destroy_lflow(l_ctx_out->lfrr, &ofrn->sb_uuid);
         lflow_conj_ids_free(l_ctx_out->conj_ids, &ofrn->sb_uuid);
@@ -1070,7 +1076,7 @@ lflow_handle_changed_ref(enum ref_type ref_type, const char *ref_name,
                               &nd_ra_opts, &controller_event_opts, false,
                               l_ctx_in, l_ctx_out);
     }
-    HMAP_FOR_EACH_SAFE (ofrn, ofrn_next, hmap_node, &flood_remove_nodes) {
+    HMAP_FOR_EACH_SAFE (ofrn, hmap_node, &flood_remove_nodes) {
         hmap_remove(&flood_remove_nodes, &ofrn->hmap_node);
         free(ofrn);
     }
@@ -1169,6 +1175,8 @@ add_matches_to_flow_table(const struct sbrec_logical_flow *lflow,
         .ct_snat_vip_ptable = OFTABLE_CT_SNAT_HAIRPIN,
         .fdb_ptable = OFTABLE_GET_FDB,
         .fdb_lookup_ptable = OFTABLE_LOOKUP_FDB,
+        .in_port_sec_ptable = OFTABLE_CHK_IN_PORT_SEC,
+        .out_port_sec_ptable = OFTABLE_CHK_OUT_PORT_SEC,
         .ctrl_meter_id = ctrl_meter_id,
         .common_nat_ct_zone = get_common_nat_zone(ldp),
     };
@@ -1562,8 +1570,8 @@ lflows_processed_remove(struct hmap *lflows_processed,
 void
 lflows_processed_destroy(struct hmap *lflows_processed)
 {
-    struct lflow_processed_node *node, *next;
-    HMAP_FOR_EACH_SAFE (node, next, hmap_node, lflows_processed) {
+    struct lflow_processed_node *node;
+    HMAP_FOR_EACH_SAFE (node, hmap_node, lflows_processed) {
         hmap_remove(lflows_processed, &node->hmap_node);
         free(node);
     }
@@ -1640,40 +1648,50 @@ static void
 consider_neighbor_flow(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                        const struct hmap *local_datapaths,
                        const struct sbrec_mac_binding *b,
-                       struct ovn_desired_flow_table *flow_table)
+                       const struct sbrec_static_mac_binding *smb,
+                       struct ovn_desired_flow_table *flow_table,
+                       uint16_t priority)
 {
+    if (!b && !smb) {
+        return;
+    }
+
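+    /* Either 'b' (a learned MAC_Binding) or 'smb' (a Static_MAC_Binding) is
+     * set; take the binding fields from whichever one is present. */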
+    char *logical_port = b ? b->logical_port : smb->logical_port;
+    char *ip = b ? b->ip : smb->ip;
+    char *mac = b ? b->mac : smb->mac;
+
     const struct sbrec_port_binding *pb
-        = lport_lookup_by_name(sbrec_port_binding_by_name, b->logical_port);
+        = lport_lookup_by_name(sbrec_port_binding_by_name, logical_port);
     if (!pb || !get_local_datapath(local_datapaths,
                                    pb->datapath->tunnel_key)) {
         return;
     }
 
-    struct eth_addr mac;
-    if (!eth_addr_from_string(b->mac, &mac)) {
+    struct eth_addr mac_addr;
+    if (!eth_addr_from_string(mac, &mac_addr)) {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
-        VLOG_WARN_RL(&rl, "bad 'mac' %s", b->mac);
+        VLOG_WARN_RL(&rl, "bad 'mac' %s", mac);
         return;
     }
 
     struct match get_arp_match = MATCH_CATCHALL_INITIALIZER;
     struct match lookup_arp_match = MATCH_CATCHALL_INITIALIZER;
 
-    if (strchr(b->ip, '.')) {
-        ovs_be32 ip;
-        if (!ip_parse(b->ip, &ip)) {
+    if (strchr(ip, '.')) {
+        ovs_be32 ip_addr;
+        if (!ip_parse(ip, &ip_addr)) {
             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
-            VLOG_WARN_RL(&rl, "bad 'ip' %s", b->ip);
+            VLOG_WARN_RL(&rl, "bad 'ip' %s", ip);
             return;
         }
-        match_set_reg(&get_arp_match, 0, ntohl(ip));
-        match_set_reg(&lookup_arp_match, 0, ntohl(ip));
+        match_set_reg(&get_arp_match, 0, ntohl(ip_addr));
+        match_set_reg(&lookup_arp_match, 0, ntohl(ip_addr));
         match_set_dl_type(&lookup_arp_match, htons(ETH_TYPE_ARP));
     } else {
         struct in6_addr ip6;
-        if (!ipv6_parse(b->ip, &ip6)) {
+        if (!ipv6_parse(ip, &ip6)) {
             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
-            VLOG_WARN_RL(&rl, "bad 'ip' %s", b->ip);
+            VLOG_WARN_RL(&rl, "bad 'ip' %s", ip);
             return;
         }
         ovs_be128 value;
@@ -1696,20 +1714,22 @@ consider_neighbor_flow(struct ovsdb_idl_index *sbrec_port_binding_by_name,
     uint64_t stub[1024 / 8];
     struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(stub);
     uint8_t value = 1;
-    put_load(mac.ea, sizeof mac.ea, MFF_ETH_DST, 0, 48, &ofpacts);
+    put_load(mac_addr.ea, sizeof mac_addr.ea, MFF_ETH_DST, 0, 48, &ofpacts);
     put_load(&value, sizeof value, MFF_LOG_FLAGS, MLF_LOOKUP_MAC_BIT, 1,
              &ofpacts);
-    ofctrl_add_flow(flow_table, OFTABLE_MAC_BINDING, 100,
-                    b->header_.uuid.parts[0], &get_arp_match,
-                    &ofpacts, &b->header_.uuid);
+    ofctrl_add_flow(flow_table, OFTABLE_MAC_BINDING, priority,
+                    b ? b->header_.uuid.parts[0] : smb->header_.uuid.parts[0],
+                    &get_arp_match, &ofpacts,
+                    b ? &b->header_.uuid : &smb->header_.uuid);
 
     ofpbuf_clear(&ofpacts);
     put_load(&value, sizeof value, MFF_LOG_FLAGS, MLF_LOOKUP_MAC_BIT, 1,
              &ofpacts);
-    match_set_dl_src(&lookup_arp_match, mac);
-    ofctrl_add_flow(flow_table, OFTABLE_MAC_LOOKUP, 100,
-                    b->header_.uuid.parts[0], &lookup_arp_match,
-                    &ofpacts, &b->header_.uuid);
+    match_set_dl_src(&lookup_arp_match, mac_addr);
+    ofctrl_add_flow(flow_table, OFTABLE_MAC_LOOKUP, priority,
+                    b ? b->header_.uuid.parts[0] : smb->header_.uuid.parts[0],
+                    &lookup_arp_match, &ofpacts,
+                    b ? &b->header_.uuid : &smb->header_.uuid);
 
     ofpbuf_uninit(&ofpacts);
 }
@@ -1719,13 +1739,23 @@ consider_neighbor_flow(struct ovsdb_idl_index *sbrec_port_binding_by_name,
 static void
 add_neighbor_flows(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                    const struct sbrec_mac_binding_table *mac_binding_table,
+                   const struct sbrec_static_mac_binding_table *smb_table,
                    const struct hmap *local_datapaths,
                    struct ovn_desired_flow_table *flow_table)
 {
+    /* Add flows for learnt MAC bindings */
     const struct sbrec_mac_binding *b;
     SBREC_MAC_BINDING_TABLE_FOR_EACH (b, mac_binding_table) {
         consider_neighbor_flow(sbrec_port_binding_by_name, local_datapaths,
-                               b, flow_table);
+                               b, NULL, flow_table, 100);
+    }
+
+    /* Add flows for statically configured MAC bindings */
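+    /* Static bindings with 'override_dynamic_mac' set are installed at
+     * priority 150, above the learned MAC_Binding flows (priority 100);
+     * the rest use priority 50 so that learned entries win. */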
+    const struct sbrec_static_mac_binding *smb;
+    SBREC_STATIC_MAC_BINDING_TABLE_FOR_EACH (smb, smb_table) {
+        consider_neighbor_flow(sbrec_port_binding_by_name, local_datapaths,
+                               NULL, smb, flow_table,
+                               smb->override_dynamic_mac ? 150 : 50);
     }
 }
 
@@ -1745,6 +1775,7 @@ add_lb_vip_hairpin_reply_action(struct in6_addr *vip6, ovs_be32 vip,
                                 uint64_t cookie, struct ofpbuf *ofpacts)
 {
     struct match match = MATCH_CATCHALL_INITIALIZER;
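+    /* Remember the offset of the learn action; pointers into 'ofpacts' may
+     * be invalidated if the buffer is reallocated while building it. */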
+    size_t ol_offset = ofpacts->size;
     struct ofpact_learn *ol = ofpact_put_LEARN(ofpacts);
     struct ofpact_learn_spec *ol_spec;
     unsigned int imm_bytes;
@@ -1898,6 +1929,8 @@ add_lb_vip_hairpin_reply_action(struct in6_addr *vip6, ovs_be32 vip,
     src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
     memcpy(src_imm, &imm_reg_value, imm_bytes);
 
+    /* Reload ol pointer since ofpacts buffer can be reallocated. */
+    ol = ofpbuf_at_assert(ofpacts, ol_offset, sizeof *ol);
     ofpact_finish_LEARN(ofpacts, &ol);
 }
 
@@ -2031,6 +2064,20 @@ add_lb_vip_hairpin_flows(struct ovn_controller_lb *lb,
 }
 
 static void
+add_lb_ct_snat_hairpin_for_dp(const struct ovn_controller_lb *lb,
+                              const struct sbrec_datapath_binding *datapath,
+                              struct match *dp_match,
+                              struct ofpbuf *dp_acts,
+                              struct ovn_desired_flow_table *flow_table)
+{
+    match_set_metadata(dp_match, htonll(datapath->tunnel_key));
+    ofctrl_add_or_append_flow(flow_table, OFTABLE_CT_SNAT_HAIRPIN, 200,
+                              lb->slb->header_.uuid.parts[0],
+                              dp_match, dp_acts, &lb->slb->header_.uuid,
+                              NX_CTLR_NO_METER, NULL);
+}
+
+static void
 add_lb_ct_snat_hairpin_dp_flows(struct ovn_controller_lb *lb,
                                 uint32_t id,
                                 struct ovn_desired_flow_table *flow_table)
@@ -2055,12 +2102,15 @@ add_lb_ct_snat_hairpin_dp_flows(struct ovn_controller_lb *lb,
     struct match dp_match = MATCH_CATCHALL_INITIALIZER;
 
     for (size_t i = 0; i < lb->slb->n_datapaths; i++) {
-        match_set_metadata(&dp_match,
-                           htonll(lb->slb->datapaths[i]->tunnel_key));
-        ofctrl_add_or_append_flow(flow_table, OFTABLE_CT_SNAT_HAIRPIN, 200,
-                                  lb->slb->header_.uuid.parts[0],
-                                  &dp_match, &dp_acts, &lb->slb->header_.uuid,
-                                  NX_CTLR_NO_METER, NULL);
+        add_lb_ct_snat_hairpin_for_dp(lb, lb->slb->datapaths[i],
+                                      &dp_match, &dp_acts, flow_table);
+    }
+    if (lb->slb->datapath_group) {
+        for (size_t i = 0; i < lb->slb->datapath_group->n_datapaths; i++) {
+            add_lb_ct_snat_hairpin_for_dp(
+                lb, lb->slb->datapath_group->datapaths[i],
+                &dp_match, &dp_acts, flow_table);
+        }
     }
 
     ofpbuf_uninit(&dp_acts);
@@ -2318,7 +2368,20 @@ consider_lb_hairpin_flows(const struct sbrec_load_balancer *sbrec_lb,
         }
     }
 
-    if (i == sbrec_lb->n_datapaths) {
+    if (sbrec_lb->n_datapaths && i == sbrec_lb->n_datapaths) {
+        return;
+    }
+
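+    /* The load balancer may also reference its datapaths indirectly via a
+     * datapath group; check those for local presence as well. */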
+    struct sbrec_logical_dp_group *dp_group = sbrec_lb->datapath_group;
+
+    for (i = 0; dp_group && i < dp_group->n_datapaths; i++) {
+        if (get_local_datapath(local_datapaths,
+                               dp_group->datapaths[i]->tunnel_key)) {
+            break;
+        }
+    }
+
+    if (dp_group && i == dp_group->n_datapaths) {
         return;
     }
 
@@ -2382,7 +2445,7 @@ add_lb_hairpin_flows(const struct sbrec_load_balancer_table *lb_table,
 
 /* Handles neighbor changes in mac_binding table. */
 void
-lflow_handle_changed_neighbors(
+lflow_handle_changed_mac_bindings(
     struct ovsdb_idl_index *sbrec_port_binding_by_name,
     const struct sbrec_mac_binding_table *mac_binding_table,
     const struct hmap *local_datapaths,
@@ -2409,7 +2472,36 @@ lflow_handle_changed_neighbors(
             VLOG_DBG("handle new mac_binding "UUID_FMT,
                      UUID_ARGS(&mb->header_.uuid));
             consider_neighbor_flow(sbrec_port_binding_by_name, local_datapaths,
-                                   mb, flow_table);
+                                   mb, NULL, flow_table, 100);
+        }
+    }
+}
+
+/* Handles changes to static_mac_binding table. */
+void
+lflow_handle_changed_static_mac_bindings(
+    struct ovsdb_idl_index *sbrec_port_binding_by_name,
+    const struct sbrec_static_mac_binding_table *smb_table,
+    const struct hmap *local_datapaths,
+    struct ovn_desired_flow_table *flow_table)
+{
+    const struct sbrec_static_mac_binding *smb;
+    SBREC_STATIC_MAC_BINDING_TABLE_FOR_EACH_TRACKED (smb, smb_table) {
+        if (sbrec_static_mac_binding_is_deleted(smb)) {
+            VLOG_DBG("handle deleted static_mac_binding "UUID_FMT,
+                     UUID_ARGS(&smb->header_.uuid));
+            ofctrl_remove_flows(flow_table, &smb->header_.uuid);
+        } else {
+            if (!sbrec_static_mac_binding_is_new(smb)) {
+                VLOG_DBG("handle updated static_mac_binding "UUID_FMT,
+                         UUID_ARGS(&smb->header_.uuid));
+                ofctrl_remove_flows(flow_table, &smb->header_.uuid);
+            }
+            VLOG_DBG("handle new static_mac_binding "UUID_FMT,
+                     UUID_ARGS(&smb->header_.uuid));
+            consider_neighbor_flow(sbrec_port_binding_by_name, local_datapaths,
+                                   NULL, smb, flow_table,
+                                   smb->override_dynamic_mac ? 150 : 50);
         }
     }
 }
@@ -2479,7 +2571,9 @@ lflow_run(struct lflow_ctx_in *l_ctx_in, struct lflow_ctx_out *l_ctx_out)
 
     add_logical_flows(l_ctx_in, l_ctx_out);
     add_neighbor_flows(l_ctx_in->sbrec_port_binding_by_name,
-                       l_ctx_in->mac_binding_table, l_ctx_in->local_datapaths,
+                       l_ctx_in->mac_binding_table,
+                       l_ctx_in->static_mac_binding_table,
+                       l_ctx_in->local_datapaths,
                        l_ctx_out->flow_table);
     add_lb_hairpin_flows(l_ctx_in->lb_table, l_ctx_in->local_datapaths,
                          l_ctx_in->lb_hairpin_use_ct_mark,
@@ -2488,6 +2582,8 @@ lflow_run(struct lflow_ctx_in *l_ctx_in, struct lflow_ctx_out *l_ctx_out)
                          l_ctx_out->hairpin_id_pool);
     add_fdb_flows(l_ctx_in->fdb_table, l_ctx_in->local_datapaths,
                   l_ctx_out->flow_table);
+    add_port_sec_flows(l_ctx_in->binding_lports, l_ctx_in->chassis,
+                       l_ctx_out->flow_table);
 }
 
 /* Should be called at every ovn-controller iteration before IDL tracked
@@ -2617,10 +2713,24 @@ lflow_add_flows_for_datapath(const struct sbrec_datapath_binding *dp,
         mb, mb_index_row, l_ctx_in->sbrec_mac_binding_by_datapath) {
         consider_neighbor_flow(l_ctx_in->sbrec_port_binding_by_name,
                                l_ctx_in->local_datapaths,
-                               mb, l_ctx_out->flow_table);
+                               mb, NULL, l_ctx_out->flow_table, 100);
     }
     sbrec_mac_binding_index_destroy_row(mb_index_row);
 
+    struct sbrec_static_mac_binding *smb_index_row =
+        sbrec_static_mac_binding_index_init_row(
+            l_ctx_in->sbrec_static_mac_binding_by_datapath);
+    sbrec_static_mac_binding_index_set_datapath(smb_index_row, dp);
+    const struct sbrec_static_mac_binding *smb;
+    SBREC_STATIC_MAC_BINDING_FOR_EACH_EQUAL (
+        smb, smb_index_row, l_ctx_in->sbrec_static_mac_binding_by_datapath) {
+        consider_neighbor_flow(l_ctx_in->sbrec_port_binding_by_name,
+                               l_ctx_in->local_datapaths,
+                               NULL, smb, l_ctx_out->flow_table,
+                               smb->override_dynamic_mac ? 150 : 50);
+    }
+    sbrec_static_mac_binding_index_destroy_row(smb_index_row);
+
     dhcp_opts_destroy(&dhcp_opts);
     dhcp_opts_destroy(&dhcpv6_opts);
     nd_ra_opts_destroy(&nd_ra_opts);
@@ -2647,8 +2757,25 @@ lflow_handle_flows_for_lport(const struct sbrec_port_binding *pb,
 {
     bool changed;
 
-    return lflow_handle_changed_ref(REF_TYPE_PORTBINDING, pb->logical_port,
-                                    l_ctx_in, l_ctx_out, &changed);
+    if (!lflow_handle_changed_ref(REF_TYPE_PORTBINDING, pb->logical_port,
+                                  l_ctx_in, l_ctx_out, &changed)) {
+        return false;
+    }
+
+    /* Program the port security flows.
+     * Note: All the port security OF rules are added using the 'uuid'
+     * of the port binding.  Right now port binding 'uuid' is used in
+     * the logical flow table (l_ctx_out->flow_table) only for port
+     * security flows.  Later, if new flows are added using the
+     * port binding 'uuid', this function should handle them properly.
+     */
+    ofctrl_remove_flows(l_ctx_out->flow_table, &pb->header_.uuid);
+
+    if (pb->n_port_security && shash_find(l_ctx_in->binding_lports,
+                                          pb->logical_port)) {
+        consider_port_sec_flows(pb, l_ctx_out->flow_table);
+    }
+    return true;
 }
 
 /* Handles port-binding add/deletions. */
@@ -2785,3 +2912,778 @@ lflow_handle_changed_fdbs(struct lflow_ctx_in *l_ctx_in,
 
     return true;
 }
+
+static void
+add_port_sec_flows(const struct shash *binding_lports,
+                   const struct sbrec_chassis *chassis,
+                   struct ovn_desired_flow_table *flow_table)
+{
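+    /* Install port security flows for every binding lport whose port
+     * binding is claimed by this chassis. */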
+    const struct shash_node *node;
+    SHASH_FOR_EACH (node, binding_lports) {
+        const struct binding_lport *b_lport = node->data;
+        if (!b_lport->pb || b_lport->pb->chassis != chassis) {
+            continue;
+        }
+
+        consider_port_sec_flows(b_lport->pb, flow_table);
+    }
+}
+
+static void
+reset_match_for_port_sec_flows(const struct sbrec_port_binding *pb,
+                               enum mf_field_id reg_id, struct match *match)
+{
+    match_init_catchall(match);
+    match_set_metadata(match, htonll(pb->datapath->tunnel_key));
+    match_set_reg(match, reg_id - MFF_REG0, pb->tunnel_key);
+}
+
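+/* Builds the "port_sec_failed = 1;" action: sets MLF_CHECK_PORT_SEC_BIT in
+ * MFF_LOG_FLAGS to mark the packet as having failed the check. */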
+static void build_port_sec_deny_action(struct ofpbuf *ofpacts)
+{
+    ofpbuf_clear(ofpacts);
+    uint8_t value = 1;
+    put_load(&value, sizeof value, MFF_LOG_FLAGS,
+             MLF_CHECK_PORT_SEC_BIT, 1, ofpacts);
+}
+
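+/* Builds the "port_sec_failed = 0;" action: clears MLF_CHECK_PORT_SEC_BIT
+ * so the packet is treated as having passed the port security check. */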
+static void build_port_sec_allow_action(struct ofpbuf *ofpacts)
+{
+    ofpbuf_clear(ofpacts);
+    uint8_t value = 0;
+    put_load(&value, sizeof value, MFF_LOG_FLAGS,
+             MLF_CHECK_PORT_SEC_BIT, 1, ofpacts);
+}
+
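+/* Builds a resubmit action to OFTABLE_CHK_IN_PORT_SEC_ND so that the
+ * ARP/ND specific checks are applied next. */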
+static void build_port_sec_adv_nd_check(struct ofpbuf *ofpacts)
+{
+    ofpbuf_clear(ofpacts);
+    struct ofpact_resubmit *resubmit = ofpact_put_RESUBMIT(ofpacts);
+    resubmit->in_port = OFPP_IN_PORT;
+    resubmit->table_id = OFTABLE_CHK_IN_PORT_SEC_ND;
+}
+
+static void
+build_in_port_sec_default_flows(const struct sbrec_port_binding *pb,
+                                struct match *m, struct ofpbuf *ofpacts,
+                                struct ovn_desired_flow_table *flow_table)
+{
+    reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+    build_port_sec_deny_action(ofpacts);
+
+    /* Add the below logical flow equivalent OF rule in 'in_port_sec' table.
+     * priority: 80
+     * match - "inport == pb->logical_port"
+     * action - "port_sec_failed = 1;"
+     * description: "Default drop all traffic from this port."
+     */
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 80,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    /* ARP checking is done in the next table. So just advance
+     * the arp packets to the next table.
+     *
+     * Add the below logical flow equivalent OF rules in 'in_port_sec' table.
+     * priority: 95
+     * match - "inport == pb->logical_port && arp"
+     * action - "resubmit(,PORT_SEC_ND_TABLE);"
+     */
+    match_set_dl_type(m, htons(ETH_TYPE_ARP));
+    build_port_sec_adv_nd_check(ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 95,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd' table
+     * priority: 80
+     * match - "inport == pb->logical_port && arp"
+     * action - "port_sec_failed = 1;"
+     * description: "Default drop all arp packets"
+     * note: "Higher priority flows are added to allow the legit ARP packets."
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+    build_port_sec_deny_action(ofpacts);
+    match_set_dl_type(m, htons(ETH_TYPE_ARP));
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 80,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd' table
+     * priority: 80
+     * match - "inport == pb->logical_port && icmp6 && icmp6.code == 136"
+     * action - "port_sec_failed = 1;"
+     * description: "Default drop all IPv6 NA packets"
+     * note: "Higher priority flows are added to allow the legit NA packets."
+     */
+    match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+    match_set_nw_proto(m, IPPROTO_ICMPV6);
+    match_set_nw_ttl(m, 255);
+    match_set_icmp_type(m, 136);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 80,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd' table
+     * priority: 80
+     * match - "inport == pb->logical_port && icmp6 && icmp6.code == 135"
+     * action - "port_sec_failed = 0;"
+     * description: "Default allow all IPv6 NS packets"
+     * note: This is a hack for now.  Ideally we should do default drop.
+     *       There seems to be a bug in ovs-vswitchd which needs further
+     *       investigation.
+     *
+     * Eg.  If there are below OF rules in the same table
+     * (1) priority=90,icmp6,reg14=0x1,metadata=0x1,nw_ttl=225,icmp_type=135,
+     *     icmp_code=0,nd_sll=fa:16:3e:94:05:98
+     *     actions=load:0->NXM_NX_REG10[12]
+     * (2) priority=80,icmp6,reg14=0x1,metadata=0x1,nw_ttl=225,icmp_type=135,
+     *     icmp_code=0 actions=load:1->NXM_NX_REG10[12]
+     *
+     * An IPv6 NS packet with nd_sll = fa:16:3e:94:05:98 matches the second
+     * prio-80 flow instead of the first one.
+     */
+    match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+    match_set_nw_proto(m, IPPROTO_ICMPV6);
+    match_set_nw_ttl(m, 255);
+    match_set_icmp_type(m, 135);
+    build_port_sec_allow_action(ofpacts); /* TODO: Change this to
+                                           * build_port_sec_deny_action(). */
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 80,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+}
+
+static void
+build_in_port_sec_no_ip_flows(const struct sbrec_port_binding *pb,
+                              struct lport_addresses *ps_addr,
+                              struct match *m, struct ofpbuf *ofpacts,
+                              struct ovn_desired_flow_table *flow_table)
+{
+    if (ps_addr->n_ipv4_addrs || ps_addr->n_ipv6_addrs) {
+        return;
+    }
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec' table.
+     * priority: 90
+     * match - "inport == pb->logical_port && eth.src == ps_addr.ea"
+     * action - "next;"
+     * description: "Advance the packet for ARP/ND check"
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+    match_set_dl_src(m, ps_addr->ea);
+    build_port_sec_adv_nd_check(ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+}
+
+static void
+build_in_port_sec_ip4_flows(const struct sbrec_port_binding *pb,
+                           struct lport_addresses *ps_addr,
+                           struct match *m, struct ofpbuf *ofpacts,
+                           struct ovn_desired_flow_table *flow_table)
+{
+    if (!ps_addr->n_ipv4_addrs) {
+        /* If no IPv4 addresses, then 'pb' is not allowed to send IPv4 traffic.
+         * build_in_port_sec_default_flows() takes care of this scenario. */
+        return;
+    }
+
+    /* Build the allow action ("port_sec_failed = 0;") used by the IPv4
+     * flows added below. */
+    build_port_sec_allow_action(ofpacts);
+
+    /* Add the below logical flow equivalent OF rules in in_port_sec.
+     * priority: 90
+     * match - "inport == pb->port && eth.src == ps_addr.ea &&
+     *         ip4.src == {ps_addr.ipv4_addrs}"
+     * action - "port_sec_failed = 0;"
+     */
+    for (size_t j = 0; j < ps_addr->n_ipv4_addrs; j++) {
+        reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+        match_set_dl_src(m, ps_addr->ea);
+        match_set_dl_type(m, htons(ETH_TYPE_IP));
+
+        ovs_be32 mask = ps_addr->ipv4_addrs[j].mask;
+        /* When the netmask is applied, if the host portion is
+         * non-zero, the host can only use the specified
+         * address.  If zero, the host is allowed to use any
+         * address in the subnet.
+         */
+        if (ps_addr->ipv4_addrs[j].plen == 32 ||
+                ps_addr->ipv4_addrs[j].addr & ~mask) {
+            match_set_nw_src(m, ps_addr->ipv4_addrs[j].addr);
+        } else {
+            match_set_nw_src_masked(m, ps_addr->ipv4_addrs[j].addr, mask);
+        }
+
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+    }
+
+    /* Add the below logical flow equivalent OF rules in in_port_sec.
+     * priority: 90
+     * match - "inport == pb->port && eth.src == ps_addr.ea &&
+     *          ip4.src == 0.0.0.0 && ip4.dst == 255.255.255.255 &&
+     *          udp.src == 67 && udp.dst == 68"
+     * action - "port_sec_failed = 0;"
+     * description: "Allow the DHCP requests."
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+    match_set_dl_src(m, ps_addr->ea);
+    match_set_dl_type(m, htons(ETH_TYPE_IP));
+
+    ovs_be32 ip4 = htonl(0);
+    match_set_nw_src(m, ip4);
+    ip4 = htonl(0xffffffff);
+    match_set_nw_dst(m, ip4);
+    match_set_nw_proto(m, IPPROTO_UDP);
+    match_set_tp_src(m, htons(68));
+    match_set_tp_dst(m, htons(67));
+
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+}
+
+/* Adds the OF rules to allow ARP packets in 'in_port_sec_nd' table. */
+static void
+build_in_port_sec_arp_flows(const struct sbrec_port_binding *pb,
+                           struct lport_addresses *ps_addr,
+                           struct match *m, struct ofpbuf *ofpacts,
+                           struct ovn_desired_flow_table *flow_table)
+{
+    if (!ps_addr->n_ipv4_addrs && ps_addr->n_ipv6_addrs) {
+        /* No ARP is allowed as only IPv6 addresses are configured. */
+        return;
+    }
+
+    build_port_sec_allow_action(ofpacts);
+
+    if (!ps_addr->n_ipv4_addrs) {
+        /* No IPv4 addresses.
+         * Add the below logical flow equivalent OF rules in 'in_port_sec_nd'
+         * table.
+         * priority: 90
+         * match - "inport == pb->port && eth.src == ps_addr.ea &&
+         *          arp && arp.sha == ps_addr.ea"
+         * action - "port_sec_failed = 0;"
+         */
+        reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+        match_set_dl_src(m, ps_addr->ea);
+        match_set_dl_type(m, htons(ETH_TYPE_ARP));
+        match_set_arp_sha(m, ps_addr->ea);
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+    }
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd'
+     * table.
+     * priority: 90
+     * match - "inport == pb->port && eth.src == ps_addr.ea &&
+     *         arp && arp.sha == ps_addr.ea && arp.spa == {ps_addr.ipv4_addrs}"
+     * action - "port_sec_failed = 0;"
+     */
+    for (size_t j = 0; j < ps_addr->n_ipv4_addrs; j++) {
+        reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+        match_set_dl_src(m, ps_addr->ea);
+        match_set_dl_type(m, htons(ETH_TYPE_ARP));
+        match_set_arp_sha(m, ps_addr->ea);
+
+        ovs_be32 mask = ps_addr->ipv4_addrs[j].mask;
+        if (ps_addr->ipv4_addrs[j].plen == 32 ||
+                ps_addr->ipv4_addrs[j].addr & ~mask) {
+            match_set_nw_src(m, ps_addr->ipv4_addrs[j].addr);
+        } else {
+            match_set_nw_src_masked(m, ps_addr->ipv4_addrs[j].addr, mask);
+        }
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+    }
+}
+
+static void
+build_in_port_sec_ip6_flows(const struct sbrec_port_binding *pb,
+                           struct lport_addresses *ps_addr,
+                           struct match *m, struct ofpbuf *ofpacts,
+                           struct ovn_desired_flow_table *flow_table)
+{
+    if (!ps_addr->n_ipv6_addrs) {
+        /* If no IPv6 addresses, then 'pb' is not allowed to send IPv6 traffic.
+         * build_in_port_sec_default_flows() takes care of this scenario. */
+        return;
+    }
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd'
+     * table.
+     * priority: 90
+     * match - "inport == pb->port && eth.src == ps_addr.ea &&
+     *         ip6.src == {ps_addr.ipv6_addrs, lla}"
+     * action - "next;"
+     * description - Advance the packet for Neighbor Solicit/Adv check.
+     */
+    build_port_sec_adv_nd_check(ofpacts);
+
+    for (size_t j = 0; j < ps_addr->n_ipv6_addrs; j++) {
+        reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+        match_set_dl_src(m, ps_addr->ea);
+        match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+
+        if (ps_addr->ipv6_addrs[j].plen == 128
+            || !ipv6_addr_is_host_zero(&ps_addr->ipv6_addrs[j].addr,
+                                        &ps_addr->ipv6_addrs[j].mask)) {
+            match_set_ipv6_src(m, &ps_addr->ipv6_addrs[j].addr);
+        } else {
+            match_set_ipv6_src_masked(m, &ps_addr->ipv6_addrs[j].network,
+                                        &ps_addr->ipv6_addrs[j].mask);
+        }
+
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+    }
+
+    reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+    match_set_dl_src(m, ps_addr->ea);
+    match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+
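+    /* The link-local address derived from the port security MAC is also a
+     * valid IPv6 source, so advance it for the ND checks as well. */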
+    struct in6_addr lla;
+    in6_generate_lla(ps_addr->ea, &lla);
+    match_set_ipv6_src(m, &lla);
+
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd'
+     * table.
+     * priority: 90
+     * match - "inport == pb->port && eth.src == ps_addr.ea &&
+     *          ip6.src == :: && ip6.dst == ff02::/16 && icmp6 &&
+     *          icmp6.code == 0 && icmp6.type == {131, 143}"
+     * action - "port_sec_failed = 0;"
+     */
+    build_port_sec_allow_action(ofpacts);
+    match_set_ipv6_src(m, &in6addr_any);
+    struct in6_addr ip6, mask;
+    char *err = ipv6_parse_masked("ff02::/16", &ip6, &mask);
+    ovs_assert(!err);
+
+    match_set_ipv6_dst_masked(m, &ip6, &mask);
+    match_set_nw_proto(m, IPPROTO_ICMPV6);
+    match_set_icmp_type(m, 131);
+    match_set_icmp_code(m, 0);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    match_set_icmp_type(m, 143);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd'
+     * table.
+     * priority: 90
+     * match - "inport == pb->port && eth.src == ps_addr.ea &&
+     *          ip6.src == :: && ip6.dst == ff02::/16 && icmp6 &&
+     *          icmp6.code == 0 && icmp6.type == 135"
+     * action - "next;"
+     * description: "Advance the packet for Neighbor solicit check"
+     */
+    build_port_sec_adv_nd_check(ofpacts);
+    match_set_icmp_type(m, 135);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+}
+
+/* Adds the OF rules to allow IPv6 Neighbor Discovery packets in the
+ * 'in_port_sec_nd' table. */
+static void
+build_in_port_sec_nd_flows(const struct sbrec_port_binding *pb,
+                           struct lport_addresses *ps_addr,
+                           struct match *m, struct ofpbuf *ofpacts,
+                           struct ovn_desired_flow_table *flow_table)
+{
+    build_port_sec_allow_action(ofpacts);
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd'
+     * table.
+     * priority: 90
+     * match - "inport == pb->port && eth.src == ps_addr.ea &&
+     *          icmp6 && icmp6.type == 135 && icmp6.code == 0 &&
+     *          ip.ttl == 255 && nd.sll == {00:00:00:00:00:00, ps_addr.ea}"
+     * action - "port_sec_failed = 0;"
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+    match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+    match_set_nw_proto(m, IPPROTO_ICMPV6);
+    match_set_nw_ttl(m, 255);
+    match_set_icmp_type(m, 135);
+    match_set_icmp_code(m, 0);
+
+    match_set_arp_sha(m, eth_addr_zero);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    match_set_arp_sha(m, ps_addr->ea);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    match_set_icmp_type(m, 136);
+    match_set_icmp_code(m, 0);
+    if (ps_addr->n_ipv6_addrs) {
+        /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd'
+         * table if IPv6 addresses are configured.
+         * priority: 90
+         * match - "inport == pb->port && eth.src == ps_addr.ea && icmp6 &&
+         *          icmp6.type == 136 && icmp6.code == 0 && ip.ttl == 255 &&
+         *          nd.tll == {00:00:00:00:00:00, ps_addr.ea} &&
+         *          nd.target == {ps_addr.ipv6_addrs, lla}"
+         * action - "port_sec_failed = 0;"
+         */
+        struct in6_addr lla;
+        in6_generate_lla(ps_addr->ea, &lla);
+        match_set_arp_tha(m, eth_addr_zero);
+
+        match_set_nd_target(m, &lla);
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+        match_set_arp_tha(m, ps_addr->ea);
+        match_set_nd_target(m, &lla);
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+
+        for (size_t j = 0; j < ps_addr->n_ipv6_addrs; j++) {
+            reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+            match_set_dl_src(m, ps_addr->ea);
+            match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+            match_set_nw_proto(m, IPPROTO_ICMPV6);
+            match_set_icmp_type(m, 136);
+            match_set_icmp_code(m, 0);
+            match_set_arp_tha(m, eth_addr_zero);
+
+            if (ps_addr->ipv6_addrs[j].plen == 128
+                || !ipv6_addr_is_host_zero(&ps_addr->ipv6_addrs[j].addr,
+                                            &ps_addr->ipv6_addrs[j].mask)) {
+                match_set_nd_target(m, &ps_addr->ipv6_addrs[j].addr);
+            } else {
+                match_set_nd_target_masked(m, &ps_addr->ipv6_addrs[j].network,
+                                           &ps_addr->ipv6_addrs[j].mask);
+            }
+
+            ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                            pb->header_.uuid.parts[0], m, ofpacts,
+                            &pb->header_.uuid);
+
+            match_set_arp_tha(m, ps_addr->ea);
+            ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                            pb->header_.uuid.parts[0], m, ofpacts,
+                            &pb->header_.uuid);
+        }
+    } else {
+        /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd'
+         * table if no IPv6 addresses are configured.
+         * priority: 90
+         * match - "inport == pb->port && eth.src == ps_addr.ea && icmp6 &&
+         *          icmp6.type == 136 && icmp6.code == 0 && ip.ttl == 255 &&
+         *          nd.tll == {00:00:00:00:00:00, ps_addr.ea}"
+         * action - "port_sec_failed = 0;"
+         */
+        match_set_arp_tha(m, eth_addr_zero);
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+
+        match_set_arp_tha(m, ps_addr->ea);
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+    }
+}
+
+static void
+build_out_port_sec_no_ip_flows(const struct sbrec_port_binding *pb,
+                               struct lport_addresses *ps_addr,
+                               struct match *m, struct ofpbuf *ofpacts,
+                               struct ovn_desired_flow_table *flow_table)
+{
+    /* Add the below logical flow equivalent OF rules in 'out_port_sec' table.
+     * priority: 85
+     * match - "outport == pb->logical_port && eth.dst == ps_addr.ea"
+     * action - "port_sec_failed = 0;"
+     * description: "Allow the packet if eth.dst matches."
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, m);
+    match_set_dl_dst(m, ps_addr->ea);
+    build_port_sec_allow_action(ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 85,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+}
+
+static void
+build_out_port_sec_ip4_flows(const struct sbrec_port_binding *pb,
+                            struct lport_addresses *ps_addr,
+                            struct match *m, struct ofpbuf *ofpacts,
+                            struct ovn_desired_flow_table *flow_table)
+{
+    if (!ps_addr->n_ipv4_addrs && !ps_addr->n_ipv6_addrs) {
+         /* No IPv4 and no IPv6 addresses in the port security.
+          * Both IPv4 and IPv6 traffic should be delivered to the
+          * lport. build_out_port_sec_no_ip_flows() takes care of
+          * adding the required flow(s) to allow. */
+        return;
+    }
+
+    /* Add the below logical flow equivalent OF rules in 'out_port_sec' table.
+     * priority: 90
+     * match - "outport == pb->logical_port && eth.dst == ps_addr.ea && ip4"
+     * action - "port_sec_failed = 1;"
+     * description: Default drop IPv4 packets.  If IPv4 addresses are
+     *              configured, then higher priority flows are added
+     *              to allow specific IPv4 packets.
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, m);
+    match_set_dl_dst(m, ps_addr->ea);
+    match_set_dl_type(m, htons(ETH_TYPE_IP));
+    build_port_sec_deny_action(ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    if (!ps_addr->n_ipv4_addrs) {
+        return;
+    }
+
+    /* Add the below logical flow equivalent OF rules in 'out_port_sec' table.
+     * priority: 95
+     * match - "outport == pb->logical_port && eth.dst == ps_addr.ea &&
+     *          ip4.dst == {ps_addr.ipv4_addrs, 255.255.255.255, 224.0.0.0/4},"
+     * action - "port_sec_failed = 0;"
+     */
+    build_port_sec_allow_action(ofpacts);
+    for (size_t j = 0; j < ps_addr->n_ipv4_addrs; j++) {
+        reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, m);
+        match_set_dl_dst(m, ps_addr->ea);
+        match_set_dl_type(m, htons(ETH_TYPE_IP));
+        ovs_be32 mask = ps_addr->ipv4_addrs[j].mask;
+        if (ps_addr->ipv4_addrs[j].plen == 32
+                || ps_addr->ipv4_addrs[j].addr & ~mask) {
+
+            if (ps_addr->ipv4_addrs[j].plen != 32) {
+                /* Special case to allow bcast traffic.
+                 * Eg. If ps_addr is 10.0.0.4/24, then add the below flow
+                 * priority: 95
+                 * match - "outport == pb->logical_port &&
+                 *          eth.dst == ps_addr.ea &&
+                 *          ip4.dst == 10.0.0.255"
+                 * action - "port_sec_failed = 0;"
+                 */
+                ovs_be32 bcast_addr;
+                ovs_assert(ip_parse(ps_addr->ipv4_addrs[j].bcast_s,
+                                    &bcast_addr));
+                match_set_nw_dst(m, bcast_addr);
+                ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 95,
+                                pb->header_.uuid.parts[0], m, ofpacts,
+                                &pb->header_.uuid);
+            }
+
+            match_set_nw_dst(m, ps_addr->ipv4_addrs[j].addr);
+        } else {
+            /* host portion is zero */
+            match_set_nw_dst_masked(m, ps_addr->ipv4_addrs[j].addr,
+                                    mask);
+        }
+
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 95,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+    }
+
+    reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, m);
+    match_set_dl_dst(m, ps_addr->ea);
+    match_set_dl_type(m, htons(ETH_TYPE_IP));
+
+    ovs_be32 ip4 = htonl(0xffffffff);
+    match_set_nw_dst(m, ip4);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 95,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    /* Allow 224.0.0.0/4 traffic. */
+    ip4 = htonl(0xe0000000);
+    ovs_be32 mask = htonl(0xf0000000);
+    match_set_nw_dst_masked(m, ip4, mask);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 95,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+}
+
+static void
+build_out_port_sec_ip6_flows(const struct sbrec_port_binding *pb,
+                            struct lport_addresses *ps_addr,
+                            struct match *m, struct ofpbuf *ofpacts,
+                            struct ovn_desired_flow_table *flow_table)
+{
+    if (!ps_addr->n_ipv4_addrs && !ps_addr->n_ipv6_addrs) {
+        /* No IPv4 and no IPv6 addresses in the port security.
+         * Both IPv4 and IPv6 traffic should be delivered to the
+         * lport. build_out_port_sec_no_ip_flows() takes care of
+         * adding the required flow(s) to allow this traffic. */
+        return;
+    }
+
+    /* Add the below logical flow equivalent OF rules in 'out_port_sec' table.
+     * priority: 90
+     * match - "outport == pb->logical_port && eth.dst == ps_addr.ea && ip6"
+     * action - "port_sec_failed = 1;"
+     * description: Default drop IPv6 packets.  If IPv6 addresses are
+     *              configured, then higher priority flows are added
+     *              to allow specific IPv6 packets.
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, m);
+    match_set_dl_dst(m, ps_addr->ea);
+    match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+    build_port_sec_deny_action(ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    if (!ps_addr->n_ipv6_addrs) {
+        return;
+    }
+
+    /* Add the below logical flow equivalent OF rules in 'out_port_sec' table.
+     * priority: 95
+     * match - "outport == pb->logical_port && eth.dst == ps_addr.ea &&
+     *          ip6.dst == {ps_addr.ipv6_addrs, lla, ff00::/8},"
+     * action - "port_sec_failed = 0;"
+     */
+    build_port_sec_allow_action(ofpacts);
+    for (size_t j = 0; j < ps_addr->n_ipv6_addrs; j++) {
+        reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, m);
+        match_set_dl_dst(m, ps_addr->ea);
+        match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+
+        if (ps_addr->ipv6_addrs[j].plen == 128
+            || !ipv6_addr_is_host_zero(&ps_addr->ipv6_addrs[j].addr,
+                                        &ps_addr->ipv6_addrs[j].mask)) {
+            match_set_ipv6_dst(m, &ps_addr->ipv6_addrs[j].addr);
+        } else {
+            match_set_ipv6_dst_masked(m, &ps_addr->ipv6_addrs[j].network,
+                                      &ps_addr->ipv6_addrs[j].mask);
+        }
+
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 95,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+    }
+
+    struct in6_addr lla;
+    in6_generate_lla(ps_addr->ea, &lla);
+
+    reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, m);
+    match_set_dl_dst(m, ps_addr->ea);
+    match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+    match_set_ipv6_dst(m, &lla);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 95,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    struct in6_addr ip6, mask;
+    char *err = ipv6_parse_masked("ff00::/8", &ip6, &mask);
+    ovs_assert(!err);
+
+    match_set_ipv6_dst_masked(m, &ip6, &mask);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 95,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+}
+
+static void
+consider_port_sec_flows(const struct sbrec_port_binding *pb,
+                        struct ovn_desired_flow_table *flow_table)
+{
+    if (!pb->n_port_security) {
+        return;
+    }
+
+    struct lport_addresses *ps_addrs;   /* Port security addresses. */
+    size_t n_ps_addrs = 0;
+
+    ps_addrs = xmalloc(sizeof *ps_addrs * pb->n_port_security);
+    for (size_t i = 0; i < pb->n_port_security; i++) {
+        if (!extract_lsp_addresses(pb->port_security[i],
+                                    &ps_addrs[n_ps_addrs])) {
+            static struct vlog_rate_limit rl
+                = VLOG_RATE_LIMIT_INIT(1, 1);
+            VLOG_INFO_RL(&rl, "invalid syntax '%s' in port "
+                         "security. No MAC address found",
+                         pb->port_security[i]);
+            continue;
+        }
+        n_ps_addrs++;
+    }
+
+    if (!n_ps_addrs) {
+        free(ps_addrs);
+        return;
+    }
+
+    struct match match = MATCH_CATCHALL_INITIALIZER;
+    uint64_t stub[1024 / 8];
+    struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(stub);
+
+    build_in_port_sec_default_flows(pb, &match, &ofpacts, flow_table);
+
+    for (size_t i = 0; i < n_ps_addrs; i++) {
+        build_in_port_sec_no_ip_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                      flow_table);
+        build_in_port_sec_ip4_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                    flow_table);
+        build_in_port_sec_arp_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                    flow_table);
+        build_in_port_sec_ip6_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                    flow_table);
+        build_in_port_sec_nd_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                   flow_table);
+    }
+
+    /* Out port security. */
+
+    /* Add the below logical flow equivalent OF rules in 'out_port_sec'
+     * table.
+     * priority: 80
+     * match - "outport == pb->logical_port"
+     * action - "port_sec_failed = 1;"
+     * description: "Drop all traffic"
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, &match);
+    build_port_sec_deny_action(&ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 80,
+                    pb->header_.uuid.parts[0], &match, &ofpacts,
+                    &pb->header_.uuid);
+
+    for (size_t i = 0; i < n_ps_addrs; i++) {
+        build_out_port_sec_no_ip_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                       flow_table);
+        build_out_port_sec_ip4_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                       flow_table);
+        build_out_port_sec_ip6_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                       flow_table);
+    }
+
+    ofpbuf_uninit(&ofpacts);
+    for (size_t i = 0; i < n_ps_addrs; i++) {
+        destroy_lport_addresses(&ps_addrs[i]);
+    }
+    free(ps_addrs);
+}
diff --git a/controller/lflow.h b/controller/lflow.h
index 48a3650..543d3cd 100644
--- a/controller/lflow.h
+++ b/controller/lflow.h
@@ -76,6 +76,11 @@ struct uuid;
 #define OFTABLE_CT_SNAT_HAIRPIN      70
 #define OFTABLE_GET_FDB              71
 #define OFTABLE_LOOKUP_FDB           72
+#define OFTABLE_CHK_IN_PORT_SEC      73
+#define OFTABLE_CHK_IN_PORT_SEC_ND   74
+#define OFTABLE_CHK_OUT_PORT_SEC     75
+#define OFTABLE_ECMP_NH_MAC          76
+#define OFTABLE_ECMP_NH              77
 
 enum ref_type {
     REF_TYPE_ADDRSET,
@@ -149,11 +154,13 @@ struct lflow_ctx_in {
     const struct sbrec_fdb_table *fdb_table;
     const struct sbrec_chassis *chassis;
     const struct sbrec_load_balancer_table *lb_table;
+    const struct sbrec_static_mac_binding_table *static_mac_binding_table;
     const struct hmap *local_datapaths;
     const struct shash *addr_sets;
     const struct shash *port_groups;
     const struct sset *active_tunnels;
     const struct sset *related_lport_ids;
+    const struct shash *binding_lports;
     const struct hmap *chassis_tunnels;
     bool lb_hairpin_use_ct_mark;
 };
@@ -194,9 +201,14 @@ bool lflow_handle_addr_set_update(const char *as_name, struct addr_set_diff *,
                                   struct lflow_ctx_out *,
                                   bool *changed);
 
-void lflow_handle_changed_neighbors(
+void lflow_handle_changed_mac_bindings(
     struct ovsdb_idl_index *sbrec_port_binding_by_name,
-    const struct sbrec_mac_binding_table *,
+    const struct sbrec_mac_binding_table *mac_binding_table,
+    const struct hmap *local_datapaths,
+    struct ovn_desired_flow_table *);
+void lflow_handle_changed_static_mac_bindings(
+    struct ovsdb_idl_index *sbrec_port_binding_by_name,
+    const struct sbrec_static_mac_binding_table *smb_table,
     const struct hmap *local_datapaths,
     struct ovn_desired_flow_table *);
 bool lflow_handle_changed_lbs(struct lflow_ctx_in *, struct lflow_ctx_out *);
diff --git a/controller/local_data.c b/controller/local_data.c
index 9844590..9eee568 100644
--- a/controller/local_data.c
+++ b/controller/local_data.c
@@ -34,7 +34,7 @@
 
 VLOG_DEFINE_THIS_MODULE(ldata);
 
-static void add_local_datapath__(
+static struct local_datapath *add_local_datapath__(
     struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
     struct ovsdb_idl_index *sbrec_port_binding_by_datapath,
     struct ovsdb_idl_index *sbrec_port_binding_by_name,
@@ -72,6 +72,7 @@ local_datapath_alloc(const struct sbrec_datapath_binding *dp)
     ld->is_switch = datapath_is_switch(dp);
     ld->is_transit_switch = datapath_is_transit_switch(dp);
     shash_init(&ld->external_ports);
+    shash_init(&ld->multichassis_ports);
     /* memory accounting - common part. */
     local_datapath_usage += sizeof *ld;
 
@@ -97,13 +98,20 @@ local_datapath_destroy(struct local_datapath *ld)
     SHASH_FOR_EACH (node, &ld->external_ports) {
         local_datapath_usage -= strlen(node->name);
     }
-    local_datapath_usage -= shash_count(&ld->external_ports) * sizeof *node;
+    SHASH_FOR_EACH (node, &ld->multichassis_ports) {
+        local_datapath_usage -= strlen(node->name);
+    }
+    local_datapath_usage -= (shash_count(&ld->external_ports)
+                             * sizeof *node);
+    local_datapath_usage -= (shash_count(&ld->multichassis_ports)
+                             * sizeof *node);
     local_datapath_usage -= sizeof *ld;
     local_datapath_usage -=
         ld->n_allocated_peer_ports * sizeof *ld->peer_ports;
 
     free(ld->peer_ports);
     shash_destroy(&ld->external_ports);
+    shash_destroy(&ld->multichassis_ports);
     free(ld);
 }
 
@@ -186,17 +194,7 @@ add_local_datapath_peer_port(
         return;
     }
 
-    bool present = false;
-    for (size_t i = 0; i < ld->n_peer_ports; i++) {
-        if (ld->peer_ports[i].local == pb) {
-            present = true;
-            break;
-        }
-    }
-
-    if (!present) {
-        local_datapath_peer_port_add(ld, pb, peer);
-    }
+    local_datapath_peer_port_add(ld, pb, peer);
 
     struct local_datapath *peer_ld =
         get_local_datapath(local_datapaths,
@@ -210,12 +208,6 @@ add_local_datapath_peer_port(
         return;
     }
 
-    for (size_t i = 0; i < peer_ld->n_peer_ports; i++) {
-        if (peer_ld->peer_ports[i].local == peer) {
-            return;
-        }
-    }
-
     local_datapath_peer_port_add(peer_ld, peer, pb);
 }
 
@@ -275,6 +267,26 @@ remove_local_datapath_external_port(struct local_datapath *ld,
 }
 
 void
+add_local_datapath_multichassis_port(struct local_datapath *ld,
+                                     char *logical_port, const void *data)
+{
+    if (!shash_replace(&ld->multichassis_ports, logical_port, data)) {
+        local_datapath_usage += sizeof(struct shash_node) +
+                                strlen(logical_port);
+    }
+}
+
+void
+remove_local_datapath_multichassis_port(struct local_datapath *ld,
+                                        char *logical_port)
+{
+    if (shash_find_and_delete(&ld->multichassis_ports, logical_port)) {
+        local_datapath_usage -= sizeof(struct shash_node) +
+                                strlen(logical_port);
+    }
+}
+
+void
 local_datapath_memory_usage(struct simap *usage)
 {
     simap_increase(usage, "local_datapath_usage-KB",
@@ -513,7 +525,7 @@ chassis_tunnel_find(const struct hmap *chassis_tunnels, const char *chassis_id,
 }
 
 /* static functions. */
-static void
+static struct local_datapath *
 add_local_datapath__(struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
                      struct ovsdb_idl_index *sbrec_port_binding_by_datapath,
                      struct ovsdb_idl_index *sbrec_port_binding_by_name,
@@ -525,7 +537,7 @@ add_local_datapath__(struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
     uint32_t dp_key = dp->tunnel_key;
     struct local_datapath *ld = get_local_datapath(local_datapaths, dp_key);
     if (ld) {
-        return;
+        return ld;
     }
 
     ld = local_datapath_alloc(dp);
@@ -540,7 +552,7 @@ add_local_datapath__(struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
     if (depth >= 100) {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
         VLOG_WARN_RL(&rl, "datapaths nested too deep");
-        return;
+        return ld;
     }
 
     struct sbrec_port_binding *target =
@@ -561,19 +573,22 @@ add_local_datapath__(struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
                 if (peer && peer->datapath) {
                     if (need_add_patch_peer_to_local(
                             sbrec_port_binding_by_name, pb, chassis)) {
-                        add_local_datapath__(sbrec_datapath_binding_by_key,
+                        struct local_datapath *peer_ld =
+                            add_local_datapath__(sbrec_datapath_binding_by_key,
                                              sbrec_port_binding_by_datapath,
                                              sbrec_port_binding_by_name,
                                              depth + 1, peer->datapath,
                                              chassis, local_datapaths,
                                              tracked_datapaths);
+                        local_datapath_peer_port_add(peer_ld, peer, pb);
+                        local_datapath_peer_port_add(ld, pb, peer);
                     }
-                    local_datapath_peer_port_add(ld, pb, peer);
                 }
             }
         }
     }
     sbrec_port_binding_index_destroy_row(target);
+    return ld;
 }
 
 static struct tracked_datapath *
@@ -594,6 +609,11 @@ local_datapath_peer_port_add(struct local_datapath *ld,
                              const struct sbrec_port_binding *local,
                              const struct sbrec_port_binding *remote)
 {
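+    /* Do nothing if 'local' is already in the peer ports list. */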
+    for (size_t i = 0; i < ld->n_peer_ports; i++) {
+        if (ld->peer_ports[i].local == local) {
+            return;
+        }
+    }
     ld->n_peer_ports++;
     if (ld->n_peer_ports > ld->n_allocated_peer_ports) {
         size_t old_n_ports = ld->n_allocated_peer_ports;
diff --git a/controller/local_data.h b/controller/local_data.h
index 9306ddf..d898c8a 100644
--- a/controller/local_data.h
+++ b/controller/local_data.h
@@ -58,6 +58,7 @@ struct local_datapath {
     size_t n_allocated_peer_ports;
 
     struct shash external_ports;
+    struct shash multichassis_ports;
 };
 
 struct local_datapath *local_datapath_alloc(
@@ -155,5 +156,10 @@ void add_local_datapath_external_port(struct local_datapath *ld,
                                       char *logical_port, const void *data);
 void remove_local_datapath_external_port(struct local_datapath *ld,
                                          char *logical_port);
+void add_local_datapath_multichassis_port(struct local_datapath *ld,
+                                          char *logical_port,
+                                          const void *data);
+void remove_local_datapath_multichassis_port(struct local_datapath *ld,
+                                             char *logical_port);
 
 #endif /* controller/local_data.h */
diff --git a/controller/lport.c b/controller/lport.c
index 5ad40f6..add7e91 100644
--- a/controller/lport.c
+++ b/controller/lport.c
@@ -108,29 +108,41 @@ lport_get_l3gw_peer(const struct sbrec_port_binding *pb,
     return get_peer_lport(pb, sbrec_port_binding_by_name);
 }
 
-bool
+enum can_bind
 lport_can_bind_on_this_chassis(const struct sbrec_chassis *chassis_rec,
                                const struct sbrec_port_binding *pb)
 {
-    /* We need to check for presence of the requested-chassis option in
-     * addittion to checking the pb->requested_chassis column because this
-     * column will be set to NULL whenever the option points to a non-existent
-     * chassis.  As the controller routinely clears its own chassis record this
-     * might occur more often than one might think. */
+    if (pb->requested_chassis == chassis_rec) {
+        return CAN_BIND_AS_MAIN;
+    }
+
+    for (size_t i = 0; i < pb->n_requested_additional_chassis; i++) {
+        if (pb->requested_additional_chassis[i] == chassis_rec) {
+            return CAN_BIND_AS_ADDITIONAL;
+        }
+    }
+
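+    /* Fall back to comparing against the "requested-chassis" option string:
+     * the requested_chassis and requested_additional_chassis columns may not
+     * be populated yet, e.g. while the chassis named in the option is still
+     * starting up. */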
     const char *requested_chassis_option = smap_get(&pb->options,
                                                     "requested-chassis");
-    if (requested_chassis_option && requested_chassis_option[0]
-        && !pb->requested_chassis) {
-        /* The requested-chassis option is set, but the requested_chassis
-         * column is not filled.  This means that the chassis the option
-         * points to is currently not running, or is in the process of starting
-         * up.  In this case we must fall back to comparing the strings to
-         * avoid release/claim thrashing. */
-        return !strcmp(requested_chassis_option, chassis_rec->name)
-               || !strcmp(requested_chassis_option, chassis_rec->hostname);
+    if (!requested_chassis_option || !strcmp("", requested_chassis_option)) {
+        return CAN_BIND_AS_MAIN;
+    }
+
+    char *tokstr = xstrdup(requested_chassis_option);
+    char *save_ptr = NULL;
+    char *chassis;
+    enum can_bind can_bind = CAN_BIND_AS_MAIN;
+    for (chassis = strtok_r(tokstr, ",", &save_ptr); chassis != NULL;
+         chassis = strtok_r(NULL, ",", &save_ptr)) {
+        if (!strcmp(chassis, chassis_rec->name)
+                || !strcmp(chassis, chassis_rec->hostname)) {
+            free(tokstr);
+            return can_bind;
+        }
+        can_bind = CAN_BIND_AS_ADDITIONAL;
     }
-    return !requested_chassis_option || !requested_chassis_option[0]
-           || chassis_rec == pb->requested_chassis;
+    free(tokstr);
+    return CANNOT_BIND;
 }
 
 const struct sbrec_datapath_binding *
@@ -185,3 +197,25 @@ get_peer_lport(const struct sbrec_port_binding *pb,
                                 peer_name);
     return (peer && peer->datapath) ? peer : NULL;
 }
+
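+/* Returns true if 'chassis' is listed in the port binding's
+ * "additional-chassis-activated" option, i.e. the port has already been
+ * activated on that chassis by its activation strategy. */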
+bool
+lport_is_activated_by_activation_strategy(const struct sbrec_port_binding *pb,
+                                          const struct sbrec_chassis *chassis)
+{
+    const char *activated_chassis = smap_get(&pb->options,
+                                             "additional-chassis-activated");
+    if (activated_chassis) {
+        char *save_ptr;
+        char *tokstr = xstrdup(activated_chassis);
+        for (const char *chassis_name = strtok_r(tokstr, ",", &save_ptr);
+             chassis_name != NULL;
+             chassis_name = strtok_r(NULL, ",", &save_ptr)) {
+            if (!strcmp(chassis_name, chassis->name)) {
+                free(tokstr);
+                return true;
+            }
+        }
+        free(tokstr);
+    }
+    return false;
+}
diff --git a/controller/lport.h b/controller/lport.h
index 4716c58..644c672 100644
--- a/controller/lport.h
+++ b/controller/lport.h
@@ -43,8 +43,15 @@ const struct sbrec_port_binding *lport_lookup_by_key(
     struct ovsdb_idl_index *sbrec_port_binding_by_key,
     uint64_t dp_key, uint64_t port_key);
 
-bool lport_can_bind_on_this_chassis(const struct sbrec_chassis *chassis_rec,
-                                   const struct sbrec_port_binding *pb);
+enum can_bind {
+    CANNOT_BIND = 0,
+    CAN_BIND_AS_MAIN,
+    CAN_BIND_AS_ADDITIONAL,
+};
+
+enum can_bind
+lport_can_bind_on_this_chassis(const struct sbrec_chassis *chassis_rec,
+                               const struct sbrec_port_binding *pb);
 
 const struct sbrec_datapath_binding *datapath_lookup_by_key(
     struct ovsdb_idl_index *sbrec_datapath_binding_by_key, uint64_t dp_key);
@@ -63,4 +70,7 @@ const struct sbrec_port_binding *lport_get_peer(
 const struct sbrec_port_binding *lport_get_l3gw_peer(
     const struct sbrec_port_binding *,
     struct ovsdb_idl_index *sbrec_port_binding_by_name);
+bool
+lport_is_activated_by_activation_strategy(const struct sbrec_port_binding *pb,
+                                          const struct sbrec_chassis *chassis);
 #endif /* controller/lport.h */
diff --git a/controller/mac-learn.c b/controller/mac-learn.c
index 27634dc..a276070 100644
--- a/controller/mac-learn.c
+++ b/controller/mac-learn.c
@@ -18,14 +18,18 @@
 #include "mac-learn.h"
 
 /* OpenvSwitch lib includes. */
+#include "openvswitch/poll-loop.h"
 #include "openvswitch/vlog.h"
 #include "lib/packets.h"
+#include "lib/random.h"
 #include "lib/smap.h"
+#include "lib/timeval.h"
 
 VLOG_DEFINE_THIS_MODULE(mac_learn);
 
 #define MAX_MAC_BINDINGS 1000
 #define MAX_FDB_ENTRIES  1000
+#define MAX_MAC_BINDING_DELAY_MSEC 50
 
 static size_t mac_binding_hash(uint32_t dp_key, uint32_t port_key,
                                struct in6_addr *);
@@ -46,25 +50,19 @@ ovn_mac_bindings_init(struct hmap *mac_bindings)
 }
 
 void
-ovn_mac_bindings_flush(struct hmap *mac_bindings)
+ovn_mac_bindings_destroy(struct hmap *mac_bindings)
 {
     struct mac_binding *mb;
     HMAP_FOR_EACH_POP (mb, hmap_node, mac_bindings) {
         free(mb);
     }
-}
-
-void
-ovn_mac_bindings_destroy(struct hmap *mac_bindings)
-{
-    ovn_mac_bindings_flush(mac_bindings);
     hmap_destroy(mac_bindings);
 }
 
 struct mac_binding *
 ovn_mac_binding_add(struct hmap *mac_bindings, uint32_t dp_key,
                     uint32_t port_key, struct in6_addr *ip,
-                    struct eth_addr mac)
+                    struct eth_addr mac, bool is_unicast)
 {
     uint32_t hash = mac_binding_hash(dp_key, port_key, ip);
 
@@ -75,10 +73,13 @@ ovn_mac_binding_add(struct hmap *mac_bindings, uint32_t dp_key,
             return NULL;
         }
 
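+        /* Unicast bindings may be committed to the SB immediately; others
+         * get a random 1..MAX_MAC_BINDING_DELAY_MSEC ms delay before they
+         * may be committed (see ovn_mac_binding_can_commit()). */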
+        uint32_t delay = is_unicast
+            ? 0 : random_range(MAX_MAC_BINDING_DELAY_MSEC) + 1;
         mb = xmalloc(sizeof *mb);
         mb->dp_key = dp_key;
         mb->port_key = port_key;
         mb->ip = *ip;
+        mb->commit_at_ms = time_msec() + delay;
         hmap_insert(mac_bindings, &mb->hmap_node, hash);
     }
     mb->mac = mac;
@@ -86,6 +87,30 @@ ovn_mac_binding_add(struct hmap *mac_bindings, uint32_t dp_key,
     return mb;
 }
 
+/* This is called from the ovn-controller main context. */
+void
+ovn_mac_binding_wait(struct hmap *mac_bindings)
+{
+    struct mac_binding *mb;
+
+    HMAP_FOR_EACH (mb, hmap_node, mac_bindings) {
+        poll_timer_wait_until(mb->commit_at_ms);
+    }
+}
+
+void
+ovn_mac_binding_remove(struct mac_binding *mb, struct hmap *mac_bindings)
+{
+    hmap_remove(mac_bindings, &mb->hmap_node);
+    free(mb);
+}
+
+bool
+ovn_mac_binding_can_commit(const struct mac_binding *mb, long long now)
+{
+    return now >= mb->commit_at_ms;
+}
+
 /* fdb functions. */
 void
 ovn_fdb_init(struct hmap *fdbs)
diff --git a/controller/mac-learn.h b/controller/mac-learn.h
index e7e8ba2..57c50c5 100644
--- a/controller/mac-learn.h
+++ b/controller/mac-learn.h
@@ -31,16 +31,21 @@ struct mac_binding {
 
     /* Value. */
     struct eth_addr mac;
+
+    /* Timestamp when to commit to SB. */
+    long long commit_at_ms;
 };
 
 void ovn_mac_bindings_init(struct hmap *mac_bindings);
-void ovn_mac_bindings_flush(struct hmap *mac_bindings);
 void ovn_mac_bindings_destroy(struct hmap *mac_bindings);
+void ovn_mac_binding_wait(struct hmap *mac_bindings);
+void ovn_mac_binding_remove(struct mac_binding *mb, struct hmap *mac_bindings);
+bool ovn_mac_binding_can_commit(const struct mac_binding *mb, long long now);
 
 struct mac_binding *ovn_mac_binding_add(struct hmap *mac_bindings,
                                         uint32_t dp_key, uint32_t port_key,
                                         struct in6_addr *ip,
-                                        struct eth_addr mac);
+                                        struct eth_addr mac, bool is_unicast);
 
 
 
diff --git a/controller/ofctrl-seqno.c b/controller/ofctrl-seqno.c
index c9334b0..923de47 100644
--- a/controller/ofctrl-seqno.c
+++ b/controller/ofctrl-seqno.c
@@ -173,8 +173,8 @@ ofctrl_seqno_update_create(size_t seqno_type, uint64_t new_cfg)
 void
 ofctrl_seqno_run(uint64_t flow_cfg)
 {
-    struct ofctrl_seqno_update *update, *prev;
-    LIST_FOR_EACH_SAFE (update, prev, list_node, &ofctrl_seqno_updates) {
+    struct ofctrl_seqno_update *update;
+    LIST_FOR_EACH_SAFE (update, list_node, &ofctrl_seqno_updates) {
         if (flow_cfg < update->flow_cfg) {
             break;
         }
diff --git a/controller/ofctrl.c b/controller/ofctrl.c
index 3b9d717..54b75b3 100644
--- a/controller/ofctrl.c
+++ b/controller/ofctrl.c
@@ -47,6 +47,7 @@
 #include "physical.h"
 #include "openvswitch/rconn.h"
 #include "socket-util.h"
+#include "timeval.h"
 #include "util.h"
 #include "vswitch-idl.h"
 
@@ -297,6 +298,7 @@ static unsigned int seqno;
     STATE(S_NEW)                                \
     STATE(S_TLV_TABLE_REQUESTED)                \
     STATE(S_TLV_TABLE_MOD_SENT)                 \
+    STATE(S_WAIT_BEFORE_CLEAR)                  \
     STATE(S_CLEAR_FLOWS)                        \
     STATE(S_UPDATE_FLOWS)
 enum ofctrl_state {
@@ -339,6 +341,14 @@ static uint64_t cur_cfg;
 /* Current state. */
 static enum ofctrl_state state;
 
+/* The time (ms) to stay in the state S_WAIT_BEFORE_CLEAR. Read from
+ * external_ids:ovn-ofctrl-wait-before-clear. */
+static unsigned int wait_before_clear_time = 0;
+
+/* The time when the state S_WAIT_BEFORE_CLEAR should complete.
+ * If the timer is not started yet, it is set to 0. */
+static long long int wait_before_clear_expire = 0;
+
 /* Transaction IDs for messages in flight to the switch. */
 static ovs_be32 xid, xid2;
 
@@ -381,9 +391,11 @@ static void ofctrl_meter_bands_clear(void);
  * S_CLEAR_FLOWS or S_UPDATE_FLOWS, this is really the option we have. */
 static enum mf_field_id mff_ovn_geneve;
 
-/* Indicates if flows need to be reinstalled for scenarios when ovs
- * is restarted, even if there is no change in the desired flow table. */
-static bool need_reinstall_flows;
+/* Indicates if we just went through the S_CLEAR_FLOWS state, which means we
+ * need to perform a one time deletion for all the existing flows, groups and
+ * meters. This can happen during initialization or OpenFlow reconnection
+ * (e.g. after OVS restart). */
+static bool ofctrl_initial_clear;
 
 static ovs_be32 queue_msg(struct ofpbuf *);
 
@@ -444,18 +456,19 @@ recv_S_NEW(const struct ofp_header *oh OVS_UNUSED,
  * If we receive an NXT_TLV_TABLE_REPLY:
  *
  *     - If it contains our tunnel metadata option, assign its field ID to
- *       mff_ovn_geneve and transition to S_CLEAR_FLOWS.
+ *       mff_ovn_geneve and transition to S_WAIT_BEFORE_CLEAR.
  *
  *     - Otherwise, if there is an unused tunnel metadata field ID, send
  *       NXT_TLV_TABLE_MOD and OFPT_BARRIER_REQUEST, and transition to
  *       S_TLV_TABLE_MOD_SENT.
  *
  *     - Otherwise, log an error, disable Geneve, and transition to
- *       S_CLEAR_FLOWS.
+ *       S_WAIT_BEFORE_CLEAR.
  *
  * If we receive an OFPT_ERROR:
  *
- *     - Log an error, disable Geneve, and transition to S_CLEAR_FLOWS. */
+ *     - Log an error, disable Geneve, and transition to S_WAIT_BEFORE_CLEAR.
+ */
 
 static void
 run_S_TLV_TABLE_REQUESTED(void)
@@ -482,7 +495,7 @@ process_tlv_table_reply(const struct ofputil_tlv_table_reply *reply)
                 return false;
             } else {
                 mff_ovn_geneve = MFF_TUN_METADATA0 + map->index;
-                state = S_CLEAR_FLOWS;
+                state = S_WAIT_BEFORE_CLEAR;
                 return true;
             }
         }
@@ -549,7 +562,7 @@ recv_S_TLV_TABLE_REQUESTED(const struct ofp_header *oh, enum ofptype type,
 
     /* Error path. */
     mff_ovn_geneve = 0;
-    state = S_CLEAR_FLOWS;
+    state = S_WAIT_BEFORE_CLEAR;
 }
 
 /* S_TLV_TABLE_MOD_SENT, when NXT_TLV_TABLE_MOD and OFPT_BARRIER_REQUEST
@@ -561,12 +574,12 @@ recv_S_TLV_TABLE_REQUESTED(const struct ofp_header *oh, enum ofptype type,
  *       raced with some other controller.  Transition to S_NEW.
  *
  *     - Otherwise, log an error, disable Geneve, and transition to
- *       S_CLEAR_FLOWS.
+ *       S_WAIT_BEFORE_CLEAR.
  *
  * If we receive OFPT_BARRIER_REPLY:
  *
  *     - Set the tunnel metadata field ID to the one that we requested.
- *       Transition to S_CLEAR_FLOWS.
+ *       Transition to S_WAIT_BEFORE_CLEAR.
  */
 
 static void
@@ -581,7 +594,7 @@ recv_S_TLV_TABLE_MOD_SENT(const struct ofp_header *oh, enum ofptype type,
     if (oh->xid != xid && oh->xid != xid2) {
         ofctrl_recv(oh, type);
     } else if (oh->xid == xid2 && type == OFPTYPE_BARRIER_REPLY) {
-        state = S_CLEAR_FLOWS;
+        state = S_WAIT_BEFORE_CLEAR;
     } else if (oh->xid == xid && type == OFPTYPE_ERROR) {
         enum ofperr error = ofperr_decode_msg(oh, NULL);
         if (error == OFPERR_NXTTMFC_ALREADY_MAPPED ||
@@ -605,7 +618,36 @@ recv_S_TLV_TABLE_MOD_SENT(const struct ofp_header *oh, enum ofptype type,
     return;
 
 error:
-    state = S_CLEAR_FLOWS;
+    state = S_WAIT_BEFORE_CLEAR;
+}
+
+/* S_WAIT_BEFORE_CLEAR, we are almost ready to set up flows, but we wait a
+ * while for the initial flow computation to complete before clearing the
+ * existing flows in OVS, so that we don't end up with an empty flow table,
+ * which could cause data plane downtime. */
+static void
+run_S_WAIT_BEFORE_CLEAR(void)
+{
+    if (!wait_before_clear_time ||
+        (wait_before_clear_expire &&
+         time_msec() >= wait_before_clear_expire)) {
+        wait_before_clear_expire = 0;
+        state = S_CLEAR_FLOWS;
+        return;
+    }
+
+    if (!wait_before_clear_expire) {
+        /* Start the timer. */
+        wait_before_clear_expire = time_msec() + wait_before_clear_time;
+    }
+    poll_timer_wait_until(wait_before_clear_expire);
+}
+
+static void
+recv_S_WAIT_BEFORE_CLEAR(const struct ofp_header *oh, enum ofptype type,
+                         struct shash *pending_ct_zones OVS_UNUSED)
+{
+    ofctrl_recv(oh, type);
 }
 
 /* S_CLEAR_FLOWS, after we've established a Geneve metadata field ID and it's
@@ -619,25 +661,10 @@ run_S_CLEAR_FLOWS(void)
 {
     VLOG_DBG("clearing all flows");
 
-    need_reinstall_flows = true;
-    /* Send a flow_mod to delete all flows. */
-    struct ofputil_flow_mod fm = {
-        .table_id = OFPTT_ALL,
-        .command = OFPFC_DELETE,
-    };
-    minimatch_init_catchall(&fm.match);
-    queue_msg(encode_flow_mod(&fm));
-    minimatch_destroy(&fm.match);
-
-    /* Send a group_mod to delete all groups. */
-    struct ofputil_group_mod gm;
-    memset(&gm, 0, sizeof gm);
-    gm.command = OFPGC11_DELETE;
-    gm.group_id = OFPG_ALL;
-    gm.command_bucket_id = OFPG15_BUCKET_ALL;
-    ovs_list_init(&gm.buckets);
-    queue_msg(encode_group_mod(&gm));
-    ofputil_uninit_group_mod(&gm);
+    /* Set the flag so that ofctrl_put() can clear the existing flows,
+     * groups and meters. We clear them in ofctrl_put() right before the new
+     * ones are installed to avoid data plane downtime. */
+    ofctrl_initial_clear = true;
 
     /* Clear installed_flows, to match the state of the switch. */
     ovn_installed_flow_table_clear();
@@ -647,13 +674,6 @@ run_S_CLEAR_FLOWS(void)
         ovn_extend_table_clear(groups, true);
     }
 
-    /* Send a meter_mod to delete all meters. */
-    struct ofputil_meter_mod mm;
-    memset(&mm, 0, sizeof mm);
-    mm.command = OFPMC13_DELETE;
-    mm.meter.meter_id = OFPM13_ALL;
-    queue_msg(encode_meter_mod(&mm));
-
     /* Clear existing meters, to match the state of the switch. */
     if (meters) {
         ovn_extend_table_clear(meters, true);
@@ -661,14 +681,18 @@ run_S_CLEAR_FLOWS(void)
     }
 
     /* All flow updates are irrelevant now. */
-    struct ofctrl_flow_update *fup, *next;
-    LIST_FOR_EACH_SAFE (fup, next, list_node, &flow_updates) {
+    struct ofctrl_flow_update *fup;
+    LIST_FOR_EACH_SAFE (fup, list_node, &flow_updates) {
         mem_stats.oflow_update_usage -= ofctrl_flow_update_size(fup);
         ovs_list_remove(&fup->list_node);
         free(fup);
     }
 
     state = S_UPDATE_FLOWS;
+
+    /* Give the main loop a chance to call ofctrl_put() in case there were
+     * pending flows waiting for the ofctrl state to change to
+     * S_UPDATE_FLOWS. */
+    poll_immediate_wake();
 }
 
 static void
@@ -732,7 +756,9 @@ ofctrl_get_mf_field_id(void)
     if (!rconn_is_connected(swconn)) {
         return 0;
     }
-    return (state == S_CLEAR_FLOWS || state == S_UPDATE_FLOWS
+    return (state == S_WAIT_BEFORE_CLEAR
+            || state == S_CLEAR_FLOWS
+            || state == S_UPDATE_FLOWS
             ? mff_ovn_geneve : 0);
 }
 
@@ -740,7 +766,9 @@ ofctrl_get_mf_field_id(void)
  * hypervisor on which we are running.  Attempts to negotiate a Geneve option
  * field for class OVN_GENEVE_CLASS, type OVN_GENEVE_TYPE. */
 void
-ofctrl_run(const struct ovsrec_bridge *br_int, struct shash *pending_ct_zones)
+ofctrl_run(const struct ovsrec_bridge *br_int,
+           const struct ovsrec_open_vswitch_table *ovs_table,
+           struct shash *pending_ct_zones)
 {
     char *target = xasprintf("unix:%s/%s.mgmt", ovs_rundir(), br_int->name);
     if (strcmp(target, rconn_get_target(swconn))) {
@@ -767,6 +795,16 @@ ofctrl_run(const struct ovsrec_bridge *br_int, struct shash *pending_ct_zones)
             }
         }
     }
+    const struct ovsrec_open_vswitch *cfg =
+        ovsrec_open_vswitch_table_first(ovs_table);
+    ovs_assert(cfg);
+    unsigned int _wait_before_clear_time =
+        smap_get_uint(&cfg->external_ids, "ovn-ofctrl-wait-before-clear", 0);
+    if (_wait_before_clear_time != wait_before_clear_time) {
+        VLOG_INFO("ofctrl-wait-before-clear is now %u ms (was %u ms)",
+                  _wait_before_clear_time, wait_before_clear_time);
+        wait_before_clear_time = _wait_before_clear_time;
+    }
 
     bool progress = true;
     for (int i = 0; progress && i < 50; i++) {
@@ -987,8 +1025,8 @@ unlink_installed_to_desired(struct installed_flow *i, struct desired_flow *d)
 static void
 unlink_all_refs_for_installed_flow(struct installed_flow *i)
 {
-    struct desired_flow *d, *next;
-    LIST_FOR_EACH_SAFE (d, next, installed_ref_list_node, &i->desired_refs) {
+    struct desired_flow *d;
+    LIST_FOR_EACH_SAFE (d, installed_ref_list_node, &i->desired_refs) {
         unlink_installed_to_desired(i, d);
     }
 }
@@ -1414,9 +1452,9 @@ ofctrl_remove_flows_for_as_ip(struct ovn_desired_flow_table *flow_table,
          * sets. */
         return false;
     }
-    struct sb_flow_ref *sfr, *next;
+    struct sb_flow_ref *sfr;
     size_t count = 0;
-    LIST_FOR_EACH_SAFE (sfr, next, as_ip_flow_list, &itfn->flows) {
+    LIST_FOR_EACH_SAFE (sfr, as_ip_flow_list, &itfn->flows) {
         /* If the desired flow is referenced by multiple sb lflows, it
          * shouldn't have been indexed by address set. */
         ovs_assert(ovs_list_is_short(&sfr->sb_list));
@@ -1457,8 +1495,8 @@ remove_flows_from_sb_to_flow(struct ovn_desired_flow_table *flow_table,
     struct ovs_list to_be_removed = OVS_LIST_INITIALIZER(&to_be_removed);
 
     /* Traverse all flows for the given sb_uuid. */
-    struct sb_flow_ref *sfr, *next;
-    LIST_FOR_EACH_SAFE (sfr, next, flow_list, &stf->flows) {
+    struct sb_flow_ref *sfr;
+    LIST_FOR_EACH_SAFE (sfr, flow_list, &stf->flows) {
         ovs_list_remove(&sfr->sb_list);
         ovs_list_remove(&sfr->flow_list);
         ovs_list_remove(&sfr->as_ip_flow_list);
@@ -1479,12 +1517,11 @@ remove_flows_from_sb_to_flow(struct ovn_desired_flow_table *flow_table,
         }
     }
 
-    struct sb_addrset_ref *sar, *next_sar;
-    LIST_FOR_EACH_SAFE (sar, next_sar, list_node, &stf->addrsets) {
+    struct sb_addrset_ref *sar;
+    LIST_FOR_EACH_SAFE (sar, list_node, &stf->addrsets) {
         ovs_list_remove(&sar->list_node);
-        struct as_ip_to_flow_node *itfn, *itfn_next;
-        HMAP_FOR_EACH_SAFE (itfn, itfn_next, hmap_node,
-                            &sar->as_ip_to_flow_map) {
+        struct as_ip_to_flow_node *itfn;
+        HMAP_FOR_EACH_SAFE (itfn, hmap_node, &sar->as_ip_to_flow_map) {
             hmap_remove(&sar->as_ip_to_flow_map, &itfn->hmap_node);
             ovs_assert(ovs_list_is_empty(&itfn->flows));
             mem_stats.sb_flow_ref_usage -= sizeof *itfn;
@@ -1505,7 +1542,7 @@ remove_flows_from_sb_to_flow(struct ovn_desired_flow_table *flow_table,
 
     /* Detach the items in f->references from the sfr.flow_list lists,
      * so that recursive calls will not mess up the sfr.sb_list list. */
-    struct desired_flow *f, *f_next;
+    struct desired_flow *f;
     LIST_FOR_EACH (f, list_node, &to_be_removed) {
         ovs_assert(!ovs_list_is_empty(&f->references));
         LIST_FOR_EACH (sfr, sb_list, &f->references) {
@@ -1513,8 +1550,8 @@ remove_flows_from_sb_to_flow(struct ovn_desired_flow_table *flow_table,
             ovs_list_remove(&sfr->as_ip_flow_list);
         }
     }
-    LIST_FOR_EACH_SAFE (f, f_next, list_node, &to_be_removed) {
-        LIST_FOR_EACH_SAFE (sfr, next, sb_list, &f->references) {
+    LIST_FOR_EACH_SAFE (f, list_node, &to_be_removed) {
+        LIST_FOR_EACH_SAFE (sfr, sb_list, &f->references) {
             if (!flood_remove_find_node(flood_remove_nodes, &sfr->sb_uuid)) {
                 ofctrl_flood_remove_add_node(flood_remove_nodes,
                                              &sfr->sb_uuid);
@@ -1793,9 +1830,8 @@ ovn_desired_flow_table_clear(struct ovn_desired_flow_table *flow_table)
 {
     flow_table->change_tracked = false;
 
-    struct desired_flow *f, *f_next;
-    LIST_FOR_EACH_SAFE (f, f_next, track_list_node,
-                        &flow_table->tracked_flows) {
+    struct desired_flow *f;
+    LIST_FOR_EACH_SAFE (f, track_list_node, &flow_table->tracked_flows) {
         ovs_list_remove(&f->track_list_node);
         if (f->is_deleted) {
             if (f->installed_flow) {
@@ -1805,9 +1841,8 @@ ovn_desired_flow_table_clear(struct ovn_desired_flow_table *flow_table)
         }
     }
 
-    struct sb_to_flow *stf, *next;
-    HMAP_FOR_EACH_SAFE (stf, next, hmap_node,
-                        &flow_table->uuid_flow_table) {
+    struct sb_to_flow *stf;
+    HMAP_FOR_EACH_SAFE (stf, hmap_node, &flow_table->uuid_flow_table) {
         remove_flows_from_sb_to_flow(flow_table, stf, NULL, NULL);
     }
 }
@@ -1825,14 +1860,14 @@ ovn_desired_flow_table_destroy(struct ovn_desired_flow_table *flow_table)
 static void
 ovn_installed_flow_table_clear(void)
 {
-    struct installed_flow *f, *next;
-    HMAP_FOR_EACH_SAFE (f, next, match_hmap_node, &installed_lflows) {
+    struct installed_flow *f;
+    HMAP_FOR_EACH_SAFE (f, match_hmap_node, &installed_lflows) {
         hmap_remove(&installed_lflows, &f->match_hmap_node);
         unlink_all_refs_for_installed_flow(f);
         installed_flow_destroy(f);
     }
 
-    HMAP_FOR_EACH_SAFE (f, next, match_hmap_node, &installed_pflows) {
+    HMAP_FOR_EACH_SAFE (f, match_hmap_node, &installed_pflows) {
         hmap_remove(&installed_pflows, &f->match_hmap_node);
         unlink_all_refs_for_installed_flow(f);
         installed_flow_destroy(f);
@@ -1858,21 +1893,24 @@ encode_flow_mod(struct ofputil_flow_mod *fm)
     return ofputil_encode_flow_mod(fm, OFPUTIL_P_OF15_OXM);
 }
 
-static void
-add_flow_mod(struct ofputil_flow_mod *fm,
-             struct ofputil_bundle_ctrl_msg *bc,
-             struct ovs_list *msgs)
+static struct ofpbuf *
+encode_bundle_add(struct ofpbuf *msg, struct ofputil_bundle_ctrl_msg *bc)
 {
-    struct ofpbuf *msg = encode_flow_mod(fm);
     struct ofputil_bundle_add_msg bam = {
         .bundle_id = bc->bundle_id,
         .flags     = bc->flags,
         .msg       = msg->data,
     };
-    struct ofpbuf *bundle_msg;
-
-    bundle_msg = ofputil_encode_bundle_add(OFP15_VERSION, &bam);
+    return ofputil_encode_bundle_add(OFP15_VERSION, &bam);
+}
 
+static void
+add_flow_mod(struct ofputil_flow_mod *fm,
+             struct ofputil_bundle_ctrl_msg *bc,
+             struct ovs_list *msgs)
+{
+    struct ofpbuf *msg = encode_flow_mod(fm);
+    struct ofpbuf *bundle_msg = encode_bundle_add(msg, bc);
     ofpbuf_delete(msg);
     ovs_list_push_back(msgs, &bundle_msg->list_node);
 }
@@ -1886,13 +1924,18 @@ encode_group_mod(const struct ofputil_group_mod *gm)
 }
 
 static void
-add_group_mod(struct ofputil_group_mod *gm, struct ovs_list *msgs)
+add_group_mod(struct ofputil_group_mod *gm,
+              struct ofputil_bundle_ctrl_msg *bc,
+              struct ovs_list *msgs)
 {
     struct ofpbuf *msg = encode_group_mod(gm);
-    if (msg->size <= UINT16_MAX) {
-        ovs_list_push_back(msgs, &msg->list_node);
+    if ((msg->size + sizeof(struct ofp14_bundle_ctrl_msg)) <= UINT16_MAX) {
+        struct ofpbuf *bundle_msg = encode_bundle_add(msg, bc);
+        ofpbuf_delete(msg);
+        ovs_list_push_back(msgs, &bundle_msg->list_node);
         return;
     }
+
     /* This group mod request is too large to fit in a single OF message
      * since the header can only specify a 16-bit size. We need to break
      * this into multiple group_mod requests.
@@ -1914,7 +1957,9 @@ add_group_mod(struct ofputil_group_mod *gm, struct ovs_list *msgs)
      * the size of the buckets, we will not put too many in our new group_mod
      * message.
      */
-    size_t max_buckets = ((UINT16_MAX - sizeof *ogm) / bucket_size) / 2;
+    size_t max_buckets = ((UINT16_MAX - sizeof *ogm -
+                           sizeof(struct ofp14_bundle_ctrl_msg)) / bucket_size)
+                         / 2;
 
     ovs_assert(max_buckets < ovs_list_size(&gm->buckets));
 
@@ -1943,14 +1988,16 @@ add_group_mod(struct ofputil_group_mod *gm, struct ovs_list *msgs)
     ovs_list_splice(&split.buckets, &bucket->list_node, &gm->buckets);
 
     struct ofpbuf *orig = encode_group_mod(gm);
-    ovs_list_push_back(msgs, &orig->list_node);
+    struct ofpbuf *bundle_msg = encode_bundle_add(orig, bc);
+    ofpbuf_delete(orig);
+    ovs_list_push_back(msgs, &bundle_msg->list_node);
 
     /* We call this recursively just in case our new
      * INSERT_BUCKET/REMOVE_BUCKET group_mod is still too
      * large for an OF message. This will allow for it to
      * be broken into pieces, too.
      */
-    add_group_mod(&split, msgs);
+    add_group_mod(&split, bc, msgs);
     ofputil_uninit_group_mod(&split);
 }
 
@@ -2046,8 +2093,8 @@ update_ovs_meter(struct ovn_extend_table_info *entry,
 static void
 ofctrl_meter_bands_clear(void)
 {
-    struct shash_node *node, *next;
-    SHASH_FOR_EACH_SAFE (node, next, &meter_bands) {
+    struct shash_node *node;
+    SHASH_FOR_EACH_SAFE (node, &meter_bands) {
         struct meter_band_entry *mb = node->data;
         shash_delete(&meter_bands, node);
         free(mb->bands);
@@ -2262,8 +2309,8 @@ update_installed_flows_by_compare(struct ovn_desired_flow_table *flow_table,
     /* Iterate through all of the installed flows.  If any of them are no
      * longer desired, delete them; if any of them should have different
      * actions, update them. */
-    struct installed_flow *i, *next;
-    HMAP_FOR_EACH_SAFE (i, next, match_hmap_node, installed_flows) {
+    struct installed_flow *i;
+    HMAP_FOR_EACH_SAFE (i, match_hmap_node, installed_flows) {
         unlink_all_refs_for_installed_flow(i);
         struct desired_flow *d = desired_flow_lookup(flow_table, &i->flow);
         if (!d) {
@@ -2356,8 +2403,8 @@ static void
 merge_tracked_flows(struct ovn_desired_flow_table *flow_table)
 {
     struct hmap deleted_flows = HMAP_INITIALIZER(&deleted_flows);
-    struct desired_flow *f, *next;
-    LIST_FOR_EACH_SAFE (f, next, track_list_node,
+    struct desired_flow *f;
+    LIST_FOR_EACH_SAFE (f, track_list_node,
                         &flow_table->tracked_flows) {
         if (f->is_deleted) {
             /* reuse f->match_hmap_node field since it is already removed from
@@ -2388,7 +2435,7 @@ merge_tracked_flows(struct ovn_desired_flow_table *flow_table)
             ovs_list_init(&f->track_list_node);
         }
     }
-    HMAP_FOR_EACH_SAFE (f, next, match_hmap_node, &deleted_flows) {
+    HMAP_FOR_EACH_SAFE (f, match_hmap_node, &deleted_flows) {
         hmap_remove(&deleted_flows, &f->match_hmap_node);
     }
     hmap_destroy(&deleted_flows);
@@ -2401,8 +2448,8 @@ update_installed_flows_by_track(struct ovn_desired_flow_table *flow_table,
                                 struct ovs_list *msgs)
 {
     merge_tracked_flows(flow_table);
-    struct desired_flow *f, *f_next;
-    LIST_FOR_EACH_SAFE (f, f_next, track_list_node,
+    struct desired_flow *f;
+    LIST_FOR_EACH_SAFE (f, track_list_node,
                         &flow_table->tracked_flows) {
         ovs_list_remove(&f->track_list_node);
         if (f->is_deleted) {
@@ -2466,16 +2513,25 @@ update_installed_flows_by_track(struct ovn_desired_flow_table *flow_table,
     }
 }
 
+bool
+ofctrl_has_backlog(void)
+{
+    if (rconn_packet_counter_n_packets(tx_counter)
+        || rconn_get_version(swconn) < 0) {
+        return true;
+    }
+    return false;
+}
+
 /* The flow table can be updated if the connection to the switch is up and
  * in the correct state and not backlogged with existing flow_mods.  (Our
  * criteria for being backlogged appear very conservative, but the socket
  * between ovn-controller and OVS provides some buffering.) */
-bool
+static bool
 ofctrl_can_put(void)
 {
     if (state != S_UPDATE_FLOWS
-        || rconn_packet_counter_n_packets(tx_counter)
-        || rconn_get_version(swconn) < 0) {
+        || ofctrl_has_backlog()) {
         return false;
     }
     return true;
@@ -2505,7 +2561,7 @@ ofctrl_put(struct ovn_desired_flow_table *lflow_table,
     static uint64_t old_req_cfg = 0;
     bool need_put = false;
     if (lflows_changed || pflows_changed || skipped_last_time ||
-        need_reinstall_flows) {
+        ofctrl_initial_clear) {
         need_put = true;
         old_req_cfg = req_cfg;
     } else if (req_cfg != old_req_cfg) {
@@ -2534,8 +2590,6 @@ ofctrl_put(struct ovn_desired_flow_table *lflow_table,
         return;
     }
 
-    need_reinstall_flows = false;
-
     /* OpenFlow messages to send to the switch to bring it up-to-date. */
     struct ovs_list msgs = OVS_LIST_INITIALIZER(&msgs);
 
@@ -2550,27 +2604,17 @@ ofctrl_put(struct ovn_desired_flow_table *lflow_table,
         }
     }
 
-    /* Iterate through all the desired groups. If there are new ones,
-     * add them to the switch. */
-    struct ovn_extend_table_info *desired;
-    EXTEND_TABLE_FOR_EACH_UNINSTALLED (desired, groups) {
-        /* Create and install new group. */
-        struct ofputil_group_mod gm;
-        enum ofputil_protocol usable_protocols;
-        char *group_string = xasprintf("group_id=%"PRIu32",%s",
-                                       desired->table_id,
-                                       desired->name);
-        char *error = parse_ofp_group_mod_str(&gm, OFPGC15_ADD, group_string,
-                                              NULL, NULL, &usable_protocols);
-        if (!error) {
-            add_group_mod(&gm, &msgs);
-        } else {
-            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
-            VLOG_ERR_RL(&rl, "new group %s %s", error, group_string);
-            free(error);
-        }
-        free(group_string);
-        ofputil_uninit_group_mod(&gm);
+    if (ofctrl_initial_clear) {
+        /* Send a meter_mod to delete all meters.
+         * XXX: Ideally, we should include the meter deletion and
+         * reinstallation in the same bundle just like for flows and groups,
+         * for minimum data plane interruption. However, OVS doesn't support
+         * METER_MOD in bundle yet. */
+        struct ofputil_meter_mod mm;
+        memset(&mm, 0, sizeof mm);
+        mm.command = OFPMC13_DELETE;
+        mm.meter.meter_id = OFPM13_ALL;
+        add_meter_mod(&mm, &msgs);
     }
 
     /* Iterate through all the desired meters. If there are new ones,
@@ -2605,6 +2649,52 @@ ofctrl_put(struct ovn_desired_flow_table *lflow_table,
     bundle_open = ofputil_encode_bundle_ctrl_request(OFP15_VERSION, &bc);
     ovs_list_push_back(&msgs, &bundle_open->list_node);
 
+    if (ofctrl_initial_clear) {
+        /* Send a flow_mod to delete all flows. */
+        struct ofputil_flow_mod fm = {
+            .table_id = OFPTT_ALL,
+            .command = OFPFC_DELETE,
+        };
+        minimatch_init_catchall(&fm.match);
+        add_flow_mod(&fm, &bc, &msgs);
+        minimatch_destroy(&fm.match);
+
+        /* Send a group_mod to delete all groups. */
+        struct ofputil_group_mod gm;
+        memset(&gm, 0, sizeof gm);
+        gm.command = OFPGC11_DELETE;
+        gm.group_id = OFPG_ALL;
+        gm.command_bucket_id = OFPG15_BUCKET_ALL;
+        ovs_list_init(&gm.buckets);
+        add_group_mod(&gm, &bc, &msgs);
+        ofputil_uninit_group_mod(&gm);
+
+        ofctrl_initial_clear = false;
+    }
+
+    /* Iterate through all the desired groups. If there are new ones,
+     * add them to the switch. */
+    struct ovn_extend_table_info *desired;
+    EXTEND_TABLE_FOR_EACH_UNINSTALLED (desired, groups) {
+        /* Create and install new group. */
+        struct ofputil_group_mod gm;
+        enum ofputil_protocol usable_protocols;
+        char *group_string = xasprintf("group_id=%"PRIu32",%s",
+                                       desired->table_id,
+                                       desired->name);
+        char *error = parse_ofp_group_mod_str(&gm, OFPGC15_ADD, group_string,
+                                              NULL, NULL, &usable_protocols);
+        if (!error) {
+            add_group_mod(&gm, &bc, &msgs);
+        } else {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_ERR_RL(&rl, "new group %s %s", error, group_string);
+            free(error);
+        }
+        free(group_string);
+        ofputil_uninit_group_mod(&gm);
+    }
+
     /* If skipped last time, then process the flow table
      * (tracked) flows even if lflows_changed is not set.
      * Same for pflows_changed. */
@@ -2634,21 +2724,10 @@ ofctrl_put(struct ovn_desired_flow_table *lflow_table,
 
     skipped_last_time = false;
 
-    if (ovs_list_back(&msgs) == &bundle_open->list_node) {
-        /* No flow updates.  Removing the bundle open request. */
-        ovs_list_pop_back(&msgs);
-        ofpbuf_delete(bundle_open);
-    } else {
-        /* Committing the bundle. */
-        bc.type = OFPBCT_COMMIT_REQUEST;
-        bundle_commit = ofputil_encode_bundle_ctrl_request(OFP15_VERSION, &bc);
-        ovs_list_push_back(&msgs, &bundle_commit->list_node);
-    }
-
     /* Iterate through the installed groups from previous runs. If they
      * are not needed delete them. */
-    struct ovn_extend_table_info *installed, *next_group;
-    EXTEND_TABLE_FOR_EACH_INSTALLED (installed, next_group, groups) {
+    struct ovn_extend_table_info *installed;
+    EXTEND_TABLE_FOR_EACH_INSTALLED (installed, groups) {
         /* Delete the group. */
         struct ofputil_group_mod gm;
         enum ofputil_protocol usable_protocols;
@@ -2658,7 +2737,7 @@ ofctrl_put(struct ovn_desired_flow_table *lflow_table,
                                               group_string, NULL, NULL,
                                               &usable_protocols);
         if (!error) {
-            add_group_mod(&gm, &msgs);
+            add_group_mod(&gm, &bc, &msgs);
         } else {
             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
             VLOG_ERR_RL(&rl, "Error deleting group %d: %s",
@@ -2670,13 +2749,24 @@ ofctrl_put(struct ovn_desired_flow_table *lflow_table,
         ovn_extend_table_remove_existing(groups, installed);
     }
 
+    if (ovs_list_back(&msgs) == &bundle_open->list_node) {
+        /* No flow updates.  Removing the bundle open request. */
+        ovs_list_pop_back(&msgs);
+        ofpbuf_delete(bundle_open);
+    } else {
+        /* Committing the bundle. */
+        bc.type = OFPBCT_COMMIT_REQUEST;
+        bundle_commit = ofputil_encode_bundle_ctrl_request(OFP15_VERSION, &bc);
+        ovs_list_push_back(&msgs, &bundle_commit->list_node);
+    }
+
     /* Sync the contents of groups->desired to groups->existing. */
     ovn_extend_table_sync(groups);
 
     /* Iterate through the installed meters from previous runs. If they
      * are not needed delete them. */
-    struct ovn_extend_table_info *m_installed, *next_meter;
-    EXTEND_TABLE_FOR_EACH_INSTALLED (m_installed, next_meter, meters) {
+    struct ovn_extend_table_info *m_installed;
+    EXTEND_TABLE_FOR_EACH_INSTALLED (m_installed, meters) {
         /* Delete the meter. */
         ofctrl_meter_bands_erase(m_installed, &msgs);
         if (!strncmp(m_installed->name, "__string: ", 10)) {
@@ -2714,8 +2804,8 @@ ofctrl_put(struct ovn_desired_flow_table *lflow_table,
         }
 
         /* Track the flow update. */
-        struct ofctrl_flow_update *fup, *prev;
-        LIST_FOR_EACH_REVERSE_SAFE (fup, prev, list_node, &flow_updates) {
+        struct ofctrl_flow_update *fup;
+        LIST_FOR_EACH_REVERSE_SAFE (fup, list_node, &flow_updates) {
             if (req_cfg < fup->req_cfg) {
                 /* This ofctrl_flow_update is for a configuration later than
                  * 'req_cfg'.  This should not normally happen, because it
diff --git a/controller/ofctrl.h b/controller/ofctrl.h
index ad8f4be..330b0b6 100644
--- a/controller/ofctrl.h
+++ b/controller/ofctrl.h
@@ -28,6 +28,7 @@ struct hmap;
 struct match;
 struct ofpbuf;
 struct ovsrec_bridge;
+struct ovsrec_open_vswitch_table;
 struct sbrec_meter_table;
 struct shash;
 
@@ -50,6 +51,7 @@ void ofctrl_init(struct ovn_extend_table *group_table,
                  struct ovn_extend_table *meter_table,
                  int inactivity_probe_interval);
 void ofctrl_run(const struct ovsrec_bridge *br_int,
+                const struct ovsrec_open_vswitch_table *,
                 struct shash *pending_ct_zones);
 enum mf_field_id ofctrl_get_mf_field_id(void);
 void ofctrl_put(struct ovn_desired_flow_table *lflow_table,
@@ -59,7 +61,7 @@ void ofctrl_put(struct ovn_desired_flow_table *lflow_table,
                 uint64_t nb_cfg,
                 bool lflow_changed,
                 bool pflow_changed);
-bool ofctrl_can_put(void);
+bool ofctrl_has_backlog(void);
 void ofctrl_wait(void);
 void ofctrl_destroy(void);
 uint64_t ofctrl_get_cur_cfg(void);
diff --git a/controller/ovn-controller.8.xml b/controller/ovn-controller.8.xml
index cc9a7d1..cb47c9b 100644
--- a/controller/ovn-controller.8.xml
+++ b/controller/ovn-controller.8.xml
@@ -182,6 +182,13 @@
         <code>external_ids:ovn-encap-type</code>.
       </dd>
 
+      <dt><code>external_ids:ovn-encap-df_default</code></dt>
+      <dd>
+        Indicates the DF flag handling of the encapsulation. Set to
+        <code>true</code> to set the DF flag for new data paths or
+        <code>false</code> to clear the DF flag.
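+        For example:
+        <p>
+          <code>ovs-vsctl set Open_vSwitch . external_ids:ovn-encap-df_default=true</code>
+        </p>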
+      </dd>
+
       <dt><code>external_ids:ovn-bridge-mappings</code></dt>
       <dd>
         A list of key-value pairs that map a physical network name to a local
@@ -257,6 +264,40 @@
         The default value is considered false if this option is not defined.
       </dd>
 
+      <dt><code>external_ids:ovn-ofctrl-wait-before-clear</code></dt>
+      <dd>
+        The time, in milliseconds, to wait before clearing flows in OVS after
+        an OpenFlow connection or reconnection during
+        <code>ovn-controller</code> initialization. The purpose of this wait
+        is to give <code>ovn-controller</code> time to compute the new flows
+        before clearing the existing ones, to avoid data plane downtime
+        during <code>ovn-controller</code> restart/upgrade in large scale
+        environments where recomputing the flows takes more than a few
+        seconds. It is difficult for <code>ovn-controller</code> to determine
+        when the computation of the new flows is complete, because of the
+        dynamic nature of cloud environments, which is why this option is
+        provided for users to adjust based on the scale of the environment.
+        By default it is 0, which means existing flows are cleared without
+        waiting. Leaving the value unset, or setting it too small, may result
+        in data plane downtime during upgrade/restart, while setting it too
+        large may add unnecessary control plane latency when applying new CMS
+        changes during upgrade/restart. In most cases a slightly larger value
+        is not harmful, because the extra control plane latency happens only
+        once, during the OpenFlow connection. To choose a reasonable value,
+        it is recommended to run the commands below on a node in the target
+        environment and then set this option to twice the value of
+        <code>Maximum</code> shown in the output of the second command.
+        <ul>
+          <li>
+            <code>ovn-appctl -t ovn-controller inc-engine/recompute</code>
+          </li>
+          <li>
+            <code>ovn-appctl -t ovn-controller stopwatch/show flow-generation</code>
+          </li>
+        </ul>
+      </dd>
+
       <dt><code>external_ids:ovn-enable-lflow-cache</code></dt>
       <dd>
         The boolean flag indicates if <code>ovn-controller</code> should
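
For example, if stopwatch/show flow-generation reports a Maximum of 4000 ms,
setting external_ids:ovn-ofctrl-wait-before-clear to 8000 follows the guidance
above. As a rough illustration of how such an option could be consumed, the
sketch below reads it from the Open_vSwitch table that is now passed to
ofctrl_run(); the helper name is hypothetical and the real parsing in
controller/ofctrl.c may differ.

    /* Illustrative sketch only: read ovn-ofctrl-wait-before-clear (in ms)
     * from the Open_vSwitch table handed to ofctrl_run().  The helper name
     * is hypothetical. */
    #include "smap.h"
    #include "lib/vswitch-idl.h"

    static int
    get_ofctrl_wait_before_clear(const struct ovsrec_open_vswitch_table *table)
    {
        const struct ovsrec_open_vswitch *cfg =
            ovsrec_open_vswitch_table_first(table);
        return cfg ? smap_get_int(&cfg->external_ids,
                                  "ovn-ofctrl-wait-before-clear", 0) : 0;
    }
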
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index 6d10c03..43fbf2b 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -53,6 +53,7 @@
 #include "lib/chassis-index.h"
 #include "lib/extend-table.h"
 #include "lib/ip-mcast-index.h"
+#include "lib/mac-binding-index.h"
 #include "lib/mcast-group-index.h"
 #include "lib/ovn-sb-idl.h"
 #include "lib/ovn-util.h"
@@ -93,6 +94,7 @@ static unixctl_cb_func debug_dump_lflow_conj_ids;
 static unixctl_cb_func lflow_cache_flush_cmd;
 static unixctl_cb_func lflow_cache_show_stats_cmd;
 static unixctl_cb_func debug_delay_nb_cfg_report;
+static unixctl_cb_func debug_ignore_startup_delay;
 
 #define DEFAULT_BRIDGE_NAME "br-int"
 #define DEFAULT_DATAPATH "system"
@@ -132,7 +134,7 @@ static const char *ssl_ca_cert_file;
 #define DEFAULT_LFLOW_CACHE_TRIM_TO_MS 30000
 
 /* SB Global options defaults. */
-#define DEFAULT_SB_GLOBAL_LB_HAIRPIN_USE_CT_MARK false
+#define DEFAULT_SB_GLOBAL_LB_HAIRPIN_USE_CT_MARK true
 
 struct controller_engine_ctx {
     struct lflow_cache *lflow_cache;
@@ -371,7 +373,9 @@ create_br_int(struct ovsdb_idl_txn *ovs_idl_txn,
     struct ovsrec_bridge **bridges;
     size_t bytes = sizeof *bridges * cfg->n_bridges;
     bridges = xmalloc(bytes + sizeof *bridges);
-    memcpy(bridges, cfg->bridges, bytes);
+    if (cfg->n_bridges) {
+        memcpy(bridges, cfg->bridges, bytes);
+    }
     bridges[cfg->n_bridges] = bridge;
     ovsrec_open_vswitch_verify_bridges(cfg);
     ovsrec_open_vswitch_set_bridges(cfg, bridges, cfg->n_bridges + 1);
@@ -649,7 +653,7 @@ update_ct_zones(const struct shash *binding_lports,
                 struct simap *ct_zones, unsigned long *ct_zone_bitmap,
                 struct shash *pending_ct_zones)
 {
-    struct simap_node *ct_zone, *ct_zone_next;
+    struct simap_node *ct_zone;
     int scan_start = 1;
     const char *user;
     struct sset all_users = SSET_INITIALIZER(&all_users);
@@ -683,12 +687,12 @@ update_ct_zones(const struct shash *binding_lports,
     }
 
     /* Delete zones that do not exist in above sset. */
-    SIMAP_FOR_EACH_SAFE(ct_zone, ct_zone_next, ct_zones) {
+    SIMAP_FOR_EACH_SAFE (ct_zone, ct_zones) {
         if (!sset_contains(&all_users, ct_zone->name)) {
             VLOG_DBG("removing ct zone %"PRId32" for '%s'",
                      ct_zone->data, ct_zone->name);
 
-            add_pending_ct_zone_entry(pending_ct_zones, CT_ZONE_DB_QUEUED,
+            add_pending_ct_zone_entry(pending_ct_zones, CT_ZONE_OF_QUEUED,
                                       ct_zone->data, false, ct_zone->name);
 
             bitmap_set0(ct_zone_bitmap, ct_zone->data);
@@ -719,8 +723,7 @@ update_ct_zones(const struct shash *binding_lports,
          */
         if (bitmap_is_set(unreq_snat_zones, snat_req_node->data)) {
             struct simap_node *dup;
-            struct simap_node *next;
-            SIMAP_FOR_EACH_SAFE (dup, next, ct_zones) {
+            SIMAP_FOR_EACH_SAFE (dup, ct_zones) {
                 if (dup != snat_req_node && dup->data == snat_req_node->data) {
                     simap_delete(ct_zones, dup);
                     break;
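
The conversions above (and the similar SHASH_/HMAP_/LIST_ variants elsewhere in
this patch) rely on the single-argument form of the SAFE iteration macros,
which tracks the next element internally instead of requiring a caller-supplied
second pointer. A minimal sketch of the new idiom, assuming a populated simap:

    /* Minimal sketch of the short SAFE-iteration form used in this patch:
     * entries may be deleted while iterating without an explicit "next". */
    #include "simap.h"

    static void
    drop_zero_entries(struct simap *map)
    {
        struct simap_node *node;

        SIMAP_FOR_EACH_SAFE (node, map) {
            if (!node->data) {
                simap_delete(map, node);
            }
        }
    }
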
@@ -864,11 +867,12 @@ static void
 store_nb_cfg(struct ovsdb_idl_txn *sb_txn, struct ovsdb_idl_txn *ovs_txn,
              const struct sbrec_chassis_private *chassis,
              const struct ovsrec_bridge *br_int,
-             unsigned int delay_nb_cfg_report, int64_t startup_ts)
+             unsigned int delay_nb_cfg_report)
 {
     struct ofctrl_acked_seqnos *acked_nb_cfg_seqnos =
         ofctrl_acked_seqnos_get(ofctrl_seq_type_nb_cfg);
     uint64_t cur_cfg = acked_nb_cfg_seqnos->last_acked;
+    int64_t startup_ts = daemon_startup_ts();
 
     if (ovs_txn && br_int
             && startup_ts != smap_get_ullong(&br_int->external_ids,
@@ -978,7 +982,8 @@ ctrl_register_ovs_idl(struct ovsdb_idl *ovs_idl)
     SB_NODE(dns, "dns") \
     SB_NODE(load_balancer, "load_balancer") \
     SB_NODE(fdb, "fdb") \
-    SB_NODE(meter, "meter")
+    SB_NODE(meter, "meter") \
+    SB_NODE(static_mac_binding, "static_mac_binding")
 
 enum sb_engine_node {
 #define SB_NODE(NAME, NAME_STR) SB_##NAME,
@@ -1128,6 +1133,88 @@ ovs_interface_shadow_ovs_interface_handler(struct engine_node *node,
     return true;
 }
 
+struct ed_type_activated_ports {
+    struct ovs_list *activated_ports;
+};
+
+static void *
+en_activated_ports_init(struct engine_node *node OVS_UNUSED,
+                        struct engine_arg *arg OVS_UNUSED)
+{
+    struct ed_type_activated_ports *data = xzalloc(sizeof *data);
+    data->activated_ports = NULL;
+    return data;
+}
+
+static void
+en_activated_ports_cleanup(void *data_)
+{
+    struct ed_type_activated_ports *data = data_;
+    if (!data->activated_ports) {
+        return;
+    }
+
+    struct activated_port *pp;
+    LIST_FOR_EACH_POP (pp, list, data->activated_ports) {
+        free(pp);
+    }
+    free(data->activated_ports);
+    data->activated_ports = NULL;
+}
+
+static void
+en_activated_ports_clear_tracked_data(void *data)
+{
+    en_activated_ports_cleanup(data);
+}
+
+static void
+en_activated_ports_run(struct engine_node *node, void *data_)
+{
+    struct ed_type_activated_ports *data = data_;
+    enum engine_node_state state = EN_UNCHANGED;
+    data->activated_ports = get_ports_to_activate_in_engine();
+    if (data->activated_ports) {
+        state = EN_UPDATED;
+    }
+    engine_set_node_state(node, state);
+}
+
+struct ed_type_postponed_ports {
+    struct sset *postponed_ports;
+};
+
+static void *
+en_postponed_ports_init(struct engine_node *node OVS_UNUSED,
+                        struct engine_arg *arg OVS_UNUSED)
+{
+    struct ed_type_postponed_ports *data = xzalloc(sizeof *data);
+    data->postponed_ports = get_postponed_ports();
+    return data;
+}
+
+static void
+en_postponed_ports_cleanup(void *data_)
+{
+    struct ed_type_postponed_ports *data = data_;
+    if (!data->postponed_ports) {
+        return;
+    }
+    data->postponed_ports = NULL;
+}
+
+static void
+en_postponed_ports_run(struct engine_node *node, void *data_)
+{
+    struct ed_type_postponed_ports *data = data_;
+    enum engine_node_state state = EN_UNCHANGED;
+    data->postponed_ports = get_postponed_ports();
+    if (!sset_is_empty(data->postponed_ports)) {
+        state = EN_UPDATED;
+    }
+    engine_set_node_state(node, state);
+}
+
 struct ed_type_runtime_data {
     /* Contains "struct local_datapath" nodes. */
     struct hmap local_datapaths;
@@ -1158,6 +1245,8 @@ struct ed_type_runtime_data {
 
     struct shash local_active_ports_ipv6_pd;
     struct shash local_active_ports_ras;
+
+    struct sset *postponed_ports;
 };
 
 /* struct ed_type_runtime_data has the below members for tracking the
@@ -1265,8 +1354,8 @@ en_runtime_data_cleanup(void *data)
     sset_destroy(&rt_data->egress_ifaces);
     smap_destroy(&rt_data->local_iface_ids);
     local_datapaths_destroy(&rt_data->local_datapaths);
-    shash_destroy_free_data(&rt_data->local_active_ports_ipv6_pd);
-    shash_destroy_free_data(&rt_data->local_active_ports_ras);
+    shash_destroy(&rt_data->local_active_ports_ipv6_pd);
+    shash_destroy(&rt_data->local_active_ports_ras);
     local_binding_data_destroy(&rt_data->lbinding_data);
 }
 
@@ -1358,6 +1447,7 @@ init_binding_ctx(struct engine_node *node,
     b_ctx_out->egress_ifaces = &rt_data->egress_ifaces;
     b_ctx_out->lbinding_data = &rt_data->lbinding_data;
     b_ctx_out->local_iface_ids = &rt_data->local_iface_ids;
+    b_ctx_out->postponed_ports = rt_data->postponed_ports;
     b_ctx_out->tracked_dp_bindings = NULL;
     b_ctx_out->if_mgr = ctrl_ctx->if_mgr;
 }
@@ -1378,8 +1468,8 @@ en_runtime_data_run(struct engine_node *node, void *data)
         first_run = false;
     } else {
         local_datapaths_destroy(local_datapaths);
-        shash_clear_free_data(local_active_ipv6_pd);
-        shash_clear_free_data(local_active_ras);
+        shash_clear(local_active_ipv6_pd);
+        shash_clear(local_active_ras);
         local_binding_data_destroy(&rt_data->lbinding_data);
         sset_destroy(local_lports);
         related_lports_destroy(&rt_data->related_lports);
@@ -1395,6 +1485,10 @@ en_runtime_data_run(struct engine_node *node, void *data)
         local_binding_data_init(&rt_data->lbinding_data);
     }
 
+    struct ed_type_postponed_ports *pp_data =
+        engine_get_input_data("postponed_ports", node);
+    rt_data->postponed_ports = pp_data->postponed_ports;
+
     struct binding_ctx_in b_ctx_in;
     struct binding_ctx_out b_ctx_out;
     init_binding_ctx(node, rt_data, &b_ctx_in, &b_ctx_out);
@@ -1417,6 +1511,73 @@ en_runtime_data_run(struct engine_node *node, void *data)
     engine_set_node_state(node, EN_UPDATED);
 }
 
+struct ed_type_sb_ro {
+    bool sb_readonly;
+};
+
+static void *
+en_sb_ro_init(struct engine_node *node OVS_UNUSED,
+              struct engine_arg *arg OVS_UNUSED)
+{
+    struct ed_type_sb_ro *data = xzalloc(sizeof *data);
+    return data;
+}
+
+static void
+en_sb_ro_run(struct engine_node *node, void *data)
+{
+    struct ed_type_sb_ro *sb_ro_data = data;
+    bool sb_readonly = !engine_get_context()->ovnsb_idl_txn;
+    if (sb_ro_data->sb_readonly != sb_readonly) {
+        sb_ro_data->sb_readonly = sb_readonly;
+        if (!sb_ro_data->sb_readonly) {
+            engine_set_node_state(node, EN_UPDATED);
+        }
+    }
+}
+
+static void
+en_sb_ro_cleanup(void *data OVS_UNUSED)
+{
+}
+
+static bool
+runtime_data_sb_ro_handler(struct engine_node *node, void *data)
+{
+    const struct sbrec_chassis *chassis = NULL;
+
+    struct ovsrec_open_vswitch_table *ovs_table =
+        (struct ovsrec_open_vswitch_table *)EN_OVSDB_GET(
+            engine_get_input("OVS_open_vswitch", node));
+
+    const char *chassis_id = get_ovs_chassis_id(ovs_table);
+
+    struct ovsdb_idl_index *sbrec_chassis_by_name =
+        engine_ovsdb_node_get_index(
+                engine_get_input("SB_chassis", node),
+                "name");
+
+    if (chassis_id) {
+        chassis = chassis_lookup_by_name(sbrec_chassis_by_name, chassis_id);
+    }
+    if (chassis) {
+        struct ed_type_runtime_data *rt_data = data;
+        bool sb_readonly = !engine_get_context()->ovnsb_idl_txn;
+        struct controller_engine_ctx *ctrl_ctx =
+            engine_get_context()->client_ctx;
+
+        if (if_status_handle_claims(ctrl_ctx->if_mgr,
+                                    &rt_data->lbinding_data,
+                                    chassis,
+                                    &rt_data->tracked_dp_bindings,
+                                    sb_readonly)) {
+            engine_set_node_state(node, EN_UPDATED);
+            rt_data->tracked = true;
+        }
+    }
+    return true;
+}
+
 static bool
 runtime_data_ovs_interface_shadow_handler(struct engine_node *node, void *data)
 {
@@ -1518,8 +1679,8 @@ en_addr_sets_clear_tracked_data(void *data)
     struct ed_type_addr_sets *as = data;
     sset_clear(&as->new);
     sset_clear(&as->deleted);
-    struct shash_node *node, *next;
-    SHASH_FOR_EACH_SAFE (node, next, &as->updated) {
+    struct shash_node *node;
+    SHASH_FOR_EACH_SAFE (node, &as->updated) {
         struct addr_set_diff *asd = node->data;
         expr_constant_set_destroy(asd->added);
         free(asd->added);
@@ -1689,8 +1850,8 @@ port_group_ssets_delete(struct shash *port_group_ssets,
 static void
 port_group_ssets_clear(struct shash *port_group_ssets)
 {
-    struct shash_node *node, *next;
-    SHASH_FOR_EACH_SAFE (node, next, port_group_ssets) {
+    struct shash_node *node;
+    SHASH_FOR_EACH_SAFE (node, port_group_ssets) {
         struct sset *lports = node->data;
         shash_delete(port_group_ssets, node);
         sset_destroy(lports);
@@ -2046,7 +2207,8 @@ ct_zones_runtime_data_handler(struct engine_node *node, void *data)
                 continue;
             }
 
-            if (t_lport->tracked_type == TRACKED_RESOURCE_NEW) {
+            if (t_lport->tracked_type == TRACKED_RESOURCE_NEW ||
+                t_lport->tracked_type == TRACKED_RESOURCE_UPDATED) {
                 if (!simap_contains(&ct_zones_data->current,
                                     t_lport->pb->logical_port)) {
                     alloc_id_to_ct_zone(t_lport->pb->logical_port,
@@ -2061,15 +2223,13 @@ ct_zones_runtime_data_handler(struct engine_node *node, void *data)
                                t_lport->pb->logical_port);
                 if (ct_zone) {
                     add_pending_ct_zone_entry(
-                        &ct_zones_data->pending, CT_ZONE_DB_QUEUED,
+                        &ct_zones_data->pending, CT_ZONE_OF_QUEUED,
                         ct_zone->data, false, ct_zone->name);
 
                     bitmap_set0(ct_zones_data->bitmap, ct_zone->data);
                     simap_delete(&ct_zones_data->current, ct_zone);
                     updated = true;
                 }
-            } else {
-                OVS_NOT_REACHED();
             }
         }
     }
@@ -2164,6 +2324,33 @@ load_balancers_by_dp_find(struct hmap *lbs,
     return NULL;
 }
 
+static void
+load_balancers_by_dp_add_one(const struct hmap *local_datapaths,
+                             const struct sbrec_datapath_binding *datapath,
+                             const struct sbrec_load_balancer *lb,
+                             struct hmap *lbs)
+{
+    struct local_datapath *ldp =
+        get_local_datapath(local_datapaths, datapath->tunnel_key);
+
+    if (!ldp) {
+        return;
+    }
+
+    struct load_balancers_by_dp *lbs_by_dp =
+        load_balancers_by_dp_find(lbs, ldp->datapath);
+    if (!lbs_by_dp) {
+        lbs_by_dp = load_balancers_by_dp_create(lbs, ldp->datapath);
+    }
+
+    if (lbs_by_dp->n_dp_lbs == lbs_by_dp->n_allocated_dp_lbs) {
+        lbs_by_dp->dp_lbs = x2nrealloc(lbs_by_dp->dp_lbs,
+                                       &lbs_by_dp->n_allocated_dp_lbs,
+                                       sizeof *lbs_by_dp->dp_lbs);
+    }
+    lbs_by_dp->dp_lbs[lbs_by_dp->n_dp_lbs++] = lb;
+}
+
 /* Builds and returns a hmap of 'load_balancers_by_dp', one record for each
  * local datapath.
  */
@@ -2177,25 +2364,14 @@ load_balancers_by_dp_init(const struct hmap *local_datapaths,
     const struct sbrec_load_balancer *lb;
     SBREC_LOAD_BALANCER_TABLE_FOR_EACH (lb, lb_table) {
         for (size_t i = 0; i < lb->n_datapaths; i++) {
-            struct local_datapath *ldp =
-                get_local_datapath(local_datapaths,
-                                   lb->datapaths[i]->tunnel_key);
-            if (!ldp) {
-                continue;
-            }
-
-            struct load_balancers_by_dp *lbs_by_dp =
-                load_balancers_by_dp_find(lbs, ldp->datapath);
-            if (!lbs_by_dp) {
-                lbs_by_dp = load_balancers_by_dp_create(lbs, ldp->datapath);
-            }
-
-            if (lbs_by_dp->n_dp_lbs == lbs_by_dp->n_allocated_dp_lbs) {
-                lbs_by_dp->dp_lbs = x2nrealloc(lbs_by_dp->dp_lbs,
-                                               &lbs_by_dp->n_allocated_dp_lbs,
-                                               sizeof *lbs_by_dp->dp_lbs);
-            }
-            lbs_by_dp->dp_lbs[lbs_by_dp->n_dp_lbs++] = lb;
+            load_balancers_by_dp_add_one(local_datapaths,
+                                         lb->datapaths[i], lb, lbs);
+        }
+        for (size_t i = 0; lb->datapath_group
+                           && i < lb->datapath_group->n_datapaths; i++) {
+            load_balancers_by_dp_add_one(local_datapaths,
+                                         lb->datapath_group->datapaths[i],
+                                         lb, lbs);
         }
     }
     return lbs;
@@ -2416,6 +2592,11 @@ init_lflow_ctx(struct engine_node *node,
                 engine_get_input("SB_mac_binding", node),
                 "datapath");
 
+    struct ovsdb_idl_index *sbrec_static_mac_binding_by_datapath =
+        engine_ovsdb_node_get_index(
+                engine_get_input("SB_static_mac_binding", node),
+                "datapath");
+
     struct sbrec_port_binding_table *port_binding_table =
         (struct sbrec_port_binding_table *)EN_OVSDB_GET(
             engine_get_input("SB_port_binding", node));
@@ -2452,6 +2633,10 @@ init_lflow_ctx(struct engine_node *node,
         (struct sbrec_fdb_table *)EN_OVSDB_GET(
             engine_get_input("SB_fdb", node));
 
+    struct sbrec_static_mac_binding_table *smb_table =
+        (struct sbrec_static_mac_binding_table *)EN_OVSDB_GET(
+            engine_get_input("SB_static_mac_binding", node));
+
     struct ovsrec_open_vswitch_table *ovs_table =
         (struct ovsrec_open_vswitch_table *)EN_OVSDB_GET(
             engine_get_input("OVS_open_vswitch", node));
@@ -2494,6 +2679,8 @@ init_lflow_ctx(struct engine_node *node,
     l_ctx_in->sbrec_port_binding_by_name = sbrec_port_binding_by_name;
     l_ctx_in->sbrec_fdb_by_dp_key = sbrec_fdb_by_dp_key;
     l_ctx_in->sbrec_mac_binding_by_datapath = sbrec_mac_binding_by_datapath;
+    l_ctx_in->sbrec_static_mac_binding_by_datapath =
+        sbrec_static_mac_binding_by_datapath;
     l_ctx_in->port_binding_table = port_binding_table;
     l_ctx_in->dhcp_options_table  = dhcp_table;
     l_ctx_in->dhcpv6_options_table = dhcpv6_table;
@@ -2504,11 +2691,13 @@ init_lflow_ctx(struct engine_node *node,
     l_ctx_in->fdb_table = fdb_table,
     l_ctx_in->chassis = chassis;
     l_ctx_in->lb_table = lb_table;
+    l_ctx_in->static_mac_binding_table = smb_table;
     l_ctx_in->local_datapaths = &rt_data->local_datapaths;
     l_ctx_in->addr_sets = addr_sets;
     l_ctx_in->port_groups = port_groups;
     l_ctx_in->active_tunnels = &rt_data->active_tunnels;
     l_ctx_in->related_lport_ids = &rt_data->related_lports.lport_ids;
+    l_ctx_in->binding_lports = &rt_data->lbinding_data.lports;
     l_ctx_in->chassis_tunnels = &non_vif_data->chassis_tunnels;
     l_ctx_in->lb_hairpin_use_ct_mark = n_opts->lb_hairpin_use_ct_mark;
 
@@ -2647,7 +2836,7 @@ lflow_output_sb_mac_binding_handler(struct engine_node *node, void *data)
 
     struct ed_type_lflow_output *lfo = data;
 
-    lflow_handle_changed_neighbors(sbrec_port_binding_by_name,
+    lflow_handle_changed_mac_bindings(sbrec_port_binding_by_name,
             mac_binding_table, local_datapaths, &lfo->flow_table);
 
     engine_set_node_state(node, EN_UPDATED);
@@ -2655,6 +2844,32 @@ lflow_output_sb_mac_binding_handler(struct engine_node *node, void *data)
 }
 
 static bool
+lflow_output_sb_static_mac_binding_handler(struct engine_node *node,
+                                           void *data)
+{
+    struct ovsdb_idl_index *sbrec_port_binding_by_name =
+        engine_ovsdb_node_get_index(
+                engine_get_input("SB_port_binding", node),
+                "name");
+
+    struct sbrec_static_mac_binding_table *smb_table =
+        (struct sbrec_static_mac_binding_table *)EN_OVSDB_GET(
+            engine_get_input("SB_static_mac_binding", node));
+
+    struct ed_type_runtime_data *rt_data =
+        engine_get_input_data("runtime_data", node);
+    const struct hmap *local_datapaths = &rt_data->local_datapaths;
+
+    struct ed_type_lflow_output *lfo = data;
+
+    lflow_handle_changed_static_mac_bindings(sbrec_port_binding_by_name,
+        smb_table, local_datapaths, &lfo->flow_table);
+
+    engine_set_node_state(node, EN_UPDATED);
+    return true;
+}
+
+static bool
 lflow_output_sb_multicast_group_handler(struct engine_node *node, void *data)
 {
     struct ed_type_lflow_output *lfo = data;
@@ -2836,14 +3051,13 @@ lflow_output_runtime_data_handler(struct engine_node *node,
                     &l_ctx_in, &l_ctx_out)) {
                 return false;
             }
-        } else {
-            struct shash_node *shash_node;
-            SHASH_FOR_EACH (shash_node, &tdp->lports) {
-                struct tracked_lport *lport = shash_node->data;
-                if (!lflow_handle_flows_for_lport(lport->pb, &l_ctx_in,
-                                                  &l_ctx_out)) {
-                    return false;
-                }
+        }
+        struct shash_node *shash_node;
+        SHASH_FOR_EACH (shash_node, &tdp->lports) {
+            struct tracked_lport *lport = shash_node->data;
+            if (!lflow_handle_flows_for_lport(lport->pb, &l_ctx_in,
+                                                &l_ctx_out)) {
+                return false;
             }
         }
     }
@@ -2916,6 +3130,11 @@ static void init_physical_ctx(struct engine_node *node,
                 engine_get_input("SB_port_binding", node),
                 "name");
 
+    struct ovsdb_idl_index *sbrec_port_binding_by_datapath =
+        engine_ovsdb_node_get_index(
+                engine_get_input("SB_port_binding", node),
+                "datapath");
+
     struct sbrec_multicast_group_table *multicast_group_table =
         (struct sbrec_multicast_group_table *)EN_OVSDB_GET(
             engine_get_input("SB_multicast_group", node));
@@ -2955,6 +3174,7 @@ static void init_physical_ctx(struct engine_node *node,
     struct simap *ct_zones = &ct_zones_data->current;
 
     p_ctx->sbrec_port_binding_by_name = sbrec_port_binding_by_name;
+    p_ctx->sbrec_port_binding_by_datapath = sbrec_port_binding_by_datapath;
     p_ctx->port_binding_table = port_binding_table;
     p_ctx->mc_group_table = multicast_group_table;
     p_ctx->br_int = br_int;
@@ -3127,6 +3347,49 @@ pflow_output_ct_zones_handler(struct engine_node *node OVS_UNUSED,
     return !ct_zones_data->recomputed;
 }
 
+static bool
+pflow_output_activated_ports_handler(struct engine_node *node, void *data)
+{
+    struct ed_type_activated_ports *ap =
+        engine_get_input_data("activated_ports", node);
+    if (!ap->activated_ports) {
+        return true;
+    }
+
+    struct ed_type_pflow_output *pfo = data;
+    struct ed_type_runtime_data *rt_data =
+        engine_get_input_data("runtime_data", node);
+    struct ed_type_non_vif_data *non_vif_data =
+        engine_get_input_data("non_vif_data", node);
+
+    struct physical_ctx p_ctx;
+    init_physical_ctx(node, rt_data, non_vif_data, &p_ctx);
+
+    struct activated_port *pp;
+    LIST_FOR_EACH (pp, list, ap->activated_ports) {
+        struct ovsdb_idl_index *sbrec_datapath_binding_by_key =
+            engine_ovsdb_node_get_index(
+                    engine_get_input("SB_datapath_binding", node),
+                    "key");
+        struct ovsdb_idl_index *sbrec_port_binding_by_key =
+            engine_ovsdb_node_get_index(
+                    engine_get_input("SB_port_binding", node),
+                    "key");
+        const struct sbrec_port_binding *pb = lport_lookup_by_key(
+            sbrec_datapath_binding_by_key, sbrec_port_binding_by_key,
+            pp->dp_key, pp->port_key);
+        if (pb) {
+            if (!physical_handle_flows_for_lport(pb, false, &p_ctx,
+                                                 &pfo->flow_table)) {
+                return false;
+            }
+            tag_port_as_activated_in_engine(pp);
+        }
+    }
+    engine_set_node_state(node, EN_UPDATED);
+    return true;
+}
+
 static void *
 en_flow_output_init(struct engine_node *node OVS_UNUSED,
                     struct engine_arg *arg OVS_UNUSED)
@@ -3321,9 +3584,7 @@ main(int argc, char *argv[])
         = ovsdb_idl_index_create1(ovnsb_idl_loop.idl,
                                   &sbrec_datapath_binding_col_tunnel_key);
     struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip
-        = ovsdb_idl_index_create2(ovnsb_idl_loop.idl,
-                                  &sbrec_mac_binding_col_logical_port,
-                                  &sbrec_mac_binding_col_ip);
+        = mac_binding_by_lport_ip_index_create(ovnsb_idl_loop.idl);
     struct ovsdb_idl_index *sbrec_ip_multicast
         = ip_mcast_index_create(ovnsb_idl_loop.idl);
     struct ovsdb_idl_index *sbrec_igmp_group
@@ -3336,8 +3597,10 @@ main(int argc, char *argv[])
                                   &sbrec_fdb_col_mac,
                                   &sbrec_fdb_col_dp_key);
     struct ovsdb_idl_index *sbrec_mac_binding_by_datapath
+        = mac_binding_by_datapath_index_create(ovnsb_idl_loop.idl);
+    struct ovsdb_idl_index *sbrec_static_mac_binding_by_datapath
         = ovsdb_idl_index_create1(ovnsb_idl_loop.idl,
-                                  &sbrec_mac_binding_col_datapath);
+                                  &sbrec_static_mac_binding_col_datapath);
 
     ovsdb_idl_track_add_all(ovnsb_idl_loop.idl);
     ovsdb_idl_omit_alert(ovnsb_idl_loop.idl,
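
The two mac_binding_*_index_create() helpers referenced above replace index
creation that used to be open-coded here; a sketch of what
lib/mac-binding-index.c presumably provides, reconstructed from the removed
lines, is:

    /* Sketch reconstructed from the removed index-creation calls; the actual
     * lib/mac-binding-index.c may differ. */
    #include "lib/mac-binding-index.h"
    #include "lib/ovn-sb-idl.h"

    struct ovsdb_idl_index *
    mac_binding_by_lport_ip_index_create(struct ovsdb_idl *idl)
    {
        return ovsdb_idl_index_create2(idl,
                                       &sbrec_mac_binding_col_logical_port,
                                       &sbrec_mac_binding_col_ip);
    }

    struct ovsdb_idl_index *
    mac_binding_by_datapath_index_create(struct ovsdb_idl *idl)
    {
        return ovsdb_idl_index_create1(idl, &sbrec_mac_binding_col_datapath);
    }
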
@@ -3398,6 +3661,7 @@ main(int argc, char *argv[])
     stopwatch_create(VIF_PLUG_RUN_STOPWATCH_NAME, SW_MS);
 
     /* Define inc-proc-engine nodes. */
+    ENGINE_NODE(sb_ro, "sb_ro");
     ENGINE_NODE_WITH_CLEAR_TRACK_DATA_IS_VALID(ct_zones, "ct_zones");
     ENGINE_NODE_WITH_CLEAR_TRACK_DATA(ovs_interface_shadow,
                                       "ovs_interface_shadow");
@@ -3405,6 +3669,8 @@ main(int argc, char *argv[])
     ENGINE_NODE(non_vif_data, "non_vif_data");
     ENGINE_NODE(mff_ovn_geneve, "mff_ovn_geneve");
     ENGINE_NODE(ofctrl_is_connected, "ofctrl_is_connected");
+    ENGINE_NODE_WITH_CLEAR_TRACK_DATA(activated_ports, "activated_ports");
+    ENGINE_NODE(postponed_ports, "postponed_ports");
     ENGINE_NODE(pflow_output, "physical_flow_output");
     ENGINE_NODE_WITH_CLEAR_TRACK_DATA(lflow_output, "logical_flow_output");
     ENGINE_NODE(flow_output, "flow_output");
@@ -3452,6 +3718,14 @@ main(int argc, char *argv[])
     engine_add_input(&en_pflow_output, &en_sb_multicast_group,
                      pflow_output_sb_multicast_group_handler);
 
+    /* pflow_output needs access to the SB datapath binding, hence the noop
+     * handler.
+     */
+    engine_add_input(&en_pflow_output, &en_sb_datapath_binding,
+                     engine_noop_handler);
+    engine_add_input(&en_pflow_output, &en_activated_ports,
+                     pflow_output_activated_ports_handler);
+
     engine_add_input(&en_pflow_output, &en_runtime_data,
                      pflow_output_runtime_data_handler);
     engine_add_input(&en_pflow_output, &en_sb_encap, NULL);
@@ -3495,6 +3769,8 @@ main(int argc, char *argv[])
 
     engine_add_input(&en_lflow_output, &en_sb_mac_binding,
                      lflow_output_sb_mac_binding_handler);
+    engine_add_input(&en_lflow_output, &en_sb_static_mac_binding,
+                     lflow_output_sb_static_mac_binding_handler);
     engine_add_input(&en_lflow_output, &en_sb_logical_flow,
                      lflow_output_sb_logical_flow_handler);
     /* Using a noop handler since we don't really need any data from datapath
@@ -3534,6 +3810,11 @@ main(int argc, char *argv[])
                      runtime_data_sb_datapath_binding_handler);
     engine_add_input(&en_runtime_data, &en_sb_port_binding,
                      runtime_data_sb_port_binding_handler);
+    /* Reuse the same handler for any previously postponed ports. */
+    engine_add_input(&en_runtime_data, &en_postponed_ports,
+                     runtime_data_sb_port_binding_handler);
+    /* Run sb_ro_handler after port_binding_handler if a port is deleted. */
+    engine_add_input(&en_runtime_data, &en_sb_ro, runtime_data_sb_ro_handler);
 
     /* The OVS interface handler for runtime_data changes MUST be executed
      * after the sb_port_binding_handler as port_binding deletes must be
@@ -3577,6 +3858,8 @@ main(int argc, char *argv[])
                                 sbrec_fdb_by_dp_key);
     engine_ovsdb_node_add_index(&en_sb_mac_binding, "datapath",
                                 sbrec_mac_binding_by_datapath);
+    engine_ovsdb_node_add_index(&en_sb_static_mac_binding, "datapath",
+                                sbrec_static_mac_binding_by_datapath);
 
     struct ed_type_lflow_output *lflow_output_data =
         engine_get_internal_data(&en_lflow_output);
@@ -3647,6 +3930,9 @@ main(int argc, char *argv[])
                              debug_dump_lflow_conj_ids,
                              &lflow_output_data->conj_ids);
 
+    unixctl_command_register("debug/ignore-startup-delay", "", 0, 0,
+                             debug_ignore_startup_delay, NULL);
+
     unsigned int ovs_cond_seqno = UINT_MAX;
     unsigned int ovnsb_cond_seqno = UINT_MAX;
     unsigned int ovnsb_expected_cond_seqno = UINT_MAX;
@@ -3668,7 +3954,6 @@ main(int argc, char *argv[])
     /* Main loop. */
     exiting = false;
     restart = false;
-    int64_t startup_ts = time_wall_msec();
     bool sb_monitor_all = false;
     while (!exiting) {
         memory_run();
@@ -3721,7 +4006,6 @@ main(int argc, char *argv[])
             if (!new_ovnsb_cond_seqno) {
                 VLOG_INFO("OVNSB IDL reconnected, force recompute.");
                 engine_set_force_recompute(true);
-                vif_plug_reset_idl_prime_counter();
             }
             ovnsb_cond_seqno = new_ovnsb_cond_seqno;
         }
@@ -3804,7 +4088,7 @@ main(int argc, char *argv[])
             if (br_int) {
                 ct_zones_data = engine_get_data(&en_ct_zones);
                 if (ct_zones_data) {
-                    ofctrl_run(br_int, &ct_zones_data->pending);
+                    ofctrl_run(br_int, ovs_table, &ct_zones_data->pending);
                 }
 
                 if (chassis) {
@@ -3819,7 +4103,7 @@ main(int argc, char *argv[])
                     stopwatch_start(CONTROLLER_LOOP_STOPWATCH_NAME,
                                     time_msec());
                     if (ovnsb_idl_txn) {
-                        if (!ofctrl_can_put()) {
+                        if (ofctrl_has_backlog()) {
                             /* When there are in-flight messages pending to
                              * ovs-vswitchd, we should hold on recomputing so
                              * that the previous flow installations won't be
@@ -3832,7 +4116,7 @@ main(int argc, char *argv[])
                              * change tracking is improved, we can simply skip
                              * this round of engine_run and continue processing
                              * acculated changes incrementally later when
-                             * ofctrl_can_put() returns true. */
+                             * ofctrl_has_backlog() returns false. */
                             engine_run(false);
                         } else {
                             engine_run(true);
@@ -3848,6 +4132,10 @@ main(int argc, char *argv[])
                     }
                     stopwatch_stop(CONTROLLER_LOOP_STOPWATCH_NAME,
                                    time_msec());
+                    if (engine_has_updated()) {
+                        daemon_started_recently_countdown();
+                    }
+
                     ct_zones_data = engine_get_data(&en_ct_zones);
                     if (ovs_idl_txn) {
                         if (ct_zones_data) {
@@ -3955,7 +4243,8 @@ main(int argc, char *argv[])
                         runtime_data ? &runtime_data->lbinding_data : NULL;
                     stopwatch_start(IF_STATUS_MGR_UPDATE_STOPWATCH_NAME,
                                     time_msec());
-                    if_status_mgr_update(if_mgr, binding_data);
+                    if_status_mgr_update(if_mgr, binding_data, chassis,
+                                         !ovnsb_idl_txn);
                     stopwatch_stop(IF_STATUS_MGR_UPDATE_STOPWATCH_NAME,
                                    time_msec());
 
@@ -3981,8 +4270,8 @@ main(int argc, char *argv[])
                                    time_msec());
                     stopwatch_start(IF_STATUS_MGR_RUN_STOPWATCH_NAME,
                                     time_msec());
-                    if_status_mgr_run(if_mgr, binding_data, !ovnsb_idl_txn,
-                                      !ovs_idl_txn);
+                    if_status_mgr_run(if_mgr, binding_data, chassis,
+                                      !ovnsb_idl_txn, !ovs_idl_txn);
                     stopwatch_stop(IF_STATUS_MGR_RUN_STOPWATCH_NAME,
                                    time_msec());
                 }
@@ -4010,7 +4299,7 @@ main(int argc, char *argv[])
             }
 
             store_nb_cfg(ovnsb_idl_txn, ovs_idl_txn, chassis_private,
-                         br_int, delay_nb_cfg_report, startup_ts);
+                         br_int, delay_nb_cfg_report);
 
             if (pending_pkt.conn) {
                 struct ed_type_addr_sets *as_data =
@@ -4042,6 +4331,8 @@ main(int argc, char *argv[])
                 ofctrl_wait();
                 pinctrl_wait(ovnsb_idl_txn);
             }
+
+            binding_wait();
         }
 
         if (!northd_version_match && br_int) {
@@ -4075,9 +4366,8 @@ main(int argc, char *argv[])
              * (or it did not change anything in the database). */
             ct_zones_data = engine_get_data(&en_ct_zones);
             if (ct_zones_data) {
-                struct shash_node *iter, *iter_next;
-                SHASH_FOR_EACH_SAFE (iter, iter_next,
-                                     &ct_zones_data->pending) {
+                struct shash_node *iter;
+                SHASH_FOR_EACH_SAFE (iter, &ct_zones_data->pending) {
                     struct ct_zone_pending_entry *ctzpe = iter->data;
                     if (ctzpe->state == CT_ZONE_DB_SENT) {
                         shash_delete(&ct_zones_data->pending, iter);
@@ -4170,6 +4460,7 @@ loop_done:
     lflow_destroy();
     ofctrl_destroy();
     pinctrl_destroy();
+    binding_destroy();
     patch_destroy();
     if_status_mgr_destroy(if_mgr);
     shash_destroy(&vif_plug_deleted_iface_ids);
@@ -4485,3 +4776,11 @@ debug_dump_lflow_conj_ids(struct unixctl_conn *conn, int argc OVS_UNUSED,
     unixctl_command_reply(conn, ds_cstr(&conj_ids_dump));
     ds_destroy(&conj_ids_dump);
 }
+
+static void
+debug_ignore_startup_delay(struct unixctl_conn *conn, int argc OVS_UNUSED,
+                           const char *argv[] OVS_UNUSED, void *arg OVS_UNUSED)
+{
+    daemon_started_recently_ignore();
+    unixctl_command_reply(conn, NULL);
+}
diff --git a/controller/ovn-controller.h b/controller/ovn-controller.h
index df28c62..3a0e953 100644
--- a/controller/ovn-controller.h
+++ b/controller/ovn-controller.h
@@ -45,10 +45,4 @@ const struct ovsrec_bridge *get_bridge(const struct ovsrec_bridge_table *,
 
 uint32_t get_tunnel_type(const char *name);
 
-struct pb_ld_binding {
-    const struct sbrec_port_binding *pb;
-    const struct local_datapath *ld;
-    struct hmap_node hmap_node;
-};
-
 #endif /* controller/ovn-controller.h */
diff --git a/controller/patch.c b/controller/patch.c
index 0d0d538..12e0b6f 100644
--- a/controller/patch.c
+++ b/controller/patch.c
@@ -307,11 +307,18 @@ patch_run(struct ovsdb_idl_txn *ovs_idl_txn,
 
     /* Now 'existing_ports' only still contains patch ports that exist in the
      * database but shouldn't.  Delete them from the database. */
-    struct shash_node *port_node, *port_next_node;
-    SHASH_FOR_EACH_SAFE (port_node, port_next_node, &existing_ports) {
+    struct shash_node *port_node;
+    SHASH_FOR_EACH_SAFE (port_node, &existing_ports) {
         port = port_node->data;
         shash_delete(&existing_ports, port_node);
-        remove_port(bridge_table, port);
+        /* Wait for some iterations before really deleting any patch ports,
+         * because with conditional monitoring it is possible that related SB
+         * data is not completely downloaded yet after the last restart of
+         * ovn-controller.  Otherwise this may cause unnecessary data plane
+         * interruption during restart/upgrade. */
+        if (!daemon_started_recently()) {
+            remove_port(bridge_table, port);
+        }
     }
     shash_destroy(&existing_ports);
 }
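
The daemon_started_recently() / _countdown() / _ignore() helpers used here and
in the main loop are not part of this hunk; one plausible shape for them,
assuming a simple per-process countdown plus the debug/ignore-startup-delay
override, is sketched below. The constant and the countdown policy are
assumptions.

    /* Illustrative sketch only: a per-process countdown decremented on engine
     * iterations that produced updates, with an override hook for the
     * debug/ignore-startup-delay unixctl command.  The real definitions live
     * elsewhere in the tree and may differ. */
    #include <stdbool.h>

    #define STARTUP_ITERATIONS 50           /* Assumed value. */

    static int startup_iterations_left = STARTUP_ITERATIONS;

    void
    daemon_started_recently_countdown(void)
    {
        if (startup_iterations_left > 0) {
            startup_iterations_left--;
        }
    }

    void
    daemon_started_recently_ignore(void)
    {
        startup_iterations_left = 0;
    }

    bool
    daemon_started_recently(void)
    {
        return startup_iterations_left > 0;
    }
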
diff --git a/controller/physical.c b/controller/physical.c
index adf4632..f3c8bdd 100644
--- a/controller/physical.c
+++ b/controller/physical.c
@@ -40,7 +40,9 @@
 #include "lib/mcast-group-index.h"
 #include "lib/ovn-sb-idl.h"
 #include "lib/ovn-util.h"
+#include "ovn/actions.h"
 #include "physical.h"
+#include "pinctrl.h"
 #include "openvswitch/shash.h"
 #include "simap.h"
 #include "smap.h"
@@ -60,6 +62,11 @@ struct zone_ids {
     int snat;                   /* MFF_LOG_SNAT_ZONE. */
 };
 
+struct tunnel {
+    struct ovs_list list_node;
+    const struct chassis_tunnel *tun;
+};
+
 static void
 load_logical_ingress_metadata(const struct sbrec_port_binding *binding,
                               const struct zone_ids *zone_ids,
@@ -124,17 +131,15 @@ put_resubmit(uint8_t table_id, struct ofpbuf *ofpacts)
 }
 
 /*
- * For a port binding, get the corresponding ovn-chassis-id tunnel port
- * from the associated encap.
+ * For an encap and a chassis, get the corresponding ovn-chassis-id tunnel
+ * port.
  */
 static struct chassis_tunnel *
-get_port_binding_tun(const struct sbrec_port_binding *binding,
+get_port_binding_tun(const struct sbrec_encap *encap,
+                     const struct sbrec_chassis *chassis,
                      const struct hmap *chassis_tunnels)
 {
-    struct sbrec_encap *encap = binding->encap;
-    struct sbrec_chassis *chassis = binding->chassis;
     struct chassis_tunnel *tun = NULL;
-
     if (encap) {
         tun = chassis_tunnel_find(chassis_tunnels, chassis->name, encap->ip);
     }
@@ -280,98 +285,194 @@ put_remote_port_redirect_bridged(const struct
 }
 
 static void
-put_remote_port_redirect_overlay(const struct
-                                 sbrec_port_binding *binding,
-                                 bool is_ha_remote,
-                                 struct ha_chassis_ordered *ha_ch_ordered,
+match_outport_dp_and_port_keys(struct match *match,
+                               uint32_t dp_key, uint32_t port_key)
+{
+    match_init_catchall(match);
+    match_set_metadata(match, htonll(dp_key));
+    match_set_reg(match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+}
+
+static struct sbrec_encap *
+find_additional_encap_for_chassis(const struct sbrec_port_binding *pb,
+                                  const struct sbrec_chassis *chassis_rec)
+{
+    for (size_t i = 0; i < pb->n_additional_encap; i++) {
+        if (!strcmp(pb->additional_encap[i]->chassis_name,
+                    chassis_rec->name)) {
+            return pb->additional_encap[i];
+        }
+    }
+    return NULL;
+}
+
+static struct ovs_list *
+get_remote_tunnels(const struct sbrec_port_binding *binding,
+                   const struct sbrec_chassis *chassis,
+                   const struct hmap *chassis_tunnels)
+{
+    const struct chassis_tunnel *tun;
+
+    struct ovs_list *tunnels = xmalloc(sizeof *tunnels);
+    ovs_list_init(tunnels);
+
+    if (binding->chassis && binding->chassis != chassis) {
+        tun = get_port_binding_tun(binding->encap, binding->chassis,
+                                   chassis_tunnels);
+        if (!tun) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_WARN_RL(
+                &rl, "Failed to locate tunnel to reach main chassis %s "
+                     "for port %s. Cloning packets disabled for the chassis.",
+                binding->chassis->name, binding->logical_port);
+        } else {
+            struct tunnel *tun_elem = xmalloc(sizeof *tun_elem);
+            tun_elem->tun = tun;
+            ovs_list_push_back(tunnels, &tun_elem->list_node);
+        }
+    }
+
+    for (size_t i = 0; i < binding->n_additional_chassis; i++) {
+        if (binding->additional_chassis[i] == chassis) {
+            continue;
+        }
+        const struct sbrec_encap *additional_encap;
+        additional_encap = find_additional_encap_for_chassis(binding, chassis);
+        tun = get_port_binding_tun(additional_encap,
+                                   binding->additional_chassis[i],
+                                   chassis_tunnels);
+        if (!tun) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_WARN_RL(
+                &rl, "Failed to locate tunnel to reach additional chassis %s "
+                     "for port %s. Cloning packets disabled for the chassis.",
+                binding->additional_chassis[i]->name, binding->logical_port);
+            continue;
+        }
+        struct tunnel *tun_elem = xmalloc(sizeof *tun_elem);
+        tun_elem->tun = tun;
+        ovs_list_push_back(tunnels, &tun_elem->list_node);
+    }
+    return tunnels;
+}
+
+static void
+put_remote_port_redirect_overlay(const struct sbrec_port_binding *binding,
                                  enum mf_field_id mff_ovn_geneve,
-                                 const struct chassis_tunnel *tun,
                                  uint32_t port_key,
                                  struct match *match,
                                  struct ofpbuf *ofpacts_p,
+                                 const struct sbrec_chassis *chassis,
                                  const struct hmap *chassis_tunnels,
                                  struct ovn_desired_flow_table *flow_table)
 {
-    if (!is_ha_remote) {
-        /* Setup encapsulation */
-        const struct chassis_tunnel *rem_tun =
-            get_port_binding_tun(binding, chassis_tunnels);
-        if (!rem_tun) {
-            return;
+    /* Setup encapsulation */
+    struct ovs_list *tuns = get_remote_tunnels(binding, chassis,
+                                               chassis_tunnels);
+    if (!ovs_list_is_empty(tuns)) {
+        bool is_vtep_port = !strcmp(binding->type, "vtep");
+        /* rewrite MFF_IN_PORT to bypass OpenFlow loopback check for ARP/ND
+         * responder in L3 networks. */
+        if (is_vtep_port) {
+            put_load(ofp_to_u16(OFPP_NONE), MFF_IN_PORT, 0, 16, ofpacts_p);
         }
-        put_encapsulation(mff_ovn_geneve, tun, binding->datapath, port_key,
-                          !strcmp(binding->type, "vtep"),
-                          ofpacts_p);
-        /* Output to tunnel. */
-        ofpact_put_OUTPUT(ofpacts_p)->port = rem_tun->ofport;
-    } else {
-        /* Make sure all tunnel endpoints use the same encapsulation,
-         * and set it up */
-        for (size_t i = 0; i < ha_ch_ordered->n_ha_ch; i++) {
-            const struct sbrec_chassis *ch = ha_ch_ordered->ha_ch[i].chassis;
-            if (!ch) {
-                continue;
-            }
-            if (!tun) {
-                tun = chassis_tunnel_find(chassis_tunnels, ch->name, NULL);
-            } else {
-                struct chassis_tunnel *chassis_tunnel =
-                    chassis_tunnel_find(chassis_tunnels, ch->name, NULL);
-                if (chassis_tunnel &&
-                    tun->type != chassis_tunnel->type) {
-                    static struct vlog_rate_limit rl =
-                                  VLOG_RATE_LIMIT_INIT(1, 1);
-                    VLOG_ERR_RL(&rl, "Port %s has Gateway_Chassis "
-                                "with mixed encapsulations, only "
-                                "uniform encapsulations are "
-                                "supported.", binding->logical_port);
-                    return;
-                }
-            }
+
+        struct tunnel *tun;
+        LIST_FOR_EACH (tun, list_node, tuns) {
+            put_encapsulation(mff_ovn_geneve, tun->tun,
+                              binding->datapath, port_key, is_vtep_port,
+                              ofpacts_p);
+            ofpact_put_OUTPUT(ofpacts_p)->port = tun->tun->ofport;
+        }
+        put_resubmit(OFTABLE_LOCAL_OUTPUT, ofpacts_p);
+        ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100,
+                        binding->header_.uuid.parts[0], match, ofpacts_p,
+                        &binding->header_.uuid);
+    }
+    struct tunnel *tun_elem;
+    LIST_FOR_EACH_POP (tun_elem, list_node, tuns) {
+        free(tun_elem);
+    }
+    free(tuns);
+}
+
+static void
+put_remote_port_redirect_overlay_ha_remote(
+    const struct sbrec_port_binding *binding,
+    struct ha_chassis_ordered *ha_ch_ordered,
+    enum mf_field_id mff_ovn_geneve, uint32_t port_key,
+    struct match *match, struct ofpbuf *ofpacts_p,
+    const struct hmap *chassis_tunnels,
+    struct ovn_desired_flow_table *flow_table)
+{
+    /* Make sure all tunnel endpoints use the same encapsulation,
+     * and set it up */
+    const struct chassis_tunnel *tun = NULL;
+    for (size_t i = 0; i < ha_ch_ordered->n_ha_ch; i++) {
+        const struct sbrec_chassis *ch = ha_ch_ordered->ha_ch[i].chassis;
+        if (!ch) {
+            continue;
         }
         if (!tun) {
-            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
-            VLOG_ERR_RL(&rl, "No tunnel endpoint found for HA chassis in "
-                        "HA chassis group of port %s",
-                        binding->logical_port);
-            return;
+            tun = chassis_tunnel_find(chassis_tunnels, ch->name, NULL);
+        } else {
+            struct chassis_tunnel *chassis_tunnel =
+                chassis_tunnel_find(chassis_tunnels, ch->name, NULL);
+            if (chassis_tunnel &&
+                tun->type != chassis_tunnel->type) {
+                static struct vlog_rate_limit rl =
+                              VLOG_RATE_LIMIT_INIT(1, 1);
+                VLOG_ERR_RL(&rl, "Port %s has Gateway_Chassis "
+                            "with mixed encapsulations, only "
+                            "uniform encapsulations are "
+                            "supported.", binding->logical_port);
+                return;
+            }
         }
+    }
+    if (!tun) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+        VLOG_ERR_RL(&rl, "No tunnel endpoint found for HA chassis in "
+                    "HA chassis group of port %s",
+                    binding->logical_port);
+        return;
+    }
 
-        put_encapsulation(mff_ovn_geneve, tun, binding->datapath, port_key,
-                          !strcmp(binding->type, "vtep"),
-                          ofpacts_p);
+    put_encapsulation(mff_ovn_geneve, tun, binding->datapath, port_key,
+                      !strcmp(binding->type, "vtep"),
+                      ofpacts_p);
 
-        /* Output to tunnels with active/backup */
-        struct ofpact_bundle *bundle = ofpact_put_BUNDLE(ofpacts_p);
+    /* Output to tunnels with active/backup */
+    struct ofpact_bundle *bundle = ofpact_put_BUNDLE(ofpacts_p);
 
-        for (size_t i = 0; i < ha_ch_ordered->n_ha_ch; i++) {
-            const struct sbrec_chassis *ch =
-                ha_ch_ordered->ha_ch[i].chassis;
-            if (!ch) {
-                continue;
-            }
-            tun = chassis_tunnel_find(chassis_tunnels, ch->name, NULL);
-            if (!tun) {
-                continue;
-            }
-            if (bundle->n_members >= BUNDLE_MAX_MEMBERS) {
-                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
-                VLOG_WARN_RL(&rl, "Remote endpoints for port beyond "
-                             "BUNDLE_MAX_MEMBERS");
-                break;
-            }
-            ofpbuf_put(ofpacts_p, &tun->ofport, sizeof tun->ofport);
-            bundle = ofpacts_p->header;
-            bundle->n_members++;
+    for (size_t i = 0; i < ha_ch_ordered->n_ha_ch; i++) {
+        const struct sbrec_chassis *ch =
+            ha_ch_ordered->ha_ch[i].chassis;
+        if (!ch) {
+            continue;
         }
-
-        bundle->algorithm = NX_BD_ALG_ACTIVE_BACKUP;
-        /* Although ACTIVE_BACKUP bundle algorithm seems to ignore
-         * the next two fields, those are always set */
-        bundle->basis = 0;
-        bundle->fields = NX_HASH_FIELDS_ETH_SRC;
-        ofpact_finish_BUNDLE(ofpacts_p, &bundle);
+        tun = chassis_tunnel_find(chassis_tunnels, ch->name, NULL);
+        if (!tun) {
+            continue;
+        }
+        if (bundle->n_members >= BUNDLE_MAX_MEMBERS) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+            VLOG_WARN_RL(&rl, "Remote endpoints for port beyond "
+                         "BUNDLE_MAX_MEMBERS");
+            break;
+        }
+        ofpbuf_put(ofpacts_p, &tun->ofport, sizeof tun->ofport);
+        bundle = ofpacts_p->header;
+        bundle->n_members++;
     }
+
+    bundle->algorithm = NX_BD_ALG_ACTIVE_BACKUP;
+    /* Although ACTIVE_BACKUP bundle algorithm seems to ignore
+     * the next two fields, those are always set */
+    bundle->basis = 0;
+    bundle->fields = NX_HASH_FIELDS_ETH_SRC;
+    ofpact_finish_BUNDLE(ofpacts_p, &bundle);
+
     ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100,
                     binding->header_.uuid.parts[0],
                     match, ofpacts_p, &binding->header_.uuid);
@@ -443,9 +544,9 @@ populate_remote_chassis_macs(const struct sbrec_chassis *my_chassis,
 static void
 free_remote_chassis_macs(void)
 {
-    struct remote_chassis_mac *mac, *next_mac;
+    struct remote_chassis_mac *mac;
 
-    HMAP_FOR_EACH_SAFE (mac, next_mac, hmap_node, &remote_chassis_macs) {
+    HMAP_FOR_EACH_SAFE (mac, hmap_node, &remote_chassis_macs) {
         hmap_remove(&remote_chassis_macs, &mac->hmap_node);
         free(mac->chassis_mac);
         free(mac->chassis_id);
@@ -677,7 +778,6 @@ put_replace_router_port_mac_flows(struct ovsdb_idl_index
          * a. Flow replaces ingress router port mac with a chassis mac.
          * b. Flow appends the vlan id localnet port is configured with.
          */
-        match_init_catchall(&match);
         ofpbuf_clear(ofpacts_p);
 
         ovs_assert(rport_binding->n_mac == 1);
@@ -691,8 +791,7 @@ put_replace_router_port_mac_flows(struct ovsdb_idl_index
         }
 
         /* Replace Router mac flow */
-        match_set_metadata(&match, htonll(dp_key));
-        match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+        match_outport_dp_and_port_keys(&match, dp_key, port_key);
         match_set_dl_src(&match, router_port_mac);
 
         replace_mac = ofpact_put_SET_ETH_SRC(ofpacts_p);
@@ -711,6 +810,22 @@ put_replace_router_port_mac_flows(struct ovsdb_idl_index
 }
 
 static void
+put_zones_ofpacts(const struct zone_ids *zone_ids, struct ofpbuf *ofpacts_p)
+{
+    if (zone_ids) {
+        if (zone_ids->ct) {
+            put_load(zone_ids->ct, MFF_LOG_CT_ZONE, 0, 32, ofpacts_p);
+        }
+        if (zone_ids->dnat) {
+            put_load(zone_ids->dnat, MFF_LOG_DNAT_ZONE, 0, 32, ofpacts_p);
+        }
+        if (zone_ids->snat) {
+            put_load(zone_ids->snat, MFF_LOG_SNAT_ZONE, 0, 32, ofpacts_p);
+        }
+    }
+}
+
+static void
 put_local_common_flows(uint32_t dp_key,
                        const struct sbrec_port_binding *pb,
                        const struct sbrec_port_binding *parent_pb,
@@ -730,24 +845,12 @@ put_local_common_flows(uint32_t dp_key,
      * table 39.
      */
 
-    match_init_catchall(&match);
     ofpbuf_clear(ofpacts_p);
 
     /* Match MFF_LOG_DATAPATH, MFF_LOG_OUTPORT. */
-    match_set_metadata(&match, htonll(dp_key));
-    match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+    match_outport_dp_and_port_keys(&match, dp_key, port_key);
 
-    if (zone_ids) {
-        if (zone_ids->ct) {
-            put_load(zone_ids->ct, MFF_LOG_CT_ZONE, 0, 32, ofpacts_p);
-        }
-        if (zone_ids->dnat) {
-            put_load(zone_ids->dnat, MFF_LOG_DNAT_ZONE, 0, 32, ofpacts_p);
-        }
-        if (zone_ids->snat) {
-            put_load(zone_ids->snat, MFF_LOG_SNAT_ZONE, 0, 32, ofpacts_p);
-        }
-    }
+    put_zones_ofpacts(zone_ids, ofpacts_p);
 
     /* Resubmit to table 39. */
     put_resubmit(OFTABLE_CHECK_LOOPBACK, ofpacts_p);
@@ -793,10 +896,8 @@ put_local_common_flows(uint32_t dp_key,
      * */
 
     bool nested_container = parent_pb ? true: false;
-    match_init_catchall(&match);
     ofpbuf_clear(ofpacts_p);
-    match_set_metadata(&match, htonll(dp_key));
-    match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+    match_outport_dp_and_port_keys(&match, dp_key, port_key);
     if (!nested_container) {
         match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0,
                              MLF_ALLOW_LOOPBACK, MLF_ALLOW_LOOPBACK);
@@ -827,11 +928,8 @@ put_local_common_flows(uint32_t dp_key,
          * ports even if they don't have any child ports which is
          * unnecessary.
          */
-        match_init_catchall(&match);
         ofpbuf_clear(ofpacts_p);
-        match_set_metadata(&match, htonll(dp_key));
-        match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0,
-                      parent_pb->tunnel_key);
+        match_outport_dp_and_port_keys(&match, dp_key, parent_pb->tunnel_key);
         match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0,
                              MLF_NESTED_CONTAINER, MLF_NESTED_CONTAINER);
 
@@ -850,17 +948,7 @@ load_logical_ingress_metadata(const struct sbrec_port_binding *binding,
                               const struct zone_ids *zone_ids,
                               struct ofpbuf *ofpacts_p)
 {
-    if (zone_ids) {
-        if (zone_ids->ct) {
-            put_load(zone_ids->ct, MFF_LOG_CT_ZONE, 0, 32, ofpacts_p);
-        }
-        if (zone_ids->dnat) {
-            put_load(zone_ids->dnat, MFF_LOG_DNAT_ZONE, 0, 32, ofpacts_p);
-        }
-        if (zone_ids->snat) {
-            put_load(zone_ids->snat, MFF_LOG_SNAT_ZONE, 0, 32, ofpacts_p);
-        }
-    }
+    put_zones_ofpacts(zone_ids, ofpacts_p);
 
     /* Set MFF_LOG_DATAPATH and MFF_LOG_INPORT. */
     uint32_t dp_key = binding->datapath->tunnel_key;
@@ -891,6 +979,152 @@ get_binding_peer(struct ovsdb_idl_index *sbrec_port_binding_by_name,
     return peer;
 }
 
+enum access_type {
+    PORT_LOCAL = 0,
+    PORT_LOCALNET,
+    PORT_REMOTE,
+    PORT_HA_REMOTE,
+};
+
+static void
+setup_rarp_activation_strategy(const struct sbrec_port_binding *binding,
+                               ofp_port_t ofport, struct zone_ids *zone_ids,
+                               struct ovn_desired_flow_table *flow_table)
+{
+    struct match match = MATCH_CATCHALL_INITIALIZER;
+    uint64_t stub[1024 / 8];
+    struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(stub);
+
+    /* Unblock the port on ingress RARP. */
+    match_set_dl_type(&match, htons(ETH_TYPE_RARP));
+    match_set_in_port(&match, ofport);
+
+    load_logical_ingress_metadata(binding, zone_ids, &ofpacts);
+
+    encode_controller_op(ACTION_OPCODE_ACTIVATION_STRATEGY_RARP,
+                         NX_CTLR_NO_METER, &ofpacts);
+
+    put_resubmit(OFTABLE_LOG_INGRESS_PIPELINE, &ofpacts);
+
+    ofctrl_add_flow(flow_table, OFTABLE_PHY_TO_LOG, 1010,
+                    binding->header_.uuid.parts[0],
+                    &match, &ofpacts, &binding->header_.uuid);
+    ofpbuf_clear(&ofpacts);
+
+    /* Block all non-RARP traffic for the port, both directions. */
+    match_init_catchall(&match);
+    match_set_in_port(&match, ofport);
+
+    ofctrl_add_flow(flow_table, OFTABLE_PHY_TO_LOG, 1000,
+                    binding->header_.uuid.parts[0],
+                    &match, &ofpacts, &binding->header_.uuid);
+
+    match_init_catchall(&match);
+    uint32_t dp_key = binding->datapath->tunnel_key;
+    uint32_t port_key = binding->tunnel_key;
+    match_set_metadata(&match, htonll(dp_key));
+    match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+
+    ofctrl_add_flow(flow_table, OFTABLE_LOG_TO_PHY, 1000,
+                    binding->header_.uuid.parts[0],
+                    &match, &ofpacts, &binding->header_.uuid);
+
+    ofpbuf_uninit(&ofpacts);
+}
+
+static void
+setup_activation_strategy(const struct sbrec_port_binding *binding,
+                          const struct sbrec_chassis *chassis,
+                          uint32_t dp_key, uint32_t port_key,
+                          ofp_port_t ofport, struct zone_ids *zone_ids,
+                          struct ovn_desired_flow_table *flow_table)
+{
+    for (size_t i = 0; i < binding->n_additional_chassis; i++) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+        if (binding->additional_chassis[i] == chassis) {
+            const char *strategy = smap_get(&binding->options,
+                                            "activation-strategy");
+            if (strategy
+                    && !lport_is_activated_by_activation_strategy(binding,
+                                                                  chassis)
+                    && !pinctrl_is_port_activated(dp_key, port_key)) {
+                if (!strcmp(strategy, "rarp")) {
+                    setup_rarp_activation_strategy(binding, ofport,
+                                                   zone_ids, flow_table);
+                } else {
+                    VLOG_WARN_RL(&rl,
+                                 "Unknown activation strategy defined for "
+                                 "port %s: %s",
+                                 binding->logical_port, strategy);
+                    return;
+                }
+            }
+            return;
+        }
+    }
+}
+
+static void
+enforce_tunneling_for_multichassis_ports(
+    struct local_datapath *ld,
+    const struct sbrec_port_binding *binding,
+    const struct sbrec_chassis *chassis,
+    const struct hmap *chassis_tunnels,
+    enum mf_field_id mff_ovn_geneve,
+    struct ovn_desired_flow_table *flow_table)
+{
+    if (shash_is_empty(&ld->multichassis_ports)) {
+        return;
+    }
+
+    struct ovs_list *tuns = get_remote_tunnels(binding, chassis,
+                                               chassis_tunnels);
+    if (ovs_list_is_empty(tuns)) {
+        free(tuns);
+        return;
+    }
+
+    uint32_t dp_key = binding->datapath->tunnel_key;
+    uint32_t port_key = binding->tunnel_key;
+
+    struct shash_node *node;
+    SHASH_FOR_EACH (node, &ld->multichassis_ports) {
+        const struct sbrec_port_binding *mcp = node->data;
+
+        struct ofpbuf ofpacts;
+        ofpbuf_init(&ofpacts, 0);
+
+        bool is_vtep_port = !strcmp(binding->type, "vtep");
+        /* Rewrite MFF_IN_PORT to bypass the OpenFlow loopback check for
+         * the ARP/ND responder in L3 networks. */
+        if (is_vtep_port) {
+            put_load(ofp_to_u16(OFPP_NONE), MFF_IN_PORT, 0, 16, &ofpacts);
+        }
+
+        struct match match;
+        match_outport_dp_and_port_keys(&match, dp_key, port_key);
+        match_set_reg(&match, MFF_LOG_INPORT - MFF_REG0, mcp->tunnel_key);
+
+        struct tunnel *tun;
+        LIST_FOR_EACH (tun, list_node, tuns) {
+            put_encapsulation(mff_ovn_geneve, tun->tun,
+                              binding->datapath, port_key, is_vtep_port,
+                              &ofpacts);
+            ofpact_put_OUTPUT(&ofpacts)->port = tun->tun->ofport;
+        }
+        ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 110,
+                        binding->header_.uuid.parts[0], &match, &ofpacts,
+                        &binding->header_.uuid);
+        ofpbuf_uninit(&ofpacts);
+    }
+
+    struct tunnel *tun_elem;
+    LIST_FOR_EACH_POP (tun_elem, list_node, tuns) {
+        free(tun_elem);
+    }
+    free(tuns);
+}
+
 static void
 consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                       enum mf_field_id mff_ovn_geneve,
@@ -927,10 +1161,8 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
         put_local_common_flows(dp_key, binding, NULL, &binding_zones,
                                ofpacts_p, flow_table);
 
-        match_init_catchall(&match);
         ofpbuf_clear(ofpacts_p);
-        match_set_metadata(&match, htonll(dp_key));
-        match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+        match_outport_dp_and_port_keys(&match, dp_key, port_key);
 
         size_t clone_ofs = ofpacts_p->size;
         struct ofpact_nest *clone = ofpact_put_CLONE(ofpacts_p);
@@ -955,10 +1187,6 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                         &match, ofpacts_p, &binding->header_.uuid);
         return;
     }
-
-    struct ha_chassis_ordered *ha_ch_ordered
-        = ha_chassis_get_ordered(binding->ha_chassis_group);
-
     if (!strcmp(binding->type, "chassisredirect")
         && (binding->chassis == chassis
             || ha_chassis_group_is_active(binding->ha_chassis_group,
@@ -973,10 +1201,8 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
          * output port is changed from the "chassisredirect" port to the
          * underlying distributed port. */
 
-        match_init_catchall(&match);
         ofpbuf_clear(ofpacts_p);
-        match_set_metadata(&match, htonll(dp_key));
-        match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+        match_outport_dp_and_port_keys(&match, dp_key, port_key);
 
         const char *distributed_port = smap_get_def(&binding->options,
                                                     "distributed-port", "");
@@ -1006,15 +1232,7 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
 
             struct zone_ids zone_ids = get_zone_ids(distributed_binding,
                                                     ct_zones);
-            if (zone_ids.ct) {
-                put_load(zone_ids.ct, MFF_LOG_CT_ZONE, 0, 32, ofpacts_p);
-            }
-            if (zone_ids.dnat) {
-                put_load(zone_ids.dnat, MFF_LOG_DNAT_ZONE, 0, 32, ofpacts_p);
-            }
-            if (zone_ids.snat) {
-                put_load(zone_ids.snat, MFF_LOG_SNAT_ZONE, 0, 32, ofpacts_p);
-            }
+            put_zones_ofpacts(&zone_ids, ofpacts_p);
 
             /* Resubmit to table 39. */
             put_resubmit(OFTABLE_CHECK_LOOPBACK, ofpacts_p);
@@ -1024,14 +1242,14 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                         binding->header_.uuid.parts[0],
                         &match, ofpacts_p, &binding->header_.uuid);
 
-        goto out;
+        return;
     }
 
     /* Find the OpenFlow port for the logical port, as 'ofport'.  This is
      * one of:
      *
      *     - If the port is a VIF on the chassis we're managing, the
-     *       OpenFlow port for the VIF.  'tun' will be NULL.
+     *       OpenFlow port for the VIF.
      *
      *       The same logic handles ports that OVN implements as Open vSwitch
      *       patch ports, that is, "localnet" and "l2gateway" ports.
@@ -1041,20 +1259,15 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
      *
      *       For a localnet or l2gateway patch port, if a VLAN ID was
      *       configured, 'tag' is set to that VLAN ID; otherwise 'tag' is 0.
-     *
-     *     - If the port is on a remote chassis, the OpenFlow port for a
-     *       tunnel to the VIF's remote chassis.  'tun' identifies that
-     *       tunnel.
      */
 
     int tag = 0;
     bool nested_container = false;
     const struct sbrec_port_binding *parent_port = NULL;
     ofp_port_t ofport;
-    bool is_remote = false;
     if (binding->parent_port && *binding->parent_port) {
         if (!binding->tag) {
-            goto out;
+            return;
         }
         ofport = local_binding_get_lport_ofport(local_bindings,
                                                 binding->parent_port);
@@ -1077,46 +1290,42 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                                                 binding->logical_port);
         if (ofport && !lport_can_bind_on_this_chassis(chassis, binding)) {
             /* Even though there is an ofport for this port_binding, it is
-             * requested on a different chassis. So ignore this ofport.
+             * requested on a different chassis. So ignore this ofport.
              */
             ofport = 0;
         }
     }
 
-    bool is_ha_remote = false;
-    const struct chassis_tunnel *tun = NULL;
     const struct sbrec_port_binding *localnet_port =
         get_localnet_port(local_datapaths, dp_key);
+
+    struct ha_chassis_ordered *ha_ch_ordered;
+    ha_ch_ordered = ha_chassis_get_ordered(binding->ha_chassis_group);
+
+    /* Determine how the port is accessed. */
+    enum access_type access_type = PORT_LOCAL;
     if (!ofport) {
-        /* It is remote port, may be reached by tunnel or localnet port */
-        is_remote = true;
-        if (localnet_port) {
+        /* Enforce tunneling while we clone packets to additional chassis,
+         * because otherwise the upstream switch won't flood the packet to
+         * both chassis. */
+        if (localnet_port && !binding->additional_chassis) {
             ofport = u16_to_ofp(simap_get(patch_ofports,
                                           localnet_port->logical_port));
             if (!ofport) {
                 goto out;
             }
+            access_type = PORT_LOCALNET;
         } else {
             if (!ha_ch_ordered || ha_ch_ordered->n_ha_ch < 2) {
-                /* It's on a single remote chassis */
-                if (!binding->chassis) {
-                    goto out;
-                }
-                tun = chassis_tunnel_find(chassis_tunnels,
-                                          binding->chassis->name, NULL);
-                if (!tun) {
-                    goto out;
-                }
-                ofport = tun->ofport;
+                access_type = PORT_REMOTE;
             } else {
                 /* It's distributed across the chassis belonging to
                  * an HA chassis group. */
-                is_ha_remote = true;
+                access_type = PORT_HA_REMOTE;
             }
         }
     }
 
-    if (!is_remote) {
+    if (access_type == PORT_LOCAL) {
         /* Packets that arrive from a vif can belong to a VM or
          * to a container located inside that VM. Packets that
          * arrive from containers have a tag (vlan) associated with them.
@@ -1171,6 +1380,9 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
             }
         }
 
+        setup_activation_strategy(binding, chassis, dp_key, port_key,
+                                  ofport, &zone_ids, flow_table);
+
         /* Remember the size with just the strip_vlan action added so far,
          * as we're going to remove it with ofpbuf_pull() later. */
         uint32_t ofpacts_orig_size = ofpacts_p->size;
@@ -1210,10 +1422,8 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
          * =======================
          *
          * Deliver the packet to the local vif. */
-        match_init_catchall(&match);
         ofpbuf_clear(ofpacts_p);
-        match_set_metadata(&match, htonll(dp_key));
-        match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+        match_outport_dp_and_port_keys(&match, dp_key, port_key);
         if (tag) {
             /* For containers sitting behind a local vif, tag the packets
              * before delivering them. */
@@ -1247,10 +1457,8 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
          */
         if (!strcmp(binding->type, "localnet")) {
             /* do not forward traffic from localport to localnet port */
-            match_init_catchall(&match);
             ofpbuf_clear(ofpacts_p);
-            match_set_metadata(&match, htonll(dp_key));
-            match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+            match_outport_dp_and_port_keys(&match, dp_key, port_key);
             match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0,
                                  MLF_LOCALPORT, MLF_LOCALPORT);
             ofctrl_add_flow(flow_table, OFTABLE_CHECK_LOOPBACK, 160,
@@ -1258,10 +1466,8 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                             ofpacts_p, &binding->header_.uuid);
 
             /* Drop LOCAL_ONLY traffic leaking through localnet ports. */
-            match_init_catchall(&match);
             ofpbuf_clear(ofpacts_p);
-            match_set_metadata(&match, htonll(dp_key));
-            match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+            match_outport_dp_and_port_keys(&match, dp_key, port_key);
             match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0,
                                  MLF_LOCAL_ONLY, MLF_LOCAL_ONLY);
             ofctrl_add_flow(flow_table, OFTABLE_CHECK_LOOPBACK, 160,
@@ -1300,10 +1506,7 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                         continue;
                     }
 
-                    match_init_catchall(&match);
-                    match_set_metadata(&match, htonll(dp_key));
-                    match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0,
-                                  port_key);
+                    match_outport_dp_and_port_keys(&match, dp_key, port_key);
                     match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0,
                                          MLF_LOCALPORT, MLF_LOCALPORT);
                     match_set_dl_dst(&match, peer_mac);
@@ -1333,7 +1536,7 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                             binding->header_.uuid.parts[0], &match,
                             ofpacts_p, &binding->header_.uuid);
         }
-    } else if (!tun && !is_ha_remote) {
+    } else if (access_type == PORT_LOCALNET) {
         /* Remote port connected by localnet port */
         /* Table 38, priority 100.
          * =======================
@@ -1343,12 +1546,10 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
          * to connected localnet port and resubmits to same table.
          */
 
-        match_init_catchall(&match);
         ofpbuf_clear(ofpacts_p);
 
         /* Match MFF_LOG_DATAPATH, MFF_LOG_OUTPORT. */
-        match_set_metadata(&match, htonll(dp_key));
-        match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+        match_outport_dp_and_port_keys(&match, dp_key, port_key);
 
         put_load(localnet_port->tunnel_key, MFF_LOG_OUTPORT, 0, 32, ofpacts_p);
 
@@ -1357,37 +1558,39 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
         ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 100,
                         binding->header_.uuid.parts[0],
                         &match, ofpacts_p, &binding->header_.uuid);
-    } else {
 
-        const char *redirect_type = smap_get(&binding->options,
-                                             "redirect-type");
+        enforce_tunneling_for_multichassis_ports(
+            ld, binding, chassis, chassis_tunnels, mff_ovn_geneve, flow_table);
 
-        /* Remote port connected by tunnel */
-
-        /* Table 38, priority 100.
-         * =======================
-         *
-         * Handles traffic that needs to be sent to a remote hypervisor.  Each
-         * flow matches an output port that includes a logical port on a remote
-         * hypervisor, and tunnels the packet to that hypervisor.
-         */
-        match_init_catchall(&match);
-        ofpbuf_clear(ofpacts_p);
+        /* No more tunneling to set up. */
+        goto out;
+    }
 
-        /* Match MFF_LOG_DATAPATH, MFF_LOG_OUTPORT. */
-        match_set_metadata(&match, htonll(dp_key));
-        match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+    /* Send packets to additional chassis if needed. */
+    const char *redirect_type = smap_get(&binding->options,
+                                         "redirect-type");
 
-        if (redirect_type && !strcasecmp(redirect_type, "bridged")) {
-            put_remote_port_redirect_bridged(binding, local_datapaths,
-                                             ld, &match, ofpacts_p,
-                                             flow_table);
-        } else {
-            put_remote_port_redirect_overlay(binding, is_ha_remote,
-                                             ha_ch_ordered, mff_ovn_geneve,
-                                             tun, port_key, &match, ofpacts_p,
-                                             chassis_tunnels, flow_table);
-        }
+    /* Table 38, priority 100.
+     * =======================
+     *
+     * Handles traffic that needs to be sent to a remote hypervisor.  Each
+     * flow matches an output port that includes a logical port on a remote
+     * hypervisor, and tunnels the packet to that hypervisor.
+     */
+    ofpbuf_clear(ofpacts_p);
+    match_outport_dp_and_port_keys(&match, dp_key, port_key);
+
+    if (redirect_type && !strcasecmp(redirect_type, "bridged")) {
+        put_remote_port_redirect_bridged(
+            binding, local_datapaths, ld, &match, ofpacts_p, flow_table);
+    } else if (access_type == PORT_HA_REMOTE) {
+        put_remote_port_redirect_overlay_ha_remote(
+            binding, ha_ch_ordered, mff_ovn_geneve, port_key,
+            &match, ofpacts_p, chassis_tunnels, flow_table);
+    } else {
+        put_remote_port_redirect_overlay(
+            binding, mff_ovn_geneve, port_key, &match, ofpacts_p,
+            chassis, chassis_tunnels, flow_table);
     }
 out:
     if (ha_ch_ordered) {
@@ -1406,6 +1609,26 @@ get_vxlan_port_key(int64_t port_key)
     return port_key;
 }
 
+/* Encapsulate and send to a single remote chassis. */
+static void
+tunnel_to_chassis(enum mf_field_id mff_ovn_geneve,
+                  const char *chassis_name,
+                  const struct hmap *chassis_tunnels,
+                  const struct sbrec_datapath_binding *datapath,
+                  uint16_t outport, struct ofpbuf *remote_ofpacts)
+{
+    const struct chassis_tunnel *tun
+        = chassis_tunnel_find(chassis_tunnels, chassis_name, NULL);
+    if (!tun) {
+        return;
+    }
+
+    put_encapsulation(mff_ovn_geneve, tun, datapath, outport, false,
+                      remote_ofpacts);
+    ofpact_put_OUTPUT(remote_ofpacts)->port = tun->ofport;
+}
+
+/* Encapsulate and send to a set of remote chassis. */
 static void
 fanout_to_chassis(enum mf_field_id mff_ovn_geneve,
                   struct sset *remote_chassis,
@@ -1432,6 +1655,12 @@ fanout_to_chassis(enum mf_field_id mff_ovn_geneve,
     }
 }
 
+static bool
+chassis_is_vtep(const struct sbrec_chassis *chassis)
+{
+    return smap_get_bool(&chassis->other_config, "is-vtep", false);
+}
+
 static void
 consider_mc_group(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                   enum mf_field_id mff_ovn_geneve,
@@ -1452,11 +1681,9 @@ consider_mc_group(struct ovsdb_idl_index *sbrec_port_binding_by_name,
 
     struct sset remote_chassis = SSET_INITIALIZER(&remote_chassis);
     struct sset vtep_chassis = SSET_INITIALIZER(&vtep_chassis);
-    struct match match;
 
-    match_init_catchall(&match);
-    match_set_metadata(&match, htonll(dp_key));
-    match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, mc->tunnel_key);
+    struct match match;
+    match_outport_dp_and_port_keys(&match, dp_key, mc->tunnel_key);
 
     /* Go through all of the ports in the multicast group:
      *
@@ -1511,11 +1738,20 @@ consider_mc_group(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                          &remote_ofpacts);
                 put_resubmit(OFTABLE_CHECK_LOOPBACK, &remote_ofpacts);
             }
+        } else if (!strcmp(port->type, "remote")) {
+            if (port->chassis) {
+                put_load(port->tunnel_key, MFF_LOG_OUTPORT, 0, 32,
+                         &remote_ofpacts);
+                tunnel_to_chassis(mff_ovn_geneve, port->chassis->name,
+                                  chassis_tunnels, mc->datapath,
+                                  port->tunnel_key, &remote_ofpacts);
+            }
         } else if (!strcmp(port->type, "localport")) {
             put_load(port->tunnel_key, MFF_LOG_OUTPORT, 0, 32,
                      &remote_ofpacts);
             put_resubmit(OFTABLE_CHECK_LOOPBACK, &remote_ofpacts);
-        } else if (port->chassis == chassis
+        } else if ((port->chassis == chassis
+                    || is_additional_chassis(port, chassis))
                    && (local_binding_get_primary_pb(local_bindings, lport_name)
                        || !strcmp(port->type, "l3gateway"))) {
             put_load(port->tunnel_key, MFF_LOG_OUTPORT, 0, 32, &ofpacts);
@@ -1538,16 +1774,26 @@ consider_mc_group(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                     put_resubmit(OFTABLE_CHECK_LOOPBACK, &ofpacts);
                 }
             }
-        } else if (port->chassis && !get_localnet_port(
-                local_datapaths, mc->datapath->tunnel_key)) {
+        } else if (!get_localnet_port(local_datapaths,
+                                      mc->datapath->tunnel_key)) {
             /* Add the remote chassis only when no localnet port exists;
              * otherwise multicast will reach remote ports through the
              * localnet port. */
-            if (smap_get_bool(&port->chassis->other_config,
-                              "is-vtep", false)) {
-                sset_add(&vtep_chassis, port->chassis->name);
-            } else {
-                sset_add(&remote_chassis, port->chassis->name);
+            if (port->chassis) {
+                if (chassis_is_vtep(port->chassis)) {
+                    sset_add(&vtep_chassis, port->chassis->name);
+                } else {
+                    sset_add(&remote_chassis, port->chassis->name);
+                }
+            }
+            for (size_t j = 0; j < port->n_additional_chassis; j++) {
+                if (chassis_is_vtep(port->additional_chassis[j])) {
+                    sset_add(&vtep_chassis,
+                             port->additional_chassis[j]->name);
+                } else {
+                    sset_add(&remote_chassis,
+                             port->additional_chassis[j]->name);
+                }
             }
         }
     }
@@ -1634,20 +1880,49 @@ physical_handle_flows_for_lport(const struct sbrec_port_binding *pb,
 
     ofctrl_remove_flows(flow_table, &pb->header_.uuid);
 
+    struct local_datapath *ldp =
+        get_local_datapath(p_ctx->local_datapaths,
+                           pb->datapath->tunnel_key);
     if (!strcmp(pb->type, "external")) {
         /* External lports have a dependency on the localnet port.
          * We need to remove the flows of the localnet port as well
          * and re-consider adding the flows for it.
          */
-        struct local_datapath *ldp =
-            get_local_datapath(p_ctx->local_datapaths,
-                               pb->datapath->tunnel_key);
         if (ldp && ldp->localnet_port) {
             ofctrl_remove_flows(flow_table, &ldp->localnet_port->header_.uuid);
             physical_eval_port_binding(p_ctx, ldp->localnet_port, flow_table);
         }
     }
 
+    if (ldp) {
+        bool multichassis_state_changed = (
+            !!pb->additional_chassis ==
+            !!shash_find(&ldp->multichassis_ports, pb->logical_port)
+        );
+        if (multichassis_state_changed) {
+            if (pb->additional_chassis) {
+                add_local_datapath_multichassis_port(
+                    ldp, pb->logical_port, pb);
+            } else {
+                remove_local_datapath_multichassis_port(
+                    ldp, pb->logical_port);
+            }
+
+            struct sbrec_port_binding *target =
+                sbrec_port_binding_index_init_row(
+                    p_ctx->sbrec_port_binding_by_datapath);
+            sbrec_port_binding_index_set_datapath(target, ldp->datapath);
+
+            const struct sbrec_port_binding *port;
+            SBREC_PORT_BINDING_FOR_EACH_EQUAL (
+                    port, target, p_ctx->sbrec_port_binding_by_datapath) {
+                ofctrl_remove_flows(flow_table, &port->header_.uuid);
+                physical_eval_port_binding(p_ctx, port, flow_table);
+            }
+            sbrec_port_binding_index_destroy_row(target);
+        }
+    }
+
     if (!removed) {
         physical_eval_port_binding(p_ctx, pb, flow_table);
         if (!strcmp(pb->type, "patch")) {
@@ -1837,12 +2112,13 @@ physical_run(struct physical_ctx *p_ctx,
      * Handles packets received from a VXLAN tunnel which get resubmitted to
      * OFTABLE_LOG_INGRESS_PIPELINE due to lack of needed metadata in VXLAN,
      * explicitly skip sending back out any tunnels and resubmit to table 38
-     * for local delivery.
+     * for local delivery, except for packets that have the
+     * MLF_ALLOW_LOOPBACK bit set.
      */
     struct match match;
     match_init_catchall(&match);
-    match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0,
-                         MLF_RCV_FROM_RAMP, MLF_RCV_FROM_RAMP);
+    match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0, MLF_RCV_FROM_RAMP,
+                         MLF_RCV_FROM_RAMP | MLF_ALLOW_LOOPBACK);
 
     /* Resubmit to table 38. */
     ofpbuf_clear(&ofpacts);
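[Annotation, not part of the patch] The widened mask above is what actually changes behavior: match_set_reg_masked() matches the register against a (value, mask) pair, so keeping the value at MLF_RCV_FROM_RAMP while adding MLF_ALLOW_LOOPBACK to the mask means the flow now only matches packets with the RAMP bit set and the loopback bit clear. As a sketch, with log_flags a hypothetical variable holding the packet's MFF_LOG_FLAGS:

bool matches = (log_flags & (MLF_RCV_FROM_RAMP | MLF_ALLOW_LOOPBACK))
               == MLF_RCV_FROM_RAMP;   /* RAMP set, ALLOW_LOOPBACK clear. */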
diff --git a/controller/physical.h b/controller/physical.h
index ee4b1ae..1b8f1ea 100644
--- a/controller/physical.h
+++ b/controller/physical.h
@@ -45,6 +45,7 @@ struct local_nonvif_data;
 
 struct physical_ctx {
     struct ovsdb_idl_index *sbrec_port_binding_by_name;
+    struct ovsdb_idl_index *sbrec_port_binding_by_datapath;
     const struct sbrec_port_binding_table *port_binding_table;
     const struct sbrec_multicast_group_table *mc_group_table;
     const struct ovsrec_bridge *br_int;
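[Annotation, not part of the patch] The new sbrec_port_binding_by_datapath member is plumbing for an OVSDB IDL index over the Port_Binding table keyed on its datapath column, consumed by the SBREC_PORT_BINDING_FOR_EACH_EQUAL loops added above. Its creation is outside this excerpt; it is presumably set up in ovn-controller.c along these lines:

struct ovsdb_idl_index *sbrec_port_binding_by_datapath
    = ovsdb_idl_index_create1(ovnsb_idl_loop.idl,
                              &sbrec_port_binding_col_datapath);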
diff --git a/controller/pinctrl.c b/controller/pinctrl.c
index 2f718ac..3f5d0af 100644
--- a/controller/pinctrl.c
+++ b/controller/pinctrl.c
@@ -29,10 +29,12 @@
 #include "lport.h"
 #include "mac-learn.h"
 #include "nx-match.h"
+#include "ofctrl.h"
 #include "latch.h"
 #include "lib/packets.h"
 #include "lib/sset.h"
 #include "openvswitch/ofp-actions.h"
+#include "openvswitch/ofp-flow.h"
 #include "openvswitch/ofp-msgs.h"
 #include "openvswitch/ofp-packet.h"
 #include "openvswitch/ofp-print.h"
@@ -152,8 +154,8 @@ VLOG_DEFINE_THIS_MODULE(pinctrl);
  *  and pinctrl_run().
  *  'pinctrl_handler_seq' is used by pinctrl_run() to
  *  wake up pinctrl_handler thread from poll_block() if any changes happened
- *  in 'send_garp_rarp_data', 'ipv6_ras' and 'buffered_mac_bindings'
- *  structures.
+ *  in 'send_garp_rarp_data', 'ipv6_ras', 'ports_to_activate_in_db' and
+ *  'buffered_mac_bindings' structures.
  *
  *  'pinctrl_main_seq' is used by pinctrl_handler() thread to wake up
  *  the main thread from poll_block() when mac bindings/igmp groups need to
@@ -179,6 +181,7 @@ static void init_buffered_packets_map(void);
 static void destroy_buffered_packets_map(void);
 static void
 run_buffered_binding(struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
+                     struct ovsdb_idl_index *sbrec_port_binding_by_datapath,
                      const struct hmap *local_datapaths)
     OVS_REQUIRES(pinctrl_mutex);
 
@@ -198,6 +201,17 @@ static void wait_put_mac_bindings(struct ovsdb_idl_txn *ovnsb_idl_txn);
 static void send_mac_binding_buffered_pkts(struct rconn *swconn)
     OVS_REQUIRES(pinctrl_mutex);
 
+static void pinctrl_rarp_activation_strategy_handler(const struct match *md);
+
+static void init_activated_ports(void);
+static void destroy_activated_ports(void);
+static void wait_activated_ports(void);
+static void run_activated_ports(
+    struct ovsdb_idl_txn *ovnsb_idl_txn,
+    struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
+    struct ovsdb_idl_index *sbrec_port_binding_by_key,
+    const struct sbrec_chassis *chassis);
+
 static void init_send_garps_rarps(void);
 static void destroy_send_garps_rarps(void);
 static void send_garp_rarp_wait(long long int send_garp_rarp_time);
@@ -391,10 +405,10 @@ init_event_table(void)
 static void
 empty_lb_backends_event_gc(bool flush)
 {
-    struct empty_lb_backends_event *cur_ce, *next_ce;
+    struct empty_lb_backends_event *cur_ce;
     long long int now = time_msec();
 
-    HMAP_FOR_EACH_SAFE (cur_ce, next_ce, hmap_node,
+    HMAP_FOR_EACH_SAFE (cur_ce, hmap_node,
                         &event_table[OVN_EVENT_EMPTY_LB_BACKENDS]) {
         if ((now < cur_ce->timestamp + EVENT_TIMEOUT) && !flush) {
             continue;
@@ -522,6 +536,7 @@ pinctrl_init(void)
     init_ipv6_ras();
     init_ipv6_prefixd();
     init_buffered_packets_map();
+    init_activated_ports();
     init_event_table();
     ip_mcast_snoop_init();
     init_put_vport_bindings();
@@ -610,6 +625,39 @@ set_actions_and_enqueue_msg(struct rconn *swconn,
     ofpbuf_uninit(&ofpacts);
 }
 
+/* Forwards a packet to 'out_port_key' even if that's on a remote
+ * hypervisor, i.e., the packet is re-injected in table OFTABLE_REMOTE_OUTPUT.
+ */
+static void
+pinctrl_forward_pkt(struct rconn *swconn, int64_t dp_key,
+                    int64_t in_port_key, int64_t out_port_key,
+                    const struct dp_packet *pkt)
+{
+    /* Reinject the packet with the logical datapath, inport and outport
+     * metadata set, then resubmit it to OFTABLE_REMOTE_OUTPUT. */
+    uint64_t ofpacts_stub[4096 / 8];
+    struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(ofpacts_stub);
+    enum ofp_version version = rconn_get_version(swconn);
+    put_load(dp_key, MFF_LOG_DATAPATH, 0, 64, &ofpacts);
+    put_load(in_port_key, MFF_LOG_INPORT, 0, 32, &ofpacts);
+    put_load(out_port_key, MFF_LOG_OUTPORT, 0, 32, &ofpacts);
+
+    struct ofpact_resubmit *resubmit = ofpact_put_RESUBMIT(&ofpacts);
+    resubmit->in_port = OFPP_CONTROLLER;
+    resubmit->table_id = OFTABLE_REMOTE_OUTPUT;
+
+    struct ofputil_packet_out po = {
+        .packet = dp_packet_data(pkt),
+        .packet_len = dp_packet_size(pkt),
+        .buffer_id = UINT32_MAX,
+        .ofpacts = ofpacts.data,
+        .ofpacts_len = ofpacts.size,
+    };
+    match_set_in_port(&po.flow_metadata, OFPP_CONTROLLER);
+    enum ofputil_protocol proto = ofputil_protocol_from_ofp_version(version);
+    queue_msg(swconn, ofputil_encode_packet_out(&po, proto));
+    ofpbuf_uninit(&ofpacts);
+}
+
 static struct shash ipv6_prefixd;
 
 enum {
@@ -657,8 +705,8 @@ init_ipv6_prefixd(void)
 static void
 destroy_ipv6_prefixd(void)
 {
-    struct shash_node *iter, *next;
-    SHASH_FOR_EACH_SAFE (iter, next, &ipv6_prefixd) {
+    struct shash_node *iter;
+    SHASH_FOR_EACH_SAFE (iter, &ipv6_prefixd) {
         struct ipv6_prefixd_state *pfd = iter->data;
         free(pfd);
         shash_delete(&ipv6_prefixd, iter);
@@ -873,7 +921,9 @@ pinctrl_parse_dhcpv6_reply(struct dp_packet *pkt_in,
     OVS_REQUIRES(pinctrl_mutex)
 {
     struct eth_header *eth = dp_packet_eth(pkt_in);
-    struct ip6_hdr *in_ip = dp_packet_l3(pkt_in);
+    struct ovs_16aligned_ip6_hdr *in_ip = dp_packet_l3(pkt_in);
+    struct in6_addr ip6_src;
+    memcpy(&ip6_src, &in_ip->ip6_src, sizeof ip6_src);
     struct udp_header *udp_in = dp_packet_l4(pkt_in);
     unsigned char *in_dhcpv6_data = (unsigned char *)(udp_in + 1);
     size_t dlen = MIN(ntohs(udp_in->udp_len), dp_packet_l4_size(pkt_in));
@@ -957,7 +1007,7 @@ pinctrl_parse_dhcpv6_reply(struct dp_packet *pkt_in,
                         " aid %d", ip6_s, prefix, prefix_len, aid);
         }
         pinctrl_prefixd_state_handler(ip_flow, ipv6, aid, eth->eth_src,
-                                      in_ip->ip6_src, prefix_len, t1, t2,
+                                      ip6_src, prefix_len, t1, t2,
                                       plife_time, vlife_time, uuid, uuid_len);
     } else if (uuid) {
         free(uuid);
@@ -1278,18 +1328,20 @@ prepare_ipv6_prefixd(struct ovsdb_idl_txn *ovnsb_idl_txn,
                      struct ovsdb_idl_index *sbrec_port_binding_by_name,
                      const struct shash *local_active_ports_ipv6_pd,
                      const struct sbrec_chassis *chassis,
-                     const struct sset *active_tunnels)
+                     const struct sset *active_tunnels,
+                     const struct hmap *local_datapaths)
     OVS_REQUIRES(pinctrl_mutex)
 {
     bool changed = false;
 
     struct shash_node *iter;
     SHASH_FOR_EACH (iter, local_active_ports_ipv6_pd) {
-        const struct pb_ld_binding *pb_ipv6 = iter->data;
-        const struct sbrec_port_binding *pb = pb_ipv6->pb;
+        const struct sbrec_port_binding *pb = iter->data;
+        const struct local_datapath *ld =
+            get_local_datapath(local_datapaths, pb->datapath->tunnel_key);
         int j;
 
-        if (!pb_ipv6->ld) {
+        if (!ld) {
             continue;
         }
 
@@ -1340,14 +1392,13 @@ prepare_ipv6_prefixd(struct ovsdb_idl_txn *ovnsb_idl_txn,
             in6_generate_lla(ea, &ip6_addr);
         }
 
-        changed |= fill_ipv6_prefix_state(ovnsb_idl_txn, pb_ipv6->ld,
+        changed |= fill_ipv6_prefix_state(ovnsb_idl_txn, ld,
                                           ea, ip6_addr,
                                           peer->tunnel_key,
                                           peer->datapath->tunnel_key);
     }
 
-    struct shash_node *next;
-    SHASH_FOR_EACH_SAFE (iter, next, &ipv6_prefixd) {
+    SHASH_FOR_EACH_SAFE (iter, &ipv6_prefixd) {
         struct ipv6_prefixd_state *pfd = iter->data;
         if (pfd->last_used + IPV6_PREFIXD_STALE_TIMEOUT < time_msec()) {
             if (pfd->uuid.len) {
@@ -1412,8 +1463,8 @@ destroy_buffered_packets(struct buffered_packets *bp)
 static void
 destroy_buffered_packets_map(void)
 {
-    struct buffered_packets *bp, *next;
-    HMAP_FOR_EACH_SAFE (bp, next, hmap_node, &buffered_packets_map) {
+    struct buffered_packets *bp;
+    HMAP_FOR_EACH_SAFE (bp, hmap_node, &buffered_packets_map) {
         destroy_buffered_packets(bp);
         hmap_remove(&buffered_packets_map, &bp->hmap_node);
         free(bp);
@@ -1492,10 +1543,10 @@ buffered_send_packets(struct rconn *swconn, struct buffered_packets *bp,
 static void
 buffered_packets_map_gc(void)
 {
-    struct buffered_packets *cur_qp, *next_qp;
+    struct buffered_packets *cur_qp;
     long long int now = time_msec();
 
-    HMAP_FOR_EACH_SAFE (cur_qp, next_qp, hmap_node, &buffered_packets_map) {
+    HMAP_FOR_EACH_SAFE (cur_qp, hmap_node, &buffered_packets_map) {
         if (now > cur_qp->timestamp + BUFFER_MAP_TIMEOUT) {
             destroy_buffered_packets(cur_qp);
             hmap_remove(&buffered_packets_map, &cur_qp->hmap_node);
@@ -2204,30 +2255,56 @@ pinctrl_handle_put_dhcp_opts(
      *| 4 Bytes padding | 1 Byte (option end 0xFF ) | 4 Bytes padding|
      * --------------------------------------------------------------
      */
-    struct dhcp_opt_header *in_dhcp_opt =
-        (struct dhcp_opt_header *)reply_dhcp_opts_ptr->data;
-    if (in_dhcp_opt->code == DHCP_OPT_BOOTFILE_CODE) {
-        unsigned char *ptr = (unsigned char *)in_dhcp_opt;
-        int len = sizeof *in_dhcp_opt + in_dhcp_opt->len;
-        struct dhcp_opt_header *next_dhcp_opt =
-            (struct dhcp_opt_header *)(ptr + len);
-
-        if (next_dhcp_opt->code == DHCP_OPT_BOOTFILE_ALT_CODE) {
-            if (!ipxe_req) {
-                ofpbuf_pull(reply_dhcp_opts_ptr, len);
-                next_dhcp_opt->code = DHCP_OPT_BOOTFILE_CODE;
-            } else {
-                char *buf = xmalloc(len);
+    ovs_be32 next_server = in_dhcp_data->siaddr;
+    bool bootfile_name_set = false;
+    in_dhcp_ptr = reply_dhcp_opts_ptr->data;
+    end = (const char *)reply_dhcp_opts_ptr->data + reply_dhcp_opts_ptr->size;
+
+    while (in_dhcp_ptr < end) {
+        struct dhcp_opt_header *in_dhcp_opt =
+            (struct dhcp_opt_header *)in_dhcp_ptr;
+
+        switch (in_dhcp_opt->code) {
+        case DHCP_OPT_NEXT_SERVER_CODE:
+            next_server = get_unaligned_be32(DHCP_OPT_PAYLOAD(in_dhcp_opt));
+            break;
+        case DHCP_OPT_BOOTFILE_CODE: ;
+            unsigned char *ptr = (unsigned char *)in_dhcp_opt;
+            int len = sizeof *in_dhcp_opt + in_dhcp_opt->len;
+            struct dhcp_opt_header *next_dhcp_opt =
+                (struct dhcp_opt_header *)(ptr + len);
+
+            if (next_dhcp_opt->code == DHCP_OPT_BOOTFILE_ALT_CODE) {
+                if (!ipxe_req) {
+                    ofpbuf_pull(reply_dhcp_opts_ptr, len);
+                    next_dhcp_opt->code = DHCP_OPT_BOOTFILE_CODE;
+                } else {
+                    char *buf = xmalloc(len);
 
-                memcpy(buf, in_dhcp_opt, len);
-                ofpbuf_pull(reply_dhcp_opts_ptr,
-                            sizeof *in_dhcp_opt + next_dhcp_opt->len);
-                memcpy(reply_dhcp_opts_ptr->data, buf, len);
-                free(buf);
+                    memcpy(buf, in_dhcp_opt, len);
+                    ofpbuf_pull(reply_dhcp_opts_ptr,
+                                sizeof *in_dhcp_opt + next_dhcp_opt->len);
+                    memcpy(reply_dhcp_opts_ptr->data, buf, len);
+                    free(buf);
+                }
             }
+            bootfile_name_set = true;
+            break;
+        case DHCP_OPT_BOOTFILE_ALT_CODE:
+            if (!bootfile_name_set) {
+                in_dhcp_opt->code = DHCP_OPT_BOOTFILE_CODE;
+            }
+            break;
+        }
+
+        in_dhcp_ptr += sizeof *in_dhcp_opt;
+        if (in_dhcp_ptr > end) {
+            break;
+        }
+        in_dhcp_ptr += in_dhcp_opt->len;
+        if (in_dhcp_ptr > end) {
+            break;
         }
-    } else if (in_dhcp_opt->code == DHCP_OPT_BOOTFILE_ALT_CODE) {
-        in_dhcp_opt->code = DHCP_OPT_BOOTFILE_CODE;
     }
 
     uint16_t new_l4_size = UDP_HEADER_LEN + DHCP_HEADER_LEN + 16;
@@ -2260,6 +2337,7 @@ pinctrl_handle_put_dhcp_opts(
 
     if (*in_dhcp_msg_type != OVN_DHCP_MSG_INFORM) {
         dhcp_data->yiaddr = (msg_type == DHCP_MSG_NAK) ? 0 : *offer_ip;
+        dhcp_data->siaddr = (msg_type == DHCP_MSG_NAK) ? 0 : next_server;
     } else {
         dhcp_data->yiaddr = 0;
     }
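[Annotation, not part of the patch] The option walk above advances by sizeof(struct dhcp_opt_header) plus the option's own length. For readability, this is the layout it assumes (approximate reproduction of the definitions in lib/ovn-l7.h; treat as illustrative):

struct dhcp_opt_header {
    uint8_t code;   /* Option code, e.g. DHCP_OPT_BOOTFILE_CODE. */
    uint8_t len;    /* Length in bytes of the payload that follows. */
};
#define DHCP_OPT_PAYLOAD(hdr) \
    ((void *)((char *)hdr + sizeof(struct dhcp_opt_header)))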
@@ -2699,8 +2777,7 @@ sync_dns_cache(const struct sbrec_dns_table *dns_table)
         }
     }
 
-    struct shash_node *next;
-    SHASH_FOR_EACH_SAFE (iter, next, &dns_cache) {
+    SHASH_FOR_EACH_SAFE (iter, &dns_cache) {
         struct dns_data *d = iter->data;
         if (d->delete) {
             shash_delete(&dns_cache, iter);
@@ -2714,8 +2791,8 @@ sync_dns_cache(const struct sbrec_dns_table *dns_table)
 static void
 destroy_dns_cache(void)
 {
-    struct shash_node *iter, *next;
-    SHASH_FOR_EACH_SAFE (iter, next, &dns_cache) {
+    struct shash_node *iter;
+    SHASH_FOR_EACH_SAFE (iter, &dns_cache) {
         struct dns_data *d = iter->data;
         shash_delete(&dns_cache, iter);
         smap_destroy(&d->records);
@@ -3244,6 +3321,12 @@ process_packet_in(struct rconn *swconn, const struct ofp_header *msg)
         ovs_mutex_unlock(&pinctrl_mutex);
         break;
 
+    case ACTION_OPCODE_ACTIVATION_STRATEGY_RARP:
+        ovs_mutex_lock(&pinctrl_mutex);
+        pinctrl_rarp_activation_strategy_handler(&pin.flow_metadata);
+        ovs_mutex_unlock(&pinctrl_mutex);
+        break;
+
     default:
         VLOG_WARN_RL(&rl, "unrecognized packet-in opcode %"PRIu32,
                      ntohl(ah->opcode));
@@ -3409,11 +3492,11 @@ pinctrl_handler(void *arg_)
 
                 ip_mcast_querier_run(swconn, &send_mcast_query_time);
             }
-        }
 
-        ovs_mutex_lock(&pinctrl_mutex);
-        svc_monitors_run(swconn, &svc_monitors_next_run_time);
-        ovs_mutex_unlock(&pinctrl_mutex);
+            ovs_mutex_lock(&pinctrl_mutex);
+            svc_monitors_run(swconn, &svc_monitors_next_run_time);
+            ovs_mutex_unlock(&pinctrl_mutex);
+        }
 
         rconn_run_wait(swconn);
         rconn_recv_wait(swconn);
@@ -3493,7 +3576,7 @@ pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
     prepare_ipv6_ras(local_active_ports_ras, sbrec_port_binding_by_name);
     prepare_ipv6_prefixd(ovnsb_idl_txn, sbrec_port_binding_by_name,
                          local_active_ports_ipv6_pd, chassis,
-                         active_tunnels);
+                         active_tunnels, local_datapaths);
     sync_dns_cache(dns_table);
     controller_event_run(ovnsb_idl_txn, ce_table, chassis);
     ip_mcast_sync(ovnsb_idl_txn, chassis, local_datapaths,
@@ -3502,12 +3585,15 @@ pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
                   sbrec_igmp_groups,
                   sbrec_ip_multicast_opts);
     run_buffered_binding(sbrec_mac_binding_by_lport_ip,
+                         sbrec_port_binding_by_datapath,
                          local_datapaths);
     sync_svc_monitors(ovnsb_idl_txn, svc_mon_table, sbrec_port_binding_by_name,
                       chassis);
     bfd_monitor_run(ovnsb_idl_txn, bfd_table, sbrec_port_binding_by_name,
                     chassis, active_tunnels);
     run_put_fdbs(ovnsb_idl_txn, sbrec_fdb_by_dp_key_mac);
+    run_activated_ports(ovnsb_idl_txn, sbrec_datapath_binding_by_key,
+                        sbrec_port_binding_by_key, chassis);
     ovs_mutex_unlock(&pinctrl_mutex);
 }
 
@@ -3569,8 +3655,8 @@ ipv6_ra_delete(struct ipv6_ra_state *ra)
 static void
 destroy_ipv6_ras(void)
 {
-    struct shash_node *iter, *next;
-    SHASH_FOR_EACH_SAFE (iter, next, &ipv6_ras) {
+    struct shash_node *iter;
+    SHASH_FOR_EACH_SAFE (iter, &ipv6_ras) {
         struct ipv6_ra_state *ra = iter->data;
         ipv6_ra_delete(ra);
         shash_delete(&ipv6_ras, iter);
@@ -3689,7 +3775,7 @@ packet_put_ra_rdnss_opt(struct dp_packet *b, uint8_t num,
                         ovs_be32 lifetime, const struct in6_addr *dns)
 {
     size_t prev_l4_size = dp_packet_l4_size(b);
-    struct ip6_hdr *nh = dp_packet_l3(b);
+    struct ovs_16aligned_ip6_hdr *nh = dp_packet_l3(b);
     size_t len = 2 * num + 1;
 
     nh->ip6_plen = htons(prev_l4_size + len * 8);
@@ -3724,7 +3810,7 @@ packet_put_ra_dnssl_opt(struct dp_packet *b, ovs_be32 lifetime,
         return;
     }
 
-    struct ip6_hdr *nh = dp_packet_l3(b);
+    struct ovs_16aligned_ip6_hdr *nh = dp_packet_l3(b);
     nh->ip6_plen = htons(prev_l4_size + size);
 
     struct ovs_nd_dnssl *nd_dnssl = dp_packet_put_uninit(b, sizeof *nd_dnssl);
@@ -3803,7 +3889,7 @@ packet_put_ra_route_info_opt(struct dp_packet *b, ovs_be32 lifetime,
         }
     }
 
-    struct ip6_hdr *nh = dp_packet_l3(b);
+    struct ovs_16aligned_ip6_hdr *nh = dp_packet_l3(b);
     nh->ip6_plen = htons(prev_l4_size + size);
     struct ovs_ra_msg *ra = dp_packet_l4(b);
     ra->icmph.icmp6_cksum = 0;
@@ -3926,7 +4012,7 @@ prepare_ipv6_ras(const struct shash *local_active_ports_ras,
                  struct ovsdb_idl_index *sbrec_port_binding_by_name)
     OVS_REQUIRES(pinctrl_mutex)
 {
-    struct shash_node *iter, *iter_next;
+    struct shash_node *iter;
 
     SHASH_FOR_EACH (iter, &ipv6_ras) {
         struct ipv6_ra_state *ra = iter->data;
@@ -3935,8 +4021,7 @@ prepare_ipv6_ras(const struct shash *local_active_ports_ras,
 
     bool changed = false;
     SHASH_FOR_EACH (iter, local_active_ports_ras) {
-        const struct pb_ld_binding *ras = iter->data;
-        const struct sbrec_port_binding *pb = ras->pb;
+        const struct sbrec_port_binding *pb = iter->data;
 
         const char *peer_s = smap_get(&pb->options, "peer");
         if (!peer_s) {
@@ -3986,7 +4071,7 @@ prepare_ipv6_ras(const struct shash *local_active_ports_ras,
     }
 
     /* Remove those that are no longer in the SB database */
-    SHASH_FOR_EACH_SAFE (iter, iter_next, &ipv6_ras) {
+    SHASH_FOR_EACH_SAFE (iter, &ipv6_ras) {
         struct ipv6_ra_state *ra = iter->data;
         if (ra->delete_me) {
             shash_delete(&ipv6_ras, iter);
@@ -4005,12 +4090,15 @@ prepare_ipv6_ras(const struct shash *local_active_ports_ras,
 void
 pinctrl_wait(struct ovsdb_idl_txn *ovnsb_idl_txn)
 {
+    ovs_mutex_lock(&pinctrl_mutex);
     wait_put_mac_bindings(ovnsb_idl_txn);
     wait_controller_event(ovnsb_idl_txn);
     wait_put_vport_bindings(ovnsb_idl_txn);
     int64_t new_seq = seq_read(pinctrl_main_seq);
     seq_wait(pinctrl_main_seq, new_seq);
     wait_put_fdbs(ovnsb_idl_txn);
+    wait_activated_ports();
+    ovs_mutex_unlock(&pinctrl_mutex);
 }
 
 /* Called by ovn-controller. */
@@ -4025,6 +4113,7 @@ pinctrl_destroy(void)
     destroy_ipv6_ras();
     destroy_ipv6_prefixd();
     destroy_buffered_packets_map();
+    destroy_activated_ports();
     event_table_destroy();
     destroy_put_mac_bindings();
     destroy_put_vport_bindings();
@@ -4083,9 +4172,13 @@ pinctrl_handle_put_mac_binding(const struct flow *md,
         memcpy(&ip_key, &ip6, sizeof ip_key);
     }
 
+    /* If the ARP reply was unicast, we should not delay it;
+     * there won't be any race. */
+    bool is_unicast = !eth_addr_is_multicast(headers->dl_dst);
     struct mac_binding *mb = ovn_mac_binding_add(&put_mac_bindings, dp_key,
                                                  port_key, &ip_key,
-                                                 headers->dl_src);
+                                                 headers->dl_src,
+                                                 is_unicast);
     if (!mb) {
         COVERAGE_INC(pinctrl_drop_put_mac_binding);
         return;
@@ -4154,8 +4247,10 @@ mac_binding_add_to_sb(struct ovsdb_idl_txn *ovnsb_idl_txn,
         sbrec_mac_binding_set_ip(b, ip);
         sbrec_mac_binding_set_mac(b, mac_string);
         sbrec_mac_binding_set_datapath(b, dp);
+        sbrec_mac_binding_set_timestamp(b, time_wall_msec());
     } else if (strcmp(b->mac, mac_string)) {
         sbrec_mac_binding_set_mac(b, mac_string);
+        sbrec_mac_binding_set_timestamp(b, time_wall_msec());
     }
 }
 
@@ -4245,18 +4340,23 @@ run_put_mac_bindings(struct ovsdb_idl_txn *ovnsb_idl_txn,
         return;
     }
 
-    const struct mac_binding *mb;
-    HMAP_FOR_EACH (mb, hmap_node, &put_mac_bindings) {
-        run_put_mac_binding(ovnsb_idl_txn, sbrec_datapath_binding_by_key,
-                            sbrec_port_binding_by_key,
-                            sbrec_mac_binding_by_lport_ip,
-                            mb);
+    long long now = time_msec();
+
+    struct mac_binding *mb;
+    HMAP_FOR_EACH_SAFE (mb, hmap_node, &put_mac_bindings) {
+        if (ovn_mac_binding_can_commit(mb, now)) {
+            run_put_mac_binding(ovnsb_idl_txn,
+                                sbrec_datapath_binding_by_key,
+                                sbrec_port_binding_by_key,
+                                sbrec_mac_binding_by_lport_ip, mb);
+            ovn_mac_binding_remove(mb, &put_mac_bindings);
+        }
     }
-    ovn_mac_bindings_flush(&put_mac_bindings);
 }
 
 static void
 run_buffered_binding(struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
+                     struct ovsdb_idl_index *sbrec_port_binding_by_datapath,
                      const struct hmap *local_datapaths)
     OVS_REQUIRES(pinctrl_mutex)
 {
@@ -4272,12 +4372,17 @@ run_buffered_binding(struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
             continue;
         }
 
-        for (size_t i = 0; i < ld->n_peer_ports; i++) {
-
-            const struct sbrec_port_binding *pb = ld->peer_ports[i].local;
-            struct buffered_packets *cur_qp, *next_qp;
-            HMAP_FOR_EACH_SAFE (cur_qp, next_qp, hmap_node,
-                                &buffered_packets_map) {
+        struct sbrec_port_binding *target =
+            sbrec_port_binding_index_init_row(sbrec_port_binding_by_datapath);
+        sbrec_port_binding_index_set_datapath(target, ld->datapath);
+        const struct sbrec_port_binding *pb;
+        SBREC_PORT_BINDING_FOR_EACH_EQUAL (pb, target,
+                                           sbrec_port_binding_by_datapath) {
+            if (strcmp(pb->type, "patch")) {
+                continue;
+            }
+            struct buffered_packets *cur_qp;
+            HMAP_FOR_EACH_SAFE (cur_qp, hmap_node, &buffered_packets_map) {
                 struct ds ip_s = DS_EMPTY_INITIALIZER;
                 ipv6_format_mapped(&cur_qp->ip, &ip_s);
                 const struct sbrec_mac_binding *b = mac_binding_lookup(
@@ -4292,6 +4397,7 @@ run_buffered_binding(struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
                 ds_destroy(&ip_s);
             }
         }
+        sbrec_port_binding_index_destroy_row(target);
     }
     buffered_packets_map_gc();
 
@@ -4302,9 +4408,10 @@ run_buffered_binding(struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
 
 static void
 wait_put_mac_bindings(struct ovsdb_idl_txn *ovnsb_idl_txn)
+    OVS_REQUIRES(pinctrl_mutex)
 {
     if (ovnsb_idl_txn && !hmap_is_empty(&put_mac_bindings)) {
-        poll_immediate_wake();
+        ovn_mac_binding_wait(&put_mac_bindings);
     }
 }
 
@@ -4957,9 +5064,9 @@ static void
 ip_mcast_snoop_destroy(void)
     OVS_NO_THREAD_SAFETY_ANALYSIS
 {
-    struct ip_mcast_snoop *ip_ms, *ip_ms_next;
+    struct ip_mcast_snoop *ip_ms;
 
-    HMAP_FOR_EACH_SAFE (ip_ms, ip_ms_next, hmap_node, &mcast_snoop_map) {
+    HMAP_FOR_EACH_SAFE (ip_ms, hmap_node, &mcast_snoop_map) {
         ip_mcast_snoop_remove(ip_ms);
     }
     hmap_destroy(&mcast_snoop_map);
@@ -4975,7 +5082,7 @@ static void
 ip_mcast_snoop_run(void)
     OVS_REQUIRES(pinctrl_mutex)
 {
-    struct ip_mcast_snoop *ip_ms, *ip_ms_next;
+    struct ip_mcast_snoop *ip_ms;
 
     /* First read the config updated by pinctrl_main. If there's any new or
      * updated config then apply it.
@@ -4996,7 +5103,7 @@ ip_mcast_snoop_run(void)
     bool notify = false;
 
     /* Then walk the multicast snoop instances. */
-    HMAP_FOR_EACH_SAFE (ip_ms, ip_ms_next, hmap_node, &mcast_snoop_map) {
+    HMAP_FOR_EACH_SAFE (ip_ms, hmap_node, &mcast_snoop_map) {
 
         /* Delete the stale ones. */
         if (!ip_mcast_snoop_state_find(ip_ms->dp_key)) {
@@ -5061,7 +5168,7 @@ ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
     }
 
     struct sbrec_ip_multicast *ip_mcast;
-    struct ip_mcast_snoop_state *ip_ms_state, *ip_ms_state_next;
+    struct ip_mcast_snoop_state *ip_ms_state;
 
     /* First read and update our own local multicast configuration for the
      * local datapaths.
@@ -5082,14 +5189,14 @@ ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
     }
 
     /* Then delete the old entries. */
-    HMAP_FOR_EACH_SAFE (ip_ms_state, ip_ms_state_next, hmap_node,
-                        &mcast_cfg_map) {
+    HMAP_FOR_EACH_SAFE (ip_ms_state, hmap_node, &mcast_cfg_map) {
         if (!get_local_datapath(local_datapaths, ip_ms_state->dp_key)) {
             ip_mcast_snoop_state_remove(ip_ms_state);
             notify = true;
         }
     }
 
+    const struct sbrec_igmp_group *sbrec_ip_mrouter;
     const struct sbrec_igmp_group *sbrec_igmp;
 
     /* Then flush any IGMP_Group entries that are not needed anymore:
@@ -5125,7 +5232,9 @@ ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
             continue;
         }
 
-        if (ip_parse(sbrec_igmp->address, &group_v4_addr)) {
+        if (!strcmp(sbrec_igmp->address, OVN_IGMP_GROUP_MROUTERS)) {
+            continue;
+        } else if (ip_parse(sbrec_igmp->address, &group_v4_addr)) {
             group_addr = in6_addr_mapped_ipv4(group_v4_addr);
         } else if (!ipv6_parse(sbrec_igmp->address, &group_addr)) {
             continue;
@@ -5141,13 +5250,13 @@ ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
         ovs_rwlock_unlock(&ip_ms->ms->rwlock);
     }
 
-    struct ip_mcast_snoop *ip_ms, *ip_ms_next;
+    struct ip_mcast_snoop *ip_ms;
 
     /* Last: write new IGMP_Groups to the southbound DB and update existing
      * ones (if needed). We also flush any old per-datapath multicast snoop
      * structures.
      */
-    HMAP_FOR_EACH_SAFE (ip_ms, ip_ms_next, hmap_node, &mcast_snoop_map) {
+    HMAP_FOR_EACH_SAFE (ip_ms, hmap_node, &mcast_snoop_map) {
         /* Flush any non-local snooping datapaths (e.g., stale). */
         struct local_datapath *local_dp =
             get_local_datapath(local_datapaths, ip_ms->dp_key);
@@ -5164,6 +5273,8 @@ ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
         struct mcast_group *mc_group;
 
         ovs_rwlock_rdlock(&ip_ms->ms->rwlock);
+
+        /* Groups. */
         LIST_FOR_EACH (mc_group, group_node, &ip_ms->ms->group_lru) {
             if (ovs_list_is_empty(&mc_group->bundle_lru)) {
                 continue;
@@ -5179,6 +5290,20 @@ ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
                                     sbrec_port_binding_by_key, ip_ms->ms,
                                     mc_group);
         }
+
+        /* Mrouters. */
+        sbrec_ip_mrouter = igmp_mrouter_lookup(sbrec_igmp_groups,
+                                               local_dp->datapath,
+                                               chassis);
+        if (!sbrec_ip_mrouter) {
+            sbrec_ip_mrouter = igmp_mrouter_create(ovnsb_idl_txn,
+                                                   local_dp->datapath,
+                                                   chassis);
+        }
+        igmp_mrouter_update_ports(sbrec_ip_mrouter,
+                                  sbrec_datapath_binding_by_key,
+                                  sbrec_port_binding_by_key, ip_ms->ms);
+
         ovs_rwlock_unlock(&ip_ms->ms->rwlock);
     }
 
@@ -5187,12 +5312,35 @@ ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
     }
 }
 
+/* Reinject the packet and flood it to all registered mrouters (including
+ * those that are not local to this chassis). */
+static void
+ip_mcast_forward_report(struct rconn *swconn, struct ip_mcast_snoop *ip_ms,
+                        uint32_t orig_in_port_key,
+                        const struct dp_packet *report)
+{
+    pinctrl_forward_pkt(swconn, ip_ms->dp_key, orig_in_port_key,
+                        OVN_MCAST_MROUTER_FLOOD_TUNNEL_KEY, report);
+}
+
+
+static void
+ip_mcast_forward_query(struct rconn *swconn, struct ip_mcast_snoop *ip_ms,
+                       uint32_t orig_in_port_key,
+                       const struct dp_packet *query)
+{
+    pinctrl_forward_pkt(swconn, ip_ms->dp_key, orig_in_port_key,
+                        OVN_MCAST_FLOOD_L2_TUNNEL_KEY, query);
+}
+
 static bool
-pinctrl_ip_mcast_handle_igmp(struct ip_mcast_snoop *ip_ms,
+pinctrl_ip_mcast_handle_igmp(struct rconn *swconn,
+                             struct ip_mcast_snoop *ip_ms,
                              const struct flow *ip_flow,
                              struct dp_packet *pkt_in,
-                             void *port_key_data)
+                             uint32_t in_port_key)
 {
+    void *port_key_data = (void *)(uintptr_t)in_port_key;
     const struct igmp_header *igmp;
     size_t offset;
 
@@ -5222,9 +5370,6 @@ pinctrl_ip_mcast_handle_igmp(struct ip_mcast_snoop *ip_ms,
                                         port_key_data);
         break;
     case IGMP_HOST_MEMBERSHIP_QUERY:
-        /* Shouldn't be receiving any of these since we are the multicast
-         * router. Store them for now.
-         */
         group_change =
             mcast_snooping_add_mrouter(ip_ms->ms, IP_MCAST_VLAN,
                                        port_key_data);
@@ -5236,15 +5381,26 @@ pinctrl_ip_mcast_handle_igmp(struct ip_mcast_snoop *ip_ms,
         break;
     }
     ovs_rwlock_unlock(&ip_ms->ms->rwlock);
+
+    /* Forward reports to all registered mrouters and flood queries to
+     * the whole L2 domain.
+     */
+    if (mcast_snooping_is_membership(ip_flow->tp_src)) {
+        ip_mcast_forward_report(swconn, ip_ms, in_port_key, pkt_in);
+    } else if (mcast_snooping_is_query(ip_flow->tp_src)) {
+        ip_mcast_forward_query(swconn, ip_ms, in_port_key, pkt_in);
+    }
     return group_change;
 }
 
 static bool
-pinctrl_ip_mcast_handle_mld(struct ip_mcast_snoop *ip_ms,
+pinctrl_ip_mcast_handle_mld(struct rconn *swconn,
+                            struct ip_mcast_snoop *ip_ms,
                             const struct flow *ip_flow,
                             struct dp_packet *pkt_in,
-                            void *port_key_data)
+                            uint32_t in_port_key)
 {
+    void *port_key_data = (void *)(uintptr_t)in_port_key;
     const struct mld_header *mld;
     size_t offset;
 
@@ -5284,11 +5440,20 @@ pinctrl_ip_mcast_handle_mld(struct ip_mcast_snoop *ip_ms,
         break;
     }
     ovs_rwlock_unlock(&ip_ms->ms->rwlock);
+
+    /* Forward reports to all registered mrouters and flood queries to
+     * the whole L2 domain.
+     */
+    if (is_mld_report(ip_flow, NULL)) {
+        ip_mcast_forward_report(swconn, ip_ms, in_port_key, pkt_in);
+    } else if (is_mld_query(ip_flow, NULL)) {
+        ip_mcast_forward_query(swconn, ip_ms, in_port_key, pkt_in);
+    }
     return group_change;
 }
 
 static void
-pinctrl_ip_mcast_handle(struct rconn *swconn OVS_UNUSED,
+pinctrl_ip_mcast_handle(struct rconn *swconn,
                         const struct flow *ip_flow,
                         struct dp_packet *pkt_in,
                         const struct match *md,
@@ -5317,18 +5482,17 @@ pinctrl_ip_mcast_handle(struct rconn *swconn OVS_UNUSED,
     }
 
     uint32_t port_key = md->flow.regs[MFF_LOG_INPORT - MFF_REG0];
-    void *port_key_data = (void *)(uintptr_t)port_key;
 
     switch (dl_type) {
     case ETH_TYPE_IP:
-        if (pinctrl_ip_mcast_handle_igmp(ip_ms, ip_flow, pkt_in,
-                                         port_key_data)) {
+        if (pinctrl_ip_mcast_handle_igmp(swconn, ip_ms, ip_flow, pkt_in,
+                                         port_key)) {
             notify_pinctrl_main();
         }
         break;
     case ETH_TYPE_IPV6:
-        if (pinctrl_ip_mcast_handle_mld(ip_ms, ip_flow, pkt_in,
-                                        port_key_data)) {
+        if (pinctrl_ip_mcast_handle_mld(swconn, ip_ms, ip_flow, pkt_in,
+                                        port_key)) {
             notify_pinctrl_main();
         }
         break;
@@ -5767,8 +5931,8 @@ send_garp_rarp_prepare(struct ovsdb_idl_txn *ovnsb_idl_txn,
                                &nat_addresses);
     /* For deleted ports and deleted nat ips, remove from
      * send_garp_rarp_data. */
-    struct shash_node *iter, *next;
-    SHASH_FOR_EACH_SAFE (iter, next, &send_garp_rarp_data) {
+    struct shash_node *iter;
+    SHASH_FOR_EACH_SAFE (iter, &send_garp_rarp_data) {
         if (!sset_contains(&localnet_vifs, iter->name) &&
             !sset_contains(&nat_ip_keys, iter->name)) {
             send_garp_rarp_delete(iter->name);
@@ -5803,7 +5967,7 @@ send_garp_rarp_prepare(struct ovsdb_idl_txn *ovnsb_idl_txn,
     sset_destroy(&localnet_vifs);
     sset_destroy(&local_l3gw_ports);
 
-    SHASH_FOR_EACH_SAFE (iter, next, &nat_addresses) {
+    SHASH_FOR_EACH_SAFE (iter, &nat_addresses) {
         struct lport_addresses *laddrs = iter->data;
         destroy_lport_addresses(laddrs);
         shash_delete(&nat_addresses, iter);
@@ -6586,8 +6750,7 @@ sync_svc_monitors(struct ovsdb_idl_txn *ovnsb_idl_txn,
         svc_mon->delete = false;
     }
 
-    struct svc_monitor *next;
-    LIST_FOR_EACH_SAFE (svc_mon, next, list_node, &svc_monitors) {
+    LIST_FOR_EACH_SAFE (svc_mon, list_node, &svc_monitors) {
         if (svc_mon->delete) {
             hmap_remove(&svc_monitors_map, &svc_mon->hmap_node);
             ovs_list_remove(&svc_mon->list_node);
@@ -7160,7 +7323,7 @@ bfd_monitor_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
                 const struct sset *active_tunnels)
     OVS_REQUIRES(pinctrl_mutex)
 {
-    struct bfd_entry *entry, *next_entry;
+    struct bfd_entry *entry;
     long long int cur_time = time_msec();
     bool changed = false;
 
@@ -7281,7 +7444,7 @@ bfd_monitor_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
         entry->erase = false;
     }
 
-    HMAP_FOR_EACH_SAFE (entry, next_entry, node, &bfd_monitor_map) {
+    HMAP_FOR_EACH_SAFE (entry, node, &bfd_monitor_map) {
         if (entry->erase) {
             hmap_remove(&bfd_monitor_map, &entry->node);
             free(entry);
@@ -7479,7 +7642,6 @@ svc_monitors_run(struct rconn *swconn,
                 svc_mon->next_send_time = current_time + svc_mon->interval;
                 next_run_time = svc_mon->next_send_time;
             } else {
-                next_run_time = svc_mon->wait_time - current_time;
                 next_run_time = svc_mon->wait_time;
             }
             break;
@@ -7705,6 +7867,152 @@ pinctrl_handle_svc_check(struct rconn *swconn, const struct flow *ip_flow,
     }
 }
 
+static struct ovs_list ports_to_activate_in_db = OVS_LIST_INITIALIZER(
+    &ports_to_activate_in_db);
+static struct ovs_list ports_to_activate_in_engine = OVS_LIST_INITIALIZER(
+    &ports_to_activate_in_engine);
+
+struct ovs_list *
+get_ports_to_activate_in_engine(void)
+{
+    ovs_mutex_lock(&pinctrl_mutex);
+    if (ovs_list_is_empty(&ports_to_activate_in_engine)) {
+        ovs_mutex_unlock(&pinctrl_mutex);
+        return NULL;
+    }
+
+    struct ovs_list *ap = xmalloc(sizeof *ap);
+    ovs_list_init(ap);
+    struct activated_port *pp;
+    LIST_FOR_EACH (pp, list, &ports_to_activate_in_engine) {
+        struct activated_port *new = xmalloc(sizeof *new);
+        new->dp_key = pp->dp_key;
+        new->port_key = pp->port_key;
+        ovs_list_push_front(ap, &new->list);
+    }
+    ovs_mutex_unlock(&pinctrl_mutex);
+    return ap;
+}
+
+static void
+init_activated_ports(void)
+    OVS_REQUIRES(pinctrl_mutex)
+{
+    ovs_list_init(&ports_to_activate_in_db);
+    ovs_list_init(&ports_to_activate_in_engine);
+}
+
+static void
+destroy_activated_ports(void)
+    OVS_REQUIRES(pinctrl_mutex)
+{
+    struct activated_port *pp;
+    LIST_FOR_EACH_POP (pp, list, &ports_to_activate_in_db) {
+        free(pp);
+    }
+    LIST_FOR_EACH_POP (pp, list, &ports_to_activate_in_engine) {
+        free(pp);
+    }
+}
+
+static void
+wait_activated_ports(void)
+    OVS_REQUIRES(pinctrl_mutex)
+{
+    if (!ovs_list_is_empty(&ports_to_activate_in_engine)) {
+        poll_immediate_wake();
+    }
+}
+
+bool pinctrl_is_port_activated(int64_t dp_key, int64_t port_key)
+{
+    const struct activated_port *pp;
+    ovs_mutex_lock(&pinctrl_mutex);
+    LIST_FOR_EACH (pp, list, &ports_to_activate_in_db) {
+        if (pp->dp_key == dp_key && pp->port_key == port_key) {
+            ovs_mutex_unlock(&pinctrl_mutex);
+            return true;
+        }
+    }
+    LIST_FOR_EACH (pp, list, &ports_to_activate_in_engine) {
+        if (pp->dp_key == dp_key && pp->port_key == port_key) {
+            ovs_mutex_unlock(&pinctrl_mutex);
+            return true;
+        }
+    }
+    ovs_mutex_unlock(&pinctrl_mutex);
+    return false;
+}
+
+static void
+run_activated_ports(struct ovsdb_idl_txn *ovnsb_idl_txn,
+                    struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
+                    struct ovsdb_idl_index *sbrec_port_binding_by_key,
+                    const struct sbrec_chassis *chassis)
+    OVS_REQUIRES(pinctrl_mutex)
+{
+    if (!ovnsb_idl_txn) {
+        return;
+    }
+
+    struct activated_port *pp;
+    LIST_FOR_EACH_SAFE (pp, list, &ports_to_activate_in_db) {
+        const struct sbrec_port_binding *pb = lport_lookup_by_key(
+            sbrec_datapath_binding_by_key, sbrec_port_binding_by_key,
+            pp->dp_key, pp->port_key);
+        if (!pb || lport_is_activated_by_activation_strategy(pb, chassis)) {
+            ovs_list_remove(&pp->list);
+            free(pp);
+            continue;
+        }
+        const char *activated_chassis = smap_get(
+            &pb->options, "additional-chassis-activated");
+        char *activated_str;
+        if (activated_chassis) {
+            activated_str = xasprintf(
+                "%s,%s", activated_chassis, chassis->name);
+            sbrec_port_binding_update_options_setkey(
+                pb, "additional-chassis-activated", activated_str);
+            free(activated_str);
+        } else {
+            sbrec_port_binding_update_options_setkey(
+                pb, "additional-chassis-activated", chassis->name);
+        }
+    }
+}
+
+void
+tag_port_as_activated_in_engine(struct activated_port *ap) {
+    ovs_mutex_lock(&pinctrl_mutex);
+    struct activated_port *pp;
+    LIST_FOR_EACH_SAFE (pp, list, &ports_to_activate_in_engine) {
+        if (pp->dp_key == ap->dp_key && pp->port_key == ap->port_key) {
+            ovs_list_remove(&pp->list);
+            free(pp);
+        }
+    }
+    ovs_mutex_unlock(&pinctrl_mutex);
+}
+
+static void
+pinctrl_rarp_activation_strategy_handler(const struct match *md)
+    OVS_REQUIRES(pinctrl_mutex)
+{
+    /* Tag the port as activated in-memory. */
+    struct activated_port *pp = xmalloc(sizeof *pp);
+    pp->port_key = md->flow.regs[MFF_LOG_INPORT - MFF_REG0];
+    pp->dp_key = ntohll(md->flow.metadata);
+    ovs_list_push_front(&ports_to_activate_in_db, &pp->list);
+
+    pp = xmalloc(sizeof *pp);
+    pp->port_key = md->flow.regs[MFF_LOG_INPORT - MFF_REG0];
+    pp->dp_key = ntohll(md->flow.metadata);
+    ovs_list_push_front(&ports_to_activate_in_engine, &pp->list);
+
+    /* Notify main thread on pending additional-chassis-activated updates. */
+    notify_pinctrl_main();
+}
+
 static struct hmap put_fdbs;
 
 /* MAC learning (fdb) related functions.  Runs within the main
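
get_ports_to_activate_in_engine() above hands pending activations from the pinctrl thread to the main thread by deep-copying the list while pinctrl_mutex is held, so the caller can walk and free the copy without the lock.  A standalone sketch of that hand-off pattern, using a plain pthread mutex and a hypothetical singly linked port_entry list instead of OVN's ovs_list:

#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for 'struct activated_port'. */
struct port_entry {
    uint32_t dp_key;
    uint32_t port_key;
    struct port_entry *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct port_entry *pending;          /* Producer side. */

static void
push_pending(uint32_t dp_key, uint32_t port_key)
{
    struct port_entry *e = malloc(sizeof *e);

    e->dp_key = dp_key;
    e->port_key = port_key;
    pthread_mutex_lock(&lock);
    e->next = pending;
    pending = e;
    pthread_mutex_unlock(&lock);
}

/* Returns a copy the caller owns (and must free), or NULL when empty. */
static struct port_entry *
snapshot_pending(void)
{
    struct port_entry *copy = NULL;

    pthread_mutex_lock(&lock);
    for (struct port_entry *e = pending; e; e = e->next) {
        struct port_entry *c = malloc(sizeof *c);

        *c = *e;
        c->next = copy;
        copy = c;
    }
    pthread_mutex_unlock(&lock);
    return copy;
}

int
main(void)
{
    push_pending(1, 7);
    push_pending(2, 9);
    for (struct port_entry *e = snapshot_pending(), *next; e; e = next) {
        next = e->next;
        printf("dp %" PRIu32 " port %" PRIu32 "\n", e->dp_key, e->port_key);
        free(e);
    }
    return 0;
}
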
diff --git a/controller/pinctrl.h b/controller/pinctrl.h
index 88f18e9..d4f52e9 100644
--- a/controller/pinctrl.h
+++ b/controller/pinctrl.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 
 #include "lib/sset.h"
+#include "openvswitch/list.h"
 #include "openvswitch/meta-flow.h"
 
 struct hmap;
@@ -33,6 +34,7 @@ struct sbrec_dns_table;
 struct sbrec_controller_event_table;
 struct sbrec_service_monitor_table;
 struct sbrec_bfd_table;
+struct sbrec_port_binding;
 
 void pinctrl_init(void);
 void pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
@@ -56,4 +58,14 @@ void pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
 void pinctrl_wait(struct ovsdb_idl_txn *ovnsb_idl_txn);
 void pinctrl_destroy(void);
 void pinctrl_set_br_int_name(char *br_int_name);
+
+struct activated_port {
+    uint32_t dp_key;
+    uint32_t port_key;
+    struct ovs_list list;
+};
+
+void tag_port_as_activated_in_engine(struct activated_port *ap);
+struct ovs_list *get_ports_to_activate_in_engine(void);
+bool pinctrl_is_port_activated(int64_t dp_key, int64_t port_key);
 #endif /* controller/pinctrl.h */
diff --git a/controller/test-vif-plug.c b/controller/test-vif-plug.c
index 01ff37d..d709419 100644
--- a/controller/test-vif-plug.c
+++ b/controller/test-vif-plug.c
@@ -36,7 +36,7 @@ test_vif_plug(struct ovs_cmdl_context *ctx OVS_UNUSED)
     ovs_assert(
         sset_contains(
             vif_plug_get_maintained_iface_options(vif_plug_class),
-            "plug-dummy-option"));
+            "vif-plug-dummy-option"));
 
     struct vif_plug_port_ctx_in ctx_in = {
         .op_type = PLUG_OP_CREATE,
diff --git a/controller/vif-plug.c b/controller/vif-plug.c
index 62b7526..38348bf 100644
--- a/controller/vif-plug.c
+++ b/controller/vif-plug.c
@@ -532,22 +532,14 @@ vif_plug_handle_iface(const struct ovsrec_interface *iface_rec,
  * completeness of the initial data downloading we need this counter so that we
  * do not erronously unplug ports because the data is just not loaded yet.
  */
-#define VIF_PLUG_PRIME_IDL_COUNT_SEEED 10
-static int vif_plug_prime_idl_count = VIF_PLUG_PRIME_IDL_COUNT_SEEED;
-
-void
-vif_plug_reset_idl_prime_counter(void)
-{
-    vif_plug_prime_idl_count = VIF_PLUG_PRIME_IDL_COUNT_SEEED;
-}
-
 void
 vif_plug_run(struct vif_plug_ctx_in *vif_plug_ctx_in,
              struct vif_plug_ctx_out *vif_plug_ctx_out)
 {
-    if (vif_plug_prime_idl_count && --vif_plug_prime_idl_count > 0) {
-        VLOG_DBG("vif_plug_run: vif_plug_prime_idl_count=%d, will not unplug "
-                 "ports in this iteration.", vif_plug_prime_idl_count);
+    bool delay_plug = daemon_started_recently();
+    if (delay_plug) {
+        VLOG_DBG("vif_plug_run: daemon started recently, will not unplug "
+                 "ports in this iteration.");
     }
 
     if (!vif_plug_ctx_in->chassis_rec) {
@@ -557,7 +549,7 @@ vif_plug_run(struct vif_plug_ctx_in *vif_plug_ctx_in,
     OVSREC_INTERFACE_TABLE_FOR_EACH (iface_rec,
                                      vif_plug_ctx_in->iface_table) {
         vif_plug_handle_iface(iface_rec, vif_plug_ctx_in, vif_plug_ctx_out,
-                              !vif_plug_prime_idl_count);
+                              !delay_plug);
     }
 
     struct sbrec_port_binding *target =
@@ -573,7 +565,7 @@ vif_plug_run(struct vif_plug_ctx_in *vif_plug_ctx_in,
         enum en_lport_type lport_type = get_lport_type(pb);
         if (lport_type == LP_VIF) {
             vif_plug_handle_lport_vif(pb, vif_plug_ctx_in, vif_plug_ctx_out,
-                                      !vif_plug_prime_idl_count);
+                                      !delay_plug);
         }
     }
     sbrec_port_binding_index_destroy_row(target);
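
The hunk above swaps the old IDL "prime counter" for a time-based test: while the daemon started recently, interfaces are still processed but nothing is unplugged.  daemon_started_recently() itself is provided elsewhere in the tree; a standalone sketch of such a check, with illustrative names and an arbitrary threshold, could look like this:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static struct timespec start_time;

static void
record_start_time(void)
{
    clock_gettime(CLOCK_MONOTONIC, &start_time);
}

/* True while fewer than 'threshold_sec' seconds have passed since startup. */
static bool
started_recently(long threshold_sec)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    return now.tv_sec - start_time.tv_sec < threshold_sec;
}

int
main(void)
{
    record_start_time();
    printf("recently started: %d\n", started_recently(30));
    return 0;
}
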
@@ -582,8 +574,8 @@ vif_plug_run(struct vif_plug_ctx_in *vif_plug_ctx_in,
 static void
 vif_plug_finish_deleted__(struct shash *deleted_iface_ids, bool txn_success)
 {
-    struct shash_node *node, *next;
-    SHASH_FOR_EACH_SAFE (node, next, deleted_iface_ids) {
+    struct shash_node *node;
+    SHASH_FOR_EACH_SAFE (node, deleted_iface_ids) {
         struct vif_plug_port_ctx *vif_plug_port_ctx = node->data;
         if (txn_success) {
             vif_plug_port_finish(vif_plug_port_ctx->vif_plug,
@@ -608,8 +600,8 @@ vif_plug_finish_deleted(struct shash *deleted_iface_ids) {
 static void
 vif_plug_finish_changed__(struct shash *changed_iface_ids, bool txn_success)
 {
-    struct shash_node *node, *next;
-    SHASH_FOR_EACH_SAFE (node, next, changed_iface_ids) {
+    struct shash_node *node;
+    SHASH_FOR_EACH_SAFE (node, changed_iface_ids) {
         struct vif_plug_port_ctx *vif_plug_port_ctx = node->data;
         if (txn_success) {
             vif_plug_port_finish(vif_plug_port_ctx->vif_plug,
diff --git a/controller/vif-plug.h b/controller/vif-plug.h
index 7606359..7a1978e 100644
--- a/controller/vif-plug.h
+++ b/controller/vif-plug.h
@@ -71,7 +71,6 @@ void vif_plug_clear_changed(struct shash *deleted_iface_ids);
 void vif_plug_finish_changed(struct shash *changed_iface_ids);
 void vif_plug_clear_deleted(struct shash *deleted_iface_ids);
 void vif_plug_finish_deleted(struct shash *changed_iface_ids);
-void vif_plug_reset_idl_prime_counter(void);
 
 #ifdef  __cplusplus
 }
diff --git a/ic/ovn-ic.c b/ic/ovn-ic.c
index a9b797a..e5c193d 100644
--- a/ic/ovn-ic.c
+++ b/ic/ovn-ic.c
@@ -313,10 +313,6 @@ sync_isb_gw_to_sb(struct ic_context *ctx,
 {
     sbrec_chassis_set_hostname(chassis, gw->hostname);
     sbrec_chassis_update_other_config_setkey(chassis, "is-remote", "true");
-    /* TODO(lucasagomes): Continue writing the configuration to the
-     * external_ids column for backward compatibility with the current
-     * systems, this behavior should be removed in the future. */
-    sbrec_chassis_update_external_ids_setkey(chassis, "is-remote", "true");
 
     /* Sync encaps used by this gateway. */
     ovs_assert(gw->n_encaps);
@@ -925,7 +921,12 @@ parse_route(const char *s_prefix, const char *s_nexthop,
     }
 
     unsigned int nlen;
-    return ip46_parse_cidr(s_nexthop, nexthop, &nlen);
+    if (!ip46_parse_cidr(s_nexthop, nexthop, &nlen)) {
+        return false;
+    }
+
+    /* Do not learn routes with link-local next hop. */
+    return !in6_is_lla(nexthop);
 }
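
The added check rejects next hops in fe80::/10 so that link-local routes are never learned.  A standalone sketch of the same test using only libc (in OVN, in6_is_lla() plays this role):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>

/* True when 'addr' parses as IPv6 and falls in fe80::/10. */
static bool
nexthop_is_link_local(const char *addr)
{
    struct in6_addr a;

    if (inet_pton(AF_INET6, addr, &a) != 1) {
        return false;
    }
    return IN6_IS_ADDR_LINKLOCAL(&a);
}

int
main(void)
{
    printf("%d %d\n", nexthop_is_link_local("fe80::1"),
           nexthop_is_link_local("2001:db8::1"));   /* Prints "1 0". */
    return 0;
}
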
 
 /* Return false if can't be added due to bad format. */
@@ -1406,8 +1407,8 @@ sync_learned_routes(struct ic_context *ctx,
     }
 
     /* Delete extra learned routes. */
-    struct ic_route_info *route_learned, *next;
-    HMAP_FOR_EACH_SAFE (route_learned, next, node, &ic_lr->routes_learned) {
+    struct ic_route_info *route_learned;
+    HMAP_FOR_EACH_SAFE (route_learned, node, &ic_lr->routes_learned) {
         VLOG_DBG("Delete route %s -> %s that is not in IC-SB from NB.",
                  route_learned->nb_route->ip_prefix,
                  route_learned->nb_route->nexthop);
@@ -1481,8 +1482,8 @@ advertise_routes(struct ic_context *ctx,
     icsbrec_route_index_destroy_row(isb_route_key);
 
     /* Create the missing routes in IC-SB */
-    struct ic_route_info *route_adv, *next;
-    HMAP_FOR_EACH_SAFE (route_adv, next, node, routes_ad) {
+    struct ic_route_info *route_adv;
+    HMAP_FOR_EACH_SAFE (route_adv, node, routes_ad) {
         isb_route = icsbrec_route_insert(ctx->ovnisb_txn);
         icsbrec_route_set_transit_switch(isb_route, ts_name);
         icsbrec_route_set_availability_zone(isb_route, az);
@@ -1673,8 +1674,8 @@ route_run(struct ic_context *ctx,
     }
     icsbrec_port_binding_index_destroy_row(isb_pb_key);
 
-    struct ic_router_info *ic_lr, *next;
-    HMAP_FOR_EACH_SAFE (ic_lr, next, node, &ic_lrs) {
+    struct ic_router_info *ic_lr;
+    HMAP_FOR_EACH_SAFE (ic_lr, node, &ic_lrs) {
         advertise_lr_routes(ctx, az, ic_lr);
         sync_learned_routes(ctx, az, ic_lr);
         free(ic_lr->isb_pbs);
diff --git a/include/ovn/actions.h b/include/ovn/actions.h
index 5477975..d7ee84d 100644
--- a/include/ovn/actions.h
+++ b/include/ovn/actions.h
@@ -116,6 +116,11 @@ struct ovn_extend_table;
     OVNACT(PUT_FDB,           ovnact_put_fdb)         \
     OVNACT(GET_FDB,           ovnact_get_fdb)         \
     OVNACT(LOOKUP_FDB,        ovnact_lookup_fdb)      \
+    OVNACT(CHECK_IN_PORT_SEC,  ovnact_result)         \
+    OVNACT(CHECK_OUT_PORT_SEC, ovnact_result)         \
+    OVNACT(COMMIT_ECMP_NH,    ovnact_commit_ecmp_nh)  \
+    OVNACT(CHK_ECMP_NH_MAC,   ovnact_result)          \
+    OVNACT(CHK_ECMP_NH,       ovnact_result)          \
 
 /* enum ovnact_type, with a member OVNACT_<ENUM> for each action. */
 enum OVS_PACKED_ENUM ovnact_type {
@@ -451,6 +456,13 @@ struct ovnact_lookup_fdb {
     struct expr_field dst;     /* 1-bit destination field. */
 };
 
+/* OVNACT_COMMIT_ECMP_NH. */
+struct ovnact_commit_ecmp_nh {
+    struct ovnact ovnact;
+    bool ipv6;
+    uint8_t proto;
+};
+
 /* Internal use by the helpers below. */
 void ovnact_init(struct ovnact *, enum ovnact_type, size_t len);
 void *ovnact_put(struct ofpbuf *, enum ovnact_type, size_t len);
@@ -681,6 +693,9 @@ enum action_opcode {
     /* put_fdb(inport, eth.src).
      */
     ACTION_OPCODE_PUT_FDB,
+
+    /* activation_strategy_rarp() */
+    ACTION_OPCODE_ACTIVATION_STRATEGY_RARP,
 };
 
 /* Header. */
@@ -806,6 +821,10 @@ struct ovnact_encode_params {
                          * 'get_fdb' to resubmit. */
     uint8_t fdb_lookup_ptable; /* OpenFlow table for
                                 * 'lookup_fdb' to resubmit. */
+    uint8_t in_port_sec_ptable; /* OpenFlow table for
+                                * 'check_in_port_sec' to resubmit. */
+    uint8_t out_port_sec_ptable; /* OpenFlow table for
+                                * 'check_out_port_sec' to resubmit. */
     uint32_t ctrl_meter_id;     /* Meter to be used if the resulting flow
                                    sends packets to controller. */
     uint32_t common_nat_ct_zone; /* When performing NAT in a common CT zone,
@@ -820,4 +839,7 @@ void ovnacts_free(struct ovnact[], size_t ovnacts_len);
 char *ovnact_op_to_string(uint32_t);
 int encode_ra_dnssl_opt(char *data, char *buf, int buf_len);
 
+void encode_controller_op(enum action_opcode opcode, uint32_t meter_id,
+                          struct ofpbuf *ofpacts);
+
 #endif /* ovn/actions.h */
diff --git a/include/ovn/features.h b/include/ovn/features.h
index 8fbdbf1..679f674 100644
--- a/include/ovn/features.h
+++ b/include/ovn/features.h
@@ -23,6 +23,7 @@
 /* ovn-controller supported feature names. */
 #define OVN_FEATURE_PORT_UP_NOTIF      "port-up-notif"
 #define OVN_FEATURE_CT_NO_MASKED_LABEL "ct-no-masked-label"
+#define OVN_FEATURE_MAC_BINDING_TIMESTAMP "mac-binding-timestamp"
 
 /* OVS datapath supported features.  Based on availability OVN might generate
  * different types of openflows.
diff --git a/include/ovn/logical-fields.h b/include/ovn/logical-fields.h
index 1851663..3db7265 100644
--- a/include/ovn/logical-fields.h
+++ b/include/ovn/logical-fields.h
@@ -69,6 +69,8 @@ enum mff_log_flags_bits {
     MLF_SKIP_SNAT_FOR_LB_BIT = 9,
     MLF_LOCALPORT_BIT = 10,
     MLF_USE_SNAT_ZONE = 11,
+    MLF_CHECK_PORT_SEC_BIT = 12,
+    MLF_LOOKUP_COMMIT_ECMP_NH_BIT = 13,
 };
 
 /* MFF_LOG_FLAGS_REG flag assignments */
@@ -112,6 +114,8 @@ enum mff_log_flags {
 
     /* Indicate the packet has been received from a localport */
     MLF_LOCALPORT = (1 << MLF_LOCALPORT_BIT),
+
+    MLF_LOOKUP_COMMIT_ECMP_NH = (1 << MLF_LOOKUP_COMMIT_ECMP_NH_BIT),
 };
 
 /* OVN logical fields
diff --git a/lib/actions.c b/lib/actions.c
index a9c2760..adbb42d 100644
--- a/lib/actions.c
+++ b/lib/actions.c
@@ -41,6 +41,7 @@
 #include "uuid.h"
 #include "socket-util.h"
 #include "lib/ovn-util.h"
+#include "controller/lflow.h"
 
 VLOG_DEFINE_THIS_MODULE(actions);
 
@@ -108,7 +109,7 @@ encode_finish_controller_op(size_t ofs, struct ofpbuf *ofpacts)
     ofpact_finish_CONTROLLER(ofpacts, &oc);
 }
 
-static void
+void
 encode_controller_op(enum action_opcode opcode, uint32_t meter_id,
                      struct ofpbuf *ofpacts)
 {
@@ -2862,10 +2863,21 @@ encode_PUT_DHCPV4_OPTS(const struct ovnact_put_opts *pdo,
         opt_header[1] = strlen(c->string);
         ofpbuf_put(ofpacts, c->string, opt_header[1]);
     }
+    /* Encode next_server opt (253) */
+    const struct ovnact_gen_option *next_server_opt = find_opt(
+        pdo->options, pdo->n_options, DHCP_OPT_NEXT_SERVER_CODE);
+    if (next_server_opt) {
+        uint8_t *opt_header = ofpbuf_put_zeros(ofpacts, 2);
+        const union expr_constant *c = next_server_opt->value.values;
+        opt_header[0] = next_server_opt->option->code;
+        opt_header[1] = sizeof(ovs_be32);
+        ofpbuf_put(ofpacts, &c->value.ipv4, sizeof(ovs_be32));
+    }
 
     for (size_t i = 0; i < pdo->n_options; i++) {
         const struct ovnact_gen_option *o = &pdo->options[i];
-        if (o != offerip_opt && o != boot_opt && o != boot_alt_opt) {
+        if (o != offerip_opt && o != boot_opt && o != boot_alt_opt &&
+            o != next_server_opt) {
             encode_put_dhcpv4_option(o, ofpacts);
         }
     }
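
The next_server handling above writes the usual one-byte code / one-byte length header followed by the IPv4 value in network byte order.  A standalone sketch of that code/length/value layout, with a hypothetical flat-buffer helper standing in for ofpbuf:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Writes <code><len><ipv4> into 'buf' and returns the number of bytes used.
 * 'buf' must have room for 6 bytes. */
static size_t
put_ipv4_option(uint8_t *buf, uint8_t code, uint32_t ipv4_host_order)
{
    uint32_t be = htonl(ipv4_host_order);

    buf[0] = code;
    buf[1] = sizeof be;
    memcpy(&buf[2], &be, sizeof be);
    return 2 + sizeof be;
}

int
main(void)
{
    uint8_t buf[6];
    size_t n = put_ipv4_option(buf, 253, 0xc0a80001);   /* 192.168.0.1 */

    printf("wrote %zu bytes: code %d, len %d\n", n, buf[0], buf[1]);
    return 0;
}
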
@@ -4004,19 +4016,20 @@ format_CHK_LB_HAIRPIN_REPLY(const struct ovnact_result *res, struct ds *s)
 }
 
 static void
-encode_chk_lb_hairpin__(const struct ovnact_result *res,
-                        uint8_t hairpin_table,
-                        struct ofpbuf *ofpacts)
+encode_result_action__(const struct ovnact_result *res,
+                       uint8_t resubmit_table,
+                       int log_flags_result_bit,
+                       struct ofpbuf *ofpacts)
 {
     struct mf_subfield dst = expr_resolve_field(&res->dst);
     ovs_assert(dst.field);
-    put_load(0, MFF_LOG_FLAGS, MLF_LOOKUP_LB_HAIRPIN_BIT, 1, ofpacts);
-    emit_resubmit(ofpacts, hairpin_table);
+    put_load(0, MFF_LOG_FLAGS, log_flags_result_bit, 1, ofpacts);
+    emit_resubmit(ofpacts, resubmit_table);
 
     struct ofpact_reg_move *orm = ofpact_put_REG_MOVE(ofpacts);
     orm->dst = dst;
     orm->src.field = mf_from_id(MFF_LOG_FLAGS);
-    orm->src.ofs = MLF_LOOKUP_LB_HAIRPIN_BIT;
+    orm->src.ofs = log_flags_result_bit;
     orm->src.n_bits = 1;
 }
 
@@ -4025,7 +4038,8 @@ encode_CHK_LB_HAIRPIN(const struct ovnact_result *res,
                       const struct ovnact_encode_params *ep,
                       struct ofpbuf *ofpacts)
 {
-    encode_chk_lb_hairpin__(res, ep->lb_hairpin_ptable, ofpacts);
+    encode_result_action__(res, ep->lb_hairpin_ptable,
+                           MLF_LOOKUP_LB_HAIRPIN_BIT, ofpacts);
 }
 
 static void
@@ -4033,7 +4047,8 @@ encode_CHK_LB_HAIRPIN_REPLY(const struct ovnact_result *res,
                             const struct ovnact_encode_params *ep,
                             struct ofpbuf *ofpacts)
 {
-    encode_chk_lb_hairpin__(res, ep->lb_hairpin_reply_ptable, ofpacts);
+    encode_result_action__(res, ep->lb_hairpin_reply_ptable,
+                           MLF_LOOKUP_LB_HAIRPIN_BIT, ofpacts);
 }
 
 static void
@@ -4216,6 +4231,375 @@ ovnact_lookup_fdb_free(struct ovnact_lookup_fdb *get_fdb OVS_UNUSED)
 {
 }
 
+static void
+parse_check_in_port_sec(struct action_context *ctx,
+                        const struct expr_field *dst,
+                        struct ovnact_result *dl)
+{
+    parse_ovnact_result(ctx, "check_in_port_sec", NULL, dst, dl);
+}
+
+static void
+format_CHECK_IN_PORT_SEC(const struct ovnact_result *dl, struct ds *s)
+{
+    expr_field_format(&dl->dst, s);
+    ds_put_cstr(s, " = check_in_port_sec();");
+}
+
+static void
+encode_CHECK_IN_PORT_SEC(const struct ovnact_result *dl,
+                         const struct ovnact_encode_params *ep,
+                         struct ofpbuf *ofpacts)
+{
+    encode_result_action__(dl, ep->in_port_sec_ptable,
+                           MLF_CHECK_PORT_SEC_BIT, ofpacts);
+}
+
+static void
+parse_check_out_port_sec(struct action_context *ctx,
+                         const struct expr_field *dst,
+                         struct ovnact_result *dl)
+{
+    parse_ovnact_result(ctx, "check_out_port_sec", NULL, dst, dl);
+}
+
+static void
+format_CHECK_OUT_PORT_SEC(const struct ovnact_result *dl, struct ds *s)
+{
+    expr_field_format(&dl->dst, s);
+    ds_put_cstr(s, " = check_out_port_sec();");
+}
+
+static void
+encode_CHECK_OUT_PORT_SEC(const struct ovnact_result *dl,
+                          const struct ovnact_encode_params *ep,
+                          struct ofpbuf *ofpacts)
+{
+    encode_result_action__(dl, ep->out_port_sec_ptable,
+                           MLF_CHECK_PORT_SEC_BIT, ofpacts);
+}
+
+static void
+parse_commit_ecmp_nh(struct action_context *ctx,
+                     struct ovnact_commit_ecmp_nh *ecmp_nh)
+{
+    uint8_t proto;
+    bool ipv6;
+
+    lexer_force_match(ctx->lexer, LEX_T_LPAREN); /* Skip '('. */
+    if (!lexer_match_id(ctx->lexer, "ipv6")) {
+        lexer_syntax_error(ctx->lexer, "invalid parameter");
+        return;
+    }
+    if (!lexer_force_match(ctx->lexer, LEX_T_EQUALS)) {
+        lexer_syntax_error(ctx->lexer, "invalid parameter");
+        return;
+    }
+    if (lexer_match_string(ctx->lexer, "true") ||
+        lexer_match_id(ctx->lexer, "true")) {
+        ipv6 = true;
+    } else if (lexer_match_string(ctx->lexer, "false") ||
+               lexer_match_id(ctx->lexer, "false")) {
+        ipv6 = false;
+    } else {
+        lexer_syntax_error(ctx->lexer,
+                           "expecting true or false");
+        return;
+    }
+
+    lexer_force_match(ctx->lexer, LEX_T_COMMA);
+
+    if (!lexer_match_id(ctx->lexer, "proto")) {
+        lexer_syntax_error(ctx->lexer, "invalid parameter");
+        return;
+    }
+    if (!lexer_force_match(ctx->lexer, LEX_T_EQUALS)) {
+        lexer_syntax_error(ctx->lexer, "invalid parameter");
+        return;
+    }
+    if (lexer_match_id(ctx->lexer, "tcp")) {
+        proto = IPPROTO_TCP;
+    } else if (lexer_match_id(ctx->lexer, "udp")) {
+        proto = IPPROTO_UDP;
+    } else if (lexer_match_id(ctx->lexer, "sctp")) {
+        proto = IPPROTO_SCTP;
+    } else {
+        lexer_syntax_error(ctx->lexer, "invalid protocol");
+        return;
+    }
+
+    lexer_force_match(ctx->lexer, LEX_T_RPAREN); /* Skip ')'. */
+
+    ecmp_nh->proto = proto;
+    ecmp_nh->ipv6 = ipv6;
+}
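
parse_commit_ecmp_nh() accepts commit_ecmp_nh(ipv6=<bool>, proto=tcp|udp|sctp), where the boolean may be given as a bare identifier or a quoted string.  A standalone sketch of the same option grammar using sscanf instead of the OVN lexer (bare identifiers only; names are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ecmp_nh_opts {
    bool ipv6;
    char proto[8];              /* "tcp", "udp" or "sctp". */
};

static bool
parse_ecmp_nh_opts(const char *s, struct ecmp_nh_opts *opts)
{
    char ipv6[8];

    if (sscanf(s, "commit_ecmp_nh(ipv6 = %7[a-z] , proto = %7[a-z] );",
               ipv6, opts->proto) != 2) {
        return false;
    }
    if (!strcmp(ipv6, "true")) {
        opts->ipv6 = true;
    } else if (!strcmp(ipv6, "false")) {
        opts->ipv6 = false;
    } else {
        return false;
    }
    return !strcmp(opts->proto, "tcp") || !strcmp(opts->proto, "udp")
           || !strcmp(opts->proto, "sctp");
}

int
main(void)
{
    struct ecmp_nh_opts opts;
    bool ok = parse_ecmp_nh_opts("commit_ecmp_nh(ipv6 = false, proto = udp);",
                                 &opts);

    printf("ok=%d ipv6=%d proto=%s\n", ok, opts.ipv6, opts.proto);
    return 0;
}
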
+
+static void
+format_COMMIT_ECMP_NH(const struct ovnact_commit_ecmp_nh *ecmp_nh,
+                      struct ds *s)
+{
+    const char *proto;
+
+    switch (ecmp_nh->proto) {
+    case IPPROTO_UDP:
+        proto = "udp";
+        break;
+    case IPPROTO_SCTP:
+        proto = "sctp";
+        break;
+    case IPPROTO_TCP:
+    default:
+        proto = "tcp";
+        break;
+    }
+    ds_put_format(s, "commit_ecmp_nh(ipv6 = %s, proto = %s);",
+                  ecmp_nh->ipv6 ? "true" : "false", proto);
+}
+
+static void
+ovnact_commit_ecmp_nh_free(struct ovnact_commit_ecmp_nh *ecmp_nh OVS_UNUSED)
+{
+}
+
+static void
+commit_ecmp_learn_action(struct ofpbuf *ofpacts, bool nw_conn,
+                         bool ipv6, uint8_t proto)
+{
+    struct ofpact_learn *ol = ofpact_put_LEARN(ofpacts);
+    struct match match = MATCH_CATCHALL_INITIALIZER;
+    struct ofpact_learn_spec *ol_spec;
+    unsigned int imm_bytes;
+    uint8_t *src_imm;
+
+    ol->flags = NX_LEARN_F_DELETE_LEARNED;
+    ol->idle_timeout = 20; /* seconds. */
+    ol->hard_timeout = 30; /* seconds. */
+    ol->priority = OFP_DEFAULT_PRIORITY;
+    ol->table_id = nw_conn ? OFTABLE_ECMP_NH_MAC : OFTABLE_ECMP_NH;
+
+    /* Match on the metadata of the packet that created the new flow. */
+    ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+    ol_spec->dst.field = mf_from_id(MFF_METADATA);
+    ol_spec->dst.ofs = 0;
+    ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+    ol_spec->n_bits = ol_spec->dst.n_bits;
+    ol_spec->dst_type = NX_LEARN_DST_MATCH;
+    ol_spec->src_type = NX_LEARN_SRC_FIELD;
+    ol_spec->src.field = mf_from_id(MFF_METADATA);
+
+    if (nw_conn) {
+        ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+        ol_spec->dst.field = mf_from_id(MFF_ETH_SRC);
+        ol_spec->src.field = mf_from_id(MFF_ETH_SRC);
+        ol_spec->dst.ofs = 0;
+        ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+        ol_spec->n_bits = ol_spec->dst.n_bits;
+        ol_spec->dst_type = NX_LEARN_DST_MATCH;
+        ol_spec->src_type = NX_LEARN_SRC_FIELD;
+    }
+
+    /* Match on the same ETH type as the packet that created the new flow. */
+    ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+    ol_spec->dst.field = mf_from_id(MFF_ETH_TYPE);
+    ol_spec->dst.ofs = 0;
+    ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+    ol_spec->n_bits = ol_spec->dst.n_bits;
+    ol_spec->dst_type = NX_LEARN_DST_MATCH;
+    ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE;
+    union mf_value imm_eth_type = {
+        .be16 = ipv6 ? htons(ETH_TYPE_IPV6) : htons(ETH_TYPE_IP)
+    };
+    mf_write_subfield_value(&ol_spec->dst, &imm_eth_type, &match);
+    /* Push value last, as this may reallocate 'ol_spec'. */
+    imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8);
+    src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
+    memcpy(src_imm, &imm_eth_type, imm_bytes);
+
+    /* IP src. */
+    ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+    ol_spec->dst.field =
+        ipv6 ? mf_from_id(MFF_IPV6_SRC) : mf_from_id(MFF_IPV4_SRC);
+    if (nw_conn) {
+        ol_spec->src.field =
+            ipv6 ? mf_from_id(MFF_IPV6_SRC) : mf_from_id(MFF_IPV4_SRC);
+    } else {
+        ol_spec->src.field =
+            ipv6 ? mf_from_id(MFF_IPV6_DST) : mf_from_id(MFF_IPV4_DST);
+    }
+    ol_spec->dst.ofs = 0;
+    ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+    ol_spec->n_bits = ol_spec->dst.n_bits;
+    ol_spec->dst_type = NX_LEARN_DST_MATCH;
+    ol_spec->src_type = NX_LEARN_SRC_FIELD;
+
+    /* IP dst. */
+    ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+    ol_spec->dst.field =
+        ipv6 ? mf_from_id(MFF_IPV6_DST) : mf_from_id(MFF_IPV4_DST);
+    if (nw_conn) {
+        ol_spec->src.field =
+            ipv6 ? mf_from_id(MFF_IPV6_DST) : mf_from_id(MFF_IPV4_DST);
+    } else {
+        ol_spec->src.field =
+            ipv6 ? mf_from_id(MFF_IPV6_SRC) : mf_from_id(MFF_IPV4_SRC);
+    }
+    ol_spec->dst.ofs = 0;
+    ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+    ol_spec->n_bits = ol_spec->dst.n_bits;
+    ol_spec->dst_type = NX_LEARN_DST_MATCH;
+    ol_spec->src_type = NX_LEARN_SRC_FIELD;
+
+    /* IP proto. */
+    union mf_value imm_proto = {
+        .u8 = proto,
+    };
+    ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+    ol_spec->dst.field = mf_from_id(MFF_IP_PROTO);
+    ol_spec->src.field = mf_from_id(MFF_IP_PROTO);
+    ol_spec->dst.ofs = 0;
+    ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+    ol_spec->n_bits = ol_spec->dst.n_bits;
+    ol_spec->dst_type = NX_LEARN_DST_MATCH;
+    ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE;
+    mf_write_subfield_value(&ol_spec->dst, &imm_proto, &match);
+    /* Push value last, as this may reallocate 'ol_spec' */
+    imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8);
+    src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
+    memcpy(src_imm, &imm_proto, imm_bytes);
+
+    /* src port */
+    ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+    switch (proto) {
+    case IPPROTO_TCP:
+        ol_spec->dst.field = mf_from_id(MFF_TCP_SRC);
+        ol_spec->src.field =
+            nw_conn ? mf_from_id(MFF_TCP_SRC) : mf_from_id(MFF_TCP_DST);
+        break;
+    case IPPROTO_UDP:
+        ol_spec->dst.field = mf_from_id(MFF_UDP_SRC);
+        ol_spec->src.field =
+            nw_conn ? mf_from_id(MFF_UDP_SRC) : mf_from_id(MFF_UDP_DST);
+        break;
+    case IPPROTO_SCTP:
+        ol_spec->dst.field = mf_from_id(MFF_SCTP_SRC);
+        ol_spec->src.field =
+            nw_conn ? mf_from_id(MFF_SCTP_SRC) : mf_from_id(MFF_SCTP_DST);
+        break;
+    default:
+        OVS_NOT_REACHED();
+        break;
+    }
+    ol_spec->dst.ofs = 0;
+    ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+    ol_spec->n_bits = ol_spec->dst.n_bits;
+    ol_spec->dst_type = NX_LEARN_DST_MATCH;
+    ol_spec->src_type = NX_LEARN_SRC_FIELD;
+
+    /* dst port */
+    ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+    switch (proto) {
+    case IPPROTO_TCP:
+        ol_spec->dst.field = mf_from_id(MFF_TCP_DST);
+        ol_spec->src.field =
+            nw_conn ? mf_from_id(MFF_TCP_DST) : mf_from_id(MFF_TCP_SRC);
+        break;
+    case IPPROTO_UDP:
+        ol_spec->dst.field = mf_from_id(MFF_UDP_DST);
+        ol_spec->src.field =
+            nw_conn ? mf_from_id(MFF_UDP_DST) : mf_from_id(MFF_UDP_SRC);
+        break;
+    case IPPROTO_SCTP:
+        ol_spec->dst.field = mf_from_id(MFF_SCTP_DST);
+        ol_spec->src.field =
+            nw_conn ? mf_from_id(MFF_SCTP_DST) : mf_from_id(MFF_SCTP_SRC);
+        break;
+    default:
+        OVS_NOT_REACHED();
+        break;
+    }
+    ol_spec->dst.ofs = 0;
+    ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+    ol_spec->n_bits = ol_spec->dst.n_bits;
+    ol_spec->dst_type = NX_LEARN_DST_MATCH;
+    ol_spec->src_type = NX_LEARN_SRC_FIELD;
+
+    /* Set MLF_LOOKUP_COMMIT_ECMP_NH_BIT for ecmp replies. */
+    ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+    ol_spec->dst.field = mf_from_id(MFF_LOG_FLAGS);
+    ol_spec->dst.ofs = MLF_LOOKUP_COMMIT_ECMP_NH_BIT;
+    ol_spec->dst.n_bits = 1;
+    ol_spec->n_bits = ol_spec->dst.n_bits;
+    ol_spec->dst_type = NX_LEARN_DST_LOAD;
+    ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE;
+    union mf_value imm_reg_value = {
+        .u8 = 1
+    };
+    mf_write_subfield_value(&ol_spec->dst, &imm_reg_value, &match);
+
+    /* Push value last, as this may reallocate 'ol_spec' */
+    imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8);
+    src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
+    ol = ofpacts->header;
+    memcpy(src_imm, &imm_reg_value, imm_bytes);
+
+    ofpact_finish_LEARN(ofpacts, &ol);
+}
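
commit_ecmp_learn_action() repeatedly notes that the immediate value must be pushed last because the append may reallocate 'ol_spec', and it re-fetches 'ol' from ofpacts->header before ofpact_finish_LEARN().  A standalone sketch of that rule with a hypothetical growable buffer: keep offsets across appends and re-derive pointers afterwards.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Minimal growable buffer; appending may move the backing store, so any
 * pointer obtained before an append must be re-derived afterwards. */
struct buf {
    char *data;
    size_t used, allocated;
};

static void *
buf_put_zeros(struct buf *b, size_t n)
{
    if (b->used + n > b->allocated) {
        b->allocated = (b->used + n) * 2;
        char *grown = realloc(b->data, b->allocated);
        if (!grown) {
            abort();                        /* Out of memory. */
        }
        b->data = grown;                    /* The buffer may have moved. */
    }
    void *p = &b->data[b->used];
    memset(p, 0, n);
    b->used += n;
    return p;
}

int
main(void)
{
    struct buf b = { NULL, 0, 0 };
    size_t hdr_ofs = b.used;            /* Remember an offset, not a pointer. */

    buf_put_zeros(&b, 8);               /* "Header". */
    buf_put_zeros(&b, 1024);            /* Big append: may reallocate. */

    char *hdr = &b.data[hdr_ofs];       /* Re-derive the pointer afterwards. */
    memcpy(hdr, "SPEC", 4);
    printf("%zu bytes, header starts with %.4s\n", b.used, hdr);
    free(b.data);
    return 0;
}
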
+
+static void
+encode_COMMIT_ECMP_NH(const struct ovnact_commit_ecmp_nh *ecmp_nh,
+                      const struct ovnact_encode_params *ep OVS_UNUSED,
+                      struct ofpbuf *ofpacts)
+{
+    commit_ecmp_learn_action(ofpacts, true, ecmp_nh->ipv6, ecmp_nh->proto);
+    commit_ecmp_learn_action(ofpacts, false, ecmp_nh->ipv6, ecmp_nh->proto);
+}
+
+static void
+parse_chk_ecmp_nh_mac(struct action_context *ctx, const struct expr_field *dst,
+                      struct ovnact_result *res)
+{
+    parse_ovnact_result(ctx, "chk_ecmp_nh_mac", NULL, dst, res);
+}
+
+static void
+format_CHK_ECMP_NH_MAC(const struct ovnact_result *res, struct ds *s)
+{
+    expr_field_format(&res->dst, s);
+    ds_put_cstr(s, " = chk_ecmp_nh_mac();");
+}
+
+static void
+encode_CHK_ECMP_NH_MAC(const struct ovnact_result *res,
+                       const struct ovnact_encode_params *ep OVS_UNUSED,
+                       struct ofpbuf *ofpacts)
+{
+    encode_result_action__(res, OFTABLE_ECMP_NH_MAC,
+                           MLF_LOOKUP_COMMIT_ECMP_NH_BIT, ofpacts);
+}
+
+static void
+parse_chk_ecmp_nh(struct action_context *ctx, const struct expr_field *dst,
+                  struct ovnact_result *res)
+{
+    parse_ovnact_result(ctx, "chk_ecmp_nh", NULL, dst, res);
+}
+
+static void
+format_CHK_ECMP_NH(const struct ovnact_result *res, struct ds *s)
+{
+    expr_field_format(&res->dst, s);
+    ds_put_cstr(s, " = chk_ecmp_nh();");
+}
+
+static void
+encode_CHK_ECMP_NH(const struct ovnact_result *res,
+                   const struct ovnact_encode_params *ep OVS_UNUSED,
+                   struct ofpbuf *ofpacts)
+{
+    encode_result_action__(res, OFTABLE_ECMP_NH,
+                           MLF_LOOKUP_COMMIT_ECMP_NH_BIT, ofpacts);
+}
+
 /* Parses an assignment or exchange or put_dhcp_opts action. */
 static void
 parse_set_action(struct action_context *ctx)
@@ -4284,6 +4668,22 @@ parse_set_action(struct action_context *ctx)
                    && lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
             parse_lookup_fdb(
                 ctx, &lhs, ovnact_put_LOOKUP_FDB(ctx->ovnacts));
+        } else if (!strcmp(ctx->lexer->token.s, "check_in_port_sec")
+                   && lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
+            parse_check_in_port_sec(
+                ctx, &lhs, ovnact_put_CHECK_IN_PORT_SEC(ctx->ovnacts));
+        } else if (!strcmp(ctx->lexer->token.s, "check_out_port_sec")
+                   && lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
+            parse_check_out_port_sec(
+                ctx, &lhs, ovnact_put_CHECK_OUT_PORT_SEC(ctx->ovnacts));
+        } else if (!strcmp(ctx->lexer->token.s, "chk_ecmp_nh_mac")
+                   && lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
+            parse_chk_ecmp_nh_mac(ctx, &lhs,
+                    ovnact_put_CHK_ECMP_NH_MAC(ctx->ovnacts));
+        } else if (!strcmp(ctx->lexer->token.s, "chk_ecmp_nh")
+                   && lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
+            parse_chk_ecmp_nh(ctx, &lhs,
+                    ovnact_put_CHK_ECMP_NH(ctx->ovnacts));
         } else {
             parse_assignment_action(ctx, false, &lhs);
         }
@@ -4388,6 +4788,8 @@ parse_action(struct action_context *ctx)
         ovnact_put_CT_SNAT_TO_VIP(ctx->ovnacts);
     } else if (lexer_match_id(ctx->lexer, "put_fdb")) {
         parse_put_fdb(ctx, ovnact_put_PUT_FDB(ctx->ovnacts));
+    } else if (lexer_match_id(ctx->lexer, "commit_ecmp_nh")) {
+        parse_commit_ecmp_nh(ctx, ovnact_put_COMMIT_ECMP_NH(ctx->ovnacts));
     } else {
         lexer_syntax_error(ctx->lexer, "expecting action");
     }
diff --git a/lib/automake.mk b/lib/automake.mk
index 829aedf..60bead6 100644
--- a/lib/automake.mk
+++ b/lib/automake.mk
@@ -26,6 +26,8 @@ lib_libovn_la_SOURCES = \
 	lib/ovn-parallel-hmap.c \
 	lib/ip-mcast-index.c \
 	lib/ip-mcast-index.h \
+	lib/mac-binding-index.c \
+	lib/mac-binding-index.h \
 	lib/mcast-group-index.c \
 	lib/mcast-group-index.h \
 	lib/lex.c \
@@ -38,6 +40,8 @@ lib_libovn_la_SOURCES = \
 	lib/inc-proc-eng.h \
 	lib/lb.c \
 	lib/lb.h \
+	lib/static-mac-binding-index.c \
+	lib/static-mac-binding-index.h \
 	lib/stopwatch-names.h \
 	lib/vif-plug-provider.h \
 	lib/vif-plug-provider.c \
diff --git a/lib/expr.c b/lib/expr.c
index 058390a..d1f9d28 100644
--- a/lib/expr.c
+++ b/lib/expr.c
@@ -33,19 +33,6 @@
 
 VLOG_DEFINE_THIS_MODULE(expr);
 
-/* Right now conjunction flows generated by ovn-controller
- * has issues. If there are multiple flows with the same
- * match for different conjunctions, ovn-controller doesn't
- * handle it properly.
- * Eg.
- * match 1 - ip4.src == {IP1, IP2} && tcp.dst >=500 && tcp.src <=600
- * action - drop
- *
- * match 2 - ip4.src == {IP1, IP2} && tcp.dst >=700 && tcp.src <=800
- * action - allow.
- */
-static bool force_crossproduct = false;
-
 static struct expr *parse_and_annotate(const char *s,
                                        const struct shash *symtab,
                                        struct ovs_list *nesting,
@@ -269,9 +256,9 @@ expr_not(struct expr *expr)
 static struct expr *
 expr_fix_andor(struct expr *expr, bool short_circuit)
 {
-    struct expr *sub, *next;
+    struct expr *sub;
 
-    LIST_FOR_EACH_SAFE (sub, next, node, &expr->andor) {
+    LIST_FOR_EACH_SAFE (sub, node, &expr->andor) {
         if (sub->type == EXPR_T_BOOLEAN) {
             if (sub->boolean == short_circuit) {
                 expr_destroy(expr);
@@ -1289,9 +1276,9 @@ expr_const_sets_remove(struct shash *const_sets, const char *name)
 void
 expr_const_sets_destroy(struct shash *const_sets)
 {
-    struct shash_node *node, *next;
+    struct shash_node *node;
 
-    SHASH_FOR_EACH_SAFE (node, next, const_sets) {
+    SHASH_FOR_EACH_SAFE (node, const_sets) {
         struct expr_constant_set *cs = node->data;
 
         shash_delete(const_sets, node);
@@ -1834,9 +1821,9 @@ expr_symtab_add_ovn_field(struct shash *symtab, const char *name,
 void
 expr_symtab_destroy(struct shash *symtab)
 {
-    struct shash_node *node, *next;
+    struct shash_node *node;
 
-    SHASH_FOR_EACH_SAFE (node, next, symtab) {
+    SHASH_FOR_EACH_SAFE (node, symtab) {
         struct expr_symbol *symbol = node->data;
 
         shash_delete(symtab, node);
@@ -1914,7 +1901,7 @@ expr_destroy(struct expr *expr)
 
     free(expr->as_name);
 
-    struct expr *sub, *next;
+    struct expr *sub;
 
     switch (expr->type) {
     case EXPR_T_CMP:
@@ -1925,7 +1912,7 @@ expr_destroy(struct expr *expr)
 
     case EXPR_T_AND:
     case EXPR_T_OR:
-        LIST_FOR_EACH_SAFE (sub, next, node, &expr->andor) {
+        LIST_FOR_EACH_SAFE (sub, node, &expr->andor) {
             ovs_list_remove(&sub->node);
             expr_destroy(sub);
         }
@@ -2498,7 +2485,7 @@ crush_and_string(struct expr *expr, const struct expr_symbol *symbol)
 
     /* Otherwise the result is the intersection of all of the ORs. */
     struct sset result = SSET_INITIALIZER(&result);
-    LIST_FOR_EACH_SAFE (sub, next, node, &expr->andor) {
+    LIST_FOR_EACH_SAFE (sub, node, &expr->andor) {
         struct sset strings = SSET_INITIALIZER(&strings);
         const struct expr *s;
         LIST_FOR_EACH (s, node, &sub->andor) {
@@ -2602,7 +2589,7 @@ crush_and_numeric(struct expr *expr, const struct expr_symbol *symbol)
         ovs_list_init(&or->andor);
 
         ovs_assert(disjuncts->type == EXPR_T_OR);
-        LIST_FOR_EACH_SAFE (sub, next, node, &disjuncts->andor) {
+        LIST_FOR_EACH_SAFE (sub, node, &disjuncts->andor) {
             ovs_assert(sub->type == EXPR_T_CMP);
             ovs_list_remove(&sub->node);
             if (mf_subvalue_intersect(&value, &mask,
@@ -2920,7 +2907,7 @@ expr_normalize_and(struct expr *expr)
 
         ovs_assert(sub->type == EXPR_T_OR);
         const struct expr_symbol *symbol = expr_get_unique_symbol(sub);
-        if (!symbol || force_crossproduct || symbol->must_crossproduct ) {
+        if (!symbol || symbol->must_crossproduct) {
             struct expr *or = expr_create_andor(EXPR_T_OR);
             struct expr *k;
 
@@ -3262,7 +3249,7 @@ add_cmp_flow(const struct expr *cmp,
  * a collection of Open vSwitch flows in 'matches', which this function
  * initializes to an hmap of "struct expr_match" structures.  Returns the
  * number of conjunctive match IDs consumed by 'matches', which uses
- * conjunctive match IDs beginning with 0; the caller must offset or remap them
+ * conjunctive match IDs beginning with 1; the caller must offset or remap them
  * into the desired range as necessary.
  *
  * The matches inserted into 'matches' will be of three distinct kinds:
diff --git a/lib/extend-table.c b/lib/extend-table.c
index 32d541b..ebb1a05 100644
--- a/lib/extend-table.c
+++ b/lib/extend-table.c
@@ -40,13 +40,17 @@ ovn_extend_table_init(struct ovn_extend_table *table)
 }
 
 static struct ovn_extend_table_info *
-ovn_extend_table_info_alloc(const char *name, uint32_t id, bool is_new_id,
+ovn_extend_table_info_alloc(const char *name, uint32_t id,
+                            struct ovn_extend_table_info *peer,
                             uint32_t hash)
 {
     struct ovn_extend_table_info *e = xmalloc(sizeof *e);
     e->name = xstrdup(name);
     e->table_id = id;
-    e->new_table_id = is_new_id;
+    e->peer = peer;
+    if (peer) {
+        peer->peer = e;
+    }
     e->hmap_node.hash = hash;
     hmap_init(&e->references);
     return e;
@@ -56,8 +60,8 @@ static void
 ovn_extend_table_info_destroy(struct ovn_extend_table_info *e)
 {
     free(e->name);
-    struct ovn_extend_table_lflow_ref *r, *r_next;
-    HMAP_FOR_EACH_SAFE (r, r_next, hmap_node, &e->references) {
+    struct ovn_extend_table_lflow_ref *r;
+    HMAP_FOR_EACH_SAFE (r, hmap_node, &e->references) {
         hmap_remove(&e->references, &r->hmap_node);
         ovs_list_remove(&r->list_node);
         free(r);
@@ -170,23 +174,24 @@ ovn_extend_info_del_lflow_ref(struct ovn_extend_table_lflow_ref *r)
 void
 ovn_extend_table_clear(struct ovn_extend_table *table, bool existing)
 {
-    struct ovn_extend_table_info *g, *next;
+    struct ovn_extend_table_info *g;
     struct hmap *target = existing ? &table->existing : &table->desired;
 
     /* Clear lflow_to_desired index, if the target is desired table. */
     if (!existing) {
-        struct ovn_extend_table_lflow_to_desired *l, *l_next;
-        HMAP_FOR_EACH_SAFE (l, l_next, hmap_node, &table->lflow_to_desired) {
+        struct ovn_extend_table_lflow_to_desired *l;
+        HMAP_FOR_EACH_SAFE (l, hmap_node, &table->lflow_to_desired) {
             ovn_extend_table_delete_desired(table, l);
         }
     }
 
     /* Clear the target table. */
-    HMAP_FOR_EACH_SAFE (g, next, hmap_node, target) {
+    HMAP_FOR_EACH_SAFE (g, hmap_node, target) {
         hmap_remove(target, &g->hmap_node);
-        /* Don't unset bitmap for desired group_info if the group_id
-         * was not freshly reserved. */
-        if (existing || g->new_table_id) {
+        if (g->peer) {
+            g->peer->peer = NULL;
+        } else {
+            /* Clear the ID bit because the peer has already been deleted. */
             bitmap_set0(table->table_ids, g->table_id);
         }
         ovn_extend_table_info_destroy(g);
@@ -209,11 +214,15 @@ void
 ovn_extend_table_remove_existing(struct ovn_extend_table *table,
                                  struct ovn_extend_table_info *existing)
 {
-    /* Remove 'existing' from 'groups->existing' */
+    /* Remove 'existing' from 'table->existing' */
     hmap_remove(&table->existing, &existing->hmap_node);
 
-    /* Dealloc group_id. */
-    bitmap_set0(table->table_ids, existing->table_id);
+    if (existing->peer) {
+        existing->peer->peer = NULL;
+    } else {
+        /* Dealloc the ID. */
+        bitmap_set0(table->table_ids, existing->table_id);
+    }
     ovn_extend_table_info_destroy(existing);
 }
 
@@ -222,15 +231,17 @@ ovn_extend_table_delete_desired(struct ovn_extend_table *table,
                                 struct ovn_extend_table_lflow_to_desired *l)
 {
     hmap_remove(&table->lflow_to_desired, &l->hmap_node);
-    struct ovn_extend_table_lflow_ref *r, *next_r;
-    LIST_FOR_EACH_SAFE (r, next_r, list_node, &l->desired) {
+    struct ovn_extend_table_lflow_ref *r;
+    LIST_FOR_EACH_SAFE (r, list_node, &l->desired) {
         struct ovn_extend_table_info *e = r->desired;
         ovn_extend_info_del_lflow_ref(r);
         if (hmap_is_empty(&e->references)) {
             VLOG_DBG("%s: %s, "UUID_FMT, __func__,
                      e->name, UUID_ARGS(&l->lflow_uuid));
             hmap_remove(&table->desired, &e->hmap_node);
-            if (e->new_table_id) {
+            if (e->peer) {
+                e->peer->peer = NULL;
+            } else {
                 bitmap_set0(table->table_ids, e->table_id);
             }
             ovn_extend_table_info_destroy(e);
@@ -254,30 +265,21 @@ ovn_extend_table_remove_desired(struct ovn_extend_table *table,
     ovn_extend_table_delete_desired(table, l);
 }
 
-static struct ovn_extend_table_info*
-ovn_extend_info_clone(struct ovn_extend_table_info *source)
-{
-    struct ovn_extend_table_info *clone =
-        ovn_extend_table_info_alloc(source->name,
-                                    source->table_id,
-                                    source->new_table_id,
-                                    source->hmap_node.hash);
-    return clone;
-}
-
 void
 ovn_extend_table_sync(struct ovn_extend_table *table)
 {
-    struct ovn_extend_table_info *desired, *next;
+    struct ovn_extend_table_info *desired;
 
     /* Copy the contents of desired to existing. */
-    HMAP_FOR_EACH_SAFE (desired, next, hmap_node, &table->desired) {
+    HMAP_FOR_EACH_SAFE (desired, hmap_node, &table->desired) {
         if (!ovn_extend_table_lookup(&table->existing, desired)) {
-            desired->new_table_id = false;
-            struct ovn_extend_table_info *clone =
-                ovn_extend_info_clone(desired);
-            hmap_insert(&table->existing, &clone->hmap_node,
-                        clone->hmap_node.hash);
+            struct ovn_extend_table_info *existing =
+                ovn_extend_table_info_alloc(desired->name,
+                                            desired->table_id,
+                                            desired,
+                                            desired->hmap_node.hash);
+            hmap_insert(&table->existing, &existing->hmap_node,
+                        existing->hmap_node.hash);
         }
     }
 }
@@ -289,7 +291,7 @@ ovn_extend_table_assign_id(struct ovn_extend_table *table, const char *name,
                            struct uuid lflow_uuid)
 {
     uint32_t table_id = 0, hash;
-    struct ovn_extend_table_info *table_info;
+    struct ovn_extend_table_info *table_info, *existing_info;
 
     hash = hash_string(name, 0);
 
@@ -307,17 +309,18 @@ ovn_extend_table_assign_id(struct ovn_extend_table *table, const char *name,
 
     /* Check whether we already have an installed entry for this
      * combination. */
+    existing_info = NULL;
     HMAP_FOR_EACH_WITH_HASH (table_info, hmap_node, hash, &table->existing) {
         if (!strcmp(table_info->name, name)) {
-            table_id = table_info->table_id;
+            existing_info = table_info;
+            table_id = existing_info->table_id;
+            break;
         }
     }
 
-    bool new_table_id = false;
-    if (!table_id) {
-        /* Reserve a new group_id. */
+    if (!existing_info) {
+        /* Reserve a new id. */
         table_id = bitmap_scan(table->table_ids, 0, 1, MAX_EXT_TABLE_ID + 1);
-        new_table_id = true;
     }
 
     if (table_id == MAX_EXT_TABLE_ID + 1) {
@@ -327,7 +330,7 @@ ovn_extend_table_assign_id(struct ovn_extend_table *table, const char *name,
     }
     bitmap_set1(table->table_ids, table_id);
 
-    table_info = ovn_extend_table_info_alloc(name, table_id, new_table_id,
+    table_info = ovn_extend_table_info_alloc(name, table_id, existing_info,
                                              hash);
 
     hmap_insert(&table->desired,
diff --git a/lib/extend-table.h b/lib/extend-table.h
index 6240b94..b43a146 100644
--- a/lib/extend-table.h
+++ b/lib/extend-table.h
@@ -28,8 +28,12 @@
  * such as the Group Table or Meter Table. */
 struct ovn_extend_table {
     unsigned long *table_ids;  /* Used as a bitmap with value set
-                                * for allocated group ids in either
-                                * desired or existing. */
+                                * for allocated ids in either desired or
+                                * existing (or both).  If the same "name"
+                                * exists in both desired and existing tables,
+                                * they must share the same ID.  The "peer"
+                                * pointer indicates whether the ID is still
+                                * used by the same item in the peer table. */
     struct hmap desired;
     struct hmap lflow_to_desired; /* Index for looking up desired table
                                    * items from given lflow uuid, with
@@ -48,8 +52,13 @@ struct ovn_extend_table_info {
     struct hmap_node hmap_node;
     char *name;         /* Name for the table entity. */
     uint32_t table_id;
-    bool new_table_id;  /* 'True' if 'table_id' was reserved from
-                         * ovn_extend_table's 'table_ids' bitmap. */
+    struct ovn_extend_table_info *peer; /* The extend tables exist as pairs,
+                                           one for desired items and one for
+                                           existing items. "peer" maintains the
+                                           link between a pair of items in
+                                           these tables.  If "peer" is NULL,
+                                           the counterpart has not been created
+                                           yet or has already been deleted. */
     struct hmap references; /* The lflows that are using this item, with
                              * ovn_extend_table_lflow_ref nodes. Only useful
                              * for items in ovn_extend_table.desired. */
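
The peer pointer described above lets the desired and existing tables share a single ID: the bit in table_ids is released only when the second of the two paired entries goes away.  A standalone sketch of that rule with a hypothetical entry struct and a 64-bit bitmap:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
    uint32_t id;
    struct entry *peer;          /* Counterpart in the other table, or NULL. */
};

static uint64_t ids_in_use;      /* Tiny stand-in for the table_ids bitmap. */

static void
entry_release(struct entry *e)
{
    if (e->peer) {
        e->peer->peer = NULL;                      /* Peer keeps the ID alive. */
    } else {
        ids_in_use &= ~(UINT64_C(1) << e->id);     /* Last user: free the ID. */
    }
    free(e);
}

int
main(void)
{
    struct entry *desired = malloc(sizeof *desired);
    struct entry *existing = malloc(sizeof *existing);

    desired->id = existing->id = 3;
    desired->peer = existing;
    existing->peer = desired;
    ids_in_use |= UINT64_C(1) << 3;

    entry_release(desired);              /* ID still held by 'existing'. */
    printf("after first release: %d\n", !!(ids_in_use & (UINT64_C(1) << 3)));
    entry_release(existing);             /* Now the ID is freed. */
    printf("after second release: %d\n", !!(ids_in_use & (UINT64_C(1) << 3)));
    return 0;
}
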
@@ -108,8 +117,8 @@ ovn_extend_table_desired_lookup_by_name(struct ovn_extend_table * table,
 /* Iterates 'EXISTING' through all of the 'ovn_extend_table_info's in
  * 'TABLE'->existing that are not in 'TABLE'->desired.  (The loop body
  * presumably removes them.) */
-#define EXTEND_TABLE_FOR_EACH_INSTALLED(EXISTING, NEXT, TABLE)         \
-    HMAP_FOR_EACH_SAFE (EXISTING, NEXT, hmap_node, &(TABLE)->existing) \
+#define EXTEND_TABLE_FOR_EACH_INSTALLED(EXISTING, TABLE)               \
+    HMAP_FOR_EACH_SAFE (EXISTING, hmap_node, &(TABLE)->existing)        \
         if (!ovn_extend_table_lookup(&(TABLE)->desired, EXISTING))
 
 #endif /* lib/extend-table.h */
diff --git a/lib/inc-proc-eng.c b/lib/inc-proc-eng.c
index 575b774..2e2b310 100644
--- a/lib/inc-proc-eng.c
+++ b/lib/inc-proc-eng.c
@@ -313,6 +313,17 @@ engine_has_run(void)
 }
 
 bool
+engine_has_updated(void)
+{
+    for (size_t i = 0; i < engine_n_nodes; i++) {
+        if (engine_nodes[i]->state == EN_UPDATED) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool
 engine_aborted(void)
 {
     return engine_run_aborted;
diff --git a/lib/inc-proc-eng.h b/lib/inc-proc-eng.h
index 9bfab1f..dc365dc 100644
--- a/lib/inc-proc-eng.h
+++ b/lib/inc-proc-eng.h
@@ -238,6 +238,10 @@ bool engine_node_changed(struct engine_node *node);
 /* Return true if the engine has run in the last iteration. */
 bool engine_has_run(void);
 
+/* Returns true if any engine node was updated during the last run, i.e. at
+ * least one input has changed; false if nothing has changed. */
+bool engine_has_updated(void);
+
 /* Returns true if during the last engine run we had to abort processing. */
 bool engine_aborted(void);
 
diff --git a/lib/ip-mcast-index.h b/lib/ip-mcast-index.h
index 3ac6579..87a7bf2 100644
--- a/lib/ip-mcast-index.h
+++ b/lib/ip-mcast-index.h
@@ -20,6 +20,11 @@ struct ovsdb_idl;
 
 struct sbrec_datapath_binding;
 
+/* Fixed group name for the special IGMP_Group entry that stores the list
+ * of learned multicast routers (mrouters) rather than a multicast group.
+ */
+#define OVN_IGMP_GROUP_MROUTERS "mrouters"
+
 #define OVN_MCAST_MIN_IDLE_TIMEOUT_S           15
 #define OVN_MCAST_MAX_IDLE_TIMEOUT_S           3600
 #define OVN_MCAST_DEFAULT_IDLE_TIMEOUT_S       300
diff --git a/lib/lb.c b/lib/lb.c
index 7b0ed1a..477cf8f 100644
--- a/lib/lb.c
+++ b/lib/lb.c
@@ -26,6 +26,51 @@
 
 VLOG_DEFINE_THIS_MODULE(lb);
 
+struct ovn_lb_ip_set *
+ovn_lb_ip_set_create(void)
+{
+    struct ovn_lb_ip_set *lb_ip_set = xzalloc(sizeof *lb_ip_set);
+
+    sset_init(&lb_ip_set->ips_v4);
+    sset_init(&lb_ip_set->ips_v4_routable);
+    sset_init(&lb_ip_set->ips_v4_reachable);
+    sset_init(&lb_ip_set->ips_v6);
+    sset_init(&lb_ip_set->ips_v6_routable);
+    sset_init(&lb_ip_set->ips_v6_reachable);
+
+    return lb_ip_set;
+}
+
+void
+ovn_lb_ip_set_destroy(struct ovn_lb_ip_set *lb_ip_set)
+{
+    if (!lb_ip_set) {
+        return;
+    }
+    sset_destroy(&lb_ip_set->ips_v4);
+    sset_destroy(&lb_ip_set->ips_v4_routable);
+    sset_destroy(&lb_ip_set->ips_v4_reachable);
+    sset_destroy(&lb_ip_set->ips_v6);
+    sset_destroy(&lb_ip_set->ips_v6_routable);
+    sset_destroy(&lb_ip_set->ips_v6_reachable);
+    free(lb_ip_set);
+}
+
+struct ovn_lb_ip_set *
+ovn_lb_ip_set_clone(struct ovn_lb_ip_set *lb_ip_set)
+{
+    struct ovn_lb_ip_set *clone = ovn_lb_ip_set_create();
+
+    sset_clone(&clone->ips_v4, &lb_ip_set->ips_v4);
+    sset_clone(&clone->ips_v4_routable, &lb_ip_set->ips_v4_routable);
+    sset_clone(&clone->ips_v4_reachable, &lb_ip_set->ips_v4_reachable);
+    sset_clone(&clone->ips_v6, &lb_ip_set->ips_v6);
+    sset_clone(&clone->ips_v6_routable, &lb_ip_set->ips_v6_routable);
+    sset_clone(&clone->ips_v6_reachable, &lb_ip_set->ips_v6_reachable);
+
+    return clone;
+}
+
 static
 bool ovn_lb_vip_init(struct ovn_lb_vip *lb_vip, const char *lb_key,
                      const char *lb_value)
@@ -173,6 +218,13 @@ ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb)
     lb->n_vips = smap_count(&nbrec_lb->vips);
     lb->vips = xcalloc(lb->n_vips, sizeof *lb->vips);
     lb->vips_nb = xcalloc(lb->n_vips, sizeof *lb->vips_nb);
+    lb->controller_event = smap_get_bool(&nbrec_lb->options, "event", false);
+    lb->routable = smap_get_bool(&nbrec_lb->options, "add_route", false);
+    lb->skip_snat = smap_get_bool(&nbrec_lb->options, "skip_snat", false);
+    const char *mode =
+        smap_get_def(&nbrec_lb->options, "neighbor_responder", "reachable");
+    lb->neigh_mode = strcmp(mode, "all") ? LB_NEIGH_RESPOND_REACHABLE
+                                         : LB_NEIGH_RESPOND_ALL;
     sset_init(&lb->ips_v4);
     sset_init(&lb->ips_v6);
     struct smap_node *node;
@@ -226,7 +278,7 @@ ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb)
 }
 
 struct ovn_northd_lb *
-ovn_northd_lb_find(struct hmap *lbs, const struct uuid *uuid)
+ovn_northd_lb_find(const struct hmap *lbs, const struct uuid *uuid)
 {
     struct ovn_northd_lb *lb;
     size_t hash = uuid_hash(uuid);
@@ -239,23 +291,27 @@ ovn_northd_lb_find(struct hmap *lbs, const struct uuid *uuid)
 }
 
 void
-ovn_northd_lb_add_lr(struct ovn_northd_lb *lb, struct ovn_datapath *od)
+ovn_northd_lb_add_lr(struct ovn_northd_lb *lb, size_t n,
+                     struct ovn_datapath **ods)
 {
-    if (lb->n_allocated_nb_lr == lb->n_nb_lr) {
+    while (lb->n_allocated_nb_lr <= lb->n_nb_lr + n) {
         lb->nb_lr = x2nrealloc(lb->nb_lr, &lb->n_allocated_nb_lr,
                                sizeof *lb->nb_lr);
     }
-    lb->nb_lr[lb->n_nb_lr++] = od;
+    memcpy(&lb->nb_lr[lb->n_nb_lr], ods, n * sizeof *ods);
+    lb->n_nb_lr += n;
 }
 
 void
-ovn_northd_lb_add_ls(struct ovn_northd_lb *lb, struct ovn_datapath *od)
+ovn_northd_lb_add_ls(struct ovn_northd_lb *lb, size_t n,
+                     struct ovn_datapath **ods)
 {
-    if (lb->n_allocated_nb_ls == lb->n_nb_ls) {
+    while (lb->n_allocated_nb_ls <= lb->n_nb_ls + n) {
         lb->nb_ls = x2nrealloc(lb->nb_ls, &lb->n_allocated_nb_ls,
                                sizeof *lb->nb_ls);
     }
-    lb->nb_ls[lb->n_nb_ls++] = od;
+    memcpy(&lb->nb_ls[lb->n_nb_ls], ods, n * sizeof *ods);
+    lb->n_nb_ls += n;
 }
 
 void
@@ -275,6 +331,62 @@ ovn_northd_lb_destroy(struct ovn_northd_lb *lb)
     free(lb);
 }
 
+/* Constructs a new 'struct ovn_lb_group' object from the NB LB Group record
+ * and a hash map of all existing 'struct ovn_northd_lb' objects.  Space is
+ * allocated for 'max_datapaths' logical switches and the same number of
+ * logical routers to which this LB Group is applied.  The datapath arrays
+ * can be filled in later with ovn_lb_group_add_ls() and
+ * ovn_lb_group_add_lr() respectively. */
+struct ovn_lb_group *
+ovn_lb_group_create(const struct nbrec_load_balancer_group *nbrec_lb_group,
+                    const struct hmap *lbs, size_t max_datapaths)
+{
+    struct ovn_lb_group *lb_group;
+
+    lb_group = xzalloc(sizeof *lb_group);
+    lb_group->uuid = nbrec_lb_group->header_.uuid;
+    lb_group->n_lbs = nbrec_lb_group->n_load_balancer;
+    lb_group->lbs = xmalloc(lb_group->n_lbs * sizeof *lb_group->lbs);
+    lb_group->ls = xmalloc(max_datapaths * sizeof *lb_group->ls);
+    lb_group->lr = xmalloc(max_datapaths * sizeof *lb_group->lr);
+    lb_group->lb_ips = ovn_lb_ip_set_create();
+
+    for (size_t i = 0; i < nbrec_lb_group->n_load_balancer; i++) {
+        const struct uuid *lb_uuid =
+            &nbrec_lb_group->load_balancer[i]->header_.uuid;
+        lb_group->lbs[i] = ovn_northd_lb_find(lbs, lb_uuid);
+    }
+
+    return lb_group;
+}
+
+void
+ovn_lb_group_destroy(struct ovn_lb_group *lb_group)
+{
+    if (!lb_group) {
+        return;
+    }
+
+    ovn_lb_ip_set_destroy(lb_group->lb_ips);
+    free(lb_group->lbs);
+    free(lb_group->ls);
+    free(lb_group->lr);
+    free(lb_group);
+}
+
+struct ovn_lb_group *
+ovn_lb_group_find(const struct hmap *lb_groups, const struct uuid *uuid)
+{
+    struct ovn_lb_group *lb_group;
+    size_t hash = uuid_hash(uuid);
+
+    HMAP_FOR_EACH_WITH_HASH (lb_group, hmap_node, hash, lb_groups) {
+        if (uuid_equals(&lb_group->uuid, uuid)) {
+            return lb_group;
+        }
+    }
+    return NULL;
+}
+
 struct ovn_controller_lb *
 ovn_controller_lb_create(const struct sbrec_load_balancer *sbrec_lb)
 {
diff --git a/lib/lb.h b/lib/lb.h
index 832ed31..9b902f0 100644
--- a/lib/lb.h
+++ b/lib/lb.h
@@ -20,15 +20,38 @@
 #include <sys/types.h>
 #include <netinet/in.h>
 #include "openvswitch/hmap.h"
-#include "sset.h"
 #include "ovn-util.h"
+#include "sset.h"
+#include "uuid.h"
 
 struct nbrec_load_balancer;
+struct nbrec_load_balancer_group;
 struct sbrec_load_balancer;
 struct sbrec_datapath_binding;
+struct ovn_datapath;
 struct ovn_port;
 struct uuid;
 
+enum lb_neighbor_responder_mode {
+    LB_NEIGH_RESPOND_REACHABLE,
+    LB_NEIGH_RESPOND_ALL,
+};
+
+/* The "routable" ssets are subsets of the load balancer IPs for which IP
+ * routes and ARP resolution flows are automatically added. */
+struct ovn_lb_ip_set {
+    struct sset ips_v4;
+    struct sset ips_v4_routable;
+    struct sset ips_v4_reachable;
+    struct sset ips_v6;
+    struct sset ips_v6_routable;
+    struct sset ips_v6_reachable;
+};
+
+struct ovn_lb_ip_set *ovn_lb_ip_set_create(void);
+void ovn_lb_ip_set_destroy(struct ovn_lb_ip_set *);
+struct ovn_lb_ip_set *ovn_lb_ip_set_clone(struct ovn_lb_ip_set *);
+
 struct ovn_northd_lb {
     struct hmap_node hmap_node;
 
@@ -40,6 +63,11 @@ struct ovn_northd_lb {
     struct ovn_northd_lb_vip *vips_nb;
     size_t n_vips;
 
+    enum lb_neighbor_responder_mode neigh_mode;
+    bool controller_event;
+    bool routable;
+    bool skip_snat;
+
     struct sset ips_v4;
     struct sset ips_v6;
 
@@ -86,12 +114,47 @@ struct ovn_northd_lb_backend {
 };
 
 struct ovn_northd_lb *ovn_northd_lb_create(const struct nbrec_load_balancer *);
-struct ovn_northd_lb * ovn_northd_lb_find(struct hmap *, const struct uuid *);
+struct ovn_northd_lb *ovn_northd_lb_find(const struct hmap *,
+                                         const struct uuid *);
 void ovn_northd_lb_destroy(struct ovn_northd_lb *);
-void
-ovn_northd_lb_add_lr(struct ovn_northd_lb *lb, struct ovn_datapath *od);
-void
-ovn_northd_lb_add_ls(struct ovn_northd_lb *lb, struct ovn_datapath *od);
+void ovn_northd_lb_add_lr(struct ovn_northd_lb *lb, size_t n,
+                          struct ovn_datapath **ods);
+void ovn_northd_lb_add_ls(struct ovn_northd_lb *lb, size_t n,
+                          struct ovn_datapath **ods);
+
+struct ovn_lb_group {
+    struct hmap_node hmap_node;
+    struct uuid uuid;
+    size_t n_lbs;
+    struct ovn_northd_lb **lbs;
+    struct ovn_lb_ip_set *lb_ips;
+
+    /* Datapaths to which this LB group is applied. */
+    size_t n_ls;
+    struct ovn_datapath **ls;
+    size_t n_lr;
+    struct ovn_datapath **lr;
+};
+
+struct ovn_lb_group *ovn_lb_group_create(
+    const struct nbrec_load_balancer_group *,
+    const struct hmap *lbs,
+    size_t max_datapaths);
+void ovn_lb_group_destroy(struct ovn_lb_group *lb_group);
+struct ovn_lb_group *ovn_lb_group_find(const struct hmap *lb_groups,
+                                       const struct uuid *);
+
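+/* Appends a logical switch / logical router to the LB group's datapath
+ * arrays.  The arrays were sized for 'max_datapaths' entries by
+ * ovn_lb_group_create(), so no reallocation is needed here. */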
+static inline void
+ovn_lb_group_add_ls(struct ovn_lb_group *lb_group, struct ovn_datapath *ls)
+{
+    lb_group->ls[lb_group->n_ls++] = ls;
+}
+
+static inline void
+ovn_lb_group_add_lr(struct ovn_lb_group *lb_group, struct ovn_datapath *lr)
+{
+    lb_group->lr[lb_group->n_lr++] = lr;
+}
 
 struct ovn_controller_lb {
     const struct sbrec_load_balancer *slb; /* May be NULL. */
diff --git a/lib/mac-binding-index.c b/lib/mac-binding-index.c
new file mode 100644
index 0000000..d774f12
--- /dev/null
+++ b/lib/mac-binding-index.c
@@ -0,0 +1,33 @@
+/* Copyright (c) 2022, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include "lib/mac-binding-index.h"
+#include "lib/ovn-sb-idl.h"
+
+struct ovsdb_idl_index *
+mac_binding_by_datapath_index_create(struct ovsdb_idl *idl)
+{
+    return ovsdb_idl_index_create1(idl, &sbrec_mac_binding_col_datapath);
+}
+
+struct ovsdb_idl_index *
+mac_binding_by_lport_ip_index_create(struct ovsdb_idl *idl)
+{
+    return ovsdb_idl_index_create2(idl,
+                                   &sbrec_mac_binding_col_logical_port,
+                                   &sbrec_mac_binding_col_ip);
+}
diff --git a/lib/mac-binding-index.h b/lib/mac-binding-index.h
new file mode 100644
index 0000000..8e977ec
--- /dev/null
+++ b/lib/mac-binding-index.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2022, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OVN_MAC_BINDING_INDEX_H
+#define OVN_MAC_BINDING_INDEX_H 1
+
+#include "lib/ovn-sb-idl.h"
+
+struct ovsdb_idl_index *mac_binding_by_datapath_index_create(
+    struct ovsdb_idl *idl);
+struct ovsdb_idl_index *mac_binding_by_lport_ip_index_create(
+    struct ovsdb_idl *idl);
+
+#endif /* lib/mac-binding-index.h */
diff --git a/lib/mcast-group-index.h b/lib/mcast-group-index.h
index 5bc7254..9664a94 100644
--- a/lib/mcast-group-index.h
+++ b/lib/mcast-group-index.h
@@ -28,11 +28,23 @@ struct sbrec_datapath_binding;
 enum ovn_mcast_tunnel_keys {
 
     OVN_MCAST_FLOOD_TUNNEL_KEY = OVN_MIN_MULTICAST,
-    OVN_MCAST_UNKNOWN_TUNNEL_KEY,
-    OVN_MCAST_MROUTER_FLOOD_TUNNEL_KEY,
-    OVN_MCAST_MROUTER_STATIC_TUNNEL_KEY,
-    OVN_MCAST_STATIC_TUNNEL_KEY,
-    OVN_MCAST_FLOOD_L2_TUNNEL_KEY,
+    OVN_MCAST_UNKNOWN_TUNNEL_KEY,        /* For L2 unknown dest traffic. */
+    OVN_MCAST_MROUTER_FLOOD_TUNNEL_KEY,  /* For L3 multicast traffic that must
+                                          * be relayed (multicast routed).
+                                          */
+    OVN_MCAST_STATIC_TUNNEL_KEY,         /* For switches:
+                                          * - for L3 multicast traffic that
+                                          *   needs to be forwarded
+                                          *   statically.
+                                          * For routers:
+                                          * - for L3 multicast traffic AND
+                                          *   reports that need to be
+                                          *   forwarded statically.
+                                          */
+    OVN_MCAST_FLOOD_L2_TUNNEL_KEY,       /* Logical switch broadcast domain
+                                          * excluding ports towards logical
+                                          * routers.
+                                          */
     OVN_MIN_IP_MULTICAST,
     OVN_MAX_IP_MULTICAST = OVN_MAX_MULTICAST,
 };
diff --git a/lib/ovn-l7.h b/lib/ovn-l7.h
index 49ecea8..0b2da9f 100644
--- a/lib/ovn-l7.h
+++ b/lib/ovn-l7.h
@@ -145,6 +145,14 @@ struct gen_opts_map {
 #define DHCP_OPT_TCP_KEEPALIVE_INTERVAL \
     DHCP_OPTION("tcp_keepalive_interval", 38, "uint32")
 
+/* Use the unused option code 253 for the DHCP next-server option.
+ * This option is used to set the "Next server IP address" field
+ * in the DHCP header.
+ */
+#define DHCP_OPT_NEXT_SERVER_CODE 253
+#define DHCP_OPT_NEXT_SERVER \
+    DHCP_OPTION("next_server", DHCP_OPT_NEXT_SERVER_CODE, "ipv4")
+
 static inline uint32_t
 gen_opt_hash(char *opt_name)
 {
diff --git a/lib/ovn-parallel-hmap.c b/lib/ovn-parallel-hmap.c
index 7edc4c0..828e5a0 100644
--- a/lib/ovn-parallel-hmap.c
+++ b/lib/ovn-parallel-hmap.c
@@ -38,14 +38,10 @@ VLOG_DEFINE_THIS_MODULE(ovn_parallel_hmap);
 
 #ifndef OVS_HAS_PARALLEL_HMAP
 
-#define WORKER_SEM_NAME "%x-%p-%x"
+#define WORKER_SEM_NAME "%x-%p-%"PRIxSIZE
 #define MAIN_SEM_NAME "%x-%p-main"
 
-/* These are accessed under mutex inside add_worker_pool().
- * They do not need to be atomic.
- */
 static atomic_bool initial_pool_setup = ATOMIC_VAR_INIT(false);
-static bool can_parallelize = false;
 
 /* This is set only in the process of exit and the set is
  * accompanied by a fence. It does not need to be atomic or be
@@ -57,18 +53,18 @@ static struct ovs_list worker_pools = OVS_LIST_INITIALIZER(&worker_pools);
 
 static struct ovs_mutex init_mutex = OVS_MUTEX_INITIALIZER;
 
-static int pool_size;
+static size_t pool_size = 1;
 
 static int sembase;
 
 static void worker_pool_hook(void *aux OVS_UNUSED);
-static void setup_worker_pools(bool force);
+static void setup_worker_pools(void);
 static void merge_list_results(struct worker_pool *pool OVS_UNUSED,
                                void *fin_result, void *result_frags,
-                               int index);
+                               size_t index);
 static void merge_hash_results(struct worker_pool *pool OVS_UNUSED,
                                void *fin_result, void *result_frags,
-                               int index);
+                               size_t index);
 
 bool
 ovn_stop_parallel_processing(void)
@@ -76,107 +72,184 @@ ovn_stop_parallel_processing(void)
     return workers_must_exit;
 }
 
-bool
-ovn_can_parallelize_hashes(bool force_parallel)
+size_t
+ovn_get_worker_pool_size(void)
 {
-    bool test = false;
+    return pool_size;
+}
 
-    if (atomic_compare_exchange_strong(
-            &initial_pool_setup,
-            &test,
-            true)) {
-        ovs_mutex_lock(&init_mutex);
-        setup_worker_pools(force_parallel);
-        ovs_mutex_unlock(&init_mutex);
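+/* Wake all workers with the exit flag set and join their threads. */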
+static void
+stop_controls(struct worker_pool *pool)
+{
+    if (pool->controls) {
+        workers_must_exit = true;
+
+        /* unlock threads. */
+        for (size_t i = 0; i < pool->size ; i++) {
+            if (pool->controls[i].fire != SEM_FAILED) {
+                sem_post(pool->controls[i].fire);
+            }
+        }
+
+        /* Wait for completion. */
+        for (size_t i = 0; i < pool->size ; i++) {
+            if (pool->controls[i].worker) {
+                pthread_join(pool->controls[i].worker, NULL);
+                pool->controls[i].worker = 0;
+            }
+        }
+        workers_must_exit = false;
     }
-    return can_parallelize;
 }
 
-struct worker_pool *
-ovn_add_worker_pool(void *(*start)(void *))
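+/* Destroy the per-worker mutexes, close and unlink the 'fire' semaphores,
+ * and free the controls array. */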
+static void
+free_controls(struct worker_pool *pool)
+{
+    char sem_name[256];
+    if (pool->controls) {
+        /* Close/unlink semaphores. */
+        for (size_t i = 0; i < pool->size; i++) {
+            ovs_mutex_destroy(&pool->controls[i].mutex);
+            if (pool->controls[i].fire != SEM_FAILED) {
+                sem_close(pool->controls[i].fire);
+                sprintf(sem_name, WORKER_SEM_NAME, sembase, pool, i);
+                sem_unlink(sem_name);
+            } else {
+                /* This and following controls are not initialized */
+                break;
+            }
+        }
+        free(pool->controls);
+        pool->controls = NULL;
+    }
+}
+
+static void
+free_pool(struct worker_pool *pool)
+{
+    char sem_name[256];
+    stop_controls(pool);
+    free_controls(pool);
+    if (pool->done != SEM_FAILED) {
+        sem_close(pool->done);
+        sprintf(sem_name, MAIN_SEM_NAME, sembase, pool);
+        sem_unlink(sem_name);
+    }
+    free(pool);
+}
+
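+/* Allocate the per-worker control blocks and open their 'fire' semaphores;
+ * returns -1 (and frees the controls) if a semaphore cannot be opened. */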
+static int
+init_controls(struct worker_pool *pool)
 {
-    struct worker_pool *new_pool = NULL;
     struct worker_control *new_control;
+    char sem_name[256];
+
+    pool->controls = xmalloc(sizeof(struct worker_control) * pool->size);
+    for (size_t i = 0; i < pool->size ; i++) {
+        pool->controls[i].fire = SEM_FAILED;
+    }
+    for (size_t i = 0; i < pool->size; i++) {
+        new_control = &pool->controls[i];
+        new_control->id = i;
+        new_control->done = pool->done;
+        new_control->data = NULL;
+        new_control->pool = pool;
+        new_control->worker = 0;
+        ovs_mutex_init(&new_control->mutex);
+        new_control->finished = ATOMIC_VAR_INIT(false);
+        sprintf(sem_name, WORKER_SEM_NAME, sembase, pool, i);
+        new_control->fire = sem_open(sem_name, O_CREAT, S_IRWXU, 0);
+        if (new_control->fire == SEM_FAILED) {
+            free_controls(pool);
+            return -1;
+        }
+    }
+    return 0;
+}
+
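+/* Start one helper thread per control block and add the pool to the
+ * global list of worker pools. */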
+static void
+init_threads(struct worker_pool *pool, void *(*start)(void *))
+{
+    for (size_t i = 0; i < pool_size; i++) {
+        pool->controls[i].worker =
+            ovs_thread_create("worker pool helper", start, &pool->controls[i]);
+    }
+    ovs_list_push_back(&worker_pools, &pool->list_node);
+}
+
+enum pool_update_status
+ovn_update_worker_pool(size_t requested_pool_size,
+                       struct worker_pool **pool, void *(*start)(void *))
+{
     bool test = false;
-    int i;
     char sem_name[256];
 
-    /* Belt and braces - initialize the pool system just in case if
-     * if it is not yet initialized.
-     */
+    if (requested_pool_size == pool_size) {
+        return POOL_UNCHANGED;
+    }
+
     if (atomic_compare_exchange_strong(
             &initial_pool_setup,
             &test,
             true)) {
         ovs_mutex_lock(&init_mutex);
-        setup_worker_pools(false);
+        setup_worker_pools();
         ovs_mutex_unlock(&init_mutex);
     }
-
     ovs_mutex_lock(&init_mutex);
-    if (can_parallelize) {
-        new_pool = xmalloc(sizeof(struct worker_pool));
-        new_pool->size = pool_size;
-        new_pool->controls = NULL;
-        sprintf(sem_name, MAIN_SEM_NAME, sembase, new_pool);
-        new_pool->done = sem_open(sem_name, O_CREAT, S_IRWXU, 0);
-        if (new_pool->done == SEM_FAILED) {
-            goto cleanup;
-        }
-
-        new_pool->controls =
-            xmalloc(sizeof(struct worker_control) * new_pool->size);
-
-        for (i = 0; i < new_pool->size; i++) {
-            new_control = &new_pool->controls[i];
-            new_control->id = i;
-            new_control->done = new_pool->done;
-            new_control->data = NULL;
-            ovs_mutex_init(&new_control->mutex);
-            new_control->finished = ATOMIC_VAR_INIT(false);
-            sprintf(sem_name, WORKER_SEM_NAME, sembase, new_pool, i);
-            new_control->fire = sem_open(sem_name, O_CREAT, S_IRWXU, 0);
-            if (new_control->fire == SEM_FAILED) {
+    pool_size = requested_pool_size;
+    VLOG_INFO("Setting thread count to %"PRIuSIZE, pool_size);
+
+    if (*pool == NULL) {
+        if (pool_size > 1) {
+            VLOG_INFO("Creating new pool with size %"PRIuSIZE, pool_size);
+            *pool = xmalloc(sizeof(struct worker_pool));
+            (*pool)->size = pool_size;
+            (*pool)->controls = NULL;
+            sprintf(sem_name, MAIN_SEM_NAME, sembase, *pool);
+            (*pool)->done = sem_open(sem_name, O_CREAT, S_IRWXU, 0);
+            if ((*pool)->done == SEM_FAILED) {
                 goto cleanup;
             }
+            if (init_controls(*pool) == -1) {
+                goto cleanup;
+            }
+            init_threads(*pool, start);
         }
-
-        for (i = 0; i < pool_size; i++) {
-            new_pool->controls[i].worker =
-                ovs_thread_create("worker pool helper", start, &new_pool->controls[i]);
+    } else {
+        if (pool_size > 1) {
+            VLOG_INFO("Changing size of existing pool to %"PRIuSIZE,
+                      pool_size);
+            stop_controls(*pool);
+            free_controls(*pool);
+            ovs_list_remove(&(*pool)->list_node);
+            (*pool)->size = pool_size;
+            if (init_controls(*pool) == -1) {
+                goto cleanup;
+            }
+            init_threads(*pool, start);
+        } else {
+            VLOG_INFO("Deleting existing pool");
+            worker_pool_hook(NULL);
+            *pool = NULL;
         }
-        ovs_list_push_back(&worker_pools, &new_pool->list_node);
     }
     ovs_mutex_unlock(&init_mutex);
-    return new_pool;
-cleanup:
+    return POOL_UPDATED;
 
+cleanup:
     /* Something went wrong when opening semaphores. In this case
      * it is better to shut off parallel processing altogether
      */
-
-    VLOG_INFO("Failed to initialize parallel processing, error %d", errno);
-    can_parallelize = false;
-    if (new_pool->controls) {
-        for (i = 0; i < new_pool->size; i++) {
-            if (new_pool->controls[i].fire != SEM_FAILED) {
-                sem_close(new_pool->controls[i].fire);
-                sprintf(sem_name, WORKER_SEM_NAME, sembase, new_pool, i);
-                sem_unlink(sem_name);
-                break; /* semaphores past this one are uninitialized */
-            }
-        }
-    }
-    if (new_pool->done != SEM_FAILED) {
-        sem_close(new_pool->done);
-        sprintf(sem_name, MAIN_SEM_NAME, sembase, new_pool);
-        sem_unlink(sem_name);
-    }
+    VLOG_ERR("Failed to initialize parallel processing: %s",
+             ovs_strerror(errno));
+    free_pool(*pool);
+    *pool = NULL;
+    pool_size = 1;
     ovs_mutex_unlock(&init_mutex);
-    return NULL;
+    return POOL_UPDATE_FAILED;
 }
 
-
 /* Initializes 'hmap' as an empty hash table with mask N. */
 void
 ovn_fast_hmap_init(struct hmap *hmap, ssize_t mask)
@@ -225,9 +298,9 @@ ovn_run_pool_callback(struct worker_pool *pool,
                       void *fin_result, void *result_frags,
                       void (*helper_func)(struct worker_pool *pool,
                                           void *fin_result,
-                                          void *result_frags, int index))
+                                          void *result_frags, size_t index))
 {
-    int index, completed;
+    size_t index, completed;
 
     /* Ensure that all worker threads see the same data as the
      * main thread.
@@ -367,9 +440,7 @@ ovn_update_hashrow_locks(struct hmap *lflows, struct hashrow_locks *hrl)
 
 static void
 worker_pool_hook(void *aux OVS_UNUSED) {
-    int i;
     static struct worker_pool *pool;
-    char sem_name[256];
 
     workers_must_exit = true;
 
@@ -380,55 +451,15 @@ worker_pool_hook(void *aux OVS_UNUSED) {
      */
     atomic_thread_fence(memory_order_acq_rel);
 
-    /* Wake up the workers after the must_exit flag has been set */
-
-    LIST_FOR_EACH (pool, list_node, &worker_pools) {
-        for (i = 0; i < pool->size ; i++) {
-            sem_post(pool->controls[i].fire);
-        }
-        for (i = 0; i < pool->size ; i++) {
-            pthread_join(pool->controls[i].worker, NULL);
-        }
-        for (i = 0; i < pool->size ; i++) {
-            sem_close(pool->controls[i].fire);
-            sprintf(sem_name, WORKER_SEM_NAME, sembase, pool, i);
-            sem_unlink(sem_name);
-        }
-        sem_close(pool->done);
-        sprintf(sem_name, MAIN_SEM_NAME, sembase, pool);
-        sem_unlink(sem_name);
+    LIST_FOR_EACH_SAFE (pool, list_node, &worker_pools) {
+        ovs_list_remove(&pool->list_node);
+        free_pool(pool);
     }
 }
 
 static void
-setup_worker_pools(bool force) {
-    int cores, nodes;
-
-    ovs_numa_init();
-    nodes = ovs_numa_get_n_numas();
-    if (nodes == OVS_NUMA_UNSPEC || nodes <= 0) {
-        nodes = 1;
-    }
-    cores = ovs_numa_get_n_cores();
-
-    /* If there is no NUMA config, use 4 cores.
-     * If there is NUMA config use half the cores on
-     * one node so that the OS does not start pushing
-     * threads to other nodes.
-     */
-    if (cores == OVS_CORE_UNSPEC || cores <= 0) {
-        /* If there is no NUMA we can try the ovs-threads routine.
-         * It falls back to sysconf and/or affinity mask.
-         */
-        cores = count_cpu_cores();
-        pool_size = cores;
-    } else {
-        pool_size = cores / nodes;
-    }
-    if ((pool_size < 4) && force) {
-        pool_size = 4;
-    }
-    can_parallelize = (pool_size >= 3);
+setup_worker_pools(void)
+{
     fatal_signal_add_hook(worker_pool_hook, NULL, NULL, true);
     sembase = random_uint32();
 }
@@ -436,7 +467,7 @@ setup_worker_pools(bool force) {
 static void
 merge_list_results(struct worker_pool *pool OVS_UNUSED,
                    void *fin_result, void *result_frags,
-                   int index)
+                   size_t index)
 {
     struct ovs_list *result = (struct ovs_list *)fin_result;
     struct ovs_list *res_frags = (struct ovs_list *)result_frags;
@@ -450,7 +481,7 @@ merge_list_results(struct worker_pool *pool OVS_UNUSED,
 static void
 merge_hash_results(struct worker_pool *pool OVS_UNUSED,
                    void *fin_result, void *result_frags,
-                   int index)
+                   size_t index)
 {
     struct hmap *result = (struct hmap *)fin_result;
     struct hmap *res_frags = (struct hmap *)result_frags;
diff --git a/lib/ovn-parallel-hmap.h b/lib/ovn-parallel-hmap.h
index 0f7d687..362b6fd 100644
--- a/lib/ovn-parallel-hmap.h
+++ b/lib/ovn-parallel-hmap.h
@@ -81,21 +81,31 @@ struct worker_control {
     sem_t *done; /* Work completion semaphore - sem_post on completion. */
     struct ovs_mutex mutex; /* Guards the data. */
     void *data; /* Pointer to data to be processed. */
-    void *workload; /* back-pointer to the worker pool structure. */
     pthread_t worker;
+    struct worker_pool *pool;
 };
 
 struct worker_pool {
-    int size;   /* Number of threads in the pool. */
+    size_t size;   /* Number of threads in the pool. */
     struct ovs_list list_node; /* List of pools - used in cleanup/exit. */
     struct worker_control *controls; /* "Handles" in this pool. */
     sem_t *done; /* Work completion semaphorew. */
 };
 
-/* Add a worker pool for thread function start() which expects a pointer to
- * a worker_control structure as an argument. */
+/* Return pool size; bigger than 1 means parallelization has been enabled. */
+size_t ovn_get_worker_pool_size(void);
 
-struct worker_pool *ovn_add_worker_pool(void *(*start)(void *));
+enum pool_update_status {
+     POOL_UNCHANGED,     /* no change to pool */
+     POOL_UPDATED,       /* pool has been updated */
+     POOL_UPDATE_FAILED, /* pool update failed; parallelization disabled */
+};
+
+/* Add, resize, or delete a worker pool for thread function start(), which
+ * expects a pointer to a worker_control structure as an argument.  Returns
+ * POOL_UNCHANGED, POOL_UPDATED or POOL_UPDATE_FAILED. */
+enum pool_update_status ovn_update_worker_pool(size_t requested_pool_size,
+                                               struct worker_pool **,
+                                               void *(*start)(void *));
 
 /* Setting this to true will make all processing threads exit */
 
@@ -140,7 +150,8 @@ void ovn_run_pool_list(struct worker_pool *pool,
 void ovn_run_pool_callback(struct worker_pool *pool, void *fin_result,
                            void *result_frags,
                            void (*helper_func)(struct worker_pool *pool,
-                           void *fin_result, void *result_frags, int index));
+                           void *fin_result, void *result_frags,
+                           size_t index));
 
 
 /* Returns the first node in 'hmap' in the bucket in which the given 'hash'
@@ -251,17 +262,17 @@ static inline void init_hash_row_locks(struct hashrow_locks *hrl)
     hrl->row_locks = NULL;
 }
 
-bool ovn_can_parallelize_hashes(bool force_parallel);
-
 /* Use the OVN library functions for stuff which OVS has not defined
  * If OVS has defined these, they will still compile using the OVN
  * local names, but will be dropped by the linker in favour of the OVS
  * supplied functions.
  */
+#define update_worker_pool(requested_pool_size, existing_pool, func) \
+    ovn_update_worker_pool(requested_pool_size, existing_pool, func)
 
-#define update_hashrow_locks(lflows, hrl) ovn_update_hashrow_locks(lflows, hrl)
+#define get_worker_pool_size() ovn_get_worker_pool_size()
 
-#define can_parallelize_hashes(force) ovn_can_parallelize_hashes(force)
+#define update_hashrow_locks(lflows, hrl) ovn_update_hashrow_locks(lflows, hrl)
 
 #define stop_parallel_processing() ovn_stop_parallel_processing()
 
diff --git a/lib/ovn-util.c b/lib/ovn-util.c
index 81f18d6..d80db17 100644
--- a/lib/ovn-util.c
+++ b/lib/ovn-util.c
@@ -370,6 +370,62 @@ destroy_lport_addresses(struct lport_addresses *laddrs)
     free(laddrs->ipv6_addrs);
 }
 
+/* Returns the string form of the IP address in 'laddrs' whose subnet
+ * contains 'ip_s'.  If none is found, returns NULL.
+ *
+ * The caller must not free the returned string. */
+const char *
+find_lport_address(const struct lport_addresses *laddrs, const char *ip_s)
+{
+    bool is_ipv4 = strchr(ip_s, '.') ? true : false;
+
+    if (is_ipv4) {
+        ovs_be32 ip;
+
+        if (!ip_parse(ip_s, &ip)) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_WARN_RL(&rl, "bad ip address %s", ip_s);
+            return NULL;
+        }
+
+        for (int i = 0; i < laddrs->n_ipv4_addrs; i++) {
+            const struct ipv4_netaddr *na = &laddrs->ipv4_addrs[i];
+
+            if (!((na->network ^ ip) & na->mask)) {
+                /* There should be only 1 interface that matches the
+                 * supplied IP.  Otherwise, it's a configuration error,
+                 * because subnets of a router's interfaces should NOT
+                 * overlap. */
+                return na->addr_s;
+            }
+        }
+    } else {
+        struct in6_addr ip6;
+
+        if (!ipv6_parse(ip_s, &ip6)) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_WARN_RL(&rl, "bad ipv6 address %s", ip_s);
+            return NULL;
+        }
+
+        for (int i = 0; i < laddrs->n_ipv6_addrs; i++) {
+            const struct ipv6_netaddr *na = &laddrs->ipv6_addrs[i];
+            struct in6_addr xor_addr = ipv6_addr_bitxor(&na->network, &ip6);
+            struct in6_addr and_addr = ipv6_addr_bitand(&xor_addr, &na->mask);
+
+            if (ipv6_is_zero(&and_addr)) {
+                /* There should be only 1 interface that matches the
+                 * supplied IP.  Otherwise, it's a configuration error,
+                 * because subnets of a router's interfaces should NOT
+                 * overlap. */
+                return na->addr_s;
+            }
+        }
+    }
+
+    return NULL;
+}
+
 /* Go through 'addresses' and add found IPv4 addresses to 'ipv4_addrs' and
  * IPv6 addresses to 'ipv6_addrs'. */
 void
@@ -827,3 +883,55 @@ get_bridge(const struct ovsrec_bridge_table *bridge_table, const char *br_name)
     }
     return NULL;
 }
+
+#define DAEMON_STARTUP_DELAY_SEED 20
+#define DAEMON_STARTUP_DELAY_MS   10000
+
+static int64_t startup_ts;
+static int startup_delay = DAEMON_STARTUP_DELAY_SEED;
+
+/* Used by debug command only, for tests. */
+static bool ignore_startup_delay = false;
+
+OVS_CONSTRUCTOR(startup_ts_initializer) {
+    startup_ts = time_wall_msec();
+}
+
+int64_t
+daemon_startup_ts(void)
+{
+    return startup_ts;
+}
+
+void
+daemon_started_recently_countdown(void)
+{
+    if (startup_delay > 0) {
+        startup_delay--;
+    }
+}
+
+void
+daemon_started_recently_ignore(void)
+{
+    ignore_startup_delay = true;
+}
+
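+/* Returns true while the daemon is still considered recently started,
+ * i.e., until the startup countdown has run out and DAEMON_STARTUP_DELAY_MS
+ * has passed since process start.  Always returns false once
+ * daemon_started_recently_ignore() has been called. */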
+bool
+daemon_started_recently(void)
+{
+    if (ignore_startup_delay) {
+        return false;
+    }
+
+    VLOG_DBG("startup_delay: %d, startup_ts: %"PRId64, startup_delay,
+             startup_ts);
+
+    /* Ensure that a minimum number of updates has been handled. */
+    if (startup_delay) {
+        return true;
+    }
+
+    /* Ensure that a minimum amount of time has passed. */
+    return time_wall_msec() - startup_ts <= DAEMON_STARTUP_DELAY_MS;
+}
diff --git a/lib/ovn-util.h b/lib/ovn-util.h
index 024b86b..145f974 100644
--- a/lib/ovn-util.h
+++ b/lib/ovn-util.h
@@ -86,6 +86,8 @@ bool extract_lrp_networks__(char *mac, char **networks, size_t n_networks,
 
 bool lport_addresses_is_empty(struct lport_addresses *);
 void destroy_lport_addresses(struct lport_addresses *);
+const char *find_lport_address(const struct lport_addresses *laddrs,
+                               const char *ip_s);
 
 void split_addresses(const char *addresses, struct svec *ipv4_addrs,
                      struct svec *ipv6_addrs);
@@ -307,5 +309,9 @@ struct ovsrec_bridge_table;
 const struct ovsrec_bridge *get_bridge(const struct ovsrec_bridge_table *,
                                        const char *br_name);
 
+void daemon_started_recently_countdown(void);
+void daemon_started_recently_ignore(void);
+bool daemon_started_recently(void);
+int64_t daemon_startup_ts(void);
 
 #endif /* OVN_UTIL_H */
diff --git a/lib/static-mac-binding-index.c b/lib/static-mac-binding-index.c
new file mode 100644
index 0000000..fecc9b7
--- /dev/null
+++ b/lib/static-mac-binding-index.c
@@ -0,0 +1,43 @@
+/* Copyright (c) 2021
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include "lib/static-mac-binding-index.h"
+#include "lib/ovn-sb-idl.h"
+
+struct ovsdb_idl_index *
+static_mac_binding_index_create(struct ovsdb_idl *idl)
+{
+    return ovsdb_idl_index_create2(idl,
+                                   &sbrec_static_mac_binding_col_logical_port,
+                                   &sbrec_static_mac_binding_col_ip);
+}
+
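+/* Looks up the Static_MAC_Binding record matching 'logical_port' and 'ip'
+ * in 'smb_index'.  Returns NULL if no such record exists. */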
+const struct sbrec_static_mac_binding *
+static_mac_binding_lookup(struct ovsdb_idl_index *smb_index,
+                          const char *logical_port, const char *ip)
+{
+    struct sbrec_static_mac_binding *target =
+        sbrec_static_mac_binding_index_init_row(smb_index);
+    sbrec_static_mac_binding_index_set_logical_port(target, logical_port);
+    sbrec_static_mac_binding_index_set_ip(target, ip);
+
+    struct sbrec_static_mac_binding *smb =
+        sbrec_static_mac_binding_index_find(smb_index, target);
+    sbrec_static_mac_binding_index_destroy_row(target);
+
+    return smb;
+}
diff --git a/lib/static-mac-binding-index.h b/lib/static-mac-binding-index.h
new file mode 100644
index 0000000..3d4ff06
--- /dev/null
+++ b/lib/static-mac-binding-index.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2021
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OVN_STATIC_MAC_BINDING_INDEX_H
+#define OVN_STATIC_MAC_BINDING_INDEX_H 1
+
+struct ovsdb_idl;
+
+struct ovsdb_idl_index *static_mac_binding_index_create(struct ovsdb_idl *);
+const struct sbrec_static_mac_binding *static_mac_binding_lookup(
+    struct ovsdb_idl_index *smb_index,
+    const char *logical_port,
+    const char *ip);
+
+#endif /* lib/static-mac-binding-index.h */
diff --git a/lib/vif-plug-provider.c b/lib/vif-plug-provider.c
index 798e90e..d318ed4 100644
--- a/lib/vif-plug-provider.c
+++ b/lib/vif-plug-provider.c
@@ -179,10 +179,10 @@ vif_plug_provider_get(const char *type)
 bool
 vif_plug_provider_run_all(void)
 {
-    struct shash_node *node, *next;
+    struct shash_node *node;
     bool changed = false;
 
-    SHASH_FOR_EACH_SAFE (node, next, &vif_plug_classes) {
+    SHASH_FOR_EACH_SAFE (node, &vif_plug_classes) {
         struct vif_plug_class *vif_plug_class = node->data;
         if (vif_plug_class->run && vif_plug_class->run(vif_plug_class)) {
             changed = true;
@@ -195,9 +195,9 @@ vif_plug_provider_run_all(void)
 void
 vif_plug_provider_destroy_all(void)
 {
-    struct shash_node *node, *next;
+    struct shash_node *node;
 
-    SHASH_FOR_EACH_SAFE (node, next, &vif_plug_classes) {
+    SHASH_FOR_EACH_SAFE (node, &vif_plug_classes) {
         struct vif_plug_class *vif_plug_class = node->data;
         vif_plug_provider_unregister(vif_plug_class->type);
     }
diff --git a/m4/ovn.m4 b/m4/ovn.m4
index 2909914..bc2ac1a 100644
--- a/m4/ovn.m4
+++ b/m4/ovn.m4
@@ -300,7 +300,7 @@ dnl Checks for valgrind/valgrind.h.
 AC_DEFUN([OVN_CHECK_VALGRIND],
   [AC_CHECK_HEADERS([valgrind/valgrind.h])])
 
-dnl Checks for Python 3.4 or later.
+dnl Checks for Python 3.6 or later.
 AC_DEFUN([OVN_CHECK_PYTHON3],
   [AC_CACHE_CHECK(
      [for Python 3 (version 3.4 or later)],
@@ -309,13 +309,13 @@ AC_DEFUN([OVN_CHECK_PYTHON3],
         ovs_cv_python3=$PYTHON3
       else
         ovs_cv_python3=no
-        for binary in python3 python3.4 python3.5 python3.6 python3.7; do
        for binary in python3 python3.4 python3.5 python3.6 python3.7 python3.8 python3.9 python3.10; do
           ovs_save_IFS=$IFS; IFS=$PATH_SEPARATOR
           for dir in $PATH; do
             IFS=$ovs_save_IFS
             test -z "$dir" && dir=.
             if test -x "$dir"/"$binary" && "$dir"/"$binary" -c 'import sys
-if sys.hexversion >= 0x03040000 and sys.hexversion < 0x04000000:
+if sys.hexversion >= 0x03060000 and sys.hexversion < 0x04000000:
     sys.exit(0)
 else:
     sys.exit(1)'; then
@@ -326,7 +326,7 @@ else:
         done
       fi])
    if test "$ovs_cv_python3" = no; then
-     AC_MSG_ERROR([Python 3.4 or later is required but not found in $PATH, please install it or set $PYTHON3 to point to it])
+     AC_MSG_ERROR([Python 3.6 or later is required but not found in $PATH, please install it or set $PYTHON3 to point to it])
    fi
    AC_ARG_VAR([PYTHON3])
    PYTHON3=$ovs_cv_python3])
diff --git a/northd/automake.mk b/northd/automake.mk
index 4862ec7..8158286 100644
--- a/northd/automake.mk
+++ b/northd/automake.mk
@@ -1,6 +1,8 @@
 # ovn-northd
 bin_PROGRAMS += northd/ovn-northd
 northd_ovn_northd_SOURCES = \
+	northd/mac-binding-aging.c \
+	northd/mac-binding-aging.h \
 	northd/northd.c \
 	northd/northd.h \
 	northd/ovn-northd.c \
diff --git a/northd/en-northd.c b/northd/en-northd.c
index 79da7e1..7fe83db 100644
--- a/northd/en-northd.c
+++ b/northd/en-northd.c
@@ -55,6 +55,10 @@ void en_northd_run(struct engine_node *node, void *data)
         engine_ovsdb_node_get_index(
             engine_get_input("SB_ip_multicast", node),
             "sbrec_ip_mcast_by_dp");
+    input_data.sbrec_static_mac_binding_by_lport_ip =
+        engine_ovsdb_node_get_index(
+            engine_get_input("SB_static_mac_binding", node),
+            "sbrec_static_mac_binding_by_lport_ip");
 
     input_data.nbrec_nb_global_table =
         EN_OVSDB_GET(engine_get_input("NB_nb_global", node));
@@ -64,6 +68,8 @@ void en_northd_run(struct engine_node *node, void *data)
         EN_OVSDB_GET(engine_get_input("NB_logical_router", node));
     input_data.nbrec_load_balancer_table =
         EN_OVSDB_GET(engine_get_input("NB_load_balancer", node));
+    input_data.nbrec_load_balancer_group_table =
+        EN_OVSDB_GET(engine_get_input("NB_load_balancer_group", node));
     input_data.nbrec_port_group_table =
         EN_OVSDB_GET(engine_get_input("NB_port_group", node));
     input_data.nbrec_address_set_table =
@@ -72,6 +78,8 @@ void en_northd_run(struct engine_node *node, void *data)
         EN_OVSDB_GET(engine_get_input("NB_meter", node));
     input_data.nbrec_acl_table =
         EN_OVSDB_GET(engine_get_input("NB_acl", node));
+    input_data.nbrec_static_mac_binding_table =
+        EN_OVSDB_GET(engine_get_input("NB_static_mac_binding", node));
 
     input_data.sbrec_sb_global_table =
         EN_OVSDB_GET(engine_get_input("SB_sb_global", node));
@@ -103,6 +111,8 @@ void en_northd_run(struct engine_node *node, void *data)
         EN_OVSDB_GET(engine_get_input("SB_ip_multicast", node));
     input_data.sbrec_chassis_private_table =
         EN_OVSDB_GET(engine_get_input("SB_chassis_private", node));
+    input_data.sbrec_static_mac_binding_table =
+        EN_OVSDB_GET(engine_get_input("SB_static_mac_binding", node));
 
     northd_run(&input_data, data,
                eng_ctx->ovnnb_idl_txn,
diff --git a/northd/inc-proc-northd.c b/northd/inc-proc-northd.c
index af55221..54e0ad3 100644
--- a/northd/inc-proc-northd.c
+++ b/northd/inc-proc-northd.c
@@ -20,10 +20,13 @@
 
 #include "chassis-index.h"
 #include "ip-mcast-index.h"
+#include "static-mac-binding-index.h"
 #include "lib/inc-proc-eng.h"
+#include "lib/mac-binding-index.h"
 #include "lib/ovn-nb-idl.h"
 #include "lib/ovn-sb-idl.h"
 #include "mcast-group-index.h"
+#include "northd/mac-binding-aging.h"
 #include "openvswitch/poll-loop.h"
 #include "openvswitch/vlog.h"
 #include "inc-proc-northd.h"
@@ -60,7 +63,8 @@ VLOG_DEFINE_THIS_MODULE(inc_proc_northd);
     NB_NODE(gateway_chassis, "gateway_chassis") \
     NB_NODE(ha_chassis_group, "ha_chassis_group") \
     NB_NODE(ha_chassis, "ha_chassis") \
-    NB_NODE(bfd, "bfd")
+    NB_NODE(bfd, "bfd") \
+    NB_NODE(static_mac_binding, "static_mac_binding")
 
     enum nb_engine_node {
 #define NB_NODE(NAME, NAME_STR) NB_##NAME,
@@ -109,7 +113,8 @@ VLOG_DEFINE_THIS_MODULE(inc_proc_northd);
     SB_NODE(service_monitor, "service_monitor") \
     SB_NODE(load_balancer, "load_balancer") \
     SB_NODE(bfd, "bfd") \
-    SB_NODE(fdb, "fdb")
+    SB_NODE(fdb, "fdb") \
+    SB_NODE(static_mac_binding, "static_mac_binding")
 
 enum sb_engine_node {
 #define SB_NODE(NAME, NAME_STR) SB_##NAME,
@@ -146,6 +151,8 @@ enum sb_engine_node {
  * avoid sparse errors. */
 static ENGINE_NODE(northd, "northd");
 static ENGINE_NODE(lflow, "lflow");
+static ENGINE_NODE(mac_binding_aging, "mac_binding_aging");
+static ENGINE_NODE(mac_binding_aging_waker, "mac_binding_aging_waker");
 
 void inc_proc_northd_init(struct ovsdb_idl_loop *nb,
                           struct ovsdb_idl_loop *sb)
@@ -178,6 +185,7 @@ void inc_proc_northd_init(struct ovsdb_idl_loop *nb,
     engine_add_input(&en_northd, &en_nb_gateway_chassis, NULL);
     engine_add_input(&en_northd, &en_nb_ha_chassis_group, NULL);
     engine_add_input(&en_northd, &en_nb_ha_chassis, NULL);
+    engine_add_input(&en_northd, &en_nb_static_mac_binding, NULL);
 
     engine_add_input(&en_northd, &en_sb_sb_global, NULL);
     engine_add_input(&en_northd, &en_sb_chassis, NULL);
@@ -206,12 +214,20 @@ void inc_proc_northd_init(struct ovsdb_idl_loop *nb,
     engine_add_input(&en_northd, &en_sb_service_monitor, NULL);
     engine_add_input(&en_northd, &en_sb_load_balancer, NULL);
     engine_add_input(&en_northd, &en_sb_fdb, NULL);
+    engine_add_input(&en_northd, &en_sb_static_mac_binding, NULL);
+    engine_add_input(&en_mac_binding_aging, &en_nb_nb_global, NULL);
+    engine_add_input(&en_mac_binding_aging, &en_sb_mac_binding, NULL);
+    engine_add_input(&en_mac_binding_aging, &en_northd, NULL);
+    engine_add_input(&en_mac_binding_aging, &en_mac_binding_aging_waker, NULL);
     engine_add_input(&en_lflow, &en_nb_bfd, NULL);
     engine_add_input(&en_lflow, &en_sb_bfd, NULL);
     engine_add_input(&en_lflow, &en_sb_logical_flow, NULL);
     engine_add_input(&en_lflow, &en_sb_multicast_group, NULL);
     engine_add_input(&en_lflow, &en_sb_igmp_group, NULL);
     engine_add_input(&en_lflow, &en_northd, NULL);
+    /* XXX: The "en_mac_binding_aging" node should be a separate "root" node
+     * once the I-P engine allows multiple root nodes. */
+    engine_add_input(&en_lflow, &en_mac_binding_aging, NULL);
 
     struct engine_arg engine_arg = {
         .nb_idl = nb->idl,
@@ -228,6 +244,10 @@ void inc_proc_northd_init(struct ovsdb_idl_loop *nb,
                          ip_mcast_index_create(sb->idl);
     struct ovsdb_idl_index *sbrec_chassis_by_hostname =
         chassis_hostname_index_create(sb->idl);
+    struct ovsdb_idl_index *sbrec_static_mac_binding_by_lport_ip
+        = static_mac_binding_index_create(sb->idl);
+    struct ovsdb_idl_index *sbrec_mac_binding_by_datapath
+        = mac_binding_by_datapath_index_create(sb->idl);
 
     engine_init(&en_lflow, &engine_arg);
 
@@ -246,6 +266,12 @@ void inc_proc_northd_init(struct ovsdb_idl_loop *nb,
     engine_ovsdb_node_add_index(&en_sb_ip_multicast,
                                 "sbrec_ip_mcast_by_dp",
                                 sbrec_ip_mcast_by_dp);
+    engine_ovsdb_node_add_index(&en_sb_static_mac_binding,
+                                "sbrec_static_mac_binding_by_lport_ip",
+                                sbrec_static_mac_binding_by_lport_ip);
+    engine_ovsdb_node_add_index(&en_sb_mac_binding,
+                                "sbrec_mac_binding_by_datapath",
+                                sbrec_mac_binding_by_datapath);
 }
 
 void inc_proc_northd_run(struct ovsdb_idl_txn *ovnnb_txn,
diff --git a/northd/mac-binding-aging.c b/northd/mac-binding-aging.c
new file mode 100644
index 0000000..f65353a
--- /dev/null
+++ b/northd/mac-binding-aging.c
@@ -0,0 +1,192 @@
+/* Copyright (c) 2022, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include "lib/inc-proc-eng.h"
+#include "lib/ovn-nb-idl.h"
+#include "lib/ovn-sb-idl.h"
+#include "lib/timeval.h"
+#include "northd/mac-binding-aging.h"
+#include "northd/northd.h"
+#include "openvswitch/hmap.h"
+#include "openvswitch/poll-loop.h"
+#include "openvswitch/util.h"
+#include "openvswitch/vlog.h"
+
+VLOG_DEFINE_THIS_MODULE(mac_binding_aging);
+
+#define MAC_BINDING_BULK_REMOVAL_DELAY_MSEC 5000
+
+struct mac_binding_waker {
+    bool should_schedule;
+    long long next_wake_msec;
+};
+
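+/* Removes MAC_Binding records on datapath 'dp' that are older than the
+ * logical router's 'mac_binding_age_threshold' option (in seconds).  Tracks
+ * the shortest remaining time to expiry in '*wake_delay' and stops early
+ * once 'removal_limit' records (if non-zero) have been removed. */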
+static void
+mac_binding_aging_run_for_datapath(const struct sbrec_datapath_binding *dp,
+                                   const struct nbrec_logical_router *nbr,
+                                   struct ovsdb_idl_index *mb_by_datapath,
+                                   int64_t now, int64_t *wake_delay,
+                                   uint32_t removal_limit, uint32_t *removed_n)
+{
+    uint64_t threshold = smap_get_uint(&nbr->options,
+                                       "mac_binding_age_threshold",
+                                       0) * 1000;
+    if (!threshold) {
+        return;
+    }
+
+    struct sbrec_mac_binding *mb_index_row =
+        sbrec_mac_binding_index_init_row(mb_by_datapath);
+    sbrec_mac_binding_index_set_datapath(mb_index_row, dp);
+
+    const struct sbrec_mac_binding *mb;
+    SBREC_MAC_BINDING_FOR_EACH_EQUAL (mb, mb_index_row, mb_by_datapath) {
+        int64_t elapsed = now - mb->timestamp;
+
+        if (elapsed < 0) {
+            continue;
+        } else if (elapsed >= threshold) {
+            sbrec_mac_binding_delete(mb);
+            (*removed_n)++;
+            if (removal_limit && *removed_n == removal_limit) {
+                break;
+            }
+        } else {
+            *wake_delay = MIN(*wake_delay, threshold - elapsed);
+        }
+    }
+    sbrec_mac_binding_index_destroy_row(mb_index_row);
+}
+
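+/* Returns the NB_Global options:mac_binding_removal_limit value, or 0
+ * (no limit) if it is not configured. */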
+static uint32_t
+get_removal_limit(struct engine_node *node)
+{
+    const struct nbrec_nb_global_table *nb_global_table =
+        EN_OVSDB_GET(engine_get_input("NB_nb_global", node));
+    const struct nbrec_nb_global *nb =
+        nbrec_nb_global_table_first(nb_global_table);
+    if (!nb) {
+       return 0;
+    }
+
+    return smap_get_uint(&nb->options, "mac_binding_removal_limit", 0);
+}
+
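+/* Walks all logical router datapaths, removes expired MAC bindings and
+ * (re)arms the waker either for the next expected expiry or for the bulk
+ * removal delay when the removal limit was reached. */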
+void
+en_mac_binding_aging_run(struct engine_node *node, void *data OVS_UNUSED)
+{
+    const struct engine_context *eng_ctx = engine_get_context();
+    struct northd_data *northd_data = engine_get_input_data("northd", node);
+    struct mac_binding_waker *waker =
+        engine_get_input_data("mac_binding_aging_waker", node);
+
+    if (!eng_ctx->ovnsb_idl_txn ||
+        !northd_data->features.mac_binding_timestamp ||
+        time_msec() < waker->next_wake_msec) {
+        return;
+    }
+
+    int64_t next_expire_msec = INT64_MAX;
+    int64_t now = time_wall_msec();
+    uint32_t removal_limit = get_removal_limit(node);
+    uint32_t removed_n = 0;
+    struct ovsdb_idl_index *sbrec_mac_binding_by_datapath =
+        engine_ovsdb_node_get_index(engine_get_input("SB_mac_binding", node),
+                                    "sbrec_mac_binding_by_datapath");
+
+    struct ovn_datapath *od;
+    HMAP_FOR_EACH (od, key_node, &northd_data->datapaths) {
+        if (od->sb && od->nbr) {
+            mac_binding_aging_run_for_datapath(od->sb, od->nbr,
+                                               sbrec_mac_binding_by_datapath,
+                                               now, &next_expire_msec,
+                                               removal_limit, &removed_n);
+            if (removal_limit && removed_n == removal_limit) {
+                /* Schedule the next run after specified delay. */
+                next_expire_msec = MAC_BINDING_BULK_REMOVAL_DELAY_MSEC;
+                break;
+            }
+        }
+    }
+
+    if (next_expire_msec < INT64_MAX) {
+        waker->should_schedule = true;
+        waker->next_wake_msec = time_msec() + next_expire_msec;
+        poll_timer_wait_until(waker->next_wake_msec);
+    } else {
+        waker->should_schedule = false;
+    }
+
+    /* This node is an input of the lflow node, but lflow does not depend on
+     * its data.  Setting the state to unchanged avoids triggering the lflow
+     * node when it is not needed. */
+    engine_set_node_state(node, EN_UNCHANGED);
+}
+
+void *
+en_mac_binding_aging_init(struct engine_node *node OVS_UNUSED,
+                          struct engine_arg *arg OVS_UNUSED)
+{
+    return NULL;
+}
+
+void
+en_mac_binding_aging_cleanup(void *data OVS_UNUSED)
+{
+}
+
+/* The waker node is an input node, but the data about when to wake up
+ * the aging node is populated by the aging node itself.
+ * The reason is that the engine periodically runs input nodes to check
+ * whether there are updates before processing the other nodes; the waker
+ * cannot depend on another node, because then it would no longer be an
+ * input node. */
+void
+en_mac_binding_aging_waker_run(struct engine_node *node, void *data)
+{
+    struct mac_binding_waker *waker = data;
+
+    engine_set_node_state(node, EN_UNCHANGED);
+
+    if (!waker->should_schedule) {
+        return;
+    }
+
+    if (time_msec() >= waker->next_wake_msec) {
+        waker->should_schedule = false;
+        engine_set_node_state(node, EN_UPDATED);
+        return;
+    }
+
+    poll_timer_wait_until(waker->next_wake_msec);
+}
+
+void *
+en_mac_binding_aging_waker_init(struct engine_node *node OVS_UNUSED,
+                                struct engine_arg *arg OVS_UNUSED)
+{
+    struct mac_binding_waker *waker = xmalloc(sizeof *waker);
+
+    waker->should_schedule = false;
+    waker->next_wake_msec = 0;
+
+    return waker;
+}
+
+void
+en_mac_binding_aging_waker_cleanup(void *data OVS_UNUSED)
+{
+}
diff --git a/northd/mac-binding-aging.h b/northd/mac-binding-aging.h
new file mode 100644
index 0000000..296a7ab
--- /dev/null
+++ b/northd/mac-binding-aging.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2022, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAC_BINDING_AGING_H
+#define MAC_BINDING_AGING_H 1
+
+#include "lib/inc-proc-eng.h"
+
+/* The MAC binding aging node functions. */
+void en_mac_binding_aging_run(struct engine_node *node, void *data);
+void *en_mac_binding_aging_init(struct engine_node *node,
+                                struct engine_arg *arg);
+void en_mac_binding_aging_cleanup(void *data);
+
+/* The MAC binding aging waker node functions. */
+void en_mac_binding_aging_waker_run(struct engine_node *node, void *data);
+void *en_mac_binding_aging_waker_init(struct engine_node *node,
+                                      struct engine_arg *arg);
+void en_mac_binding_aging_waker_cleanup(void *data);
+
+#endif /* northd/mac-binding-aging.h */
diff --git a/northd/northd.c b/northd/northd.c
index d06f6a7..84440a4 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -28,6 +28,7 @@
 #include "ovn/lex.h"
 #include "lib/chassis-index.h"
 #include "lib/ip-mcast-index.h"
+#include "lib/static-mac-binding-index.h"
 #include "lib/copp.h"
 #include "lib/mcast-group-index.h"
 #include "lib/ovn-l7.h"
@@ -58,9 +59,12 @@
 VLOG_DEFINE_THIS_MODULE(northd);
 
 static bool controller_event_en;
+static bool lflow_hash_lock_initialized = false;
 
 static bool check_lsp_is_up;
 
+static bool install_ls_lb_from_router;
+
 /* MAC allocated for service monitor usage. Just one mac is allocated
  * for this purpose and ovn-controller's on each chassis will make use
  * of this mac when sending out the packets to monitor the services
@@ -73,6 +77,12 @@ static struct eth_addr svc_monitor_mac_ea;
  * Otherwise, it will avoid using it.  The default is true. */
 static bool use_ct_inv_match = true;
 
+/* If this option is 'true', northd will implicitly add a lowest-priority
+ * drop rule in the ACL stage of logical switches that have at least one
+ * ACL.
+ */
+static bool default_acl_drop;
+
 #define MAX_OVN_TAGS 4096
 
 /* Pipeline stages. */
@@ -100,32 +110,31 @@ enum ovn_datapath_type {
 enum ovn_stage {
 #define PIPELINE_STAGES                                                   \
     /* Logical switch ingress stages. */                                  \
-    PIPELINE_STAGE(SWITCH, IN,  PORT_SEC_L2,    0, "ls_in_port_sec_l2")   \
-    PIPELINE_STAGE(SWITCH, IN,  PORT_SEC_IP,    1, "ls_in_port_sec_ip")   \
-    PIPELINE_STAGE(SWITCH, IN,  PORT_SEC_ND,    2, "ls_in_port_sec_nd")   \
-    PIPELINE_STAGE(SWITCH, IN,  LOOKUP_FDB ,    3, "ls_in_lookup_fdb")    \
-    PIPELINE_STAGE(SWITCH, IN,  PUT_FDB,        4, "ls_in_put_fdb")       \
-    PIPELINE_STAGE(SWITCH, IN,  PRE_ACL,        5, "ls_in_pre_acl")       \
-    PIPELINE_STAGE(SWITCH, IN,  PRE_LB,         6, "ls_in_pre_lb")        \
-    PIPELINE_STAGE(SWITCH, IN,  PRE_STATEFUL,   7, "ls_in_pre_stateful")  \
-    PIPELINE_STAGE(SWITCH, IN,  ACL_HINT,       8, "ls_in_acl_hint")      \
-    PIPELINE_STAGE(SWITCH, IN,  ACL,            9, "ls_in_acl")           \
-    PIPELINE_STAGE(SWITCH, IN,  QOS_MARK,      10, "ls_in_qos_mark")      \
-    PIPELINE_STAGE(SWITCH, IN,  QOS_METER,     11, "ls_in_qos_meter")     \
-    PIPELINE_STAGE(SWITCH, IN,  LB,            12, "ls_in_lb")  \
-    PIPELINE_STAGE(SWITCH, IN,  ACL_AFTER_LB,  13, "ls_in_acl_after_lb")  \
-    PIPELINE_STAGE(SWITCH, IN,  STATEFUL,      14, "ls_in_stateful")      \
-    PIPELINE_STAGE(SWITCH, IN,  PRE_HAIRPIN,   15, "ls_in_pre_hairpin")   \
-    PIPELINE_STAGE(SWITCH, IN,  NAT_HAIRPIN,   16, "ls_in_nat_hairpin")   \
-    PIPELINE_STAGE(SWITCH, IN,  HAIRPIN,       17, "ls_in_hairpin")       \
-    PIPELINE_STAGE(SWITCH, IN,  ARP_ND_RSP,    18, "ls_in_arp_rsp")       \
-    PIPELINE_STAGE(SWITCH, IN,  DHCP_OPTIONS,  19, "ls_in_dhcp_options")  \
-    PIPELINE_STAGE(SWITCH, IN,  DHCP_RESPONSE, 20, "ls_in_dhcp_response") \
-    PIPELINE_STAGE(SWITCH, IN,  DNS_LOOKUP,    21, "ls_in_dns_lookup")    \
-    PIPELINE_STAGE(SWITCH, IN,  DNS_RESPONSE,  22, "ls_in_dns_response")  \
-    PIPELINE_STAGE(SWITCH, IN,  EXTERNAL_PORT, 23, "ls_in_external_port") \
-    PIPELINE_STAGE(SWITCH, IN,  L2_LKUP,       24, "ls_in_l2_lkup")       \
-    PIPELINE_STAGE(SWITCH, IN,  L2_UNKNOWN,    25, "ls_in_l2_unknown")    \
+    PIPELINE_STAGE(SWITCH, IN,  CHECK_PORT_SEC, 0, "ls_in_check_port_sec")   \
+    PIPELINE_STAGE(SWITCH, IN,  APPLY_PORT_SEC, 1, "ls_in_apply_port_sec")   \
+    PIPELINE_STAGE(SWITCH, IN,  LOOKUP_FDB ,    2, "ls_in_lookup_fdb")    \
+    PIPELINE_STAGE(SWITCH, IN,  PUT_FDB,        3, "ls_in_put_fdb")       \
+    PIPELINE_STAGE(SWITCH, IN,  PRE_ACL,        4, "ls_in_pre_acl")       \
+    PIPELINE_STAGE(SWITCH, IN,  PRE_LB,         5, "ls_in_pre_lb")        \
+    PIPELINE_STAGE(SWITCH, IN,  PRE_STATEFUL,   6, "ls_in_pre_stateful")  \
+    PIPELINE_STAGE(SWITCH, IN,  ACL_HINT,       7, "ls_in_acl_hint")      \
+    PIPELINE_STAGE(SWITCH, IN,  ACL,            8, "ls_in_acl")           \
+    PIPELINE_STAGE(SWITCH, IN,  QOS_MARK,       9, "ls_in_qos_mark")      \
+    PIPELINE_STAGE(SWITCH, IN,  QOS_METER,     10, "ls_in_qos_meter")     \
+    PIPELINE_STAGE(SWITCH, IN,  LB,            11, "ls_in_lb")            \
+    PIPELINE_STAGE(SWITCH, IN,  ACL_AFTER_LB,  12, "ls_in_acl_after_lb")  \
+    PIPELINE_STAGE(SWITCH, IN,  STATEFUL,      13, "ls_in_stateful")      \
+    PIPELINE_STAGE(SWITCH, IN,  PRE_HAIRPIN,   14, "ls_in_pre_hairpin")   \
+    PIPELINE_STAGE(SWITCH, IN,  NAT_HAIRPIN,   15, "ls_in_nat_hairpin")   \
+    PIPELINE_STAGE(SWITCH, IN,  HAIRPIN,       16, "ls_in_hairpin")       \
+    PIPELINE_STAGE(SWITCH, IN,  ARP_ND_RSP,    17, "ls_in_arp_rsp")       \
+    PIPELINE_STAGE(SWITCH, IN,  DHCP_OPTIONS,  18, "ls_in_dhcp_options")  \
+    PIPELINE_STAGE(SWITCH, IN,  DHCP_RESPONSE, 19, "ls_in_dhcp_response") \
+    PIPELINE_STAGE(SWITCH, IN,  DNS_LOOKUP,    20, "ls_in_dns_lookup")    \
+    PIPELINE_STAGE(SWITCH, IN,  DNS_RESPONSE,  21, "ls_in_dns_response")  \
+    PIPELINE_STAGE(SWITCH, IN,  EXTERNAL_PORT, 22, "ls_in_external_port") \
+    PIPELINE_STAGE(SWITCH, IN,  L2_LKUP,       23, "ls_in_l2_lkup")       \
+    PIPELINE_STAGE(SWITCH, IN,  L2_UNKNOWN,    24, "ls_in_l2_unknown")    \
                                                                           \
     /* Logical switch egress stages. */                                   \
     PIPELINE_STAGE(SWITCH, OUT, PRE_LB,       0, "ls_out_pre_lb")         \
@@ -136,8 +145,8 @@ enum ovn_stage {
     PIPELINE_STAGE(SWITCH, OUT, QOS_MARK,     5, "ls_out_qos_mark")       \
     PIPELINE_STAGE(SWITCH, OUT, QOS_METER,    6, "ls_out_qos_meter")      \
     PIPELINE_STAGE(SWITCH, OUT, STATEFUL,     7, "ls_out_stateful")       \
-    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_IP,  8, "ls_out_port_sec_ip")    \
-    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_L2,  9, "ls_out_port_sec_l2")    \
+    PIPELINE_STAGE(SWITCH, OUT, CHECK_PORT_SEC,  8, "ls_out_check_port_sec") \
+    PIPELINE_STAGE(SWITCH, OUT, APPLY_PORT_SEC,  9, "ls_out_apply_port_sec") \
                                                                       \
     /* Logical router ingress stages. */                              \
     PIPELINE_STAGE(ROUTER, IN,  ADMISSION,       0, "lr_in_admission")    \
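The stage renumbering above has to keep the table numbers dense because the PIPELINE_STAGES list drives both the stage enum values and the human-readable stage names. The exact expansion of PIPELINE_STAGE is not shown in this hunk, so the sketch below only illustrates the general X-macro pattern such a table relies on, using hypothetical EXAMPLE_STAGE/EXAMPLE_STAGES names:

#include <stdio.h>

/* Hypothetical, cut-down stage table in the same X-macro style. */
#define EXAMPLE_STAGES                                            \
    EXAMPLE_STAGE(CHECK_PORT_SEC, 0, "ls_in_check_port_sec")      \
    EXAMPLE_STAGE(APPLY_PORT_SEC, 1, "ls_in_apply_port_sec")      \
    EXAMPLE_STAGE(LOOKUP_FDB,     2, "ls_in_lookup_fdb")

/* One expansion of the list produces the enum values (table numbers)... */
enum example_stage {
#define EXAMPLE_STAGE(NAME, TABLE, STR) EX_STAGE_##NAME = (TABLE),
    EXAMPLE_STAGES
#undef EXAMPLE_STAGE
};

/* ...and another expansion of the same list produces the stage names. */
static const char *
example_stage_name(enum example_stage stage)
{
    switch (stage) {
#define EXAMPLE_STAGE(NAME, TABLE, STR) case EX_STAGE_##NAME: return STR;
    EXAMPLE_STAGES
#undef EXAMPLE_STAGE
    default:
        return "unknown";
    }
}

int
main(void)
{
    printf("table %d is %s\n", EX_STAGE_LOOKUP_FDB,
           example_stage_name(EX_STAGE_LOOKUP_FDB));
    return 0;
}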
@@ -199,6 +208,8 @@ enum ovn_stage {
 #define REGBIT_LKUP_FDB           "reg0[11]"
 #define REGBIT_HAIRPIN_REPLY      "reg0[12]"
 #define REGBIT_ACL_LABEL          "reg0[13]"
+#define REGBIT_FROM_RAMP          "reg0[14]"
+#define REGBIT_PORT_SEC_DROP      "reg0[15]"
 
 #define REG_ORIG_DIP_IPV4         "reg1"
 #define REG_ORIG_DIP_IPV6         "xxreg1"
@@ -216,6 +227,7 @@ enum ovn_stage {
 #define REGBIT_LOOKUP_NEIGHBOR_RESULT "reg9[2]"
 #define REGBIT_LOOKUP_NEIGHBOR_IP_RESULT "reg9[3]"
 #define REGBIT_DST_NAT_IP_LOCAL "reg9[4]"
+#define REGBIT_KNOWN_ECMP_NH    "reg9[5]"
 
 /* Register to store the eth address associated to a router port for packets
  * received in S_ROUTER_IN_ADMISSION.
@@ -261,15 +273,15 @@ enum ovn_stage {
  * |    | REGBIT_ACL_HINT_{ALLOW_NEW/ALLOW/DROP/BLOCK} |   |                  |
  * |    |     REGBIT_ACL_LABEL                         | X |                  |
  * +----+----------------------------------------------+ X |                  |
- * | R1 |         ORIG_DIP_IPV4 (>= IN_STATEFUL)       | R |                  |
+ * | R1 |         ORIG_DIP_IPV4 (>= IN_PRE_STATEFUL)   | R |                  |
  * +----+----------------------------------------------+ E |                  |
- * | R2 |         ORIG_TP_DPORT (>= IN_STATEFUL)       | G |                  |
+ * | R2 |         ORIG_TP_DPORT (>= IN_PRE_STATEFUL)   | G |                  |
  * +----+----------------------------------------------+ 0 |                  |
  * | R3 |                  ACL LABEL                   |   |                  |
  * +----+----------------------------------------------+---+------------------+
  * | R4 |                   UNUSED                     |   |                  |
- * +----+----------------------------------------------+ X |   ORIG_DIP_IPV6  |
- * | R5 |                   UNUSED                     | X | (>= IN_STATEFUL) |
+ * +----+----------------------------------------------+ X | ORIG_DIP_IPV6(>= |
+ * | R5 |                   UNUSED                     | X | IN_PRE_STATEFUL) |
  * +----+----------------------------------------------+ R |                  |
  * | R6 |                   UNUSED                     | E |                  |
  * +----+----------------------------------------------+ G |                  |
@@ -316,7 +328,8 @@ enum ovn_stage {
  * |     |   EGRESS_LOOPBACK/       | G |     UNUSED      |
  * | R9  |   PKT_LARGER/            | 4 |                 |
  * |     |   LOOKUP_NEIGHBOR_RESULT/|   |                 |
- * |     |   SKIP_LOOKUP_NEIGHBOR}  |   |                 |
+ * |     |   SKIP_LOOKUP_NEIGHBOR/  |   |                 |
+ * |     |   KNOWN_ECMP_NH}         |   |                 |
  * |     |                          |   |                 |
  * |     | REG_ORIG_TP_DPORT_ROUTER |   |                 |
  * |     |                          |   |                 |
@@ -396,14 +409,23 @@ build_chassis_features(const struct northd_input *input_data,
     const struct sbrec_chassis *chassis;
 
     SBREC_CHASSIS_TABLE_FOR_EACH (chassis, input_data->sbrec_chassis) {
-        if (!smap_get_bool(&chassis->other_config,
-                           OVN_FEATURE_CT_NO_MASKED_LABEL,
-                           false)) {
+        bool ct_no_masked_label =
+            smap_get_bool(&chassis->other_config,
+                          OVN_FEATURE_CT_NO_MASKED_LABEL,
+                          false);
+        if (!ct_no_masked_label && chassis_features->ct_no_masked_label) {
             chassis_features->ct_no_masked_label = false;
-            return;
+        }
+
+        bool mac_binding_timestamp =
+            smap_get_bool(&chassis->other_config,
+                          OVN_FEATURE_MAC_BINDING_TIMESTAMP,
+                          false);
+        if (!mac_binding_timestamp &&
+            chassis_features->mac_binding_timestamp) {
+            chassis_features->mac_binding_timestamp = false;
         }
     }
-    chassis_features->ct_no_masked_label = true;
 }
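The rewritten loop follows an "assume supported, clear on the first counter-example" pattern: a feature stays advertised only if every chassis reports it, and the old early return had to go because more than one feature is now evaluated in a single pass. A minimal stand-alone sketch of that pattern, with hypothetical example_* types (the real code reads the flags from each chassis' other_config column):

#include <stdbool.h>
#include <stddef.h>

struct example_chassis {
    bool ct_no_masked_label;
    bool mac_binding_timestamp;
};

struct example_features {
    bool ct_no_masked_label;
    bool mac_binding_timestamp;
};

static void
example_accumulate_features(struct example_features *features,
                            const struct example_chassis *chassis,
                            size_t n_chassis)
{
    /* Start optimistic and clear a flag as soon as one chassis lacks the
     * feature.  No early return: the other feature still has to be
     * evaluated against the remaining chassis. */
    features->ct_no_masked_label = true;
    features->mac_binding_timestamp = true;

    for (size_t i = 0; i < n_chassis; i++) {
        features->ct_no_masked_label &= chassis[i].ct_no_masked_label;
        features->mac_binding_timestamp &= chassis[i].mac_binding_timestamp;
    }
}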
 
 struct ovn_chassis_qdisc_queues {
@@ -503,74 +525,6 @@ port_has_qos_params(const struct smap *opts)
 }
 
 
-/*
- * Multicast snooping and querier per datapath configuration.
- */
-struct mcast_switch_info {
-
-    bool enabled;               /* True if snooping enabled. */
-    bool querier;               /* True if querier enabled. */
-    bool flood_unregistered;    /* True if unregistered multicast should be
-                                 * flooded.
-                                 */
-    bool flood_relay;           /* True if the switch is connected to a
-                                 * multicast router and unregistered multicast
-                                 * should be flooded to the mrouter. Only
-                                 * applicable if flood_unregistered == false.
-                                 */
-    bool flood_reports;         /* True if the switch has at least one port
-                                 * configured to flood reports.
-                                 */
-    bool flood_static;          /* True if the switch has at least one port
-                                 * configured to flood traffic.
-                                 */
-    int64_t table_size;         /* Max number of IP multicast groups. */
-    int64_t idle_timeout;       /* Timeout after which an idle group is
-                                 * flushed.
-                                 */
-    int64_t query_interval;     /* Interval between multicast queries. */
-    char *eth_src;              /* ETH src address of the queries. */
-    char *ipv4_src;             /* IPv4 src address of the queries. */
-    char *ipv6_src;             /* IPv6 src address of the queries. */
-
-    int64_t query_max_response; /* Expected time after which reports should
-                                 * be received for queries that were sent out.
-                                 */
-
-    atomic_uint64_t active_v4_flows;   /* Current number of active IPv4 multicast
-                                 * flows.
-                                 */
-    atomic_uint64_t active_v6_flows;   /* Current number of active IPv6 multicast
-                                 * flows.
-                                 */
-};
-
-struct mcast_router_info {
-    bool relay;        /* True if the router should relay IP multicast. */
-    bool flood_static; /* True if the router has at least one port configured
-                        * to flood traffic.
-                        */
-};
-
-struct mcast_info {
-
-    struct hmap group_tnlids;  /* Group tunnel IDs in use on this DP. */
-    uint32_t group_tnlid_hint; /* Hint for allocating next group tunnel ID. */
-    struct ovs_list groups;    /* List of groups learnt on this DP. */
-
-    union {
-        struct mcast_switch_info sw;  /* Switch specific multicast info. */
-        struct mcast_router_info rtr; /* Router specific multicast info. */
-    };
-};
-
-struct mcast_port_info {
-    bool flood;         /* True if the port should flood IP multicast traffic
-                         * regardless if it's registered or not. */
-    bool flood_reports; /* True if the port should flood IP multicast reports
-                         * (e.g., IGMP join/leave). */
-};
-
 static void
 init_mcast_port_info(struct mcast_port_info *mcast_info,
                      const struct nbrec_logical_switch_port *nbsp,
@@ -600,91 +554,6 @@ ovn_mcast_group_allocate_key(struct mcast_info *mcast_info)
                               &mcast_info->group_tnlid_hint);
 }
 
-/* The 'key' comes from nbs->header_.uuid or nbr->header_.uuid or
- * sb->external_ids:logical-switch. */
-struct ovn_datapath {
-    struct hmap_node key_node;  /* Index on 'key'. */
-    struct uuid key;            /* (nbs/nbr)->header_.uuid. */
-
-    const struct nbrec_logical_switch *nbs;  /* May be NULL. */
-    const struct nbrec_logical_router *nbr;  /* May be NULL. */
-    const struct sbrec_datapath_binding *sb; /* May be NULL. */
-
-    struct ovs_list list;       /* In list of similar records. */
-
-    uint32_t tunnel_key;
-
-    /* Logical switch data. */
-    struct ovn_port **router_ports;
-    size_t n_router_ports;
-
-    struct hmap port_tnlids;
-    uint32_t port_key_hint;
-
-    bool has_stateful_acl;
-    bool has_lb_vip;
-    bool has_unknown;
-    bool has_acls;
-
-    /* IPAM data. */
-    struct ipam_info ipam_info;
-
-    /* Multicast data. */
-    struct mcast_info mcast_info;
-
-    /* Applies to only logical router datapath.
-     * True if logical router is a gateway router. i.e options:chassis is set.
-     * If this is true, then 'l3dgw_port' will be ignored. */
-    bool is_gw_router;
-
-    /* OVN northd only needs to know about the logical router gateway port for
-     * NAT on a distributed router.  The "distributed gateway ports" are
-     * populated only when there is a gateway chassis or ha chassis group
-     * specified for some of the ports on the logical router. Otherwise this
-     * will be NULL. */
-    struct ovn_port **l3dgw_ports;
-    size_t n_l3dgw_ports;
-
-    /* NAT entries configured on the router. */
-    struct ovn_nat *nat_entries;
-    size_t n_nat_entries;
-
-    bool has_distributed_nat;
-
-    /* Set of nat external ips on the router. */
-    struct sset external_ips;
-
-    /* SNAT IPs owned by the router (shash of 'struct ovn_snat_ip'). */
-    struct shash snat_ips;
-
-    struct lport_addresses dnat_force_snat_addrs;
-    struct lport_addresses lb_force_snat_addrs;
-    bool lb_force_snat_router_ip;
-    /* The "routable" ssets are subsets of the load balancer
-     * IPs for which IP routes and ARP resolution flows are automatically
-     * added
-     */
-    struct sset lb_ips_v4;
-    struct sset lb_ips_v4_routable;
-    struct sset lb_ips_v4_reachable;
-    struct sset lb_ips_v6;
-    struct sset lb_ips_v6_routable;
-    struct sset lb_ips_v6_reachable;
-
-    struct ovn_port **localnet_ports;
-    size_t n_localnet_ports;
-
-    struct ovs_list lr_list; /* In list of logical router datapaths. */
-    /* The logical router group to which this datapath belongs.
-     * Valid only if it is logical router datapath. NULL otherwise. */
-    struct lrouter_group *lr_group;
-
-    /* Port groups related to the datapath, used only when nbs is NOT NULL. */
-    struct hmap nb_pgs;
-
-    struct ovs_list port_list;
-};
-
 /* Contains a NAT entry with the external addresses pre-parsed. */
 struct ovn_nat {
     const struct nbrec_nat *nb;
@@ -952,13 +821,6 @@ lr_lb_address_set_ref(const struct ovn_datapath *od, int addr_family)
 static void
 init_lb_for_datapath(struct ovn_datapath *od)
 {
-    sset_init(&od->lb_ips_v4);
-    sset_init(&od->lb_ips_v4_routable);
-    sset_init(&od->lb_ips_v4_reachable);
-    sset_init(&od->lb_ips_v6);
-    sset_init(&od->lb_ips_v6_routable);
-    sset_init(&od->lb_ips_v6_reachable);
-
     if (od->nbs) {
         od->has_lb_vip = ls_has_lb_vip(od);
     } else {
@@ -969,16 +831,12 @@ init_lb_for_datapath(struct ovn_datapath *od)
 static void
 destroy_lb_for_datapath(struct ovn_datapath *od)
 {
+    ovn_lb_ip_set_destroy(od->lb_ips);
+    od->lb_ips = NULL;
+
     if (!od->nbs && !od->nbr) {
         return;
     }
-
-    sset_destroy(&od->lb_ips_v4);
-    sset_destroy(&od->lb_ips_v4_routable);
-    sset_destroy(&od->lb_ips_v4_reachable);
-    sset_destroy(&od->lb_ips_v6);
-    sset_destroy(&od->lb_ips_v6_routable);
-    sset_destroy(&od->lb_ips_v6_reachable);
 }
 
 /* A group of logical router datapaths which are connected - either
@@ -1097,6 +955,17 @@ ovn_datapath_from_sbrec(const struct hmap *datapaths,
     return NULL;
 }
 
+static void
+ovn_datapath_add_router_port(struct ovn_datapath *od, struct ovn_port *op)
+{
+    if (od->n_router_ports == od->n_allocated_router_ports) {
+        od->router_ports = x2nrealloc(od->router_ports,
+                                      &od->n_allocated_router_ports,
+                                      sizeof *od->router_ports);
+    }
+    od->router_ports[od->n_router_ports++] = op;
+}
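ovn_datapath_add_router_port() centralizes the append-and-grow logic that was previously open-coded at the call site; x2nrealloc() grows the allocation geometrically so repeated appends stay amortized O(1). A self-contained sketch of the same idiom, with a hypothetical example_vec type standing in for the router_ports array:

#include <stdlib.h>

struct example_vec {
    void **items;
    size_t n;          /* Elements in use. */
    size_t allocated;  /* Elements allocated. */
};

static void
example_vec_push(struct example_vec *vec, void *item)
{
    if (vec->n == vec->allocated) {
        /* Double the capacity only when the array is full. */
        vec->allocated = vec->allocated ? 2 * vec->allocated : 4;
        vec->items = realloc(vec->items,
                             vec->allocated * sizeof *vec->items);
        if (!vec->items) {
            abort();   /* OVS uses xrealloc(), which aborts on failure. */
        }
    }
    vec->items[vec->n++] = item;
}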
+
 static bool
 lrouter_is_enabled(const struct nbrec_logical_router *lrouter)
 {
@@ -1312,8 +1181,8 @@ join_datapaths(struct northd_input *input_data,
     ovs_list_init(nb_only);
     ovs_list_init(both);
 
-    const struct sbrec_datapath_binding *sb, *sb_next;
-    SBREC_DATAPATH_BINDING_TABLE_FOR_EACH_SAFE (sb, sb_next,
+    const struct sbrec_datapath_binding *sb;
+    SBREC_DATAPATH_BINDING_TABLE_FOR_EACH_SAFE (sb,
                             input_data->sbrec_datapath_binding_table) {
         struct uuid key;
         if (!smap_get_uuid(&sb->external_ids, "logical-switch", &key) &&
@@ -1478,6 +1347,10 @@ ovn_datapath_assign_requested_tnl_id(struct northd_input *input_data,
     }
 }
 
+/* Array of all datapaths, with 'od->index' being their index in the array. */
+static struct ovn_datapath **datapaths_array = NULL;
+static size_t n_datapaths = 0; /* Size of the 'datapaths_array'. */
+
 /* Updates the southbound Datapath_Binding table so that it contains the
  * logical switches and routers specified by the northbound database.
  *
@@ -1496,7 +1369,7 @@ build_datapaths(struct northd_input *input_data,
 
     /* Assign explicitly requested tunnel ids first. */
     struct hmap dp_tnlids = HMAP_INITIALIZER(&dp_tnlids);
-    struct ovn_datapath *od, *next;
+    struct ovn_datapath *od;
     LIST_FOR_EACH (od, list, &both) {
         ovn_datapath_assign_requested_tnl_id(input_data, &dp_tnlids, od);
     }
@@ -1513,11 +1386,11 @@ build_datapaths(struct northd_input *input_data,
 
     /* Assign new tunnel ids where needed. */
     uint32_t hint = 0;
-    LIST_FOR_EACH_SAFE (od, next, list, &both) {
+    LIST_FOR_EACH_SAFE (od, list, &both) {
         ovn_datapath_allocate_key(input_data,
                                   datapaths, &dp_tnlids, od, &hint);
     }
-    LIST_FOR_EACH_SAFE (od, next, list, &nb_only) {
+    LIST_FOR_EACH_SAFE (od, list, &nb_only) {
         ovn_datapath_allocate_key(input_data,
                                   datapaths, &dp_tnlids, od, &hint);
     }
@@ -1537,11 +1410,24 @@ build_datapaths(struct northd_input *input_data,
     ovn_destroy_tnlids(&dp_tnlids);
 
     /* Delete southbound records without northbound matches. */
-    LIST_FOR_EACH_SAFE (od, next, list, &sb_only) {
+    LIST_FOR_EACH_SAFE (od, list, &sb_only) {
         ovs_list_remove(&od->list);
         sbrec_datapath_binding_delete(od->sb);
         ovn_datapath_destroy(datapaths, od);
     }
+
+    /* Assign unique sequential indexes to all datapaths.  These are not
+     * visible outside of the northd loop, so, unlike the tunnel keys, it
+     * doesn't matter if they are different on every iteration. */
+    size_t index = 0;
+
+    n_datapaths = hmap_count(datapaths);
+    datapaths_array = xrealloc(datapaths_array,
+                               n_datapaths * sizeof *datapaths_array);
+    HMAP_FOR_EACH (od, key_node, datapaths) {
+        od->index = index;
+        datapaths_array[index++] = od;
+    }
 }
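Assigning a dense, per-iteration index to every datapath is what lets later parts of this patch (for example the logical datapath groups further below) represent a set of datapaths as a fixed-size bitmap of n_datapaths bits and fetch any datapath by index in constant time. A simplified illustration of the bit-set/bit-test helpers such a bitmap needs; the real code uses the OVS bitmap utilities, the example_* helpers here are illustrative only:

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

#define EX_BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* Mark the datapath with the given dense index as a group member. */
static void
example_bitmap_set(unsigned long *bitmap, size_t index)
{
    bitmap[index / EX_BITS_PER_LONG] |= 1UL << (index % EX_BITS_PER_LONG);
}

/* Test whether the datapath with the given dense index is a member. */
static bool
example_bitmap_is_set(const unsigned long *bitmap, size_t index)
{
    return (bitmap[index / EX_BITS_PER_LONG]
            >> (index % EX_BITS_PER_LONG)) & 1;
}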
 
 /* Structure representing logical router port
@@ -1813,11 +1699,35 @@ lsp_is_router(const struct nbrec_logical_switch_port *nbsp)
 }
 
 static bool
+lsp_is_remote(const struct nbrec_logical_switch_port *nbsp)
+{
+    return !strcmp(nbsp->type, "remote");
+}
+
+static bool
+lsp_is_localnet(const struct nbrec_logical_switch_port *nbsp)
+{
+    return !strcmp(nbsp->type, "localnet");
+}
+
+static bool
+lsp_is_vtep(const struct nbrec_logical_switch_port *nbsp)
+{
+    return !strcmp(nbsp->type, "vtep");
+}
+
+static bool
+localnet_can_learn_mac(const struct nbrec_logical_switch_port *nbsp)
+{
+    return smap_get_bool(&nbsp->options, "localnet_learn_fdb", false);
+}
+
+static bool
 lsp_is_type_changed(const struct sbrec_port_binding *sb,
                 const struct nbrec_logical_switch_port *nbsp,
-                bool *is_old_container_lport)
+                bool *update_sbrec)
 {
-    *is_old_container_lport = false;
+    *update_sbrec = false;
     if (!sb || !nbsp) {
         return false;
     }
@@ -1829,13 +1739,19 @@ lsp_is_type_changed(const struct sbrec_port_binding *sb,
          */
         if ((!sb->parent_port && nbsp->parent_name) ||
                         (sb->parent_port && !nbsp->parent_name)) {
-            *is_old_container_lport = true;
+            *update_sbrec = true;
             return true;
         } else {
             return false;
         }
     }
 
+    /* Cover cases where the port changed to/from a virtual port. */
+    if (!strcmp(sb->type, "virtual") ||
+                !strcmp(nbsp->type, "virtual")) {
+        *update_sbrec = true;
+    }
+
     /* Both lports are not "VIF's" it is safe to use strcmp. */
     if (sb->type[0] && nbsp->type[0]) {
         return strcmp(sb->type, nbsp->type);
@@ -1857,6 +1773,10 @@ ovn_port_get_peer(const struct hmap *ports, struct ovn_port *op)
         return NULL;
     }
 
+    if (op->peer) {
+        return op->peer;
+    }
+
     const char *peer_name = smap_get(&op->nbsp->options, "router-port");
     if (!peer_name) {
         return NULL;
@@ -1951,6 +1871,23 @@ ipam_add_port_addresses(struct ovn_datapath *od, struct ovn_port *op)
     }
 }
 
+static const char *find_lrp_member_ip(const struct ovn_port *op,
+                                      const char *ip_s);
+
+/* Returns true if the given router port 'op' (assumed to be a distributed
+ * gateway port) is the relevant DGP where the NAT rule of the router needs to
+ * be applied. */
+static bool
+is_nat_gateway_port(const struct nbrec_nat *nat, const struct ovn_port *op)
+{
+    if (op->od->n_l3dgw_ports > 1
+        && ((!nat->gateway_port && !find_lrp_member_ip(op, nat->external_ip))
+            || (nat->gateway_port && nat->gateway_port != op->nbrp))) {
+        return false;
+    }
+    return true;
+}
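is_nat_gateway_port() packs the DGP selection rule into one negated condition. Spelled out positively with simplified, hypothetical types it reads as below; note that find_lrp_member_ip() really performs a subnet-aware membership check, which the sketch reduces to a plain string compare:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct example_nat {
    const char *external_ip;
    const char *gateway_port;   /* NULL when not configured on the NAT. */
};

struct example_dgp {
    const char *name;
    const char *member_ip;      /* An IP owned by this router port. */
};

static bool
example_is_nat_gateway_port(const struct example_nat *nat,
                            const struct example_dgp *op,
                            size_t n_l3dgw_ports)
{
    if (n_l3dgw_ports <= 1) {
        /* With at most one DGP the NAT always applies here. */
        return true;
    }
    if (nat->gateway_port) {
        /* An explicitly configured gateway_port wins. */
        return !strcmp(nat->gateway_port, op->name);
    }
    /* No gateway_port configured: fall back to "does this port own the
     * NAT's external IP?". */
    return !strcmp(nat->external_ip, op->member_ip);
}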
+
 enum dynamic_update_type {
     NONE,    /* No change to the address */
     REMOVE,  /* Address is no longer dynamic */
@@ -2536,19 +2473,18 @@ join_logical_ports(struct northd_input *input_data,
                      *    created one and recompute everything that is needed
                      *    for this lport.
                      *
-                     * This change will affect container lport type changes
-                     * only for now, this change is needed in container
-                     * lport cases to avoid port type conflicts in the
-                     * ovn-controller when the user clears the parent_port
-                     * field in the container lport.
+                     * For now this change only affects container/virtual
+                     * lport type changes; it is needed in container/virtual
+                     * lport cases to avoid port type conflicts in the
+                     * ovn-controller when the user clears the parent_port
+                     * field in the container lport or updates the lport
+                     * type.
                      *
-                     * This approach can be applied to all other lport types
-                     * changes by removing the is_old_container_lport.
                      */
-                    bool is_old_container_lport = false;
+                    bool update_sbrec = false;
                     if (op->sb && lsp_is_type_changed(op->sb, nbsp,
-                                                      &is_old_container_lport)
-                                   && is_old_container_lport) {
+                                                      &update_sbrec)
+                                   && update_sbrec) {
                         ovs_list_remove(&op->list);
                         sbrec_port_binding_delete(op->sb);
                         ovn_port_destroy(ports, op);
@@ -2579,7 +2515,7 @@ join_logical_ports(struct northd_input *input_data,
                     ovs_list_push_back(nb_only, &op->list);
                 }
 
-                if (!strcmp(nbsp->type, "localnet")) {
+                if (lsp_is_localnet(nbsp)) {
                    if (od->n_localnet_ports >= n_allocated_localnet_ports) {
                        od->localnet_ports = x2nrealloc(
                            od->localnet_ports, &n_allocated_localnet_ports,
@@ -2588,6 +2524,10 @@ join_logical_ports(struct northd_input *input_data,
                    od->localnet_ports[od->n_localnet_ports++] = op;
                 }
 
+                if (lsp_is_vtep(nbsp)) {
+                    od->has_vtep_lports = true;
+                }
+
                 op->lsp_addrs
                     = xmalloc(sizeof *op->lsp_addrs * nbsp->n_addresses);
                 for (size_t j = 0; j < nbsp->n_addresses; j++) {
@@ -2733,12 +2673,9 @@ join_logical_ports(struct northd_input *input_data,
                 continue;
             }
 
+            ovn_datapath_add_router_port(op->od, op);
             peer->peer = op;
             op->peer = peer;
-            op->od->router_ports = xrealloc(
-                op->od->router_ports,
-                sizeof *op->od->router_ports * (op->od->n_router_ports + 1));
-            op->od->router_ports[op->od->n_router_ports++] = op;
 
             /* Fill op->lsp_addrs for op->nbsp->addresses[] with
              * contents "router", which was skipped in the loop above. */
@@ -2790,9 +2727,10 @@ join_logical_ports(struct northd_input *input_data,
  * by one or more IP addresses, and if the port is a distributed gateway
  * port, followed by 'is_chassis_resident("LPORT_NAME")', where the
  * LPORT_NAME is the name of the L3 redirect port or the name of the
- * logical_port specified in a NAT rule.  These strings include the
- * external IP addresses of all NAT rules defined on that router, and all
- * of the IP addresses used in load balancer VIPs defined on that router.
+ * logical_port specified in a NAT rule. These strings include the
+ * external IP addresses of NAT rules defined on that router whose
+ * gateway_port is router port 'op', and all of the IP addresses used in
+ * load balancer VIPs defined on that router.
  *
  * The caller must free each of the n returned strings with free(),
  * and must free the returned array when it is no longer needed. */
@@ -2804,8 +2742,7 @@ get_nat_addresses(const struct ovn_port *op, size_t *n, bool routable_only,
     struct eth_addr mac;
     if (!op || !op->nbrp || !op->od || !op->od->nbr
         || (!op->od->nbr->n_nat && !op->od->has_lb_vip)
-        || !eth_addr_from_string(op->nbrp->mac, &mac)
-        || op->od->n_l3dgw_ports > 1) {
+        || !eth_addr_from_string(op->nbrp->mac, &mac)) {
         *n = n_nats;
         return NULL;
     }
@@ -2834,6 +2771,12 @@ get_nat_addresses(const struct ovn_port *op, size_t *n, bool routable_only,
             continue;
         }
 
+        /* Skip the external IP of NAT rules whose gateway_port is
+         * not 'op'. */
+        if (!is_nat_gateway_port(nat, op)) {
+            continue;
+        }
+
         /* Determine whether this NAT rule satisfies the conditions for
          * distributed NAT processing. */
         if (op->od->n_l3dgw_ports && !strcmp(nat->type, "dnat_and_snat")
@@ -2881,20 +2824,20 @@ get_nat_addresses(const struct ovn_port *op, size_t *n, bool routable_only,
     if (include_lb_ips) {
         const char *ip_address;
         if (routable_only) {
-            SSET_FOR_EACH (ip_address, &op->od->lb_ips_v4_routable) {
+            SSET_FOR_EACH (ip_address, &op->od->lb_ips->ips_v4_routable) {
                 ds_put_format(&c_addresses, " %s", ip_address);
                 central_ip_address = true;
             }
-            SSET_FOR_EACH (ip_address, &op->od->lb_ips_v6_routable) {
+            SSET_FOR_EACH (ip_address, &op->od->lb_ips->ips_v6_routable) {
                 ds_put_format(&c_addresses, " %s", ip_address);
                 central_ip_address = true;
             }
         } else {
-            SSET_FOR_EACH (ip_address, &op->od->lb_ips_v4) {
+            SSET_FOR_EACH (ip_address, &op->od->lb_ips->ips_v4) {
                 ds_put_format(&c_addresses, " %s", ip_address);
                 central_ip_address = true;
             }
-            SSET_FOR_EACH (ip_address, &op->od->lb_ips_v6) {
+            SSET_FOR_EACH (ip_address, &op->od->lb_ips->ips_v6) {
                 ds_put_format(&c_addresses, " %s", ip_address);
                 central_ip_address = true;
             }
@@ -2904,9 +2847,9 @@ get_nat_addresses(const struct ovn_port *op, size_t *n, bool routable_only,
     if (central_ip_address) {
         /* Gratuitous ARP for centralized NAT rules on distributed gateway
          * ports should be restricted to the gateway chassis. */
-        if (op->od->n_l3dgw_ports) {
+        if (is_l3dgw_port(op)) {
             ds_put_format(&c_addresses, " is_chassis_resident(%s)",
-                          op->od->l3dgw_ports[0]->cr_port->json_key);
+                          op->cr_port->json_key);
         }
 
         addresses[n_nats++] = ds_steal_cstr(&c_addresses);
@@ -3066,8 +3009,8 @@ chassis_group_list_changed(
         }
     }
 
-    struct shash_node *node, *next;
-    SHASH_FOR_EACH_SAFE (node, next, &nb_ha_chassis_list) {
+    struct shash_node *node;
+    SHASH_FOR_EACH_SAFE (node, &nb_ha_chassis_list) {
         shash_delete(&nb_ha_chassis_list, node);
         changed = true;
     }
@@ -3209,6 +3152,74 @@ ovn_update_ipv6_prefix(struct hmap *ports)
     }
 }
 
+static const struct sbrec_chassis *
+chassis_lookup(struct ovsdb_idl_index *sbrec_chassis_by_name,
+               struct ovsdb_idl_index *sbrec_chassis_by_hostname,
+               const char *name_or_hostname)
+{
+    const struct sbrec_chassis *chassis; /* May be NULL. */
+    chassis = chassis_lookup_by_name(sbrec_chassis_by_name,
+                                     name_or_hostname);
+    return chassis ? chassis : chassis_lookup_by_hostname(
+                    sbrec_chassis_by_hostname, name_or_hostname);
+}
+
+static void
+ovn_port_update_sbrec_chassis(
+        struct ovsdb_idl_index *sbrec_chassis_by_name,
+        struct ovsdb_idl_index *sbrec_chassis_by_hostname,
+        const struct ovn_port *op)
+{
+    const char *requested_chassis; /* May be NULL. */
+
+    size_t n_requested_chassis = 0;
+    struct sbrec_chassis **requested_chassis_sb = xcalloc(
+        n_requested_chassis, sizeof *requested_chassis_sb);
+
+    requested_chassis = smap_get(&op->nbsp->options,
+                                 "requested-chassis");
+    if (requested_chassis) {
+        char *tokstr = xstrdup(requested_chassis);
+        char *save_ptr = NULL;
+        char *chassis;
+        for (chassis = strtok_r(tokstr, ",", &save_ptr); chassis != NULL;
+             chassis = strtok_r(NULL, ",", &save_ptr)) {
+            const struct sbrec_chassis *chassis_sb = chassis_lookup(
+                sbrec_chassis_by_name, sbrec_chassis_by_hostname, chassis);
+            if (chassis_sb) {
+                requested_chassis_sb = xrealloc(
+                    requested_chassis_sb,
+                    ++n_requested_chassis * (sizeof *requested_chassis_sb));
+                requested_chassis_sb[n_requested_chassis - 1] = (
+                    (struct sbrec_chassis *) chassis_sb);
+            } else {
+                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(
+                    1, 1);
+                VLOG_WARN_RL(
+                    &rl,
+                    "Unknown chassis '%s' set in "
+                    "options:requested-chassis on LSP '%s'.",
+                    chassis, op->nbsp->name);
+            }
+        }
+        free(tokstr);
+    }
+
+    if (n_requested_chassis > 0) {
+        sbrec_port_binding_set_requested_chassis(op->sb,
+                                                 *requested_chassis_sb);
+    } else {
+        sbrec_port_binding_set_requested_chassis(op->sb, NULL);
+    }
+    if (n_requested_chassis > 1) {
+        sbrec_port_binding_set_requested_additional_chassis(
+            op->sb, &requested_chassis_sb[1], n_requested_chassis - 1);
+    } else {
+        sbrec_port_binding_set_requested_additional_chassis(op->sb, NULL, 0);
+    }
+    free(requested_chassis_sb);
+}
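The new helper accepts a comma-separated options:requested-chassis value, resolving each token by chassis name or hostname; the first resolved entry becomes the requested chassis and any further entries become additional chassis. A stand-alone sketch of just the tokenizing step (the sample value is made up):

#include <stdio.h>
#include <string.h>

int
main(void)
{
    char value[] = "chassis-1,host-2.example.org,chassis-3";
    char *save_ptr = NULL;
    size_t n = 0;

    for (char *chassis = strtok_r(value, ",", &save_ptr); chassis;
         chassis = strtok_r(NULL, ",", &save_ptr)) {
        printf("%s chassis: %s\n",
               n++ ? "additional" : "requested", chassis);
    }
    return 0;
}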
+
 static void
 ovn_port_update_sbrec(struct northd_input *input_data,
                       struct ovsdb_idl_txn *ovnsb_txn,
@@ -3328,9 +3339,7 @@ ovn_port_update_sbrec(struct northd_input *input_data,
         sbrec_port_binding_set_mac(op->sb, &addresses, 1);
         ds_destroy(&s);
 
-        struct smap ids = SMAP_INITIALIZER(&ids);
-        smap_clone(&ids, &op->nbrp->external_ids);
-        sbrec_port_binding_set_external_ids(op->sb, &ids);
+        sbrec_port_binding_set_external_ids(op->sb, &op->nbrp->external_ids);
 
         sbrec_port_binding_set_nat_addresses(op->sb, NULL, 0);
     } else {
@@ -3342,7 +3351,7 @@ ovn_port_update_sbrec(struct northd_input *input_data,
             struct smap options;
             char *name = "";
 
-            if (!strcmp(op->nbsp->type, "localnet")) {
+            if (lsp_is_localnet(op->nbsp)) {
                 uuid = &op->sb->header_.uuid;
                 name = "localnet";
             } else if (op->sb->chassis) {
@@ -3368,6 +3377,16 @@ ovn_port_update_sbrec(struct northd_input *input_data,
                 smap_add(&options, "vlan-passthru", "true");
             }
 
+            /* Retain activated chassis flags. */
+            if (op->sb->requested_additional_chassis) {
+                const char *activated_str = smap_get(
+                    &op->sb->options, "additional-chassis-activated");
+                if (activated_str) {
+                    smap_add(&options, "additional-chassis-activated",
+                             activated_str);
+                }
+            }
+
             sbrec_port_binding_set_options(op->sb, &options);
             smap_destroy(&options);
             if (ovn_is_known_nb_lsp_type(op->nbsp->type)) {
@@ -3400,35 +3419,8 @@ ovn_port_update_sbrec(struct northd_input *input_data,
                 sbrec_port_binding_set_ha_chassis_group(op->sb, NULL);
             }
 
-            const char *requested_chassis; /* May be NULL. */
-            bool reset_requested_chassis = false;
-            requested_chassis = smap_get(&op->nbsp->options,
-                                         "requested-chassis");
-            if (requested_chassis) {
-                const struct sbrec_chassis *chassis; /* May be NULL. */
-                chassis = chassis_lookup_by_name(sbrec_chassis_by_name,
-                                                 requested_chassis);
-                chassis = chassis ? chassis : chassis_lookup_by_hostname(
-                                sbrec_chassis_by_hostname, requested_chassis);
-
-                if (chassis) {
-                    sbrec_port_binding_set_requested_chassis(op->sb, chassis);
-                } else {
-                    reset_requested_chassis = true;
-                    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(
-                        1, 1);
-                    VLOG_WARN_RL(
-                        &rl,
-                        "Unknown chassis '%s' set as "
-                        "options:requested-chassis on LSP '%s'.",
-                        requested_chassis, op->nbsp->name);
-                }
-            } else if (op->sb->requested_chassis) {
-                reset_requested_chassis = true;
-            }
-            if (reset_requested_chassis) {
-                sbrec_port_binding_set_requested_chassis(op->sb, NULL);
-            }
+            ovn_port_update_sbrec_chassis(sbrec_chassis_by_name,
+                                          sbrec_chassis_by_hostname, op);
         } else {
             const char *chassis = NULL;
             if (op->peer && op->peer->od && op->peer->od->nbr) {
@@ -3463,6 +3455,8 @@ ovn_port_update_sbrec(struct northd_input *input_data,
                                            "nat-addresses");
             size_t n_nats = 0;
             char **nats = NULL;
+            bool l3dgw_ports = op->peer && op->peer->od &&
+                               op->peer->od->n_l3dgw_ports;
             if (nat_addresses && !strcmp(nat_addresses, "router")) {
                 if (op->peer && op->peer->od
                     && (chassis || op->peer->od->n_l3dgw_ports)) {
@@ -3471,9 +3465,7 @@ ovn_port_update_sbrec(struct northd_input *input_data,
                     nats = get_nat_addresses(op->peer, &n_nats, false,
                                              !exclude_lb_vips);
                 }
-            /* Only accept manual specification of ethernet address
-             * followed by IPv4 addresses on type "l3gateway" ports. */
-            } else if (nat_addresses && chassis) {
+            } else if (nat_addresses && (chassis || l3dgw_ports)) {
                 struct lport_addresses laddrs;
                 if (!extract_lsp_addresses(nat_addresses, &laddrs)) {
                     static struct vlog_rate_limit rl =
@@ -3483,7 +3475,18 @@ ovn_port_update_sbrec(struct northd_input *input_data,
                     destroy_lport_addresses(&laddrs);
                     n_nats = 1;
                     nats = xcalloc(1, sizeof *nats);
-                    nats[0] = xstrdup(nat_addresses);
+                    struct ds nat_addr = DS_EMPTY_INITIALIZER;
+                    ds_put_format(&nat_addr, "%s", nat_addresses);
+                    if (l3dgw_ports) {
+                        const struct ovn_port *l3dgw_port = (
+                            is_l3dgw_port(op->peer)
+                            ? op->peer
+                            : op->peer->od->l3dgw_ports[0]);
+                        ds_put_format(&nat_addr, " is_chassis_resident(%s)",
+                            l3dgw_port->cr_port->json_key);
+                    }
+                    nats[0] = xstrdup(ds_cstr(&nat_addr));
+                    ds_destroy(&nat_addr);
                 }
             }
 
@@ -3539,9 +3542,12 @@ ovn_port_update_sbrec(struct northd_input *input_data,
                 }
 
                 if (op->peer->od->n_l3dgw_ports) {
+                    const struct ovn_port *l3dgw_port = (
+                        is_l3dgw_port(op->peer)
+                        ? op->peer
+                        : op->peer->od->l3dgw_ports[0]);
                     ds_put_format(&garp_info, " is_chassis_resident(%s)",
-                                  op->peer->od->l3dgw_ports[0]
-                                  ->cr_port->json_key);
+                                  l3dgw_port->cr_port->json_key);
                 }
 
                 n_nats++;
@@ -3561,6 +3567,9 @@ ovn_port_update_sbrec(struct northd_input *input_data,
         sbrec_port_binding_set_tag(op->sb, op->nbsp->tag, op->nbsp->n_tag);
         sbrec_port_binding_set_mac(op->sb, (const char **) op->nbsp->addresses,
                                    op->nbsp->n_addresses);
+        sbrec_port_binding_set_port_security(
+            op->sb, (const char **) op->nbsp->port_security,
+            op->nbsp->n_port_security);
 
         struct smap ids = SMAP_INITIALIZER(&ids);
         smap_clone(&ids, &op->nbsp->external_ids);
@@ -3591,8 +3600,8 @@ cleanup_mac_bindings(struct northd_input *input_data,
                      struct hmap *datapaths,
                      struct hmap *ports)
 {
-    const struct sbrec_mac_binding *b, *n;
-    SBREC_MAC_BINDING_TABLE_FOR_EACH_SAFE (b, n,
+    const struct sbrec_mac_binding *b;
+    SBREC_MAC_BINDING_TABLE_FOR_EACH_SAFE (b,
                              input_data->sbrec_mac_binding_table) {
         const struct ovn_datapath *od =
             ovn_datapath_from_sbrec(datapaths, b->datapath);
@@ -3608,8 +3617,8 @@ static void
 cleanup_sb_ha_chassis_groups(struct northd_input *input_data,
                              struct sset *active_ha_chassis_groups)
 {
-    const struct sbrec_ha_chassis_group *b, *n;
-    SBREC_HA_CHASSIS_GROUP_TABLE_FOR_EACH_SAFE (b, n,
+    const struct sbrec_ha_chassis_group *b;
+    SBREC_HA_CHASSIS_GROUP_TABLE_FOR_EACH_SAFE (b,
                                 input_data->sbrec_ha_chassis_group_table) {
         if (!sset_contains(active_ha_chassis_groups, b->name)) {
             sbrec_ha_chassis_group_delete(b);
@@ -3618,11 +3627,11 @@ cleanup_sb_ha_chassis_groups(struct northd_input *input_data,
 }
 
 static void
-cleanup_stale_fdp_entries(struct northd_input *input_data,
+cleanup_stale_fdb_entries(struct northd_input *input_data,
                           struct hmap *datapaths)
 {
-    const struct sbrec_fdb *fdb_e, *next;
-    SBREC_FDB_TABLE_FOR_EACH_SAFE (fdb_e, next,
+    const struct sbrec_fdb *fdb_e;
+    SBREC_FDB_TABLE_FOR_EACH_SAFE (fdb_e,
                          input_data->sbrec_fdb_table) {
         bool delete = true;
         struct ovn_datapath *od
@@ -3710,7 +3719,8 @@ ovn_lb_svc_create(struct ovsdb_idl_txn *ovnsb_txn, struct ovn_northd_lb *lb,
             backend_nb->op = op;
             backend_nb->svc_mon_src_ip = svc_mon_src_ip;
 
-            if (!lb_vip_nb->lb_health_check || !op || !svc_mon_src_ip) {
+            if (!lb_vip_nb->lb_health_check || !op || !svc_mon_src_ip ||
+                !lsp_is_enabled(op->nbsp)) {
                 continue;
             }
 
@@ -3744,6 +3754,13 @@ ovn_lb_svc_create(struct ovsdb_idl_txn *ovnsb_txn, struct ovn_northd_lb *lb,
                     backend_nb->svc_mon_src_ip);
             }
 
+            if ((!op->sb->n_up || !op->sb->up[0])
+                && mon_info->sbrec_mon->status
+                && !strcmp(mon_info->sbrec_mon->status, "online")) {
+                sbrec_service_monitor_set_status(mon_info->sbrec_mon,
+                                                 "offline");
+            }
+
             backend_nb->sbrec_monitor = mon_info->sbrec_mon;
             mon_info->required = true;
         }
@@ -3767,9 +3784,10 @@ build_lb_vip_actions(struct ovn_lb_vip *lb_vip,
             struct ovn_lb_backend *backend = &lb_vip->backends[i];
             struct ovn_northd_lb_backend *backend_nb =
                 &lb_vip_nb->backends_nb[i];
-            if (backend_nb->health_check && backend_nb->sbrec_monitor &&
-                backend_nb->sbrec_monitor->status &&
-                strcmp(backend_nb->sbrec_monitor->status, "online")) {
+            if (!backend_nb->health_check ||
+                (backend_nb->health_check && backend_nb->sbrec_monitor &&
+                 backend_nb->sbrec_monitor->status &&
+                 strcmp(backend_nb->sbrec_monitor->status, "online"))) {
                 continue;
             }
 
@@ -3812,57 +3830,35 @@ build_lb_vip_actions(struct ovn_lb_vip *lb_vip,
 }
 
 static void
-build_ovn_lr_lbs(struct hmap *datapaths, struct hmap *lbs)
+build_lrouter_lb_ips(struct ovn_lb_ip_set *lb_ips,
+                     const struct ovn_northd_lb *lb)
 {
-    struct ovn_northd_lb *lb;
-    struct ovn_datapath *od;
-
-    HMAP_FOR_EACH (od, key_node, datapaths) {
-        if (!od->nbr) {
-            continue;
-        }
-        if (!smap_get(&od->nbr->options, "chassis")
-            && od->n_l3dgw_ports != 1) {
-            if (od->n_l3dgw_ports > 1 && od->has_lb_vip) {
-                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
-                VLOG_WARN_RL(&rl, "Load-balancers are configured on logical "
-                             "router %s, which has %"PRIuSIZE" distributed "
-                             "gateway ports. Load-balancer is not supported "
-                             "yet when there is more than one distributed "
-                             "gateway port on the router.",
-                             od->nbr->name, od->n_l3dgw_ports);
-            }
-            continue;
-        }
+    const char *ip_address;
 
-        for (size_t i = 0; i < od->nbr->n_load_balancer; i++) {
-            const struct uuid *lb_uuid =
-                &od->nbr->load_balancer[i]->header_.uuid;
-            lb = ovn_northd_lb_find(lbs, lb_uuid);
-            ovn_northd_lb_add_lr(lb, od);
+    SSET_FOR_EACH (ip_address, &lb->ips_v4) {
+        sset_add(&lb_ips->ips_v4, ip_address);
+        if (lb->routable) {
+            sset_add(&lb_ips->ips_v4_routable, ip_address);
         }
-
-        for (size_t i = 0; i < od->nbr->n_load_balancer_group; i++) {
-            const struct nbrec_load_balancer_group *lbg =
-                od->nbr->load_balancer_group[i];
-            for (size_t j = 0; j < lbg->n_load_balancer; j++) {
-                const struct uuid *lb_uuid =
-                    &lbg->load_balancer[j]->header_.uuid;
-                lb = ovn_northd_lb_find(lbs, lb_uuid);
-                ovn_northd_lb_add_lr(lb, od);
-            }
+    }
+    SSET_FOR_EACH (ip_address, &lb->ips_v6) {
+        sset_add(&lb_ips->ips_v6, ip_address);
+        if (lb->routable) {
+            sset_add(&lb_ips->ips_v6_routable, ip_address);
         }
     }
 }
 
 static void
-build_ovn_lbs(struct northd_input *input_data,
-              struct ovsdb_idl_txn *ovnsb_txn,
-              struct hmap *datapaths, struct hmap *lbs)
+build_lbs(struct northd_input *input_data, struct hmap *datapaths,
+          struct hmap *lbs, struct hmap *lb_groups)
 {
+    const struct nbrec_load_balancer_group *nbrec_lb_group;
+    struct ovn_lb_group *lb_group;
     struct ovn_northd_lb *lb;
 
     hmap_init(lbs);
+    hmap_init(lb_groups);
 
     const struct nbrec_load_balancer *nbrec_lb;
     NBREC_LOAD_BALANCER_TABLE_FOR_EACH (nbrec_lb,
@@ -3872,6 +3868,19 @@ build_ovn_lbs(struct northd_input *input_data,
                     uuid_hash(&nbrec_lb->header_.uuid));
     }
 
+    NBREC_LOAD_BALANCER_GROUP_TABLE_FOR_EACH (nbrec_lb_group,
+                               input_data->nbrec_load_balancer_group_table) {
+        lb_group = ovn_lb_group_create(nbrec_lb_group, lbs,
+                                       hmap_count(datapaths));
+
+        for (size_t i = 0; i < lb_group->n_lbs; i++) {
+            build_lrouter_lb_ips(lb_group->lb_ips, lb_group->lbs[i]);
+        }
+
+        hmap_insert(lb_groups, &lb_group->hmap_node,
+                    uuid_hash(&lb_group->uuid));
+    }
+
     struct ovn_datapath *od;
     HMAP_FOR_EACH (od, key_node, datapaths) {
         if (!od->nbs) {
@@ -3882,109 +3891,83 @@ build_ovn_lbs(struct northd_input *input_data,
             const struct uuid *lb_uuid =
                 &od->nbs->load_balancer[i]->header_.uuid;
             lb = ovn_northd_lb_find(lbs, lb_uuid);
-            ovn_northd_lb_add_ls(lb, od);
+            ovn_northd_lb_add_ls(lb, 1, &od);
         }
 
         for (size_t i = 0; i < od->nbs->n_load_balancer_group; i++) {
-            const struct nbrec_load_balancer_group *lbg =
-                od->nbs->load_balancer_group[i];
-            for (size_t j = 0; j < lbg->n_load_balancer; j++) {
-                const struct uuid *lb_uuid =
-                    &lbg->load_balancer[j]->header_.uuid;
-                lb = ovn_northd_lb_find(lbs, lb_uuid);
-                ovn_northd_lb_add_ls(lb, od);
-            }
+            nbrec_lb_group = od->nbs->load_balancer_group[i];
+            lb_group = ovn_lb_group_find(lb_groups,
+                                         &nbrec_lb_group->header_.uuid);
+            ovn_lb_group_add_ls(lb_group, od);
         }
     }
 
-    /* Delete any stale SB load balancer rows. */
-    struct hmapx existing_lbs = HMAPX_INITIALIZER(&existing_lbs);
-    const struct sbrec_load_balancer *sbrec_lb, *next;
-    SBREC_LOAD_BALANCER_TABLE_FOR_EACH_SAFE (sbrec_lb, next,
-                            input_data->sbrec_load_balancer_table) {
-        const char *nb_lb_uuid = smap_get(&sbrec_lb->external_ids, "lb_id");
-        struct uuid lb_uuid;
-        if (!nb_lb_uuid || !uuid_from_string(&lb_uuid, nb_lb_uuid)) {
-            sbrec_load_balancer_delete(sbrec_lb);
-            continue;
+    HMAP_FOR_EACH (lb_group, hmap_node, lb_groups) {
+        for (size_t j = 0; j < lb_group->n_lbs; j++) {
+            ovn_northd_lb_add_ls(lb_group->lbs[j], lb_group->n_ls,
+                                 lb_group->ls);
         }
+    }
 
-        /* Delete any SB load balancer entries that refer to NB load balancers
-         * that don't exist anymore or are not applied to switches anymore.
-         *
-         * There is also a special case in which duplicate LBs might be created
-         * in the SB, e.g., due to the fact that OVSDB only ensures
-         * "at-least-once" consistency for clustered database tables that
-         * are not indexed in any way.
-         */
-        lb = ovn_northd_lb_find(lbs, &lb_uuid);
-        if (!lb || !lb->n_nb_ls || !hmapx_add(&existing_lbs, lb)) {
-            sbrec_load_balancer_delete(sbrec_lb);
-        } else {
-            lb->slb = sbrec_lb;
+    HMAP_FOR_EACH (od, key_node, datapaths) {
+        if (!od->nbr) {
+            continue;
         }
-    }
-    hmapx_destroy(&existing_lbs);
 
-    /* Create SB Load balancer records if not present and sync
-     * the SB load balancer columns. */
-    HMAP_FOR_EACH (lb, hmap_node, lbs) {
+        /* Check load balancer groups first, starting with the largest one,
+         * so that IP sets can be copied more efficiently. */
+        size_t largest_group = 0;
 
-        if (!lb->n_nb_ls) {
-            continue;
+        for (size_t i = 1; i < od->nbr->n_load_balancer_group; i++) {
+            if (od->nbr->load_balancer_group[i]->n_load_balancer >
+                od->nbr->load_balancer_group[largest_group]->n_load_balancer) {
+                largest_group = i;
+            }
         }
 
-        /* Store the fact that northd provides the original (destination IP +
-         * transport port) tuple.
-         */
-        struct smap options;
-        smap_clone(&options, &lb->nlb->options);
-        smap_replace(&options, "hairpin_orig_tuple", "true");
+        for (size_t i = 0; i < od->nbr->n_load_balancer_group; i++) {
+            size_t idx = (i + largest_group) % od->nbr->n_load_balancer_group;
 
-        struct sbrec_datapath_binding **lb_dps =
-            xmalloc(lb->n_nb_ls * sizeof *lb_dps);
-        for (size_t i = 0; i < lb->n_nb_ls; i++) {
-            lb_dps[i] = CONST_CAST(struct sbrec_datapath_binding *,
-                                   lb->nb_ls[i]->sb);
+            nbrec_lb_group = od->nbr->load_balancer_group[idx];
+            lb_group = ovn_lb_group_find(lb_groups,
+                                         &nbrec_lb_group->header_.uuid);
+            ovn_lb_group_add_lr(lb_group, od);
+
+            if (!od->lb_ips) {
+                od->lb_ips = ovn_lb_ip_set_clone(lb_group->lb_ips);
+            } else {
+                for (size_t j = 0; j < lb_group->n_lbs; j++) {
+                    build_lrouter_lb_ips(od->lb_ips, lb_group->lbs[j]);
+                }
+            }
         }
 
-        if (!lb->slb) {
-            sbrec_lb = sbrec_load_balancer_insert(ovnsb_txn);
-            lb->slb = sbrec_lb;
-            char *lb_id = xasprintf(
-                UUID_FMT, UUID_ARGS(&lb->nlb->header_.uuid));
-            const struct smap external_ids =
-                SMAP_CONST1(&external_ids, "lb_id", lb_id);
-            sbrec_load_balancer_set_external_ids(sbrec_lb, &external_ids);
-            free(lb_id);
+        if (!od->lb_ips) {
+            od->lb_ips = ovn_lb_ip_set_create();
         }
-        sbrec_load_balancer_set_name(lb->slb, lb->nlb->name);
-        sbrec_load_balancer_set_vips(lb->slb, &lb->nlb->vips);
-        sbrec_load_balancer_set_protocol(lb->slb, lb->nlb->protocol);
-        sbrec_load_balancer_set_datapaths(lb->slb, lb_dps, lb->n_nb_ls);
-        sbrec_load_balancer_set_options(lb->slb, &options);
-        smap_destroy(&options);
-        free(lb_dps);
-    }
 
-    /* Datapath_Binding.load_balancers is not used anymore, it's still in the
-     * schema for compatibility reasons.  Reset it to empty, just in case.
-     */
-    HMAP_FOR_EACH (od, key_node, datapaths) {
-        if (!od->nbs) {
-            continue;
+        for (size_t i = 0; i < od->nbr->n_load_balancer; i++) {
+            const struct uuid *lb_uuid =
+                &od->nbr->load_balancer[i]->header_.uuid;
+            lb = ovn_northd_lb_find(lbs, lb_uuid);
+            ovn_northd_lb_add_lr(lb, 1, &od);
+            build_lrouter_lb_ips(od->lb_ips, lb);
         }
+    }
 
-        if (od->sb->n_load_balancers) {
-            sbrec_datapath_binding_set_load_balancers(od->sb, NULL, 0);
+    HMAP_FOR_EACH (lb_group, hmap_node, lb_groups) {
+        for (size_t j = 0; j < lb_group->n_lbs; j++) {
+            ovn_northd_lb_add_lr(lb_group->lbs[j], lb_group->n_lr,
+                                 lb_group->lr);
         }
     }
 }
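The router branch of build_lbs() deliberately visits the largest load-balancer group first, so its precomputed IP set can be cloned wholesale and only the smaller groups need to be merged LB by LB. A compact sketch of that ordering, using a hypothetical example_group type in place of the NB load-balancer group rows:

#include <stddef.h>

struct example_group {
    size_t n_lbs;   /* Number of load balancers in the group. */
};

static void
example_visit_largest_first(const struct example_group *groups,
                            size_t n_groups)
{
    size_t largest = 0;

    for (size_t i = 1; i < n_groups; i++) {
        if (groups[i].n_lbs > groups[largest].n_lbs) {
            largest = i;
        }
    }
    for (size_t i = 0; i < n_groups; i++) {
        size_t idx = (i + largest) % n_groups;
        /* i == 0: clone the (largest) group's precomputed IP set.
         * i  > 0: merge group 'idx' into the existing set, LB by LB. */
        (void) idx;
    }
}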
 
 static void
-build_ovn_lb_svcs(struct northd_input *input_data,
-                  struct ovsdb_idl_txn *ovnsb_txn,
-                  struct hmap *ports, struct hmap *lbs)
+build_lb_svcs(struct northd_input *input_data,
+              struct ovsdb_idl_txn *ovnsb_txn,
+              struct hmap *ports,
+              struct hmap *lbs)
 {
     struct hmap monitor_map = HMAP_INITIALIZER(&monitor_map);
 
@@ -4016,26 +3999,6 @@ build_ovn_lb_svcs(struct northd_input *input_data,
     hmap_destroy(&monitor_map);
 }
 
-static void
-build_lrouter_lb_ips(struct ovn_datapath *od, const struct ovn_northd_lb *lb)
-{
-    bool is_routable = smap_get_bool(&lb->nlb->options, "add_route",  false);
-    const char *ip_address;
-
-    SSET_FOR_EACH (ip_address, &lb->ips_v4) {
-        sset_add(&od->lb_ips_v4, ip_address);
-        if (is_routable) {
-            sset_add(&od->lb_ips_v4_routable, ip_address);
-        }
-    }
-    SSET_FOR_EACH (ip_address, &lb->ips_v6) {
-        sset_add(&od->lb_ips_v6, ip_address);
-        if (is_routable) {
-            sset_add(&od->lb_ips_v6_routable, ip_address);
-        }
-    }
-}
-
 static bool lrouter_port_ipv4_reachable(const struct ovn_port *op,
                                         ovs_be32 addr);
 static bool lrouter_port_ipv6_reachable(const struct ovn_port *op,
@@ -4044,6 +4007,23 @@ static void
 build_lrouter_lb_reachable_ips(struct ovn_datapath *od,
                                const struct ovn_northd_lb *lb)
 {
+    /* If configured to reply to neighbor requests for all VIPs, force them
+     * all to be considered "reachable".
+     */
+    if (lb->neigh_mode == LB_NEIGH_RESPOND_ALL) {
+        for (size_t i = 0; i < lb->n_vips; i++) {
+            if (IN6_IS_ADDR_V4MAPPED(&lb->vips[i].vip)) {
+                sset_add(&od->lb_ips->ips_v4_reachable, lb->vips[i].vip_str);
+            } else {
+                sset_add(&od->lb_ips->ips_v6_reachable, lb->vips[i].vip_str);
+            }
+        }
+        return;
+    }
+
+    /* Otherwise, a VIP is reachable if there's at least one router
+     * subnet that includes it.
+     */
     for (size_t i = 0; i < lb->n_vips; i++) {
         if (IN6_IS_ADDR_V4MAPPED(&lb->vips[i].vip)) {
             ovs_be32 vip_ip4 = in6_addr_get_mapped_ipv4(&lb->vips[i].vip);
@@ -4051,7 +4031,8 @@ build_lrouter_lb_reachable_ips(struct ovn_datapath *od,
 
             LIST_FOR_EACH (op, dp_node, &od->port_list) {
                 if (lrouter_port_ipv4_reachable(op, vip_ip4)) {
-                    sset_add(&od->lb_ips_v4_reachable, lb->vips[i].vip_str);
+                    sset_add(&od->lb_ips->ips_v4_reachable,
+                             lb->vips[i].vip_str);
                     break;
                 }
             }
@@ -4060,7 +4041,8 @@ build_lrouter_lb_reachable_ips(struct ovn_datapath *od,
 
             LIST_FOR_EACH (op, dp_node, &od->port_list) {
                 if (lrouter_port_ipv6_reachable(op, &lb->vips[i].vip)) {
-                    sset_add(&od->lb_ips_v6_reachable, lb->vips[i].vip_str);
+                    sset_add(&od->lb_ips->ips_v6_reachable,
+                             lb->vips[i].vip_str);
                     break;
                 }
             }
@@ -4069,7 +4051,7 @@ build_lrouter_lb_reachable_ips(struct ovn_datapath *od,
 }
 
 static void
-build_lrouter_lbs(struct hmap *datapaths, struct hmap *lbs)
+build_lrouter_lbs_check(const struct hmap *datapaths)
 {
     struct ovn_datapath *od;
 
@@ -4078,28 +4060,22 @@ build_lrouter_lbs(struct hmap *datapaths, struct hmap *lbs)
             continue;
         }
 
-        for (size_t i = 0; i < od->nbr->n_load_balancer; i++) {
-            struct ovn_northd_lb *lb =
-                ovn_northd_lb_find(lbs,
-                                   &od->nbr->load_balancer[i]->header_.uuid);
-            build_lrouter_lb_ips(od, lb);
-        }
-
-        for (size_t i = 0; i < od->nbr->n_load_balancer_group; i++) {
-            const struct nbrec_load_balancer_group *lbg =
-                od->nbr->load_balancer_group[i];
-            for (size_t j = 0; j < lbg->n_load_balancer; j++) {
-                struct ovn_northd_lb *lb =
-                    ovn_northd_lb_find(lbs,
-                                       &lbg->load_balancer[j]->header_.uuid);
-                build_lrouter_lb_ips(od, lb);
-            }
+        if (od->has_lb_vip && od->n_l3dgw_ports > 1
+                && !smap_get(&od->nbr->options, "chassis")) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+            VLOG_WARN_RL(&rl, "Load-balancers are configured on logical "
+                         "router %s, which has %"PRIuSIZE" distributed "
+                         "gateway ports. Load-balancer is not supported "
+                         "yet when there is more than one distributed "
+                         "gateway port on the router.",
+                         od->nbr->name, od->n_l3dgw_ports);
         }
     }
 }
 
 static void
-build_lrouter_lbs_reachable_ips(struct hmap *datapaths, struct hmap *lbs)
+build_lrouter_lbs_reachable_ips(struct hmap *datapaths, struct hmap *lbs,
+                                struct hmap *lb_groups)
 {
     struct ovn_datapath *od;
 
@@ -4116,15 +4092,280 @@ build_lrouter_lbs_reachable_ips(struct hmap *datapaths, struct hmap *lbs)
         }
 
         for (size_t i = 0; i < od->nbr->n_load_balancer_group; i++) {
-            const struct nbrec_load_balancer_group *lbg =
+            const struct nbrec_load_balancer_group *nbrec_lb_group =
                 od->nbr->load_balancer_group[i];
-            for (size_t j = 0; j < lbg->n_load_balancer; j++) {
-                struct ovn_northd_lb *lb =
-                    ovn_northd_lb_find(lbs,
-                                       &lbg->load_balancer[j]->header_.uuid);
-                build_lrouter_lb_reachable_ips(od, lb);
+            struct ovn_lb_group *lb_group;
+
+            lb_group = ovn_lb_group_find(lb_groups,
+                                         &nbrec_lb_group->header_.uuid);
+            for (size_t j = 0; j < lb_group->n_lbs; j++) {
+                build_lrouter_lb_reachable_ips(od, lb_group->lbs[j]);
+            }
+        }
+    }
+}
+
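+/* When 'install_ls_lb_from_router' is enabled, also apply a router's load
+ * balancers to the logical switches connected to it, by adding each such
+ * switch to the load balancer's list of switch datapaths. */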
+static void
+build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
+{
+    if (!install_ls_lb_from_router) {
+        return;
+    }
+
+    struct ovn_datapath *od;
+    HMAP_FOR_EACH (od, key_node, datapaths) {
+        if (!od->nbs) {
+            continue;
+        }
+
+        struct ovn_port *op;
+        LIST_FOR_EACH (op, dp_node, &od->port_list) {
+            if (!lsp_is_router(op->nbsp)) {
+                continue;
+            }
+            if (!op->peer) {
+                continue;
+            }
+
+            struct ovn_datapath *peer_od = op->peer->od;
+            for (size_t i = 0; i < peer_od->nbr->n_load_balancer; i++) {
+                bool installed = false;
+                const struct uuid *lb_uuid =
+                    &peer_od->nbr->load_balancer[i]->header_.uuid;
+                struct ovn_northd_lb *lb = ovn_northd_lb_find(lbs, lb_uuid);
+                if (!lb) {
+                    continue;
+                }
+
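+                /* Add the switch to the LB's datapaths only if it is not
+                 * already there. */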
+                for (size_t j = 0; j < lb->n_nb_ls; j++) {
+                    if (lb->nb_ls[j] == od) {
+                        installed = true;
+                        break;
+                    }
+                }
+                if (!installed) {
+                    ovn_northd_lb_add_ls(lb, 1, &od);
+                }
+                if (lb->nlb) {
+                    od->has_lb_vip |= lb_has_vip(lb->nlb);
+                }
+            }
+        }
+    }
+}
+
+/* This must be called after all ports have been processed, i.e., after
+ * build_ports(), because the reachability check requires the router ports'
+ * networks to have been parsed.
+ */
+static void
+build_lb_port_related_data(struct hmap *datapaths, struct hmap *ports,
+                           struct hmap *lbs, struct hmap *lb_groups,
+                           struct northd_input *input_data,
+                           struct ovsdb_idl_txn *ovnsb_txn)
+{
+    build_lrouter_lbs_check(datapaths);
+    build_lrouter_lbs_reachable_ips(datapaths, lbs, lb_groups);
+    build_lb_svcs(input_data, ovnsb_txn, ports, lbs);
+    build_lswitch_lbs_from_lrouter(datapaths, lbs);
+}
+
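+/* A set of datapaths that share a Logical_DP_Group row in the SB database,
+ * tracked as a bitmap indexed by datapath 'index'. */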
+struct ovn_dp_group {
+    unsigned long *bitmap;
+    struct sbrec_logical_dp_group *dp_group;
+    struct hmap_node node;
+};
+
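+/* Returns the group in 'dp_groups' whose bitmap matches 'dpg_bitmap'
+ * exactly, or NULL if there is no such group. */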
+static struct ovn_dp_group *
+ovn_dp_group_find(const struct hmap *dp_groups,
+                  const unsigned long *dpg_bitmap, uint32_t hash)
+{
+    struct ovn_dp_group *dpg;
+
+    HMAP_FOR_EACH_WITH_HASH (dpg, node, hash, dp_groups) {
+        if (bitmap_equal(dpg->bitmap, dpg_bitmap, n_datapaths)) {
+            return dpg;
+        }
+    }
+    return NULL;
+}
+
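+/* Creates a new SB Logical_DP_Group row referencing the datapath bindings
+ * of all datapaths set in 'dpg_bitmap'. */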
+static struct sbrec_logical_dp_group *
+ovn_sb_insert_logical_dp_group(struct ovsdb_idl_txn *ovnsb_txn,
+                               const unsigned long *dpg_bitmap)
+{
+    struct sbrec_logical_dp_group *dp_group;
+    const struct sbrec_datapath_binding **sb;
+    size_t n = 0, index;
+
+    sb = xmalloc(bitmap_count1(dpg_bitmap, n_datapaths) * sizeof *sb);
+    BITMAP_FOR_EACH_1 (index, n_datapaths, dpg_bitmap) {
+        sb[n++] = datapaths_array[index]->sb;
+    }
+    dp_group = sbrec_logical_dp_group_insert(ovnsb_txn);
+    sbrec_logical_dp_group_set_datapaths(
+        dp_group, (struct sbrec_datapath_binding **) sb, n);
+    free(sb);
+
+    return dp_group;
+}
+
+/* Syncs relevant load balancers (applied to logical switches) to the
+ * Southbound database.
+ */
+static void
+sync_lbs(struct northd_input *input_data, struct ovsdb_idl_txn *ovnsb_txn,
+         struct hmap *datapaths, struct hmap *lbs)
+{
+    struct hmap dp_groups = HMAP_INITIALIZER(&dp_groups);
+    struct ovn_northd_lb *lb;
+
+    /* Delete any stale SB load balancer rows and collect existing valid
+     * datapath groups. */
+    struct hmapx existing_sb_dp_groups =
+        HMAPX_INITIALIZER(&existing_sb_dp_groups);
+    struct hmapx existing_lbs = HMAPX_INITIALIZER(&existing_lbs);
+    const struct sbrec_load_balancer *sbrec_lb;
+    SBREC_LOAD_BALANCER_TABLE_FOR_EACH_SAFE (sbrec_lb,
+                            input_data->sbrec_load_balancer_table) {
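+        /* SB load balancers created by northd store the UUID of their NB
+         * counterpart in external_ids:lb_id; rows without a valid one are
+         * stale. */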
+        const char *nb_lb_uuid = smap_get(&sbrec_lb->external_ids, "lb_id");
+        struct uuid lb_uuid;
+        if (!nb_lb_uuid || !uuid_from_string(&lb_uuid, nb_lb_uuid)) {
+            sbrec_load_balancer_delete(sbrec_lb);
+            continue;
+        }
+
+        /* Delete any SB load balancer entries that refer to NB load balancers
+         * that don't exist anymore or are not applied to switches anymore.
+         *
+         * There is also a special case in which duplicate LBs might be
+         * created in the SB, e.g., because OVSDB only ensures
+         * "at-least-once" consistency for clustered database tables that
+         * are not indexed in any way.
+         */
+        lb = ovn_northd_lb_find(lbs, &lb_uuid);
+        if (!lb || !lb->n_nb_ls || !hmapx_add(&existing_lbs, lb)) {
+            sbrec_load_balancer_delete(sbrec_lb);
+            continue;
+        }
+
+        lb->slb = sbrec_lb;
+
+        /* Collect the datapath group. */
+        struct sbrec_logical_dp_group *dp_group = sbrec_lb->datapath_group;
+
+        if (!dp_group || !hmapx_add(&existing_sb_dp_groups, dp_group)) {
+            continue;
+        }
+
+        struct ovn_dp_group *dpg = xzalloc(sizeof *dpg);
+        size_t i, n = 0;
+
+        dpg->bitmap = bitmap_allocate(n_datapaths);
+        for (i = 0; i < dp_group->n_datapaths; i++) {
+            struct ovn_datapath *datapath_od;
+
+            datapath_od = ovn_datapath_from_sbrec(datapaths,
+                                                  dp_group->datapaths[i]);
+            if (!datapath_od || ovn_datapath_is_stale(datapath_od)) {
+                break;
+            }
+            bitmap_set1(dpg->bitmap, datapath_od->index);
+            n++;
+        }
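+        /* Reuse the existing SB datapath group only if all of its datapaths
+         * are still valid. */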
+        if (i == dp_group->n_datapaths) {
+            uint32_t hash = hash_int(n, 0);
+
+            if (!ovn_dp_group_find(&dp_groups, dpg->bitmap, hash)) {
+                dpg->dp_group = dp_group;
+                hmap_insert(&dp_groups, &dpg->node, hash);
+                continue;
             }
         }
+        bitmap_free(dpg->bitmap);
+        free(dpg);
+    }
+    hmapx_destroy(&existing_lbs);
+    hmapx_destroy(&existing_sb_dp_groups);
+
+    /* Create SB Load balancer records if not present and sync
+     * the SB load balancer columns. */
+    HMAP_FOR_EACH (lb, hmap_node, lbs) {
+        if (!lb->n_nb_ls) {
+            continue;
+        }
+
+        /* Store the fact that northd provides the original (destination IP +
+         * transport port) tuple.
+         */
+        struct smap options;
+        smap_clone(&options, &lb->nlb->options);
+        smap_replace(&options, "hairpin_orig_tuple", "true");
+
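+        /* Create a new SB row for this NB load balancer if none was matched
+         * above. */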
+        if (!lb->slb) {
+            sbrec_lb = sbrec_load_balancer_insert(ovnsb_txn);
+            lb->slb = sbrec_lb;
+            char *lb_id = xasprintf(
+                UUID_FMT, UUID_ARGS(&lb->nlb->header_.uuid));
+            const struct smap external_ids =
+                SMAP_CONST1(&external_ids, "lb_id", lb_id);
+            sbrec_load_balancer_set_external_ids(sbrec_lb, &external_ids);
+            free(lb_id);
+        }
+
+        /* Find datapath group for this load balancer. */
+        unsigned long *lb_dps_bitmap;
+        struct ovn_dp_group *dpg;
+        uint32_t hash;
+
+        lb_dps_bitmap = bitmap_allocate(n_datapaths);
+        for (size_t i = 0; i < lb->n_nb_ls; i++) {
+            bitmap_set1(lb_dps_bitmap, lb->nb_ls[i]->index);
+        }
+
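+        /* Groups are hashed on the number of datapaths they contain;
+         * exact matches are resolved by comparing bitmaps in
+         * ovn_dp_group_find(). */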
+        hash = hash_int(bitmap_count1(lb_dps_bitmap, n_datapaths), 0);
+        dpg = ovn_dp_group_find(&dp_groups, lb_dps_bitmap, hash);
+        if (!dpg) {
+            dpg = xzalloc(sizeof *dpg);
+            dpg->dp_group = ovn_sb_insert_logical_dp_group(ovnsb_txn,
+                                                           lb_dps_bitmap);
+            dpg->bitmap = bitmap_clone(lb_dps_bitmap, n_datapaths);
+            hmap_insert(&dp_groups, &dpg->node, hash);
+        }
+        bitmap_free(lb_dps_bitmap);
+
+        /* Update columns. */
+        sbrec_load_balancer_set_name(lb->slb, lb->nlb->name);
+        sbrec_load_balancer_set_vips(lb->slb, &lb->nlb->vips);
+        sbrec_load_balancer_set_protocol(lb->slb, lb->nlb->protocol);
+        sbrec_load_balancer_set_datapath_group(lb->slb, dpg->dp_group);
+        sbrec_load_balancer_set_options(lb->slb, &options);
+        /* Clearing 'datapaths' column, since 'dp_group' is in use. */
+        sbrec_load_balancer_set_datapaths(lb->slb, NULL, 0);
+        smap_destroy(&options);
+    }
+
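+    /* Only the local tracking structures are freed here; the SB
+     * Logical_DP_Group rows remain referenced by the load balancers. */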
+    struct ovn_dp_group *dpg;
+    HMAP_FOR_EACH_POP (dpg, node, &dp_groups) {
+        bitmap_free(dpg->bitmap);
+        free(dpg);
+    }
+    hmap_destroy(&dp_groups);
+
+    /* Datapath_Binding.load_balancers is not used anymore; it's kept in the
+     * schema only for compatibility reasons.  Reset it to empty, just in
+     * case.
+     */
+    struct ovn_datapath *od;
+    HMAP_FOR_EACH (od, key_node, datapaths) {
+        if (!od->nbs) {
+            continue;
+        }
+
+        if (od->sb->n_load_balancers) {
+            sbrec_datapath_binding_set_load_balancers(od->sb, NULL, 0);
+        }
     }
 }
 
@@ -4217,7 +4458,7 @@ build_ports(struct northd_input *input_data,
     bool remove_mac_bindings = !ovs_list_is_empty(&sb_only);
 
     /* Assign explicitly requested tunnel ids first. */
-    struct ovn_port *op, *next;
+    struct ovn_port *op;
     LIST_FOR_EACH (op, list, &both) {
         ovn_port_assign_requested_tnl_id(input_data, op);
     }
@@ -4233,10 +4474,10 @@ build_ports(struct northd_input *input_data,
     }
 
     /* Assign new tunnel ids where needed. */
-    LIST_FOR_EACH_SAFE (op, next, list, &both) {
+    LIST_FOR_EACH_SAFE (op, list, &both) {
         ovn_port_allocate_key(input_data, ports, op);
     }
-    LIST_FOR_EACH_SAFE (op, next, list, &nb_only) {
+    LIST_FOR_EACH_SAFE (op, list, &nb_only) {
         ovn_port_allocate_key(input_data, ports, op);
     }
 
@@ -4244,7 +4485,7 @@ build_ports(struct northd_input *input_data,
      * record based on northbound data.
      * For logical ports that are in NB database, do any tag allocation
      * needed. */
-    LIST_FOR_EACH_SAFE (op, next, list, &both) {
+    LIST_FOR_EACH_SAFE (op, list, &both) {
         /* When reusing stale Port_Bindings, make sure that stale
          * Mac_Bindings are purged.
          */
@@ -4262,7 +4503,7 @@ build_ports(struct northd_input *input_data,
     }
 
     /* Add southbound record for each unmatched northbound record. */
-    LIST_FOR_EACH_SAFE (op, next, list, &nb_only) {
+    LIST_FOR_EACH_SAFE (op, list, &nb_only) {
         op->sb = sbrec_port_binding_insert(ovnsb_txn);
         ovn_port_update_sbrec(input_data,
                               ovnsb_txn, sbrec_chassis_by_name,
@@ -4274,7 +4515,7 @@ build_ports(struct northd_input *input_data,
 
     /* Delete southbound records without northbound matches. */
     if (!ovs_list_is_empty(&sb_only)) {
-        LIST_FOR_EACH_SAFE (op, next, list, &sb_only) {
+        LIST_FOR_EACH_SAFE (op, list, &sb_only) {
             ovs_list_remove(&op->list);
             sbrec_port