diff --git a/.zuul.yaml b/.zuul.yaml
index 1ae7507..879001e 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -21,9 +21,18 @@
       - openstack/python-barbicanclient
       - openstack/python-heatclient
       - openstack/heat-agents
+<<<<<<< .zuul.yaml
       - openstack/tempest
     vars:
       configure_swap_size: 8192
+=======
+      - openstack/python-zaqarclient
+      - openstack/zaqar
+      - openstack/tempest
+    vars:
+      configure_swap_size: 8192
+      gabbi_tempest_path: heat_tempest_plugin.tests.api.gabbits
+>>>>>>> .zuul.yaml
       tempest_plugins:
         - heat-tempest-plugin
       devstack_localrc:
@@ -39,6 +48,10 @@
         tempest: true
       devstack_plugins:
         barbican: https://opendev.org/openstack/barbican
+<<<<<<< .zuul.yaml
+=======
+        zaqar: https://opendev.org/openstack/zaqar
+>>>>>>> .zuul.yaml
         heat: https://opendev.org/openstack/heat
         octavia: https://opendev.org/openstack/octavia
       devstack_local_conf:
@@ -52,6 +65,7 @@
               logging_exception_prefix: "%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s"
               enable_stack_adopt: true
               enable_stack_abandon: true
+<<<<<<< .zuul.yaml
               max_stacks_per_tenant: 256
             heat_api:
               workers: 2
@@ -63,6 +77,18 @@
               client_socket_timeout: 120
             oslo_messaging_notifications:
               driver: messagingv2
+=======
+              heat_api:
+                workers: 2
+              heat_api_cfn:
+                workers: 2
+              cache:
+                enabled: True
+              eventlet_opts:
+                client_socket_timeout: 120
+              oslo_messaging_notifications:
+                driver: messagingv2
+>>>>>>> .zuul.yaml
         test-config:
           $TEMPEST_CONFIG:
             service_available:
@@ -72,7 +98,11 @@
               minimal_image_ref: ${DEFAULT_IMAGE_NAME:-cirros-0.3.6-x86_64-disk}
               instance_type: m1.heat_int
               minimal_instance_type: m1.heat_micro
+<<<<<<< .zuul.yaml
               image_ref: Fedora-Cloud-Base-36-1.5.x86_64
+=======
+              image_ref: Fedora-Cloud-Base-33-1.2.x86_64
+>>>>>>> .zuul.yaml
               hidden_stack_tag: hidden
               heat_config_notify_script: /opt/stack/heat-agents/heat-config/bin/heat-config-notify
               boot_config_env: /opt/stack/heat-templates/hot/software-config/boot-config/test_image_env.yaml
@@ -131,18 +161,26 @@
         HEAT_USE_MOD_WSGI: False
 
 - job:
+<<<<<<< .zuul.yaml
     name: grenade-heat-multinode
     parent: grenade-multinode
+=======
+    name: grenade-heat
+    parent: grenade
+>>>>>>> .zuul.yaml
     required-projects:
       - opendev.org/openstack/heat
       - opendev.org/openstack/heat-tempest-plugin
       - opendev.org/openstack/python-heatclient
     vars:
+<<<<<<< .zuul.yaml
       grenade_devstack_localrc:
         shared:
           HOST_TOPOLOGY: multinode
           HOST_TOPOLOGY_ROLE: primary
           HOST_TOPOLOGY_SUBNODES: "{{ hostvars['compute1']['nodepool']['public_ipv4'] }}"
+=======
+>>>>>>> .zuul.yaml
       configure_swap_size: 8192
       devstack_services:
         h-api: true
@@ -150,12 +188,17 @@
         h-eng: true
         heat: true
       # We do run a list of tests after upgrade. This is just to bypass the requirement from the parent job.
+<<<<<<< .zuul.yaml
       tempest_test_regex: ^tempest\.api\.identity\.v3\.test_tokens
+=======
+      tempest_test_regex: ^heat_tempest_plugin\.tests\.functional\.test_nova_server_networks
+>>>>>>> .zuul.yaml
       tox_envlist: all
       devstack_plugins:
         heat: https://opendev.org/openstack/heat
       tempest_plugins:
         - heat-tempest-plugin
+<<<<<<< .zuul.yaml
     group-vars:
       subnode:
         grenade_devstack_localrc:
@@ -186,12 +229,26 @@
       - openstack-cover-jobs
       - openstack-python3-jobs
       - openstack-python3-jobs-arm64
+=======
+
+- job:
+    name: grenade-heat-multinode
+    parent: grenade-heat
+    nodeset: openstack-two-node-bionic
+
+- project:
+    templates:
+      - openstack-cover-jobs
+      - openstack-lower-constraints-jobs
+      - openstack-python3-victoria-jobs
+>>>>>>> .zuul.yaml
       - periodic-stable-jobs
       - publish-openstack-docs-pti
       - check-requirements
       - release-notes-jobs-python3
     check:
       jobs:
+<<<<<<< .zuul.yaml
         - grenade-heat-multinode
         - heat-functional
         - heat-functional-legacy
@@ -200,3 +257,20 @@
         - grenade-heat-multinode
         - heat-functional
         - heat-functional-legacy
+=======
+        - grenade-heat
+        - grenade-heat-multinode
+        - heat-functional
+        - heat-functional-legacy
+        - heat-functional-non-apache
+    gate:
+      queue: heat
+      jobs:
+        - grenade-heat
+        - grenade-heat-multinode
+        - heat-functional
+        - heat-functional-legacy
+    experimental:
+      jobs:
+        - tripleo-ci-centos-7-scenario002-standalone
+>>>>>>> .zuul.yaml
diff --git a/AUTHORS b/AUTHORS
index b99ee50..f6d177b 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -7,7 +7,10 @@ Aigerim <aigerim.sametkhanova@gmail.com>
 Ala Rezmerita <ala.rezmerita@orange.com>
 Alan <kaihongd@cn.ibm.com>
 Alan Pevec <apevec@redhat.com>
+<<<<<<< AUTHORS
 Alejandro García <agarcia@whitestack.com>
+=======
+>>>>>>> AUTHORS
 AleptNamrata <sitlani.namrata@yahoo.in>
 AleptNmarata <sitlani.namrata@yahoo.in>
 Alex Gaynor <alex.gaynor@gmail.com>
@@ -50,7 +53,10 @@ Atsushi Sasaki <atsaki01@gmail.com>
 Attila Fazekas <afazekas@redhat.com>
 AvnishPal <avnish.pal@nectechnologies.in>
 BK Box <bk@theboxes.org>
+<<<<<<< AUTHORS
 Balazs Gibizer <balazs.gibizer@est.tech>
+=======
+>>>>>>> AUTHORS
 Bartosz Górski <bartosz.gorski@ntti3.com>
 Ben Nemec <bnemec@redhat.com>
 Ben Nemec <openstack@nemebean.com>
@@ -60,7 +66,10 @@ Bernard Van De Walle <bernard@bvandewa.com>
 Bernhard M. Wiedemann <bwiedemann@suse.de>
 Bertrand Lallau <bertrand.lallau@gmail.com>
 Bertrand Lallau <bertrand.lallau@thalesgroup.com>
+<<<<<<< AUTHORS
 Bhagyashri Shewale <bshewale@redhat.com>
+=======
+>>>>>>> AUTHORS
 Bill Arnold <barnold@us.ibm.com>
 Bin Zhou <zhou.bin9@zte.com.cn>
 Bo Wang <bo.wang@easystack.cn>
@@ -71,7 +80,10 @@ Brendan Shephard <bshephar@redhat.com>
 Brent Eagles <beagles@redhat.com>
 Brian Moss <kallimachos@gmail.com>
 Bryan Jones <jonesbr@us.ibm.com>
+<<<<<<< AUTHORS
 Bui Doan Dang <doandang27052000@gmail.com>
+=======
+>>>>>>> AUTHORS
 Béla Vancsics <vancsics@inf.u-szeged.hu>
 Cao Xuan Hoang <hoangcx@vn.fujitsu.com>
 Carlos Goncalves <cgoncalves@redhat.com>
@@ -83,7 +95,10 @@ Chaozhe.Chen <chaozhe.chen@easystack.cn>
 Chaserjim <james.bagwell@nokia.com>
 Chen Xiao <chenxiao@cn.ibm.com>
 ChenZheng <chen.zheng@easystack.cn>
+<<<<<<< AUTHORS
 Chengen Du <chengen.du@canonical.com>
+=======
+>>>>>>> AUTHORS
 Chmouel Boudjnah <chmouel@enovance.com>
 Chris <cm876n@att.com>
 Chris Alfonso <calfonso@redhat.com>
@@ -139,10 +154,15 @@ Eoghan Glynn <eglynn@redhat.com>
 Eoghan Glynn <elynn@redhat.com>
 Eric Brown <browne@vmware.com>
 Erik Olof Gunnar Andersson <eandersson@blizzard.com>
+<<<<<<< AUTHORS
 Erik Panter <e.panter@mittwald.de>
 Ethan Lynn <xjunlin@cn.ibm.com>
 Ethan Lynn <xuanlangjian@gmail.com>
 Eyal <eyal.bar-ilan@nokia.com>
+=======
+Ethan Lynn <xjunlin@cn.ibm.com>
+Ethan Lynn <xuanlangjian@gmail.com>
+>>>>>>> AUTHORS
 Fabien Boucher <fabien.boucher@enovance.com>
 FeihuJiang <jiangfeihu@huawei.com>
 Feilong Wang <flwang@catalyst.net.nz>
@@ -192,7 +212,10 @@ JUN JIE NAN <nanjj@cn.ibm.com>
 JUNJIE NAN <nanjj@cn.ibm.com>
 Jaewoo Park <jp655p@att.com>
 Jaime Guerrero <jg3755@att.com>
+<<<<<<< AUTHORS
 Jake Yip <jake.yip@ardc.edu.au>
+=======
+>>>>>>> AUTHORS
 James Combs <cornracker@gmail.com>
 James E. Blair <jeblair@redhat.com>
 James Reeves <james.reeves5546@gmail.com>
@@ -258,10 +281,15 @@ Khaled Qarout <khaled.qarout@exalt.ps>
 Kien Nguyen <kiennt@vn.fujitsu.com>
 Krishna Raman <kraman@redhat.com>
 Krzysztof Opasiak <k.opasiak@samsung.com>
+<<<<<<< AUTHORS
 Lance Bragstad <lbragstad@gmail.com>
 Lars Kellogg-Stedman <lars@redhat.com>
 Laura Fitzgerald <laura.fitzgerald@ammeon.com>
 LeopardMa <mabao@inspur.com>
+=======
+Lars Kellogg-Stedman <lars@redhat.com>
+Laura Fitzgerald <laura.fitzgerald@ammeon.com>
+>>>>>>> AUTHORS
 Li Jinjing <jinjingx.li@intel.com>
 Liang Chen <cbjchen@cn.ibm.com>
 LiangChen <hs.chen@huawei.com>
@@ -279,7 +307,10 @@ Luke Short <ekultails@gmail.com>
 Luong Anh Tuan <tuanla@vn.fujitsu.com>
 M V P Nitesh <m.nitesh@nectechnologies.in>
 Maksym Iarmak <miarmak@mirantis.com>
+<<<<<<< AUTHORS
 Manpreet Kaur <kaurmanpreet2620@gmail.com>
+=======
+>>>>>>> AUTHORS
 Marc Methot <mmethot@redhat.com>
 Marga Millet <marga.sfo@gmail.com>
 Mark McClain <mark.mcclain@dreamhost.com>
@@ -301,7 +332,10 @@ Mehdi Abaakouk (sileht) <sileht@sileht.net>
 Mehdi Abaakouk <sileht@redhat.com>
 Mehdi Abaakouk <sileht@sileht.net>
 Michael Ionkin <mionkin@mirantis.com>
+<<<<<<< AUTHORS
 Michael Johnson <johnsomor@gmail.com>
+=======
+>>>>>>> AUTHORS
 Michael Krotscheck <krotscheck@gmail.com>
 Michael Still <mikal@stillhq.com>
 Michal Jastrzebski (inc0) <michal.jastrzebski@intel.com>
@@ -312,7 +346,10 @@ Miguel Grinberg <miguelgrinberg50@gmail.com>
 Mike Asthalter <mike.asthalter@rackspace.com>
 Mike Spreitzer <mspreitz@us.ibm.com>
 Mitsuru Kanabuchi <kanabuchi.mitsuru@po.ntts.co.jp>
+<<<<<<< AUTHORS
 Mitya_Eremeev <mitossvyaz@mail.ru>
+=======
+>>>>>>> AUTHORS
 Mohammed Naser <mnaser@vexxhost.com>
 Mohankumar <nmohankumar1011@gmail.com>
 Monty Taylor <mordred@inaugust.com>
@@ -349,14 +386,20 @@ PhilippeJ <philippejeurissen@gmail.com>
 Pierre <pierre.freund@osones.com>
 Pierre Freund <pierre.freund@gmail.com>
 Pierre Padrixe <pierre.padrixe@numergy.com>
+<<<<<<< AUTHORS
 Pierre Riteau <pierre@stackhpc.com>
+=======
+>>>>>>> AUTHORS
 Pierre Riteau <priteau@uchicago.edu>
 Pradeep Kilambi <pkilambi@redhat.com>
 Pradeep Kumar Singh <pradeep.singh@nectechnologies.in>
 Pratik Mallya <pratik.mallya@gmail.com>
 Praveen Yalagandula <ypraveen@avinetworks.com>
 Prince Katiyar <prince.katiyar@nectechnologies.in>
+<<<<<<< AUTHORS
 Przemyslaw Szczerbik <przemyslaw.szczerbik@est.tech>
+=======
+>>>>>>> AUTHORS
 QI ZHANG <qizh@cn.ibm.com>
 Rabi Mihsra <ramishra@redhat.com>
 Rabi Mishra <ramishra@redhat.com>
@@ -375,7 +418,10 @@ Robert Pothier <rpothier@cisco.com>
 Robert van Leeuwen <robert.vanleeuwen@spilgames.com>
 Roberto Polli <robipolli@gmail.com>
 Rocky <shi.yan@unimelb.edu.au>
+<<<<<<< AUTHORS
 Rodolfo Alonso Hernandez <ralonsoh@redhat.com>
+=======
+>>>>>>> AUTHORS
 Roman Podoliaka <rpodolyaka@mirantis.com>
 Russell Bryant <rbryant@redhat.com>
 Ryan Brown <rybrown@redhat.com>
@@ -384,7 +430,10 @@ Sabeen Syed <sabeen.syed@rackspace.com>
 Sagi Shnaidman <sshnaidm@redhat.com>
 Sahdev Zala <spzala@us.ibm.com>
 Sam Alba <sam.alba@gmail.com>
+<<<<<<< AUTHORS
 Sam Kumar <sp810x@att.com>
+=======
+>>>>>>> AUTHORS
 Sampat P <sp810x@att.com>
 Samuel de Medeiros Queiroz <samuel@lsd.ufcg.edu.br>
 Saravanan KR <skramaja@redhat.com>
@@ -415,7 +464,10 @@ Spencer Yu <yushb@gohighsec.com>
 Sreeram Vancheeswaran <sreeram.vancheeswaran@in.ibm.com>
 Stan Lagun <slagun@mirantis.com>
 Stefan Nica <snica@suse.com>
+<<<<<<< AUTHORS
 Stephen Finucane <stephenfin@redhat.com>
+=======
+>>>>>>> AUTHORS
 Stephen Gordon <sgordon@redhat.com>
 Stephen Gran <stephen.gran@guardian.co.uk>
 Stephen Sugden <me@stephensugden.com>
@@ -434,7 +486,10 @@ Swapnil Kulkarni (coolsvap) <me@coolsvap.net>
 Sylvain Baubeau <sbaubeau@redhat.com>
 Takashi Kajinami <tkajinam@redhat.com>
 Takashi NATSUME <natsume.takashi@lab.ntt.co.jp>
+<<<<<<< AUTHORS
 Takashi Natsume <takanattie@gmail.com>
+=======
+>>>>>>> AUTHORS
 Tanvir Talukder <tanvirt16@gmail.com>
 Tetiana Lashchova <tlashchova@mirantis.com>
 Thierry Carrez <thierry@openstack.org>
@@ -445,12 +500,18 @@ Thomas Herve <therve@gmail.com>
 Thomas Herve <therve@redhat.com>
 Thomas Herve <thomas.herve@enovance.com>
 Thomas Spatzier <thomas.spatzier@de.ibm.com>
+<<<<<<< AUTHORS
 Tim Burke <tim.burke@gmail.com>
+=======
+>>>>>>> AUTHORS
 Tim Rozet <trozet@redhat.com>
 Tim Schnell <tim.schnell@rackspace.com>
 Tim Smith <tsmith@gridcentric.com>
 Timothy Okwii <tokwii@cisco.com>
+<<<<<<< AUTHORS
 Tobias Urdin <tobias.urdin@binero.se>
+=======
+>>>>>>> AUTHORS
 Tom Stappaerts <tom.stappaerts@nuagenetworks.net>
 Tomas Sedovic <tomas@sedovic.cz>
 Tomas Sedovic <tsedovic@redhat.com>
@@ -463,7 +524,10 @@ Unmesh Gurjar <unmesh.gurjar@hp.com>
 Van Hung Pham <hungpv@vn.fujitsu.com>
 Vic Howard <victor.r.howard@gmail.com>
 Victor Coutellier <alistarle@gmail.com>
+<<<<<<< AUTHORS
 Victor Coutellier <victor.coutellier@gmail.com>
+=======
+>>>>>>> AUTHORS
 Victor HU <huruifeng@huawei.com>
 Victor Sergeyev <vsergeyev@mirantis.com>
 Victor Stinner <vstinner@redhat.com>
@@ -501,9 +565,13 @@ abdul nizamuddin <abdul.nizamuddin@nectechnologies.in>
 abhishekkekane <abhishek.kekane@nttdata.com>
 aivanitskiy <aivanitskiy@mirantis.com>
 ananta <anant.patil@hp.com>
+<<<<<<< AUTHORS
 apetrich <apetrich@redhat.com>
 april <xiaolixu@cn.ibm.com>
 bshephar <bshephar@redhat.com>
+=======
+april <xiaolixu@cn.ibm.com>
+>>>>>>> AUTHORS
 caoyuan <cao.yuan@99cloud.net>
 cbjchen@cn.ibm.com <cbjchen@cn.ibm.com>
 chao liu <liuc@rc.inesa.com>
@@ -528,7 +596,10 @@ gengchc2 <geng.changcai2@zte.com.cn>
 ghanshyam <gmann@ghanshyammann.com>
 gong yong sheng <gong.yongsheng@99cloud.net>
 gordon chung <gord@live.ca>
+<<<<<<< AUTHORS
 gugug <gu.jin@99cloud.net>
+=======
+>>>>>>> AUTHORS
 guohliu <guohliu@cn.ibm.com>
 hgangwx <hgangwx@cn.ibm.com>
 hmonika <monikaparkar25@gmail.com>
@@ -543,13 +614,19 @@ jun xie <junxiebj@cn.ibm.com>
 junxu <xujun@cmss.chinamobile.com>
 kairat_kushaev <kkushaev@mirantis.com>
 kaz_shinohara <ksnhr.tech@gmail.com>
+<<<<<<< AUTHORS
 kumari paluru <kumari.paluru1@tcs.com>
+=======
+>>>>>>> AUTHORS
 kylin7-sg <kylin7.sg@gmail.com>
 lawrancejing <lawrancejing@gmail.com>
 liangshang <liang.shang@easystack.cn>
 lidong <lidongbj@inspur.com>
 lijunjie <lijunjie@cloudin.cn>
+<<<<<<< AUTHORS
 likui <likui@yovole.com>
+=======
+>>>>>>> AUTHORS
 linwwu <wlwwu@cn.ibm.com>
 liu-sheng <liusheng@huawei.com>
 liudong <willowd878@gmail.com>
@@ -562,7 +639,10 @@ lvdongbing <dongbing.lv@kylin-cloud.com>
 maniksidana019 <manik@voereir.com>
 matthew-fuller <mfuller@suse.com>
 matts2006 <matt@jones-central.net>
+<<<<<<< AUTHORS
 melissaml <ma.lei@99cloud.net>
+=======
+>>>>>>> AUTHORS
 mohankumar_n <nmohankumar1011@gmail.com>
 npraveen35 <npraveen35@gmail.com>
 pallavi <pallavi.s@nectechnologies.in>
@@ -583,7 +663,10 @@ sabeensyed <sabeen.syed@rackspace.com>
 sdake <sdake@redhat.com>
 shaofeng_cheng <chengsf@winhong.com>
 sharat.sharma <sharat.sharma@nectechnologies.in>
+<<<<<<< AUTHORS
 shenxindi <shenxindi_yewu@cmss.chinamobile.com>
+=======
+>>>>>>> AUTHORS
 shizhihui <zhihui.shi@easystack.cn>
 sslypushenko <sslypushenko@mirantis.com>
 tanlin <lin.tan@intel.com>
@@ -597,16 +680,25 @@ ubuntu <divakar.padiyar-nandavar@hp.com>
 venkatamahesh <venkatamaheshkotha@gmail.com>
 wanghui <wang_hui@inspur.com>
 wangtianfa <wang.tianfa@99cloud.net>
+<<<<<<< AUTHORS
 wangzihao <wangzihao@yovole.com>
 wbluo0907 <wbluo@fiberhome.com>
 weizhao <zhaowei7146@fiberhome.com>
 whoami-rajat <rajatdhasmana@gmail.com>
 wu.shiming <wushiming@yovole.com>
+=======
+wbluo0907 <wbluo@fiberhome.com>
+weizhao <zhaowei7146@fiberhome.com>
+whoami-rajat <rajatdhasmana@gmail.com>
+>>>>>>> AUTHORS
 xiaolihope <dixiaobj@cn.ibm.com>
 xiexs <xiexs@cn.fujitsu.com>
 xpress <anonymous.freelancer@yahoo.com>
 yangxurong <yangxurong@huawei.com>
+<<<<<<< AUTHORS
 yangyawei <yangyawei@inspur.com>
+=======
+>>>>>>> AUTHORS
 yanpuqing <yanpq@awcloud.com>
 yanyanhu <yanyanhu@cn.ibm.com>
 yatin <ykarel@redhat.com>
@@ -621,7 +713,10 @@ zhangchunlong1@huawei.com <zhangchunlong1@huawei.com>
 zhangguoqing <zhang.guoqing@99cloud.net>
 zhanghao <zhang.hao16@zte.com.cn>
 zhaozhilong <zhaozhilong@unitedstack.com>
+<<<<<<< AUTHORS
 zhoulinhui <df.some@foxmail.com>
+=======
+>>>>>>> AUTHORS
 zhufeng <gfl1998@163.com>
 zhufl <zhu.fanglei@zte.com.cn>
 zhulingjie <easyzlj@gmail.com>
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 8ccf6dd..c6903f7 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -1,3 +1,4 @@
+<<<<<<< CONTRIBUTING.rst
 The source repository for this project can be found at:
 
    https://opendev.org/openstack/heat
@@ -17,3 +18,21 @@ For more specific information about contributing to this repository, see the
 heat contributor guide:
 
    https://docs.openstack.org/heat/latest/contributor/contributing.html
+=======
+If you would like to contribute to the development of OpenStack,
+you must follow the steps on this page:
+
+   https://docs.openstack.org/infra/manual/developers.html
+
+Once those steps have been completed, changes to OpenStack
+should be submitted for review via the Gerrit tool, following
+the workflow documented at:
+
+   https://docs.openstack.org/infra/manual/developers.html#development-workflow
+
+Pull requests submitted through GitHub will be ignored.
+
+Bugs should be filed on OpenStack Storyboard, not GitHub:
+
+   https://storyboard.openstack.org/#!/project/989
+>>>>>>> CONTRIBUTING.rst
diff --git a/ChangeLog b/ChangeLog
index 20e0963..2adae6c 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,6 +1,7 @@
 CHANGES
 =======
 
+<<<<<<< ChangeLog
 20.0.0
 ------
 
@@ -213,12 +214,43 @@ CHANGES
 * Use err.errno to access errno for socket errors
 * Add Python3 wallaby unit tests
 * Update master for stable/victoria
+=======
+15.1.0
+------
+
+* Provide install\_command to pip install with upper-constraints
+* Fallback to upstream for Fedora image
+* Revert "Make grenade job voting again"
+* Make grenade job voting again
+* Don't create deployment when changing transport
+* Update get\_hosts to use available API
+* Use Block Storage API v3 instead of API v2
+* Detect EL8 platform-python
+* Preserve order in list\_concat\_unique
+* Resume testing grenade jobs in Victoria
+* Assume interface detach called for port not found
+* Don't update status for replaced resource
+* Net attr: Sort segments without name attribute first
+* Fix args for initializing trove client
+* Fix multiple gate issues
+* Align lower-constraints for new pip
+* Replace Fedora test image with F32
+* Add SOURCE\_IP\_PORT to LB\_ALGORITHM allowed values
+* Delete the default value of domain in role creation
+* As of nova microversion 2.57, personality is deprecated
+* Configure swap as before
+* Update TOX\_CONSTRAINTS\_FILE for stable/victoria
+* Update .gitreview for stable/victoria
+>>>>>>> ChangeLog
 
 15.0.0
 ------
 
+<<<<<<< ChangeLog
 * Net attr: Sort segments without name attribute first
 * Mark support for python3.8
+=======
+>>>>>>> ChangeLog
 * Stop testing ceilometer and aodh
 * Fix lower-constraints errors
 * Lazily cache parsed value of list/json parameters
@@ -234,11 +266,17 @@ CHANGES
 * Rename variables in sort key validation
 * Improve naming of get\_allowed\_params() argument
 * Use 'skiplist' to describe skipped indices in ResourceGroup code
+<<<<<<< ChangeLog
 * Switch from unittest2 compat methods to Python 3.x methods
 * Set context username if session client is used
 * Don't check stack status for already migrated stacks
 * Check for nova exception.Conflict rather than task\_state
 * S2007837: Fixes domain\_id parameter in the user find call
+=======
+* Set context username if session client is used
+* Don't check stack status for already migrated stacks
+* Check for nova exception.Conflict rather than task\_state
+>>>>>>> ChangeLog
 * Imported Translations from Zanata
 * Use unittest.mock instead of mock
 * Use never expiring alarm\_url for resource attributes
@@ -246,7 +284,10 @@ CHANGES
 * Imported Translations from Zanata
 * Allow scale-down of ASG as part of update
 * Switch to newer openstackdocstheme and reno versions
+<<<<<<< ChangeLog
 * Fix pygments style
+=======
+>>>>>>> ChangeLog
 * Add native grenade zuul v3 jobs
 * Fix hacking min version to 3.0.1
 * Use Fedora 31 for tests
@@ -254,8 +295,11 @@ CHANGES
 * Add propagate\_uplink\_status support to OS::Neutron::Port
 * Use unittest.mock instead of third party mock
 * Imported Translations from Zanata
+<<<<<<< ChangeLog
 * Allow null values to be returned from Macros
 * Refactor Properties.\_get\_property\_value()
+=======
+>>>>>>> ChangeLog
 * Monkey patch original current\_thread \_active
 * Add a /healthcheck URL
 * Simplify logic in retrigger\_check\_resource()
@@ -265,7 +309,10 @@ CHANGES
 * Deprecate wrappertask decorator
 * Ensure use of stored properties in actions
 * Check external resources after creation
+<<<<<<< ChangeLog
 * Optimise resource type listing
+=======
+>>>>>>> ChangeLog
 * Imported Translations from Zanata
 * Add Python3 victoria unit tests
 * Update master for stable/ussuri
@@ -310,7 +357,10 @@ CHANGES
 * Prepare the six and python 2.7 support dropping
 * Fix hacking warnings
 * Update hacking for Python3
+<<<<<<< ChangeLog
 * Use resource\_id instead of phy rsrc name in InstanceGroup
+=======
+>>>>>>> ChangeLog
 * Fix doc building with Sphinx 3
 * Reduce Heat engine workers
 * Imported Translations from Zanata
@@ -320,7 +370,10 @@ CHANGES
 * Imported Translations from Zanata
 * Filter by project id in list\_security\_groups
 * Keep db model and migration scripts in sync
+<<<<<<< ChangeLog
 * Ignore old 'vN-branch' tags when scanning for release notes
+=======
+>>>>>>> ChangeLog
 * Retry transactions for DBConnectionError
 * Add Octavia Quota Resource for lbaas quota parameters
 * Support allowed\_cidrs for Octavia listener
@@ -334,7 +387,10 @@ CHANGES
 * Make properties updatable for IKE policy of VPNaaS
 * Deprecate 'accessIPv4'/'accessIPv6' attributes
 * Avoid dynamic import from monasca client
+<<<<<<< ChangeLog
 * Use set instead of list of server\_deferred\_statuses
+=======
+>>>>>>> ChangeLog
 * [S2007072] Enables case insensitive user name search
 * Fix grenade job failure
 * [ussuri][goal] Finish dropping python 2.7 support
@@ -352,16 +408,23 @@ CHANGES
 * Heat templates doesn't support AZ for trove cluster
 * Migrate heat-functional-non-apache to zuul v3
 * Cache service availability results per request
+<<<<<<< ChangeLog
 * API ref: Update sample output of resource type APIs
 * Migrate functional test jobs to zuul v3
 * Fix duplicated words issue like "can be be used"
+=======
+* Migrate functional test jobs to zuul v3
+>>>>>>> ChangeLog
 * Check task\_state of instance before volume actions
 * Replace git.openstack.org URLs with opendev.org URLs
 * Fix the misspelling of "except"
 * Dashboard: Add stable branch reviews
 * Support for shared services in multi region mode
+<<<<<<< ChangeLog
 * Workaround client race in legacy nested stack delete
 * Remove handling for client status races
+=======
+>>>>>>> ChangeLog
 * Don't get frozen definitions for resource being deleted
 * Remove unrequired migrate check in test
 * Add unit test for creating structured SW Config
@@ -431,7 +494,10 @@ CHANGES
 * Firewall creation failed due to "INACTIVE"
 * Fix invalid assert state
 * Fix coverity check FORWARD\_NULL error
+<<<<<<< ChangeLog
 * Allow turning off max\_stacks limit
+=======
+>>>>>>> ChangeLog
 * Fix Senlin policy resource
 * Add heat resource for creating Aodh loadbalancer\_member\_health alarm type[1]
 * Bump the openstackdocstheme extension to 1.20
@@ -569,7 +635,10 @@ CHANGES
 * Don't use 'user' and 'tenant' args in context
 * Imported Translations from Zanata
 * Don't assert that we haven't signalled the calling thread
+<<<<<<< ChangeLog
 * Calculate resource data prior to legacy updates
+=======
+>>>>>>> ChangeLog
 * Set a concurrency limit on ResourceChain
 * Consider volume in 'reserved' status as detachment in progress
 * Set the octavia noop drivers correctly
diff --git a/PKG-INFO b/PKG-INFO
index 8d66aab..d263da3 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,10 @@
 Metadata-Version: 1.2
 Name: openstack-heat
+<<<<<<< PKG-INFO
 Version: 20.0.0
+=======
+Version: 15.1.0
+>>>>>>> PKG-INFO
 Summary: OpenStack Orchestration
 Home-page: https://docs.openstack.org/heat/latest/
 Author: OpenStack
@@ -94,8 +98,15 @@ Classifier: Intended Audience :: System Administrators
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: POSIX :: Linux
 Classifier: Programming Language :: Python
+<<<<<<< PKG-INFO
 Classifier: Programming Language :: Python :: 3 :: Only
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Requires-Python: >=3.8
+=======
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Requires-Python: >=3.6
+>>>>>>> PKG-INFO
diff --git a/api-ref/source/v1/parameters.yaml b/api-ref/source/v1/parameters.yaml
index 0a9db58..0e822e6 100644
--- a/api-ref/source/v1/parameters.yaml
+++ b/api-ref/source/v1/parameters.yaml
@@ -115,7 +115,11 @@ deployment_server_id_query:
 global_tenant:
   description: |
     Set to ``true`` to include stacks from all tenants (projects) in the stack
+<<<<<<< api-ref/source/v1/parameters.yaml
     list. Specify policy requirements in the Orchestration ``policy.yaml``
+=======
+    list. Specify policy requirements in the Orchestration ``policy.json``
+>>>>>>> api-ref/source/v1/parameters.yaml
     file.
   in: query
   required: false
diff --git a/api-ref/source/v1/samples/resource-schema-response.json b/api-ref/source/v1/samples/resource-schema-response.json
index 9d28b50..7b5f375 100644
--- a/api-ref/source/v1/samples/resource-schema-response.json
+++ b/api-ref/source/v1/samples/resource-schema-response.json
@@ -1,8 +1,12 @@
 {
     "attributes": {
         "an_attribute": {
+<<<<<<< api-ref/source/v1/samples/resource-schema-response.json
             "description": "A runtime value of the resource.",
             "type": "string"
+=======
+            "description": "A runtime value of the resource."
+>>>>>>> api-ref/source/v1/samples/resource-schema-response.json
         }
     },
     "properties": {
@@ -16,17 +20,28 @@
                     }
                 }
             ],
+<<<<<<< api-ref/source/v1/samples/resource-schema-response.json
             "description": "A resource property description.",
             "required": true,
             "type": "string",
             "update_allowed": false,
             "immutable": false
+=======
+            "description": "A resource description.",
+            "required": true,
+            "type": "string",
+            "update_allowed": false
+>>>>>>> api-ref/source/v1/samples/resource-schema-response.json
         }
     },
     "resource_type": "OS::Heat::AResourceName",
     "support_status": {
         "message": "A status message",
         "status": "SUPPORTED",
+<<<<<<< api-ref/source/v1/samples/resource-schema-response.json
         "version": "10.0.0"
+=======
+        "version": "2014.1"
+>>>>>>> api-ref/source/v1/samples/resource-schema-response.json
     }
 }
diff --git a/api-ref/source/v1/samples/resource-type-template-hot-response.json b/api-ref/source/v1/samples/resource-type-template-hot-response.json
index 3f97746..a35c7ed 100644
--- a/api-ref/source/v1/samples/resource-type-template-hot-response.json
+++ b/api-ref/source/v1/samples/resource-type-template-hot-response.json
@@ -1,4 +1,5 @@
 {
+<<<<<<< api-ref/source/v1/samples/resource-type-template-hot-response.json
     "heat_template_version": "2016-10-14",
     "description": "Initial template of KeyPair",
     "parameters": {
@@ -33,6 +34,26 @@
         "name": {
             "type": "string",
             "description": "The name of the key pair.",
+=======
+    "description": "Initial template of KeyPair",
+    "heat_template_version": "2016-10-14",
+    "outputs": {
+        "private_key": {
+            "description": "The private key if it has been saved.",
+            "value": "{\"get_attr\": [\"KeyPair\", \"private_key\"]}"
+        },
+        "public_key": {
+            "description": "The public key.",
+            "value": "{\"get_attr\": [\"KeyPair\", \"public_key\"]}"
+        },
+        "show": {
+            "description": "Detailed information about resource.",
+            "value": "{\"get_attr\": [\"KeyPair\", \"show\"]}"
+        }
+    },
+    "parameters": {
+        "name": {
+>>>>>>> api-ref/source/v1/samples/resource-type-template-hot-response.json
             "constraints": [
                 {
                     "length": {
@@ -40,18 +61,41 @@
                         "min": 1
                     }
                 }
+<<<<<<< api-ref/source/v1/samples/resource-type-template-hot-response.json
             ]
+=======
+            ],
+            "description": "The name of the key pair.",
+            "type": "string"
+        },
+        "public_key": {
+            "description": "The optional public key. This allows users to supply the public key from a pre-existing key pair. If not supplied, a new key pair will be generated.",
+            "type": "string"
+        },
+        "save_private_key": {
+            "default": false,
+            "description": "True if the system should remember a generated private key; False otherwise.",
+            "type": "boolean"
+>>>>>>> api-ref/source/v1/samples/resource-type-template-hot-response.json
         }
     },
     "resources": {
         "KeyPair": {
+<<<<<<< api-ref/source/v1/samples/resource-type-template-hot-response.json
             "type": "OS::Nova::KeyPair",
             "properties": {
+=======
+            "properties": {
+                "name": {
+                    "get_param": "name"
+                },
+>>>>>>> api-ref/source/v1/samples/resource-type-template-hot-response.json
                 "public_key": {
                     "get_param": "public_key"
                 },
                 "save_private_key": {
                     "get_param": "save_private_key"
+<<<<<<< api-ref/source/v1/samples/resource-type-template-hot-response.json
                 },
                 "type": {
                     "get_param": "type"
@@ -97,6 +141,11 @@
                     "show"
                 ]
             }
+=======
+                }
+            },
+            "type": "OS::Nova::KeyPair"
+>>>>>>> api-ref/source/v1/samples/resource-type-template-hot-response.json
         }
     }
 }
diff --git a/api-ref/source/v1/samples/resource-type-template-response.json b/api-ref/source/v1/samples/resource-type-template-response.json
index a645c88..61cceaf 100644
--- a/api-ref/source/v1/samples/resource-type-template-response.json
+++ b/api-ref/source/v1/samples/resource-type-template-response.json
@@ -1,4 +1,5 @@
 {
+<<<<<<< api-ref/source/v1/samples/resource-type-template-response.json
     "HeatTemplateFormatVersion": "2012-12-12",
     "Description": "Initial template of KeyPair",
     "Parameters": {
@@ -10,11 +11,42 @@
             "Default": false,
             "Type": "Boolean",
             "Description": "True if the system should remember a generated private key; False otherwise.",
+=======
+    "Description": "Initial template of KeyPair",
+    "HeatTemplateFormatVersion": "2012-12-12",
+    "Outputs": {
+        "private_key": {
+            "Description": "The private key if it has been saved.",
+            "Value": "{\"Fn::GetAtt\": [\"KeyPair\", \"private_key\"]}"
+        },
+        "public_key": {
+            "Description": "The public key.",
+            "Value": "{\"Fn::GetAtt\": [\"KeyPair\", \"public_key\"]}"
+        },
+        "show": {
+            "Description": "Detailed information about resource.",
+            "Value": "{\"Fn::GetAtt\": [\"KeyPair\", \"show\"]}"
+        }
+    },
+    "Parameters": {
+        "name": {
+            "Description": "The name of the key pair.",
+            "MaxLength": 255,
+            "MinLength": 1,
+            "Type": "String"
+        },
+        "public_key": {
+            "Description": "The optional public key. This allows users to supply the public key from a pre-existing key pair. If not supplied, a new key pair will be generated.",
+            "Type": "String"
+        },
+        "save_private_key": {
+>>>>>>> api-ref/source/v1/samples/resource-type-template-response.json
             "AllowedValues": [
                 "True",
                 "true",
                 "False",
                 "false"
+<<<<<<< api-ref/source/v1/samples/resource-type-template-response.json
             ]
         },
         "type": {
@@ -34,17 +66,31 @@
             "Type": "String",
             "Description": "The name of the key pair.",
             "MaxLength": 255
+=======
+            ],
+            "Default": false,
+            "Description": "True if the system should remember a generated private key; False otherwise.",
+            "Type": "Boolean"
+>>>>>>> api-ref/source/v1/samples/resource-type-template-response.json
         }
     },
     "Resources": {
         "KeyPair": {
+<<<<<<< api-ref/source/v1/samples/resource-type-template-response.json
             "Type": "OS::Nova::KeyPair",
             "Properties": {
+=======
+            "Properties": {
+                "name": {
+                    "Ref": "name"
+                },
+>>>>>>> api-ref/source/v1/samples/resource-type-template-response.json
                 "public_key": {
                     "Ref": "public_key"
                 },
                 "save_private_key": {
                     "Ref": "save_private_key"
+<<<<<<< api-ref/source/v1/samples/resource-type-template-response.json
                 },
                 "type": {
                     "Ref": "type"
@@ -90,6 +136,11 @@
                     "show"
                 ]
             }
+=======
+                }
+            },
+            "Type": "OS::Nova::KeyPair"
+>>>>>>> api-ref/source/v1/samples/resource-type-template-response.json
         }
     }
 }
diff --git a/bindep.txt b/bindep.txt
index 55d97b8..a4151a4 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -6,6 +6,10 @@ mariadb-server [platform:redhat]
 postgresql
 
 build-essential [platform:dpkg]
+<<<<<<< bindep.txt
+=======
+python-dev [platform:dpkg]
+>>>>>>> bindep.txt
 python3-all-dev [platform:dpkg]
 libxml2-dev [platform:dpkg]
 libxslt1-dev [platform:dpkg]
@@ -18,6 +22,10 @@ mysql-client [platform:dpkg]
 postgresql-client [platform:dpkg]
 
 gcc [platform:rpm]
+<<<<<<< bindep.txt
+=======
+python-devel [platform:rpm]
+>>>>>>> bindep.txt
 python3-devel [platform:fedora platform:suse]
 python3 [platform:suse]
 libxml2-devel [platform:rpm]
diff --git a/config-generator.conf b/config-generator.conf
index 322215a..ad66525 100644
--- a/config-generator.conf
+++ b/config-generator.conf
@@ -13,10 +13,16 @@ namespace = heat.api.aws.ec2token
 namespace = keystonemiddleware.auth_token
 namespace = oslo.messaging
 namespace = oslo.middleware
+<<<<<<< config-generator.conf
 namespace = oslo.cache
 namespace = oslo.log
 namespace = oslo.policy
 namespace = oslo.reports
+=======
+namespace = oslo.db
+namespace = oslo.log
+namespace = oslo.policy
+>>>>>>> config-generator.conf
 namespace = oslo.service.service
 namespace = oslo.service.periodic_task
 namespace = oslo.service.sslutils
diff --git a/debian/changelog b/debian/changelog
index 0d23048..f4510e8 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,4 @@
+<<<<<<< debian/changelog
 heat (1:20.0.0-0ubuntu1) lunar; urgency=medium
 
   * New upstream release for OpenStack Antelope.
@@ -170,6 +171,46 @@ heat (1:15.0.0+git2020120911.4370af157-0ubuntu1) hirsute; urgency=medium
   * d/control: Align (Build-)Depends with upstream.
 
  -- Corey Bryant <corey.bryant@canonical.com>  Wed, 09 Dec 2020 11:57:44 -0500
+=======
+heat (1:15.1.0-0ubuntu1~cloud0ubuntu1) UNRELEASED; urgency=medium
+
+  * d/p/lp2019175.patch: Fix None comparison when sorting
+    by `updated_at` (LP: #2019175)
+
+ -- Ponnuvel Palaniyappan <pponnuvel@gmail.com>  Mon, 15 May 2023 17:55:21 +0100
+
+heat (1:15.1.0-0ubuntu1~cloud0) focal-victoria; urgency=medium
+
+  [ Corey Bryant ]
+  * d/gbp.conf: Create stable/victoria branch.
+
+  [ Felipe Reyes ]
+  * d/watch: Scope to 15.x series.
+  * New stable point release for OpenStack Victoria (LP: #1962772).
+
+ -- Felipe Reyes <felipe.reyes@canonical.com>  Wed, 02 Mar 2022 16:20:32 -0300
+
+heat (1:15.0.0-0ubuntu1~cloud0) focal-victoria; urgency=medium
+
+  * New upstream release for the Ubuntu Cloud Archive.
+
+ -- Openstack Ubuntu Testing Bot <openstack-testing-bot@ubuntu.com>  Fri, 16 Oct 2020 15:46:13 +0000
+
+heat (1:15.0.0-0ubuntu1) groovy; urgency=medium
+
+  * d/control: Update VCS paths for move to lp:~ubuntu-openstack-dev.
+  * d/watch: Track 15.x series.
+  * New upstream release for OpenStack Victoria.
+  * d/control: Align (Build-)Depends with upstream.
+
+ -- Chris MacNaughton <chris.macnaughton@ubuntu.com>  Thu, 15 Oct 2020 11:13:41 +0000
+
+heat (1:15.0.0~b3~git2020091012.c54518d60-0ubuntu3) UNRELEASED; urgency=medium
+
+  * d/control: Update VCS paths for move to lp:~ubuntu-openstack-dev.
+
+ -- Chris MacNaughton <chris.macnaughton@ubuntu.com>  Thu, 08 Oct 2020 13:07:53 +0000
+>>>>>>> debian/changelog
 
 heat (1:15.0.0~b3~git2020091012.c54518d60-0ubuntu2) groovy; urgency=medium
 
diff --git a/debian/control b/debian/control
index 997c44a..bd77603 100644
--- a/debian/control
+++ b/debian/control
@@ -11,11 +11,19 @@ Uploaders:
  Mehdi Abaakouk <sileht@sileht.net>,
  David Della Vecchia <ddv@canonical.com>,
 Build-Depends:
+<<<<<<< debian/control
  debhelper-compat (= 13),
  dh-python,
  openstack-pkg-tools (>= 119ubuntu1~),
  python3-all,
  python3-pbr (>= 3.1.1),
+=======
+ debhelper-compat (= 12),
+ dh-python,
+ openstack-pkg-tools (>= 85ubuntu3~),
+ python3-all,
+ python3-pbr (>= 2.0.0),
+>>>>>>> debian/control
  python3-setuptools,
  python3-sphinx (>= 2.0.0),
 Build-Depends-Indep:
@@ -28,24 +36,40 @@ Build-Depends-Indep:
  python3-cinderclient (>= 1:3.3.0),
  python3-coverage (>= 4.0),
  python3-croniter (>= 0.3.4),
+<<<<<<< debian/control
  python3-cryptography (>= 2.5),
  python3-ddt (>= 1.4.1),
  python3-debtcollector (>= 1.19.0),
  python3-designateclient (>= 2.7.0),
  python3-doc8,
+=======
+ python3-cryptography (>= 2.1),
+ python3-debtcollector (>= 1.19.0),
+ python3-designateclient (>= 2.7.0),
+ python3-doc8 (>= 0.6.0),
+>>>>>>> debian/control
  python3-eventlet (>= 0.18.2),
  python3-fixtures (>= 3.0.0),
  python3-gabbi (>= 1.35.0),
  python3-glanceclient (>= 1:2.8.0),
  python3-gnocchiclient (>= 3.3.1),
  python3-greenlet (>= 0.4.10),
+<<<<<<< debian/control
  python3-hacking,
+=======
+ python3-hacking (>= 1.1.0),
+>>>>>>> debian/control
  python3-heatclient (>= 1.10.0),
  python3-ironicclient (>= 2.8.0),
  python3-keystoneauth1 (>= 3.18.0),
  python3-keystoneclient (>= 1:3.8.0),
+<<<<<<< debian/control
  python3-keystonemiddleware (>= 5.1.0),
  python3-kombu (>= 5.0.1),
+=======
+ python3-keystonemiddleware (>= 4.17.0),
+ python3-kombu (>= 4.6.11),
+>>>>>>> debian/control
  python3-lxml (>= 4.5.0),
  python3-magnumclient (>= 2.3.0),
  python3-manilaclient (>= 1.16.0),
@@ -56,16 +80,25 @@ Build-Depends-Indep:
  python3-mox3 (>= 0.20.0),
  python3-netaddr (>= 0.7.18),
  python3-neutron-lib (>= 1.14.0),
+<<<<<<< debian/control
  python3-neutronclient (>= 1:7.7.0),
+=======
+ python3-neutronclient (>= 1:6.14.0),
+>>>>>>> debian/control
  python3-novaclient (>= 2:9.1.0),
  python3-octaviaclient (>= 1.8.0),
  python3-openstackclient (>= 3.12.0),
  python3-openstackdocstheme (>= 2.2.1),
+<<<<<<< debian/control
  python3-openstacksdk (>= 0.28.0),
+=======
+ python3-openstacksdk (>= 0.11.2),
+>>>>>>> debian/control
  python3-os-api-ref (>= 1.4.0),
  python3-os-testr (>= 1.0.0),
  python3-oslo.cache (>= 1.26.0),
  python3-oslo.concurrency (>= 3.26.0),
+<<<<<<< debian/control
  python3-oslo.config (>= 1:6.8.0),
  python3-oslo.context (>= 1:2.22.0),
  python3-oslo.db (>= 6.0.0),
@@ -79,22 +112,50 @@ Build-Depends-Indep:
  python3-oslo.service (>= 1.24.0),
  python3-oslo.upgradecheck (>= 1.3.0),
  python3-oslo.utils (>= 4.5.0),
+=======
+ python3-oslo.config (>= 1:5.2.0),
+ python3-oslo.context (>= 1:2.19.2),
+ python3-oslo.db (>= 6.0.0),
+ python3-oslo.i18n (>= 3.15.3),
+ python3-oslo.log (>= 3.36.0),
+ python3-oslo.messaging (>= 5.29.0),
+ python3-oslo.middleware (>= 3.31.0),
+ python3-oslo.policy (>= 1.30.0),
+ python3-oslo.reports (>= 1.18.0),
+ python3-oslo.serialization (>= 2.18.0),
+ python3-oslo.service (>= 1.24.0),
+ python3-oslo.upgradecheck (>= 0.1.0),
+ python3-oslo.utils (>= 3.37.0),
+>>>>>>> debian/control
  python3-oslo.versionedobjects (>= 1.31.2),
  python3-oslotest (>= 1:3.2.0),
  python3-osprofiler (>= 1.4.0),
  python3-paramiko (>= 2.0),
  python3-pastedeploy (>= 1.5.0),
+<<<<<<< debian/control
  python3-psycopg2 (>= 2.7),
  python3-pygments (>= 2.2.0),
  python3-pymysql (>= 0.8.0),
  python3-requests (>= 2.23.0),
+=======
+ python3-psycopg2 (>= 2.8),
+ python3-pygments (>= 2.2.0),
+ python3-pymysql (>= 0.8.0),
+ python3-reno (>= 2.11.2),
+ python3-requests (>= 2.14.2),
+>>>>>>> debian/control
  python3-routes (>= 2.3.1),
  python3-saharaclient (>= 1.4.0),
  python3-senlinclient (>= 1.1.0),
  python3-six (>= 1.10.0),
  python3-sphinxcontrib.apidoc (>= 0.2.0),
  python3-sphinxcontrib.httpdomain (>= 1.3.0),
+<<<<<<< debian/control
  python3-sqlalchemy (>= 1.4.0),
+=======
+ python3-sqlalchemy (>= 1.0.10),
+>>>>>>> debian/control
  python3-stestr (>= 2.0.0),
  python3-stevedore (>= 1:3.1.0),
  python3-swiftclient (>= 1:3.2.0),
@@ -107,11 +168,21 @@ Build-Depends-Indep:
  python3-tz (>= 2013.6),
  python3-vitrageclient (>= 2.7.0),
  python3-webob (>= 1:1.7.1),
+<<<<<<< debian/control
  python3-yaml (>= 5.1),
  python3-yaql (>= 1.1.3),
  python3-zaqarclient (>= 1.3.0),
  subunit,
 Standards-Version: 4.6.1
+=======
+ python3-yaml (>= 3.13),
+ python3-yaql (>= 1.1.3),
+ python3-zaqarclient (>= 1.3.0),
+ python3-zunclient (>= 3.4.0),
+ subunit,
+Standards-Version: 4.5.0
+Vcs-Browser: https://git.launchpad.net/~ubuntu-openstack-dev/ubuntu/+source/heat
+>>>>>>> debian/control
 Vcs-Git: https://git.launchpad.net/~ubuntu-openstack-dev/ubuntu/+source/heat
 Homepage: https://wiki.openstack.org/Heat
 Testsuite: autopkgtest-pkg-python
@@ -170,8 +241,12 @@ Depends:
  python3-blazarclient (>= 1.0.1),
  python3-cinderclient (>= 1:3.3.0),
  python3-croniter (>= 0.3.4),
+<<<<<<< debian/control
  python3-cryptography (>= 2.5),
  python3-ddt (>= 1.4.1),
+=======
+ python3-cryptography (>= 2.1),
+>>>>>>> debian/control
  python3-debtcollector (>= 1.19.0),
  python3-designateclient (>= 2.7.0),
  python3-eventlet (>= 0.18.2),
@@ -183,7 +258,11 @@ Depends:
  python3-ironicclient (>= 2.8.0),
  python3-keystoneauth1 (>= 3.18.0),
  python3-keystoneclient (>= 1:3.8.0),
+<<<<<<< debian/control
  python3-keystonemiddleware (>= 5.1.0),
+=======
+ python3-keystonemiddleware (>= 4.17.0),
+>>>>>>> debian/control
  python3-lxml (>= 4.5.0),
  python3-magnumclient (>= 2.3.0),
  python3-manilaclient (>= 1.16.0),
@@ -192,6 +271,7 @@ Depends:
  python3-monascaclient (>= 1.12.0),
  python3-netaddr (>= 0.7.18),
  python3-neutron-lib (>= 1.14.0),
+<<<<<<< debian/control
  python3-neutronclient (>= 1:7.7.0),
  python3-novaclient (>= 2:9.1.0),
  python3-octaviaclient (>= 1.8.0),
@@ -218,23 +298,66 @@ Depends:
  python3-pbr (>= 3.1.1),
  python3-pymysql,
  python3-requests (>= 2.23.0),
+=======
+ python3-neutronclient (>= 1:6.14.0),
+ python3-novaclient (>= 2:9.1.0),
+ python3-octaviaclient (>= 1.8.0),
+ python3-openstackclient (>= 3.12.0),
+ python3-openstacksdk (>= 0.11.2),
+ python3-oslo.cache (>= 1.26.0),
+ python3-oslo.concurrency (>= 3.26.0),
+ python3-oslo.config (>= 1:5.2.0),
+ python3-oslo.context (>= 1:2.19.2),
+ python3-oslo.db (>= 6.0.0),
+ python3-oslo.i18n (>= 3.15.3),
+ python3-oslo.log (>= 3.36.0),
+ python3-oslo.messaging (>= 5.29.0),
+ python3-oslo.middleware (>= 3.31.0),
+ python3-oslo.policy (>= 1.30.0),
+ python3-oslo.reports (>= 1.18.0),
+ python3-oslo.serialization (>= 2.18.0),
+ python3-oslo.service (>= 1.24.0),
+ python3-oslo.upgradecheck (>= 0.1.0),
+ python3-oslo.utils (>= 3.37.0),
+ python3-oslo.versionedobjects (>= 1.31.2),
+ python3-osprofiler (>= 1.4.0),
+ python3-pastedeploy (>= 1.5.0),
+ python3-pbr (>= 2.0.0),
+ python3-pymysql,
+ python3-requests (>= 2.14.2),
+>>>>>>> debian/control
  python3-routes (>= 2.3.1),
  python3-saharaclient (>= 1.4.0),
  python3-senlinclient (>= 0.3.0),
  python3-six (>= 1.10.0),
+<<<<<<< debian/control
  python3-sqlalchemy (>= 1.4.0),
+=======
+ python3-sqlalchemy (>= 1.0.10),
+>>>>>>> debian/control
  python3-stevedore (>= 1:3.1.0),
  python3-swiftclient (>= 1:3.2.0),
  python3-tenacity (>= 6.1.0),
  python3-troveclient (>= 1:2.2.0),
  python3-tz (>= 2013.6),
+<<<<<<< debian/control
  python3-vitrageclient (>= 2.7.0),
  python3-webob (>= 1:1.7.1),
  python3-yaml (>= 5.1),
+=======
+ python3-webob (>= 1:1.7.1),
+ python3-yaml (>= 3.13),
+>>>>>>> debian/control
  python3-yaql (>= 1.1.3),
  python3-zaqarclient (>= 1.3.0),
  ${misc:Depends},
  ${python3:Depends},
+<<<<<<< debian/control
+=======
+Suggests:
+ python3-vitrageclient (>= 2.7.0),
+ python3-zunclient (>= 3.4.0),
+>>>>>>> debian/control
 Description: OpenStack orchestration service - Python 3 files
  Heat is a service to orchestrate multiple composite cloud applications using
  templates, through both an OpenStack-native ReST API and a
diff --git a/debian/gbp.conf b/debian/gbp.conf
index 24f9139..5597010 100644
--- a/debian/gbp.conf
+++ b/debian/gbp.conf
@@ -1,5 +1,9 @@
 [DEFAULT]
+<<<<<<< debian/gbp.conf
 debian-branch = master
+=======
+debian-branch = stable/victoria
+>>>>>>> debian/gbp.conf
 upstream-tag = %(version)s
 pristine-tar = True
 
diff --git a/debian/heat-common.postinst b/debian/heat-common.postinst
index 3e4d00c..4fb0f75 100644
--- a/debian/heat-common.postinst
+++ b/debian/heat-common.postinst
@@ -8,7 +8,11 @@ if [ "$1" = "configure" ] ; then
     fi
     if ! getent passwd heat > /dev/null 2>&1; then
         adduser --system --home /var/lib/heat --ingroup heat --no-create-home \
+<<<<<<< debian/heat-common.postinst
             --shell /usr/sbin/nologin heat
+=======
+            --shell /bin/false heat
+>>>>>>> debian/heat-common.postinst
     fi
 
     chown heat:adm /var/log/heat
diff --git a/debian/patches/lp2019175.patch b/debian/patches/lp2019175.patch
new file mode 100644
index 0000000..8fe7e1c
--- /dev/null
+++ b/debian/patches/lp2019175.patch
@@ -0,0 +1,117 @@
+From 785180ddc7a31bcee1494c7935406cccde902b0d Mon Sep 17 00:00:00 2001
+From: Erik Panter <e.panter@mittwald.de>
+Date: Fri, 29 Oct 2021 11:03:27 +0200
+Subject: [PATCH] Fix None comparison when sorting by `updated_at`
+
+When sorting resource candidates in `_get_best_existing_rsrc_db`,
+resources with the same score are sorted by `updated_at`, which can be
+`None`. If that is the case, use `created_at` instead.
+
+(victoria to ussuri)
+Conflicts:
+	heat/tests/test_convg_stack.py
+
+Resolved conflict caused by the following commit.
+
+commit fd6cf83554db68752278d37f577ba984d9f831b2
+    Use unittest.mock instead of third party mock
+
+Task: 43815
+Story: 2009653
+Change-Id: Ic0265fcf7ceb811803cdebaa8932fe80dc59a627
+(cherry picked from commit 403fa55fe94ae1063d2cb4b8db3b63b76b1ee5cf)
+(cherry picked from commit 5ea5276a3e76829fd72345e3aae7482cbd260b51)
+(cherry picked from commit aa31864de4fe480674a0669c05a024ab28c3c429)
+(cherry picked from commit 26a20de88c0b578422e9847c1210d10f10b04854)
+---
+ heat/engine/stack.py           |  5 ++++-
+ heat/tests/test_convg_stack.py | 28 +++++++++++++++++++++++-----
+ 2 files changed, 27 insertions(+), 6 deletions(-)
+
+diff --git a/heat/engine/stack.py b/heat/engine/stack.py
+index 1b5f65387..801fce0a0 100644
+--- a/heat/engine/stack.py
++++ b/heat/engine/stack.py
+@@ -1499,7 +1499,10 @@ class Stack(collections.Mapping):
+                     # Rolling back to previous resource
+                     score += 10
+ 
+-                return score, ext_rsrc.updated_at
++                last_changed_at = ext_rsrc.updated_at
++                if last_changed_at is None:
++                    last_changed_at = ext_rsrc.created_at
++                return score, last_changed_at
+ 
+             candidates = sorted((r for r in self.ext_rsrcs_db.values()
+                                  if r.name == rsrc_name),
+diff --git a/heat/tests/test_convg_stack.py b/heat/tests/test_convg_stack.py
+index 1d2476d0b..466ea11c9 100644
+--- a/heat/tests/test_convg_stack.py
++++ b/heat/tests/test_convg_stack.py
+@@ -11,7 +11,10 @@
+ #    License for the specific language governing permissions and limitations
+ #    under the License.
+ 
++from datetime import datetime
++from datetime import timedelta
+ import mock
++
+ from oslo_config import cfg
+ 
+ from heat.common import template_format
+@@ -428,22 +431,32 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
+         stack.prev_raw_template_id = 2
+         stack.t.id = 3
+ 
+-        def db_resource(current_template_id):
++        def db_resource(current_template_id,
++                        created_at=None,
++                        updated_at=None):
+             db_res = resource_objects.Resource(stack.context)
+             db_res['id'] = current_template_id
+             db_res['name'] = 'A'
+             db_res['current_template_id'] = current_template_id
+-            db_res['action'] = 'CREATE'
++            db_res['action'] = 'UPDATE' if updated_at else 'CREATE'
+             db_res['status'] = 'COMPLETE'
+-            db_res['updated_at'] = None
++            db_res['updated_at'] = updated_at
++            db_res['created_at'] = created_at
+             db_res['replaced_by'] = None
+             return db_res
+ 
++        start_time = datetime.utcfromtimestamp(0)
++
++        def t(minutes):
++            return start_time + timedelta(minutes=minutes)
++
+         a_res_2 = db_resource(2)
+         a_res_3 = db_resource(3)
+-        a_res_1 = db_resource(1)
++        a_res_0 = db_resource(0, created_at=t(0), updated_at=t(1))
++        a_res_1 = db_resource(1, created_at=t(2))
+         existing_res = {a_res_2.id: a_res_2,
+                         a_res_3.id: a_res_3,
++                        a_res_0.id: a_res_0,
+                         a_res_1.id: a_res_1}
+         stack.ext_rsrcs_db = existing_res
+         best_res = stack._get_best_existing_rsrc_db('A')
+@@ -459,9 +472,14 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
+         # no resource with current template id as 3 or 2
+         del existing_res[2]
+         best_res = stack._get_best_existing_rsrc_db('A')
+-        # should return resource with template id 1 existing in DB
++        # should return resource with template id 1 which is the newest
+         self.assertEqual(a_res_1.id, best_res.id)
+ 
++        del existing_res[1]
++        best_res = stack._get_best_existing_rsrc_db('A')
++        # should return resource with template id 0 existing in the db
++        self.assertEqual(a_res_0.id, best_res.id)
++
+     @mock.patch.object(parser.Stack, '_converge_create_or_update')
+     def test_updated_time_stack_create(self, mock_ccu, mock_cr):
+         stack = parser.Stack(utils.dummy_context(), 'convg_updated_time_test',
+-- 
+2.39.2
+
diff --git a/debian/patches/series b/debian/patches/series
index da0d792..45f0639 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,2 +1,7 @@
+<<<<<<< debian/patches/series
 drop-zun.patch
 sudoers_patch.patch
+=======
+sudoers_patch.patch
+lp2019175.patch
+>>>>>>> debian/patches/series
diff --git a/debian/rules b/debian/rules
index e1d36fb..600b2ab 100755
--- a/debian/rules
+++ b/debian/rules
@@ -17,7 +17,11 @@ include /usr/share/openstack-pkg-tools/pkgos.make
 
 ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS)))
 override_dh_auto_test:
+<<<<<<< debian/rules
 	pkgos-dh_auto_test --serial --no-py2 '^(?!heat_integrationtests)'
+=======
+	pkgos-dh_auto_test --no-py2 '^(?!heat_integrationtests)'
+>>>>>>> debian/rules
 endif
 
 override_dh_clean:
@@ -36,9 +40,12 @@ override_dh_install:
 		--output-file etc/heat/heat.conf
 	dh_install
 
+<<<<<<< debian/rules
 override_dh_python3:
 	dh_python3 --shebang=/usr/bin/python3
 
+=======
+>>>>>>> debian/rules
 #override_dh_installman:
 #	python setup.py build
 #	make -C doc man
diff --git a/debian/watch b/debian/watch
index adbaf0e..4e96968 100644
--- a/debian/watch
+++ b/debian/watch
@@ -1,3 +1,7 @@
 version=3
 opts="uversionmangle=s/\.([a-zA-Z])/~$1/;s/%7E/~/;s/\.0b/~b/;s/\.0rc/~rc/" \
+<<<<<<< debian/watch
  https://tarballs.opendev.org/openstack/heat/ openstack-heat-(\d.*)\.tar\.gz
+=======
+ https://tarballs.opendev.org/openstack/heat/ openstack-heat-(15\.\d.*)\.tar\.gz
+>>>>>>> debian/watch
diff --git a/devstack/lib/heat b/devstack/lib/heat
index 170d034..6050cab 100644
--- a/devstack/lib/heat
+++ b/devstack/lib/heat
@@ -298,6 +298,13 @@ function start_heat {
             enable_apache_site heat-api
             enable_apache_site heat-api-cfn
             restart_apache_server
+<<<<<<< devstack/lib/heat
+=======
+            tail_log heat-api /var/log/$APACHE_NAME/heat_api.log
+            tail_log heat-api-access /var/log/$APACHE_NAME/heat_api_access.log
+            tail_log heat-api-cfn /var/log/$APACHE_NAME/heat_api_cfn.log
+            tail_log heat-api-cfn-access /var/log/$APACHE_NAME/heat_api_cfn_access.log
+>>>>>>> devstack/lib/heat
         else
             run_process h-api "$HEAT_BIN_DIR/uwsgi --ini $HEAT_API_UWSGI_CONF" ""
             run_process h-api-cfn "$HEAT_BIN_DIR/uwsgi --ini $HEAT_CFN_API_UWSGI_CONF" ""
@@ -333,6 +340,17 @@ function stop_heat {
     fi
 }
 
+<<<<<<< devstack/lib/heat
+=======
+# TODO(ramishra): Remove after Queens
+function stop_cw_service {
+    if $SYSTEMCTL is-enabled devstack@h-api-cw.service; then
+        $SYSTEMCTL stop devstack@h-api-cw.service
+        $SYSTEMCTL disable devstack@h-api-cw.service
+    fi
+}
+
+>>>>>>> devstack/lib/heat
 # _cleanup_heat_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
 function _cleanup_heat_apache_wsgi {
     if [[ "$WSGI_MODE" == "uwsgi" ]]; then
@@ -437,6 +455,7 @@ function configure_tempest_for_heat {
     # Skip AutoscalingLoadBalancerTest and AutoscalingLoadBalancerv2Test as deprecated neutron-lbaas service is not enabled
     # Skip CfnInitIntegrationTest as latest fedora images don't have heat-cfntools
     iniset $TEMPEST_CONFIG heat_plugin skip_scenario_test_list 'AutoscalingLoadBalancerTest, AutoscalingLoadBalancerv2Test, \
+<<<<<<< devstack/lib/heat
 	    SoftwareConfigIntegrationTest, AodhAlarmTest, CfnInitIntegrationTest, \
         BasicResourcesTest'
     # Skip LoadBalancerv2Test as deprecated neutron-lbaas service is not enabled.
@@ -450,24 +469,60 @@ function configure_tempest_for_heat {
     openstack flavor show m1.heat_micro || openstack flavor create m1.heat_micro --ram 128 --disk 1
 
     export OS_CLOUD=devstack
+=======
+	    SoftwareConfigIntegrationTest, AodhAlarmTest, CfnInitIntegrationTest'
+    # Skip LoadBalancerv2Test as deprecated neutron-lbaas service is not enabled
+    iniset $TEMPEST_CONFIG heat_plugin skip_functional_test_list 'LoadBalancerv2Test, NotificationTest'
+
+    openstack flavor show m1.heat_int || openstack flavor create m1.heat_int --ram 512 --disk 4
+    openstack flavor show m1.heat_micro || openstack flavor create m1.heat_micro --ram 128 --disk 1
+
+    source $TOP_DIR/openrc demo demo
+>>>>>>> devstack/lib/heat
     openstack network show heat-net || openstack network create heat-net
     openstack subnet show heat-subnet || openstack subnet create heat-subnet --network heat-net --subnet-range 10.0.5.0/24
     openstack router add subnet router1 heat-subnet
 
+<<<<<<< devstack/lib/heat
+=======
+    iniset $TEMPEST_CONFIG heat_plugin username $OS_USERNAME
+    iniset $TEMPEST_CONFIG heat_plugin password $OS_PASSWORD
+    iniset $TEMPEST_CONFIG heat_plugin project_name $OS_PROJECT_NAME
+    iniset $TEMPEST_CONFIG heat_plugin auth_url $OS_AUTH_URL
+    iniset $TEMPEST_CONFIG heat_plugin user_domain_id $OS_USER_DOMAIN_ID
+    iniset $TEMPEST_CONFIG heat_plugin project_domain_id $OS_PROJECT_DOMAIN_ID
+    iniset $TEMPEST_CONFIG heat_plugin user_domain_name $OS_USER_DOMAIN_NAME
+    iniset $TEMPEST_CONFIG heat_plugin project_domain_name $OS_PROJECT_DOMAIN_NAME
+    iniset $TEMPEST_CONFIG heat_plugin region $OS_REGION_NAME
+    iniset $TEMPEST_CONFIG heat_plugin auth_version $OS_IDENTITY_API_VERSION
+
+    source $TOP_DIR/openrc admin admin
+    iniset $TEMPEST_CONFIG heat_plugin admin_username $OS_USERNAME
+    iniset $TEMPEST_CONFIG heat_plugin admin_password $OS_PASSWORD
+
+>>>>>>> devstack/lib/heat
     # NOTE(ianw) OpenDev infra only keeps the latest two Fedoras
     # around; prefer the mirror but allow fallback
     if [[ -e /etc/ci/mirror_info.sh ]]; then
         source /etc/ci/mirror_info.sh
     fi
     HEAT_TEST_FEDORA_IMAGE_UPSTREAM=https://download.fedoraproject.org/pub/fedora/linux
+<<<<<<< devstack/lib/heat
     HEAT_TEST_FEDORA_IMAGE_PATH=releases/36/Cloud/x86_64/images/Fedora-Cloud-Base-36-1.5.x86_64.qcow2
+=======
+    HEAT_TEST_FEDORA_IMAGE_PATH=releases/33/Cloud/x86_64/images/Fedora-Cloud-Base-33-1.2.x86_64.qcow2
+>>>>>>> devstack/lib/heat
     if curl --output /dev/null --silent --head --fail "${NODEPOOL_FEDORA_MIRROR}/${HEAT_TEST_FEDORA_IMAGE_PATH}"; then
         export HEAT_TEST_FEDORA_IMAGE="${NODEPOOL_FEDORA_MIRROR}/${HEAT_TEST_FEDORA_IMAGE_PATH}"
     else
         export HEAT_TEST_FEDORA_IMAGE="${HEAT_TEST_FEDORA_IMAGE_UPSTREAM}/${HEAT_TEST_FEDORA_IMAGE_PATH}"
     fi
     TOKEN=$(openstack token issue -c id -f value)
+<<<<<<< devstack/lib/heat
     local image_exists=$( openstack image list | grep "Fedora-Cloud-Base-36-1.5.x86_64" )
+=======
+    local image_exists=$( openstack image list | grep "Fedora-Cloud-Base-33-1.2.x86_64" )
+>>>>>>> devstack/lib/heat
     if [[ -z $image_exists ]]; then
         if is_service_enabled g-api; then
             upload_image $HEAT_TEST_FEDORA_IMAGE $TOKEN
@@ -483,6 +538,7 @@ function configure_tempest_for_heat {
     export OS_CREDENTIAL_SECRET_ID=$(openstack secret store -n heat-multi-cloud-test-cred --payload \
         '{"auth_type": "v3applicationcredential", "auth": {"auth_url": $OS_AUTH_URL, "application_credential_id": $app_cred_id, "application_credential_secret": "secret"}}'\
         -c "Secret href" -f value)
+<<<<<<< devstack/lib/heat
     source $TOP_DIR/openrc demo demo
     iniset $TEMPEST_CONFIG heat_plugin username $OS_USERNAME
     iniset $TEMPEST_CONFIG heat_plugin password $OS_PASSWORD
@@ -499,6 +555,8 @@ function configure_tempest_for_heat {
     iniset $TEMPEST_CONFIG heat_plugin admin_username $OS_USERNAME
     iniset $TEMPEST_CONFIG heat_plugin admin_password $OS_PASSWORD
     export OS_CLOUD=devstack-admin
+=======
+>>>>>>> devstack/lib/heat
 }
 
 # Restore xtrace
diff --git a/devstack/upgrade/resources.sh b/devstack/upgrade/resources.sh
index 277e37d..575c22e 100755
--- a/devstack/upgrade/resources.sh
+++ b/devstack/upgrade/resources.sh
@@ -64,6 +64,7 @@ function _run_heat_integrationtests {
     # Run set of specified functional tests
     UPGRADE_TESTS=upgrade_tests.list
     _write_heat_integrationtests $UPGRADE_TESTS
+<<<<<<< devstack/upgrade/resources.sh
     # NOTE(gmann): heat script does not know about
     # TEMPEST_VENV_UPPER_CONSTRAINTS, only DevStack does.
     # This sources that one variable from it.
@@ -98,6 +99,12 @@ function _run_heat_integrationtests {
     tox -evenv-tempest -- stestr --test-path=$DEST/heat/heat_integrationtests --top-dir=$DEST/heat \
         --group_regex='heat_tempest_plugin\.tests\.api\.test_heat_api[._]([^_]+)' \
         run --include-list $UPGRADE_TESTS
+=======
+
+    tox -evenv-tempest -- stestr --test-path=$DEST/heat/heat_integrationtests --top-dir=$DEST/heat \
+        --group_regex='heat_tempest_plugin\.tests\.api\.test_heat_api[._]([^_]+)' \
+        run --whitelist-file $UPGRADE_TESTS
+>>>>>>> devstack/upgrade/resources.sh
     _heat_set_user
     popd
 }
@@ -135,7 +142,11 @@ function create {
     local stack_name='grenadine'
     resource_save heat stack_name $stack_name
     local loc=`dirname $BASH_SOURCE`
+<<<<<<< devstack/upgrade/resources.sh
     openstack stack create -t $loc/templates/random_string.yaml $stack_name
+=======
+    heat stack-create -f $loc/templates/random_string.yaml $stack_name
+>>>>>>> devstack/upgrade/resources.sh
 }
 
 function verify {
@@ -147,7 +158,11 @@ function verify {
         fi
     fi
     stack_name=$(resource_get heat stack_name)
+<<<<<<< devstack/upgrade/resources.sh
     openstack stack show $stack_name
+=======
+    heat stack-show $stack_name
+>>>>>>> devstack/upgrade/resources.sh
     # TODO(sirushtim): Create more granular checks for Heat.
 }
 
@@ -159,7 +174,11 @@ function verify_noapi {
 
 function destroy {
     _heat_set_user
+<<<<<<< devstack/upgrade/resources.sh
     openstack stack delete -y $(resource_get heat stack_name)
+=======
+    heat stack-delete $(resource_get heat stack_name)
+>>>>>>> devstack/upgrade/resources.sh
 
     source $TOP_DIR/openrc admin admin
     local user_id=$(resource_get heat user_id)
diff --git a/devstack/upgrade/shutdown.sh b/devstack/upgrade/shutdown.sh
index f512e8d..8c206d0 100755
--- a/devstack/upgrade/shutdown.sh
+++ b/devstack/upgrade/shutdown.sh
@@ -30,6 +30,13 @@ set -o xtrace
 
 stop_heat
 
+<<<<<<< devstack/upgrade/shutdown.sh
+=======
+# stop cloudwatch service if running
+# TODO(ramishra): Remove it after Queens
+stop_cw_service
+
+>>>>>>> devstack/upgrade/shutdown.sh
 SERVICES_DOWN="heat-api heat-engine heat-api-cfn"
 
 # sanity check that services are actually down
diff --git a/doc/source/_extra/.htaccess b/doc/source/_extra/.htaccess
index 630df38..cb93e67 100644
--- a/doc/source/_extra/.htaccess
+++ b/doc/source/_extra/.htaccess
@@ -1,5 +1,13 @@
+<<<<<<< doc/source/_extra/.htaccess
 redirectmatch 301 ^/heat/([^/]+)/(architecture|pluginguide|schedulerhints|gmr|supportstatus)\.html$ /heat/$1/developing_guides/$2.html
 redirectmatch 301 ^/heat/([^/]+)/(scale_deployment)\.html$ /heat/$1/operating_guides/$2.html
 redirectmatch 301 ^/heat/([^/]+)/configuration/(api|clients)\.html /heat/$1/configuration/config-options.html
 redirectmatch 301 ^/heat/([^/]+)/contributing/(index|blueprints)\.html /heat/$1/developing_guides/$2.html
 redirectmatch 301 ^/heat/([^/]+)/contributor/(blueprints)\.html /heat/$1/developing_guides/$2.html
+=======
+redirectmatch 301 ^/heat/([^/]+)/(architecture|pluginguide|schedulerhints|gmr|supportstatus)\.html$ /heat/$1/contributor/$2.html
+redirectmatch 301 ^/heat/([^/]+)/developing_guides/(index|architecture|pluginguide|schedulerhints|gmr|supportstatus)\.html$ /heat/$1/contributor/$2.html
+redirectmatch 301 ^/heat/([^/]+)/(scale_deployment)\.html$ /heat/$1/operating_guides/$2.html
+redirectmatch 301 ^/heat/([^/]+)/configuration/(api|clients)\.html /heat/$1/configuration/config-options.html
+redirectmatch 301 ^/heat/([^/]+)/contributing/(index|blueprints)\.html /heat/$1/contributor/$2.html
+>>>>>>> doc/source/_extra/.htaccess
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index 450c895..a3f859c 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -1,6 +1,12 @@
+<<<<<<< doc/source/admin/index.rst
 ==================
 Administering Heat
 ==================
+=======
+=================
+Adminstering Heat
+=================
+>>>>>>> doc/source/admin/index.rst
 
 .. toctree::
    :maxdepth: 2
diff --git a/doc/source/admin/stack-domain-users.rst b/doc/source/admin/stack-domain-users.rst
index 4818554..c63b062 100644
--- a/doc/source/admin/stack-domain-users.rst
+++ b/doc/source/admin/stack-domain-users.rst
@@ -124,7 +124,11 @@ The following steps are run during stack creation:
    perspective) to the stack owners project. The users who are created
    in the stack domain are still assigned the ``heat_stack_user`` role, so
    the API surface they can access is limited through
+<<<<<<< doc/source/admin/stack-domain-users.rst
    the :file:`policy.yaml` file.
+=======
+   the :file:`policy.json` file.
+>>>>>>> doc/source/admin/stack-domain-users.rst
    For more  information, see :keystone-doc:`OpenStack Identity documentation
    <>`.
 
@@ -133,7 +137,11 @@ The following steps are run during stack creation:
    retrieved. Details are retrieved from the database for
    both the stack owner's project (the default
    API path to the stack) and the stack domain project, subject to the
+<<<<<<< doc/source/admin/stack-domain-users.rst
    :file:`policy.yaml` restrictions.
+=======
+   :file:`policy.json` restrictions.
+>>>>>>> doc/source/admin/stack-domain-users.rst
 
 This means there are now two paths that
 can result in the same data being retrieved through the Orchestration API.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 913262c..0fbcf0b 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -73,7 +73,11 @@ extensions = ['sphinx.ext.autodoc',
               'sphinx.ext.viewcode',
               'sphinx.ext.todo',
               'sphinx.ext.coverage',
+<<<<<<< doc/source/conf.py
               'sphinx.ext.intersphinx',
+=======
+              'sphinx.ext.viewcode',
+>>>>>>> doc/source/conf.py
               'sphinx.ext.doctest',
               'sphinxcontrib.apidoc',
               'openstackdocstheme',
@@ -85,10 +89,13 @@ extensions = ['sphinx.ext.autodoc',
               'ext.tablefromtext',
               'stevedore.sphinxext']
 
+<<<<<<< doc/source/conf.py
 intersphinx_mapping = {
     'types_typedecorator': ('https://docs.sqlalchemy.org', None),
 }
 
+=======
+>>>>>>> doc/source/conf.py
 # policy sample file generation
 policy_generator_config_file = '../../etc/heat/heat-policy-generator.conf'
 sample_policy_basename = '_static/heat'
diff --git a/doc/source/configuration/sample_policy.rst b/doc/source/configuration/sample_policy.rst
index 0b9ce09..99e7d42 100644
--- a/doc/source/configuration/sample_policy.rst
+++ b/doc/source/configuration/sample_policy.rst
@@ -2,6 +2,7 @@
 Heat Sample Policy
 ==================
 
+<<<<<<< doc/source/configuration/sample_policy.rst
 .. warning::
 
    JSON formatted policy file is deprecated since Heat 17.0.0 (Xena).
@@ -10,6 +11,8 @@ Heat Sample Policy
 
 .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html
 
+=======
+>>>>>>> doc/source/configuration/sample_policy.rst
 The following is a sample heat policy file that has been auto-generated
 from default policy values in code. If you're using the default policies, then
 the maintenance of this file is not necessary, and it should not be copied into
diff --git a/doc/source/contributor/architecture.rst b/doc/source/contributor/architecture.rst
new file mode 100644
index 0000000..17ba510
--- /dev/null
+++ b/doc/source/contributor/architecture.rst
@@ -0,0 +1,96 @@
+..
+      Copyright 2011-2012 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+=================
+Heat architecture
+=================
+
+Heat is a service to orchestrate multiple composite cloud applications using
+the `AWS CloudFormation`_ template format, through both an OpenStack-native
+REST API and a CloudFormation-compatible Query API.
+
+
+Detailed description
+~~~~~~~~~~~~~~~~~~~~
+
+What is the purpose of the project and vision for it?
+
+*Heat provides an AWS CloudFormation implementation for OpenStack that
+orchestrates an AWS CloudFormation template describing a cloud application by
+executing appropriate OpenStack API calls to generate running cloud
+applications.*
+
+Describe the relevance of the project to other OpenStack projects and the
+OpenStack mission to provide a ubiquitous cloud computing platform:
+
+*The software integrates other core components of OpenStack into a one-file
+template system. The templates allow creation of most OpenStack resource types
+(such as instances, floating IPs, volumes, security groups and users), as well
+as some more advanced functionality such as instance high availability,
+instance autoscaling, and nested stacks. By providing very tight integration
+with other OpenStack core projects, all OpenStack core projects could receive
+a larger user base.*
+
+*Currently no other CloudFormation implementation exists for OpenStack. The
+developers believe cloud developers have a strong desire to move workloads
+from AWS to OpenStack deployments. Given the missing gap of a well-implemented
+and integrated CloudFormation API in OpenStack, we provide a high quality
+implementation of this gap improving the ubiquity of OpenStack.*
+
+
+Heat services
+~~~~~~~~~~~~~
+
+The developers are focused on creating an OpenStack style project using
+OpenStack design tenets, implemented in Python. We have started with full
+integration with keystone. We have a number of components.
+
+As the developers only started development in March 2012, the architecture is
+architecture is evolving rapidly.
+
+heat
+----
+
+The heat tool is a CLI which communicates with the heat-api to execute AWS
+CloudFormation APIs. End developers could also use the heat REST API directly.
+
+
+heat-api
+--------
+
+The heat-api component provides an OpenStack-native REST API that processes
+API requests by sending them to the heat-engine over RPC.
+
+
+heat-api-cfn
+------------
+
+The heat-api-cfn component provides an AWS Query API that is compatible with
+AWS CloudFormation and processes API requests by sending them to the
+heat-engine over RPC.
+
+
+heat-engine
+-----------
+
+The heat-engine's main responsibility is to orchestrate the launching of
+templates and provide events back to the API consumer.
+
+The templates integrate well with Puppet_ and Chef_.
+
+.. _Puppet: https://s3.amazonaws.com/cloudformation-examples/IntegratingAWSCloudFormationWithPuppet.pdf
+.. _Chef: https://www.full360.com/2011/02/27/integrating-aws-cloudformation-and-chef.html
+.. _`AWS CloudFormation`: https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/Welcome.html?r=7078
diff --git a/doc/source/contributor/blueprints.rst b/doc/source/contributor/blueprints.rst
new file mode 100644
index 0000000..e8d7f74
--- /dev/null
+++ b/doc/source/contributor/blueprints.rst
@@ -0,0 +1,84 @@
+Blueprints and Specs
+====================
+
+The Heat team uses the `heat-specs
+<https://opendev.org/openstack/heat-specs>`_ repository for its
+specification reviews. Detailed information can be found `here
+<https://wiki.openstack.org/wiki/Blueprints#Heat>`_.
+
+Please note that we use a template for spec submissions. Please use the
+`template for the latest release
+<https://opendev.org/openstack/heat-specs/src/branch/master/specs/templates>`_.
+It is not required to fill out all sections in the template.
+
+You have to create a story in the `heat StoryBoard project
+<https://storyboard.openstack.org/#!/project/989>`_ and create tasks that
+match the plan for implementing the spec (each task links to a patch in Gerrit).
+
+Spec Notes
+----------
+
+
+There are occasions when a spec is approved and the code does not land in
+the cycle it was targeted for. For these cases, the workflow to get the spec
+into the next release is as below:
+
+* Anyone can propose a patch to heat-specs which moves a spec from the
+  previous release backlog into the new release directory.
+
+The specs which are moved in this way can be fast-tracked into the next
+release. Please note that it is required to re-propose the spec for the new
+release and it'll be evaluated based on the resources available and cycle
+priorities.
+
+Heat Spec Lite
+--------------
+
+Lite specs are small feature requests tracked as StoryBoard stories and tagged
+with the 'spec-lite' and 'priority-wishlist' tags. They allow such feature
+requests to be submitted and reviewed before any code is submitted.
+
+These can be used for small features that don’t warrant a detailed spec to be
+proposed, evaluated, and worked on. The team evaluates these requests as it
+evaluates specs.
+
+Once a `spec-lite` story has been approved/triaged as a
+Request for Enhancement (RFE), it will be targeted for a release.
+
+The workflow for the life of a spec-lite in StoryBoard is as follows:
+
+* File a story with a short summary of the requested change and tag it as
+  `spec-lite` and `priority-wishlist`.
+* Create tasks in the story that match your implementation plan.
+* The story is evaluated and tagged `triaged` to announce approval, or marked
+  `Invalid` to request a full spec or to indicate that the request is not valid.
+* A task is moved to `Progress` once the code is up and ready for review.
+* A task is moved to `Merged` once its patch lands.
+* The story is moved to `Merged` once all of its tasks are merged.
+
+The drivers team will discuss the following story reports in IRC meetings:
+
+* `heat stories <https://storyboard.openstack.org/#!/project_group/82>`_
+* `heat story filter <https://storyboard.openstack.org/#!/board/71>`_
+
+
+Lite spec Submission Guidelines
+-------------------------------
+
+When a story is submitted, there is one field that must be filled in: ‘Description’.
+
+The ‘Description’ field must describe what you would like to see implemented
+in heat. It should provide enough detail for a knowledgeable developer to
+understand the existing problem and the proposed solution.
+
+Add `spec-lite` tag to the story.
+
+
+Lite spec from existing stories
+-------------------------------
+
+If there's an already existing story that describes a small feature suitable for
+a spec-lite, add a `spec-lite` tag to the story. There is no need to create a new
+story. The comments and history of the existing story are important for its review.
diff --git a/doc/source/contributor/gmr.rst b/doc/source/contributor/gmr.rst
new file mode 100644
index 0000000..26d1d41
--- /dev/null
+++ b/doc/source/contributor/gmr.rst
@@ -0,0 +1,93 @@
+..
+      Copyright (c) 2014 OpenStack Foundation
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+=======================
+Guru Meditation Reports
+=======================
+
+Heat contains a mechanism whereby developers and system administrators can
+generate a report about the state of a running Heat executable.  This report
+is called a *Guru Meditation Report* (*GMR* for short).
+
+
+Generating a GMR
+~~~~~~~~~~~~~~~~
+
+A *GMR* can be generated by sending the *USR2* signal to any Heat process that
+supports it (see below).  The *GMR* will then be output to standard error for
+that particular process.
+
+For example, suppose that ``heat-api`` has process id ``10172``, and was run
+with ``2>/var/log/heat/heat-api-err.log``.  Then, ``kill -USR2 10172`` will
+trigger the Guru Meditation report to be printed to
+``/var/log/heat/heat-api-err.log``.
+
+
+Structure of a GMR
+~~~~~~~~~~~~~~~~~~
+
+The *GMR* is designed to be extensible; any particular executable may add its
+own sections.  However, the base *GMR* consists of several sections:
+
+Package
+  Shows information about the package to which this process belongs, including
+  version information
+
+Threads
+  Shows stack traces and thread ids for each of the threads within this process
+
+Green Threads
+  Shows stack traces for each of the green threads within this process (green
+  threads don't have thread ids)
+
+Configuration
+  Lists all the configuration options currently accessible via the CONF object
+  for the current process
+
+
+Adding support for GMRs to a new executable
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Adding support for a *GMR* to a given executable is fairly easy.
+
+First import the module (provided by the ``oslo.reports`` library), as well as
+the Heat version module:
+
+.. code-block:: python
+
+      from oslo_reports import guru_meditation_report as gmr
+      from heat import version
+
+Then, register any additional sections (optional):
+
+.. code-block:: python
+
+      gmr.TextGuruMeditation.register_section('Some Special Section',
+                                              some_section_generator)
+
+Finally (under main), before running the "main loop" of the executable
+(usually ``server.start()`` or something similar), register the *GMR* hook:
+
+.. code-block:: python
+
+      gmr.TextGuruMeditation.setup_autorun(version)
+
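+Putting these pieces together, a minimal sketch of a service entry point might
+look like the following; ``create_api_server`` is an assumed placeholder for
+whatever builds the service, not a real Heat helper:
+
+.. code-block:: python
+
+      from oslo_reports import guru_meditation_report as gmr
+
+      from heat import version
+
+
+      def main():
+          # Install the GMR signal handler before entering the main loop
+          gmr.TextGuruMeditation.setup_autorun(version)
+
+          server = create_api_server()  # assumed placeholder
+          server.start()
+          server.wait()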
+
+Extending the GMR
+~~~~~~~~~~~~~~~~~
+
+As mentioned above, additional sections can be added to the GMR for a
+particular executable. For more information, see the documentation about
+:oslo.reports-doc:`oslo.reports <>`.
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
index f5ad159..1977f6f 100644
--- a/doc/source/contributor/index.rst
+++ b/doc/source/contributor/index.rst
@@ -1,11 +1,32 @@
 Heat Contributor Guidelines
 ===========================
 
+<<<<<<< doc/source/contributor/index.rst
 .. toctree::
    :maxdepth: 3
 
    contributing
 
+=======
+In the contributor guide, you will find documented policies for
+developing with heat. This includes the processes we use for
+blueprints and specs, bugs, contributor onboarding, core reviewer
+memberships, and other procedural items.
+
+.. note:: This guideline also includes documentation for developers.
+
+.. toctree::
+   :maxdepth: 3
+
+   ../getting_started/on_devstack
+   blueprints
+   architecture
+   pluginguide
+   schedulerhints
+   gmr
+   supportstatus
+   rally_on_gates
+>>>>>>> doc/source/contributor/index.rst
 .. bugs
    contributor-onboarding
    core-reviewers
diff --git a/doc/source/contributor/pluginguide.rst b/doc/source/contributor/pluginguide.rst
new file mode 100644
index 0000000..d8d4334
--- /dev/null
+++ b/doc/source/contributor/pluginguide.rst
@@ -0,0 +1,726 @@
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+=======================================
+Heat Resource Plug-in Development Guide
+=======================================
+Heat allows service providers to extend the capabilities of the orchestration
+service by writing their own resource plug-ins. These plug-ins are written in
+Python and included in a directory configured by the service provider. This
+guide describes a resource plug-in structure and life cycle in order to assist
+developers in writing their own resource plug-ins.
+
+Resource Plug-in Life Cycle
+---------------------------
+A resource plug-in is relatively simple in that it needs to extend a base
+``Resource`` class and implement some relevant life cycle handler methods.
+The basic life cycle methods of a resource are:
+
+create
+  The plug-in should create a new physical resource.
+
+update
+  The plug-in should update an existing resource with new
+  configuration or tell the engine that the resource must be destroyed
+  and re-created.  This method is optional; the default behavior is to
+  create a replacement resource and then delete the old resource.
+
+suspend
+  The plug-in should suspend operation of the physical resource; this is
+  an optional operation.
+
+resume
+  The plug-in should resume operation of the physical resource; this is an
+  optional operation.
+
+delete
+  The plug-in should delete the physical resource.
+
+The base class ``Resource`` implements each of these life cycle methods and
+defines one or more handler methods that plug-ins can implement in order
+to manifest and manage the actual physical resource abstracted by the plug-in.
+These handler methods will be described in detail in the following sections.
+
+Heat Resource Base Class
+++++++++++++++++++++++++
+Plug-ins must extend the class ``heat.engine.resource.Resource``.
+
+This class is responsible for managing the overall life cycle of the plug-in.
+It defines methods corresponding to the life cycle as well as the basic hooks
+for plug-ins to handle the work of communicating with specific down-stream
+services. For example, when the engine determines it is time to create a
+resource, it calls the ``create`` method of the applicable plug-in. This method
+is implemented in the ``Resource`` base class and handles most of the
+bookkeeping and interaction with the engine. This method then calls a
+``handle_create`` method defined in the plug-in class (if implemented) which is
+responsible for using specific service calls or other methods needed to
+instantiate the desired physical resource (server, network, volume, etc).
+
+Resource Status and Action
+**************************
+
+The base class handles reporting state of the resource back to the engine.
+A resource's state is the combination of the life cycle action and the status
+of that action. For example, if a resource is created successfully, the state
+of that resource will be ``CREATE_COMPLETE``. Alternatively, if the plug-in
+encounters an error when attempting to create the physical resource, the
+state would be ``CREATE_FAILED``. The base class handles the
+reporting and persisting of resource state, so a plug-in's handler
+methods only need to return data or raise exceptions as appropriate.
+
+Resource Support Status
+***********************
+
+A new resource should be marked with the *support_status* option to indicate
+the OpenStack release in which it first becomes available. For more details,
+see :ref:`supportstatus`.
+
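+For example, a resource introduced in a hypothetical 24.0.0 release might
+declare (the version number is purely illustrative):
+
+.. code-block:: python
+
+    from heat.engine import resource
+    from heat.engine import support
+
+
+    class CustomResource(resource.Resource):
+        """Example resource declaring when it became available."""
+
+        support_status = support.SupportStatus(version='24.0.0')
+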
+Resource description
+********************
+
+An important part of any new resource is a concisely written description. It
+should be in the class docstring and contain information about the resource and
+how it could be useful to the end user. The docstring description is used in
+documentation generation and should always be defined if the resource is
+designed for public use. The docstring should follow `PEP 257
+<https://www.python.org/dev/peps/pep-0257/>`_.
+
+.. code-block:: python
+
+    class CustomResource(resource.Resource):
+        """This custom resource has description.
+
+        Now end-users could understand the meaning of the resource existing
+        and will use it correctly without any additional questions.
+        """
+
+Properties and Attributes
++++++++++++++++++++++++++
+A resource's *properties* define the settings the template author can
+manipulate when including that resource in a template. Some examples would be:
+
+* Which flavor and image to use for a Nova server
+* The port to listen to on Neutron LBaaS nodes
+* The size of a Cinder volume
+
+.. note::
+
+   Properties should normally be accessed through self.properties.
+   This resolves intrinsic functions, provides default values when required
+   and performs property translation for backward compatible schema changes.
+   The self.properties.data dict provides access to the raw data supplied by
+   the user in the template without any of those transformations.
+
+*Attributes* describe runtime state data of the physical resource that the
+plug-in can expose to other resources in a Stack. Generally, these aren't
+available until the physical resource has been created and is in a usable
+state. Some examples would be:
+
+* The host id of a Nova server
+* The status of a Neutron network
+* The creation time of a Cinder volume
+
+Defining Resource Properties
+****************************
+Each property that a resource supports must be defined in a schema that informs
+the engine and validation logic what the properties are, what type each is, and
+which validation constraints apply. The schema is a dictionary whose keys define
+property names and whose values describe the constraints on that property. This
+dictionary must be assigned to the ``properties_schema`` attribute of the
+plug-in.
+
+.. code-block:: python
+
+    from heat.common.i18n import _
+    from heat.engine import constraints
+    from heat.engine import properties
+
+        nested_schema = {
+            "foo": properties.Schema(
+                properties.Schema.STRING,
+                _('description of foo field'),
+                constraints=[
+                    constraints.AllowedPattern('(Ba[rc]?)+'),
+                    constraints.Length(max=10,
+                                       description="don't go crazy")
+                ]
+            )
+        }
+        properties_schema = {
+            "property_name": properties.Schema(
+                properties.Schema.MAP,
+                _('Internationalized description of property'),
+                required=True,
+                default={"Foo": "Bar"},
+                schema=nested_schema
+            )
+        }
+
+As shown above, some properties may themselves be complex and
+reference nested schema definitions. Following are the parameters to the
+``Schema`` constructor; all but the first have defaults.
+
+*data_type*:
+
+        Defines the type of the property's value. The valid types are
+        the members of the list ``properties.Schema.TYPES``, currently
+        ``INTEGER``, ``STRING``, ``NUMBER``, ``BOOLEAN``, ``MAP``, ``LIST``
+        and ``ANY``; please use those symbolic names rather than the
+        literals to which they are equated. For ``LIST`` and ``MAP``
+        type properties, the ``schema`` referenced constrains the
+        format of complex items in the list or map.
+
+*description*:
+  A description of the property and its function; also used in documentation
+  generation.  Default is ``None`` --- but you should always provide a
+  description.
+
+*default*:
+  The default value to assign to this property if none was supplied in the
+  template.  Default is ``None``.
+
+*schema*:
+  This property's value is complex and its members must conform to
+  this referenced schema in order to be valid. The referenced schema
+  dictionary has the same format as the ``properties_schema``. Default
+  is ``None``.
+
+*required*:
+        ``True`` if the property must have a value for the template to be valid;
+        ``False`` otherwise. The default is ``False``.
+
+*constraints*:
+  A list of constraints that apply to the property's value.  See
+  `Property Constraints`_.
+
+*update_allowed*:
+  ``True`` if an existing resource can be updated in place; ``False`` means
+  an update is accomplished by delete and re-create.  Default is ``False``.
+
+*immutable*:
+  ``True`` means updates are not supported and a stack update will fail on
+  every change of this property; ``False`` otherwise. Default is ``False``.
+
+*support_status*:
+  Defines current status of the property. Read :ref:`supportstatus` for
+  details.
+
+Accessing property values of the plug-in at runtime is then a simple call to:
+
+.. code-block:: python
+
+        self.properties['PropertyName']
+
+Based on the property type, properties without a set value will return the
+default "empty" value for that type:
+
+======= ============
+Type    Empty Value
+======= ============
+String      ''
+Number      0
+Integer     0
+List        []
+Map         {}
+Boolean     False
+======= ============
+
+Property Constraints
+********************
+
+Following are the available kinds of constraints.  The description is
+optional and, if given, states the constraint in plain language for
+the end user.
+
+*AllowedPattern(regex, description)*:
+  Constrains the value to match the given regular expression;
+  applicable to STRING.
+
+*AllowedValues(allowed, description)*:
+  Lists the allowed values.  ``allowed`` must be a
+  ``collections.Sequence`` or ``basestring``.  Applicable to all types
+  of value except MAP.
+
+*Length(min, max, description)*:
+  Constrains the length of the value.  Applicable to STRING, LIST,
+  MAP.  Both ``min`` and ``max`` default to ``None``.
+
+*Range(min, max, description)*:
+  Constrains a numerical value.  Applicable to INTEGER and NUMBER.
+  Both ``min`` and ``max`` default to ``None``.
+
+*Modulo(step, offset, description)*:
+  Starting with the specified ``offset``, every multiple of ``step`` is a valid
+  value. Applicable to INTEGER and NUMBER.
+
+  Available from template version 2017-02-24.
+
+*CustomConstraint(name, description, environment)*:
+  This constructor brings in a named constraint class from an
+  environment.  If the given environment is ``None`` (its default)
+  then the environment used is the global one.
+
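+As an illustration only (the property names are arbitrary and the imports are
+the same as in the schema example above), several constraints can be combined
+on a single property:
+
+.. code-block:: python
+
+        properties_schema = {
+            'replica_count': properties.Schema(
+                properties.Schema.INTEGER,
+                _('An odd replica count between 1 and 9.'),
+                constraints=[
+                    constraints.Range(1, 9),
+                    constraints.Modulo(2, 1, _('must be odd'))
+                ]
+            ),
+            'size_label': properties.Schema(
+                properties.Schema.STRING,
+                _('A named size.'),
+                constraints=[
+                    constraints.AllowedValues(['small', 'medium', 'large'])
+                ]
+            )
+        }
+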
+Defining Resource Attributes
+****************************
+Attributes communicate runtime state of the physical resource. Note that some
+plug-ins do not define any attributes and doing so is optional. If the plug-in
+needs to expose attributes, it will define an ``attributes_schema`` similar to
+the properties schema described above. Each item in the schema dictionary
+consists of an attribute name and an attribute Schema object.
+
+.. code-block:: python
+
+        attributes_schema = {
+            "foo": attributes.Schema(
+                _("The foo attribute"),
+                type=attributes.Schema.STRING
+            ),
+            "bar": attributes.Schema(
+                _("The bar attribute"),
+                type=attributes.Schema.STRING
+            ),
+            "baz": attributes.Schema(
+                _("The baz attribute"),
+                type=attributes.Schema.STRING
+            )
+        }
+
+Following are the parameters to the Schema.
+
+*description*
+  A description of the attribute; also used in documentation
+  generation.  Default is ``None`` --- but you should always provide a
+  description.
+
+*type*
+  Defines the type of attribute value. The valid types are
+  the members of the list ``attributes.Schema.TYPES``, currently
+  ``STRING``, ``NUMBER``, ``BOOLEAN``, ``MAP``, and ``LIST``; please use
+  those symbolic names rather than the literals to which they are equated.
+
+*support_status*
+  Defines current status of the attribute. Read :ref:`supportstatus` for
+  details.
+
+If attributes are defined, their values must also be resolved by the plug-in.
+The simplest way to do this is to override the ``_resolve_attribute`` method
+from the ``Resource`` class:
+
+.. code-block:: python
+
+        def _resolve_attribute(self, name):
+            # _example_get_physical_resource is just an example and is not
+            # defined in the Resource class
+            phys_resource = self._example_get_physical_resource()
+            if phys_resource:
+                if not hasattr(phys_resource, name):
+                    # this is usually not needed, but this is a simple
+                    # example
+                    raise exception.InvalidTemplateAttribute(name)
+                return getattr(phys_resource, name)
+            return None
+
+If the plug-in needs to be more sophisticated in its attribute resolution, the
+plug-in may instead choose to override ``FnGetAtt``. However, if this method is
+chosen, validation and accessibility of the attribute would be the plug-in's
+responsibility.
+
+Also, each resource has a ``show`` attribute by default. It uses the default
+implementation from the ``heat.engine.resource.Resource`` class, but if a
+resource needs a different way of resolving the ``show`` attribute, the
+``_show_resource`` method from the ``Resource`` class will need to be
+overridden:
+
+.. code-block:: python
+
+       def _show_resource(self):
+           """Default implementation; should be overridden by resources.
+
+           :returns: the map of resource information or None
+           """
+           if self.entity:
+               try:
+                   obj = getattr(self.client(), self.entity)
+                   resource = obj.get(self.resource_id)
+                   if isinstance(resource, dict):
+                       return resource
+                   else:
+                       return resource.to_dict()
+               except AttributeError as ex:
+                   LOG.warning("Resolving 'show' attribute has failed : %s",
+                               ex)
+                   return None
+
+Property and Attribute Example
+******************************
+Assume the following simple property and attribute definition:
+
+.. code-block:: python
+
+        properties_schema = {
+            'foo': properties.Schema(
+                properties.Schema.STRING,
+                _('foo prop description'),
+                default='foo',
+                required=True
+            ),
+            'bar': properties.Schema(
+                properties.Schema.INTEGER,
+                _('bar prop description'),
+                required=True,
+                constraints=[
+                    constraints.Range(5, 10)
+                ]
+            )
+        }
+
+        attributes_schema = {
+            'Attr_1': attributes.Schema(
+                _('The first attribute'),
+                support_status=support.SupportStatus(version='5.0.0'),
+                type=attributes.Schema.STRING
+            ),
+            'Attr_2': attributes.Schema(
+                _('The second attribute'),
+                type=attributes.Schema.MAP
+            )
+        }
+
+Also assume the plug-in defining the above has been registered under the
+template reference name 'Resource::Foo' (see `Registering Resource Plug-ins`_).
+A template author could then use this plug-in in a stack by simply making the
+following declarations in a template:
+
+.. code-block:: yaml
+
+        # ... other sections omitted for brevity ...
+
+        resources:
+          resource-1:
+            type: Resource::Foo
+            properties:
+              foo: Value of the foo property
+              bar: 7
+
+        outputs:
+          foo-attrib-1:
+            value: { get_attr: [resource-1, Attr_1] }
+            description: The first attribute of the foo resource
+          foo-attrib-2:
+            value: { get_attr: [resource-1, Attr_2] }
+            description: The second attribute of the foo resource
+
+Life Cycle Handler Methods
+++++++++++++++++++++++++++
+To do the work of managing the physical resource the plug-in supports, the
+following life cycle handler methods should be implemented. Note that the
+plug-in need not implement *all* of these methods; optional handlers will
+be documented as such.
+
+Generally, the handler methods follow a basic pattern. The basic
+handler method for any life cycle step follows the format
+``handle_<life cycle step>``. So for the create step, the handler
+method would be ``handle_create``. Once a handler is called, an
+optional ``check_<life cycle step>_complete`` may also be implemented
+so that the plug-in may return immediately from the basic handler and
+then take advantage of cooperative multi-threading built in to the
+base class and periodically poll a down-stream service for completion;
+the check method is polled until it returns ``True``. Again, for the
+create step, this method would be ``check_create_complete``.
+
+Create
+******
+.. py:function:: handle_create(self)
+
+  Create a new physical resource. This function should make the required
+  calls to create the physical resource and return as soon as there is enough
+  information to identify the resource. The function should return this
+  identifying information and implement ``check_create_complete`` which will
+  take this information in as a parameter and then periodically be polled.
+  This allows for cooperative multi-threading between multiple resources that
+  have had their dependencies satisfied.
+
+  *Note* once the native identifier of the physical resource is known, this
+  function should call ``self.resource_id_set`` passing the native identifier
+  of the physical resource. This will persist the identifier and make it
+  available to the plug-in by accessing ``self.resource_id``.
+
+  :returns: A representation of the created physical resource
+  :raise: any ``Exception`` if the create failed
+
+.. py:function:: check_create_complete(self, token)
+
+  If defined, this will be called with the return value of ``handle_create``.
+
+  :param token: the return value of ``handle_create``; used to poll the
+                physical resource's status.
+  :returns: ``True`` if the physical resource is active and ready for use;
+            ``False`` otherwise.
+  :raise: any ``Exception`` if the create failed.
+
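+A minimal sketch of these two handlers, assuming a Nova-like client plug-in
+and purely illustrative property names, might look like:
+
+.. code-block:: python
+
+        def handle_create(self):
+            # Ask the backing service to start building the resource
+            server = self.client().servers.create(
+                name=self.properties['name'],
+                image=self.properties['image'],
+                flavor=self.properties['flavor'])
+            # Persist the native identifier as soon as it is known
+            self.resource_id_set(server.id)
+            return server.id
+
+        def check_create_complete(self, server_id):
+            # Poll the service until the resource becomes usable
+            server = self.client().servers.get(server_id)
+            return server.status == 'ACTIVE'
+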
+Update (Optional)
+*****************
+Note that there is a default implementation of ``handle_update`` in
+``heat.engine.resource.Resource`` that simply raises an exception indicating
+that updates require the engine to delete and re-create the resource
+(this is the default behavior) so implementing this is optional.
+
+.. py:function:: handle_update(self, json_snippet, tmpl_diff, prop_diff)
+
+  Update the physical resources using updated information.
+
+  :param json_snippet: the resource definition from the updated template
+  :type json_snippet: collections.Mapping
+  :param tmpl_diff: values in the updated definition that have changed
+                    with respect to the original template definition.
+  :type tmpl_diff: collections.Mapping
+  :param prop_diff: property values that are different between the original
+                    definition and the updated definition; keys are
+                    property names and values are the new values. Properties
+                    that were deleted, or that were originally present but are
+                    now absent, have values of ``None``.
+  :type prop_diff: collections.Mapping
+
+  *Note*: before calling ``handle_update``, the engine checks whether the
+  resource needs to be replaced, in particular for resources in a ``*_FAILED``
+  state. There is a default implementation of ``needs_replace_failed`` in
+  ``heat.engine.resource.Resource`` that simply returns ``True``, indicating
+  that updates require replacement. The implementation is overridden for
+  ``OS::Nova::Server``, ``OS::Cinder::Volume`` and all of the neutron
+  resources; the basic principle is to check whether the underlying resource
+  still exists and whether its real status is usable. Override
+  ``needs_replace_failed`` for your own resource plug-ins if needed.
+
+.. py:function:: check_update_complete(self, token)
+
+  If defined, this will be called with the return value of ``handle_update``.
+
+  :param token: the return value of ``handle_update``; used to poll the
+                physical resource's status.
+  :returns: ``True`` if the update has finished;
+            ``False`` otherwise.
+  :raise: any ``Exception`` if the update failed.
+
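+Continuing the illustrative Nova-like example above, an in-place update of a
+single hypothetical property might look like:
+
+.. code-block:: python
+
+        def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+            # Only the illustrative 'name' property is updated in place here;
+            # any other change falls back to the default replacement behaviour
+            if 'name' in prop_diff:
+                self.client().servers.update(
+                    self.resource_id, name=prop_diff['name'])
+
+        def check_update_complete(self, token):
+            # Nothing asynchronous to wait for in this sketch
+            return True
+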
+Suspend (Optional)
+******************
+*These handler functions are optional and only need to be implemented if the
+physical resource supports suspending*
+
+.. py:function:: handle_suspend(self)
+
+  If the physical resource supports it, this function should call the native
+  API and suspend the resource's operation. This function should return
+  information sufficient for ``check_suspend_complete`` to poll the native
+  API to verify the operation's status.
+
+  :return: a token containing enough information for ``check_suspend_complete``
+           to verify operation status.
+  :raise: any ``Exception`` if the suspend operation fails.
+
+.. py:function:: check_suspend_complete(self, token)
+
+  Verify the suspend operation completed successfully.
+
+  :param token: the return value of ``handle_suspend``
+  :return: ``True`` if the suspend operation completed and the physical
+           resource is now suspended; ``False`` otherwise.
+  :raise: any ``Exception`` if the suspend operation failed.
+
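+For the same illustrative server-backed resource, the suspend handlers could
+be as simple as:
+
+.. code-block:: python
+
+        def handle_suspend(self):
+            # Suspension is asynchronous in the assumed backing service
+            self.client().servers.suspend(self.resource_id)
+            return self.resource_id
+
+        def check_suspend_complete(self, server_id):
+            server = self.client().servers.get(server_id)
+            return server.status == 'SUSPENDED'
+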
+Resume (Optional)
+*****************
+*These handler functions are optional and only need to be implemented if the
+physical resource supports resuming from a suspended state*
+
+.. py:function:: handle_resume(self)
+
+  If the physical resource supports it, this function should call the native
+  API and resume a suspended resource's operation. This function should return
+  information sufficient for ``check_resume_complete`` to poll the native
+  API to verify the operation's status.
+
+  :return: a token containing enough information for ``check_resume_complete``
+           to verify operation status.
+  :raise: any ``Exception`` if the resume operation fails.
+
+.. py:function:: check_resume_complete(self, token)
+
+  Verify the resume operation completed successfully.
+
+  :param token: the return value of ``handle_resume``
+  :return: ``True`` if the resume operation completed and the physical resource
+           is now active; ``False`` otherwise.
+  :raise: any ``Exception`` if the resume operation failed.
+
+
+Delete
+******
+.. py:function:: handle_delete(self)
+
+  Delete the physical resource.
+
+  :return: a token containing sufficient data to verify the operation's status
+  :raise: any ``Exception`` if the delete operation failed
+
+  .. note::
+     As of the Liberty release, implementing handle_delete is optional. The
+     parent resource class can handle the most common pattern for deleting
+     resources:
+
+     .. code-block:: python
+
+        def handle_delete(self):
+            if self.resource_id is not None:
+                try:
+                    self.client().<entity>.delete(self.resource_id)
+                except Exception as ex:
+                    self.client_plugin().ignore_not_found(ex)
+                    return None
+                return self.resource_id
+
+     For this to work for a particular resource, the `entity` and
+     `default_client_name` attributes must be overridden in the resource
+     implementation. For example, `entity` of the Aodh Alarm resource should
+     equal "alarm" and `default_client_name` should equal "aodh".
+
+.. py:function:: handle_delete_snapshot(self, snapshot)
+
+  Delete resource snapshot.
+
+  :param snapshot: dictionary describing current snapshot.
+  :return: a token containing sufficient data to verify the operation's status
+  :raise: any ``Exception`` if the delete operation failed
+
+.. py:function:: handle_snapshot_delete(self, state)
+
+  Called instead of ``handle_delete`` when the deletion policy is SNAPSHOT.
+  Create a backup of the resource and then delete the resource.
+
+  :param state: the (action, status) tuple of the resource, used to make sure
+                that a backup may be created for the current resource
+  :return: a token containing sufficient data to verify the operation's status
+  :raise: any ``Exception`` if the delete operation failed
+
+.. py:function:: check_delete_complete(self, token)
+
+  Verify the delete operation completed successfully.
+
+  :param token: the return value of ``handle_delete`` or
+                ``handle_snapshot_delete`` (for deletion policy - Snapshot)
+                used to verify the status of the operation
+  :return: ``True`` if the delete operation completed and the physical resource
+           is deleted; ``False`` otherwise.
+  :raise: any ``Exception`` if the delete operation failed.
+
+.. py:function:: check_delete_snapshot_complete(self, token)
+
+  Verify the delete snapshot operation completed successfully.
+
+  :param token: the return value of ``handle_delete_snapshot`` used
+                to verify the status of the operation
+  :return: ``True`` if the delete operation completed and the snapshot
+           is deleted; ``False`` otherwise.
+  :raise: any ``Exception`` if the delete operation failed.
+
+Resource Dependencies
++++++++++++++++++++++
+
+Ideally, your resource should not have any 'hidden' dependencies, i.e. Heat
+should be able to infer any inbound or outbound dependencies of your resource
+instances from resource properties and the other resources/resource attributes
+they reference. This is handled by
+``heat.engine.resource.Resource.add_dependencies()``.
+
+If this is not possible, please do not simply override `add_dependencies()` in
+your resource plugin! This has previously caused `problems
+<https://bugs.launchpad.net/heat/+bug/1554625>`_ for multiple operations,
+usually due to uncaught exceptions. If you feel you need to override
+`add_dependencies()`, please reach out to Heat developers on the `#heat` IRC
+channel on FreeNode or on the `openstack-discuss
+<mailto:openstack-discuss@lists.openstack.org>`_ mailing list to discuss the
+possibility of a better solution.
+
+Registering Resource Plug-ins
++++++++++++++++++++++++++++++
+To make your plug-in available for use in stack templates, the plug-in must
+register a reference name with the engine. This is done by defining a
+``resource_mapping`` function in your plug-in module that returns a map of
+template resource type names and their corresponding implementation classes::
+
+        def resource_mapping():
+            return { 'My::Custom::Plugin': MyResourceClass }
+
+This would allow a template author to define a resource as:
+
+.. code-block:: yaml
+
+        resources:
+          my_resource:
+            type: My::Custom::Plugin
+            properties:
+            # ... your plug-in's properties ...
+
+Note that you can define multiple plug-ins per module by simply returning
+a map containing a unique template type name for each. You may also use this to
+register a single resource plug-in under multiple template type names (which
+you would only want to do when constrained by backwards compatibility).
+
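+For instance (class names are illustrative), a module shipping two plug-ins
+could register both at once:
+
+.. code-block:: python
+
+        def resource_mapping():
+            return {
+                'My::Custom::Plugin': MyResourceClass,
+                'My::Custom::OtherPlugin': MyOtherResourceClass,
+            }
+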
+Configuring the Engine
+----------------------
+In order to use your plug-in, Heat must be configured to read your resources
+from a particular directory. The ``plugin_dirs`` configuration option lists the
+directories on the local file system where the engine will search for plug-ins.
+Simply place the file containing your resource in one of these directories and
+the engine will make them available next time the service starts.
+
+See :doc:`../configuration/index` for more information on configuring the
+orchestration service.
+
+Testing
+-------
+
+Tests can live inside the plug-in under the ``tests``
+namespace/directory. The Heat plug-in loader will implicitly not load
+anything under that directory. This is useful when your plug-in tests
+have dependencies you don't want installed in production.
+
+Putting It All Together
+-----------------------
+You can find the plugin classes in ``heat/engine/resources``.  An
+exceptionally simple one to start with is ``random_string.py``; it is
+unusual in that it does not manipulate anything in the cloud!
+
+Resource Contributions
+----------------------
+
+The Heat team is interested in adding new resources that give Heat access to
+additional OpenStack or StackForge projects. The following checklist defines
+the requirements for a candidate resource to be considered for inclusion:
+
+- Must wrap an OpenStack or StackForge project, or a third party project that
+  is relevant to OpenStack users.
+- Must have its dependencies listed in OpenStack's ``global-requirements.txt``
+  file, or else it should be able to conditionally disable itself when there
+  are missing dependencies, without crashing or otherwise affecting the normal
+  operation of the heat-engine service.
+- The resource's support status flag must be set to ``UNSUPPORTED``, to
+  indicate that the Heat team is not responsible for supporting this resource.
+- The code must be of comparable quality to official resources. The Heat team
+  can help with this during the review phase.
+
+If you have a resource that is a good fit, you are welcome to contact the Heat
+team. If for any reason your resource does not meet the above requirements,
+but you still think it can be useful to other users, you are encouraged to
+host it on your own repository and share it as a regular Python installable
+package. You can find example resource plug-ins that have all the required
+packaging files in the ``contrib`` directory of the official Heat git
+repository.
diff --git a/doc/source/contributor/rally_on_gates.rst b/doc/source/contributor/rally_on_gates.rst
new file mode 100644
index 0000000..61fe237
--- /dev/null
+++ b/doc/source/contributor/rally_on_gates.rst
@@ -0,0 +1,247 @@
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+.. _rally_gates:
+
+=========================
+Using Rally on Heat gates
+=========================
+The Heat gate allows Rally to be used for performance testing of each
+particular patch. This functionality can be used to check a patch for
+performance regressions and also to detect any floating bugs in common
+scenarios.
+
+How to run Rally for particular patch
+-------------------------------------
+As mentioned above, Heat allows Rally scenarios to be executed as a gate job
+for a particular patch. This can be done by posting a comment with the text
+``check experimental`` on the patch in review. It will run a bunch of jobs,
+one of which is named ``gate-rally-dsvm-fakevirt-heat``.
+
+The list of scenarios to be executed is defined in the file
+``heat-fakevirt.yaml``. The default version of this file is available here:
+https://github.com/openstack/heat/blob/master/rally-scenarios/heat-fakevirt.yaml
+
+Obviously, performance analysis only makes sense when it can be compared with
+some other performance data, so two different approaches can be used for it:
+
+- Comparison of one part of code with some custom changes
+  (see :ref:`check_performance_or_detect_regression`)
+- Comparison of two different code parts
+  (see :ref:`compare_output_API_performance`)
+
+Examples of using Rally
+-----------------------
+
+The two main approaches to using the Rally job for Heat were highlighted
+above; corresponding examples are described in this part of the documentation.
+
+Note, however, that there are many other ways to use the Rally job for Heat
+performance work. For example, the job can be launched periodically (twice a
+week) for random patches and the results compared with each other. This makes
+it possible to see that Heat does not have any performance regressions.
+
+.. _check_performance_or_detect_regression:
+
+Check performance or how to detect regression
++++++++++++++++++++++++++++++++++++++++++++++
+
+The easiest way of using Rally is to execute already existing scenarios. One
+example is presented in the patch
+https://review.opendev.org/#/c/279450/ , which executed the scenario
+``HeatStacks.create_and_delete_stack`` that already exists in Rally. During
+the execution of this scenario Rally creates a Heat stack and then, once the
+stack is created, deletes it. All existing scenarios can be found here:
+https://github.com/openstack/rally-openstack/blob/master/rally_openstack/scenarios/heat/stacks.py
+
+The mentioned scenario uses a Heat template as a parameter for the task. The
+template path should be given in the ``template_path`` argument. It can be one
+of the Heat templates in the Rally repository
+(https://github.com/openstack/rally-openstack/tree/master/samples/tasks/scenarios/heat/templates)
+or a new one, as was done for the mentioned patch. A newly added template
+should be placed in the ``rally-scenarios/extra/`` directory.
+
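+For illustration, a task entry in ``heat-fakevirt.yaml`` might look roughly
+like the following sketch (the template name and runner settings are
+assumptions, not values from the default file):
+
+.. code-block:: yaml
+
+    HeatStacks.create_and_delete_stack:
+      -
+        args:
+          template_path: "rally-scenarios/extra/my_template.yaml"
+        runner:
+          type: "constant"
+          times: 10
+          concurrency: 1
+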
+It is also possible to specify other fields for each Rally task, such as
+``sla`` or ``context``. More information about the other configuration
+settings is available at
+https://rally.readthedocs.io/en/latest/plugins/#rally-plugins
+
+The mentioned patch was proposed to confirm the caching mechanism of the Heat
+template validation process
+(see https://specs.openstack.org/openstack/heat-specs/specs/liberty/constraint-validation-cache.html).
+It therefore contains some changes to the OS::Heat::TestResource resource,
+which make it possible to demonstrate the caching improvements.
+
+Initially the test was run against the current devstack installation, where
+caching is disabled (e.g. Patch Set 7). The following results were obtained:
+
++------------------+----------+----------+----------+--------+------+
+|Action            | Min (sec)| Max (sec)| Avg (sec)| Success| Count|
++------------------+----------+----------+----------+--------+------+
+|heat.create_stack | 38.223   | 48.085   | 42.971   | 100.0% | 10   |
++------------------+----------+----------+----------+--------+------+
+|heat.delete_stack | 11.755   | 18.155   | 14.085   | 100.0% | 10   |
++------------------+----------+----------+----------+--------+------+
+|total             | 50.188   | 65.361   | 57.057   | 100.0% | 10   |
++------------------+----------+----------+----------+--------+------+
+
+The next patch set (Patch Set 8) added a Depends-On reference to the commit
+message, which allowed the same test to run against the devstack patch that
+turns on caching (https://review.opendev.org/#/c/279400/).
+The results for this case were:
+
++------------------+----------+----------+----------+--------+------+
+|Action            | Min (sec)| Max (sec)| Avg (sec)| Success| Count|
++------------------+----------+----------+----------+--------+------+
+|heat.create_stack | 11.863   | 16.074   | 14.174   | 100.0% | 10   |
++------------------+----------+----------+----------+--------+------+
+|heat.delete_stack | 9.144    | 11.663   | 10.595   | 100.0% | 10   |
++------------------+----------+----------+----------+--------+------+
+|total             | 21.557   | 27.18    | 24.77    | 100.0% | 10   |
++------------------+----------+----------+----------+--------+------+
+
+Comparing the average values for the create_stack action in the first and
+second executions shows that with caching enabled create_stack is roughly
+three times faster, which is a tangible improvement for the create_stack
+operation. Note that in this test the delay for each constraint validation
+request is 0.3 seconds, as specified by the ``constraint_prop_secs`` property
+of TestResource. This may be longer than the real delay, but it is enough to
+confirm that caching works correctly.
+
+This approach can also be used for detecting regressions. In that case the
+workflow can be summarized as the following steps:
+
+- Add existing or new tasks to the task list (``heat-fakevirt.yaml``).
+- Wait for the results of this execution.
+- Upload a patch set with the changes (new feature) and launch the same test
+  again.
+- Compare the performance results.
+
+.. _compare_output_API_performance:
+
+Compare output API performance
+++++++++++++++++++++++++++++++
+
+Another way to use the Rally job is to write custom Rally scenarios in the
+Heat repository. An example of this is presented in the review:
+https://review.opendev.org/#/c/270225/
+
+It is similar to the first example, but requires more Rally-specific coding.
+The new tasks in ``heat-fakevirt.yaml`` use scenarios that are not defined in
+the Rally repository:
+
+- CustomHeatBenchmark.create_stack_and_show_output_new
+- CustomHeatBenchmark.create_stack_and_show_output_old
+- CustomHeatBenchmark.create_stack_and_list_output_new
+- CustomHeatBenchmark.create_stack_and_list_output_old
+
+All these scenarios are defined in the same patch and placed in the
+``rally-scenarios/plugins/`` directory.
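+
+For illustration only, wiring one of these custom scenarios into
+``heat-fakevirt.yaml`` could look roughly like the sketch below; the template
+path and the ``output_key`` argument are hypothetical and only indicate the
+general shape of such an entry:
+
+.. code-block:: yaml
+
+   CustomHeatBenchmark.create_stack_and_show_output_new:
+     -
+       args:
+         template_path: "rally-scenarios/extra/stack_with_outputs.yaml"
+         output_key: "server_ip"
+       runner:
+         type: "constant"
+         times: 5
+         concurrency: 1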
+
+The aim of these scenarios and tasks is to demonstrate the difference between
+the new and old API calls. The Heat client has two commands for operating on
+stack outputs: ``heat output-list`` and ``heat output-show <output-id>``.
+Previously there were no dedicated API calls for getting this information from
+the server, and the data was obtained from the whole Heat Stack object.
+This changed with the implementation of the new output API:
+https://specs.openstack.org/openstack/heat-specs/specs/mitaka/api-calls-for-output.html
+
+As described in that specification, outputs can now be obtained via dedicated
+requests to the Heat API, and the Heat client code was updated to use the new
+API when it is available.
+
+The motivation for this change was a performance issue: with the old approach,
+executing ``heat output-show <output-id>`` required resolving all outputs in
+the Heat stack before returning the single output requested by the user.
+
+The same issue existed with ``heat output-list``, which resolved all outputs
+only to provide the list of output keys, without their resolved values.
+
+The two scenarios with the ``*_new`` suffix use the new output API. These
+scenarios are not present in Rally yet, because the API is new.
+The other two scenarios with the ``*_old`` suffix are based on the old
+approach to getting outputs. That code path was partially replaced by the new
+API, so it cannot be exercised on a fresh devstack; as a result the old
+behaviour was reimplemented as two custom scenarios.
+
+All these scenarios were added to the task list and executed at the same time.
+The results of the execution are shown below:
+
+create_stack_and_show_output_old
+--------------------------------
+
++---------------------+----------+----------+----------+--------+------+
+|Action               | Min (sec)| Max (sec)| Avg (sec)| Success| Count|
++---------------------+----------+----------+----------+--------+------+
+|heat.create_stack    | 13.559   | 14.298   | 13.899   | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+|heat.show_output_old | 5.214    | 5.297    | 5.252    | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+|heat.delete_stack    | 5.445    | 6.962    | 6.008    | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+|total                | 24.243   | 26.146   | 25.159   | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+
+create_stack_and_show_output_new
+--------------------------------
+
++---------------------+----------+----------+----------+--------+------+
+|Action               | Min (sec)| Max (sec)| Avg (sec)| Success| Count|
++---------------------+----------+----------+----------+--------+------+
+|heat.create_stack    | 13.719   | 14.286   | 13.935   | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+|heat.show_output_new | 0.699    | 0.835    | 0.762    | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+|heat.delete_stack    | 5.398    | 6.457    | 5.636    | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+|total                | 19.873   | 21.21    | 20.334   | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+
+The average execution time of ``output-show`` with the old approach is clearly
+higher than with the new API. This is because the new API resolves only the
+single requested output.
+
+The results for ``output-list`` are similar:
+
+create_stack_and_list_output_old
+--------------------------------
+
++---------------------+----------+----------+----------+--------+------+
+|Action               | Min (sec)| Max (sec)| Avg (sec)| Success| Count|
++---------------------+----------+----------+----------+--------+------+
+|heat.create_stack    | 13.861   | 14.573   | 14.141   | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+|heat.list_output_old | 5.247    | 5.339    | 5.281    | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+|heat.delete_stack    | 6.727    | 6.845    | 6.776    | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+|total                | 25.886   | 26.696   | 26.199   | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+
+create_stack_and_list_output_new
+--------------------------------
+
++---------------------+----------+----------+----------+--------+------+
+|Action               | Min (sec)| Max (sec)| Avg (sec)| Success| Count|
++---------------------+----------+----------+----------+--------+------+
+|heat.create_stack    | 13.902   | 21.117   | 16.729   | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+|heat.list_output_new | 0.147    | 0.363    | 0.213    | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+|heat.delete_stack    | 6.616    | 8.202    | 7.022    | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+|total                | 20.838   | 27.908   | 23.964   | 100.0% | 5    |
++---------------------+----------+----------+----------+--------+------+
+
+This is also expected, because getting the list of output names does not
+require resolving the output values, which is exactly what the new API avoids.
+
+All of these results clearly show the performance changes and confirm that the
+new approach works correctly.
diff --git a/doc/source/contributor/schedulerhints.rst b/doc/source/contributor/schedulerhints.rst
new file mode 100644
index 0000000..44b14eb
--- /dev/null
+++ b/doc/source/contributor/schedulerhints.rst
@@ -0,0 +1,41 @@
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+====================================
+Heat Stack Lifecycle Scheduler Hints
+====================================
+This is a mechanism whereby, when heat processes a stack with Server or Volume
+resources, the stack id, root stack id, stack resource uuid, stack resource
+name and the path in the stack can be passed by heat to nova and cinder as
+scheduler hints.
+
+
+Enabling the scheduler hints
+----------------------------
+By default, passing the lifecycle scheduler hints is disabled. To enable it,
+set ``stack_scheduler_hints`` to ``True`` in ``heat.conf``.
+
+The hints
+---------
+When heat processes a stack and the feature is enabled, the stack id, root
+stack id, stack resource uuid, stack resource name, and the path in the stack
+(as a list of comma-delimited strings of stackresourcename and stackname)
+will be passed by heat to nova and cinder as scheduler hints.
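+
+For illustration only, the hints attached to a server boot request might look
+roughly like the following mapping. The key names are an assumption based on
+heat's scheduler hints implementation and may differ between releases; the
+values are placeholders:
+
+.. code-block:: yaml
+
+   # Assumed key names and placeholder values, not authoritative.
+   heat_root_stack_id: "<uuid of the root stack>"
+   heat_stack_id: "<uuid of the stack containing the resource>"
+   heat_resource_name: "<name of the Server or Volume resource>"
+   heat_resource_uuid: "<uuid of that resource>"
+   heat_path_in_stack: ["<stackresourcename,stackname>"]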
+
+Purpose
+-------
+A heat provider may have a need for custom code to examine stack requests
+prior to performing the operations to create or update a stack. After the
+custom code completes, the provider may want to provide hints to the nova
+or cinder schedulers with stack related identifiers, for processing by
+any custom scheduler plug-ins configured for nova or cinder.
diff --git a/doc/source/contributor/supportstatus.rst b/doc/source/contributor/supportstatus.rst
new file mode 100644
index 0000000..ef49459
--- /dev/null
+++ b/doc/source/contributor/supportstatus.rst
@@ -0,0 +1,282 @@
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+.. _supportstatus:
+
+===============================
+Heat Support Status Usage Guide
+===============================
+Heat allows each resource, property and attribute to carry a special option
+named *support_status*, which describes the current state of the object: its
+status, the release since which that status applies, and any additional
+information about the object's state. This guide describes the detailed life
+cycle of resources, properties and attributes.
+
+Support Status option and its parameters
+----------------------------------------
+The support status of an object is specified using the ``SupportStatus``
+class, which has the following options:
+
+*status*:
+  The current status of the object. Allowed values:
+    - SUPPORTED. The default value of the status parameter. All objects with
+      this status are available and can be used.
+    - DEPRECATED. An object with this status is still available, but using it
+      in code or templates is discouraged. The message usually references a
+      new object that can be used instead of the deprecated one.
+    - HIDDEN. The last step in the deprecation process. Old stacks
+      containing resources in this status will continue
+      functioning. Certain functionality is disabled for resources in
+      this status (resource-type-list, resource-type-show, and
+      resource-type-template). Resources in HIDDEN status are not
+      included in the documentation. A known limitation is that new
+      stacks can be created with HIDDEN resources. See below for more
+      details about the removal and deprecation process.
+    - UNSUPPORTED. Resources with UNSUPPORTED status are not supported by the
+      Heat team, i.e. they can still be used, but they may be broken.
+
+*substitute_class*:
+  Assigns a substitute class for the object. Replacing the object with a new
+  object that inherits from (or extends) the substitute class transfers the
+  object to the new class type gracefully (without triggering an update
+  replace).
+
+*version*:
+  The release name since which the current status has been active. The
+  parameter is optional, but should be defined or updated whenever a
+  SupportStatus is added or its status changes. It indicates from which
+  release the object has been in its current status.
+
+  .. note::
+
+     Since the Liberty release the version mark looks like 5.0.0 instead of
+     2015.2.
+
+*message*:
+  Any additional information about the object's state, e.g.
+  ``'Use property new_property instead.'``.
+
+*previous_status*:
+  An option that records the object's previous status, if any. This is helpful
+  for displaying the full life cycle of the object. The type of
+  *previous_status* is ``SupportStatus``.
+
+Life cycle of resource, property, attribute
+-------------------------------------------
+This section describes the life cycle of resources, properties and attributes.
+All these objects share the same life cycle::
+
+  UNSUPPORTED -> SUPPORTED -> DEPRECATED -> HIDDEN
+                                        \
+                                         -> UNSUPPORTED
+
+where UNSUPPORTED is optional.
+
+Creating process of object
+++++++++++++++++++++++++++
+When a new object is added, a support status should be set. The new object
+should contain a *support_status* parameter set to a ``SupportStatus``
+instance with the object's version and, optionally, a *substitute_class* or a
+message. This parameter lets users understand from which OpenStack release the
+object is available and can be used.
+
+Deprecating process of object
++++++++++++++++++++++++++++++
+When an object becomes obsolete, users should know about it, so information
+about the deprecation needs to be added to the object's *support_status*. The
+status of the ``SupportStatus`` must be DEPRECATED. If there is no *version*
+parameter, add one with the current release; otherwise move the current status
+to *previous_status* and set *version* to the current release. If a new object
+replaces the old one, it is a good idea to add some information about the new
+object to the *support_status* message of the old object, e.g. 'Use property
+new_property instead.'. If the old object is directly replaceable by the new
+object, a *substitute_class* should be added to the old object's
+*support_status*.
+
+Removing process of object
+++++++++++++++++++++++++++
+After at least one full release cycle a deprecated object should be hidden and
+its *support_status* status set to HIDDEN. The HIDDEN status means the object
+is hidden from the documentation and, if it is a resource, from the result of
+the :code:`resource-type-list` CLI command. Also, the
+:code:`resource-type-show` command for such a resource will raise a
+`NotSupported` exception.
+
+The purpose of hiding, rather than removing, obsolete resources or properties
+is to ensure that users can continue to operate existing stacks - replacing or
+removing the offending resources, or deleting the entire stack. Steps should be
+taken to ensure that these operations can succeed, e.g. by replacing a hidden
+resource type's implementation with one that is equivalent to
+``OS::Heat::None`` when the underlying API no longer exists, supplying a
+*substitute_class* for a resource type, or adding a property translation rule.
+
+Using Support Status during code writing
+----------------------------------------
+When adding new objects, or adding objects that replace old ones (e.g. the
+subnet property instead of subnet_id in OS::Neutron::RouterInterface), there
+is information about when the objects were added (since which release they are
+available or unavailable). This section describes the use of ``SupportStatus``
+when creating, deprecating and removing resources, properties and attributes.
+Note that ``SupportStatus`` lives in support.py, so you need to import
+*support*. To specify a status, use the *support* constant names, e.g.
+support.SUPPORTED. All constant names are described in the section above.
+
+Using Support Status during creation
+++++++++++++++++++++++++++++++++++++
+The *support_status* option may be set for a whole resource:
+
+.. code-block:: python
+
+   class ResourceWithType(resource.Resource):
+
+       support_status = support.SupportStatus(
+           version='5.0.0',
+           message=_('Optional message')
+       )
+
+To define *support_status* for a property or an attribute, use the same
+pattern in the schema definition:
+
+.. code-block:: python
+
+   PROPERTY: properties.Schema(
+       ...
+       support_status=support.SupportStatus(
+           version='5.0.0',
+           message=_('Optional message')
+       )
+   )
+
+The same *support_status* definition applies to an attribute schema.
+
+Note that in this situation the status parameter of ``SupportStatus`` uses its
+default value, SUPPORTED.
+
+Using Support Status during deprecation and hiding
+++++++++++++++++++++++++++++++++++++++++++++++++++
+When the time comes to deprecate or hide a resource, property or attribute,
+follow these steps:
+
+1. If the object already has a support_status, add a `previous_status`
+   parameter with the current ``SupportStatus`` value and update the other
+   parameters with the new `status`, `version` and, optionally,
+   `substitute_class` or `message`.
+
+2. If there is no support_status option, add a new one with the status set to
+   the current status, `version` set to the current release and, optionally,
+   some message.
+
+Deprecating a resource with Support Status looks like this:
+
+.. code-block:: python
+
+   class ResourceWithType(resource.Resource):
+
+       support_status = support.SupportStatus(
+           status=support.DEPRECATED,
+           version='5.0.0',
+           substitute_class=SubstituteResourceWithType,
+           message=_('Optional message'),
+           previous_status=support.SupportStatus(version='2014.2')
+       )
+
+Deprecating an attribute (or property) with Support Status looks like this:
+
+.. code-block:: python
+
+   ATTRIBUTE: attributes.Schema(
+       ...
+       support_status=support.SupportStatus(
+           status=support.DEPRECATED,
+           version='5.0.0',
+           message=_('Optional message like: Use attribute new_attr'),
+           previous_status=support.SupportStatus(
+               version='2014.2',
+               message=_('Feature available since 2014.2'))
+       )
+   )
+
+The same *support_status* definition applies to a property schema.
+
+Note that when hiding an object the status should be support.HIDDEN instead of
+support.DEPRECATED. Besides that, the ``SupportStatus`` with DEPRECATED status
+should be moved to *previous_status*, e.g.:
+
+.. code-block:: python
+
+    support.SupportStatus(
+        status=support.HIDDEN,
+        version='6.0.0',
+        message=_('Some message'),
+        previous_status=support.SupportStatus(
+            status=support.DEPRECATED,
+            version='2015.1',
+            substitute_class=SubstituteResourceWithType,
+            previous_status=support.SupportStatus(version='2014.2')
+        )
+    )
+
+When hiding properties, if a hidden property has an alternative, use the
+translation mechanism to translate the old property to the new one. See below
+for how to use this mechanism.
+
+Translating mechanism for hidden properties
+-------------------------------------------
+
+Sometimes properties become deprecated and are replaced by others. There is a
+translation mechanism for that. The mechanism is used in the following cases:
+
+1. Two properties in properties_schema that have STRING, INTEGER, NUMBER or
+   BOOLEAN type.
+2. Two properties: one in a LIST or MAP property sub-schema and another in the
+   top-level schema.
+3. Two properties in a LIST property.
+4. A non-LIST property and a LIST property that is designed to replace the
+   non-LIST property.
+5. A STRING property that contains the name or ID of some entity, e.g.
+   `subnet`, and should be resolved to the entity's ID.
+
+The mechanism defines rules and executes them. To define a rule, the
+``TranslationRule`` class is instantiated with: *translation_path* - a list
+with the path in properties_schema of the property that will be affected;
+*value* - the value to be added to the property specified by the previous
+parameter; *value_name* - the name of the old property, used for case 4;
+*value_path* - a list with the path in properties_schema of the property whose
+value will be used. ``TranslationRule`` supports the following rules:
+
+- *ADD*. This rule allows adding a value to LIST-type properties. Only
+  LIST-type values can be added to such properties; using the rule in other
+  cases is prohibited and results in an error.
+- *REPLACE*. This rule allows replacing one property value with another. It is
+  used for all property types. Note that if the property has a list type, the
+  value will be replaced in all list elements where needed. If an element of
+  such a property must be replaced by the value of another element of the same
+  property, *value_name* must be defined.
+- *DELETE*. This rule allows deleting a property. If the property has a list
+  type, the deletion affects the value in all list elements.
+- *RESOLVE*. This rule allows resolving a property using a client and the
+  *finder* function. Finders may require an additional *entity* key.
+
+Each resource that has hidden properties which can be replaced by new ones
+must override the `translation_rules` method, which should return a list of
+``TranslationRule`` objects, for example:
+
+.. code-block:: python
+
+   def translation_rules(self, properties):
+       rules = [
+           translation.TranslationRule(
+               properties,
+               translation.TranslationRule.REPLACE,
+               translation_path=[self.NETWORKS, self.NETWORK_ID],
+               value_name=self.NETWORK_UUID),
+           translation.TranslationRule(
+               properties,
+               translation.TranslationRule.RESOLVE,
+               translation_path=[self.FLAVOR],
+               client_plugin=self.client_plugin('nova'),
+               finder='find_flavor_by_name_or_id')]
+       return rules
diff --git a/doc/source/getting_started/on_devstack.rst b/doc/source/getting_started/on_devstack.rst
index 64d607a..abdd771 100644
--- a/doc/source/getting_started/on_devstack.rst
+++ b/doc/source/getting_started/on_devstack.rst
@@ -51,8 +51,13 @@ a VM image that heat can launch. To do that add the following to
 `[[local|localrc]]` section of `local.conf`::
 
     IMAGE_URL_SITE="https://download.fedoraproject.org"
+<<<<<<< doc/source/getting_started/on_devstack.rst
     IMAGE_URL_PATH="/pub/fedora/linux/releases/36/Cloud/x86_64/images/"
     IMAGE_URL_FILE="Fedora-Cloud-Base-36-1.5.x86_64.qcow2"
+=======
+    IMAGE_URL_PATH="/pub/fedora/linux/releases/33/Cloud/x86_64/images/"
+    IMAGE_URL_FILE="Fedora-Cloud-Base-33-1.2.x86_64.qcow2"
+>>>>>>> doc/source/getting_started/on_devstack.rst
     IMAGE_URLS+=","$IMAGE_URL_SITE$IMAGE_URL_PATH$IMAGE_URL_FILE
 
 URLs for any cloud image may be specified, but fedora images from F20 contain
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 79e8092..7d0072e 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -82,12 +82,17 @@ Using the Heat Service
 - `OpenStack Orchestration API v1 Reference`_
 - :python-heatclient-doc:`Python and CLI client <>`
 
+<<<<<<< doc/source/index.rst
 .. _`OpenStack Orchestration API v1 Reference`: https://developer.openstack.org/api-ref/orchestration/v1/
+=======
+.. _`OpenStack Orchestration API v1 Reference`: https://docs.openstack.org/api-ref/orchestration/v1/
+>>>>>>> doc/source/index.rst
 
 Developing Heat
 ===============
 
 .. toctree::
+<<<<<<< doc/source/index.rst
     :maxdepth: 2
 
     developing_guides/index
@@ -106,6 +111,19 @@ For Contributors
     :maxdepth: 2
 
     contributor/index
+=======
+    :maxdepth: 1
+
+    contributor/index
+    getting_started/on_devstack
+    contributor/architecture
+    contributor/pluginguide
+    contributor/schedulerhints
+    contributor/gmr
+    contributor/supportstatus
+    contributor/rally_on_gates
+    api/index
+>>>>>>> doc/source/index.rst
 
 Indices and tables
 ==================
diff --git a/doc/source/operating_guides/scale_deployment.rst b/doc/source/operating_guides/scale_deployment.rst
index 58902f8..839f391 100644
--- a/doc/source/operating_guides/scale_deployment.rst
+++ b/doc/source/operating_guides/scale_deployment.rst
@@ -48,7 +48,11 @@ Basic Architecture
 ------------------
 
 The heat architecture is as defined at :doc:`heat architecture
+<<<<<<< doc/source/operating_guides/scale_deployment.rst
 <../developing_guides/architecture>` and shown in the diagram below, where we have
+=======
+<../contributor/architecture>` and shown in the diagram below, where we have
+>>>>>>> doc/source/operating_guides/scale_deployment.rst
 a CLI that sends HTTP requests to the REST and CFN APIs, which in turn make
 calls using AMQP to the heat-engine::
 
diff --git a/doc/source/template_guide/basic_resources.rst b/doc/source/template_guide/basic_resources.rst
index a0dcb17..6e4395a 100644
--- a/doc/source/template_guide/basic_resources.rst
+++ b/doc/source/template_guide/basic_resources.rst
@@ -137,6 +137,48 @@ port:
 
 Create and associate a floating IP to an instance
 -------------------------------------------------
+<<<<<<< doc/source/template_guide/basic_resources.rst
+=======
+You can use two sets of resources to create and associate floating IPs to
+instances.
+
+OS::Nova resources
+++++++++++++++++++
+Use the :ref:`OS::Nova::FloatingIP` resource to create a floating IP, and
+the :ref:`OS::Nova::FloatingIPAssociation` resource to associate the
+floating IP to an instance.
+
+The following example creates an instance and a floating IP, and associate the
+floating IP to the instance:
+
+.. code-block:: yaml
+
+    resources:
+      floating_ip:
+        type: OS::Nova::FloatingIP
+        properties:
+          pool: public
+
+      inst1:
+        type: OS::Nova::Server
+        properties:
+          flavor: m1.small
+          image: ubuntu-trusty-x86_64
+          networks:
+            - network: private
+
+      association:
+        type: OS::Nova::FloatingIPAssociation
+        properties:
+          floating_ip: { get_resource: floating_ip }
+          server_id: { get_resource: inst1 }
+
+OS::Neutron resources
++++++++++++++++++++++
+.. note::
+   The Networking service (neutron) must be enabled on your OpenStack
+   deployment to use these resources.
+>>>>>>> doc/source/template_guide/basic_resources.rst
 
 Use the :ref:`OS::Neutron::FloatingIP` resource to create a floating IP, and
 the :ref:`OS::Neutron::FloatingIPAssociation` resource to associate the
diff --git a/doc/source/template_guide/composition.rst b/doc/source/template_guide/composition.rst
index 5cfc531..f55e9c3 100644
--- a/doc/source/template_guide/composition.rst
+++ b/doc/source/template_guide/composition.rst
@@ -146,7 +146,11 @@ accessible as follows
 
   outputs:
     test_out:
+<<<<<<< doc/source/template_guide/composition.rst
       value: {get_attr: [my_server, resource.server, first_address]}
+=======
+      value: {get_attr: my_server, resource.server, first_address}
+>>>>>>> doc/source/template_guide/composition.rst
 
 Making your template resource more "transparent"
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/template_guide/environment.rst b/doc/source/template_guide/environment.rst
index 1fa7224..fcd36ff 100644
--- a/doc/source/template_guide/environment.rst
+++ b/doc/source/template_guide/environment.rst
@@ -170,10 +170,17 @@ identifier, and must reference either another resource's ID or the URL of an
 existing template file.
 
 The following example maps a new ``OS::Networking::FloatingIP``
+<<<<<<< doc/source/template_guide/environment.rst
 resource to an existing ``OS::Neutron::FloatingIP`` resource::
 
   resource_registry:
     "OS::Networking::FloatingIP": "OS::Neutron::FloatingIP"
+=======
+resource to an existing ``OS::Nova::FloatingIP`` resource::
+
+  resource_registry:
+    "OS::Networking::FloatingIP": "OS::Nova::FloatingIP"
+>>>>>>> doc/source/template_guide/environment.rst
 
 You can use wildcards to map multiple resources, for example to map all
 ``OS::Neutron`` resources to ``OS::Network``::
diff --git a/doc/source/template_guide/hot_spec.rst b/doc/source/template_guide/hot_spec.rst
index b9e3952..4f6b097 100644
--- a/doc/source/template_guide/hot_spec.rst
+++ b/doc/source/template_guide/hot_spec.rst
@@ -376,6 +376,7 @@ The complete list of supported condition functions is::
 -------------------
 The key with value ``2018-08-31`` or ``rocky`` indicates that the YAML
 document is a HOT template and it may contain features added and/or removed
+<<<<<<< doc/source/template_guide/hot_spec.rst
 up until the Rocky release. The complete list of supported functions is::
 
   digest
@@ -423,6 +424,9 @@ value) will be elided. This allows for e.g. conditional definition of
 properties while keeping the default value when the condition is false.
 
 The complete list of supported functions is::
+=======
+up until the Queens release. The complete list of supported functions is::
+>>>>>>> doc/source/template_guide/hot_spec.rst
 
   digest
   filter
@@ -1883,6 +1887,7 @@ template except for ``if`` conditions. You can use the ``if`` condition
 in the property values in the ``resources`` section and ``outputs`` sections
 of a template.
 
+<<<<<<< doc/source/template_guide/hot_spec.rst
 Beginning with the ``wallaby`` template version, the third argument is
 optional. If only two arguments are passed, the entire enclosing item is
 removed when the condition is false.
@@ -1904,6 +1909,8 @@ In this example, the default name for the server (which is generated by Heat
 when the property value is not specified) would be used when the
 ``server_name`` parameter value is an empty string.
 
+=======
+>>>>>>> doc/source/template_guide/hot_spec.rst
 not
 ---
 The ``not`` function acts as a NOT operator.
diff --git a/etc/heat/api-paste.ini b/etc/heat/api-paste.ini
index 8ca1f51..13300eb 100644
--- a/etc/heat/api-paste.ini
+++ b/etc/heat/api-paste.ini
@@ -1,4 +1,5 @@
 
+<<<<<<< etc/heat/api-paste.ini
 # heat-api composite
 [composite:heat-api]
 paste.composite_factory = heat.api:root_app_factory
@@ -6,6 +7,13 @@ paste.composite_factory = heat.api:root_app_factory
 /healthcheck: healthcheck
 
 # heat-api composite for standalone heat
+=======
+# heat-api pipeline
+[pipeline:heat-api]
+pipeline = healthcheck cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authurl authtoken context osprofiler apiv1app
+
+# heat-api pipeline for standalone heat
+>>>>>>> etc/heat/api-paste.ini
 # ie. uses alternative auth backend that authenticates users against keystone
 # using username and password instead of validating token (which requires
 # an admin/service token).
@@ -13,25 +21,38 @@ paste.composite_factory = heat.api:root_app_factory
 #   [paste_deploy]
 #   flavor = standalone
 #
+<<<<<<< etc/heat/api-paste.ini
 [composite:heat-api-standalone]
 paste.composite_factory = heat.api:root_app_factory
 /: api
 /healthcheck: healthcheck
 
 # heat-api composite for custom cloud backends
+=======
+[pipeline:heat-api-standalone]
+pipeline = healthcheck cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authurl authpassword context apiv1app
+
+# heat-api pipeline for custom cloud backends
+>>>>>>> etc/heat/api-paste.ini
 # i.e. in heat.conf:
 #   [paste_deploy]
 #   flavor = custombackend
 #
+<<<<<<< etc/heat/api-paste.ini
 [composite:heat-api-custombackend]
 paste.composite_factory = heat.api:root_app_factory
 /: api
 /healthcheck: healthcheck
+=======
+[pipeline:heat-api-custombackend]
+pipeline = healthcheck cors request_id context faultwrap versionnegotiation custombackendauth apiv1app
+>>>>>>> etc/heat/api-paste.ini
 
 # To enable, in heat.conf:
 #   [paste_deploy]
 #   flavor = noauth
 #
+<<<<<<< etc/heat/api-paste.ini
 [composite:heat-api-noauth]
 paste.composite_factory = heat.api:root_app_factory
 /: api
@@ -61,6 +82,19 @@ noauth = cors request_id faultwrap noauth context http_proxy_to_wsgi versionnego
 paste.composite_factory = heat.api:pipeline_factory
 default = cors request_id http_proxy_to_wsgi cfnversionnegotiation ec2authtoken authtoken context osprofiler apicfnv1app
 standalone = cors request_id http_proxy_to_wsgi cfnversionnegotiation ec2authtoken context apicfnv1app
+=======
+[pipeline:heat-api-noauth]
+pipeline = healthcheck cors request_id faultwrap noauth context http_proxy_to_wsgi versionnegotiation apiv1app
+
+# heat-api-cfn pipeline
+[pipeline:heat-api-cfn]
+pipeline = healthcheck cors request_id http_proxy_to_wsgi cfnversionnegotiation ec2authtoken authtoken context osprofiler apicfnv1app
+
+# heat-api-cfn pipeline for standalone heat
+# relies exclusively on authenticating with ec2 signed requests
+[pipeline:heat-api-cfn-standalone]
+pipeline = healthcheck cors request_id http_proxy_to_wsgi cfnversionnegotiation ec2authtoken context apicfnv1app
+>>>>>>> etc/heat/api-paste.ini
 
 [app:apiv1app]
 paste.app_factory = heat.common.wsgi:app_factory
@@ -70,9 +104,12 @@ heat.app_factory = heat.api.openstack.v1:API
 paste.app_factory = heat.common.wsgi:app_factory
 heat.app_factory = heat.api.cfn.v1:API
 
+<<<<<<< etc/heat/api-paste.ini
 [app:healthcheck]
 paste.app_factory = oslo_middleware:Healthcheck.app_factory
 
+=======
+>>>>>>> etc/heat/api-paste.ini
 [filter:versionnegotiation]
 paste.filter_factory = heat.common.wsgi:filter_factory
 heat.filter_factory = heat.api.openstack:version_negotiation_filter
@@ -127,3 +164,9 @@ paste.filter_factory = oslo_middleware.request_id:RequestId.factory
 
 [filter:osprofiler]
 paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+<<<<<<< etc/heat/api-paste.ini
+=======
+
+[filter:healthcheck]
+paste.filter_factory = oslo_middleware:Healthcheck.factory
+>>>>>>> etc/heat/api-paste.ini
diff --git a/heat/api/cfn/v1/stacks.py b/heat/api/cfn/v1/stacks.py
index 881c43d..d971b51 100644
--- a/heat/api/cfn/v1/stacks.py
+++ b/heat/api/cfn/v1/stacks.py
@@ -49,7 +49,11 @@ class StackController(object):
         raise exception.HeatInvalidActionError()
 
     def _enforce(self, req, action):
+<<<<<<< heat/api/cfn/v1/stacks.py
         """Authorize an action against the policy.yaml and policies in code."""
+=======
+        """Authorize an action against the policy.json and policies in code."""
+>>>>>>> heat/api/cfn/v1/stacks.py
         try:
             self.policy.enforce(req.context, action, is_registered_policy=True)
         except heat_exception.Forbidden:
diff --git a/heat/api/middleware/fault.py b/heat/api/middleware/fault.py
index b6ed0f2..91c8762 100644
--- a/heat/api/middleware/fault.py
+++ b/heat/api/middleware/fault.py
@@ -93,8 +93,12 @@ class FaultWrapper(wsgi.Middleware):
         'UnsupportedObjectError': webob.exc.HTTPBadRequest,
         'ResourceTypeUnavailable': webob.exc.HTTPBadRequest,
         'InvalidBreakPointHook': webob.exc.HTTPBadRequest,
+<<<<<<< heat/api/middleware/fault.py
         'ImmutableParameterModified': webob.exc.HTTPBadRequest,
         'CircularDependencyException': webob.exc.HTTPBadRequest
+=======
+        'ImmutableParameterModified': webob.exc.HTTPBadRequest
+>>>>>>> heat/api/middleware/fault.py
     }
 
     def _map_exception_to_error(self, class_exception):
diff --git a/heat/api/openstack/v1/actions.py b/heat/api/openstack/v1/actions.py
index 553c09a..48a964c 100644
--- a/heat/api/openstack/v1/actions.py
+++ b/heat/api/openstack/v1/actions.py
@@ -25,7 +25,11 @@ class ActionController(object):
 
     Implements the API for stack actions
     """
+<<<<<<< heat/api/openstack/v1/actions.py
     # Define request scope (must match what is in policy.yaml or policies in
+=======
+    # Define request scope (must match what is in policy.json or policies in
+>>>>>>> heat/api/openstack/v1/actions.py
     # code)
     REQUEST_SCOPE = 'actions'
 
diff --git a/heat/api/openstack/v1/build_info.py b/heat/api/openstack/v1/build_info.py
index 923d382..7454098 100644
--- a/heat/api/openstack/v1/build_info.py
+++ b/heat/api/openstack/v1/build_info.py
@@ -24,7 +24,11 @@ class BuildInfoController(object):
 
     Returns build information for current app.
     """
+<<<<<<< heat/api/openstack/v1/build_info.py
     # Define request scope (must match what is in policy.yaml or policies in
+=======
+    # Define request scope (must match what is in policy.json or policies in
+>>>>>>> heat/api/openstack/v1/build_info.py
     # code)
     REQUEST_SCOPE = 'build_info'
 
diff --git a/heat/api/openstack/v1/events.py b/heat/api/openstack/v1/events.py
index 7c05274..f8dc07e 100644
--- a/heat/api/openstack/v1/events.py
+++ b/heat/api/openstack/v1/events.py
@@ -83,7 +83,11 @@ class EventController(object):
 
     Implements the API actions.
     """
+<<<<<<< heat/api/openstack/v1/events.py
     # Define request scope (must match what is in policy.yaml or policies in
+=======
+    # Define request scope (must match what is in policy.json or policies in
+>>>>>>> heat/api/openstack/v1/events.py
     # code)
     REQUEST_SCOPE = 'events'
 
diff --git a/heat/api/openstack/v1/resources.py b/heat/api/openstack/v1/resources.py
index f6d7cd8..a71e218 100644
--- a/heat/api/openstack/v1/resources.py
+++ b/heat/api/openstack/v1/resources.py
@@ -74,7 +74,11 @@ class ResourceController(object):
 
     Implements the API actions.
     """
+<<<<<<< heat/api/openstack/v1/resources.py
     # Define request scope (must match what is in policy.yaml or policies in
+=======
+    # Define request scope (must match what is in policy.json or policies in
+>>>>>>> heat/api/openstack/v1/resources.py
     # code)
     REQUEST_SCOPE = 'resource'
 
diff --git a/heat/api/openstack/v1/services.py b/heat/api/openstack/v1/services.py
index 4c61e2a..fdf075b 100644
--- a/heat/api/openstack/v1/services.py
+++ b/heat/api/openstack/v1/services.py
@@ -25,7 +25,11 @@ from heat.rpc import client as rpc_client
 
 class ServiceController(object):
     """WSGI controller for reporting the heat engine status in Heat v1 API."""
+<<<<<<< heat/api/openstack/v1/services.py
     # Define request scope (must match what is in policy.yaml or policies in
+=======
+    # Define request scope (must match what is in policy.json or policies in
+>>>>>>> heat/api/openstack/v1/services.py
     # code)
     REQUEST_SCOPE = 'service'
 
diff --git a/heat/api/openstack/v1/software_configs.py b/heat/api/openstack/v1/software_configs.py
index c27da4f..a286420 100644
--- a/heat/api/openstack/v1/software_configs.py
+++ b/heat/api/openstack/v1/software_configs.py
@@ -43,6 +43,7 @@ class SoftwareConfigController(object):
         except ValueError as e:
             raise exc.HTTPBadRequest(str(e))
 
+<<<<<<< heat/api/openstack/v1/software_configs.py
     def _extract_int_param(self, name, value,
                            allow_zero=True, allow_negative=False):
         try:
@@ -51,6 +52,8 @@ class SoftwareConfigController(object):
         except ValueError as e:
             raise exc.HTTPBadRequest(str(e))
 
+=======
+>>>>>>> heat/api/openstack/v1/software_configs.py
     def _index(self, req, use_admin_cnxt=False):
         param_types = {
             'limit': util.PARAM_TYPE_SINGLE,
@@ -58,10 +61,13 @@ class SoftwareConfigController(object):
         }
         params = util.get_allowed_params(req.params, param_types)
 
+<<<<<<< heat/api/openstack/v1/software_configs.py
         key = rpc_api.PARAM_LIMIT
         if key in params:
             params[key] = self._extract_int_param(key, params[key])
 
+=======
+>>>>>>> heat/api/openstack/v1/software_configs.py
         if use_admin_cnxt:
             cnxt = context.get_admin_context()
         else:
diff --git a/heat/api/openstack/v1/stacks.py b/heat/api/openstack/v1/stacks.py
index 80b8e90..e899831 100644
--- a/heat/api/openstack/v1/stacks.py
+++ b/heat/api/openstack/v1/stacks.py
@@ -166,6 +166,7 @@ class InstantiationData(object):
         params = self.data.items()
         return dict((k, v) for k, v in params if k not in self.PARAMS)
 
+<<<<<<< heat/api/openstack/v1/stacks.py
     def no_change(self):
         assert self.patch
         return ((self.template() is None) and
@@ -177,13 +178,19 @@ class InstantiationData(object):
                 (not any(k != rpc_api.PARAM_EXISTING
                          for k in self.args().keys())))
 
+=======
+>>>>>>> heat/api/openstack/v1/stacks.py
 
 class StackController(object):
     """WSGI controller for stacks resource in Heat v1 API.
 
     Implements the API actions.
     """
+<<<<<<< heat/api/openstack/v1/stacks.py
     # Define request scope (must match what is in policy.yaml or policies in
+=======
+    # Define request scope (must match what is in policy.json or policies in
+>>>>>>> heat/api/openstack/v1/stacks.py
     # code)
     REQUEST_SCOPE = 'stacks'
 
@@ -507,8 +514,12 @@ class StackController(object):
 
         raise exc.HTTPAccepted()
 
+<<<<<<< heat/api/openstack/v1/stacks.py
     @util.no_policy_enforce
     @util._identified_stack
+=======
+    @util.registered_identified_stack
+>>>>>>> heat/api/openstack/v1/stacks.py
     def update_patch(self, req, identity, body):
         """Update an existing stack with a new template.
 
@@ -516,6 +527,7 @@ class StackController(object):
         Add the flag patch to the args so the engine code can distinguish
         """
         data = InstantiationData(body, patch=True)
+<<<<<<< heat/api/openstack/v1/stacks.py
         _target = {"project_id": req.context.tenant_id}
 
         policy_act = 'update_no_change' if data.no_change() else 'update_patch'
@@ -527,6 +539,8 @@ class StackController(object):
             is_registered_policy=True)
         if not allowed:
             raise exc.HTTPForbidden()
+=======
+>>>>>>> heat/api/openstack/v1/stacks.py
 
         args = self.prepare_args(data, is_update=True)
         self.rpc_client.update_stack(
diff --git a/heat/api/openstack/v1/util.py b/heat/api/openstack/v1/util.py
index 46ae955..1f7cc40 100644
--- a/heat/api/openstack/v1/util.py
+++ b/heat/api/openstack/v1/util.py
@@ -29,17 +29,24 @@ def registered_policy_enforce(handler):
     """
     @functools.wraps(handler)
     def handle_stack_method(controller, req, tenant_id, **kwargs):
+<<<<<<< heat/api/openstack/v1/util.py
         _target = {"project_id": tenant_id}
 
         if req.context.tenant_id != tenant_id and not (
                 req.context.is_admin or
                 req.context.system_scope == all):
+=======
+        if req.context.tenant_id != tenant_id and not req.context.is_admin:
+>>>>>>> heat/api/openstack/v1/util.py
             raise exc.HTTPForbidden()
         allowed = req.context.policy.enforce(
             context=req.context,
             action=handler.__name__,
             scope=controller.REQUEST_SCOPE,
+<<<<<<< heat/api/openstack/v1/util.py
             target=_target,
+=======
+>>>>>>> heat/api/openstack/v1/util.py
             is_registered_policy=True)
         if not allowed:
             raise exc.HTTPForbidden()
@@ -48,6 +55,7 @@ def registered_policy_enforce(handler):
     return handle_stack_method
 
 
+<<<<<<< heat/api/openstack/v1/util.py
 def no_policy_enforce(handler):
     """Decorator that does *not* enforce policies.
 
@@ -66,6 +74,8 @@ def no_policy_enforce(handler):
     return handle_stack_method
 
 
+=======
+>>>>>>> heat/api/openstack/v1/util.py
 def registered_identified_stack(handler):
     """Decorator that passes a stack identifier instead of path components.
 
diff --git a/heat/cmd/status.py b/heat/cmd/status.py
index 4bf48d0..6c66570 100644
--- a/heat/cmd/status.py
+++ b/heat/cmd/status.py
@@ -15,7 +15,10 @@
 import sys
 
 from oslo_config import cfg
+<<<<<<< heat/cmd/status.py
 from oslo_upgradecheck import common_checks
+=======
+>>>>>>> heat/cmd/status.py
 from oslo_upgradecheck import upgradecheck
 
 from heat.common.i18n import _
@@ -29,6 +32,14 @@ class Checks(upgradecheck.UpgradeCommands):
     and added to _upgrade_checks tuple.
     """
 
+<<<<<<< heat/cmd/status.py
+=======
+    def _check_placeholder(self):
+        # This is just a placeholder for upgrade checks, it should be
+        # removed when the actual checks are added
+        return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+
+>>>>>>> heat/cmd/status.py
     # The format of the check functions is to return an
     # oslo_upgradecheck.upgradecheck.Result
     # object with the appropriate
@@ -37,8 +48,13 @@ class Checks(upgradecheck.UpgradeCommands):
     # in the returned Result's "details" attribute. The
     # summary will be rolled up at the end of the check() method.
     _upgrade_checks = (
+<<<<<<< heat/cmd/status.py
         (_('Policy File JSON to YAML Migration'),
          (common_checks.check_policy_json, {'conf': cfg.CONF})),
+=======
+        # In the future there should be some real checks added here
+        (_('Placeholder'), _check_placeholder),
+>>>>>>> heat/cmd/status.py
     )
 
 
diff --git a/heat/common/auth_password.py b/heat/common/auth_password.py
index 8bfe2be..030822d 100644
--- a/heat/common/auth_password.py
+++ b/heat/common/auth_password.py
@@ -40,16 +40,28 @@ class KeystonePasswordAuthProtocol(object):
         """Authenticate incoming request."""
         username = env.get('HTTP_X_AUTH_USER')
         password = env.get('HTTP_X_AUTH_KEY')
+<<<<<<< heat/common/auth_password.py
         # Determine project id from path.
         project_id = env.get('PATH_INFO').split('/')[1]
         auth_url = env.get('HTTP_X_AUTH_URL')
         user_domain_id = env.get('HTTP_X_USER_DOMAIN_ID')
         if not project_id:
+=======
+        # Determine tenant id from path.
+        tenant = env.get('PATH_INFO').split('/')[1]
+        auth_url = env.get('HTTP_X_AUTH_URL')
+        user_domain_id = env.get('HTTP_X_USER_DOMAIN_ID')
+        if not tenant:
+>>>>>>> heat/common/auth_password.py
             return self._reject_request(env, start_response, auth_url)
         try:
             ctx = context.RequestContext(username=username,
                                          password=password,
+<<<<<<< heat/common/auth_password.py
                                          project_id=project_id,
+=======
+                                         tenant=tenant,
+>>>>>>> heat/common/auth_password.py
                                          auth_url=auth_url,
                                          user_domain_id=user_domain_id,
                                          is_admin=False)
diff --git a/heat/common/config.py b/heat/common/config.py
index b96da54..ce36584 100644
--- a/heat/common/config.py
+++ b/heat/common/config.py
@@ -16,10 +16,15 @@ import os
 
 from eventlet.green import socket
 from oslo_config import cfg
+<<<<<<< heat/common/config.py
 from oslo_db import options as oslo_db_ops
 from oslo_log import log as logging
 from oslo_middleware import cors
 from oslo_policy import opts as policy_opts
+=======
+from oslo_log import log as logging
+from oslo_middleware import cors
+>>>>>>> heat/common/config.py
 from osprofiler import opts as profiler
 
 from heat.common import exception
@@ -47,6 +52,15 @@ service_opts = [
                       'keystone catalog')),
     cfg.StrOpt('heat_waitcondition_server_url',
                help=_('URL of the Heat waitcondition server.')),
+<<<<<<< heat/common/config.py
+=======
+    cfg.StrOpt('heat_watch_server_url',
+               default="",
+               deprecated_for_removal=True,
+               deprecated_reason='Heat CloudWatch Service has been removed.',
+               deprecated_since='10.0.0',
+               help=_('URL of the Heat CloudWatch server.')),
+>>>>>>> heat/common/config.py
     cfg.StrOpt('instance_connection_is_secure',
                default="0",
                help=_('Instance connection to CFN/CW API via https.')),
@@ -59,7 +73,11 @@ service_opts = [
     cfg.StrOpt('region_name_for_shared_services',
                help=_('Region name for shared services endpoints.')),
     cfg.ListOpt('shared_services_types',
+<<<<<<< heat/common/config.py
                 default=['image', 'volume', 'volumev3'],
+=======
+                default=['image', 'volume', 'volumev2'],
+>>>>>>> heat/common/config.py
                 help=_('The shared services located in the other region.'
                        'Needs region_name_for_shared_services option to '
                        'be set for this to take effect.')),
@@ -149,9 +167,15 @@ engine_opts = [
                help=_('Maximum resources allowed per top-level stack. '
                       '-1 stands for unlimited.')),
     cfg.IntOpt('max_stacks_per_tenant',
+<<<<<<< heat/common/config.py
                default=512,
                help=_('Maximum number of stacks any one tenant may have '
                       'active at one time. -1 stands for unlimited.')),
+=======
+               default=100,
+               help=_('Maximum number of stacks any one tenant may have'
+                      ' active at one time.')),
+>>>>>>> heat/common/config.py
     cfg.IntOpt('action_retry_limit',
                default=5,
                help=_('Number of times to retry to bring a '
@@ -212,6 +236,15 @@ engine_opts = [
                default=2,
                help=_('RPC timeout for the engine liveness check that is used'
                       ' for stack locking.')),
+<<<<<<< heat/common/config.py
+=======
+    cfg.BoolOpt('enable_cloud_watch_lite',
+                default=False,
+                deprecated_for_removal=True,
+                deprecated_reason='Heat CloudWatch Service has been removed.',
+                deprecated_since='10.0.0',
+                help=_('Enable the legacy OS::Heat::CWLiteAlarm resource.')),
+>>>>>>> heat/common/config.py
     cfg.BoolOpt('enable_stack_abandon',
                 default=False,
                 help=_('Enable the preview Stack Abandon feature.')),
@@ -459,7 +492,10 @@ def list_opts():
     yield 'clients_keystone', keystone_client_opts
     yield 'clients_nova', client_http_log_debug_opts
     yield 'clients_cinder', client_http_log_debug_opts
+<<<<<<< heat/common/config.py
     yield oslo_db_ops.list_opts()[0]
+=======
+>>>>>>> heat/common/config.py
 
 
 cfg.CONF.register_group(paste_deploy_group)
@@ -585,7 +621,10 @@ def set_config_defaults():
                        'DELETE',
                        'PATCH']
     )
+<<<<<<< heat/common/config.py
     # TODO(gmann): Remove setting the default value of config policy_file
     # once oslo_policy change the default value to 'policy.yaml'.
     # https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
     policy_opts.set_defaults(cfg.CONF, 'policy.yaml')
+=======
+>>>>>>> heat/common/config.py
diff --git a/heat/common/context.py b/heat/common/context.py
index c72c836..a3ec9ed 100644
--- a/heat/common/context.py
+++ b/heat/common/context.py
@@ -91,12 +91,27 @@ class RequestContext(context.RequestContext):
         :param overwrite: Set to False to ensure that the greenthread local
             copy of the index is not overwritten.
         """
+<<<<<<< heat/common/context.py
         super(RequestContext, self).__init__(
             is_admin=is_admin, read_only=read_only,
             show_deleted=show_deleted, request_id=request_id,
             roles=roles, user_domain_id=user_domain_id,
             project_domain_id=project_domain_id,
             overwrite=overwrite, **kwargs)
+=======
+        if user_domain_id:
+            kwargs['user_domain'] = user_domain_id
+        if project_domain_id:
+            kwargs['project_domain'] = project_domain_id
+
+        super(RequestContext, self).__init__(is_admin=is_admin,
+                                             read_only=read_only,
+                                             show_deleted=show_deleted,
+                                             request_id=request_id,
+                                             roles=roles,
+                                             overwrite=overwrite,
+                                             **kwargs)
+>>>>>>> heat/common/context.py
 
         self.username = username
         self.password = password
@@ -155,8 +170,13 @@ class RequestContext(context.RequestContext):
         return self._clients
 
     def to_dict(self):
+<<<<<<< heat/common/context.py
         user_idt = u'{user} {project}'.format(user=self.user_id or '-',
                                               project=self.project_id or '-')
+=======
+        user_idt = u'{user} {tenant}'.format(user=self.user_id or '-',
+                                             tenant=self.project_id or '-')
+>>>>>>> heat/common/context.py
 
         return {'auth_token': self.auth_token,
                 'username': self.username,
@@ -179,8 +199,13 @@ class RequestContext(context.RequestContext):
                 'show_deleted': self.show_deleted,
                 'region_name': self.region_name,
                 'user_identity': user_idt,
+<<<<<<< heat/common/context.py
                 'user_domain_id': self.user_domain_id,
                 'project_domain_id': self.project_domain_id}
+=======
+                'user_domain': self.user_domain,
+                'project_domain': self.project_domain}
+>>>>>>> heat/common/context.py
 
     @classmethod
     def from_dict(cls, values):
@@ -201,8 +226,13 @@ class RequestContext(context.RequestContext):
             request_id=values.get('request_id'),
             show_deleted=values.get('show_deleted', False),
             region_name=values.get('region_name'),
+<<<<<<< heat/common/context.py
             user_domain_id=values.get('user_domain_id'),
             project_domain_id=values.get('project_domain_id')
+=======
+            user_domain_id=values.get('user_domain'),
+            project_domain_id=values.get('project_domain')
+>>>>>>> heat/common/context.py
         )
 
     def to_policy_values(self):
@@ -293,8 +323,13 @@ class StoredContext(RequestContext):
         auth_ref = self.auth_plugin.get_access(self.keystone_session)
 
         self.roles = auth_ref.role_names
+<<<<<<< heat/common/context.py
         self.user_domain_id = auth_ref.user_domain_id
         self.project_domain_id = auth_ref.project_domain_id
+=======
+        self.user_domain = auth_ref.user_domain_id
+        self.project_domain = auth_ref.project_domain_id
+>>>>>>> heat/common/context.py
 
     @property
     def roles(self):
@@ -307,24 +342,43 @@ class StoredContext(RequestContext):
         self._roles = roles
 
     @property
+<<<<<<< heat/common/context.py
     def user_domain_id(self):
+=======
+    def user_domain(self):
+>>>>>>> heat/common/context.py
         if not getattr(self, '_keystone_loaded', False):
             self._load_keystone_data()
         return self._user_domain_id
 
+<<<<<<< heat/common/context.py
     @user_domain_id.setter
     def user_domain_id(self, user_domain_id):
         self._user_domain_id = user_domain_id
 
     @property
     def project_domain_id(self):
+=======
+    @user_domain.setter
+    def user_domain(self, user_domain):
+        self._user_domain_id = user_domain
+
+    @property
+    def project_domain(self):
+>>>>>>> heat/common/context.py
         if not getattr(self, '_keystone_loaded', False):
             self._load_keystone_data()
         return self._project_domain_id
 
+<<<<<<< heat/common/context.py
     @project_domain_id.setter
     def project_domain_id(self, project_domain_id):
         self._project_domain_id = project_domain_id
+=======
+    @project_domain.setter
+    def project_domain(self, project_domain):
+        self._project_domain_id = project_domain
+>>>>>>> heat/common/context.py
 
 
 def get_admin_context(show_deleted=False):
@@ -353,8 +407,13 @@ class ContextMiddleware(wsgi.Middleware):
         username = None
         password = None
         aws_creds = None
+<<<<<<< heat/common/context.py
         user_domain_id = None
         project_domain_id = None
+=======
+        user_domain = None
+        project_domain = None
+>>>>>>> heat/common/context.py
 
         if headers.get('X-Auth-User') is not None:
             username = headers.get('X-Auth-User')
@@ -363,10 +422,17 @@ class ContextMiddleware(wsgi.Middleware):
             aws_creds = headers.get('X-Auth-EC2-Creds')
 
         if headers.get('X-User-Domain-Id') is not None:
+<<<<<<< heat/common/context.py
             user_domain_id = headers.get('X-User-Domain-Id')
 
         if headers.get('X-Project-Domain-Id') is not None:
             project_domain_id = headers.get('X-Project-Domain-Id')
+=======
+            user_domain = headers.get('X-User-Domain-Id')
+
+        if headers.get('X-Project-Domain-Id') is not None:
+            project_domain = headers.get('X-Project-Domain-Id')
+>>>>>>> heat/common/context.py
 
         project_name = headers.get('X-Project-Name')
         region_name = headers.get('X-Region-Name')
@@ -384,8 +450,13 @@ class ContextMiddleware(wsgi.Middleware):
             password=password,
             auth_url=auth_url,
             request_id=req_id,
+<<<<<<< heat/common/context.py
             user_domain_id=user_domain_id,
             project_domain_id=project_domain_id,
+=======
+            user_domain=user_domain,
+            project_domain=project_domain,
+>>>>>>> heat/common/context.py
             auth_token_info=token_info,
             region_name=region_name,
             auth_plugin=auth_plugin,
diff --git a/heat/common/environment_util.py b/heat/common/environment_util.py
index 4908028..89b4c0b 100644
--- a/heat/common/environment_util.py
+++ b/heat/common/environment_util.py
@@ -12,7 +12,10 @@
 #    under the License.
 import collections
 
+<<<<<<< heat/common/environment_util.py
 from oslo_log import log as logging
+=======
+>>>>>>> heat/common/environment_util.py
 from oslo_serialization import jsonutils
 
 from heat.common import environment_format as env_fmt
@@ -22,6 +25,7 @@ from heat.common.i18n import _
 ALLOWED_PARAM_MERGE_STRATEGIES = (OVERWRITE, MERGE, DEEP_MERGE) = (
     'overwrite', 'merge', 'deep_merge')
 
+<<<<<<< heat/common/environment_util.py
 LOG = logging.getLogger(__name__)
 
 
@@ -29,15 +33,24 @@ def get_param_merge_strategy(merge_strategies, param_key,
                              available_strategies=None):
     if not available_strategies:
         available_strategies = {}
+=======
+
+def get_param_merge_strategy(merge_strategies, param_key):
+>>>>>>> heat/common/environment_util.py
 
     if merge_strategies is None:
         return OVERWRITE
 
     env_default = merge_strategies.get('default', OVERWRITE)
+<<<<<<< heat/common/environment_util.py
     merge_strategy = merge_strategies.get(
         param_key, available_strategies.get(
             param_key, env_default))
 
+=======
+
+    merge_strategy = merge_strategies.get(param_key, env_default)
+>>>>>>> heat/common/environment_util.py
     if merge_strategy in ALLOWED_PARAM_MERGE_STRATEGIES:
         return merge_strategy
 
@@ -65,10 +78,17 @@ def merge_map(old, new, deep_merge=False):
         if v is not None:
             if not deep_merge:
                 old[k] = v
+<<<<<<< heat/common/environment_util.py
             elif isinstance(v, collections.abc.Mapping):
                 old_v = old.get(k)
                 old[k] = merge_map(old_v, v, deep_merge) if old_v else v
             elif (isinstance(v, collections.abc.Sequence) and
+=======
+            elif isinstance(v, collections.Mapping):
+                old_v = old.get(k)
+                old[k] = merge_map(old_v, v, deep_merge) if old_v else v
+            elif (isinstance(v, collections.Sequence) and
+>>>>>>> heat/common/environment_util.py
                     not isinstance(v, str)):
                 old_v = old.get(k)
                 old[k] = merge_list(old_v, v) if old_v else v
@@ -87,7 +107,11 @@ def parse_param(p_val, p_schema):
                 p_val = jsonutils.dumps(p_val)
             if p_val:
                 return jsonutils.loads(p_val)
+<<<<<<< heat/common/environment_util.py
         elif not isinstance(p_val, collections.abc.Sequence):
+=======
+        elif not isinstance(p_val, collections.Sequence):
+>>>>>>> heat/common/environment_util.py
             raise ValueError()
     except (ValueError, TypeError) as err:
         msg = _("Invalid parameter in environment %s.") % str(err)
@@ -113,19 +137,34 @@ def merge_parameters(old, new, param_schemata, strategies_in_file,
             raise exception.InvalidMergeStrategyForParam(strategy=MERGE,
                                                          param=p_key)
 
+<<<<<<< heat/common/environment_util.py
+=======
+    new_strategies = {}
+
+    if not old:
+        return new, new_strategies
+
+>>>>>>> heat/common/environment_util.py
     for key, value in new.items():
         # if key not in param_schemata ignore it
         if key in param_schemata and value is not None:
             param_merge_strategy = get_param_merge_strategy(
+<<<<<<< heat/common/environment_util.py
                 strategies_in_file, key, available_strategies)
             if key not in available_strategies:
                 available_strategies[key] = param_merge_strategy
+=======
+                strategies_in_file, key)
+            if key not in available_strategies:
+                new_strategies[key] = param_merge_strategy
+>>>>>>> heat/common/environment_util.py
 
             elif param_merge_strategy != available_strategies[key]:
                 raise exception.ConflictingMergeStrategyForParam(
                     strategy=param_merge_strategy,
                     param=key, env_file=env_file)
 
+<<<<<<< heat/common/environment_util.py
     if not old:
         return new
 
@@ -135,10 +174,14 @@ def merge_parameters(old, new, param_schemata, strategies_in_file,
             param_merge_strategy = available_strategies[key]
             if param_merge_strategy == DEEP_MERGE:
                 LOG.debug("Deep Merging Parameter: %s", key)
+=======
+            if param_merge_strategy == DEEP_MERGE:
+>>>>>>> heat/common/environment_util.py
                 param_merge(key, value,
                             param_schemata[key],
                             deep_merge=True)
             elif param_merge_strategy == MERGE:
+<<<<<<< heat/common/environment_util.py
                 LOG.debug("Merging Parameter: %s", key)
                 param_merge(key, value, param_schemata[key])
             else:
@@ -146,6 +189,13 @@ def merge_parameters(old, new, param_schemata, strategies_in_file,
                 old[key] = value
 
     return old
+=======
+                param_merge(key, value, param_schemata[key])
+            else:
+                old[key] = value
+
+    return old, new_strategies
+>>>>>>> heat/common/environment_util.py
 
 
 def merge_environments(environment_files, files,
@@ -183,10 +233,18 @@ def merge_environments(environment_files, files,
             if section_value:
                 if section_key in (env_fmt.PARAMETERS,
                                    env_fmt.PARAMETER_DEFAULTS):
+<<<<<<< heat/common/environment_util.py
                     params[section_key] = merge_parameters(
                         params[section_key], section_value,
                         param_schemata, strategies_in_file,
                         available_strategies, filename)
+=======
+                    params[section_key], new_strategies = merge_parameters(
+                        params[section_key], section_value,
+                        param_schemata, strategies_in_file,
+                        available_strategies, filename)
+                    available_strategies.update(new_strategies)
+>>>>>>> heat/common/environment_util.py
                 else:
                     params[section_key] = merge_map(params[section_key],
                                                     section_value)
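
Note: the conflicted merge_parameters/merge_map code above implements the three parameter merge strategies declared at the top of this file. A minimal, purely illustrative sketch of what those strategies mean when two environment files supply the same parameter (variable names invented; this is not Heat's implementation):

    # Two environment files supply the same parameters.
    old = {'flavors': ['m1.small'], 'tags': {'env': 'dev'}}
    new = {'flavors': ['m1.large'], 'tags': {'owner': 'qa'}}

    # overwrite: the later file's value simply replaces the earlier one.
    flavors_overwrite = new['flavors']                  # ['m1.large']

    # merge: sequences (and strings) from both files are concatenated.
    flavors_merge = old['flavors'] + new['flavors']     # ['m1.small', 'm1.large']

    # deep_merge: mappings are combined key by key instead of replaced.
    tags_deep_merge = {**old['tags'], **new['tags']}    # {'env': 'dev', 'owner': 'qa'}
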
diff --git a/heat/common/exception.py b/heat/common/exception.py
index c4184a6..45f2bfc 100644
--- a/heat/common/exception.py
+++ b/heat/common/exception.py
@@ -568,7 +568,10 @@ class InvalidTemplateVersions(HeatException):
 
 class UnableToAutoAllocateNetwork(HeatException):
     msg_fmt = _('Unable to automatically allocate a network: %(message)s')
+<<<<<<< heat/common/exception.py
 
 
 class CircularDependencyException(HeatException):
     msg_fmt = _("Circular Dependency Found: %(cycle)s")
+=======
+>>>>>>> heat/common/exception.py
diff --git a/heat/common/identifier.py b/heat/common/identifier.py
index 02628b4..3d1fe78 100644
--- a/heat/common/identifier.py
+++ b/heat/common/identifier.py
@@ -20,7 +20,11 @@ from urllib import parse as urlparse
 from heat.common.i18n import _
 
 
+<<<<<<< heat/common/identifier.py
 class HeatIdentifier(collections.abc.Mapping):
+=======
+class HeatIdentifier(collections.Mapping):
+>>>>>>> heat/common/identifier.py
 
     FIELDS = (
         TENANT, STACK_NAME, STACK_ID, PATH
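
Note: this hunk and many of the later ones differ only in whether the abstract base classes come from collections or collections.abc. For reference, a standalone illustration (not part of the patch): the deprecated top-level aliases were removed in Python 3.10, so only the collections.abc spelling keeps working.

    import collections.abc

    # These checks behave the same on every supported Python 3 release.
    assert isinstance({'key': 'value'}, collections.abc.Mapping)
    assert isinstance(['a', 'b'], collections.abc.Sequence)

    # The old aliases only emitted a DeprecationWarning before Python 3.10;
    # since 3.10 the next line raises AttributeError:
    # isinstance({'key': 'value'}, collections.Mapping)
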
diff --git a/heat/common/lifecycle_plugin_utils.py b/heat/common/lifecycle_plugin_utils.py
index 4874588..87f38ce 100644
--- a/heat/common/lifecycle_plugin_utils.py
+++ b/heat/common/lifecycle_plugin_utils.py
@@ -30,7 +30,11 @@ def get_plug_point_class_instances():
 
     The list of class instances is sorted using get_ordinal methods
     on the plug point classes. If class1.ordinal() < class2.ordinal(),
+<<<<<<< heat/common/lifecycle_plugin_utils.py
     then class1 will be before class2 in the list.
+=======
+    then class1 will be before before class2 in the list.
+>>>>>>> heat/common/lifecycle_plugin_utils.py
     """
     global pp_class_instances
     if pp_class_instances is None:
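
Note: the docstring above states the ordering contract for plug point classes: instances are sorted by get_ordinal(), lower values first. A toy sketch of that rule with invented class names (not real Heat plug points):

    class EarlyPlugPoint(object):
        @staticmethod
        def get_ordinal():
            return 10

    class LatePlugPoint(object):
        @staticmethod
        def get_ordinal():
            return 20

    instances = [LatePlugPoint(), EarlyPlugPoint()]
    instances.sort(key=lambda pp: pp.get_ordinal())
    # EarlyPlugPoint (ordinal 10) now precedes LatePlugPoint (ordinal 20).
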
diff --git a/heat/common/messaging.py b/heat/common/messaging.py
index 8c93070..61bce0b 100644
--- a/heat/common/messaging.py
+++ b/heat/common/messaging.py
@@ -136,8 +136,13 @@ def get_rpc_client(**kwargs):
     target = oslo_messaging.Target(**kwargs)
     serializer = RequestContextSerializer(
         oslo_messaging.JsonPayloadSerializer())
+<<<<<<< heat/common/messaging.py
     return oslo_messaging.get_rpc_client(
         TRANSPORT, target, serializer=serializer)
+=======
+    return oslo_messaging.RPCClient(TRANSPORT, target,
+                                    serializer=serializer)
+>>>>>>> heat/common/messaging.py
 
 
 def get_notifier(publisher_id):
diff --git a/heat/common/pluginutils.py b/heat/common/pluginutils.py
index 1fc9618..5141e22 100644
--- a/heat/common/pluginutils.py
+++ b/heat/common/pluginutils.py
@@ -18,6 +18,7 @@ LOG = logging.getLogger(__name__)
 
 
 def log_fail_msg(manager, entrypoint, exception):
+<<<<<<< heat/common/pluginutils.py
     # importlib.metadata in Python 3.8 is quite old and the EntryPoint class
+    # does not have the module attribute. This logic is required to
+    # work around the AttributeError raised by that old implementation.
@@ -32,3 +33,11 @@ def log_fail_msg(manager, entrypoint, exception):
                     'Not using %(name)s.',
                     {'message': getattr(exception, 'message', str(exception)),
                      'name': entrypoint.name})
+=======
+    LOG.warning('Encountered exception while loading %(module_name)s: '
+                '"%(message)s". Not using %(name)s.',
+                {'module_name': entrypoint.module,
+                 'message': getattr(exception, 'message',
+                                    str(exception)),
+                 'name': entrypoint.name})
+>>>>>>> heat/common/pluginutils.py
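
Note: the upper branch's comment refers to importlib.metadata's EntryPoint gaining its module property only in Python 3.9. A hedged sketch of the defensive lookup that comment describes (the helper name is invented):

    def entry_point_module(ep):
        # Prefer the ``module`` property (Python >= 3.9); on 3.8 fall back
        # to parsing the "package.module:attribute" value string.
        module = getattr(ep, 'module', None)
        if module is not None:
            return module
        return ep.value.split(':', 1)[0]
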
diff --git a/heat/common/policy.py b/heat/common/policy.py
index bfcadba..4e15e0f 100644
--- a/heat/common/policy.py
+++ b/heat/common/policy.py
@@ -19,7 +19,10 @@
 
 from oslo_config import cfg
 from oslo_log import log as logging
+<<<<<<< heat/common/policy.py
 from oslo_policy import opts
+=======
+>>>>>>> heat/common/policy.py
 from oslo_policy import policy
 from oslo_utils import excutils
 
@@ -34,12 +37,15 @@ LOG = logging.getLogger(__name__)
 DEFAULT_RULES = policy.Rules.from_dict({'default': '!'})
 DEFAULT_RESOURCE_RULES = policy.Rules.from_dict({'default': '@'})
 
+<<<<<<< heat/common/policy.py
 # TODO(gmann): Remove setting the default value of config policy_file
 # once oslo_policy changes the default value to 'policy.yaml'.
 # https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
 DEFAULT_POLICY_FILE = 'policy.yaml'
 opts.set_defaults(CONF, DEFAULT_POLICY_FILE)
 
+=======
+>>>>>>> heat/common/policy.py
 ENFORCER = None
 
 
@@ -55,9 +61,12 @@ class Enforcer(object):
             CONF, default_rule=default_rule, policy_file=policy_file)
         self.log_not_registered = True
 
+<<<<<<< heat/common/policy.py
         # TODO(ramishra) Remove this once the deprecated rules are removed.
         self.enforcer.suppress_deprecation_warnings = True
 
+=======
+>>>>>>> heat/common/policy.py
         # register rules
         self.enforcer.register_defaults(policies.list_rules())
         self.file_rules = self.enforcer.file_rules
@@ -188,6 +197,11 @@ class ResourceEnforcer(Enforcer):
 
     def enforce_stack(self, stack, scope=None, target=None,
                       is_registered_policy=False):
+<<<<<<< heat/common/policy.py
         for res_type in stack.defn.all_resource_types():
             self.enforce(stack.context, res_type, scope=scope, target=target,
+=======
+        for res in stack.resources.values():
+            self.enforce(stack.context, res.type(), scope=scope, target=target,
+>>>>>>> heat/common/policy.py
                          is_registered_policy=is_registered_policy)
diff --git a/heat/common/wsgi.py b/heat/common/wsgi.py
index 42126b7..50472b2 100644
--- a/heat/common/wsgi.py
+++ b/heat/common/wsgi.py
@@ -130,6 +130,71 @@ cfg.CONF.register_group(api_cfn_group)
 cfg.CONF.register_opts(api_cfn_opts,
                        group=api_cfn_group)
 
+<<<<<<< heat/common/wsgi.py
+=======
+api_cw_opts = [
+    cfg.IPOpt('bind_host', default='0.0.0.0',
+              help=_('Address to bind the server. Useful when '
+                     'selecting a particular network interface.'),
+              deprecated_group='DEFAULT',
+              deprecated_for_removal=True,
+              deprecated_reason='Heat CloudWatch API has been removed.',
+              deprecated_since='10.0.0'),
+    cfg.PortOpt('bind_port', default=8003,
+                help=_('The port on which the server will listen.'),
+                deprecated_group='DEFAULT',
+                deprecated_for_removal=True,
+                deprecated_reason='Heat CloudWatch API has been removed.',
+                deprecated_since='10.0.0'),
+    cfg.IntOpt('backlog', default=4096,
+               help=_("Number of backlog requests "
+                      "to configure the socket with."),
+               deprecated_group='DEFAULT',
+               deprecated_for_removal=True,
+               deprecated_reason='Heat CloudWatch API has been removed.',
+               deprecated_since='10.0.0'),
+    cfg.StrOpt('cert_file',
+               help=_("Location of the SSL certificate file "
+                      "to use for SSL mode."),
+               deprecated_group='DEFAULT',
+               deprecated_for_removal=True,
+               deprecated_reason='Heat CloudWatch API has been removed.',
+               deprecated_since='10.0.0'),
+    cfg.StrOpt('key_file',
+               help=_("Location of the SSL key file to use "
+                      "for enabling SSL mode."),
+               deprecated_group='DEFAULT',
+               deprecated_for_removal=True,
+               deprecated_reason='Heat CloudWatch API has been removed.',
+               deprecated_since='10.0.0'),
+    cfg.IntOpt('workers', min=0, default=1,
+               help=_("Number of workers for Heat service."),
+               deprecated_group='DEFAULT',
+               deprecated_for_removal=True,
+               deprecated_reason='Heat CloudWatch API has been removed.',
+               deprecated_since='10.0.0'),
+    cfg.IntOpt('max_header_line', default=16384,
+               help=_('Maximum line size of message headers to be accepted. '
+                      'max_header_line may need to be increased when using '
+                      'large tokens (typically those generated by the '
+                      'Keystone v3 API with big service catalogs.)'),
+               deprecated_for_removal=True,
+               deprecated_reason='Heat CloudWatch API has been removed.',
+               deprecated_since='10.0.0'),
+    cfg.IntOpt('tcp_keepidle', default=600,
+               help=_('The value for the socket option TCP_KEEPIDLE.  This is '
+                      'the time in seconds that the connection must be idle '
+                      'before TCP starts sending keepalive probes.'),
+               deprecated_for_removal=True,
+               deprecated_reason='Heat CloudWatch API has been removed.',
+               deprecated_since='10.0.0')
+]
+api_cw_group = cfg.OptGroup('heat_api_cloudwatch')
+cfg.CONF.register_group(api_cw_group)
+cfg.CONF.register_opts(api_cw_opts,
+                       group=api_cw_group)
+
+>>>>>>> heat/common/wsgi.py
 wsgi_elt_opts = [
     cfg.BoolOpt('wsgi_keep_alive',
                 default=True,
@@ -156,6 +221,10 @@ def list_opts():
     yield None, [json_size_opt]
     yield 'heat_api', api_opts
     yield 'heat_api_cfn', api_cfn_opts
+<<<<<<< heat/common/wsgi.py
+=======
+    yield 'heat_api_cloudwatch', api_cw_opts
+>>>>>>> heat/common/wsgi.py
     yield 'eventlet_opts', wsgi_elt_opts
 
 
@@ -200,7 +269,11 @@ def get_socket(conf, default_port):
                                    backlog=conf.backlog,
                                    family=address_family)
         except socket.error as err:
+<<<<<<< heat/common/wsgi.py
             if err.errno != errno.EADDRINUSE:
+=======
+            if err.args[0] != errno.EADDRINUSE:
+>>>>>>> heat/common/wsgi.py
                 raise
             eventlet.sleep(0.1)
     if not sock:
@@ -521,7 +594,11 @@ class Server(object):
                 keepalive=cfg.CONF.eventlet_opts.wsgi_keep_alive,
                 socket_timeout=socket_timeout)
         except socket.error as err:
+<<<<<<< heat/common/wsgi.py
             if err.errno != errno.EINVAL:
+=======
+            if err[0] != errno.EINVAL:
+>>>>>>> heat/common/wsgi.py
                 raise
         self.pool.waitall()
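
Note: the two errno hunks in this file differ only in how the error code is read off a socket.error. On Python 3, socket.error is simply OSError and exceptions are not subscriptable, so the .errno attribute is the spelling that still works. A small self-contained illustration (not part of the patch):

    import errno
    import socket

    holder = socket.socket()
    holder.bind(('127.0.0.1', 0))          # grab an ephemeral port
    second = socket.socket()
    try:
        second.bind(holder.getsockname())  # same address: rejected
    except OSError as err:                 # socket.error is OSError on Python 3
        assert err.errno == errno.EADDRINUSE
    finally:
        second.close()
        holder.close()
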
 
diff --git a/heat/db/sqlalchemy/api.py b/heat/db/sqlalchemy/api.py
index df627d4..1609d99 100644
--- a/heat/db/sqlalchemy/api.py
+++ b/heat/db/sqlalchemy/api.py
@@ -49,12 +49,15 @@ CONF = cfg.CONF
 CONF.import_opt('hidden_stack_tags', 'heat.common.config')
 CONF.import_opt('max_events_per_stack', 'heat.common.config')
 CONF.import_group('profiler', 'heat.common.config')
+<<<<<<< heat/db/sqlalchemy/api.py
 CONF.import_opt('db_max_retries', 'oslo_db.options', group='database')
 CONF.import_opt('db_retry_interval', 'oslo_db.options', group='database')
 CONF.import_opt(
     'db_inc_retry_interval', 'oslo_db.options', group='database')
 CONF.import_opt(
     'db_max_retry_interval', 'oslo_db.options', group='database')
+=======
+>>>>>>> heat/db/sqlalchemy/api.py
 
 options.set_defaults(CONF)
 
@@ -65,9 +68,13 @@ LOG = logging.getLogger(__name__)
 
 
 # TODO(sbaker): fix tests so that sqlite_fk=True can be passed to configure
+<<<<<<< heat/db/sqlalchemy/api.py
 # FIXME(stephenfin): we need to remove reliance on autocommit semantics ASAP
 # since it's not compatible with SQLAlchemy 2.0
 db_context.configure(__autocommit=True)
+=======
+db_context.configure()
+>>>>>>> heat/db/sqlalchemy/api.py
 
 
 def get_facade():
@@ -102,6 +109,7 @@ def retry_on_db_error(func):
     def try_func(context, *args, **kwargs):
         if (context.session.transaction is None or
                 not context.session.autocommit):
+<<<<<<< heat/db/sqlalchemy/api.py
             wrapped = oslo_db_api.wrap_db_retry(
                 max_retries=CONF.database.db_max_retries,
                 retry_on_deadlock=True,
@@ -109,6 +117,13 @@ def retry_on_db_error(func):
                 retry_interval=CONF.database.db_retry_interval,
                 inc_retry_interval=CONF.database.db_inc_retry_interval,
                 max_retry_interval=CONF.database.db_max_retry_interval)(func)
+=======
+            wrapped = oslo_db_api.wrap_db_retry(max_retries=3,
+                                                retry_on_deadlock=True,
+                                                retry_on_disconnect=True,
+                                                retry_interval=0.5,
+                                                inc_retry_interval=True)(func)
+>>>>>>> heat/db/sqlalchemy/api.py
             return wrapped(context, *args, **kwargs)
         else:
             try:
@@ -387,6 +402,7 @@ def resource_data_get_all(context, resource_id, data=None):
 
     for res in data:
         if res.redact:
+<<<<<<< heat/db/sqlalchemy/api.py
             try:
                 ret[res.key] = crypt.decrypt(res.decrypt_method, res.value)
                 continue
@@ -395,6 +411,11 @@ def resource_data_get_all(context, resource_id, data=None):
                               'for %(rid)s, ignoring.',
                               {'rkey': res.key, 'rid': resource_id})
         ret[res.key] = res.value
+=======
+            ret[res.key] = crypt.decrypt(res.decrypt_method, res.value)
+        else:
+            ret[res.key] = res.value
+>>>>>>> heat/db/sqlalchemy/api.py
     return ret
 
 
@@ -544,7 +565,11 @@ def resource_get_all_active_by_stack(context, stack_id):
 
     results = context.session.query(models.Resource).filter_by(
         stack_id=stack_id).filter(
+<<<<<<< heat/db/sqlalchemy/api.py
         models.Resource.id.notin_(subquery.scalar_subquery())
+=======
+        models.Resource.id.notin_(subquery.as_scalar())
+>>>>>>> heat/db/sqlalchemy/api.py
     ).options(orm.joinedload("data")).all()
 
     return dict((res.id, res) for res in results)
@@ -1146,7 +1171,11 @@ def _delete_event_rows(context, stack_id, limit):
         (ids, rsrc_prop_ids) = zip(*id_pairs)
         max_id = ids[-1]
         # delete the events
+<<<<<<< heat/db/sqlalchemy/api.py
         retval = session.query(models.Event).filter(
+=======
+        retval = session.query(models.Event.id).filter(
+>>>>>>> heat/db/sqlalchemy/api.py
             models.Event.id <= max_id).filter(
                 models.Event.stack_id == stack_id).delete()
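
Note: the retry_on_db_error hunk earlier in this file wraps database calls with oslo.db's retry decorator; one branch reads the knobs from the [database] config options while the other hard-codes them. For orientation, a hedged sketch of how that decorator is typically applied (the decorated function is hypothetical):

    from oslo_db import api as oslo_db_api

    @oslo_db_api.wrap_db_retry(max_retries=3,
                               retry_on_deadlock=True,
                               retry_on_disconnect=True,
                               retry_interval=0.5,
                               inc_retry_interval=True)
    def update_stack_row(context, stack_id, values):
        # Hypothetical DB write; deadlocks and disconnects are retried with
        # an increasing interval, up to max_retries attempts.
        raise NotImplementedError
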
 
diff --git a/heat/db/sqlalchemy/types.py b/heat/db/sqlalchemy/types.py
index d454024..3daea0c 100644
--- a/heat/db/sqlalchemy/types.py
+++ b/heat/db/sqlalchemy/types.py
@@ -21,9 +21,13 @@ loads = jsonutils.loads
 
 
 class LongText(types.TypeDecorator):
+<<<<<<< heat/db/sqlalchemy/types.py
 
     impl = types.Text
     cache_ok = True
+=======
+    impl = types.Text
+>>>>>>> heat/db/sqlalchemy/types.py
 
     def load_dialect_impl(self, dialect):
         if dialect.name == 'mysql':
@@ -34,8 +38,11 @@ class LongText(types.TypeDecorator):
 
 class Json(LongText):
 
+<<<<<<< heat/db/sqlalchemy/types.py
     cache_ok = True
 
+=======
+>>>>>>> heat/db/sqlalchemy/types.py
     def process_bind_param(self, value, dialect):
         return dumps(value)
 
@@ -46,9 +53,13 @@ class Json(LongText):
 
 
 class List(types.TypeDecorator):
+<<<<<<< heat/db/sqlalchemy/types.py
 
     impl = types.Text
     cache_ok = True
+=======
+    impl = types.Text
+>>>>>>> heat/db/sqlalchemy/types.py
 
     def load_dialect_impl(self, dialect):
         if dialect.name == 'mysql':
diff --git a/heat/db/sqlalchemy/utils.py b/heat/db/sqlalchemy/utils.py
index 5fff4f7..e3a61f0 100644
--- a/heat/db/sqlalchemy/utils.py
+++ b/heat/db/sqlalchemy/utils.py
@@ -53,7 +53,11 @@ def clone_table(name, parent, meta, newcols=None, ignorecols=None,
 
         return False
 
+<<<<<<< heat/db/sqlalchemy/utils.py
     constraints = [c.copy(target_table=new_table) for c in parent.constraints
+=======
+    constraints = [c.copy() for c in parent.constraints
+>>>>>>> heat/db/sqlalchemy/utils.py
                    if c.name not in ignorecons
                    if not _is_ignorable(c)]
 
diff --git a/heat/engine/api.py b/heat/engine/api.py
index 35edaff..c0b73a2 100644
--- a/heat/engine/api.py
+++ b/heat/engine/api.py
@@ -292,7 +292,11 @@ def format_resource_attributes(resource, with_attr=None):
     if 'show' in resolver:
         show_attr = resolve('show', resolver)
         # check if 'show' resolved to dictionary. so it's not None
+<<<<<<< heat/engine/api.py
         if isinstance(show_attr, collections.abc.Mapping):
+=======
+        if isinstance(show_attr, collections.Mapping):
+>>>>>>> heat/engine/api.py
             for a in with_attr:
                 if a not in show_attr:
                     show_attr[a] = resolve(a, resolver)
diff --git a/heat/engine/attributes.py b/heat/engine/attributes.py
index eb2c566..f01a4c1 100644
--- a/heat/engine/attributes.py
+++ b/heat/engine/attributes.py
@@ -138,7 +138,11 @@ BASE_ATTRIBUTES = (SHOW_ATTR, ) = ('show', )
 ALL_ATTRIBUTES = '*'
 
 
+<<<<<<< heat/engine/attributes.py
 class Attributes(collections.abc.Mapping):
+=======
+class Attributes(collections.Mapping):
+>>>>>>> heat/engine/attributes.py
     """Models a collection of Resource Attributes."""
 
     def __init__(self, res_name, schema, resolver):
@@ -212,14 +216,22 @@ class Attributes(collections.abc.Mapping):
                             {'name': attrib.name,
                              'att_type': attrib.schema.STRING})
         elif attrib.schema.type == attrib.schema.LIST:
+<<<<<<< heat/engine/attributes.py
             if (not isinstance(value, collections.abc.Sequence)
+=======
+            if (not isinstance(value, collections.Sequence)
+>>>>>>> heat/engine/attributes.py
                     or isinstance(value, str)):
                 LOG.warning("Attribute %(name)s is not of type "
                             "%(att_type)s",
                             {'name': attrib.name,
                              'att_type': attrib.schema.LIST})
         elif attrib.schema.type == attrib.schema.MAP:
+<<<<<<< heat/engine/attributes.py
             if not isinstance(value, collections.abc.Mapping):
+=======
+            if not isinstance(value, collections.Mapping):
+>>>>>>> heat/engine/attributes.py
                 LOG.warning("Attribute %(name)s is not of type "
                             "%(att_type)s",
                             {'name': attrib.name,
@@ -307,8 +319,13 @@ def select_from_attribute(attribute_value, path):
     :returns: the selected attribute component value.
     """
     def get_path_component(collection, key):
+<<<<<<< heat/engine/attributes.py
         if not isinstance(collection, (collections.abc.Mapping,
                                        collections.abc.Sequence)):
+=======
+        if not isinstance(collection, (collections.Mapping,
+                                       collections.Sequence)):
+>>>>>>> heat/engine/attributes.py
             raise TypeError(_("Can't traverse attribute path"))
 
         if not isinstance(key, (str, int)):
diff --git a/heat/engine/cfn/functions.py b/heat/engine/cfn/functions.py
index f037eec..6a92edb 100644
--- a/heat/engine/cfn/functions.py
+++ b/heat/engine/cfn/functions.py
@@ -168,7 +168,11 @@ class Select(function.Function):
                             'err': json_ex}
                 raise ValueError(_('"%(fn_name)s": %(err)s') % fmt_data)
 
+<<<<<<< heat/engine/cfn/functions.py
         if isinstance(strings, collections.abc.Mapping):
+=======
+        if isinstance(strings, collections.Mapping):
+>>>>>>> heat/engine/cfn/functions.py
             if not isinstance(index, str):
                 raise TypeError(_('Index to "%s" must be a string') %
                                 self.fn_name)
@@ -179,7 +183,11 @@ class Select(function.Function):
         except (ValueError, TypeError):
             pass
 
+<<<<<<< heat/engine/cfn/functions.py
         if (isinstance(strings, collections.abc.Sequence) and
+=======
+        if (isinstance(strings, collections.Sequence) and
+>>>>>>> heat/engine/cfn/functions.py
                 not isinstance(strings, str)):
             if not isinstance(index, int):
                 raise TypeError(_('Index to "%s" must be an integer') %
@@ -229,7 +237,11 @@ class Split(function.Function):
         fmt_data = {'fn_name': self.fn_name,
                     'example': example}
 
+<<<<<<< heat/engine/cfn/functions.py
         if isinstance(self.args, (str, collections.abc.Mapping)):
+=======
+        if isinstance(self.args, (str, collections.Mapping)):
+>>>>>>> heat/engine/cfn/functions.py
             raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
                               'should be: %(example)s') % fmt_data)
 
@@ -278,7 +290,11 @@ class Replace(hot_funcs.Replace):
         fmt_data = {'fn_name': self.fn_name,
                     'example': example}
 
+<<<<<<< heat/engine/cfn/functions.py
         if isinstance(self.args, (str, collections.abc.Mapping)):
+=======
+        if isinstance(self.args, (str, collections.Mapping)):
+>>>>>>> heat/engine/cfn/functions.py
             raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
                               'should be: %(example)s') % fmt_data)
 
@@ -350,7 +366,11 @@ class MemberListToMap(function.Function):
     def result(self):
         member_list = function.resolve(self._list)
 
+<<<<<<< heat/engine/cfn/functions.py
         if not isinstance(member_list, collections.abc.Iterable):
+=======
+        if not isinstance(member_list, collections.Iterable):
+>>>>>>> heat/engine/cfn/functions.py
             raise TypeError(_('Member list must be a list'))
 
         def item(s):
@@ -428,7 +448,11 @@ class Not(hot_funcs.Not):
         msg = _('Arguments to "%s" must be of the form: '
                 '[condition]') % self.fn_name
         if (not self.args or
+<<<<<<< heat/engine/cfn/functions.py
                 not isinstance(self.args, collections.abc.Sequence) or
+=======
+                not isinstance(self.args, collections.Sequence) or
+>>>>>>> heat/engine/cfn/functions.py
                 isinstance(self.args, str)):
             raise ValueError(msg)
         if len(self.args) != 1:
diff --git a/heat/engine/clients/os/__init__.py b/heat/engine/clients/os/__init__.py
index 3c3167e..6e83cec 100644
--- a/heat/engine/clients/os/__init__.py
+++ b/heat/engine/clients/os/__init__.py
@@ -13,6 +13,11 @@
 
 import abc
 
+<<<<<<< heat/engine/clients/os/__init__.py
+=======
+import six
+
+>>>>>>> heat/engine/clients/os/__init__.py
 from oslo_cache import core
 from oslo_config import cfg
 
@@ -29,7 +34,12 @@ MEMOIZE_FINDER = core.get_memoization_decorator(
     group="resource_finder_cache")
 
 
+<<<<<<< heat/engine/clients/os/__init__.py
 class ExtensionMixin(object, metaclass=abc.ABCMeta):
+=======
+@six.add_metaclass(abc.ABCMeta)
+class ExtensionMixin(object):
+>>>>>>> heat/engine/clients/os/__init__.py
     def __init__(self, *args, **kwargs):
         super(ExtensionMixin, self).__init__(*args, **kwargs)
         self._extensions = None
diff --git a/heat/engine/clients/os/cinder.py b/heat/engine/clients/os/cinder.py
index 55b7755..3f977a3 100644
--- a/heat/engine/clients/os/cinder.py
+++ b/heat/engine/clients/os/cinder.py
@@ -34,7 +34,11 @@ class CinderClientPlugin(os_client.ExtensionMixin,
 
     exceptions_module = exceptions
 
+<<<<<<< heat/engine/clients/os/cinder.py
     service_types = [VOLUME_V3] = ['volumev3']
+=======
+    service_types = [VOLUME_V2, VOLUME_V3] = ['volumev2', 'volumev3']
+>>>>>>> heat/engine/clients/os/cinder.py
 
     def get_volume_api_version(self):
         '''Returns the most recent API version.'''
@@ -46,7 +50,18 @@ class CinderClientPlugin(os_client.ExtensionMixin,
             self.service_type = self.VOLUME_V3
             self.client_version = '3'
         except ks_exceptions.EndpointNotFound:
+<<<<<<< heat/engine/clients/os/cinder.py
             raise exception.Error(_('No volume service available.'))
+=======
+            try:
+                self.context.keystone_session.get_endpoint(
+                    service_type=self.VOLUME_V2,
+                    interface=self.interface)
+                self.service_type = self.VOLUME_V2
+                self.client_version = '2'
+            except ks_exceptions.EndpointNotFound:
+                raise exception.Error(_('No volume service available.'))
+>>>>>>> heat/engine/clients/os/cinder.py
 
     def _create(self):
         self.get_volume_api_version()
diff --git a/heat/engine/clients/os/keystone/__init__.py b/heat/engine/clients/os/keystone/__init__.py
index 076a814..5760d80 100644
--- a/heat/engine/clients/os/keystone/__init__.py
+++ b/heat/engine/clients/os/keystone/__init__.py
@@ -164,7 +164,11 @@ class KeystoneClientPlugin(client_plugin.ClientPlugin):
         except ks_exceptions.NotFound:
             try:
                 user_obj = self.client().client.users.find(name=user,
+<<<<<<< heat/engine/clients/os/keystone/__init__.py
                                                            domain_id=domain)
+=======
+                                                           domain=domain)
+>>>>>>> heat/engine/clients/os/keystone/__init__.py
                 return user_obj.id
             except ks_exceptions.NotFound:
                 pass
diff --git a/heat/engine/clients/os/keystone/fake_keystoneclient.py b/heat/engine/clients/os/keystone/fake_keystoneclient.py
index 9715c95..0859e44 100644
--- a/heat/engine/clients/os/keystone/fake_keystoneclient.py
+++ b/heat/engine/clients/os/keystone/fake_keystoneclient.py
@@ -91,6 +91,7 @@ class FakeKeystoneClient(object):
                                       trust_id='atrust',
                                       trustor_user_id=self.user_id)
 
+<<<<<<< heat/engine/clients/os/keystone/fake_keystoneclient.py
     def regenerate_trust_context(self):
         return context.RequestContext(username=self.username,
                                       password=self.password,
@@ -98,6 +99,8 @@ class FakeKeystoneClient(object):
                                       trust_id='atrust',
                                       trustor_user_id=self.user_id)
 
+=======
+>>>>>>> heat/engine/clients/os/keystone/fake_keystoneclient.py
     def delete_trust(self, trust_id):
         pass
 
diff --git a/heat/engine/clients/os/keystone/heat_keystoneclient.py b/heat/engine/clients/os/keystone/heat_keystoneclient.py
index 7f993ec..59290e4 100644
--- a/heat/engine/clients/os/keystone/heat_keystoneclient.py
+++ b/heat/engine/clients/os/keystone/heat_keystoneclient.py
@@ -26,7 +26,10 @@ from oslo_log import log as logging
 from oslo_serialization import jsonutils
 from oslo_utils import importutils
 
+<<<<<<< heat/engine/clients/os/keystone/heat_keystoneclient.py
 from heat.common import config
+=======
+>>>>>>> heat/engine/clients/os/keystone/heat_keystoneclient.py
 from heat.common import context
 from heat.common import exception
 from heat.common.i18n import _
@@ -77,8 +80,11 @@ class KsClientWrapper(object):
         self._domain_admin_auth = None
         self._domain_admin_client = None
         self._region_name = region_name
+<<<<<<< heat/engine/clients/os/keystone/heat_keystoneclient.py
         self._interface = config.get_client_option('keystone',
                                                    'endpoint_type')
+=======
+>>>>>>> heat/engine/clients/os/keystone/heat_keystoneclient.py
 
         self.session = self.context.keystone_session
         self.v3_endpoint = self.context.keystone_v3_endpoint
@@ -161,7 +167,10 @@ class KsClientWrapper(object):
                 session=self.session,
                 auth=self.domain_admin_auth,
                 connect_retries=cfg.CONF.client_retry_limit,
+<<<<<<< heat/engine/clients/os/keystone/heat_keystoneclient.py
                 interface=self._interface,
+=======
+>>>>>>> heat/engine/clients/os/keystone/heat_keystoneclient.py
                 region_name=self.auth_region_name)
 
         return self._domain_admin_client
@@ -169,7 +178,10 @@ class KsClientWrapper(object):
     def _v3_client_init(self):
         client = kc_v3.Client(session=self.session,
                               connect_retries=cfg.CONF.client_retry_limit,
+<<<<<<< heat/engine/clients/os/keystone/heat_keystoneclient.py
                               interface=self._interface,
+=======
+>>>>>>> heat/engine/clients/os/keystone/heat_keystoneclient.py
                               region_name=self.auth_region_name)
 
         if hasattr(self.context.auth_plugin, 'get_access'):
@@ -193,7 +205,23 @@ class KsClientWrapper(object):
 
         return client
 
+<<<<<<< heat/engine/clients/os/keystone/heat_keystoneclient.py
     def _create_trust_context(self, trustor_user_id, trustor_proj_id):
+=======
+    def create_trust_context(self):
+        """Create a trust using the trustor identity in the current context.
+
+        The trust is created with the trustee as the heat service user.
+
+        If the current context already contains a trust_id, we do nothing
+        and return the current context.
+
+        Returns a context containing the new trust_id.
+        """
+        if self.context.trust_id:
+            return self.context
+
+>>>>>>> heat/engine/clients/os/keystone/heat_keystoneclient.py
         # We need the service admin user ID (not name), as the trustor user
         # can't look up the ID in keystoneclient unless they're admin;
         # work around this by getting the user_id from admin_client
@@ -204,6 +232,12 @@ class KsClientWrapper(object):
             LOG.error("Domain admin client authentication failed")
             raise exception.AuthorizationFailure()
 
+<<<<<<< heat/engine/clients/os/keystone/heat_keystoneclient.py
+=======
+        trustor_user_id = self.context.auth_plugin.get_user_id(self.session)
+        trustor_proj_id = self.context.auth_plugin.get_project_id(self.session)
+
+>>>>>>> heat/engine/clients/os/keystone/heat_keystoneclient.py
         role_kw = {}
         # inherit the roles of the trustor, unless set trusts_delegated_roles
         if cfg.CONF.trusts_delegated_roles:
@@ -235,6 +269,7 @@ class KsClientWrapper(object):
         trust_context.trustor_user_id = trustor_user_id
         return trust_context
 
+<<<<<<< heat/engine/clients/os/keystone/heat_keystoneclient.py
     def create_trust_context(self):
         """Create a trust using the trustor identity in the current context.
 
@@ -252,6 +287,8 @@ class KsClientWrapper(object):
         trustor_proj_id = self.context.auth_plugin.get_project_id(self.session)
         return self._create_trust_context(trustor_user_id, trustor_proj_id)
 
+=======
+>>>>>>> heat/engine/clients/os/keystone/heat_keystoneclient.py
     def delete_trust(self, trust_id):
         """Delete the specified trust."""
         try:
@@ -259,6 +296,7 @@ class KsClientWrapper(object):
         except (ks_exception.NotFound, ks_exception.Unauthorized):
             pass
 
+<<<<<<< heat/engine/clients/os/keystone/heat_keystoneclient.py
     def regenerate_trust_context(self):
         """Regenerate a trust using the trustor identity of current user_id.
 
@@ -276,6 +314,8 @@ class KsClientWrapper(object):
             self.delete_trust(old_trust_id)
         return trust_context
 
+=======
+>>>>>>> heat/engine/clients/os/keystone/heat_keystoneclient.py
     def _get_username(self, username):
         if(len(username) > 255):
             LOG.warning("Truncating the username %s to the last 255 "
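
Note: the conflicted create_trust_context docstring above describes the intended behaviour: reuse an existing trust if the context already carries one, otherwise delegate the trustor's roles to the heat service user and return a context holding the new trust_id. A purely illustrative sketch of that control flow (all names invented; this is not the KsClientWrapper code):

    def ensure_trust_context(ctx, create_trust):
        """Return a context carrying a trust_id, creating a trust if needed."""
        if ctx.get('trust_id'):
            return ctx                      # a trust already exists; reuse it
        trust_id = create_trust(trustor=ctx['user_id'],
                                trustee='heat-service-user',
                                project=ctx['project_id'])
        return dict(ctx, trust_id=trust_id)
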
diff --git a/heat/engine/clients/os/magnum.py b/heat/engine/clients/os/magnum.py
index acc470b..caeeeb5 100644
--- a/heat/engine/clients/os/magnum.py
+++ b/heat/engine/clients/os/magnum.py
@@ -57,6 +57,13 @@ class MagnumClientPlugin(client_plugin.ClientPlugin):
             raise exception.EntityNotFound(entity=entity_msg,
                                            name=value)
 
+<<<<<<< heat/engine/clients/os/magnum.py
+=======
+    def get_baymodel(self, value):
+        return self._get_rsrc_name_or_id(value, entity='baymodels',
+                                         entity_msg='BayModel')
+
+>>>>>>> heat/engine/clients/os/magnum.py
     def get_cluster_template(self, value):
         return self._get_rsrc_name_or_id(value, entity='cluster_templates',
                                          entity_msg='ClusterTemplate')
@@ -66,3 +73,12 @@ class ClusterTemplateConstraint(constraints.BaseCustomConstraint):
 
     resource_client_name = CLIENT_NAME
     resource_getter_name = 'get_cluster_template'
+<<<<<<< heat/engine/clients/os/magnum.py
+=======
+
+
+class BaymodelConstraint(constraints.BaseCustomConstraint):
+
+    resource_client_name = CLIENT_NAME
+    resource_getter_name = 'get_baymodel'
+>>>>>>> heat/engine/clients/os/magnum.py
diff --git a/heat/engine/clients/os/nova.py b/heat/engine/clients/os/nova.py
index cbd155f..e7d9441 100644
--- a/heat/engine/clients/os/nova.py
+++ b/heat/engine/clients/os/nova.py
@@ -47,7 +47,11 @@ CLIENT_NAME = 'nova'
 class NovaClientPlugin(microversion_mixin.MicroversionMixin,
                        client_plugin.ClientPlugin):
 
+<<<<<<< heat/engine/clients/os/nova.py
     deferred_server_statuses = {'BUILD',
+=======
+    deferred_server_statuses = ['BUILD',
+>>>>>>> heat/engine/clients/os/nova.py
                                 'HARD_REBOOT',
                                 'PASSWORD',
                                 'REBOOT',
@@ -56,7 +60,11 @@ class NovaClientPlugin(microversion_mixin.MicroversionMixin,
                                 'REVERT_RESIZE',
                                 'SHUTOFF',
                                 'SUSPENDED',
+<<<<<<< heat/engine/clients/os/nova.py
                                 'VERIFY_RESIZE'}
+=======
+                                'VERIFY_RESIZE']
+>>>>>>> heat/engine/clients/os/nova.py
 
     exceptions_module = exceptions
 
@@ -596,7 +604,11 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
 
     def meta_serialize(self, metadata):
         """Serialize non-string metadata values before sending them to Nova."""
+<<<<<<< heat/engine/clients/os/nova.py
         if not isinstance(metadata, collections.abc.Mapping):
+=======
+        if not isinstance(metadata, collections.Mapping):
+>>>>>>> heat/engine/clients/os/nova.py
             raise exception.StackValidationFailed(message=_(
                 "nova server metadata needs to be a Map."))
 
@@ -647,7 +659,11 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
         """
         nc = self.client
 
+<<<<<<< heat/engine/clients/os/nova.py
         class ConsoleUrls(collections.abc.Mapping):
+=======
+        class ConsoleUrls(collections.Mapping):
+>>>>>>> heat/engine/clients/os/nova.py
             def __init__(self, server):
                 self.console_method = server.get_console_url
                 self.support_console_types = ['novnc', 'xvpvnc',
diff --git a/heat/engine/clients/os/openstacksdk.py b/heat/engine/clients/os/openstacksdk.py
index 15a08bf..debe873 100644
--- a/heat/engine/clients/os/openstacksdk.py
+++ b/heat/engine/clients/os/openstacksdk.py
@@ -72,12 +72,15 @@ class OpenStackSDKPlugin(client_plugin.ClientPlugin):
     def find_network_segment(self, value):
         return self.client().network.find_segment(value).id
 
+<<<<<<< heat/engine/clients/os/openstacksdk.py
     def find_network_port(self, value):
         return self.client().network.find_port(value).id
 
     def find_network_ip(self, value):
         return self.client().network.find_ip(value).id
 
+=======
+>>>>>>> heat/engine/clients/os/openstacksdk.py
 
 class SegmentConstraint(constraints.BaseCustomConstraint):
 
diff --git a/heat/engine/clients/progress.py b/heat/engine/clients/progress.py
index 091fb95..6cc37db 100644
--- a/heat/engine/clients/progress.py
+++ b/heat/engine/clients/progress.py
@@ -129,11 +129,18 @@ class VolumeDeleteProgress(object):
 
 
 class VolumeResizeProgress(object):
+<<<<<<< heat/engine/clients/progress.py
     def __init__(self, task_complete=False, size=None, pre_check=False):
         self.called = task_complete
         self.complete = task_complete
         self.size = size
         self.pre_check = pre_check
+=======
+    def __init__(self, task_complete=False, size=None):
+        self.called = task_complete
+        self.complete = task_complete
+        self.size = size
+>>>>>>> heat/engine/clients/progress.py
 
 
 class VolumeUpdateAccessModeProgress(object):
diff --git a/heat/engine/conditions.py b/heat/engine/conditions.py
index 73e51f4..69d149d 100644
--- a/heat/engine/conditions.py
+++ b/heat/engine/conditions.py
@@ -24,7 +24,11 @@ _in_progress = object()
 
 class Conditions(object):
     def __init__(self, conditions_dict):
+<<<<<<< heat/engine/conditions.py
         assert isinstance(conditions_dict, collections.abc.Mapping)
+=======
+        assert isinstance(conditions_dict, collections.Mapping)
+>>>>>>> heat/engine/conditions.py
         self._conditions = conditions_dict
         self._resolved = {}
 
diff --git a/heat/engine/constraints.py b/heat/engine/constraints.py
index dfed2c9..4181705 100644
--- a/heat/engine/constraints.py
+++ b/heat/engine/constraints.py
@@ -36,7 +36,11 @@ MEMOIZE = core.get_memoization_decorator(conf=cfg.CONF,
 LOG = log.getLogger(__name__)
 
 
+<<<<<<< heat/engine/constraints.py
 class Schema(collections.abc.Mapping):
+=======
+class Schema(collections.Mapping):
+>>>>>>> heat/engine/constraints.py
     """Schema base class for validating properties or parameters.
 
     Schema objects are serializable to dictionaries following a superset of
@@ -251,7 +255,11 @@ class Schema(collections.abc.Mapping):
         return self._len
 
 
+<<<<<<< heat/engine/constraints.py
 class AnyIndexDict(collections.abc.Mapping):
+=======
+class AnyIndexDict(collections.Mapping):
+>>>>>>> heat/engine/constraints.py
     """A Mapping that returns the same value for any integer index.
 
     Used for storing the schema for a list. When converted to a dictionary,
@@ -276,7 +284,11 @@ class AnyIndexDict(collections.abc.Mapping):
         return 1
 
 
+<<<<<<< heat/engine/constraints.py
 class Constraint(collections.abc.Mapping):
+=======
+class Constraint(collections.Mapping):
+>>>>>>> heat/engine/constraints.py
     """Parent class for constraints on allowable values for a Property.
 
     Constraints are serializable to dictionaries following the HOT input
@@ -540,7 +552,11 @@ class AllowedValues(Constraint):
 
     def __init__(self, allowed, description=None):
         super(AllowedValues, self).__init__(description)
+<<<<<<< heat/engine/constraints.py
         if (not isinstance(allowed, collections.abc.Sequence) or
+=======
+        if (not isinstance(allowed, collections.Sequence) or
+>>>>>>> heat/engine/constraints.py
                 isinstance(allowed, str)):
             raise exception.InvalidSchemaError(
                 message=_('AllowedValues must be a list'))
diff --git a/heat/engine/dependencies.py b/heat/engine/dependencies.py
index f4d085f..a8dbf05 100644
--- a/heat/engine/dependencies.py
+++ b/heat/engine/dependencies.py
@@ -15,6 +15,14 @@ import collections
 import itertools
 
 from heat.common import exception
+<<<<<<< heat/engine/dependencies.py
+=======
+from heat.common.i18n import _
+
+
+class CircularDependencyException(exception.HeatException):
+    msg_fmt = _("Circular Dependency Found: %(cycle)s")
+>>>>>>> heat/engine/dependencies.py
 
 
 class Node(object):
@@ -158,7 +166,11 @@ class Graph(collections.defaultdict):
             else:
                 # There are nodes remaining, but none without
                 # dependencies: a cycle
+<<<<<<< heat/engine/dependencies.py
                 raise exception.CircularDependencyException(cycle=str(graph))
+=======
+                raise CircularDependencyException(cycle=str(graph))
+>>>>>>> heat/engine/dependencies.py
 
 
 class Dependencies(object):
diff --git a/heat/engine/environment.py b/heat/engine/environment.py
index f01b836..014635d 100644
--- a/heat/engine/environment.py
+++ b/heat/engine/environment.py
@@ -12,7 +12,10 @@
 #    under the License.
 
 import collections
+<<<<<<< heat/engine/environment.py
 import fnmatch
+=======
+>>>>>>> heat/engine/environment.py
 import glob
 import itertools
 import os.path
@@ -21,6 +24,10 @@ import weakref
 
 from oslo_config import cfg
 from oslo_log import log
+<<<<<<< heat/engine/environment.py
+=======
+from oslo_utils import fnmatch
+>>>>>>> heat/engine/environment.py
 
 from heat.common import environment_format as env_fmt
 from heat.common import exception
@@ -55,7 +62,11 @@ def is_hook_definition(key, value):
     if key == 'hooks':
         if isinstance(value, str):
             is_valid_hook = valid_hook_type(value)
+<<<<<<< heat/engine/environment.py
         elif isinstance(value, collections.abc.Sequence):
+=======
+        elif isinstance(value, collections.Sequence):
+>>>>>>> heat/engine/environment.py
             is_valid_hook = all(valid_hook_type(hook) for hook in value)
 
         if not is_valid_hook:
@@ -72,7 +83,11 @@ def is_valid_restricted_action(key, value):
     if key == 'restricted_actions':
         if isinstance(value, str):
             valid_action = valid_restricted_actions(value)
+<<<<<<< heat/engine/environment.py
         elif isinstance(value, collections.abc.Sequence):
+=======
+        elif isinstance(value, collections.Sequence):
+>>>>>>> heat/engine/environment.py
             valid_action = all(valid_restricted_actions(
                 action) for action in value)
 
@@ -397,7 +412,11 @@ class ResourceRegistry(object):
                     actions = resource['restricted_actions']
                     if isinstance(actions, str):
                         restricted_actions.add(actions)
+<<<<<<< heat/engine/environment.py
                     elif isinstance(actions, collections.abc.Sequence):
+=======
+                    elif isinstance(actions, collections.Sequence):
+>>>>>>> heat/engine/environment.py
                         restricted_actions |= set(actions)
         return restricted_actions
 
@@ -433,7 +452,11 @@ class ResourceRegistry(object):
                     if isinstance(hooks, str):
                         if hook == hooks:
                             return True
+<<<<<<< heat/engine/environment.py
                     elif isinstance(hooks, collections.abc.Sequence):
+=======
+                    elif isinstance(hooks, collections.Sequence):
+>>>>>>> heat/engine/environment.py
                         if hook in hooks:
                             return True
         return False
@@ -590,6 +613,7 @@ class ResourceRegistry(object):
                    str(support.SUPPORT_STATUSES))
             raise exception.Invalid(reason=msg)
 
+<<<<<<< heat/engine/environment.py
         enforcer = policy.ResourceEnforcer()
         if type_name is not None:
             try:
@@ -644,6 +668,54 @@ class ResourceRegistry(object):
         import heat.engine.resource
 
         def resource_description(name, info):
+=======
+        def is_resource(key):
+            return isinstance(self._registry[key], (ClassResourceInfo,
+                                                    TemplateResourceInfo))
+
+        def status_matches(cls):
+            return (support_status is None or
+                    cls.get_class().support_status.status ==
+                    support_status)
+
+        def is_available(cls):
+            if cnxt is None:
+                return True
+
+            try:
+                return cls.get_class().is_service_available(cnxt)[0]
+            except Exception:
+                return False
+
+        def not_hidden_matches(cls):
+            return cls.get_class().support_status.status != support.HIDDEN
+
+        def is_allowed(enforcer, name):
+            if cnxt is None:
+                return True
+            try:
+                enforcer.enforce(cnxt, name, is_registered_policy=True)
+            except enforcer.exc:
+                return False
+            else:
+                return True
+
+        enforcer = policy.ResourceEnforcer()
+
+        def name_matches(name):
+            try:
+                return type_name is None or re.match(type_name, name)
+            except:  # noqa
+                return False
+
+        def version_matches(cls):
+            return (version is None or
+                    cls.get_class().support_status.version == version)
+
+        import heat.engine.resource
+
+        def resource_description(name, info, with_description):
+>>>>>>> heat/engine/environment.py
             if not with_description:
                 return name
             rsrc_cls = info.get_class()
@@ -654,9 +726,21 @@ class ResourceRegistry(object):
                 'description': rsrc_cls.getdoc(),
             }
 
+<<<<<<< heat/engine/environment.py
         return [resource_description(name, info)
                 for name, info in self._registry.items()
                 if matches(name, info)]
+=======
+        return [resource_description(name, cls, with_description)
+                for name, cls in self._registry.items()
+                if (is_resource(name) and
+                    name_matches(name) and
+                    status_matches(cls) and
+                    is_available(cls) and
+                    is_allowed(enforcer, name) and
+                    not_hidden_matches(cls) and
+                    version_matches(cls))]
+>>>>>>> heat/engine/environment.py
 
 
 class Environment(object):
diff --git a/heat/engine/function.py b/heat/engine/function.py
index 2169e21..9eaad31 100644
--- a/heat/engine/function.py
+++ b/heat/engine/function.py
@@ -195,7 +195,11 @@ class Macro(Function, metaclass=abc.ABCMeta):
 
     def result(self):
         """Return the resolved result of the macro contents."""
+<<<<<<< heat/engine/function.py
         return resolve(self.parsed, nullable=True)
+=======
+        return resolve(self.parsed)
+>>>>>>> heat/engine/function.py
 
     def dependencies(self, path):
         return dependencies(self.parsed, '.'.join([path, self.fn_name]))
@@ -250,6 +254,7 @@ class Macro(Function, metaclass=abc.ABCMeta):
         return repr(self.parsed)
 
 
+<<<<<<< heat/engine/function.py
 def _non_null_item(i):
     k, v = i
     return v is not Ellipsis
@@ -274,6 +279,17 @@ def resolve(snippet, nullable=False):
           isinstance(snippet, collections.abc.Iterable)):
         return list(filter(_non_null_value,
                            (resolve(v, nullable=True) for v in snippet)))
+=======
+def resolve(snippet):
+    if isinstance(snippet, Function):
+        return snippet.result()
+
+    if isinstance(snippet, collections.Mapping):
+        return dict((k, resolve(v)) for k, v in snippet.items())
+    elif (not isinstance(snippet, str) and
+          isinstance(snippet, collections.Iterable)):
+        return [resolve(v) for v in snippet]
+>>>>>>> heat/engine/function.py
 
     return snippet
 
@@ -293,11 +309,19 @@ def validate(snippet, path=None):
             raise exception.StackValidationFailed(
                 path=path + [snippet.fn_name],
                 message=str(e))
+<<<<<<< heat/engine/function.py
     elif isinstance(snippet, collections.abc.Mapping):
         for k, v in snippet.items():
             validate(v, path + [k])
     elif (not isinstance(snippet, str) and
           isinstance(snippet, collections.abc.Iterable)):
+=======
+    elif isinstance(snippet, collections.Mapping):
+        for k, v in snippet.items():
+            validate(v, path + [k])
+    elif (not isinstance(snippet, str) and
+          isinstance(snippet, collections.Iterable)):
+>>>>>>> heat/engine/function.py
         basepath = list(path)
         parent = basepath.pop() if basepath else ''
         for i, v in enumerate(snippet):
@@ -314,7 +338,11 @@ def dependencies(snippet, path=''):
     if isinstance(snippet, Function):
         return snippet.dependencies(path)
 
+<<<<<<< heat/engine/function.py
     elif isinstance(snippet, collections.abc.Mapping):
+=======
+    elif isinstance(snippet, collections.Mapping):
+>>>>>>> heat/engine/function.py
         def mkpath(key):
             return '.'.join([path, str(key)])
 
@@ -323,7 +351,11 @@ def dependencies(snippet, path=''):
         return itertools.chain.from_iterable(deps)
 
     elif (not isinstance(snippet, str) and
+<<<<<<< heat/engine/function.py
           isinstance(snippet, collections.abc.Iterable)):
+=======
+          isinstance(snippet, collections.Iterable)):
+>>>>>>> heat/engine/function.py
         def mkpath(idx):
             return ''.join([path, '[%d]' % idx])
 
@@ -348,11 +380,19 @@ def dep_attrs(snippet, resource_name):
     if isinstance(snippet, Function):
         return snippet.dep_attrs(resource_name)
 
+<<<<<<< heat/engine/function.py
     elif isinstance(snippet, collections.abc.Mapping):
         attrs = (dep_attrs(val, resource_name) for val in snippet.values())
         return itertools.chain.from_iterable(attrs)
     elif (not isinstance(snippet, str) and
           isinstance(snippet, collections.abc.Iterable)):
+=======
+    elif isinstance(snippet, collections.Mapping):
+        attrs = (dep_attrs(val, resource_name) for val in snippet.values())
+        return itertools.chain.from_iterable(attrs)
+    elif (not isinstance(snippet, str) and
+          isinstance(snippet, collections.Iterable)):
+>>>>>>> heat/engine/function.py
         attrs = (dep_attrs(value, resource_name) for value in snippet)
         return itertools.chain.from_iterable(attrs)
     return []
@@ -371,11 +411,19 @@ def all_dep_attrs(snippet):
     if isinstance(snippet, Function):
         return snippet.all_dep_attrs()
 
+<<<<<<< heat/engine/function.py
     elif isinstance(snippet, collections.abc.Mapping):
         res_attrs = (all_dep_attrs(value) for value in snippet.values())
         return itertools.chain.from_iterable(res_attrs)
     elif (not isinstance(snippet, str) and
           isinstance(snippet, collections.abc.Iterable)):
+=======
+    elif isinstance(snippet, collections.Mapping):
+        res_attrs = (all_dep_attrs(value) for value in snippet.values())
+        return itertools.chain.from_iterable(res_attrs)
+    elif (not isinstance(snippet, str) and
+          isinstance(snippet, collections.Iterable)):
+>>>>>>> heat/engine/function.py
         res_attrs = (all_dep_attrs(value) for value in snippet)
         return itertools.chain.from_iterable(res_attrs)
     return []
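
The function.py conflict above is mostly the collections.abc rename plus one behavioural difference: the newer resolve() threads a nullable flag and drops map/list entries whose resolved value is the Ellipsis sentinel, while the older branch resolves everything verbatim. A stripped-down sketch of that filtering, using a stand-in class rather than heat's Function:

import collections.abc


class FakeFunction:
    # stand-in for heat.engine.function.Function; result() may return the
    # Ellipsis sentinel to mean "omit this entry entirely"
    def __init__(self, value):
        self.value = value

    def result(self):
        return self.value


def resolve(snippet, nullable=False):
    if isinstance(snippet, FakeFunction):
        result = snippet.result()
        if not nullable and result is Ellipsis:
            result = None
        return result
    if isinstance(snippet, collections.abc.Mapping):
        return {k: v for k, v in
                ((k, resolve(v, nullable=True)) for k, v in snippet.items())
                if v is not Ellipsis}
    if (not isinstance(snippet, str) and
            isinstance(snippet, collections.abc.Iterable)):
        return [v for v in (resolve(v, nullable=True) for v in snippet)
                if v is not Ellipsis]
    return snippet


assert resolve({'keep': FakeFunction(1),
                'drop': FakeFunction(Ellipsis)}) == {'keep': 1}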
diff --git a/heat/engine/hot/functions.py b/heat/engine/hot/functions.py
index ec4bb9d..20efd92 100644
--- a/heat/engine/hot/functions.py
+++ b/heat/engine/hot/functions.py
@@ -79,7 +79,11 @@ class GetParam(function.Function):
         if isinstance(args, str):
             param_name = args
             path_components = []
+<<<<<<< heat/engine/hot/functions.py
         elif isinstance(args, collections.abc.Sequence):
+=======
+        elif isinstance(args, collections.Sequence):
+>>>>>>> heat/engine/hot/functions.py
             param_name = args[0]
             path_components = args[1:]
         else:
@@ -96,15 +100,24 @@ class GetParam(function.Function):
             raise exception.UserParameterMissing(key=param_name)
 
         def get_path_component(collection, key):
+<<<<<<< heat/engine/hot/functions.py
             if not isinstance(collection, (collections.abc.Mapping,
                                            collections.abc.Sequence)):
+=======
+            if not isinstance(collection, (collections.Mapping,
+                                           collections.Sequence)):
+>>>>>>> heat/engine/hot/functions.py
                 raise TypeError(_('"%s" can\'t traverse path') % self.fn_name)
 
             if not isinstance(key, (str, int)):
                 raise TypeError(_('Path components in "%s" '
                                   'must be strings') % self.fn_name)
 
+<<<<<<< heat/engine/hot/functions.py
             if isinstance(collection, collections.abc.Sequence
+=======
+            if isinstance(collection, collections.Sequence
+>>>>>>> heat/engine/hot/functions.py
                           ) and isinstance(key, str):
                 try:
                     key = int(key)
@@ -167,7 +180,11 @@ class GetAttThenSelect(function.Function):
          self._path_components) = self._parse_args()
 
     def _parse_args(self):
+<<<<<<< heat/engine/hot/functions.py
         if (not isinstance(self.args, collections.abc.Sequence) or
+=======
+        if (not isinstance(self.args, collections.Sequence) or
+>>>>>>> heat/engine/hot/functions.py
                 isinstance(self.args, str)):
             raise TypeError(_('Argument to "%s" must be a list') %
                             self.fn_name)
@@ -314,7 +331,11 @@ class GetAttAllAttributes(GetAtt):
                                'forms: [resource_name] or '
                                '[resource_name, attribute, (path), ...]'
                                ) % self.fn_name)
+<<<<<<< heat/engine/hot/functions.py
         elif isinstance(self.args, collections.abc.Sequence):
+=======
+        elif isinstance(self.args, collections.Sequence):
+>>>>>>> heat/engine/hot/functions.py
             if len(self.args) > 1:
                 return super(GetAttAllAttributes, self)._parse_args()
             else:
@@ -372,12 +393,20 @@ class Replace(function.Function):
 
         self._mapping, self._string = self._parse_args()
         if not isinstance(self._mapping,
+<<<<<<< heat/engine/hot/functions.py
                           (collections.abc.Mapping, function.Function)):
+=======
+                          (collections.Mapping, function.Function)):
+>>>>>>> heat/engine/hot/functions.py
             raise TypeError(_('"%s" parameters must be a mapping') %
                             self.fn_name)
 
     def _parse_args(self):
+<<<<<<< heat/engine/hot/functions.py
         if not isinstance(self.args, collections.abc.Mapping):
+=======
+        if not isinstance(self.args, collections.Mapping):
+>>>>>>> heat/engine/hot/functions.py
             raise TypeError(_('Arguments to "%s" must be a map') %
                             self.fn_name)
 
@@ -418,7 +447,11 @@ class Replace(function.Function):
         if not isinstance(template, str):
             raise TypeError(_('"%s" template must be a string') % self.fn_name)
 
+<<<<<<< heat/engine/hot/functions.py
         if not isinstance(mapping, collections.abc.Mapping):
+=======
+        if not isinstance(mapping, collections.Mapping):
+>>>>>>> heat/engine/hot/functions.py
             raise TypeError(_('"%s" params must be a map') % self.fn_name)
 
         def replace(strings, keys):
@@ -490,10 +523,16 @@ class ReplaceJson(Replace):
             else:
                 _raise_empty_param_value_error()
 
+<<<<<<< heat/engine/hot/functions.py
         if not isinstance(value, (str, int, float, bool)):
             if isinstance(
                 value, (collections.abc.Mapping, collections.abc.Sequence)
             ):
+=======
+        if not isinstance(value, (str, int,
+                                  float, bool)):
+            if isinstance(value, (collections.Mapping, collections.Sequence)):
+>>>>>>> heat/engine/hot/functions.py
                 if not self._allow_empty_value and len(value) == 0:
                     _raise_empty_param_value_error()
                 try:
@@ -605,7 +644,11 @@ class Join(function.Function):
         if strings is None:
             strings = []
         if (isinstance(strings, str) or
+<<<<<<< heat/engine/hot/functions.py
                 not isinstance(strings, collections.abc.Sequence)):
+=======
+                not isinstance(strings, collections.Sequence)):
+>>>>>>> heat/engine/hot/functions.py
             raise TypeError(_('"%s" must operate on a list') % self.fn_name)
 
         delim = function.resolve(self._delim)
@@ -670,7 +713,11 @@ class JoinMultiple(function.Function):
         for jl in r_joinlists:
             if jl:
                 if (isinstance(jl, str) or
+<<<<<<< heat/engine/hot/functions.py
                         not isinstance(jl, collections.abc.Sequence)):
+=======
+                        not isinstance(jl, collections.Sequence)):
+>>>>>>> heat/engine/hot/functions.py
                     raise TypeError(_('"%s" must operate on '
                                       'a list') % self.fn_name)
 
@@ -688,9 +735,13 @@ class JoinMultiple(function.Function):
                 return ''
             elif isinstance(s, str):
                 return s
+<<<<<<< heat/engine/hot/functions.py
             elif isinstance(
                 s, (collections.abc.Mapping, collections.abc.Sequence)
             ):
+=======
+            elif isinstance(s, (collections.Mapping, collections.Sequence)):
+>>>>>>> heat/engine/hot/functions.py
                 try:
                     return jsonutils.dumps(s, default=None, sort_keys=True)
                 except TypeError:
@@ -728,19 +779,30 @@ class MapMerge(function.Function):
     def result(self):
         args = function.resolve(self.args)
 
+<<<<<<< heat/engine/hot/functions.py
         if not isinstance(args, collections.abc.Sequence):
+=======
+        if not isinstance(args, collections.Sequence):
+>>>>>>> heat/engine/hot/functions.py
             raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
                               'should be: %(example)s') % self.fmt_data)
 
         def ensure_map(m):
             if m is None:
                 return {}
+<<<<<<< heat/engine/hot/functions.py
             elif isinstance(m, collections.abc.Mapping):
                 return m
             else:
                 msg = _('Incorrect arguments: Items to merge must be maps. '
                         '{} is type {} instead of a dict'.format(
                             repr(m)[:200], type(m)))
+=======
+            elif isinstance(m, collections.Mapping):
+                return m
+            else:
+                msg = _('Incorrect arguments: Items to merge must be maps.')
+>>>>>>> heat/engine/hot/functions.py
                 raise TypeError(msg)
 
         ret_map = {}
@@ -781,7 +843,11 @@ class MapReplace(function.Function):
         def ensure_map(m):
             if m is None:
                 return {}
+<<<<<<< heat/engine/hot/functions.py
             elif isinstance(m, collections.abc.Mapping):
+=======
+            elif isinstance(m, collections.Mapping):
+>>>>>>> heat/engine/hot/functions.py
                 return m
             else:
                 msg = (_('Incorrect arguments: to "%(fn_name)s", arguments '
@@ -906,7 +972,11 @@ class Repeat(function.Function):
         self._parse_args()
 
     def _parse_args(self):
+<<<<<<< heat/engine/hot/functions.py
         if not isinstance(self.args, collections.abc.Mapping):
+=======
+        if not isinstance(self.args, collections.Mapping):
+>>>>>>> heat/engine/hot/functions.py
             raise TypeError(_('Arguments to "%s" must be a map') %
                             self.fn_name)
 
@@ -928,12 +998,20 @@ class Repeat(function.Function):
         super(Repeat, self).validate()
 
         if not isinstance(self._for_each, function.Function):
+<<<<<<< heat/engine/hot/functions.py
             if not isinstance(self._for_each, collections.abc.Mapping):
+=======
+            if not isinstance(self._for_each, collections.Mapping):
+>>>>>>> heat/engine/hot/functions.py
                 raise TypeError(_('The "for_each" argument to "%s" must '
                                   'contain a map') % self.fn_name)
 
     def _valid_arg(self, arg):
+<<<<<<< heat/engine/hot/functions.py
         if not (isinstance(arg, (collections.abc.Sequence,
+=======
+        if not (isinstance(arg, (collections.Sequence,
+>>>>>>> heat/engine/hot/functions.py
                                  function.Function)) and
                 not isinstance(arg, str)):
             raise TypeError(_('The values of the "for_each" argument to '
@@ -944,10 +1022,17 @@ class Repeat(function.Function):
             for (key, value) in zip(keys, values):
                 template = template.replace(key, value)
             return template
+<<<<<<< heat/engine/hot/functions.py
         elif isinstance(template, collections.abc.Sequence):
             return [self._do_replacement(keys, values, elem)
                     for elem in template]
         elif isinstance(template, collections.abc.Mapping):
+=======
+        elif isinstance(template, collections.Sequence):
+            return [self._do_replacement(keys, values, elem)
+                    for elem in template]
+        elif isinstance(template, collections.Mapping):
+>>>>>>> heat/engine/hot/functions.py
             return dict((self._do_replacement(keys, values, k),
                          self._do_replacement(keys, values, v))
                         for (k, v) in template.items())
@@ -998,8 +1083,13 @@ class RepeatWithMap(Repeat):
     """
 
     def _valid_arg(self, arg):
+<<<<<<< heat/engine/hot/functions.py
         if not (isinstance(arg, (collections.abc.Sequence,
                                  collections.abc.Mapping,
+=======
+        if not (isinstance(arg, (collections.Sequence,
+                                 collections.Mapping,
+>>>>>>> heat/engine/hot/functions.py
                                  function.Function)) and
                 not isinstance(arg, str)):
             raise TypeError(_('The values of the "for_each" argument to '
@@ -1123,7 +1213,11 @@ class StrSplit(function.Function):
                          'example': example}
         self.fn_name = fn_name
 
+<<<<<<< heat/engine/hot/functions.py
         if isinstance(args, (str, collections.abc.Mapping)):
+=======
+        if isinstance(args, (str, collections.Mapping)):
+>>>>>>> heat/engine/hot/functions.py
             raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
                               'should be: %(example)s') % self.fmt_data)
 
@@ -1193,7 +1287,11 @@ class Yaql(function.Function):
     def __init__(self, stack, fn_name, args):
         super(Yaql, self).__init__(stack, fn_name, args)
 
+<<<<<<< heat/engine/hot/functions.py
         if not isinstance(self.args, collections.abc.Mapping):
+=======
+        if not isinstance(self.args, collections.Mapping):
+>>>>>>> heat/engine/hot/functions.py
             raise TypeError(_('Arguments to "%s" must be a map.') %
                             self.fn_name)
 
@@ -1280,6 +1378,7 @@ class If(function.Macro):
     evaluates to false.
     """
 
+<<<<<<< heat/engine/hot/functions.py
     def _read_args(self):
         return self.args
 
@@ -1290,6 +1389,15 @@ class If(function.Macro):
                     isinstance(self.args, str)):
                 raise ValueError()
             condition, value_if_true, value_if_false = self._read_args()
+=======
+    def parse_args(self, parse_func):
+        try:
+            if (not self.args or
+                    not isinstance(self.args, collections.Sequence) or
+                    isinstance(self.args, str)):
+                raise ValueError()
+            condition, value_if_true, value_if_false = self.args
+>>>>>>> heat/engine/hot/functions.py
         except ValueError:
             msg = _('Arguments to "%s" must be of the form: '
                     '[condition_name, value_if_true, value_if_false]')
@@ -1307,6 +1415,7 @@ class If(function.Macro):
         return self.template.conditions(self.stack).is_enabled(cond)
 
 
+<<<<<<< heat/engine/hot/functions.py
 class IfNullable(If):
     """A function to return corresponding value based on condition evaluation.
 
@@ -1341,6 +1450,8 @@ class IfNullable(If):
         return self.args
 
 
+=======
+>>>>>>> heat/engine/hot/functions.py
 class ConditionBoolean(function.Function):
     """Abstract parent class of boolean condition functions."""
 
@@ -1349,7 +1460,11 @@ class ConditionBoolean(function.Function):
         self._check_args()
 
     def _check_args(self):
+<<<<<<< heat/engine/hot/functions.py
         if not (isinstance(self.args, collections.abc.Sequence) and
+=======
+        if not (isinstance(self.args, collections.Sequence) and
+>>>>>>> heat/engine/hot/functions.py
                 not isinstance(self.args, str)):
             msg = _('Arguments to "%s" must be a list of conditions')
             raise ValueError(msg % self.fn_name)
@@ -1444,7 +1559,11 @@ class Filter(function.Function):
         self._values, self._sequence = self._parse_args()
 
     def _parse_args(self):
+<<<<<<< heat/engine/hot/functions.py
         if (not isinstance(self.args, collections.abc.Sequence) or
+=======
+        if (not isinstance(self.args, collections.Sequence) or
+>>>>>>> heat/engine/hot/functions.py
                 isinstance(self.args, str)):
             raise TypeError(_('Argument to "%s" must be a list') %
                             self.fn_name)
@@ -1506,7 +1625,11 @@ class MakeURL(function.Function):
             if arg in args:
                 if arg == self.QUERY:
                     if not isinstance(args[arg], (function.Function,
+<<<<<<< heat/engine/hot/functions.py
                                                   collections.abc.Mapping)):
+=======
+                                                  collections.Mapping)):
+>>>>>>> heat/engine/hot/functions.py
                         raise TypeError(_('The "%(arg)s" argument to '
                                           '"%(fn_name)s" must be a map') %
                                         {'arg': arg,
@@ -1541,7 +1664,11 @@ class MakeURL(function.Function):
     def validate(self):
         super(MakeURL, self).validate()
 
+<<<<<<< heat/engine/hot/functions.py
         if not isinstance(self.args, collections.abc.Mapping):
+=======
+        if not isinstance(self.args, collections.Mapping):
+>>>>>>> heat/engine/hot/functions.py
             raise TypeError(_('The arguments to "%s" must '
                               'be a map') % self.fn_name)
 
@@ -1624,14 +1751,22 @@ class ListConcat(function.Function):
         args = function.resolve(self.args)
 
         if (isinstance(args, str) or
+<<<<<<< heat/engine/hot/functions.py
                 not isinstance(args, collections.abc.Sequence)):
+=======
+                not isinstance(args, collections.Sequence)):
+>>>>>>> heat/engine/hot/functions.py
             raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
                               'should be: %(example)s') % self.fmt_data)
 
         def ensure_list(m):
             if m is None:
                 return []
+<<<<<<< heat/engine/hot/functions.py
             elif (isinstance(m, collections.abc.Sequence) and
+=======
+            elif (isinstance(m, collections.Sequence) and
+>>>>>>> heat/engine/hot/functions.py
                   not isinstance(m, str)):
                 return m
             else:
@@ -1698,7 +1833,11 @@ class Contains(function.Function):
         resolved_value = function.resolve(self.value)
         resolved_sequence = function.resolve(self.sequence)
 
+<<<<<<< heat/engine/hot/functions.py
         if not isinstance(resolved_sequence, collections.abc.Sequence):
+=======
+        if not isinstance(resolved_sequence, collections.Sequence):
+>>>>>>> heat/engine/hot/functions.py
             raise TypeError(_('Second argument to "%s" should be '
                               'a sequence.') % self.fn_name)
 
diff --git a/heat/engine/hot/template.py b/heat/engine/hot/template.py
index ee56a64..8464d5a 100644
--- a/heat/engine/hot/template.py
+++ b/heat/engine/hot/template.py
@@ -758,6 +758,7 @@ class HOTemplate20180831(HOTemplate20180302):
         'yaql': hot_funcs.Yaql,
         'contains': hot_funcs.Contains
     }
+<<<<<<< heat/engine/hot/template.py
 
 
 class HOTemplate20210416(HOTemplate20180831):
@@ -810,3 +811,5 @@ class HOTemplate20210416(HOTemplate20180831):
         'Fn::ResourceFacade': hot_funcs.Removed,
         'Ref': hot_funcs.Removed,
     }
+=======
+>>>>>>> heat/engine/hot/template.py
diff --git a/heat/engine/parameters.py b/heat/engine/parameters.py
index cb05c6e..a6beda6 100644
--- a/heat/engine/parameters.py
+++ b/heat/engine/parameters.py
@@ -388,7 +388,11 @@ class ParsedParameter(Parameter):
         return self._parsed
 
 
+<<<<<<< heat/engine/parameters.py
 class CommaDelimitedListParam(ParsedParameter, collections.abc.Sequence):
+=======
+class CommaDelimitedListParam(ParsedParameter, collections.Sequence):
+>>>>>>> heat/engine/parameters.py
     """A template parameter of type "CommaDelimitedList"."""
 
     __slots__ = tuple()
@@ -481,7 +485,11 @@ class JsonParam(ParsedParameter):
         self.schema.validate_value(parsed, context)
 
 
+<<<<<<< heat/engine/parameters.py
 class Parameters(collections.abc.Mapping, metaclass=abc.ABCMeta):
+=======
+class Parameters(collections.Mapping, metaclass=abc.ABCMeta):
+>>>>>>> heat/engine/parameters.py
     """Parameters of a stack.
 
     The parameters of a stack, with type checking, defaults, etc. specified by
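
The parameters.py hunks above move the Sequence/Mapping base classes of CommaDelimitedListParam and Parameters to collections.abc as well. Subclassing those ABCs only requires the two abstract methods shown below; this is a toy comma-delimited sequence loosely modelled on CommaDelimitedListParam, not heat's actual class:

import collections.abc


class CommaDelimitedList(collections.abc.Sequence):
    def __init__(self, value=''):
        # parse once; Sequence supplies __contains__, __iter__, index(), etc.
        self._parsed = [s.strip() for s in value.split(',')] if value else []

    def __getitem__(self, index):
        return self._parsed[index]

    def __len__(self):
        return len(self._parsed)


assert list(CommaDelimitedList('a, b,c')) == ['a', 'b', 'c']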
diff --git a/heat/engine/plugin_manager.py b/heat/engine/plugin_manager.py
index cde1591..63ddecb 100644
--- a/heat/engine/plugin_manager.py
+++ b/heat/engine/plugin_manager.py
@@ -93,7 +93,11 @@ class PluginMapping(object):
                               'from %(module)s', fmt_data)
                     raise
                 else:
+<<<<<<< heat/engine/plugin_manager.py
                     if isinstance(mapping_dict, collections.abc.Mapping):
+=======
+                    if isinstance(mapping_dict, collections.Mapping):
+>>>>>>> heat/engine/plugin_manager.py
                         return mapping_dict
                     elif mapping_dict is not None:
                         LOG.error('Invalid type for %(mapping_name)s '
diff --git a/heat/engine/properties.py b/heat/engine/properties.py
index 2d9623b..a66c654 100644
--- a/heat/engine/properties.py
+++ b/heat/engine/properties.py
@@ -300,14 +300,22 @@ class Property(object):
     def _get_map(self, value, validate=False, translation=None):
         if value is None:
             value = self.default() if self.has_default() else {}
+<<<<<<< heat/engine/properties.py
         if not isinstance(value, collections.abc.Mapping):
+=======
+        if not isinstance(value, collections.Mapping):
+>>>>>>> heat/engine/properties.py
             # This is to handle passing Lists via Json parameters exposed
             # via a provider resource, in particular lists-of-dicts which
             # cannot be handled correctly via comma_delimited_list
             if self.schema.allow_conversion:
                 if isinstance(value, str):
                     return value
+<<<<<<< heat/engine/properties.py
                 elif isinstance(value, collections.abc.Sequence):
+=======
+                elif isinstance(value, collections.Sequence):
+>>>>>>> heat/engine/properties.py
                     return jsonutils.dumps(value)
             raise TypeError(_('"%s" is not a map') % value)
 
@@ -320,7 +328,11 @@ class Property(object):
             value = self.has_default() and self.default() or []
         if self.schema.allow_conversion and isinstance(value, str):
             value = param_utils.delim_string_to_list(value)
+<<<<<<< heat/engine/properties.py
         if (not isinstance(value, collections.abc.Sequence) or
+=======
+        if (not isinstance(value, collections.Sequence) or
+>>>>>>> heat/engine/properties.py
                 isinstance(value, str)):
             raise TypeError(_('"%s" is not a list') % repr(value))
 
@@ -372,6 +384,7 @@ class Property(object):
         return _value
 
 
+<<<<<<< heat/engine/properties.py
 def _default_resolver(d, nullable=False):
     return d
 
@@ -380,6 +393,11 @@ class Properties(collections.abc.Mapping):
 
     def __init__(self, schema, data, resolver=_default_resolver,
                  parent_name=None,
+=======
+class Properties(collections.Mapping):
+
+    def __init__(self, schema, data, resolver=lambda d: d, parent_name=None,
+>>>>>>> heat/engine/properties.py
                  context=None, section=None, translation=None,
                  rsrc_description=None):
         self.props = dict((k, Property(s, k, context, path=parent_name))
@@ -458,11 +476,16 @@ class Properties(collections.abc.Mapping):
         if any(res.action == res.INIT for res in deps):
             return True
 
+<<<<<<< heat/engine/properties.py
     def get_user_value(self, key):
+=======
+    def get_user_value(self, key, validate=False):
+>>>>>>> heat/engine/properties.py
         if key not in self:
             raise KeyError(_('Invalid Property %s') % key)
 
         prop = self.props[key]
+<<<<<<< heat/engine/properties.py
         value, found = self._resolve_user_value(key, prop, validate=False)
         return value
 
@@ -510,22 +533,65 @@ class Properties(collections.abc.Mapping):
         # so handle this generically
         except Exception as e:
             raise ValueError(str(e))
+=======
+        if (self.translation.is_deleted(prop.path) or
+                self.translation.is_replaced(prop.path)):
+            return
+        if key in self.data:
+            try:
+                unresolved_value = self.data[key]
+                if validate:
+                    if self._find_deps_any_in_init(unresolved_value):
+                        validate = False
+
+                value = self.resolve(unresolved_value)
+
+                if self.translation.has_translation(prop.path):
+                    value = self.translation.translate(prop.path,
+                                                       value,
+                                                       self.data)
+
+                return prop.get_value(value, validate,
+                                      translation=self.translation)
+            # Children can raise StackValidationFailed with unique path which
+            # is necessary for further use in StackValidationFailed exception.
+            # So we need to handle this exception in this method.
+            except exception.StackValidationFailed as e:
+                raise exception.StackValidationFailed(path=e.path,
+                                                      message=e.error_message)
+            # the resolver function could raise any number of exceptions,
+            # so handle this generically
+            except Exception as e:
+                raise ValueError(str(e))
+>>>>>>> heat/engine/properties.py
 
     def _get_property_value(self, key, validate=False):
         if key not in self:
             raise KeyError(_('Invalid Property %s') % key)
 
         prop = self.props[key]
+<<<<<<< heat/engine/properties.py
         value, found = self._resolve_user_value(key, prop, validate)
         if found:
             return value
         if self.translation.has_translation(prop.path):
+=======
+        if not self.translation.is_deleted(prop.path) and key in self.data:
+            return self.get_user_value(key, validate)
+        elif self.translation.has_translation(prop.path):
+>>>>>>> heat/engine/properties.py
             value = self.translation.translate(prop.path, prop_data=self.data,
                                                validate=validate)
             if value is not None or prop.has_default():
                 return prop.get_value(value)
+<<<<<<< heat/engine/properties.py
 
         if prop.has_default():
+=======
+            elif prop.required():
+                raise ValueError(_('Property %s not assigned') % key)
+        elif prop.has_default():
+>>>>>>> heat/engine/properties.py
             return prop.get_value(None, validate,
                                   translation=self.translation)
         elif prop.required():
diff --git a/heat/engine/resource.py b/heat/engine/resource.py
index c8e5c0e..d1dfdb1 100644
--- a/heat/engine/resource.py
+++ b/heat/engine/resource.py
@@ -16,7 +16,10 @@ import contextlib
 import datetime as dt
 import itertools
 import pydoc
+<<<<<<< heat/engine/resource.py
 import re
+=======
+>>>>>>> heat/engine/resource.py
 import tenacity
 import weakref
 
@@ -408,8 +411,13 @@ class Resource(status.ResourceStatus):
         # Retry in case a signal has updated the atomic_key
         attempts = max(cfg.CONF.client_retry_limit, 0) + 1
 
+<<<<<<< heat/engine/resource.py
         def prepare_attempt(retry_state):
             if retry_state.attempt_number > 1:
+=======
+        def prepare_attempt(fn, attempt):
+            if attempt > 1:
+>>>>>>> heat/engine/resource.py
                 res_obj = resource_objects.Resource.get_obj(
                     self.context, self.id)
                 if (res_obj.engine_id is not None or
@@ -808,6 +816,7 @@ class Resource(status.ResourceStatus):
                 service_name=cls.default_client_name)
             if endpoint_exists:
                 req_extension = cls.required_service_extension
+<<<<<<< heat/engine/resource.py
                 if not req_extension:
+                    return (True, None)
                 if isinstance(req_extension, str):
@@ -823,6 +832,18 @@ class Resource(status.ResourceStatus):
                         break
                 if is_ext_available:
                     return (True, None)
+=======
+                is_ext_available = (
+                    not req_extension or client_plugin.has_extension(
+                        req_extension))
+                if is_ext_available:
+                    return (True, None)
+                else:
+                    reason = _('Required extension {0} in {1} service '
+                               'is not available.')
+                    reason = reason.format(req_extension,
+                                           cls.default_client_name)
+>>>>>>> heat/engine/resource.py
             else:
                 reason = _('{0} {1} endpoint is not in service catalog.')
                 reason = reason.format(cls.default_client_name, service_type)
@@ -1070,7 +1091,11 @@ class Resource(status.ResourceStatus):
             refd_attrs |= get_dep_attrs(stk_defn.resource_definition(r_name)
                                         for r_name in enabled_resources)
 
+<<<<<<< heat/engine/resource.py
         subset_outputs = isinstance(in_outputs, collections.abc.Iterable)
+=======
+        subset_outputs = isinstance(in_outputs, collections.Iterable)
+>>>>>>> heat/engine/resource.py
         if subset_outputs or in_outputs:
             if not subset_outputs:
                 in_outputs = stk_defn.enabled_output_names()
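
The prepare_attempt conflict in resource.py above reflects a tenacity API change: recent tenacity passes a single RetryCallState to before/after hooks, whereas the very old releases targeted by the lower branch passed (fn, attempt_number). A minimal sketch against a reasonably recent tenacity, with the retried work stubbed out rather than heat's engine-lock update:

import tenacity


def prepare_attempt(retry_state):
    # modern tenacity hands the hook one RetryCallState object
    if retry_state.attempt_number > 1:
        print('re-reading resource state before attempt %d'
              % retry_state.attempt_number)


@tenacity.retry(stop=tenacity.stop_after_attempt(3),
                retry=tenacity.retry_if_exception_type(RuntimeError),
                before=prepare_attempt,
                reraise=True)
def flaky_update():
    raise RuntimeError('atomic_key changed under us')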
diff --git a/heat/engine/resources/aws/ec2/instance.py b/heat/engine/resources/aws/ec2/instance.py
index fc536c2..8179ea2 100644
--- a/heat/engine/resources/aws/ec2/instance.py
+++ b/heat/engine/resources/aws/ec2/instance.py
@@ -865,7 +865,11 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
         status = cp.get_status(server)
         LOG.debug('%(name)s check_suspend_complete status = %(status)s',
                   {'name': self.name, 'status': status})
+<<<<<<< heat/engine/resources/aws/ec2/instance.py
         if status in (cp.deferred_server_statuses | {'ACTIVE'}):
+=======
+        if status in list(cp.deferred_server_statuses + ['ACTIVE']):
+>>>>>>> heat/engine/resources/aws/ec2/instance.py
             return status == 'SUSPENDED'
         else:
             exc = exception.ResourceUnknownStatus(
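
The single instance.py hunk above differs only in container type: a frozenset union versus list concatenation of the deferred statuses. Membership testing behaves identically either way; the set spelling just makes the intent (and O(1) lookup) explicit. The statuses below are a stand-in, not nova's real list:

deferred_server_statuses = frozenset(['BUILD', 'REBOOT', 'HARD_REBOOT'])

status = 'ACTIVE'
assert (status in (deferred_server_statuses | {'ACTIVE'})) == \
       (status in list(deferred_server_statuses) + ['ACTIVE'])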
diff --git a/heat/engine/resources/openstack/cinder/quota.py b/heat/engine/resources/openstack/cinder/quota.py
index 1968536..9840eee 100644
--- a/heat/engine/resources/openstack/cinder/quota.py
+++ b/heat/engine/resources/openstack/cinder/quota.py
@@ -41,11 +41,16 @@ class CinderQuota(resource.Resource):
 
     required_service_extension = 'os-quota-sets'
 
+<<<<<<< heat/engine/resources/openstack/cinder/quota.py
     PROPERTIES = (PROJECT, GIGABYTES, VOLUMES, BACKUPS,
                   BACKUPS_GIGABYTES, SNAPSHOTS) = (
         'project', 'gigabytes', 'volumes',
         'backups', 'backup_gigabytes',
         'snapshots'
+=======
+    PROPERTIES = (PROJECT, GIGABYTES, VOLUMES, SNAPSHOTS) = (
+        'project', 'gigabytes', 'volumes', 'snapshots'
+>>>>>>> heat/engine/resources/openstack/cinder/quota.py
     )
 
     properties_schema = {
@@ -75,6 +80,7 @@ class CinderQuota(resource.Resource):
             ],
             update_allowed=True
         ),
+<<<<<<< heat/engine/resources/openstack/cinder/quota.py
         BACKUPS: properties.Schema(
             properties.Schema.INTEGER,
             _('Quota for the number of backups. '
@@ -95,6 +101,8 @@ class CinderQuota(resource.Resource):
             ],
             update_allowed=True
         ),
+=======
+>>>>>>> heat/engine/resources/openstack/cinder/quota.py
         SNAPSHOTS: properties.Schema(
             properties.Schema.INTEGER,
             _('Quota for the number of snapshots. '
@@ -137,7 +145,10 @@ class CinderQuota(resource.Resource):
     def validate_quotas(self, project, **kwargs):
         search_opts = {'all_tenants': True, 'project_id': project}
         volume_list = None
+<<<<<<< heat/engine/resources/openstack/cinder/quota.py
         backup_list = None
+=======
+>>>>>>> heat/engine/resources/openstack/cinder/quota.py
         snapshot_list = None
         for key, value in kwargs.copy().items():
             if value == -1:
@@ -160,6 +171,7 @@ class CinderQuota(resource.Resource):
             total_size = len(volume_list)
             self._validate_quota(self.VOLUMES, quota_size, total_size)
 
+<<<<<<< heat/engine/resources/openstack/cinder/quota.py
         if self.BACKUPS in kwargs:
             quota_size = kwargs[self.BACKUPS]
             if backup_list is None:
@@ -175,6 +187,8 @@ class CinderQuota(resource.Resource):
             self._validate_quota(self.BACKUPS_GIGABYTES,
                                  quota_size, total_size)
 
+=======
+>>>>>>> heat/engine/resources/openstack/cinder/quota.py
         if self.SNAPSHOTS in kwargs:
             quota_size = kwargs[self.SNAPSHOTS]
             if snapshot_list is None:
@@ -203,9 +217,13 @@ class CinderQuota(resource.Resource):
         if sum(1 for p in self.properties.values() if p is not None) <= 1:
             raise exception.PropertyUnspecifiedError(self.GIGABYTES,
                                                      self.SNAPSHOTS,
+<<<<<<< heat/engine/resources/openstack/cinder/quota.py
                                                      self.VOLUMES,
                                                      self.BACKUPS,
                                                      self.BACKUPS_GIGABYTES)
+=======
+                                                     self.VOLUMES)
+>>>>>>> heat/engine/resources/openstack/cinder/quota.py
 
 
 def resource_mapping():
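
The quota.py conflict above is about which Cinder quotas the resource validates (the upper branch adds backups and backup_gigabytes), but every _validate_quota call follows the same idea: a requested quota may not drop below current usage unless it is -1 (unlimited). A hedged restatement of the check those calls imply; heat's exact message and -1 handling may differ:

def validate_quota(name, requested, in_use):
    # -1 means unlimited and is filtered out earlier by the resource;
    # anything else must cover what the project already consumes
    if requested != -1 and requested < in_use:
        raise ValueError('%s quota %d is below current usage %d'
                         % (name, requested, in_use))


validate_quota('volumes', 10, 7)      # fine
validate_quota('snapshots', -1, 99)   # unlimited, fine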
diff --git a/heat/engine/resources/openstack/cinder/volume.py b/heat/engine/resources/openstack/cinder/volume.py
index 14ff244..67213f5 100644
--- a/heat/engine/resources/openstack/cinder/volume.py
+++ b/heat/engine/resources/openstack/cinder/volume.py
@@ -366,6 +366,7 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
 
         return True
 
+<<<<<<< heat/engine/resources/openstack/cinder/volume.py
     def _ready_to_extend_volume(self):
         vol = self.client().volumes.get(self.resource_id)
         expected_status = (
@@ -376,6 +377,8 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
             return True
         return False
 
+=======
+>>>>>>> heat/engine/resources/openstack/cinder/volume.py
     def _check_extend_volume_complete(self):
         vol = self.client().volumes.get(self.resource_id)
         if vol.status == 'extending':
@@ -561,6 +564,7 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
                 return prg_restore.complete and not prg_resize
         # resize volume
         if prg_resize:
+<<<<<<< heat/engine/resources/openstack/cinder/volume.py
             # Make sure volume status ready for resize.
             if not prg_resize.pre_check:
                 prg_resize.pre_check = self._ready_to_extend_volume()
@@ -568,6 +572,8 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
                 if not prg_resize.pre_check:
                     return False
 
+=======
+>>>>>>> heat/engine/resources/openstack/cinder/volume.py
             if not prg_resize.called:
                 prg_resize.called = self._extend_volume(prg_resize.size)
                 return False
index 74d8345..9ad18d1 100644
--- a/heat/engine/resources/openstack/glance/image.py
+++ b/heat/engine/resources/openstack/glance/image.py
@@ -31,6 +31,7 @@ class GlanceWebImage(resource.Resource):
         NAME, IMAGE_ID, MIN_DISK, MIN_RAM, PROTECTED,
         DISK_FORMAT, CONTAINER_FORMAT, LOCATION, TAGS,
         ARCHITECTURE, KERNEL_ID, OS_DISTRO, OS_VERSION, OWNER,
+<<<<<<< heat/engine/resources/openstack/glance/image.py
         EXTRA_PROPERTIES, VISIBILITY, RAMDISK_ID, ACTIVE, MEMBERS
     ) = (
         'name', 'id', 'min_disk', 'min_ram', 'protected',
@@ -38,6 +39,14 @@ class GlanceWebImage(resource.Resource):
         'architecture', 'kernel_id', 'os_distro', 'os_version',
         'owner', 'extra_properties', 'visibility', 'ramdisk_id',
         'active', 'members'
+=======
+        VISIBILITY, RAMDISK_ID
+    ) = (
+        'name', 'id', 'min_disk', 'min_ram', 'protected',
+        'disk_format', 'container_format', 'location', 'tags',
+        'architecture', 'kernel_id', 'os_distro', 'os_version', 'owner',
+        'visibility', 'ramdisk_id'
+>>>>>>> heat/engine/resources/openstack/glance/image.py
     )
 
     glance_id_pattern = ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
@@ -76,7 +85,10 @@ class GlanceWebImage(resource.Resource):
             properties.Schema.BOOLEAN,
             _('Whether the image can be deleted. If the value is True, '
               'the image is protected and cannot be deleted.'),
+<<<<<<< heat/engine/resources/openstack/glance/image.py
             update_allowed=True,
+=======
+>>>>>>> heat/engine/resources/openstack/glance/image.py
             default=False
         ),
         DISK_FORMAT: properties.Schema(
@@ -140,6 +152,7 @@ class GlanceWebImage(resource.Resource):
             _('Owner of the image.'),
             update_allowed=True,
         ),
+<<<<<<< heat/engine/resources/openstack/glance/image.py
         EXTRA_PROPERTIES: properties.Schema(
             properties.Schema.MAP,
             _('Arbitrary properties to associate with the image.'),
@@ -147,6 +160,8 @@ class GlanceWebImage(resource.Resource):
             default={},
             support_status=support.SupportStatus(version='17.0.0')
         ),
+=======
+>>>>>>> heat/engine/resources/openstack/glance/image.py
         VISIBILITY: properties.Schema(
             properties.Schema.STRING,
             _('Scope of image accessibility.'),
@@ -165,6 +180,7 @@ class GlanceWebImage(resource.Resource):
             constraints=[
                 constraints.AllowedPattern(glance_id_pattern)
             ]
+<<<<<<< heat/engine/resources/openstack/glance/image.py
         ),
         ACTIVE: properties.Schema(
             properties.Schema.BOOLEAN,
@@ -187,6 +203,8 @@ class GlanceWebImage(resource.Resource):
             ),
             update_allowed=True,
             support_status=support.SupportStatus(version='16.0.0')
+=======
+>>>>>>> heat/engine/resources/openstack/glance/image.py
         )
     }
 
@@ -196,6 +214,7 @@ class GlanceWebImage(resource.Resource):
 
     def handle_create(self):
         args = dict((k, v) for k, v in self.properties.items()
+<<<<<<< heat/engine/resources/openstack/glance/image.py
                     if v is not None and k is not self.EXTRA_PROPERTIES)
         members = args.pop(self.MEMBERS, [])
         active = args.pop(self.ACTIVE)
@@ -284,6 +303,45 @@ class GlanceWebImage(resource.Resource):
         if active:
             self.client().images.reactivate(self.resource_id)
         return True
+=======
+                    if v is not None)
+
+        location = args.pop(self.LOCATION)
+        images = self.client().images
+        image_id = images.create(
+            **args).id
+        self.resource_id_set(image_id)
+
+        images.image_import(image_id, method='web-download', uri=location)
+
+        return image_id
+
+    def check_create_complete(self, image_id):
+        image = self.client().images.get(image_id)
+        return image.status == 'active'
+
+    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+        if prop_diff and self.TAGS in prop_diff:
+            existing_tags = self.properties.get(self.TAGS) or []
+            diff_tags = prop_diff.pop(self.TAGS) or []
+
+            new_tags = set(diff_tags) - set(existing_tags)
+            for tag in new_tags:
+                self.client().image_tags.update(
+                    self.resource_id,
+                    tag)
+
+            removed_tags = set(existing_tags) - set(diff_tags)
+            for tag in removed_tags:
+                with self.client_plugin().ignore_not_found:
+                    self.client().image_tags.delete(
+                        self.resource_id,
+                        tag)
+
+        images = self.client().images
+
+        images.update(self.resource_id, **prop_diff)
+>>>>>>> heat/engine/resources/openstack/glance/image.py
 
     def validate(self):
         super(GlanceWebImage, self).validate()
@@ -296,6 +354,7 @@ class GlanceWebImage(resource.Resource):
                     "match.")
             raise exception.StackValidationFailed(message=msg)
 
+<<<<<<< heat/engine/resources/openstack/glance/image.py
         if (self.properties[self.MEMBERS]
                 and self.properties[self.VISIBILITY] != 'shared'):
             raise exception.ResourcePropertyValueDependency(
@@ -303,6 +362,8 @@ class GlanceWebImage(resource.Resource):
                 prop2=self.VISIBILITY,
                 value='shared')
 
+=======
+>>>>>>> heat/engine/resources/openstack/glance/image.py
     def get_live_resource_data(self):
         image_data = super(GlanceWebImage, self).get_live_resource_data()
         if image_data.get('status') in ('deleted', 'killed'):
@@ -323,6 +384,7 @@ class GlanceWebImage(resource.Resource):
                         self.IMAGE_ID)})
                 else:
                     image_reality.update({self.IMAGE_ID: None})
+<<<<<<< heat/engine/resources/openstack/glance/image.py
 
             if key == self.EXTRA_PROPERTIES:
                 continue
@@ -335,6 +397,11 @@ class GlanceWebImage(resource.Resource):
                 extra_properties[key] = resource_data.get(key)
             image_reality.update({self.EXTRA_PROPERTIES: extra_properties})
 
+=======
+            else:
+                image_reality.update({key: resource_data.get(key)})
+
+>>>>>>> heat/engine/resources/openstack/glance/image.py
         return image_reality
 
 
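Both sides of the GlanceWebImage conflict above create the image record first and then trigger a web-download import; the upper branch additionally layers members, extra properties and (re)activation on top. A sketch of just the shared two-step flow, parameterised on an already constructed glanceclient v2 images manager and using only the calls visible in the diff:

def create_web_image(images, location, **create_args):
    # step 1: register the image record, step 2: let glance fetch the bits
    image = images.create(**create_args)
    images.image_import(image.id, method='web-download', uri=location)
    return image.id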
diff --git a/heat/engine/resources/openstack/heat/autoscaling_group.py b/heat/engine/resources/openstack/heat/autoscaling_group.py
index 8561048..4834e66 100644
--- a/heat/engine/resources/openstack/heat/autoscaling_group.py
+++ b/heat/engine/resources/openstack/heat/autoscaling_group.py
@@ -21,6 +21,10 @@ from heat.engine import constraints
 from heat.engine.hot import template
 from heat.engine import output
 from heat.engine import properties
+<<<<<<< heat/engine/resources/openstack/heat/autoscaling_group.py
+=======
+from heat.engine import resource
+>>>>>>> heat/engine/resources/openstack/heat/autoscaling_group.py
 from heat.engine.resources.aws.autoscaling import autoscaling_group as aws_asg
 from heat.engine import rsrc_defn
 from heat.engine import support
@@ -205,6 +209,12 @@ class AutoScalingResourceGroup(aws_asg.AutoScalingGroup):
                      self)._create_template(num_instances, num_replace,
                                             template_version=template_version)
 
+<<<<<<< heat/engine/resources/openstack/heat/autoscaling_group.py
+=======
+    def get_reference_id(self):
+        return resource.Resource.get_reference_id(self)
+
+>>>>>>> heat/engine/resources/openstack/heat/autoscaling_group.py
     def _attribute_output_name(self, *attr_path):
         return ', '.join(str(a) for a in attr_path)
 
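The get_reference_id added in the lower branch above deliberately skips the intermediate StackResource implementation and calls the plain Resource base class directly. A generic illustration of that "call the grandparent, not super()" pattern:

class Base:
    def get_reference_id(self):
        return 'physical-name'


class Middle(Base):
    def get_reference_id(self):
        return 'nested-stack-id'


class Leaf(Middle):
    def get_reference_id(self):
        # bypass Middle on purpose, as resource.Resource.get_reference_id(self)
        # does in the hunk above
        return Base.get_reference_id(self)


assert Leaf().get_reference_id() == 'physical-name'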
diff --git a/heat/engine/resources/openstack/heat/cloud_watch.py b/heat/engine/resources/openstack/heat/cloud_watch.py
new file mode 100644
index 0000000..26ed62f
--- /dev/null
+++ b/heat/engine/resources/openstack/heat/cloud_watch.py
@@ -0,0 +1,41 @@
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+
+from heat.common.i18n import _
+from heat.engine.resources.openstack.heat import none_resource
+from heat.engine import support
+
+
+class CloudWatchAlarm(none_resource.NoneResource):
+    support_status = support.SupportStatus(
+        status=support.HIDDEN,
+        message=_('OS::Heat::CWLiteAlarm resource has been removed '
+                  'since version 10.0.0. Existing stacks can still '
+                  'use it, where it would do nothing for update/delete.'),
+        version='5.0.0',
+        previous_status=support.SupportStatus(
+            status=support.DEPRECATED,
+            version='2014.2')
+    )
+
+
+def resource_mapping():
+    cfg.CONF.import_opt('enable_cloud_watch_lite', 'heat.common.config')
+    if cfg.CONF.enable_cloud_watch_lite:
+        return {
+            'OS::Heat::CWLiteAlarm': CloudWatchAlarm,
+        }
+    else:
+        return {}
diff --git a/heat/engine/resources/openstack/heat/instance_group.py b/heat/engine/resources/openstack/heat/instance_group.py
index 86b19f1..6750d0a 100644
--- a/heat/engine/resources/openstack/heat/instance_group.py
+++ b/heat/engine/resources/openstack/heat/instance_group.py
@@ -429,6 +429,12 @@ class InstanceGroup(stack_resource.StackResource):
             lbs = [self.stack[name] for name in lb_names]
             lbutils.reconfigure_loadbalancers(lbs, id_list)
 
+<<<<<<< heat/engine/resources/openstack/heat/instance_group.py
+=======
+    def get_reference_id(self):
+        return self.physical_resource_name_or_FnGetRefId()
+
+>>>>>>> heat/engine/resources/openstack/heat/instance_group.py
     def _group_data(self, refresh=False):
         """Return a cached GroupInspector object for the nested stack."""
         if refresh or getattr(self, '_group_inspector', None) is None:
diff --git a/heat/engine/resources/openstack/heat/resource_group.py b/heat/engine/resources/openstack/heat/resource_group.py
index 1bdc0c3..a6d2797 100644
--- a/heat/engine/resources/openstack/heat/resource_group.py
+++ b/heat/engine/resources/openstack/heat/resource_group.py
@@ -15,7 +15,10 @@ import collections
 import copy
 import functools
 import itertools
+<<<<<<< heat/engine/resources/openstack/heat/resource_group.py
 import math
+=======
+>>>>>>> heat/engine/resources/openstack/heat/resource_group.py
 
 from oslo_log import log as logging
 
@@ -434,18 +437,31 @@ class ResourceGroup(stack_resource.StackResource):
                 return False
         return True
 
+<<<<<<< heat/engine/resources/openstack/heat/resource_group.py
     def _run_to_completion(self, template, timeout_mins):
         updater = self.update_with_template(template, {},
                                             timeout_mins)
+=======
+    def _run_to_completion(self, template, timeout):
+        updater = self.update_with_template(template, {},
+                                            timeout)
+>>>>>>> heat/engine/resources/openstack/heat/resource_group.py
 
         while not super(ResourceGroup,
                         self).check_update_complete(updater):
             yield
 
+<<<<<<< heat/engine/resources/openstack/heat/resource_group.py
     def _run_update(self, total_capacity, max_updates, timeout_mins):
         template = self._assemble_for_rolling_update(total_capacity,
                                                      max_updates)
         return self._run_to_completion(template, timeout_mins)
+=======
+    def _run_update(self, total_capacity, max_updates, timeout):
+        template = self._assemble_for_rolling_update(total_capacity,
+                                                     max_updates)
+        return self._run_to_completion(template, timeout)
+>>>>>>> heat/engine/resources/openstack/heat/resource_group.py
 
     def check_update_complete(self, checkers):
         for checker in checkers:
@@ -601,6 +617,7 @@ class ResourceGroup(stack_resource.StackResource):
         # At this stage, we don't mind if all of the parameters have values
         # assigned. Pass in a custom resolver to the properties to not
         # error when a parameter does not have a user entered value.
+<<<<<<< heat/engine/resources/openstack/heat/resource_group.py
         def ignore_param_resolve(snippet, nullable=False):
             if isinstance(snippet, function.Function):
                 try:
@@ -620,6 +637,21 @@ class ResourceGroup(stack_resource.StackResource):
                 return list(filter(function._non_null_value,
                                    (ignore_param_resolve(v, nullable=True)
                                     for v in snippet)))
+=======
+        def ignore_param_resolve(snippet):
+            if isinstance(snippet, function.Function):
+                try:
+                    return snippet.result()
+                except exception.UserParameterMissing:
+                    return None
+
+            if isinstance(snippet, collections.Mapping):
+                return dict((k, ignore_param_resolve(v))
+                            for k, v in snippet.items())
+            elif (not isinstance(snippet, str) and
+                  isinstance(snippet, collections.Iterable)):
+                return [ignore_param_resolve(v) for v in snippet]
+>>>>>>> heat/engine/resources/openstack/heat/resource_group.py
 
             return snippet
 
@@ -647,9 +679,15 @@ class ResourceGroup(stack_resource.StackResource):
 
         if isinstance(val, str):
             return val.replace(repl_var, res_name)
+<<<<<<< heat/engine/resources/openstack/heat/resource_group.py
         elif isinstance(val, collections.abc.Mapping):
             return {k: recurse(v) for k, v in val.items()}
         elif isinstance(val, collections.abc.Sequence):
+=======
+        elif isinstance(val, collections.Mapping):
+            return {k: recurse(v) for k, v in val.items()}
+        elif isinstance(val, collections.Sequence):
+>>>>>>> heat/engine/resources/openstack/heat/resource_group.py
             return [recurse(v) for v in val]
         return val
 
@@ -777,18 +815,26 @@ class ResourceGroup(stack_resource.StackResource):
 
         batches = list(self._get_batches(self.get_size(), curr_cap, batch_size,
                                          min_in_service))
+<<<<<<< heat/engine/resources/openstack/heat/resource_group.py
         update_timeout_secs = self._update_timeout(len(batches), pause_sec)
 
         # NOTE(gibi) update_timeout is in seconds but the _run_update
         # eventually calls StackResource.update_with_template that takes
         # timeout in minutes so we need to convert here.
         update_timeout_mins = math.ceil(update_timeout_secs / 60)
+=======
+        update_timeout = self._update_timeout(len(batches), pause_sec)
+>>>>>>> heat/engine/resources/openstack/heat/resource_group.py
 
         def tasks():
             for index, (curr_cap, max_upd) in enumerate(batches):
                 yield scheduler.TaskRunner(self._run_update,
                                            curr_cap, max_upd,
+<<<<<<< heat/engine/resources/openstack/heat/resource_group.py
                                            update_timeout_mins)
+=======
+                                           update_timeout)
+>>>>>>> heat/engine/resources/openstack/heat/resource_group.py
 
                 if index < (len(batches) - 1) and pause_sec > 0:
                     yield scheduler.TaskRunner(pause_between_batch, pause_sec)
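
Besides the ignore_param_resolve signature, the resource_group.py conflict above changes units: _update_timeout yields seconds, and the upper branch rounds them up to whole minutes before handing the value to update_with_template. A quick check of that conversion:

import math


def to_timeout_mins(update_timeout_secs):
    # round up so the nested stack never gets less time than was computed
    return math.ceil(update_timeout_secs / 60)


assert to_timeout_mins(60) == 1
assert to_timeout_mins(61) == 2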
diff --git a/heat/engine/resources/openstack/heat/structured_config.py b/heat/engine/resources/openstack/heat/structured_config.py
index abcb676..ee5f088 100644
--- a/heat/engine/resources/openstack/heat/structured_config.py
+++ b/heat/engine/resources/openstack/heat/structured_config.py
@@ -166,7 +166,11 @@ class StructuredDeployment(sd.SoftwareDeployment):
             input_key,
             check_input_val=check_input_val)
 
+<<<<<<< heat/engine/resources/openstack/heat/structured_config.py
         if isinstance(snippet, collections.abc.Mapping):
+=======
+        if isinstance(snippet, collections.Mapping):
+>>>>>>> heat/engine/resources/openstack/heat/structured_config.py
             fn_arg = StructuredDeployment.get_input_key_arg(snippet, input_key)
             if fn_arg is not None:
                 return StructuredDeployment.get_input_key_value(fn_arg, inputs,
@@ -175,7 +179,11 @@ class StructuredDeployment(sd.SoftwareDeployment):
 
             return dict((k, parse(v)) for k, v in snippet.items())
         elif (not isinstance(snippet, str) and
+<<<<<<< heat/engine/resources/openstack/heat/structured_config.py
               isinstance(snippet, collections.abc.Iterable)):
+=======
+              isinstance(snippet, collections.Iterable)):
+>>>>>>> heat/engine/resources/openstack/heat/structured_config.py
             return [parse(v) for v in snippet]
         else:
             return snippet
diff --git a/heat/engine/resources/openstack/heat/swiftsignal.py b/heat/engine/resources/openstack/heat/swiftsignal.py
index 295f6ae..9b2fb07 100644
--- a/heat/engine/resources/openstack/heat/swiftsignal.py
+++ b/heat/engine/resources/openstack/heat/swiftsignal.py
@@ -267,8 +267,11 @@ class SwiftSignal(resource.Resource):
                 continue
 
             body = signal[1]
+<<<<<<< heat/engine/resources/openstack/heat/swiftsignal.py
             if isinstance(body, bytes):
                 body = body.decode()
+=======
+>>>>>>> heat/engine/resources/openstack/heat/swiftsignal.py
             if body == swift.IN_PROGRESS:  # Ignore the initial object
                 continue
             if body == "":
diff --git a/heat/engine/resources/openstack/heat/value.py b/heat/engine/resources/openstack/heat/value.py
index 7f21926..19ffdbf 100644
--- a/heat/engine/resources/openstack/heat/value.py
+++ b/heat/engine/resources/openstack/heat/value.py
@@ -103,8 +103,12 @@ class Value(resource.Resource):
                 _('The expression to generate the "value" attribute.'),
                 required=True,
                 update_allowed=True,
+<<<<<<< heat/engine/resources/openstack/heat/value.py
             ),
             self.VALUE)
+=======
+            ))
+>>>>>>> heat/engine/resources/openstack/heat/value.py
 
 
 def resource_mapping():
diff --git a/heat/engine/resources/openstack/keystone/project.py b/heat/engine/resources/openstack/keystone/project.py
index 8ea1945..f82fe23 100644
--- a/heat/engine/resources/openstack/keystone/project.py
+++ b/heat/engine/resources/openstack/keystone/project.py
@@ -192,6 +192,7 @@ class KeystoneProject(resource.Resource):
         result[self.DOMAIN] = resource_data.get('domain_id')
         return result
 
+<<<<<<< heat/engine/resources/openstack/keystone/project.py
     def handle_delete(self):
         if self.resource_id:
             # find and delete the default security group Neutron has created
@@ -210,6 +211,8 @@ class KeystoneProject(resource.Resource):
                     nclient.delete_security_group(secgroup["id"])
         super(KeystoneProject, self).handle_delete()
 
+=======
+>>>>>>> heat/engine/resources/openstack/keystone/project.py
 
 def resource_mapping():
     return {
diff --git a/heat/engine/resources/openstack/magnum/bay.py b/heat/engine/resources/openstack/magnum/bay.py
index f3a1693..3c3940f 100644
--- a/heat/engine/resources/openstack/magnum/bay.py
+++ b/heat/engine/resources/openstack/magnum/bay.py
@@ -11,12 +11,24 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+<<<<<<< heat/engine/resources/openstack/magnum/bay.py
 from heat.common.i18n import _
 from heat.engine.resources.openstack.heat import none_resource
 from heat.engine import support
 
 
 class Bay(none_resource.NoneResource):
+=======
+from heat.common import exception
+from heat.common.i18n import _
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine import resource
+from heat.engine import support
+
+
+class Bay(resource.Resource):
+>>>>>>> heat/engine/resources/openstack/magnum/bay.py
     """A resource that creates a Magnum Bay.
 
     This resource has been deprecated in favor of OS::Magnum::Cluster.
@@ -37,6 +49,139 @@ class Bay(none_resource.NoneResource):
         )
     )
 
+<<<<<<< heat/engine/resources/openstack/magnum/bay.py
+=======
+    PROPERTIES = (
+        NAME, BAYMODEL, NODE_COUNT, MASTER_COUNT, DISCOVERY_URL,
+        BAY_CREATE_TIMEOUT
+    ) = (
+        'name', 'baymodel', 'node_count', 'master_count',
+        'discovery_url', 'bay_create_timeout'
+    )
+
+    properties_schema = {
+        NAME: properties.Schema(
+            properties.Schema.STRING,
+            _('The bay name.')
+        ),
+        BAYMODEL: properties.Schema(
+            properties.Schema.STRING,
+            _('The name or ID of the bay model.'),
+            constraints=[
+                constraints.CustomConstraint('magnum.baymodel')
+            ],
+            required=True
+        ),
+        NODE_COUNT: properties.Schema(
+            properties.Schema.INTEGER,
+            _('The node count for this bay.'),
+            constraints=[constraints.Range(min=1)],
+            update_allowed=True,
+            default=1
+        ),
+        MASTER_COUNT: properties.Schema(
+            properties.Schema.INTEGER,
+            _('The number of master nodes for this bay.'),
+            constraints=[constraints.Range(min=1)],
+            update_allowed=True,
+            default=1
+        ),
+        DISCOVERY_URL: properties.Schema(
+            properties.Schema.STRING,
+            _('Specifies a custom discovery url for node discovery.')
+        ),
+        BAY_CREATE_TIMEOUT: properties.Schema(
+            properties.Schema.INTEGER,
+            _('Timeout for creating the bay in minutes. '
+              'Set to 0 for no timeout.'),
+            constraints=[constraints.Range(min=0)],
+            default=0
+        )
+    }
+
+    default_client_name = 'magnum'
+
+    entity = 'bays'
+
+    def handle_create(self):
+        args = {
+            'name': self.properties[self.NAME],
+            'baymodel_id': self.properties[self.BAYMODEL],
+            'node_count': self.properties[self.NODE_COUNT],
+            'master_count': self.properties[self.MASTER_COUNT],
+            'discovery_url': self.properties[self.DISCOVERY_URL],
+            'bay_create_timeout': self.properties[self.BAY_CREATE_TIMEOUT]
+        }
+        bay = self.client().bays.create(**args)
+        self.resource_id_set(bay.uuid)
+        return bay.uuid
+
+    def check_create_complete(self, id):
+        bay = self.client().bays.get(id)
+        if bay.status == 'CREATE_IN_PROGRESS':
+            return False
+        elif bay.status is None:
+            return False
+        elif bay.status == 'CREATE_COMPLETE':
+            return True
+        elif bay.status == 'CREATE_FAILED':
+            msg = (_("Failed to create Bay '%(name)s' - %(reason)s")
+                   % {'name': self.name, 'reason': bay.status_reason})
+            raise exception.ResourceInError(status_reason=msg,
+                                            resource_status=bay.status)
+        else:
+            msg = (_("Unknown status creating Bay '%(name)s' - %(reason)s")
+                   % {'name': self.name, 'reason': bay.status_reason})
+            raise exception.ResourceUnknownStatus(status_reason=msg,
+                                                  resource_status=bay.status)
+
+    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+        if prop_diff:
+            patch = [{'op': 'replace', 'path': '/' + k, 'value': v}
+                     for k, v in prop_diff.items()]
+            self.client().bays.update(self.resource_id, patch)
+            return self.resource_id
+
+    def parse_live_resource_data(self, resource_properties, resource_data):
+        record_reality = {}
+
+        for key in [self.NODE_COUNT, self.MASTER_COUNT]:
+            record_reality.update({key: resource_data.get(key)})
+
+        return record_reality
+
+    def check_update_complete(self, id):
+        bay = self.client().bays.get(id)
+        # A check_update_complete request may fetch the status before it has
+        # changed to UPDATE_IN_PROGRESS, so `CREATE_COMPLETE` is also treated
+        # as still in progress here.
+        if bay.status in ['UPDATE_IN_PROGRESS', 'CREATE_COMPLETE']:
+            return False
+        elif bay.status == 'UPDATE_COMPLETE':
+            return True
+        elif bay.status == 'UPDATE_FAILED':
+            msg = (_("Failed to update Bay '%(name)s' - %(reason)s")
+                   % {'name': self.name, 'reason': bay.status_reason})
+            raise exception.ResourceInError(status_reason=msg,
+                                            resource_status=bay.status)
+
+        else:
+            msg = (_("Unknown status updating Bay '%(name)s' - %(reason)s")
+                   % {'name': self.name, 'reason': bay.status_reason})
+            raise exception.ResourceUnknownStatus(status_reason=msg,
+                                                  resource_status=bay.status)
+
+    def check_delete_complete(self, id):
+        if not id:
+            return True
+        try:
+            self.client().bays.get(id)
+        except Exception as exc:
+            self.client_plugin().ignore_not_found(exc)
+            return True
+        return False
+
+>>>>>>> heat/engine/resources/openstack/magnum/bay.py
 
 def resource_mapping():
     return {
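
The side of this conflict that keeps the full Bay implementation follows Heat's usual create-then-poll pattern: handle_create issues the API call and stores an ID, and check_create_complete is polled until the backend reports a terminal status. A framework-free sketch of that state machine, with a fake client standing in for python-magnumclient:

    # Sketch of the create-then-poll pattern used above; FakeBayClient is
    # purely illustrative.
    class FakeBayClient:
        def __init__(self):
            self._polls = 0

        def get_status(self, bay_id):
            # Pretend the bay finishes building after two polls.
            self._polls += 1
            return "CREATE_IN_PROGRESS" if self._polls < 3 else "CREATE_COMPLETE"

    def check_create_complete(client, bay_id):
        status = client.get_status(bay_id)
        if status in (None, "CREATE_IN_PROGRESS"):
            return False                # keep polling
        if status == "CREATE_COMPLETE":
            return True
        raise RuntimeError("Bay %s in unexpected status %s" % (bay_id, status))

    client = FakeBayClient()
    while not check_create_complete(client, "bay-1"):
        pass                            # Heat would sleep between polls
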
diff --git a/heat/engine/resources/openstack/neutron/extrarouteset.py b/heat/engine/resources/openstack/neutron/extrarouteset.py
index c0378f2..cc53623 100644
--- a/heat/engine/resources/openstack/neutron/extrarouteset.py
+++ b/heat/engine/resources/openstack/neutron/extrarouteset.py
@@ -13,6 +13,10 @@
 # under the License.
 
 from operator import itemgetter
+<<<<<<< heat/engine/resources/openstack/neutron/extrarouteset.py
+=======
+import six
+>>>>>>> heat/engine/resources/openstack/neutron/extrarouteset.py
 
 from oslo_log import log as logging
 
@@ -112,7 +116,11 @@ class ExtraRouteSet(neutron.NeutronResource):
 
     def add_dependencies(self, deps):
         super(ExtraRouteSet, self).add_dependencies(deps)
+<<<<<<< heat/engine/resources/openstack/neutron/extrarouteset.py
         for resource in self.stack.values():
+=======
+        for resource in six.itervalues(self.stack):
+>>>>>>> heat/engine/resources/openstack/neutron/extrarouteset.py
             # depend on any RouterInterface in this template with the same
             # router as this router
             if resource.has_interface('OS::Neutron::RouterInterface'):
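
This conflict is the recurring Python 2 clean-up in this merge: one side iterates self.stack.values() directly while the other still routes through six.itervalues(). On Python 3 the two are equivalent for this loop, so the six dependency can simply be dropped; a plain dict stands in for the stack below:

    # Sketch: on Python 3, dict.values() already returns a lazy view, so
    # six.itervalues(d) is just d.values().
    stack = {"router": "OS::Neutron::Router",
             "iface": "OS::Neutron::RouterInterface"}

    via_values = list(stack.values())

    try:
        import six
        assert list(six.itervalues(stack)) == via_values
    except ImportError:
        # six may no longer be installed, which is exactly the point.
        pass
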
diff --git a/heat/engine/resources/openstack/neutron/floatingip.py b/heat/engine/resources/openstack/neutron/floatingip.py
index 1b1aa1d..edd51f9 100644
--- a/heat/engine/resources/openstack/neutron/floatingip.py
+++ b/heat/engine/resources/openstack/neutron/floatingip.py
@@ -455,6 +455,7 @@ class FloatingIPAssociation(neutron.NeutronResource):
             self.resource_id_set(self.id)
 
 
+<<<<<<< heat/engine/resources/openstack/neutron/floatingip.py
 class FloatingIPPortForward(neutron.NeutronResource):
     """A resource for creating port forwarding for floating IPs.
 
@@ -610,9 +611,14 @@ class FloatingIPPortForward(neutron.NeutronResource):
                 **prop_diff)
 
 
+=======
+>>>>>>> heat/engine/resources/openstack/neutron/floatingip.py
 def resource_mapping():
     return {
         'OS::Neutron::FloatingIP': FloatingIP,
         'OS::Neutron::FloatingIPAssociation': FloatingIPAssociation,
+<<<<<<< heat/engine/resources/openstack/neutron/floatingip.py
         'OS::Neutron::FloatingIPPortForward': FloatingIPPortForward,
+=======
+>>>>>>> heat/engine/resources/openstack/neutron/floatingip.py
     }
diff --git a/heat/engine/resources/openstack/neutron/l2_gateway.py b/heat/engine/resources/openstack/neutron/l2_gateway.py
index 634ee81..b266d48 100644
--- a/heat/engine/resources/openstack/neutron/l2_gateway.py
+++ b/heat/engine/resources/openstack/neutron/l2_gateway.py
@@ -106,10 +106,17 @@ class L2Gateway(neutron.NeutronResource):
 
     @staticmethod
     def _remove_none_value_props(props):
+<<<<<<< heat/engine/resources/openstack/neutron/l2_gateway.py
         if isinstance(props, collections.abc.Mapping):
             return dict((k, L2Gateway._remove_none_value_props(v)) for k, v
                         in props.items() if v is not None)
         elif (isinstance(props, collections.abc.Sequence) and
+=======
+        if isinstance(props, collections.Mapping):
+            return dict((k, L2Gateway._remove_none_value_props(v)) for k, v
+                        in props.items() if v is not None)
+        elif (isinstance(props, collections.Sequence) and
+>>>>>>> heat/engine/resources/openstack/neutron/l2_gateway.py
               not isinstance(props, str)):
             return list(L2Gateway._remove_none_value_props(p) for p in props
                         if p is not None)
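
Several conflicts in this merge (here, and again in wait_condition.py, rsrc_defn.py and software_config_io.py below) come down to the same change: the abstract base classes are imported from collections.abc, because the bare collections aliases were removed in Python 3.10. A short sketch of why only the collections.abc spelling is safe going forward:

    # Sketch: isinstance checks against the ABCs must use collections.abc;
    # collections.Mapping was removed in Python 3.10.
    import collections
    import collections.abc

    props = {"name": "gw1", "devices": None}

    assert isinstance(props, collections.abc.Mapping)
    # On Python >= 3.10 the old alias is simply gone:
    assert (not hasattr(collections, "Mapping")
            or isinstance(props, collections.Mapping))
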
diff --git a/heat/engine/resources/openstack/neutron/net.py b/heat/engine/resources/openstack/neutron/net.py
index 156c030..45505af 100644
--- a/heat/engine/resources/openstack/neutron/net.py
+++ b/heat/engine/resources/openstack/neutron/net.py
@@ -33,11 +33,19 @@ class Net(neutron.NeutronResource):
     PROPERTIES = (
         NAME, VALUE_SPECS, ADMIN_STATE_UP, TENANT_ID, SHARED,
         DHCP_AGENT_IDS, PORT_SECURITY_ENABLED, QOS_POLICY,
+<<<<<<< heat/engine/resources/openstack/neutron/net.py
         DNS_DOMAIN, AVAILABILITY_ZONE_HINTS, TAGS,
     ) = (
         'name', 'value_specs', 'admin_state_up', 'tenant_id', 'shared',
         'dhcp_agent_ids', 'port_security_enabled', 'qos_policy',
         'dns_domain', 'availability_zone_hints', 'tags',
+=======
+        DNS_DOMAIN, TAGS,
+    ) = (
+        'name', 'value_specs', 'admin_state_up', 'tenant_id', 'shared',
+        'dhcp_agent_ids', 'port_security_enabled', 'qos_policy',
+        'dns_domain', 'tags',
+>>>>>>> heat/engine/resources/openstack/neutron/net.py
     )
 
     ATTRIBUTES = (
@@ -118,12 +126,15 @@ class Net(neutron.NeutronResource):
             update_allowed=True,
             support_status=support.SupportStatus(version='7.0.0')
         ),
+<<<<<<< heat/engine/resources/openstack/neutron/net.py
         AVAILABILITY_ZONE_HINTS: properties.Schema(
             properties.Schema.LIST,
             _('Availability zone candidates for the network. It requires the '
               'availability_zone extension to be available.'),
             support_status=support.SupportStatus(version='19.0.0')
         ),
+=======
+>>>>>>> heat/engine/resources/openstack/neutron/net.py
         TAGS: properties.Schema(
             properties.Schema.LIST,
             _('The tags to be added to the network.'),
diff --git a/heat/engine/resources/openstack/neutron/port.py b/heat/engine/resources/openstack/neutron/port.py
index a06fbf3..f6bbb41 100644
--- a/heat/engine/resources/openstack/neutron/port.py
+++ b/heat/engine/resources/openstack/neutron/port.py
@@ -14,7 +14,10 @@
 from oslo_log import log as logging
 from oslo_serialization import jsonutils
 
+<<<<<<< heat/engine/resources/openstack/neutron/port.py
 from heat.common import exception
+=======
+>>>>>>> heat/engine/resources/openstack/neutron/port.py
 from heat.common.i18n import _
 from heat.engine import attributes
 from heat.engine import constraints
@@ -54,11 +57,19 @@ class Port(neutron.NeutronResource):
     EXTRA_PROPERTIES = (
         VALUE_SPECS, ADMIN_STATE_UP, MAC_ADDRESS,
         ALLOWED_ADDRESS_PAIRS, VNIC_TYPE, QOS_POLICY,
+<<<<<<< heat/engine/resources/openstack/neutron/port.py
         PORT_SECURITY_ENABLED, PROPAGATE_UPLINK_STATUS, NO_FIXED_IPS,
     ) = (
         'value_specs', 'admin_state_up', 'mac_address',
         'allowed_address_pairs', 'binding:vnic_type', 'qos_policy',
         'port_security_enabled', 'propagate_uplink_status', 'no_fixed_ips',
+=======
+        PORT_SECURITY_ENABLED, PROPAGATE_UPLINK_STATUS,
+    ) = (
+        'value_specs', 'admin_state_up', 'mac_address',
+        'allowed_address_pairs', 'binding:vnic_type', 'qos_policy',
+        'port_security_enabled', 'propagate_uplink_status',
+>>>>>>> heat/engine/resources/openstack/neutron/port.py
     )
 
     _FIXED_IP_KEYS = (
@@ -314,6 +325,7 @@ class Port(neutron.NeutronResource):
             update_allowed=True,
             support_status=support.SupportStatus(version='15.0.0')
         ),
+<<<<<<< heat/engine/resources/openstack/neutron/port.py
         NO_FIXED_IPS: properties.Schema(
             properties.Schema.BOOLEAN,
             _('Flag to disable all fixed ips on the port.'),
@@ -321,6 +333,8 @@ class Port(neutron.NeutronResource):
             support_status=support.SupportStatus(version='16.0.0'),
             default=False
         ),
+=======
+>>>>>>> heat/engine/resources/openstack/neutron/port.py
     }
 
     # Need to update properties_schema with other properties before
@@ -450,6 +464,7 @@ class Port(neutron.NeutronResource):
             )
         ]
 
+<<<<<<< heat/engine/resources/openstack/neutron/port.py
     def validate(self):
         super(Port, self).validate()
         fixed_ips = self.properties.get(self.FIXED_IPS)
@@ -458,6 +473,8 @@ class Port(neutron.NeutronResource):
             raise exception.ResourcePropertyConflict(self.FIXED_IPS,
                                                      self.NO_FIXED_IPS)
 
+=======
+>>>>>>> heat/engine/resources/openstack/neutron/port.py
     def add_dependencies(self, deps):
         super(Port, self).add_dependencies(deps)
         # Depend on any Subnet in this template with the same
@@ -496,6 +513,7 @@ class Port(neutron.NeutronResource):
             self.set_tags(tags)
 
     def _prepare_port_properties(self, props, prepare_for_update=False):
+<<<<<<< heat/engine/resources/openstack/neutron/port.py
         if not props.pop(self.NO_FIXED_IPS, False):
             if self.FIXED_IPS in props:
                 fixed_ips = props[self.FIXED_IPS]
@@ -518,6 +536,26 @@ class Port(neutron.NeutronResource):
         else:
             props[self.FIXED_IPS] = []
 
+=======
+        if self.FIXED_IPS in props:
+            fixed_ips = props[self.FIXED_IPS]
+            if fixed_ips:
+                for fixed_ip in fixed_ips:
+                    for key, value in list(fixed_ip.items()):
+                        if value is None:
+                            fixed_ip.pop(key)
+                    if self.FIXED_IP_SUBNET in fixed_ip:
+                        fixed_ip[
+                            'subnet_id'] = fixed_ip.pop(self.FIXED_IP_SUBNET)
+            else:
+                # Passing an empty list would have created a port without
+                # fixed_ips during CREATE and released the existing
+                # fixed_ips during UPDATE (default Neutron behaviour).
+                # However, for backward compatibility we let Neutron
+                # assign an IP on CREATE and keep the assigned IPs on
+                # UPDATE by not passing the property. ref bug #1538473.
+                del props[self.FIXED_IPS]
+>>>>>>> heat/engine/resources/openstack/neutron/port.py
         # delete empty MAC addresses so that Neutron validation code
         # wouldn't fail, as it does not accept Nones
         if self.ALLOWED_ADDRESS_PAIRS in props:
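
Both sides of the _prepare_port_properties conflict share the same core clean-up of each fixed_ips entry: drop keys whose value is None and rename the template-facing subnet key to Neutron's subnet_id. A standalone sketch of just that step; the key names follow the hunk, the sample data is invented:

    # Sketch: clean up template-style fixed_ips before sending them to Neutron.
    FIXED_IP_SUBNET = "subnet"

    def prepare_fixed_ips(fixed_ips):
        cleaned = []
        for fixed_ip in fixed_ips:
            # Drop keys the user left unset so Neutron validation does not choke.
            entry = {k: v for k, v in fixed_ip.items() if v is not None}
            if FIXED_IP_SUBNET in entry:
                entry["subnet_id"] = entry.pop(FIXED_IP_SUBNET)
            cleaned.append(entry)
        return cleaned

    print(prepare_fixed_ips([{"subnet": "private-subnet", "ip_address": None}]))
    # -> [{'subnet_id': 'private-subnet'}]
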
diff --git a/heat/engine/resources/openstack/neutron/provider_net.py b/heat/engine/resources/openstack/neutron/provider_net.py
index 899b295..6b8c00f 100644
--- a/heat/engine/resources/openstack/neutron/provider_net.py
+++ b/heat/engine/resources/openstack/neutron/provider_net.py
@@ -37,6 +37,7 @@ class ProviderNet(net.Net):
     PROPERTIES = (
         NAME, PROVIDER_NETWORK_TYPE, PROVIDER_PHYSICAL_NETWORK,
         PROVIDER_SEGMENTATION_ID, ADMIN_STATE_UP, SHARED,
+<<<<<<< heat/engine/resources/openstack/neutron/provider_net.py
         PORT_SECURITY_ENABLED, ROUTER_EXTERNAL, DNS_DOMAIN,
         AVAILABILITY_ZONE_HINTS, TAGS,
     ) = (
@@ -44,13 +45,26 @@ class ProviderNet(net.Net):
         'segmentation_id', 'admin_state_up', 'shared',
         'port_security_enabled', 'router_external', 'dns_domain',
         'availability_zone_hints', 'tags',
+=======
+        PORT_SECURITY_ENABLED, ROUTER_EXTERNAL, DNS_DOMAIN, TAGS,
+    ) = (
+        'name', 'network_type', 'physical_network',
+        'segmentation_id', 'admin_state_up', 'shared',
+        'port_security_enabled', 'router_external', 'dns_domain', 'tags',
+>>>>>>> heat/engine/resources/openstack/neutron/provider_net.py
 
     )
 
     ATTRIBUTES = (
+<<<<<<< heat/engine/resources/openstack/neutron/provider_net.py
         STATUS, SUBNETS, SEGMENTS,
     ) = (
         'status', 'subnets', 'segments',
+=======
+        STATUS, SUBNETS,
+    ) = (
+        'status', 'subnets',
+>>>>>>> heat/engine/resources/openstack/neutron/provider_net.py
     )
 
     NETWORK_TYPES = (
@@ -121,6 +135,7 @@ class ProviderNet(net.Net):
             update_allowed=True,
             support_status=support.SupportStatus(version='15.0.0')
         ),
+<<<<<<< heat/engine/resources/openstack/neutron/provider_net.py
         AVAILABILITY_ZONE_HINTS: properties.Schema(
             properties.Schema.LIST,
             _('Availability zone candidates for the network. It requires the '
@@ -128,6 +143,8 @@ class ProviderNet(net.Net):
             update_allowed=True,
             support_status=support.SupportStatus(version='19.0.0')
         ),
+=======
+>>>>>>> heat/engine/resources/openstack/neutron/provider_net.py
     }
 
     attributes_schema = {
@@ -139,11 +156,14 @@ class ProviderNet(net.Net):
             _("Subnets of this network."),
             type=attributes.Schema.LIST
         ),
+<<<<<<< heat/engine/resources/openstack/neutron/provider_net.py
         SEGMENTS: attributes.Schema(
             _("The segments of this network."),
             type=attributes.Schema.LIST,
             support_status=support.SupportStatus(version='16.0.0'),
         ),
+=======
+>>>>>>> heat/engine/resources/openstack/neutron/provider_net.py
     }
 
     def validate(self):
diff --git a/heat/engine/resources/openstack/neutron/qos.py b/heat/engine/resources/openstack/neutron/qos.py
index a2a65ad..bab5e73 100644
--- a/heat/engine/resources/openstack/neutron/qos.py
+++ b/heat/engine/resources/openstack/neutron/qos.py
@@ -378,6 +378,7 @@ class QoSMinimumBandwidthRule(QoSRule):
         return [self.resource_id, self.policy_id]
 
 
+<<<<<<< heat/engine/resources/openstack/neutron/qos.py
 class QoSMinimumPacketRateRule(QoSRule):
     """A resource for guaranteeing packet rate.
 
@@ -464,11 +465,16 @@ class QoSMinimumPacketRateRule(QoSRule):
         return [self.resource_id, self.policy_id]
 
 
+=======
+>>>>>>> heat/engine/resources/openstack/neutron/qos.py
 def resource_mapping():
     return {
         'OS::Neutron::QoSPolicy': QoSPolicy,
         'OS::Neutron::QoSBandwidthLimitRule': QoSBandwidthLimitRule,
         'OS::Neutron::QoSDscpMarkingRule': QoSDscpMarkingRule,
         'OS::Neutron::QoSMinimumBandwidthRule': QoSMinimumBandwidthRule,
+<<<<<<< heat/engine/resources/openstack/neutron/qos.py
         'OS::Neutron::QoSMinimumPacketRateRule': QoSMinimumPacketRateRule,
+=======
+>>>>>>> heat/engine/resources/openstack/neutron/qos.py
     }
diff --git a/heat/engine/resources/openstack/neutron/router.py b/heat/engine/resources/openstack/neutron/router.py
index 72f7dbb..ed25c74 100644
--- a/heat/engine/resources/openstack/neutron/router.py
+++ b/heat/engine/resources/openstack/neutron/router.py
@@ -35,12 +35,19 @@ class Router(neutron.NeutronResource):
 
     PROPERTIES = (
         NAME, EXTERNAL_GATEWAY, VALUE_SPECS, ADMIN_STATE_UP,
+<<<<<<< heat/engine/resources/openstack/neutron/router.py
         L3_AGENT_ID, L3_AGENT_IDS, DISTRIBUTED, HA, AVAILABILITY_ZONE_HINTS,
         TAGS,
     ) = (
         'name', 'external_gateway_info', 'value_specs', 'admin_state_up',
         'l3_agent_id', 'l3_agent_ids', 'distributed', 'ha',
         'availability_zone_hints', 'tags',
+=======
+        L3_AGENT_ID, L3_AGENT_IDS, DISTRIBUTED, HA, TAGS,
+    ) = (
+        'name', 'external_gateway_info', 'value_specs', 'admin_state_up',
+        'l3_agent_id', 'l3_agent_ids', 'distributed', 'ha', 'tags',
+>>>>>>> heat/engine/resources/openstack/neutron/router.py
     )
 
     _EXTERNAL_GATEWAY_KEYS = (
@@ -173,6 +180,7 @@ class Router(neutron.NeutronResource):
               'do not support distributed and ha at the same time.'),
             support_status=support.SupportStatus(version='2015.1')
         ),
+<<<<<<< heat/engine/resources/openstack/neutron/router.py
         AVAILABILITY_ZONE_HINTS: properties.Schema(
             properties.Schema.LIST,
             _('Availability zone candidates for the router. It requires the '
@@ -180,6 +188,8 @@ class Router(neutron.NeutronResource):
             update_allowed=True,
             support_status=support.SupportStatus(version='19.0.0')
         ),
+=======
+>>>>>>> heat/engine/resources/openstack/neutron/router.py
         TAGS: properties.Schema(
             properties.Schema.LIST,
             _('The tags to be added to the router.'),
diff --git a/heat/engine/resources/openstack/neutron/sfc/port_pair_group.py b/heat/engine/resources/openstack/neutron/sfc/port_pair_group.py
index 280aea7..1ec7dad 100644
--- a/heat/engine/resources/openstack/neutron/sfc/port_pair_group.py
+++ b/heat/engine/resources/openstack/neutron/sfc/port_pair_group.py
@@ -24,7 +24,11 @@ class PortPairGroup(neutron.NeutronResource):
 
     Multiple port-pairs may be included in a port-pair-group to allow the
     specification of a set of functionally equivalent Service Functions that
+<<<<<<< heat/engine/resources/openstack/neutron/sfc/port_pair_group.py
     can be used for load distribution.
+=======
+    can be be used for load distribution.
+>>>>>>> heat/engine/resources/openstack/neutron/sfc/port_pair_group.py
     """
 
     support_status = support.SupportStatus(
diff --git a/heat/engine/resources/openstack/nova/keypair.py b/heat/engine/resources/openstack/nova/keypair.py
index 25df73a..445e56e 100644
--- a/heat/engine/resources/openstack/nova/keypair.py
+++ b/heat/engine/resources/openstack/nova/keypair.py
@@ -22,8 +22,12 @@ from heat.engine import translation
 
 
 NOVA_MICROVERSIONS = (MICROVERSION_KEY_TYPE,
+<<<<<<< heat/engine/resources/openstack/nova/keypair.py
                       MICROVERSION_USER,
                       MICROVERSION_PUBLIC_KEY) = ('2.2', '2.10', '2.92')
+=======
+                      MICROVERSION_USER) = ('2.2', '2.10')
+>>>>>>> heat/engine/resources/openstack/nova/keypair.py
 
 
 class KeyPair(resource.Resource):
@@ -72,10 +76,16 @@ class KeyPair(resource.Resource):
         ),
         PUBLIC_KEY: properties.Schema(
             properties.Schema.STRING,
+<<<<<<< heat/engine/resources/openstack/nova/keypair.py
             _('The public key. This allows users to supply the public key '
               'from a pre-existing key pair. In Nova api version < 2.92, '
               'if not supplied, a new key pair will be generated. '
               'This property is required since Nova api version 2.92.')
+=======
+            _('The optional public key. This allows users to supply the '
+              'public key from a pre-existing key pair. If not supplied, a '
+              'new key pair will be generated.')
+>>>>>>> heat/engine/resources/openstack/nova/keypair.py
         ),
         KEY_TYPE: properties.Schema(
             properties.Schema.STRING,
@@ -150,7 +160,10 @@ class KeyPair(resource.Resource):
         # Check if key_type is allowed to use
         key_type = self.properties[self.KEY_TYPE]
         user = self.properties[self.USER]
+<<<<<<< heat/engine/resources/openstack/nova/keypair.py
         public_key = self.properties[self.PUBLIC_KEY]
+=======
+>>>>>>> heat/engine/resources/openstack/nova/keypair.py
 
         validate_props = []
         c_plugin = self.client_plugin()
@@ -164,12 +177,15 @@ class KeyPair(resource.Resource):
                      'support required api microversion.') % validate_props)
             raise exception.StackValidationFailed(message=msg)
 
+<<<<<<< heat/engine/resources/openstack/nova/keypair.py
         if not public_key and c_plugin.is_version_supported(
                 MICROVERSION_PUBLIC_KEY):
             msg = _('The public_key property is required by the nova API '
                     'version currently used.')
             raise exception.StackValidationFailed(message=msg)
 
+=======
+>>>>>>> heat/engine/resources/openstack/nova/keypair.py
     def handle_create(self):
         pub_key = self.properties[self.PUBLIC_KEY] or None
         user_id = self.properties[self.USER]
@@ -181,7 +197,11 @@ class KeyPair(resource.Resource):
         }
 
         if key_type:
+<<<<<<< heat/engine/resources/openstack/nova/keypair.py
             create_kwargs['key_type'] = key_type
+=======
+            create_kwargs[self.KEY_TYPE] = key_type
+>>>>>>> heat/engine/resources/openstack/nova/keypair.py
         if user_id:
             create_kwargs['user_id'] = user_id
 
diff --git a/heat/engine/resources/openstack/nova/server.py b/heat/engine/resources/openstack/nova/server.py
index 700527d..70d6a82 100644
--- a/heat/engine/resources/openstack/nova/server.py
+++ b/heat/engine/resources/openstack/nova/server.py
@@ -12,7 +12,10 @@
 #    under the License.
 
 import copy
+<<<<<<< heat/engine/resources/openstack/nova/server.py
 import ipaddress
+=======
+>>>>>>> heat/engine/resources/openstack/nova/server.py
 
 from oslo_config import cfg
 from oslo_log import log as logging
@@ -548,12 +551,20 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
         ),
         USER_DATA_UPDATE_POLICY: properties.Schema(
             properties.Schema.STRING,
+<<<<<<< heat/engine/resources/openstack/nova/server.py
             _('Policy on how to apply a user_data update; by '
               'ignoring it, by replacing the entire server, '
               'or by rebuilding the server.'),
             default='REPLACE',
             constraints=[
                 constraints.AllowedValues(['REPLACE', 'IGNORE', 'REBUILD']),
+=======
+            _('Policy on how to apply a user_data update; either by '
+              'ignoring it or by replacing the entire server.'),
+            default='REPLACE',
+            constraints=[
+                constraints.AllowedValues(['REPLACE', 'IGNORE']),
+>>>>>>> heat/engine/resources/openstack/nova/server.py
             ],
             support_status=support.SupportStatus(version='6.0.0'),
             update_allowed=True
@@ -944,6 +955,7 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
 
     def parse_live_resource_data(self, resource_properties, resource_data):
         server, server_data = resource_data
+<<<<<<< heat/engine/resources/openstack/nova/server.py
         flavor = server_data.get(self.FLAVOR)
         # NOTE(pas-ha) since compute API 2.47 flavor in instance
         # does not have "id" but "original_name" instead,
@@ -957,6 +969,11 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
         result = {
             # there's a risk that flavor id will be int type, so cast to str
             self.FLAVOR: str(flavor_value),
+=======
+        result = {
+            # there's a risk that flavor id will be int type, so cast to str
+            self.FLAVOR: str(server_data.get(self.FLAVOR)['id']),
+>>>>>>> heat/engine/resources/openstack/nova/server.py
             self.IMAGE: str(server_data.get(self.IMAGE)['id']),
             self.NAME: server_data.get(self.NAME),
             self.METADATA: server_data.get(self.METADATA),
@@ -967,8 +984,13 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
         return result
 
     def _get_live_networks(self, server, props):
+<<<<<<< heat/engine/resources/openstack/nova/server.py
         reality_nets = self._get_server_addresses(server,
                                                   extend_networks=False)
+=======
+        reality_nets = self._add_attrs_for_address(server,
+                                                   extend_networks=False)
+>>>>>>> heat/engine/resources/openstack/nova/server.py
         reality_net_ids = {}
         client_plugin = self.client_plugin('neutron')
         for net_key in reality_nets:
@@ -1136,7 +1158,11 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
             LOG.warning("Failed to fetch resource attributes: %s", ex)
             return
 
+<<<<<<< heat/engine/resources/openstack/nova/server.py
     def _get_server_addresses(self, server, extend_networks=True):
+=======
+    def _add_attrs_for_address(self, server, extend_networks=True):
+>>>>>>> heat/engine/resources/openstack/nova/server.py
         """Adds port id, subnets and network attributes to addresses list.
 
         This method is used only for resolving attributes.
@@ -1145,6 +1171,7 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
                                 the net is returned without replacing name on
                                 id.
         """
+<<<<<<< heat/engine/resources/openstack/nova/server.py
         nets = {}
         ifaces = self.client('neutron').list_ports(device_id=server.id)
         for port in ifaces['ports']:
@@ -1174,11 +1201,23 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
                             'OS-EXT-IPS:type': 'floating',
                             'port': None})
 
+=======
+        nets = copy.deepcopy(server.addresses) or {}
+        ifaces = server.interface_list()
+        ip_mac_mapping_on_port_id = dict(((iface.fixed_ips[0]['ip_address'],
+                                           iface.mac_addr), iface.port_id)
+                                         for iface in ifaces)
+        for net_name in nets:
+            for addr in nets[net_name]:
+                addr['port'] = ip_mac_mapping_on_port_id.get(
+                    (addr['addr'], addr['OS-EXT-IPS-MAC:mac_addr']))
+>>>>>>> heat/engine/resources/openstack/nova/server.py
                 # _get_live_networks() uses this method to get reality_nets.
                 # We don't need to get subnets and network in that case. Only
                # do the external calls if extend_networks is true, i.e. called
                 # from _resolve_attribute()
                 if not extend_networks:
+<<<<<<< heat/engine/resources/openstack/nova/server.py
                     net.append(addr)
                     continue
 
@@ -1187,6 +1226,19 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
 
                 net.append(addr)
 
+=======
+                    continue
+                try:
+                    port = self.client('neutron').show_port(
+                        addr['port'])['port']
+                except Exception as ex:
+                    addr['subnets'], addr['network'] = None, None
+                    LOG.warning("Failed to fetch resource attributes: %s", ex)
+                    continue
+                addr['subnets'] = self._get_subnets_attr(port['fixed_ips'])
+                addr['network'] = self._get_network_attr(port['network_id'])
+
+>>>>>>> heat/engine/resources/openstack/nova/server.py
         if extend_networks:
             return self._extend_networks(nets)
         else:
@@ -1230,7 +1282,11 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
             self.client_plugin().ignore_not_found(e)
             return ''
         if name == self.ADDRESSES:
+<<<<<<< heat/engine/resources/openstack/nova/server.py
             return self._get_server_addresses(server)
+=======
+            return self._add_attrs_for_address(server)
+>>>>>>> heat/engine/resources/openstack/nova/server.py
         if name == self.NETWORKS_ATTR:
             return self._extend_networks(server.networks)
         if name == self.INSTANCE_NAME:
@@ -1324,6 +1380,7 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
                                                            'kwargs': kwargs})
         return prg
 
+<<<<<<< heat/engine/resources/openstack/nova/server.py
     def _update_user_data_rebuild(self, after_props):
         user_data = after_props[self.USER_DATA]
         prg = progress.ServerUpdateProgress(
@@ -1332,6 +1389,8 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
             handler_extra={'args': (user_data,)})
         return prg
 
+=======
+>>>>>>> heat/engine/resources/openstack/nova/server.py
     def _update_networks(self, server, after_props):
         updaters = []
         new_networks = after_props[self.NETWORKS]
@@ -1423,12 +1482,15 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
         if self.FLAVOR in prop_diff:
             updaters.extend(self._update_flavor(after_props))
 
+<<<<<<< heat/engine/resources/openstack/nova/server.py
         if self.USER_DATA in prop_diff:
             # We only care about rebuild here; the standard replace is
             # dealt with elsewhere.
             if after_props[self.USER_DATA_UPDATE_POLICY] == 'REBUILD':
                 updaters.append(self._update_user_data_rebuild(after_props))
 
+=======
+>>>>>>> heat/engine/resources/openstack/nova/server.py
         if self.IMAGE in prop_diff:
             updaters.append(self._update_image(after_props))
         elif self.ADMIN_PASS in prop_diff:
@@ -1760,7 +1822,11 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
         status = cp.get_status(server)
         LOG.debug('%(name)s check_suspend_complete status = %(status)s',
                   {'name': self.name, 'status': status})
+<<<<<<< heat/engine/resources/openstack/nova/server.py
         if status in (cp.deferred_server_statuses | {'ACTIVE'}):
+=======
+        if status in list(cp.deferred_server_statuses + ['ACTIVE']):
+>>>>>>> heat/engine/resources/openstack/nova/server.py
             return status == 'SUSPENDED'
         else:
             exc = exception.ResourceUnknownStatus(
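
In the check_suspend_complete hunk above, one side treats deferred_server_statuses as a set and unions it with {'ACTIVE'}, while the other concatenates lists. The membership test is the same either way; the set form only assumes the client plugin now exposes a set. A sketch with illustrative status values (the real plugin's list is longer):

    # Sketch: both spellings answer "is the server still transitioning or ACTIVE?"
    deferred_statuses_set = {"BUILD", "HARD_REBOOT", "REBOOT", "RESIZE"}
    deferred_statuses_list = sorted(deferred_statuses_set)

    for status in ("ACTIVE", "SUSPENDED", "REBOOT"):
        as_set = status in (deferred_statuses_set | {"ACTIVE"})
        as_list = status in list(deferred_statuses_list + ["ACTIVE"])
        assert as_set == as_list
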
diff --git a/heat/engine/resources/openstack/nova/server_group.py b/heat/engine/resources/openstack/nova/server_group.py
index 9930033..9cb1f88 100644
--- a/heat/engine/resources/openstack/nova/server_group.py
+++ b/heat/engine/resources/openstack/nova/server_group.py
@@ -17,8 +17,12 @@ from heat.engine import properties
 from heat.engine import resource
 from heat.engine import support
 
+<<<<<<< heat/engine/resources/openstack/nova/server_group.py
 NOVA_MICROVERSIONS = (MICROVERSION_SOFT_POLICIES, MICROVERSION_RULE) = ('2.15',
                                                                         '2.64')
+=======
+NOVA_MICROVERSIONS = (MICROVERSION_SOFT_POLICIES) = ('2.15')
+>>>>>>> heat/engine/resources/openstack/nova/server_group.py
 
 
 class ServerGroup(resource.Resource):
@@ -35,6 +39,7 @@ class ServerGroup(resource.Resource):
     entity = 'server_groups'
 
     PROPERTIES = (
+<<<<<<< heat/engine/resources/openstack/nova/server_group.py
         NAME, POLICIES, RULES
     ) = (
         'name', 'policies', 'rules'
@@ -42,6 +47,13 @@ class ServerGroup(resource.Resource):
 
     _RULES = (MAX_SERVER_PER_HOST) = ('max_server_per_host')
 
+=======
+        NAME, POLICIES
+    ) = (
+        'name', 'policies'
+    )
+
+>>>>>>> heat/engine/resources/openstack/nova/server_group.py
     properties_schema = {
         NAME: properties.Schema(
             properties.Schema.STRING,
@@ -49,7 +61,11 @@ class ServerGroup(resource.Resource):
         ),
         POLICIES: properties.Schema(
             properties.Schema.LIST,
+<<<<<<< heat/engine/resources/openstack/nova/server_group.py
             _('A list of exactly one policy to apply. '
+=======
+            _('A list of string policies to apply. '
+>>>>>>> heat/engine/resources/openstack/nova/server_group.py
               'Defaults to anti-affinity.'),
             default=['anti-affinity'],
             constraints=[
@@ -59,6 +75,7 @@ class ServerGroup(resource.Resource):
             ],
             schema=properties.Schema(
                 properties.Schema.STRING,
+<<<<<<< heat/engine/resources/openstack/nova/server_group.py
             ),
         ),
         RULES: properties.Schema(
@@ -72,6 +89,9 @@ class ServerGroup(resource.Resource):
                 )
             },
             support_status=support.SupportStatus(version='17.0.0'),
+=======
+            )
+>>>>>>> heat/engine/resources/openstack/nova/server_group.py
         ),
     }
 
@@ -85,6 +105,7 @@ class ServerGroup(resource.Resource):
             msg = _('Required microversion for soft policies not supported.')
             raise exception.StackValidationFailed(message=msg)
 
+<<<<<<< heat/engine/resources/openstack/nova/server_group.py
         if self.properties[self.RULES]:
             is_supported = self.client_plugin().is_version_supported(
                 MICROVERSION_RULE)
@@ -116,6 +137,16 @@ class ServerGroup(resource.Resource):
 
         return True
 
+=======
+    def handle_create(self):
+        name = self.physical_resource_name()
+        policies = self.properties[self.POLICIES]
+        client = self.client(version=MICROVERSION_SOFT_POLICIES)
+        server_group = client.server_groups.create(name=name,
+                                                   policies=policies)
+        self.resource_id_set(server_group.id)
+
+>>>>>>> heat/engine/resources/openstack/nova/server_group.py
     def physical_resource_name(self):
         name = self.properties[self.NAME]
         if name:
diff --git a/heat/engine/resources/openstack/nova/server_network_mixin.py b/heat/engine/resources/openstack/nova/server_network_mixin.py
index 62d5019..f74a4b8 100644
--- a/heat/engine/resources/openstack/nova/server_network_mixin.py
+++ b/heat/engine/resources/openstack/nova/server_network_mixin.py
@@ -65,7 +65,11 @@ class ServerNetworkMixin(object):
                 "/".join([self.NETWORKS, self.NETWORK_PORT]))
 
         # if user only specifies network and floating ip, floating ip
+<<<<<<< heat/engine/resources/openstack/nova/server_network_mixin.py
         # can't be associated as the neutron port isn't created/managed
+=======
+        # can't be associated as the the neutron port isn't created/managed
+>>>>>>> heat/engine/resources/openstack/nova/server_network_mixin.py
         # by heat
         if floating_ip is not None:
             if net_id is not None and port is None and subnet is None:
diff --git a/heat/engine/resources/openstack/octavia/loadbalancer.py b/heat/engine/resources/openstack/octavia/loadbalancer.py
index 0e86616..80548d2 100644
--- a/heat/engine/resources/openstack/octavia/loadbalancer.py
+++ b/heat/engine/resources/openstack/octavia/loadbalancer.py
@@ -29,10 +29,17 @@ class LoadBalancer(octavia_base.OctaviaBase):
 
     PROPERTIES = (
         DESCRIPTION, NAME, PROVIDER, VIP_ADDRESS, VIP_SUBNET,
+<<<<<<< heat/engine/resources/openstack/octavia/loadbalancer.py
         ADMIN_STATE_UP, TENANT_ID, FLAVOR, AVAILABILITY_ZONE
     ) = (
         'description', 'name', 'provider', 'vip_address', 'vip_subnet',
         'admin_state_up', 'tenant_id', 'flavor', 'availability_zone'
+=======
+        ADMIN_STATE_UP, TENANT_ID, FLAVOR
+    ) = (
+        'description', 'name', 'provider', 'vip_address', 'vip_subnet',
+        'admin_state_up', 'tenant_id', 'flavor'
+>>>>>>> heat/engine/resources/openstack/octavia/loadbalancer.py
     )
 
     ATTRIBUTES = (
@@ -96,6 +103,7 @@ class LoadBalancer(octavia_base.OctaviaBase):
             constraints=[
                 constraints.CustomConstraint('octavia.flavor')
             ]
+<<<<<<< heat/engine/resources/openstack/octavia/loadbalancer.py
         ),
         AVAILABILITY_ZONE: properties.Schema(
             properties.Schema.STRING,
@@ -103,6 +111,9 @@ class LoadBalancer(octavia_base.OctaviaBase):
             support_status=support.SupportStatus(version='17.0.0'),
         )
 
+=======
+        )
+>>>>>>> heat/engine/resources/openstack/octavia/loadbalancer.py
     }
 
     attributes_schema = {
diff --git a/heat/engine/resources/openstack/octavia/pool.py b/heat/engine/resources/openstack/octavia/pool.py
index 729af07..001edf6 100644
--- a/heat/engine/resources/openstack/octavia/pool.py
+++ b/heat/engine/resources/openstack/octavia/pool.py
@@ -87,7 +87,10 @@ class Pool(octavia_base.OctaviaBase):
                       'required if type is APP_COOKIE.')
                 )
             },
+<<<<<<< heat/engine/resources/openstack/octavia/pool.py
             update_allowed=True,
+=======
+>>>>>>> heat/engine/resources/openstack/octavia/pool.py
         ),
         NAME: properties.Schema(
             properties.Schema.STRING,
@@ -178,15 +181,22 @@ class Pool(octavia_base.OctaviaBase):
             props['listener_id'] = props.pop(self.LISTENER)
         if self.LOADBALANCER in props:
             props['loadbalancer_id'] = props.pop(self.LOADBALANCER)
+<<<<<<< heat/engine/resources/openstack/octavia/pool.py
         self._prepare_session_persistence(props)
         return props
 
     def _prepare_session_persistence(self, props):
+=======
+>>>>>>> heat/engine/resources/openstack/octavia/pool.py
         session_p = props.get(self.SESSION_PERSISTENCE)
         if session_p is not None:
             session_props = dict(
                 (k, v) for k, v in session_p.items() if v is not None)
             props[self.SESSION_PERSISTENCE] = session_props
+<<<<<<< heat/engine/resources/openstack/octavia/pool.py
+=======
+        return props
+>>>>>>> heat/engine/resources/openstack/octavia/pool.py
 
     def validate(self):
         super(Pool, self).validate()
@@ -219,9 +229,13 @@ class Pool(octavia_base.OctaviaBase):
         return self.client().pool_create(json={'pool': properties})['pool']
 
     def _resource_update(self, prop_diff):
+<<<<<<< heat/engine/resources/openstack/octavia/pool.py
         props = dict((k, v) for k, v in prop_diff.items() if v is not None)
         self._prepare_session_persistence(props)
         self.client().pool_set(self.resource_id, json={'pool': props})
+=======
+        self.client().pool_set(self.resource_id, json={'pool': prop_diff})
+>>>>>>> heat/engine/resources/openstack/octavia/pool.py
 
     def _resource_delete(self):
         self.client().pool_delete(self.resource_id)
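
One side of the Pool conflict factors the session-persistence clean-up into _prepare_session_persistence so it runs on update as well as create. The essential step is filtering None values out of the nested dict before it goes to Octavia; a sketch over plain dicts with invented sample values:

    # Sketch: strip unset keys from a session_persistence sub-dict so the same
    # helper can serve both the create and the update path.
    def prepare_session_persistence(props, key="session_persistence"):
        session_p = props.get(key)
        if session_p is not None:
            props[key] = {k: v for k, v in session_p.items() if v is not None}
        return props

    props = {"name": "pool1",
             "session_persistence": {"type": "SOURCE_IP", "cookie_name": None}}
    print(prepare_session_persistence(props))
    # -> {'name': 'pool1', 'session_persistence': {'type': 'SOURCE_IP'}}
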
diff --git a/heat/engine/resources/openstack/senlin/cluster.py b/heat/engine/resources/openstack/senlin/cluster.py
index 6d68bb9..03b2431 100644
--- a/heat/engine/resources/openstack/senlin/cluster.py
+++ b/heat/engine/resources/openstack/senlin/cluster.py
@@ -322,11 +322,19 @@ class Cluster(res_base.BaseSenlinResource):
             params['cluster'] = cluster_obj
             if self.PROFILE in params:
                 params['profile_id'] = params.pop(self.PROFILE)
+<<<<<<< heat/engine/resources/openstack/senlin/cluster.py
 
             self.client().update_cluster(**params)
             action = {
                 'cluster_id': self.resource_id,
                 'done': False
+=======
+            action = {
+                'func': 'update_cluster',
+                'params': params,
+                'action_id': None,
+                'done': False,
+>>>>>>> heat/engine/resources/openstack/senlin/cluster.py
             }
             actions.append(action)
         # Resize Cluster
diff --git a/heat/engine/resources/server_base.py b/heat/engine/resources/server_base.py
index 50a04e0..5cbcfb2 100644
--- a/heat/engine/resources/server_base.py
+++ b/heat/engine/resources/server_base.py
@@ -240,8 +240,16 @@ class BaseServer(stack_user.StackUser):
     def _update_software_config_transport(self, prop_diff):
         if not self.user_data_software_config():
             return
+<<<<<<< heat/engine/resources/server_base.py
         self._delete_queue()
         self._delete_temp_url()
+=======
+        try:
+            self._delete_queue()
+            self._delete_temp_url()
+        except Exception:
+            pass
+>>>>>>> heat/engine/resources/server_base.py
 
         metadata = self.metadata_get(True) or {}
         self._create_transport_credentials(prop_diff)
@@ -273,6 +281,7 @@ class BaseServer(stack_user.StackUser):
         object_name = self.data().get('metadata_object_name')
         if not object_name:
             return
+<<<<<<< heat/engine/resources/server_base.py
         endpoint_exists = self.client_plugin().does_endpoint_exist(
             'swift', 'object-store')
         if endpoint_exists:
@@ -285,6 +294,17 @@ class BaseServer(stack_user.StackUser):
                 headers = swift.head_container(container)
                 if int(headers['x-container-object-count']) == 0:
                     swift.delete_container(container)
+=======
+        with self.client_plugin('swift').ignore_not_found:
+            container = self.properties[self.DEPLOYMENT_SWIFT_DATA].get(
+                'container')
+            container = container or self.physical_resource_name()
+            swift = self.client('swift')
+            swift.delete_object(container, object_name)
+            headers = swift.head_container(container)
+            if int(headers['x-container-object-count']) == 0:
+                swift.delete_container(container)
+>>>>>>> heat/engine/resources/server_base.py
         self.data_delete('metadata_object_name')
         self.data_delete('metadata_put_url')
 
@@ -292,6 +312,7 @@ class BaseServer(stack_user.StackUser):
         queue_id = self.data().get('metadata_queue_id')
         if not queue_id:
             return
+<<<<<<< heat/engine/resources/server_base.py
         endpoint_exists = self.client_plugin().does_endpoint_exist(
             'zaqar', 'messaging')
         if endpoint_exists:
@@ -300,6 +321,13 @@ class BaseServer(stack_user.StackUser):
                 self.stack.stack_user_project_id, self._user_token())
             with client_plugin.ignore_not_found:
                 zaqar.queue(queue_id).delete()
+=======
+        client_plugin = self.client_plugin('zaqar')
+        zaqar = client_plugin.create_for_tenant(
+            self.stack.stack_user_project_id, self._user_token())
+        with client_plugin.ignore_not_found:
+            zaqar.queue(queue_id).delete()
+>>>>>>> heat/engine/resources/server_base.py
         self.data_delete('metadata_queue_id')
 
     def handle_snapshot_delete(self, state):
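
The two cleanup conflicts in this file differ in how failures are tolerated: one side checks whether the Swift or Zaqar endpoint exists and scopes ignore_not_found around the deletes, the other wraps everything in a bare try/except. A reduced sketch of the guarded-cleanup shape; all names below are stand-ins for the real client plugins:

    # Sketch: only attempt cleanup when the backing service is deployed, and
    # tolerate objects that are already gone instead of swallowing everything.
    class NotFound(Exception):
        pass

    def delete_temp_object(endpoint_exists, delete_object):
        if not endpoint_exists:
            return                      # nothing to clean up on this cloud
        try:
            delete_object()
        except NotFound:
            pass                        # already gone: treat as success

    def raise_not_found():
        raise NotFound()

    delete_temp_object(False, raise_not_found)   # skipped: endpoint missing
    delete_temp_object(True, raise_not_found)    # tolerated: already deleted
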
diff --git a/heat/engine/resources/stack_resource.py b/heat/engine/resources/stack_resource.py
index d7129da..862d157 100644
--- a/heat/engine/resources/stack_resource.py
+++ b/heat/engine/resources/stack_resource.py
@@ -78,7 +78,11 @@ class StackResource(resource.Resource):
         except Exception as ex:
             path = "%s<%s>" % (self.name, self.template_url)
             raise exception.StackValidationFailed(
+<<<<<<< heat/engine/resources/stack_resource.py
                 error=ex, path=[self.stack.t.RESOURCES, path])
+=======
+                ex, path=[self.stack.t.RESOURCES, path])
+>>>>>>> heat/engine/resources/stack_resource.py
 
     @property
     def template_url(self):
@@ -424,12 +428,36 @@ class StackResource(resource.Resource):
         if action != expected_action:
             return False
 
+<<<<<<< heat/engine/resources/stack_resource.py
         if status == self.IN_PROGRESS:
             if cookie is not None and 'fail_count' in cookie:
                 prev_status_reason = cookie['previous']['status_reason']
                 if status_reason != prev_status_reason:
                     # State has changed, so fail on the next failure
                     cookie['fail_count'] = 1
+=======
+        # Has the action really started?
+        #
+        # The rpc call to update does not guarantee that the stack will be
+        # placed into IN_PROGRESS by the time it returns (it runs stack.update
+        # in a thread), so we can reach this method before the update has
+        # even started.
+        #
+        # So we use a mixture of state (action+status) and updated_at to see
+        # whether the action has actually progressed:
+        # - for very fast updates (like something with one RandomString) we
+        #   will probably miss the state change, but we should catch updated_at;
+        # - for very slow updates we won't see updated_at change for quite a
+        #   while, but we should see the state change.
+        if cookie is not None:
+            prev_state = cookie['previous']['state']
+            prev_updated_at = cookie['previous']['updated_at']
+            if (prev_updated_at == updated_time and
+                    prev_state == (action, status)):
+                return False
+
+        if status == self.IN_PROGRESS:
+>>>>>>> heat/engine/resources/stack_resource.py
             return False
         elif status == self.COMPLETE:
             # For operations where we do not take a resource lock
@@ -443,10 +471,13 @@ class StackResource(resource.Resource):
                 self._nested = None
             return done
         elif status == self.FAILED:
+<<<<<<< heat/engine/resources/stack_resource.py
             if cookie is not None and 'fail_count' in cookie:
                 cookie['fail_count'] -= 1
                 if cookie['fail_count'] > 0:
                     raise resource.PollDelay(10)
+=======
+>>>>>>> heat/engine/resources/stack_resource.py
             raise exception.ResourceFailure(status_reason, self,
                                             action=action)
         else:
@@ -522,6 +553,12 @@ class StackResource(resource.Resource):
         action, status, status_reason, updated_time = status_data
 
         kwargs = self._stack_kwargs(user_params, child_template)
+<<<<<<< heat/engine/resources/stack_resource.py
+=======
+        cookie = {'previous': {
+            'updated_at': updated_time,
+            'state': (action, status)}}
+>>>>>>> heat/engine/resources/stack_resource.py
 
         kwargs.update({
             'stack_identity': dict(self.nested_identifier()),
@@ -535,6 +572,10 @@ class StackResource(resource.Resource):
                 with excutils.save_and_reraise_exception():
                     raw_template.RawTemplate.delete(self.context,
                                                     kwargs['template_id'])
+<<<<<<< heat/engine/resources/stack_resource.py
+=======
+        return cookie
+>>>>>>> heat/engine/resources/stack_resource.py
 
     def check_update_complete(self, cookie=None):
         if cookie is not None and 'target_action' in cookie:
@@ -573,6 +614,7 @@ class StackResource(resource.Resource):
         if stack_identity is None:
             return
 
+<<<<<<< heat/engine/resources/stack_resource.py
         cookie = None
         if not self.stack.convergence:
             try:
@@ -593,13 +635,18 @@ class StackResource(resource.Resource):
                     'fail_count': 2,
                 }
 
+=======
+>>>>>>> heat/engine/resources/stack_resource.py
         with self.rpc_client().ignore_error_by_name('EntityNotFound'):
             if self.abandon_in_progress:
                 self.rpc_client().abandon_stack(self.context, stack_identity)
             else:
                 self.rpc_client().delete_stack(self.context, stack_identity,
                                                cast=False)
+<<<<<<< heat/engine/resources/stack_resource.py
             return cookie
+=======
+>>>>>>> heat/engine/resources/stack_resource.py
 
     def handle_delete(self):
         return self.delete_nested()
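
One side of the check_status_complete conflict replaces the fail_count bookkeeping with a snapshot cookie: it records (action, status, updated_at) when the operation is kicked off and, while polling, treats an unchanged snapshot as "the engine thread has not started yet". A compact sketch of that comparison, independent of Heat's RPC layer:

    # Sketch: detect whether a nested-stack operation has actually started by
    # comparing the current state against the snapshot taken at kick-off time.
    def make_cookie(action, status, updated_at):
        return {"previous": {"state": (action, status), "updated_at": updated_at}}

    def has_started(cookie, action, status, updated_at):
        prev = cookie["previous"]
        # Unchanged state *and* unchanged updated_at means the worker thread
        # has not picked the operation up yet.
        return not (prev["state"] == (action, status)
                    and prev["updated_at"] == updated_at)

    cookie = make_cookie("UPDATE", "COMPLETE", "2024-01-01T00:00:00")
    assert not has_started(cookie, "UPDATE", "COMPLETE", "2024-01-01T00:00:00")
    assert has_started(cookie, "UPDATE", "IN_PROGRESS", "2024-01-01T00:00:00")
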
diff --git a/heat/engine/resources/volume_base.py b/heat/engine/resources/volume_base.py
index e870afe..3025e17 100644
--- a/heat/engine/resources/volume_base.py
+++ b/heat/engine/resources/volume_base.py
@@ -197,10 +197,18 @@ class BaseVolumeAttachment(resource.Resource):
         if self.resource_id:
             server_id = self.properties[self.INSTANCE_ID]
             vol_id = self.properties[self.VOLUME_ID]
+<<<<<<< heat/engine/resources/volume_base.py
             prg = progress.VolumeDetachProgress(server_id, vol_id,
                                                 self.resource_id)
             prg.called = self.client_plugin('nova').detach_volume(
                 server_id, self.resource_id)
+=======
+            self.client_plugin('nova').detach_volume(server_id,
+                                                     self.resource_id)
+            prg = progress.VolumeDetachProgress(
+                server_id, vol_id, self.resource_id)
+            prg.called = True
+>>>>>>> heat/engine/resources/volume_base.py
 
         return prg
 
@@ -208,10 +216,13 @@ class BaseVolumeAttachment(resource.Resource):
         if prg is None:
             return True
 
+<<<<<<< heat/engine/resources/volume_base.py
         if not prg.called:
             prg.called = self.client_plugin('nova').detach_volume(
                 prg.srv_id, self.resource_id)
             return False
+=======
+>>>>>>> heat/engine/resources/volume_base.py
         if not prg.cinder_complete:
             prg.cinder_complete = self.client_plugin(
             ).check_detach_volume_complete(prg.vol_id, prg.srv_id)
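
The detach conflict is about where the initial Nova call happens: one side records whether detach_volume() was actually issued in prg.called and retries it from the completion check, the other calls it inline and hard-codes prg.called = True. A small sketch of the call-once-then-poll progress object, with callables standing in for the Nova and Cinder plugins:

    # Sketch: a progress object that remembers whether the one-shot detach call
    # has been issued, so the poll loop issues it lazily and then only polls.
    class DetachProgress:
        def __init__(self):
            self.called = False
            self.complete = False

    def poll_detach(prg, issue_detach, is_detached):
        if not prg.called:
            prg.called = issue_detach()     # issue the API call exactly once
            return False
        if not prg.complete:
            prg.complete = is_detached()
        return prg.complete

    prg = DetachProgress()
    answers = iter([False, True])
    while not poll_detach(prg, lambda: True, lambda: next(answers)):
        pass                                # Heat would sleep between polls
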
diff --git a/heat/engine/resources/wait_condition.py b/heat/engine/resources/wait_condition.py
index 74de918..a125a5d 100644
--- a/heat/engine/resources/wait_condition.py
+++ b/heat/engine/resources/wait_condition.py
@@ -57,7 +57,11 @@ class BaseWaitConditionHandle(signal_responder.SignalResponder):
         return status in self.WAIT_STATUSES
 
     def _metadata_format_ok(self, metadata):
+<<<<<<< heat/engine/resources/wait_condition.py
         if not isinstance(metadata, collections.abc.Mapping):
+=======
+        if not isinstance(metadata, collections.Mapping):
+>>>>>>> heat/engine/resources/wait_condition.py
             return False
         if set(metadata) != set(self.METADATA_KEYS):
             return False
diff --git a/heat/engine/rsrc_defn.py b/heat/engine/rsrc_defn.py
index 04e12ee..aa097b2 100644
--- a/heat/engine/rsrc_defn.py
+++ b/heat/engine/rsrc_defn.py
@@ -113,17 +113,29 @@ class ResourceDefinition(object):
         assert isinstance(self.description, str)
 
         if properties is not None:
+<<<<<<< heat/engine/rsrc_defn.py
             assert isinstance(properties, (collections.abc.Mapping,
+=======
+            assert isinstance(properties, (collections.Mapping,
+>>>>>>> heat/engine/rsrc_defn.py
                                            function.Function))
             self._hash ^= _hash_data(properties)
 
         if metadata is not None:
+<<<<<<< heat/engine/rsrc_defn.py
             assert isinstance(metadata, (collections.abc.Mapping,
+=======
+            assert isinstance(metadata, (collections.Mapping,
+>>>>>>> heat/engine/rsrc_defn.py
                                          function.Function))
             self._hash ^= _hash_data(metadata)
 
         if depends is not None:
+<<<<<<< heat/engine/rsrc_defn.py
             assert isinstance(depends, (collections.abc.Sequence,
+=======
+            assert isinstance(depends, (collections.Sequence,
+>>>>>>> heat/engine/rsrc_defn.py
                                         function.Function))
             assert not isinstance(depends, str)
             self._hash ^= _hash_data(depends)
@@ -133,7 +145,11 @@ class ResourceDefinition(object):
             self._hash ^= _hash_data(deletion_policy)
 
         if update_policy is not None:
+<<<<<<< heat/engine/rsrc_defn.py
             assert isinstance(update_policy, (collections.abc.Mapping,
+=======
+            assert isinstance(update_policy, (collections.Mapping,
+>>>>>>> heat/engine/rsrc_defn.py
                                               function.Function))
             self._hash ^= _hash_data(update_policy)
 
@@ -432,10 +448,17 @@ def _hash_data(data):
         data = copy.deepcopy(data)
 
     if not isinstance(data, str):
+<<<<<<< heat/engine/rsrc_defn.py
         if isinstance(data, collections.abc.Sequence):
             return hash(tuple(_hash_data(d) for d in data))
 
         if isinstance(data, collections.abc.Mapping):
+=======
+        if isinstance(data, collections.Sequence):
+            return hash(tuple(_hash_data(d) for d in data))
+
+        if isinstance(data, collections.Mapping):
+>>>>>>> heat/engine/rsrc_defn.py
             item_hashes = (hash(k) ^ _hash_data(v) for k, v in data.items())
             return functools.reduce(operator.xor, item_hashes, 0)
 
diff --git a/heat/engine/service.py b/heat/engine/service.py
index 9019ddb..7ee7f3d 100644
--- a/heat/engine/service.py
+++ b/heat/engine/service.py
@@ -12,7 +12,10 @@
 #    under the License.
 
 import collections
+<<<<<<< heat/engine/service.py
 import copy
+=======
+>>>>>>> heat/engine/service.py
 import datetime
 import functools
 import itertools
@@ -683,8 +686,12 @@ class EngineService(service.ServiceBase):
         # Do not stack limit check for admin since admin can see all stacks.
         if not cnxt.is_admin:
             tenant_limit = cfg.CONF.max_stacks_per_tenant
+<<<<<<< heat/engine/service.py
             if (tenant_limit >= 0 and
                     stack_object.Stack.count_all(cnxt) >= tenant_limit):
+=======
+            if stack_object.Stack.count_all(cnxt) >= tenant_limit:
+>>>>>>> heat/engine/service.py
                 message = _("You have reached the maximum stacks per tenant, "
                             "%d. Please delete some stacks.") % tenant_limit
                 raise exception.RequestLimitExceeded(message=message)
@@ -836,10 +843,13 @@ class EngineService(service.ServiceBase):
                 except exception.AuthorizationFailure as ex:
                     stack.state_set(stack.action, stack.FAILED,
                                     str(ex))
+<<<<<<< heat/engine/service.py
                 except Exception:
                     LOG.exception('Failed to create stack user project')
                     stack.state_set(stack.action, stack.FAILED,
                                     'Failed to create stack user project')
+=======
+>>>>>>> heat/engine/service.py
 
         def _stack_create(stack, msg_queue=None):
             # Create/Adopt a stack, and create the periodic task if successful
@@ -1028,11 +1038,17 @@ class EngineService(service.ServiceBase):
         LOG.info('Updating stack %s', db_stack.name)
         if cfg.CONF.reauthentication_auth_method == 'trusts':
             current_stack = parser.Stack.load(
+<<<<<<< heat/engine/service.py
                 cnxt, stack=db_stack, use_stored_context=True,
                 check_refresh_cred=True)
         else:
             current_stack = parser.Stack.load(cnxt, stack=db_stack,
                                               check_refresh_cred=True)
+=======
+                cnxt, stack=db_stack, use_stored_context=True)
+        else:
+            current_stack = parser.Stack.load(cnxt, stack=db_stack)
+>>>>>>> heat/engine/service.py
         self.resource_enforcer.enforce_stack(current_stack,
                                              is_registered_policy=True)
 
@@ -1355,6 +1371,7 @@ class EngineService(service.ServiceBase):
         :rtype: dict
         """
         s = self._get_stack(cnxt, stack_identity, show_deleted=True)
+<<<<<<< heat/engine/service.py
         tmpl = templatem.Template.load(cnxt, s.raw_template_id, s.raw_template)
         param_schemata = tmpl.all_param_schemata(tmpl.files)
         env = copy.deepcopy(s.raw_template.environment)
@@ -1365,6 +1382,9 @@ class EngineService(service.ServiceBase):
                     continue
                 env[section][param_name] = str('******')
         return env
+=======
+        return s.raw_template.environment
+>>>>>>> heat/engine/service.py
 
     @context.request_context
     def get_files(self, cnxt, stack_identity):
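
For get_environment, HEAD loads the template, looks up each parameter's schema and masks the hidden ones before returning the environment, while the incoming side returns the stored environment untouched. A rough sketch of that masking step, with plain dicts standing in for the template schemata and the raw_template environment (the section names follow the usual Heat environment layout):

    import copy

    def masked_environment(env, param_schemata):
        env = copy.deepcopy(env)
        for section in ('parameters', 'parameter_defaults'):
            for name in env.get(section, {}):
                schema = param_schemata.get(name)
                if schema is None or not schema.get('hidden'):
                    continue
                env[section][name] = '******'
        return env

    env = {'parameters': {'db_password': 's3cret', 'flavor': 'm1.small'}}
    schemata = {'db_password': {'hidden': True}, 'flavor': {'hidden': False}}
    print(masked_environment(env, schemata))
    # {'parameters': {'db_password': '******', 'flavor': 'm1.small'}}
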
diff --git a/heat/engine/software_config_io.py b/heat/engine/software_config_io.py
index 3e29063..de4b311 100644
--- a/heat/engine/software_config_io.py
+++ b/heat/engine/software_config_io.py
@@ -178,6 +178,7 @@ def check_io_schema_list(io_configs):
     Raises TypeError if the value itself is not a list, or if any of the
     members are not dicts.
     """
+<<<<<<< heat/engine/software_config_io.py
     if (
         not isinstance(io_configs, collections.abc.Sequence) or
         isinstance(io_configs, collections.abc.Mapping) or
@@ -188,4 +189,12 @@ def check_io_schema_list(io_configs):
     if not all(
         isinstance(conf, collections.abc.Mapping) for conf in io_configs
     ):
+=======
+    if (not isinstance(io_configs, collections.Sequence) or
+            isinstance(io_configs, collections.Mapping) or
+            isinstance(io_configs, str)):
+        raise TypeError('Software Config I/O Schema must be in a list')
+
+    if not all(isinstance(conf, collections.Mapping) for conf in io_configs):
+>>>>>>> heat/engine/software_config_io.py
         raise TypeError('Software Config I/O Schema must be a dict')
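
Both branches of this conflict implement the same validation and differ only in the collections spelling; the str check is needed because a string is itself a Sequence and would otherwise slip through. A compact, runnable version using the surviving collections.abc names:

    import collections.abc

    def check_io_schema_list(io_configs):
        # Reject strings and mappings first, then insist every member is a dict.
        if (not isinstance(io_configs, collections.abc.Sequence) or
                isinstance(io_configs, collections.abc.Mapping) or
                isinstance(io_configs, str)):
            raise TypeError('Software Config I/O Schema must be in a list')
        if not all(isinstance(c, collections.abc.Mapping) for c in io_configs):
            raise TypeError('Software Config I/O Schema must be a dict')

    check_io_schema_list([{'name': 'result'}])   # OK
    # check_io_schema_list('not-a-list')         # would raise TypeError
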
diff --git a/heat/engine/stack.py b/heat/engine/stack.py
index c7ebec1..f3a81f5 100644
--- a/heat/engine/stack.py
+++ b/heat/engine/stack.py
@@ -100,7 +100,11 @@ def reset_state_on_error(func):
     return handle_exceptions
 
 
+<<<<<<< heat/engine/stack.py
 class Stack(collections.abc.Mapping):
+=======
+class Stack(collections.Mapping):
+>>>>>>> heat/engine/stack.py
 
     ACTIONS = (
         CREATE, DELETE, UPDATE, ROLLBACK, SUSPEND, RESUME, ADOPT,
@@ -126,7 +130,11 @@ class Stack(collections.abc.Mapping):
                  nested_depth=0, strict_validate=True, convergence=False,
                  current_traversal=None, tags=None, prev_raw_template_id=None,
                  current_deps=None, cache_data=None,
+<<<<<<< heat/engine/stack.py
                  deleted_time=None, converge=False, refresh_cred=False):
+=======
+                 deleted_time=None, converge=False):
+>>>>>>> heat/engine/stack.py
 
         """Initialise the Stack.
 
@@ -188,9 +196,12 @@ class Stack(collections.abc.Mapping):
         self.thread_group_mgr = None
         self.converge = converge
 
+<<<<<<< heat/engine/stack.py
         # This flag indicates whether the credential needs to be refreshed
         self.refresh_cred = refresh_cred
 
+=======
+>>>>>>> heat/engine/stack.py
         # strict_validate can be used to disable value validation
         # in the resource properties schema, this is useful when
         # performing validation when properties reference attributes
@@ -545,6 +556,7 @@ class Stack(collections.abc.Mapping):
                                  'err': str(exc)})
 
     @classmethod
+<<<<<<< heat/engine/stack.py
     def _check_refresh_cred(cls, context, stack):
         if stack.user_creds_id:
             creds_obj = ucreds_object.UserCreds.get_by_id(
@@ -573,6 +585,11 @@ class Stack(collections.abc.Mapping):
     def load(cls, context, stack_id=None, stack=None, show_deleted=True,
              use_stored_context=False, force_reload=False, cache_data=None,
              load_template=True, check_refresh_cred=False):
+=======
+    def load(cls, context, stack_id=None, stack=None, show_deleted=True,
+             use_stored_context=False, force_reload=False, cache_data=None,
+             load_template=True):
+>>>>>>> heat/engine/stack.py
         """Retrieve a Stack from the database."""
         if stack is None:
             stack = stack_object.Stack.get_by_id(
@@ -583,6 +600,7 @@ class Stack(collections.abc.Mapping):
             message = _('No stack exists with id "%s"') % str(stack_id)
             raise exception.NotFound(message)
 
+<<<<<<< heat/engine/stack.py
         refresh_cred = False
         if check_refresh_cred and (
             cfg.CONF.deferred_auth_method == 'trusts'
@@ -591,14 +609,20 @@ class Stack(collections.abc.Mapping):
                 use_stored_context = False
                 refresh_cred = True
 
+=======
+>>>>>>> heat/engine/stack.py
         if force_reload:
             stack.refresh()
 
         return cls._from_db(context, stack,
                             use_stored_context=use_stored_context,
                             cache_data=cache_data,
+<<<<<<< heat/engine/stack.py
                             load_template=load_template,
                             refresh_cred=refresh_cred)
+=======
+                            load_template=load_template)
+>>>>>>> heat/engine/stack.py
 
     @classmethod
     def load_all(cls, context, limit=None, marker=None, sort_keys=None,
@@ -632,7 +656,11 @@ class Stack(collections.abc.Mapping):
     @classmethod
     def _from_db(cls, context, stack,
                  use_stored_context=False, cache_data=None,
+<<<<<<< heat/engine/stack.py
                  load_template=True, refresh_cred=False):
+=======
+                 load_template=True):
+>>>>>>> heat/engine/stack.py
         if load_template:
             template = tmpl.Template.load(
                 context, stack.raw_template_id, stack.raw_template)
@@ -656,8 +684,12 @@ class Stack(collections.abc.Mapping):
                    prev_raw_template_id=stack.prev_raw_template_id,
                    current_deps=stack.current_deps, cache_data=cache_data,
                    nested_depth=stack.nested_depth,
+<<<<<<< heat/engine/stack.py
                    deleted_time=stack.deleted_at,
                    refresh_cred=refresh_cred)
+=======
+                   deleted_time=stack.deleted_at)
+>>>>>>> heat/engine/stack.py
 
     def get_kwargs_for_cloning(self, keep_status=False, only_db=False,
                                keep_tags=False):
@@ -725,6 +757,7 @@ class Stack(collections.abc.Mapping):
             s['raw_template_id'] = self.t.id
 
         if self.id is not None:
+<<<<<<< heat/engine/stack.py
             if self.refresh_cred:
                 keystone = self.clients.client('keystone')
                 trust_ctx = keystone.regenerate_trust_context()
@@ -736,6 +769,8 @@ class Stack(collections.abc.Mapping):
                 self.user_creds_id = new_creds.id
                 self.refresh_cred = False
 
+=======
+>>>>>>> heat/engine/stack.py
             if exp_trvsl is None and not ignore_traversal_check:
                 exp_trvsl = self.current_traversal
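
Taken together, the HEAD-side hunks in stack.py thread a single flag through the code: Stack.load(check_refresh_cred=True) decides whether a trust-scoped credential is stale, _from_db() passes refresh_cred into the constructor, and store() then asks keystone to regenerate the trust, stores the new user_creds and clears the flag. A heavily simplified sketch of that round trip; every collaborator here (FakeKeystone, the creds dict) is a stand-in, not a Heat or keystone API:

    class FakeKeystone:
        def regenerate_trust_context(self):
            # Stands in for creating a fresh trust for the stack owner.
            return {'trust_id': 'trust-new', 'trustor_user_id': 'user-1'}

    class StackSketch:
        def __init__(self, user_creds_id, creds):
            self.user_creds_id = user_creds_id
            self.creds = creds            # id -> stored credential dict
            self.refresh_cred = False

        @classmethod
        def load(cls, user_creds_id, creds, check_refresh_cred=False):
            stack = cls(user_creds_id, creds)
            stale = creds.get(user_creds_id, {}).get('trust_id') is None
            if check_refresh_cred and stale:
                stack.refresh_cred = True
            return stack

        def store(self):
            if self.refresh_cred:
                trust_ctx = FakeKeystone().regenerate_trust_context()
                new_id = max(self.creds, default=0) + 1
                self.creds[new_id] = trust_ctx
                self.creds.pop(self.user_creds_id, None)
                self.user_creds_id = new_id
                self.refresh_cred = False

    creds = {7: {'trust_id': None}}          # stale trust-scoped credential
    stack = StackSketch.load(7, creds, check_refresh_cred=True)
    stack.store()
    print(stack.user_creds_id, creds)        # 8 {8: {'trust_id': 'trust-new', ...}}
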
 
@@ -1387,11 +1422,14 @@ class Stack(collections.abc.Mapping):
         Update will fail if it exceeds the specified timeout. The default is
         60 minutes, set in the constructor
         """
+<<<<<<< heat/engine/stack.py
         # Populate resource data needed for calculating frozen definitions
         # (particularly for metadata, which doesn't get stored separately).
         self._update_all_resource_data(for_resources=True,
                                        for_outputs=False)
 
+=======
+>>>>>>> heat/engine/stack.py
         self.updated_time = oslo_timeutils.utcnow()
         updater = scheduler.TaskRunner(self.update_task, newstack,
                                        msg_queue=msg_queue, notify=notify)
@@ -1549,10 +1587,14 @@ class Stack(collections.abc.Mapping):
                     # Rolling back to previous resource
                     score += 10
 
+<<<<<<< heat/engine/stack.py
                 last_changed_at = ext_rsrc.updated_at
                 if last_changed_at is None:
                     last_changed_at = ext_rsrc.created_at
                 return score, last_changed_at
+=======
+                return score, ext_rsrc.updated_at
+>>>>>>> heat/engine/stack.py
 
             candidates = sorted((r for r in self.ext_rsrcs_db.values()
                                  if r.name == rsrc_name),
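
In the sort-key hunk above, the HEAD side falls back to created_at when a resource has never been updated, so candidates that tie on score are still compared datetime-to-datetime instead of None-to-datetime (which raises TypeError on Python 3). A tiny illustration of why the fallback matters:

    import datetime

    rows = [('a', None, datetime.datetime(2023, 1, 1)),
            ('b', datetime.datetime(2024, 6, 1), datetime.datetime(2023, 2, 1))]

    # Incoming side: sort on updated_at alone -> None vs datetime comparison.
    try:
        sorted(rows, key=lambda r: r[1])
    except TypeError as exc:
        print('unguarded sort failed:', exc)

    # HEAD side: fall back to created_at so every key is a real datetime.
    print(sorted(rows, key=lambda r: r[1] or r[2]))
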
@@ -1889,6 +1931,7 @@ class Stack(collections.abc.Mapping):
     def _try_get_user_creds(self):
         # There are cases where the user_creds cannot be returned
         # due to credentials truncated when being saved to DB.
+<<<<<<< heat/engine/stack.py
         # Also, there are cases where auth_encryption_key has
         # changed for some reason.
         # Ignore these errors instead of blocking stack deletion.
@@ -1903,6 +1946,21 @@ class Stack(collections.abc.Mapping):
                           raise_keystone_exception=False):
         # Cleanup stored user_creds so they aren't accessible via
         # the soft-deleted stack which remains in the DB
+=======
+        # Ignore this error instead of blocking stack deletion.
+        try:
+            return ucreds_object.UserCreds.get_by_id(self.context,
+                                                     self.user_creds_id)
+        except exception.Error:
+            LOG.exception("Failed to retrieve user_creds")
+            return None
+
+    def _delete_credentials(self, stack_status, reason, abandon):
+        # Cleanup stored user_creds so they aren't accessible via
+        # the soft-deleted stack which remains in the DB
+        # The stack_status and reason passed in are current values, which
+        # may get rewritten and returned from this method
+>>>>>>> heat/engine/stack.py
         if self.user_creds_id:
             user_creds = self._try_get_user_creds()
             # If we created a trust, delete it
@@ -1932,8 +1990,11 @@ class Stack(collections.abc.Mapping):
                         # Without this, they would need to issue
                         # an additional stack-delete
                         LOG.exception("Error deleting trust")
+<<<<<<< heat/engine/stack.py
                         if raise_keystone_exception:
                             raise
+=======
+>>>>>>> heat/engine/stack.py
 
             # Delete the stored credentials
             try:
@@ -1943,6 +2004,7 @@ class Stack(collections.abc.Mapping):
                 LOG.info("Tried to delete user_creds that do not exist "
                          "(stack=%(stack)s user_creds_id=%(uc)s)",
                          {'stack': self.id, 'uc': self.user_creds_id})
+<<<<<<< heat/engine/stack.py
             self.user_creds_id = None
         return stack_status, reason
 
@@ -1955,6 +2017,15 @@ class Stack(collections.abc.Mapping):
         except exception.NotFound:
             LOG.info("Tried to store a stack that does not exist %s",
                      self.id)
+=======
+
+            try:
+                self.user_creds_id = None
+                self.store()
+            except exception.NotFound:
+                LOG.info("Tried to store a stack that does not exist %s",
+                         self.id)
+>>>>>>> heat/engine/stack.py
 
         # If the stack has a domain project, delete it
         if self.stack_user_project_id and not abandon:
@@ -1993,8 +2064,13 @@ class Stack(collections.abc.Mapping):
 
         stack_status = self.COMPLETE
         reason = 'Stack %s completed successfully' % action
+<<<<<<< heat/engine/stack.py
         self.state_set(action, self.IN_PROGRESS, 'Stack %s started at %s' %
                        (action, oslo_timeutils.utcnow().isoformat()))
+=======
+        self.state_set(action, self.IN_PROGRESS, 'Stack %s started' %
+                       action)
+>>>>>>> heat/engine/stack.py
         if notify is not None:
             notify.signal()
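
The credential-cleanup hunks differ mostly in bookkeeping, but one behavioural change stands out: HEAD's _delete_credentials accepts raise_keystone_exception and re-raises a failed trust delete when asked, whereas the incoming side always logs and continues. A minimal sketch of that switch (delete_trust and the client object are illustrative, not the Heat or keystone API):

    import logging

    LOG = logging.getLogger(__name__)

    def delete_trust(client, trust_id, raise_keystone_exception=False):
        try:
            client.delete_trust(trust_id)
        except Exception:
            # Default: swallow the error so stack deletion can proceed.
            LOG.exception("Error deleting trust")
            if raise_keystone_exception:
                raise

    class BrokenClient:
        def delete_trust(self, trust_id):
            raise RuntimeError("keystone unreachable")

    delete_trust(BrokenClient(), 'trust-1')         # logged, not raised
    # delete_trust(BrokenClient(), 'trust-1', True) # would re-raise
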
 
diff --git a/heat/engine/stk_defn.py b/heat/engine/stk_defn.py
index 585a057..fed20b9 100644
--- a/heat/engine/stk_defn.py
+++ b/heat/engine/stk_defn.py
@@ -102,6 +102,7 @@ class StackDefinition(object):
         else:
             return self.enabled_rsrc_names()
 
+<<<<<<< heat/engine/stk_defn.py
     def all_resource_types(self):
         """Return the set of types of all resources in the template."""
         if self._resource_defns is None:
@@ -109,6 +110,8 @@ class StackDefinition(object):
         return set(self._resource_defns[res].resource_type
                    for res in self._resource_defns)
 
+=======
+>>>>>>> heat/engine/stk_defn.py
     def get_availability_zones(self):
         """Return the list of Nova availability zones."""
         if self._zones is None:
diff --git a/heat/engine/template.py b/heat/engine/template.py
index eece1d0..152b418 100644
--- a/heat/engine/template.py
+++ b/heat/engine/template.py
@@ -89,7 +89,11 @@ def get_template_class(template_data):
         raise exception.InvalidTemplateVersion(explanation=explanation)
 
 
+<<<<<<< heat/engine/template.py
 class Template(collections.abc.Mapping):
+=======
+class Template(collections.Mapping):
+>>>>>>> heat/engine/template.py
     """Abstract base class for template format plugins.
 
     All template formats (both internal and third-party) should derive from
@@ -355,7 +359,11 @@ class Template(collections.abc.Mapping):
 def parse(functions, stack, snippet, path='', template=None):
     recurse = functools.partial(parse, functions, stack, template=template)
 
+<<<<<<< heat/engine/template.py
     if isinstance(snippet, collections.abc.Mapping):
+=======
+    if isinstance(snippet, collections.Mapping):
+>>>>>>> heat/engine/template.py
         def mkpath(key):
             return '.'.join([path, str(key)])
 
@@ -380,7 +388,11 @@ def parse(functions, stack, snippet, path='', template=None):
         return dict((k, recurse(v, mkpath(k)))
                     for k, v in snippet.items())
     elif (not isinstance(snippet, str) and
+<<<<<<< heat/engine/template.py
           isinstance(snippet, collections.abc.Iterable)):
+=======
+          isinstance(snippet, collections.Iterable)):
+>>>>>>> heat/engine/template.py
 
         def mkpath(idx):
             return ''.join([path, '[%d]' % idx])
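
The parse() conflicts are again only the collections spelling, but the shape of the function is worth seeing whole: mappings recurse with a dotted path, non-string iterables with an indexed path, and everything else is returned unchanged. A reduced sketch (the function resolution and error wrapping of the real parser are left out):

    import collections.abc

    def parse(snippet, path=''):
        if isinstance(snippet, collections.abc.Mapping):
            return {k: parse(v, '.'.join([path, str(k)]))
                    for k, v in snippet.items()}
        elif (not isinstance(snippet, str) and
              isinstance(snippet, collections.abc.Iterable)):
            return [parse(v, ''.join([path, '[%d]' % i]))
                    for i, v in enumerate(snippet)]
        return snippet

    print(parse({'outputs': [{'value': 'x'}]}))
    # {'outputs': [{'value': 'x'}]}  -- structure preserved, paths threaded
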
diff --git a/heat/engine/template_common.py b/heat/engine/template_common.py
index f5d80a5..f460c33 100644
--- a/heat/engine/template_common.py
+++ b/heat/engine/template_common.py
@@ -80,18 +80,30 @@ class CommonTemplate(template.Template):
 
         yield ('properties',
                self._parse_resource_field(self.RES_PROPERTIES,
+<<<<<<< heat/engine/template_common.py
                                           (collections.abc.Mapping,
+=======
+                                          (collections.Mapping,
+>>>>>>> heat/engine/template_common.py
                                            function.Function), 'object',
                                           name, data, parse))
 
         yield ('metadata',
                self._parse_resource_field(self.RES_METADATA,
+<<<<<<< heat/engine/template_common.py
                                           (collections.abc.Mapping,
+=======
+                                          (collections.Mapping,
+>>>>>>> heat/engine/template_common.py
                                            function.Function), 'object',
                                           name, data, parse))
 
         depends = self._parse_resource_field(self.RES_DEPENDS_ON,
+<<<<<<< heat/engine/template_common.py
                                              collections.abc.Sequence,
+=======
+                                             collections.Sequence,
+>>>>>>> heat/engine/template_common.py
                                              'list or string',
                                              name, data, no_parse)
         if isinstance(depends, str):
@@ -122,7 +134,11 @@ class CommonTemplate(template.Template):
 
         yield ('update_policy',
                self._parse_resource_field(self.RES_UPDATE_POLICY,
+<<<<<<< heat/engine/template_common.py
                                           (collections.abc.Mapping,
+=======
+                                          (collections.Mapping,
+>>>>>>> heat/engine/template_common.py
                                            function.Function), 'object',
                                           name, data, parse))
 
@@ -143,7 +159,11 @@ class CommonTemplate(template.Template):
             return cached_conds
 
         raw_defs = self._get_condition_definitions()
+<<<<<<< heat/engine/template_common.py
         if not isinstance(raw_defs, collections.abc.Mapping):
+=======
+        if not isinstance(raw_defs, collections.Mapping):
+>>>>>>> heat/engine/template_common.py
             message = _('Condition definitions must be a map. Found a '
                         '%s instead') % type(raw_defs).__name__
             raise exception.StackValidationFailed(
@@ -166,7 +186,11 @@ class CommonTemplate(template.Template):
 
         def get_outputs():
             for key, val in outputs.items():
+<<<<<<< heat/engine/template_common.py
                 if not isinstance(val, collections.abc.Mapping):
+=======
+                if not isinstance(val, collections.Mapping):
+>>>>>>> heat/engine/template_common.py
                     message = _('Output definitions must be a map. Found a '
                                 '%s instead') % type(val).__name__
                     raise exception.StackValidationFailed(
diff --git a/heat/engine/template_files.py b/heat/engine/template_files.py
index 55a54e0..b4d3718 100644
--- a/heat/engine/template_files.py
+++ b/heat/engine/template_files.py
@@ -28,7 +28,11 @@ class ReadOnlyDict(dict):
         raise ValueError("Attempted to write to internal TemplateFiles cache")
 
 
+<<<<<<< heat/engine/template_files.py
 class TemplateFiles(collections.abc.Mapping):
+=======
+class TemplateFiles(collections.Mapping):
+>>>>>>> heat/engine/template_files.py
 
     def __init__(self, files):
         self.files = None
diff --git a/heat/httpd/heat_api.py b/heat/httpd/heat_api.py
index 285894d..98285d2 100644
--- a/heat/httpd/heat_api.py
+++ b/heat/httpd/heat_api.py
@@ -33,9 +33,12 @@ CONF = cfg.CONF
 def init_application():
     i18n.enable_lazy()
 
+<<<<<<< heat/httpd/heat_api.py
     # NOTE(hberaud): Call reset to ensure the ConfigOpts object doesn't
     # already contain registered options if the app is reloaded.
     CONF.reset()
+=======
+>>>>>>> heat/httpd/heat_api.py
     logging.register_options(CONF)
     version = hversion.version_info.version_string()
     CONF(project='heat', prog='heat-api', version=version)
diff --git a/heat/httpd/heat_api_cfn.py b/heat/httpd/heat_api_cfn.py
index 67da8dd..0f977dc 100644
--- a/heat/httpd/heat_api_cfn.py
+++ b/heat/httpd/heat_api_cfn.py
@@ -33,9 +33,12 @@ CONF = cfg.CONF
 def init_application():
     i18n.enable_lazy()
 
+<<<<<<< heat/httpd/heat_api_cfn.py
     # NOTE(hberaud): Call reset to ensure the ConfigOpts object doesn't
     # already contain registered options if the app is reloaded.
     CONF.reset()
+=======
+>>>>>>> heat/httpd/heat_api_cfn.py
     logging.register_options(CONF)
     CONF(project='heat',
          prog='heat-api-cfn',
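
The only change HEAD carries in these two httpd entry points is the CONF.reset() call: as the NOTE in the hunk says, a reloaded WSGI app may find the module-level ConfigOpts already populated, and resetting it first keeps init_application() re-entrant. A generic illustration of the pattern, with a toy registry standing in for oslo.config:

    class ToyRegistry:
        """Stand-in for a module-level ConfigOpts-like object."""

        def __init__(self):
            self._opts = set()

        def register(self, name):
            if name in self._opts:
                raise ValueError('option already registered: %s' % name)
            self._opts.add(name)

        def reset(self):
            self._opts.clear()

    CONF = ToyRegistry()

    def init_application():
        CONF.reset()            # safe to call init_application() repeatedly
        CONF.register('debug')

    init_application()
    init_application()          # would raise without the reset() above
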
diff --git a/heat/locale/de/LC_MESSAGES/heat.po b/heat/locale/de/LC_MESSAGES/heat.po
index 3faad40..777a39a 100644
--- a/heat/locale/de/LC_MESSAGES/heat.po
+++ b/heat/locale/de/LC_MESSAGES/heat.po
@@ -14,7 +14,11 @@ msgid ""
 msgstr ""
 "Project-Id-Version: heat VERSION\n"
 "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
 "POT-Creation-Date: 2022-11-11 06:43+0000\n"
+=======
+"POT-Creation-Date: 2020-05-04 23:38+0000\n"
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -697,6 +701,14 @@ msgstr "Eine Liste der Sicherheitsgruppen für den Port."
 msgid "A list of security services IDs or names."
 msgstr "Eine Liste von IDs oder Namen von Sicherheitsdiensten."
 
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "A list of string policies to apply. Defaults to anti-affinity."
+msgstr ""
+"Eine Liste der anzuwendenden Zeichenfolgenrichtlinien. Standardmäßig Anti-"
+"Affinität."
+
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "A list of tags for labeling and sorting projects."
 msgstr "Eine Liste von Tags zum Beschriften und Sortieren von Projekten."
 
@@ -2405,6 +2417,12 @@ msgstr "E-Mail-Adresse des Keystone-Benutzers"
 msgid "Enable the docker registry in the cluster."
 msgstr "Aktivieren Sie die Docker-Registrierung im Cluster."
 
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "Enable the legacy OS::Heat::CWLiteAlarm resource."
+msgstr "Aktivieren Sie die ältere OS::Heat::CWLiteAlarm-Ressource."
+
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "Enable the preview Stack Abandon feature."
 msgstr "Aktivieren Sie die Vorschau-Funktion Stapel-Abandon."
 
@@ -2631,6 +2649,13 @@ msgstr ""
 "Fehler beim Verbinden von Datenträger %(vol)s mit Server %(srv)s - %(err)s"
 
 #, python-format
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "Failed to create Bay '%(name)s' - %(reason)s"
+msgstr "Fehler beim Erstellen der Bay '%(name)s' - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "Failed to create Cluster '%(name)s' - %(reason)s"
 msgstr "Fehler beim Erstellen des Clusters '%(name)s' - %(reason)s"
 
@@ -2699,6 +2724,13 @@ msgstr ""
 "(%(engine_id)s)"
 
 #, python-format
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "Failed to update Bay '%(name)s' - %(reason)s"
+msgstr "Fehler beim Aktualisieren der Bay '%(name)s' - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "Failed to update Cluster '%(name)s' - %(reason)s"
 msgstr "Fehler beim Aktualisieren von Cluster '%(name)s' - %(reason)s"
 
@@ -3404,6 +3436,12 @@ msgstr ""
 msgid "Incorrect arguments to \"%(fn_name)s\" should be: %(example)s"
 msgstr "Falsche Argumente zu \"%(fn_name)s\" sollten sein: %(example)s"
 
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "Incorrect arguments: Items to merge must be maps."
+msgstr "Falsche Argumente: Zu vereinende Elemente müssen Maps sein."
+
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 #, python-format
 msgid ""
 "Incorrect arguments: to \"%(fn_name)s\", arguments must be a list of maps. "
@@ -4328,6 +4366,19 @@ msgstr ""
 "verwendet werden (normalerweise solche, die von der Keystone v3-API mit "
 "großen Servicekatalogen generiert werden)."
 
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid ""
+"Maximum line size of message headers to be accepted. max_header_line may "
+"need to be increased when using large tokens (typically those generated by "
+"the Keystone v3 API with big service catalogs.)"
+msgstr ""
+"Maximale Zeilengröße von Nachrichtenheadern, die akzeptiert werden sollen. "
+"Möglicherweise muss max_header_line erhöht werden, wenn große Token "
+"verwendet werden (in der Regel solche, die von der Keystone v3-API mit "
+"großen Servicekatalogen generiert werden.)"
+
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "Maximum number of instances in the group."
 msgstr "Maximale Anzahl von Instanzen in der Gruppe."
 
@@ -4351,6 +4402,12 @@ msgstr ""
 "Maximale Anzahl an Sekunden, die ein Monitor darauf wartet, dass eine "
 "Verbindung hergestellt wird, bevor das Zeitlimit überschritten wird. "
 
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "Maximum number of stacks any one tenant may have active at one time."
+msgstr "Maximale Anzahl von Stapeln, die ein Mieter gleichzeitig haben darf."
+
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "Maximum prefix size that can be allocated from the subnet pool."
 msgstr "Maximale Präfixgröße, die vom Subnetzpool zugewiesen werden kann."
 
@@ -5196,6 +5253,18 @@ msgstr ""
 "OS::Aodh::CombinationAlarm ist veraltet und wurde von Aodh entfernt. "
 "Verwenden Sie stattdessen OS::Aodh::CompositeAlarm."
 
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid ""
+"OS::Heat::CWLiteAlarm resource has been removed since version 10.0.0. "
+"Existing stacks can still use it, where it would do nothing for update/"
+"delete."
+msgstr ""
+"Die OS::Heat::CWLiteAlarm-Ressource wurde seit der Version 10.0.0 entfernt. "
+"Bestehende Stapel können es immer noch verwenden, wo es nichts zum "
+"Aktualisieren/Löschen tun würde."
+
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 #, python-format
 msgid "Object action %(action)s failed because: %(reason)s"
 msgstr "Objektaktion %(action)s ist fehlgeschlagen, weil: %(reason)s"
@@ -5469,6 +5538,12 @@ msgstr "Zeitraum (Sekunden) für die Auswertung."
 msgid "Physical ID of the VPC. Not implemented."
 msgstr "Physische ID der VPC. Nicht implementiert."
 
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "Placeholder"
+msgstr "Platzhalter"
+
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "Please use OS::Heat::SoftwareDeploymentGroup instead."
 msgstr "Bitte verwenden Sie stattdessen OS::Heat::SoftwareDeploymentGroup."
 
@@ -5520,6 +5595,16 @@ msgstr ""
 "Servers."
 
 msgid ""
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+"Policy on how to apply a user_data update; either by ignoring it or by "
+"replacing the entire server."
+msgstr ""
+"Richtlinie zum Anwenden einer Benutzerdatenaktualisierung entweder durch "
+"Ignorieren oder durch Ersetzen des gesamten Servers."
+
+msgid ""
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 "Policy on how to apply an image-id update; either by requesting a server "
 "rebuild or by replacing the entire server."
 msgstr ""
@@ -7448,6 +7533,12 @@ msgstr "Die Verfügbarkeitszone, in der das Volume erstellt wird."
 msgid "The availability zone of shared filesystem."
 msgstr "Die Verfügbarkeitszone des freigegebenen Dateisystems."
 
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "The bay name."
+msgstr "Der Name der Bay."
+
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "The bit-length of the secret."
 msgstr "Die Bitlänge des Geheimnisses."
 
@@ -8103,6 +8194,12 @@ msgstr ""
 msgid "The name or ID of target cluster."
 msgstr "Der Name oder die ID des Zielclusters."
 
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "The name or ID of the bay model."
+msgstr "Der Name oder die ID des Bay-Modells."
+
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "The name or ID of the cluster template."
 msgstr "Der Name oder die ID der Clustervorlage."
 
@@ -8133,6 +8230,12 @@ msgstr "Der Verhandlungsmodus der IKE-Richtlinie."
 msgid "The next hop for the destination."
 msgstr "Der nächste Hop für das Ziel."
 
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "The node count for this bay."
+msgstr "Die Anzahl der Knoten für diese Bay."
+
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "The node count for this cluster."
 msgstr "Die Knotenzahl für diesen Cluster."
 
@@ -8184,6 +8287,12 @@ msgstr ""
 "Die Anzahl aufeinanderfolgender Health Probe-Erfolge, die erforderlich sind, "
 "bevor die Instanz in den Status \"Healthy\" versetzt wird."
 
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "The number of master nodes for this bay."
+msgstr "Die Anzahl der Master-Knoten für diese Bay."
+
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "The number of master nodes for this cluster."
 msgstr "Die Anzahl der Master-Knoten für diesen Cluster."
 
@@ -8233,6 +8342,17 @@ msgid "The operator indicates how to combine the rules."
 msgstr "Der Operator gibt an, wie die Regeln zu kombinieren sind."
 
 msgid ""
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+"The optional public key. This allows users to supply the public key from a "
+"pre-existing key pair. If not supplied, a new key pair will be generated."
+msgstr ""
+"Der optionale öffentliche Schlüssel. Dadurch können Benutzer den "
+"öffentlichen Schlüssel aus einem bereits vorhandenen Schlüsselpaar "
+"bereitstellen. Wenn nicht angegeben, wird ein neues Schlüsselpaar generiert."
+
+msgid ""
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 "The os-collect-config configuration for the server's local agent to be "
 "configured to connect to Heat to retrieve deployment data."
 msgstr ""
@@ -8494,6 +8614,16 @@ msgstr ""
 "Der Server konnte der Anforderung nicht entsprechen, da sie entweder "
 "fehlerhaft oder auf andere Weise falsch ist."
 
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid ""
+"The servers to slave from to get DNS information and is mandatory for zone "
+"type SECONDARY, otherwise ignored."
+msgstr ""
+"Die Server, von denen aus die DNS-Informationen abgerufen werden sollen, "
+"sind obligatorisch für den Zonentyp SECONDARY, andernfalls ignoriert."
+
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "The set of parameters passed to this nested stack."
 msgstr ""
 "Die Menge der Parameter, die an diesen verschachtelten Stapel übergeben "
@@ -8933,6 +9063,13 @@ msgstr ""
 "Verbindung für diese Anzahl von Sekunden inaktiv ist, wird sie geschlossen. "
 "Ein Wert von '0' bedeutet ewig warten."
 
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "Timeout for creating the bay in minutes. Set to 0 for no timeout."
+msgstr ""
+"Timeout zum Erstellen der Bay in Minuten. Für kein Timeout auf 0 setzen."
+
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "Timeout for creating the cluster in minutes. Set to 0 for no timeout."
 msgstr ""
 "Timeout zum Erstellen des Clusters in Minuten. Für kein Timeout auf 0 setzen."
@@ -9065,6 +9202,16 @@ msgid "Type of the volume to create on Cinder backend."
 msgstr "Typ des Datenträgers, das auf dem Cinder-Backend erstellt werden soll."
 
 msgid ""
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+"Type of zone. PRIMARY is controlled by Designate, SECONDARY zones are slaved "
+"from another DNS Server."
+msgstr ""
+"Art der Zone PRIMARY wird von Designate gesteuert, SECONDARY-Zonen werden "
+"von einem anderen DNS-Server verwaltet."
+
+msgid ""
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 "URI of the subscriber which will be notified. Must be in the format: <TYPE>:"
 "<VALUE>."
 msgstr ""
@@ -9103,6 +9250,12 @@ msgstr ""
 msgid "URL of keystone service endpoint."
 msgstr "URL des Schlüsseldienst-Endpunkts"
 
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "URL of the Heat CloudWatch server."
+msgstr "URL des Heat CloudWatch Servers"
+
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid ""
 "URL of the Heat metadata server. NOTE: Setting this is only needed if you "
 "require instances to use a different endpoint than in the keystone catalog"
@@ -9283,6 +9436,13 @@ msgid "Unknown status Container '%(name)s' - %(reason)s"
 msgstr "Unbekannter Status Container '%(name)s' - %(reason)s"
 
 #, python-format
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "Unknown status creating Bay '%(name)s' - %(reason)s"
+msgstr "Unbekannter Status beim Erstellen der Bay '%(name)s' - %(reason)s "
+
+#, python-format
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "Unknown status creating Cluster '%(name)s' - %(reason)s"
 msgstr "Unbekannter Status beim Erstellen des Clusters '%(name)s' - %(reason)s"
 
@@ -9290,6 +9450,13 @@ msgid "Unknown status during deleting share \"{0}\""
 msgstr "Unbekannter Status beim Löschen der Freigabe \"{0}\""
 
 #, python-format
+<<<<<<< heat/locale/de/LC_MESSAGES/heat.po
+=======
+msgid "Unknown status updating Bay '%(name)s' - %(reason)s"
+msgstr "Unbekannter Bay-Status wird aktualisiert. '%(name)s' - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/de/LC_MESSAGES/heat.po
 msgid "Unknown status updating Cluster '%(name)s' - %(reason)s"
 msgstr "Unbekannter Status aktualisiert Cluster '%(name)s' - %(reason)s"
 
diff --git a/heat/locale/es/LC_MESSAGES/heat.po b/heat/locale/es/LC_MESSAGES/heat.po
index 1f89d45..a8508b0 100644
--- a/heat/locale/es/LC_MESSAGES/heat.po
+++ b/heat/locale/es/LC_MESSAGES/heat.po
@@ -10,7 +10,11 @@ msgid ""
 msgstr ""
 "Project-Id-Version: heat VERSION\n"
 "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
 "POT-Creation-Date: 2022-11-11 06:43+0000\n"
+=======
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -435,6 +439,14 @@ msgstr "Una lista de grupos de seguridad para el puerto."
 msgid "A list of security services IDs or names."
 msgstr "Una lista de ID o nombres de servicios de seguridad."
 
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+msgid "A list of string policies to apply. Defaults to anti-affinity."
+msgstr ""
+"Una lista de políticas de serie a aplicar. Valores predeterminados para anti-"
+"afinidad."
+
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 msgid "A login profile for the user."
 msgstr "Perfil de inicio de sesión para el usuario."
 
@@ -1677,6 +1689,12 @@ msgstr ""
 msgid "Email address of keystone user."
 msgstr "Dirección de correo electrónico del usuario de keystone."
 
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+msgid "Enable the legacy OS::Heat::CWLiteAlarm resource."
+msgstr "Habilitar el recurso heredado OS::Heat::CWLiteAlarm."
+
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 msgid "Enable the preview Stack Abandon feature."
 msgstr ""
 "Habilitar la característica Stack Abandon (abandono de pila) de vista previa."
@@ -1835,6 +1853,13 @@ msgstr ""
 "No se ha podido conectar el volumen %(vol)s al servidor %(srv)s - %(err)s"
 
 #, python-format
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+msgid "Failed to create Bay '%(name)s' - %(reason)s"
+msgstr "No se ha podido crear la bahía '%(name)s' - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 msgid "Failed to detach interface (%(port)s) from server (%(server)s)"
 msgstr ""
 "No se ha podido desconectar la interfaz (%(port)s) del servidor (%(server)s)"
@@ -1894,6 +1919,13 @@ msgstr ""
 "No se ha podido detener la pila (%(stack_name)s) en otro motor "
 "(%(engine_id)s)"
 
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Failed to update Bay '%(name)s' - %(reason)s"
+msgstr "No se ha podido actualizar la bahía '%(name)s' - %(reason)s"
+
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 msgid "Failed to update, can not found port info."
 msgstr "Ha fallado la actualización, no se encuentra información de puerto."
 
@@ -2441,6 +2473,13 @@ msgstr "Argumentos incorrectos en \"%(fn_name)s\" debe ser uno de: %(allowed)s"
 msgid "Incorrect arguments to \"%(fn_name)s\" should be: %(example)s"
 msgstr "Argumentos incorrectos en \"%(fn_name)s\" deben ser: %(example)s"
 
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+msgid "Incorrect arguments: Items to merge must be maps."
+msgstr ""
+"Argumentos no correctos: los elementos a mezclar deben ser correlaciones."
+
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 #, python-format
 msgid ""
 "Incorrect index to \"%(fn_name)s\" should be between 0 and %(max_index)s"
@@ -3103,6 +3142,19 @@ msgstr ""
 "grandes (normalmente las generadas por la API de Keystone v3 con catálogos "
 "de servicio grandes)."
 
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+msgid ""
+"Maximum line size of message headers to be accepted. max_header_line may "
+"need to be increased when using large tokens (typically those generated by "
+"the Keystone v3 API with big service catalogs.)"
+msgstr ""
+"Tamaño de línea máximo de cabeceras de mensaje que se deben aceptar. Es "
+"posible que max_header_line se necesite incrementar al utilizar señales "
+"grandes (normalmente las generadas por la API de Keystone v3 con catálogos "
+"de servicio grandes.)"
+
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 msgid "Maximum number of instances in the group."
 msgstr "Número máximo de instancias en el grupo."
 
@@ -3119,6 +3171,14 @@ msgstr ""
 "Número máximo de segundos que un supervisor debe esperar a que se establezca "
 "una conexión antes de que exceda el tiempo de espera."
 
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+msgid "Maximum number of stacks any one tenant may have active at one time."
+msgstr ""
+"Número máximo de pilas que cualquier arrendatario puede tener activas "
+"simultáneamente."
+
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 msgid "Maximum prefix size that can be allocated from the subnet pool."
 msgstr ""
 "El tamaño máximo de prefijo que se puede asignar desde la agrupación de "
@@ -5457,6 +5517,12 @@ msgstr "La zona de disponibilidad en la que se creará el volumen."
 msgid "The availability zone of shared filesystem."
 msgstr "La zona de disponibilidad del sistema de archivos compartido."
 
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+msgid "The bay name."
+msgstr "El nombre de la bahía."
+
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 msgid "The bit-length of the secret."
 msgstr "La longitud en bits del secreto."
 
@@ -5942,6 +6008,12 @@ msgstr "El nombre o el ID de este proyecto de keystone en la jerarquía."
 msgid "The name or ID of target cluster."
 msgstr "El nombre o el ID del clúster de destino."
 
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+msgid "The name or ID of the bay model."
+msgstr "El nombre o el ID del modelo de bahía."
+
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 msgid "The name or ID of the subnet on which to allocate the VIP address."
 msgstr "El nombre o el ID de la subred donde asignar la dirección VIP."
 
@@ -5957,6 +6029,12 @@ msgstr "La modalidad de negociación de la política ike."
 msgid "The next hop for the destination."
 msgstr "El siguiente salto del destino."
 
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+msgid "The node count for this bay."
+msgstr "El recuento de nodos para esta bahía."
+
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 msgid "The notification methods to use when an alarm state is ALARM."
 msgstr ""
 "Los métodos de notificación a utilizar cuando el estado de una alarma es "
@@ -5992,6 +6070,12 @@ msgstr ""
 "El número de éxitos de análisis de estado consecutivos necesarios antes de "
 "mover la instancia al estado saludable."
 
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+msgid "The number of master nodes for this bay."
+msgstr "El número de nodos mestros para esta bahía."
+
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 msgid "The number of objects stored in the container."
 msgstr "El número de objetos almacenados en el contenedor."
 
@@ -6023,6 +6107,17 @@ msgstr ""
 "de creación de pila continúe."
 
 msgid ""
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+"The optional public key. This allows users to supply the public key from a "
+"pre-existing key pair. If not supplied, a new key pair will be generated."
+msgstr ""
+"Clave pública opcional. Permite a los usuarios proporcionar la clave pública "
+"desde un par de claves existente previamente. Si no se proporciona, se "
+"generará un nuevo par de claves."
+
+msgid ""
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 "The owner tenant ID of the address scope. Only administrative users can "
 "specify a tenant ID other than their own."
 msgstr ""
@@ -6547,6 +6642,14 @@ msgstr ""
 "una conexión entrante está inactiva para este número de segundos se cerrará. "
 "Un valor de '0' significa esperar permanentemente."
 
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+msgid "Timeout for creating the bay in minutes. Set to 0 for no timeout."
+msgstr ""
+"Tiempo de espera para crear la bahía en minutos. Establézcalo en 0 si no "
+"desea que haya tiempo de espera."
+
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 msgid "Timeout in seconds for stack action (ie. create or update)."
 msgstr ""
 "Tiempo de espera en segúndos para una acción de pila (por ejemplo, creár o "
@@ -6666,6 +6769,12 @@ msgstr ""
 msgid "URL of keystone service endpoint."
 msgstr "URL del punto final del servicio de keystone."
 
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+msgid "URL of the Heat CloudWatch server."
+msgstr "URL del servidor CloudWatch de Heat."
+
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 msgid ""
 "URL of the Heat metadata server. NOTE: Setting this is only needed if you "
 "require instances to use a different endpoint than in the keystone catalog"
@@ -6814,12 +6923,26 @@ msgstr ""
 "Estado del recurso compartido (share_status) desconocido durante la creación "
 "del recurso compartido \"{0}\""
 
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Unknown status creating Bay '%(name)s' - %(reason)s"
+msgstr "Estado desconocido al crear la bahía '%(name)s' - %(reason)s"
+
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 msgid "Unknown status during deleting share \"{0}\""
 msgstr ""
 "Estado del recurso compartido (share_status) desconocido al suprimir el "
 "recurso compartido \"{0}\""
 
 #, python-format
+<<<<<<< heat/locale/es/LC_MESSAGES/heat.po
+=======
+msgid "Unknown status updating Bay '%(name)s' - %(reason)s"
+msgstr "Estado desconocido al actualizar la bahía '%(name)s' - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/es/LC_MESSAGES/heat.po
 msgid "Unknown status: %s"
 msgstr "Estado desconocido: %s"
 
diff --git a/heat/locale/fr/LC_MESSAGES/heat.po b/heat/locale/fr/LC_MESSAGES/heat.po
index 9e43716..374c7dd 100644
--- a/heat/locale/fr/LC_MESSAGES/heat.po
+++ b/heat/locale/fr/LC_MESSAGES/heat.po
@@ -10,7 +10,11 @@ msgid ""
 msgstr ""
 "Project-Id-Version: heat VERSION\n"
 "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
 "POT-Creation-Date: 2022-11-11 06:43+0000\n"
+=======
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -433,6 +437,12 @@ msgstr "Liste de groupes de sécurité pour le port."
 msgid "A list of security services IDs or names."
 msgstr "Liste des ID ou noms des services de sécurité."
 
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+msgid "A list of string policies to apply. Defaults to anti-affinity."
+msgstr "Liste de règles de chaîne à appliquer. Par défaut : anti-affinity."
+
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 msgid "A login profile for the user."
 msgstr "Un profil de connexion pour l'utilisateur."
 
@@ -1674,6 +1684,12 @@ msgstr ""
 msgid "Email address of keystone user."
 msgstr "Adresse e-mail de l'utilisateur Keystone."
 
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+msgid "Enable the legacy OS::Heat::CWLiteAlarm resource."
+msgstr "Activer la ressource OS::Heat::CWLiteAlarm existante."
+
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 msgid "Enable the preview Stack Abandon feature."
 msgstr "Activer la fonction préliminaire d'abandon de pile (Stack Abandon)."
 
@@ -1828,6 +1844,13 @@ msgid "Failed to attach volume %(vol)s to server %(srv)s - %(err)s"
 msgstr "Echec de connexion du volume %(vol)s au serveur %(srv)s - %(err)s"
 
 #, python-format
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+msgid "Failed to create Bay '%(name)s' - %(reason)s"
+msgstr "Echec de la création de la baie '%(name)s' - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 msgid "Failed to detach interface (%(port)s) from server (%(server)s)"
 msgstr "Echec de déconnexion de l'interface (%(port)s) du serveur (%(server)s)"
 
@@ -1886,6 +1909,13 @@ msgstr ""
 "Echec de l'arrêt de la pile (%(stack_name)s) sur l'autre moteur "
 "(%(engine_id)s)"
 
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Failed to update Bay '%(name)s' - %(reason)s"
+msgstr "Echec de mise à jour de la baie '%(name)s' - %(reason)s"
+
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 msgid "Failed to update, can not found port info."
 msgstr "Echec de la mise à jour, les informations de port sont introuvables."
 
@@ -2427,6 +2457,13 @@ msgid "Incorrect arguments to \"%(fn_name)s\" should be: %(example)s"
 msgstr ""
 "Arguments incorrects pour \"%(fn_name)s\", devraient être : %(example)s"
 
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+msgid "Incorrect arguments: Items to merge must be maps."
+msgstr ""
+"Arguments incorrects : les éléments à fusionner doivent être des mappes."
+
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 #, python-format
 msgid ""
 "Incorrect index to \"%(fn_name)s\" should be between 0 and %(max_index)s"
@@ -3087,6 +3124,19 @@ msgstr ""
 "(généralement ceux qui sont générés par l'API Keystone v3 avec des "
 "catalogues de service volumineux)."
 
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+msgid ""
+"Maximum line size of message headers to be accepted. max_header_line may "
+"need to be increased when using large tokens (typically those generated by "
+"the Keystone v3 API with big service catalogs.)"
+msgstr ""
+"Taille maximale de ligne des en-têtes de message à accepter. max_header_line "
+"peut avoir besoin d'être augmenté lors de l'utilisation de grands jetons "
+"(généralement ceux qui sont générés par l'API Keystone v3 avec des "
+"catalogues de service volumineux)."
+
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 msgid "Maximum number of instances in the group."
 msgstr "Nombre maximal d'instances dans le groupe."
 
@@ -3103,6 +3153,14 @@ msgstr ""
 "Nombre maximal de secondes pendant lequel le moniteur attend qu'une "
 "connexion soit établie."
 
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+msgid "Maximum number of stacks any one tenant may have active at one time."
+msgstr ""
+"Nombre maximum de piles pouvant être actives en même temps pour n'importe "
+"quel locataire."
+
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 msgid "Maximum prefix size that can be allocated from the subnet pool."
 msgstr ""
 "Taille de préfixe maximum qui peut être allouée depuis le pool de sous-"
@@ -5423,6 +5481,12 @@ msgstr "Zone de disponibilité dans laquelle le volume sera créé."
 msgid "The availability zone of shared filesystem."
 msgstr "Zone de disponibilité du système de fichiers partagé."
 
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+msgid "The bay name."
+msgstr "Nom de baie."
+
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 msgid "The bit-length of the secret."
 msgstr "Longeur en bits du secret."
 
@@ -5903,6 +5967,12 @@ msgstr "Nom ou ID du parent de ce projet Keystone dans la hiérarchie."
 msgid "The name or ID of target cluster."
 msgstr "Nom ou ID du cluster cible."
 
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+msgid "The name or ID of the bay model."
+msgstr "Nom ou ID du modèle de baie."
+
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 msgid "The name or ID of the subnet on which to allocate the VIP address."
 msgstr "Nom ou ID du sous-réseau sur lequel allouer l'adresse VIP."
 
@@ -5918,6 +5988,12 @@ msgstr "Mode de négociation de la stratégie IKE."
 msgid "The next hop for the destination."
 msgstr "Prochain saut pour la destination."
 
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+msgid "The node count for this bay."
+msgstr "Nombre de noeuds pour cette baie."
+
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 msgid "The notification methods to use when an alarm state is ALARM."
 msgstr ""
 "Méthodes de notification à utiliser quand l'état d'une alarme est ALARM."
@@ -5950,6 +6026,12 @@ msgstr ""
 "Le nombre d'analyses d'intégrité consécutives réussies requises avant de "
 "faire passer l'instance à l'état sain."
 
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+msgid "The number of master nodes for this bay."
+msgstr "Nombre de noeuds maître pour cette baie."
+
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 msgid "The number of objects stored in the container."
 msgstr "Nombre d'objets stockés dans le conteneur."
 
@@ -5981,6 +6063,17 @@ msgstr ""
 "création de la pile se poursuive."
 
 msgid ""
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+"The optional public key. This allows users to supply the public key from a "
+"pre-existing key pair. If not supplied, a new key pair will be generated."
+msgstr ""
+"Clé publique facultative. Cela permet aux utilisateurs de fournir la clé "
+"publique à partir d'une paire de clés préexistante. Si elle n'est pas "
+"fournie, une nouvelle paire de clés sera générée."
+
+msgid ""
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 "The owner tenant ID of the address scope. Only administrative users can "
 "specify a tenant ID other than their own."
 msgstr ""
@@ -6491,6 +6584,14 @@ msgstr ""
 "client. La connexion entrante est fermée si elle reste en veille pendant ce "
 "délai en nombre de secondes. La valeur '0' signifie une attente illimitée."
 
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+msgid "Timeout for creating the bay in minutes. Set to 0 for no timeout."
+msgstr ""
+"Délai d'attente, en minutes, pour la création de la baie. Défini sur 0 quand "
+"il n'y a pas de délai d'attente."
+
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 msgid "Timeout in seconds for stack action (ie. create or update)."
 msgstr ""
 "Délai d'attente en secondes pour l'action de pile (par ex. création ou mise "
@@ -6609,6 +6710,12 @@ msgstr ""
 msgid "URL of keystone service endpoint."
 msgstr "URL du noeud final de service Keystone."
 
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+msgid "URL of the Heat CloudWatch server."
+msgstr "URL du serveur Heat CloudWatch."
+
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 msgid ""
 "URL of the Heat metadata server. NOTE: Setting this is only needed if you "
 "require instances to use a different endpoint than in the keystone catalog"
@@ -6756,10 +6863,25 @@ msgstr "Clé(s) inconnue(s) %s"
 msgid "Unknown share_status during creation of share \"{0}\""
 msgstr "share_status inconnu lors de la création du partage \"{0}\""
 
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Unknown status creating Bay '%(name)s' - %(reason)s"
+msgstr "Statut inconnu lors de la création de la baie '%(name)s' - %(reason)s"
+
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 msgid "Unknown status during deleting share \"{0}\""
 msgstr "Statut inconnu lors de la suppression du partage \"{0}\""
 
 #, python-format
+<<<<<<< heat/locale/fr/LC_MESSAGES/heat.po
+=======
+msgid "Unknown status updating Bay '%(name)s' - %(reason)s"
+msgstr ""
+"Statut inconnu lors de la mise à jour de la baie '%(name)s' - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/fr/LC_MESSAGES/heat.po
 msgid "Unknown status: %s"
 msgstr "Status inconnu: %s"
 
diff --git a/heat/locale/it/LC_MESSAGES/heat.po b/heat/locale/it/LC_MESSAGES/heat.po
index 82127c0..2d7d73a 100644
--- a/heat/locale/it/LC_MESSAGES/heat.po
+++ b/heat/locale/it/LC_MESSAGES/heat.po
@@ -8,7 +8,11 @@ msgid ""
 msgstr ""
 "Project-Id-Version: heat VERSION\n"
 "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
 "POT-Creation-Date: 2022-11-11 06:43+0000\n"
+=======
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -432,6 +436,14 @@ msgstr "Un elenco di gruppi di sicurezza per la porta."
 msgid "A list of security services IDs or names."
 msgstr "Un elenco di ID o nomi dei servizi di sicurezza."
 
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+msgid "A list of string policies to apply. Defaults to anti-affinity."
+msgstr ""
+"Un elenco di politiche stringa da applicare. L'impostazione predefinita è "
+"anti-affinity."
+
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 msgid "A login profile for the user."
 msgstr "Un profilo di login per l'utente."
 
@@ -1670,6 +1682,12 @@ msgstr ""
 msgid "Email address of keystone user."
 msgstr "Indirizzo email dell'utente keystone."
 
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+msgid "Enable the legacy OS::Heat::CWLiteAlarm resource."
+msgstr "Abilita la risorsa OS::Heat::CWLiteAlarm legacy."
+
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 msgid "Enable the preview Stack Abandon feature."
 msgstr "Abilitare la funzione Stack Abandon."
 
@@ -1825,6 +1843,13 @@ msgstr ""
 "Collegamento del volume %(vol)s al server %(srv)snon riuscito  - %(err)s"
 
 #, python-format
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+msgid "Failed to create Bay '%(name)s' - %(reason)s"
+msgstr "Impossibile creare l'alloggiamento '%(name)s' - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 msgid "Failed to detach interface (%(port)s) from server (%(server)s)"
 msgstr ""
 "Impossibile scollegare l'interfaccia (%(port)s) dal server (%(server)s)"
@@ -1883,6 +1908,13 @@ msgstr ""
 "Impossibile arrestare lo stack (%(stack_name)s) su un altro motore "
 "(%(engine_id)s)"
 
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Failed to update Bay '%(name)s' - %(reason)s"
+msgstr "Impossibile aggiornare l'alloggiamento '%(name)s' - %(reason)s"
+
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 msgid "Failed to update, can not found port info."
 msgstr ""
 "Impossibile aggiornare, non è possibile reperire le informazioni sulla porta."
@@ -2417,6 +2449,12 @@ msgstr ""
 msgid "Incorrect arguments to \"%(fn_name)s\" should be: %(example)s"
 msgstr "Argomenti non corretti per \"%(fn_name)s\"; devono essere: %(example)s"
 
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+msgid "Incorrect arguments: Items to merge must be maps."
+msgstr "Argomenti non corretti: gli elementi da unire devono essere mappe."
+
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 #, python-format
 msgid ""
 "Incorrect index to \"%(fn_name)s\" should be between 0 and %(max_index)s"
@@ -3070,6 +3108,19 @@ msgstr ""
 "token grandi (in genere quelli generati dall'API Keystone v3 con cataloghi "
 "del servizio di grandi dimensioni)."
 
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+msgid ""
+"Maximum line size of message headers to be accepted. max_header_line may "
+"need to be increased when using large tokens (typically those generated by "
+"the Keystone v3 API with big service catalogs.)"
+msgstr ""
+"Dimensione massima della riga di intestazioni del messaggio che deve essere "
+"accettata. max_header_line dovrebbe essere incrementato quando si utilizzano "
+"token grandi (in genere quelli generati dall'API Keystone v3 con cataloghi "
+"del servizio di grandi dimensioni)."
+
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 msgid "Maximum number of instances in the group."
 msgstr "Numero massimo di istanze nel gruppo."
 
@@ -3086,6 +3137,14 @@ msgstr ""
 "Numero massimo di secondi che un monitor deve attendere per stabilire una "
 "connessione prima che venga raggiunto il timeout."
 
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+msgid "Maximum number of stacks any one tenant may have active at one time."
+msgstr ""
+"Numero massimo di stack un qualsiasi tenant può avere attivo "
+"contemporaneamente."
+
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 msgid "Maximum prefix size that can be allocated from the subnet pool."
 msgstr ""
 "La dimensione massima del prefisso che può essere allocata dal pool di "
@@ -5389,6 +5448,12 @@ msgstr "La zona di disponibilità in cui il volume verrà creato."
 msgid "The availability zone of shared filesystem."
 msgstr "La zona di disponibilità per il filesystem condiviso."
 
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+msgid "The bay name."
+msgstr "Il nome dell'alloggiamento."
+
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 msgid "The bit-length of the secret."
 msgstr "La lunghezza in bit del segreto."
 
@@ -5882,6 +5947,12 @@ msgstr ""
 msgid "The name or ID of target cluster."
 msgstr "Il nome o l'ID del cluster di destinazione."
 
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+msgid "The name or ID of the bay model."
+msgstr "Il nome o l'ID del modello di alloggiamento."
+
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 msgid "The name or ID of the subnet on which to allocate the VIP address."
 msgstr "Il nome o l'ID della sottorete su cui allocare l'indirizzo VIP."
 
@@ -5897,6 +5968,12 @@ msgstr "La modalità di negoziazione della politica ike."
 msgid "The next hop for the destination."
 msgstr "L'hop successivo per la destinazione."
 
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+msgid "The node count for this bay."
+msgstr "Il numero di nodi per questo alloggiamento."
+
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 msgid "The notification methods to use when an alarm state is ALARM."
 msgstr ""
 "I metodi di notifica da utilizzare quando uno stato di allarme è ALARM."
@@ -5930,6 +6007,12 @@ msgstr ""
 "Il numero di probe di stato consecutivi con esito positivo richiesti prima "
 "di spostare l'istanza allo stato funzionante."
 
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+msgid "The number of master nodes for this bay."
+msgstr "Il numero di nodi principali per questo alloggiamento."
+
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 msgid "The number of objects stored in the container."
 msgstr "Il numero di oggetti memorizzati nel contenitore."
 
@@ -5961,6 +6044,17 @@ msgstr ""
 "processo di creazione dello stack continui."
 
 msgid ""
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+"The optional public key. This allows users to supply the public key from a "
+"pre-existing key pair. If not supplied, a new key pair will be generated."
+msgstr ""
+"La chiave pubblica facoltativa. Ciò consente agli utenti di fornire la "
+"chiave pubblica da una coppia di chiavi preesistente. Se non fornita, verrà "
+"generata una nuova coppia di chiavi."
+
+msgid ""
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 "The owner tenant ID of the address scope. Only administrative users can "
 "specify a tenant ID other than their own."
 msgstr ""
@@ -6475,6 +6569,14 @@ msgstr ""
 "connessione in entrata è inattiva per questo numero di secondi, verrà "
 "chiusa. Il valore 0 indica un'attesa illimitata."
 
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+msgid "Timeout for creating the bay in minutes. Set to 0 for no timeout."
+msgstr ""
+"Timeout per la creazione dell'alloggiamento in minuti. Impostare su 0 per "
+"nessun timeout."
+
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 msgid "Timeout in seconds for stack action (ie. create or update)."
 msgstr ""
 "Timeout in secondi per l'azione stack  (ad esempio creare o aggiornare)."
@@ -6594,6 +6696,12 @@ msgstr ""
 msgid "URL of keystone service endpoint."
 msgstr "URL dell'endpoint del servizio keystone."
 
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+msgid "URL of the Heat CloudWatch server."
+msgstr "URL del server Heat CloudWatch."
+
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 msgid ""
 "URL of the Heat metadata server. NOTE: Setting this is only needed if you "
 "require instances to use a different endpoint than in the keystone catalog"
@@ -6739,10 +6847,28 @@ msgid "Unknown share_status during creation of share \"{0}\""
 msgstr ""
 "share_status sconosciuto durante la creazione della condivisione \"{0}\""
 
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Unknown status creating Bay '%(name)s' - %(reason)s"
+msgstr ""
+"Stato sconosciuto durante la creazione dell'alloggiamento '%(name)s' - "
+"%(reason)s"
+
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 msgid "Unknown status during deleting share \"{0}\""
 msgstr "Stato sconosciuto durante l'eliminazione della condivisione  \"{0}\""
 
 #, python-format
+<<<<<<< heat/locale/it/LC_MESSAGES/heat.po
+=======
+msgid "Unknown status updating Bay '%(name)s' - %(reason)s"
+msgstr ""
+"Stato sconosciuto durante l'aggiornamento dell'alloggiamento '%(name)s' - "
+"%(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/it/LC_MESSAGES/heat.po
 msgid "Unknown status: %s"
 msgstr "Stato sconosciuto: %s"
 
diff --git a/heat/locale/ja/LC_MESSAGES/heat.po b/heat/locale/ja/LC_MESSAGES/heat.po
index 74915af..cdb97e8 100644
--- a/heat/locale/ja/LC_MESSAGES/heat.po
+++ b/heat/locale/ja/LC_MESSAGES/heat.po
@@ -12,7 +12,11 @@ msgid ""
 msgstr ""
 "Project-Id-Version: heat VERSION\n"
 "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
 "POT-Creation-Date: 2022-11-11 06:43+0000\n"
+=======
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -418,6 +422,13 @@ msgstr "ポートのセキュリティーグループのリスト。"
 msgid "A list of security services IDs or names."
 msgstr "セキュリティーサービスの ID または名前のリスト。"
 
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+msgid "A list of string policies to apply. Defaults to anti-affinity."
+msgstr ""
+"適用する文字列ポリシーの一覧。アンチアフィニティーにデフォルト設定されます。"
+
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 msgid "A login profile for the user."
 msgstr "ユーザーのログインプロファイル。"
 
@@ -1620,6 +1631,12 @@ msgstr ""
 msgid "Email address of keystone user."
 msgstr "keystone ユーザーの E メールアドレス。"
 
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+msgid "Enable the legacy OS::Heat::CWLiteAlarm resource."
+msgstr "既存の OS::Heat::CWLiteAlarm リソースを有効にします。"
+
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 msgid "Enable the preview Stack Abandon feature."
 msgstr "スタック破棄のプレビュー機能を有効にします。"
 
@@ -1779,6 +1796,13 @@ msgid "Failed to attach volume %(vol)s to server %(srv)s - %(err)s"
 msgstr "サーバー %(srv)s へのボリューム %(vol)s の追加に失敗しました: %(err)s"
 
 #, python-format
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+msgid "Failed to create Bay '%(name)s' - %(reason)s"
+msgstr "ベイ '%(name)s' の作成に失敗しました: %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 msgid "Failed to detach interface (%(port)s) from server (%(server)s)"
 msgstr ""
 "サーバー (%(server)s) からインターフェース (%(port)s) を接続解除できませんで"
@@ -1843,6 +1867,13 @@ msgstr ""
 "他のエンジン (%(engine_id)s) 上のスタック (%(stack_name)s) を停止できませんで"
 "した。"
 
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Failed to update Bay '%(name)s' - %(reason)s"
+msgstr "ベイ '%(name)s' の更新に失敗しました: %(reason)s"
+
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 msgid "Failed to update, can not found port info."
 msgstr "更新できませんでした。ポート情報が見つかりません。"
 
@@ -2364,6 +2395,12 @@ msgid "Incorrect arguments to \"%(fn_name)s\" should be: %(example)s"
 msgstr ""
 "\"%(fn_name)s\" に対する引数が正しくありません。正しい引数: %(example)s"
 
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+msgid "Incorrect arguments: Items to merge must be maps."
+msgstr "不正確な引数: マージする項目はマップでなければなりません。"
+
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 #, python-format
 msgid ""
 "Incorrect index to \"%(fn_name)s\" should be between 0 and %(max_index)s"
@@ -3014,6 +3051,18 @@ msgstr ""
 "Keystone v3 API で大きなサービスカタログを使用して生成されるトークン) を使用"
 "するときは max_header_line を増やさなければならない場合があります。"
 
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+msgid ""
+"Maximum line size of message headers to be accepted. max_header_line may "
+"need to be increased when using large tokens (typically those generated by "
+"the Keystone v3 API with big service catalogs.)"
+msgstr ""
+"受け入れられるメッセージヘッダーの最大行サイズ。大きなトークン (通常は、"
+"Keystone v3 API で大きなサービスカタログを使用して生成されるトークン) を使用"
+"するときは max_header_line を増やさなければならない場合があります。"
+
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 msgid "Maximum number of instances in the group."
 msgstr "グループ内のインスタンスの最大数。"
 
@@ -3029,6 +3078,12 @@ msgid ""
 msgstr ""
 "これを超えるとタイムアウトになる、モニターが接続の確立を待機する最大秒数。"
 
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+msgid "Maximum number of stacks any one tenant may have active at one time."
+msgstr "任意の 1 つのテナントが同時にアクティブにできるスタックの最大数。"
+
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 msgid "Maximum prefix size that can be allocated from the subnet pool."
 msgstr ""
 "サブネットプールから割り当てることができるプレフィックスサイズの最大値。"
@@ -5289,6 +5344,12 @@ msgstr "ボリュームが作成されるアベイラビリティーゾーン。
 msgid "The availability zone of shared filesystem."
 msgstr "共有ファイルシステムのアベイラビリティーゾーン。"
 
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+msgid "The bay name."
+msgstr "ベイの名前。"
+
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 msgid "The bit-length of the secret."
 msgstr "秘密のビット長。"
 
@@ -5749,6 +5810,12 @@ msgstr "階層内のこの keystone プロジェクトの親の名前または I
 msgid "The name or ID of target cluster."
 msgstr "ターゲットクラスターの名前または ID。"
 
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+msgid "The name or ID of the bay model."
+msgstr "ベイモデルの名前または ID。"
+
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 msgid "The name or ID of the subnet on which to allocate the VIP address."
 msgstr "VIP アドレスを割り当てる必要のあるサブネットの名前または ID。"
 
@@ -5764,6 +5831,12 @@ msgstr "IKE ポリシーのネゴシエーションモード。"
 msgid "The next hop for the destination."
 msgstr "宛先のネクストホップ。"
 
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+msgid "The node count for this bay."
+msgstr "このベイのノード数。"
+
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 msgid "The notification methods to use when an alarm state is ALARM."
 msgstr "アラームの状態が ALARM の場合に使用する通知方法。"
 
@@ -5793,6 +5866,12 @@ msgstr ""
 "インスタンスを正常な状態に移行する前に、連続するヘルスプローブの成功数が必要"
 "です。"
 
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+msgid "The number of master nodes for this bay."
+msgstr "このベイのマスターノードの数。"
+
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 msgid "The number of objects stored in the container."
 msgstr "コンテナーに保管されたオブジェクトの数。"
 
@@ -5821,6 +5900,16 @@ msgid ""
 msgstr "スタック作成プロセスを続行するために受信する必要のある成功シグナル数。"
 
 msgid ""
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+"The optional public key. This allows users to supply the public key from a "
+"pre-existing key pair. If not supplied, a new key pair will be generated."
+msgstr ""
+"オプションの公開鍵。これを使用して、ユーザーは既存のキーペアからの公開鍵を指"
+"定できます。指定しない場合は、新しいキーペアが生成されます。"
+
+msgid ""
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 "The owner tenant ID of the address scope. Only administrative users can "
 "specify a tenant ID other than their own."
 msgstr "アドレススコープの所有者"
@@ -6313,6 +6402,14 @@ msgstr ""
 "間アイドル状態にある場合、接続は終了します。値が '0' の場合、待機時間に制限が"
 "ないことを指します。"
 
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+msgid "Timeout for creating the bay in minutes. Set to 0 for no timeout."
+msgstr ""
+"ベイを作成する際のタイムアウト時間 (分)。タイムアウト時間を設定しない場合は "
+"0 を設定します。"
+
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 msgid "Timeout in seconds for stack action (ie. create or update)."
 msgstr "スタックアクション (作成または更新) のタイムアウト (秒)。"
 
@@ -6428,6 +6525,12 @@ msgstr ""
 msgid "URL of keystone service endpoint."
 msgstr "keystone サービスのエンドポイントのURL。"
 
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+msgid "URL of the Heat CloudWatch server."
+msgstr "heat CloudWatch サーバーの URL。"
+
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 msgid ""
 "URL of the Heat metadata server. NOTE: Setting this is only needed if you "
 "require instances to use a different endpoint than in the keystone catalog"
@@ -6560,10 +6663,24 @@ msgstr "キー %s は不明です"
 msgid "Unknown share_status during creation of share \"{0}\""
 msgstr "シェア \"{0}\" の作成中の share_status が不明です。"
 
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Unknown status creating Bay '%(name)s' - %(reason)s"
+msgstr "ベイ '%(name)s' の作成状況が不明です:  %(reason)s"
+
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 msgid "Unknown status during deleting share \"{0}\""
 msgstr "シェア \"{0}\" の削除中の状態が不明です。"
 
 #, python-format
+<<<<<<< heat/locale/ja/LC_MESSAGES/heat.po
+=======
+msgid "Unknown status updating Bay '%(name)s' - %(reason)s"
+msgstr "ベイ '%(name)s' の更新状況が不明です:  %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/ja/LC_MESSAGES/heat.po
 msgid "Unknown status: %s"
 msgstr "不明状況: %s"
 
diff --git a/heat/locale/ko_KR/LC_MESSAGES/heat.po b/heat/locale/ko_KR/LC_MESSAGES/heat.po
index a694e81..6dc3178 100644
--- a/heat/locale/ko_KR/LC_MESSAGES/heat.po
+++ b/heat/locale/ko_KR/LC_MESSAGES/heat.po
@@ -11,7 +11,11 @@ msgid ""
 msgstr ""
 "Project-Id-Version: heat VERSION\n"
 "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
 "POT-Creation-Date: 2022-11-11 06:43+0000\n"
+=======
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -414,6 +418,12 @@ msgstr "포트의 보안 그룹 목록입니다."
 msgid "A list of security services IDs or names."
 msgstr "보안 서비스 ID 또는 이름 목록입니다."
 
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+msgid "A list of string policies to apply. Defaults to anti-affinity."
+msgstr "적용할 문자열 정책 목록입니다. 기본값은 안티 선호도입니다."
+
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 msgid "A login profile for the user."
 msgstr "사용자의 로그인 프로파일입니다."
 
@@ -1562,6 +1572,12 @@ msgstr "디바이스 맵핑 %s에 대해 volume_id 또는 snapshot_id가 지정
 msgid "Email address of keystone user."
 msgstr "keystone 사용자의 이메일 주소입니다."
 
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+msgid "Enable the legacy OS::Heat::CWLiteAlarm resource."
+msgstr "레거시 OS::Heat::CWLiteAlarm 자원을 사용합니다."
+
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 msgid "Enable the preview Stack Abandon feature."
 msgstr "스택 중단 미리보기 기능을 사용합니다."
 
@@ -1710,6 +1726,13 @@ msgid "Failed to attach volume %(vol)s to server %(srv)s - %(err)s"
 msgstr "볼륨 %(vol)s을(를) 서버 %(srv)s에 연결하는 데 실패 - %(err)s"
 
 #, python-format
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+msgid "Failed to create Bay '%(name)s' - %(reason)s"
+msgstr "Bay '%(name)s' - %(reason)s 작성 실패"
+
+#, python-format
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 msgid "Failed to detach interface (%(port)s) from server (%(server)s)"
 msgstr "서버((%(server)s)에서 인터페이스(%(port)s)의 연결을 해제하는 데 실패"
 
@@ -1763,6 +1786,13 @@ msgstr ""
 msgid "Failed to stop stack (%(stack_name)s) on other engine (%(engine_id)s)"
 msgstr "다른 엔진(%(engine_id)s)에서 스택(%(stack_name)s) 중지 실패"
 
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Failed to update Bay '%(name)s' - %(reason)s"
+msgstr "Bay '%(name)s' - %(reason)s 업데이트 실패"
+
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 msgid "Failed to update, can not found port info."
 msgstr "업데이트에 실패했고 포트 정보를 찾을 수 없습니다."
 
@@ -2266,6 +2296,12 @@ msgstr ""
 msgid "Incorrect arguments to \"%(fn_name)s\" should be: %(example)s"
 msgstr "\"%(fn_name)s\"에 대한 올바르지 않은 인수는 %(example)s이어야 함 "
 
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+msgid "Incorrect arguments: Items to merge must be maps."
+msgstr "잘못된 인수: 병합할 항목은 맵이어야 합니다."
+
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 #, python-format
 msgid ""
 "Incorrect index to \"%(fn_name)s\" should be between 0 and %(max_index)s"
@@ -2871,6 +2907,18 @@ msgstr ""
 "을 늘려야 할 수 있습니다(일반적으로 큰 서비스 카탈로그가 있는 키스톤 v3 API"
 "에 의해 생성됨)."
 
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+msgid ""
+"Maximum line size of message headers to be accepted. max_header_line may "
+"need to be increased when using large tokens (typically those generated by "
+"the Keystone v3 API with big service catalogs.)"
+msgstr ""
+"허용할 메시지 헤더의 최대 행 크기입니다. 더 큰 토큰 사용 시 max_header_line"
+"을 늘려야 할 수 있습니다(일반적으로 큰 서비스 카탈로그가 있는 키스톤 v3 API"
+"에 의해 생성됨)."
+
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 msgid "Maximum number of instances in the group."
 msgstr "그룹의 최대 인스턴스 수입니다."
 
@@ -2887,6 +2935,12 @@ msgstr ""
 "제한시간 초과되기 전에 연결이 설정될 때까지 대기하는 모니터에 대한 최대 시간"
 "(초)입니다."
 
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+msgid "Maximum number of stacks any one tenant may have active at one time."
+msgstr "하나의 테넌트가 한 번에 활성으로 가질 수 있는 최대 스택 수입니다."
+
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 msgid "Maximum prefix size that can be allocated from the subnet pool."
 msgstr "서브넷 풀에서 할당할 수 있는 최대 접두부 크기입니다."
 
@@ -5066,6 +5120,12 @@ msgstr "볼륨이 작성되는 가용성 구역입니다."
 msgid "The availability zone of shared filesystem."
 msgstr "공유 파일 시스템의 가용 구역입니다."
 
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+msgid "The bay name."
+msgstr "Bay 이름입니다."
+
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 msgid "The bit-length of the secret."
 msgstr "시크릿의 비트 길이입니다."
 
@@ -5515,6 +5575,12 @@ msgstr "계층 구조에 있는 이 keystone 프로젝트의 상위 이름 또
 msgid "The name or ID of target cluster."
 msgstr "대상 클러스터의 이름 또는 ID입니다."
 
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+msgid "The name or ID of the bay model."
+msgstr "Bay 모델의 이름 또는 ID입니다."
+
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 msgid "The name or ID of the subnet on which to allocate the VIP address."
 msgstr "VIP 주소를 할당할 서브넷의 이름 또는 ID입니다."
 
@@ -5530,6 +5596,12 @@ msgstr "ike 정책의 협상 모드입니다."
 msgid "The next hop for the destination."
 msgstr "대상의 다음 hop입니다."
 
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+msgid "The node count for this bay."
+msgstr "이 Bay의 노드 수입니다."
+
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 msgid "The notification methods to use when an alarm state is ALARM."
 msgstr "알람 상태가 ALARM이면 사용할 알림 방법입니다."
 
@@ -5557,6 +5629,12 @@ msgid ""
 msgstr ""
 "인스턴스를 양호 상태로 이동하기 전에 필요한 연속 상태 프로브 성공 수입니다."
 
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+msgid "The number of master nodes for this bay."
+msgstr "Bay의 마스터 노드 수입니다."
+
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 msgid "The number of objects stored in the container."
 msgstr "컨테이너에 저장된 오브젝트 수입니다."
 
@@ -5585,6 +5663,16 @@ msgid ""
 msgstr "스택 작성 프로세스가 계속되기 전에 수신되어야 하는 성공 신호 수입니다."
 
 msgid ""
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+"The optional public key. This allows users to supply the public key from a "
+"pre-existing key pair. If not supplied, a new key pair will be generated."
+msgstr ""
+"선택적 공개 키입니다. 이로 인해 사용자가 이전의 기존 키 쌍에서 공개 키를 제공"
+"할 수 있습니다. 제공되지 않는 경우 새 키 쌍이 생성됩니다."
+
+msgid ""
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 "The owner tenant ID of the address scope. Only administrative users can "
 "specify a tenant ID other than their own."
 msgstr ""
@@ -6064,6 +6152,14 @@ msgstr ""
 "클라이언트 연결의 소켓 조작에 대한 제한시간입니다. 수신 연결이 이 기간(초) 동"
 "안 유휴 상태이면 연결이 닫힙니다. 값이 '0'이면 무기한 대기합니다."
 
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+msgid "Timeout for creating the bay in minutes. Set to 0 for no timeout."
+msgstr ""
+"Bay를 작성하기 위한 제한시간(분)입니다. 제한시간이 없게 하려면 0으로 설정하십"
+"시오."
+
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 msgid "Timeout in seconds for stack action (ie. create or update)."
 msgstr "스택 조치(즉, 작성 또는 업데이트)에 대한 제한시간(초)입니다."
 
@@ -6177,6 +6273,12 @@ msgstr ""
 msgid "URL of keystone service endpoint."
 msgstr "keystone 서비스 엔드포인트의 URL입니다."
 
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+msgid "URL of the Heat CloudWatch server."
+msgstr "히트 CloudWatch 서버의 URL입니다."
+
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 msgid ""
 "URL of the Heat metadata server. NOTE: Setting this is only needed if you "
 "require instances to use a different endpoint than in the keystone catalog"
@@ -6309,10 +6411,24 @@ msgstr "알 수 없는 키 %s"
 msgid "Unknown share_status during creation of share \"{0}\""
 msgstr "공유 \"{0}\" 작성 중에 알 수 없는 share_status"
 
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Unknown status creating Bay '%(name)s' - %(reason)s"
+msgstr "알 수 없는 Bay '%(name)s' - %(reason)s 작성 상태"
+
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 msgid "Unknown status during deleting share \"{0}\""
 msgstr "공유 \"{0}\" 삭제 중에 알 수 없는 상태"
 
 #, python-format
+<<<<<<< heat/locale/ko_KR/LC_MESSAGES/heat.po
+=======
+msgid "Unknown status updating Bay '%(name)s' - %(reason)s"
+msgstr "알 수 없는 Bay '%(name)s' - %(reason)s 업데이트 상태"
+
+#, python-format
+>>>>>>> heat/locale/ko_KR/LC_MESSAGES/heat.po
 msgid "Unknown status: %s"
 msgstr "알 수 없는 상태: %s"
 
diff --git a/heat/locale/pt_BR/LC_MESSAGES/heat.po b/heat/locale/pt_BR/LC_MESSAGES/heat.po
index bd63e24..e2a295b 100644
--- a/heat/locale/pt_BR/LC_MESSAGES/heat.po
+++ b/heat/locale/pt_BR/LC_MESSAGES/heat.po
@@ -10,7 +10,11 @@ msgid ""
 msgstr ""
 "Project-Id-Version: heat VERSION\n"
 "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
 "POT-Creation-Date: 2022-11-11 06:43+0000\n"
+=======
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -431,6 +435,12 @@ msgstr "Uma lista de grupos de segurança para a porta."
 msgid "A list of security services IDs or names."
 msgstr "Uma lista de IDs ou nomes de serviço de segurança."
 
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+msgid "A list of string policies to apply. Defaults to anti-affinity."
+msgstr "Uma lista de políticas de sequência a aplicar. Padrões antiafinidade."
+
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 msgid "A login profile for the user."
 msgstr "Um perfil de login para o usuário."
 
@@ -1650,6 +1660,12 @@ msgstr ""
 msgid "Email address of keystone user."
 msgstr "Endereço de e-mail do usuário do keystone."
 
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+msgid "Enable the legacy OS::Heat::CWLiteAlarm resource."
+msgstr "Ative o recurso OS::Heat::CWLiteAlarm de legado."
+
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 msgid "Enable the preview Stack Abandon feature."
 msgstr "Ative o recurso Stack Abandon de visualização."
 
@@ -1804,6 +1820,13 @@ msgid "Failed to attach volume %(vol)s to server %(srv)s - %(err)s"
 msgstr "Falha ao conectar volume %(vol)s ao servidor %(srv)s - %(err)s"
 
 #, python-format
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+msgid "Failed to create Bay '%(name)s' - %(reason)s"
+msgstr "Falha ao criar o Compartimento '%(name)s' - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 msgid "Failed to detach interface (%(port)s) from server (%(server)s)"
 msgstr "Falha ao desconectar a interface (%(port)s) do servidor (%(server)s)"
 
@@ -1860,6 +1883,13 @@ msgid "Failed to stop stack (%(stack_name)s) on other engine (%(engine_id)s)"
 msgstr ""
 "Falha ao parar pilha (%(stack_name)s) em outro mecanismo (%(engine_id)s)"
 
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Failed to update Bay '%(name)s' - %(reason)s"
+msgstr "Falha ao atualizar o Compartimento '%(name)s' - %(reason)s"
+
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 msgid "Failed to update, can not found port info."
 msgstr ""
 "Falha na atualização, não é possível localizar as informações da porta."
@@ -2388,6 +2418,12 @@ msgstr "Argumentos incorretos para \"%(fn_name)s\" deve ser um de: %(allowed)s"
 msgid "Incorrect arguments to \"%(fn_name)s\" should be: %(example)s"
 msgstr "Argumentos incorretos para \"%(fn_name)s\" deve ser: %(example)s"
 
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+msgid "Incorrect arguments: Items to merge must be maps."
+msgstr "Argumentos incorretos: Itens para mesclar devem ser mapas."
+
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 #, python-format
 msgid ""
 "Incorrect index to \"%(fn_name)s\" should be between 0 and %(max_index)s"
@@ -3032,6 +3068,19 @@ msgstr ""
 "(geralmente aqueles gerados pela API Keystone v3 com catálogos de serviço "
 "grandes)."
 
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+msgid ""
+"Maximum line size of message headers to be accepted. max_header_line may "
+"need to be increased when using large tokens (typically those generated by "
+"the Keystone v3 API with big service catalogs.)"
+msgstr ""
+"Tamanho máximo da linha de cabeçalhos da mensagem a ser aceito. "
+"max_header_line pode precisar ser aumentada ao utilizar tokens grandes "
+"(geralmente aqueles gerados pela API Keystone v3 com catálogos de serviço "
+"grandes)."
+
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 msgid "Maximum number of instances in the group."
 msgstr "Número máximo de instâncias no grupo."
 
@@ -3048,6 +3097,13 @@ msgstr ""
 "Número máximo de segundos para um monitor aguardar que uma conexão seja "
 "estabelecida antes de atingir o tempo limite."
 
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+msgid "Maximum number of stacks any one tenant may have active at one time."
+msgstr ""
+"Número máximo de pilhas que um locatário pode ter ativas ao mesmo tempo."
+
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 msgid "Maximum prefix size that can be allocated from the subnet pool."
 msgstr ""
 "Tamanho máximo do prefixo que pode ser alocado a partir do conjunto de sub-"
@@ -5330,6 +5386,12 @@ msgstr "A zona de disponibilidade na qual o volume será criado."
 msgid "The availability zone of shared filesystem."
 msgstr "A zona de disponibilidade do sistema de arquivos compartilhado."
 
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+msgid "The bay name."
+msgstr "O nome do compartimento."
+
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 msgid "The bit-length of the secret."
 msgstr "O comprimento de bit do segredo."
 
@@ -5803,6 +5865,12 @@ msgstr "O nome ou ID do pai desse projeto do keystone na hierarquia."
 msgid "The name or ID of target cluster."
 msgstr "O nome ou ID do cluster de destino."
 
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+msgid "The name or ID of the bay model."
+msgstr "O nome ou ID do modelo do compartimento."
+
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 msgid "The name or ID of the subnet on which to allocate the VIP address."
 msgstr "O nome ou ID da sub-rede no qual alocar o endereço VIP."
 
@@ -5818,6 +5886,12 @@ msgstr "O modo de negociação da política ike."
 msgid "The next hop for the destination."
 msgstr "O próximo hop para o destino."
 
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+msgid "The node count for this bay."
+msgstr "A contagens de nós para esse compartimento."
+
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 msgid "The notification methods to use when an alarm state is ALARM."
 msgstr ""
 "Os métodos de notificação a serem usados quando um estado de alarme for "
@@ -5852,6 +5926,12 @@ msgstr ""
 "O número de sucessos de análise consecutivas de funcionamento necessária "
 "antes de mover a instância para o estado saudável."
 
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+msgid "The number of master nodes for this bay."
+msgstr "O número de nós principais para esse compartimento."
+
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 msgid "The number of objects stored in the container."
 msgstr "O número de objetos armazenados no contêiner."
 
@@ -5883,6 +5963,17 @@ msgstr ""
 "criação da pilha continue."
 
 msgid ""
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+"The optional public key. This allows users to supply the public key from a "
+"pre-existing key pair. If not supplied, a new key pair will be generated."
+msgstr ""
+"A chave pública opcional. Isso permite que os usuários forneçam a chave "
+"pública a partir de um par de chaves preexistente. Se não fornecido, um novo "
+"par de chaves será gerado."
+
+msgid ""
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 "The owner tenant ID of the address scope. Only administrative users can "
 "specify a tenant ID other than their own."
 msgstr ""
@@ -6391,6 +6482,14 @@ msgstr ""
 "conexão recebida estiver inativa por esse número de segundos, ela será "
 "encerrada. Um valor de '0' significa aguardar para sempre."
 
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+msgid "Timeout for creating the bay in minutes. Set to 0 for no timeout."
+msgstr ""
+"Tempo limite para criar o compartimento em minutos. Configure para 0 para "
+"nenhum tempo limite. "
+
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 msgid "Timeout in seconds for stack action (ie. create or update)."
 msgstr ""
 "Tempo limite em segundos para a ação da pilha (por exemplo, a criação ou "
@@ -6509,6 +6608,12 @@ msgstr ""
 msgid "URL of keystone service endpoint."
 msgstr "URL do terminal de serviço do keystone."
 
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+msgid "URL of the Heat CloudWatch server."
+msgstr "URL do servidor CloudWatch."
+
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 msgid ""
 "URL of the Heat metadata server. NOTE: Setting this is only needed if you "
 "require instances to use a different endpoint than in the keystone catalog"
@@ -6650,10 +6755,25 @@ msgid "Unknown share_status during creation of share \"{0}\""
 msgstr ""
 "share_status desconhecido durante a criação do compartilhamento \"{0}\""
 
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Unknown status creating Bay '%(name)s' - %(reason)s"
+msgstr "Status desconhecido ao criar o Compartimento '%(name)s' - %(reason)s"
+
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 msgid "Unknown status during deleting share \"{0}\""
 msgstr "Status desconhecido durante a exclusão do compartilhamento \"{0}\""
 
 #, python-format
+<<<<<<< heat/locale/pt_BR/LC_MESSAGES/heat.po
+=======
+msgid "Unknown status updating Bay '%(name)s' - %(reason)s"
+msgstr ""
+"Status desconhecido ao atualizar o Compartimento '%(name)s' - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/pt_BR/LC_MESSAGES/heat.po
 msgid "Unknown status: %s"
 msgstr "Status desconhecido: %s"
 
diff --git a/heat/locale/ru/LC_MESSAGES/heat.po b/heat/locale/ru/LC_MESSAGES/heat.po
index 76444a4..072fd11 100644
--- a/heat/locale/ru/LC_MESSAGES/heat.po
+++ b/heat/locale/ru/LC_MESSAGES/heat.po
@@ -8,7 +8,11 @@ msgid ""
 msgstr ""
 "Project-Id-Version: heat VERSION\n"
 "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
 "POT-Creation-Date: 2022-11-11 06:43+0000\n"
+=======
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -428,6 +432,13 @@ msgstr "Список групп защиты для порта."
 msgid "A list of security services IDs or names."
 msgstr "Список ИД или имен служб защиты."
 
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+msgid "A list of string policies to apply. Defaults to anti-affinity."
+msgstr ""
+"Список стратегий строк для применения. Значение по умолчанию: anti-affinity."
+
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 msgid "A login profile for the user."
 msgstr "Профайл регистрации для пользователя."
 
@@ -1627,6 +1638,12 @@ msgstr ""
 msgid "Email address of keystone user."
 msgstr "Адрес электронной почты пользователя keystone."
 
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+msgid "Enable the legacy OS::Heat::CWLiteAlarm resource."
+msgstr "Включить устаревший ресурс OS::Heat::CWLiteAlarm."
+
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 msgid "Enable the preview Stack Abandon feature."
 msgstr "Включить предварительный просмотр функции Отклонить стек."
 
@@ -1781,6 +1798,13 @@ msgid "Failed to attach volume %(vol)s to server %(srv)s - %(err)s"
 msgstr "Не удается присоединить том %(vol)s к серверу %(srv)s - %(err)s"
 
 #, python-format
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+msgid "Failed to create Bay '%(name)s' - %(reason)s"
+msgstr "Не удается создать отсек '%(name)s' - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 msgid "Failed to detach interface (%(port)s) from server (%(server)s)"
 msgstr "Не удается отсоединить интерфейс (%(port)s) от сервера (%(server)s)"
 
@@ -1837,6 +1861,13 @@ msgid "Failed to stop stack (%(stack_name)s) on other engine (%(engine_id)s)"
 msgstr ""
 "Не удалось остановить стек (%(stack_name)s) в другом модуле (%(engine_id)s)"
 
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Failed to update Bay '%(name)s' - %(reason)s"
+msgstr "Не удается обновить отсек '%(name)s' - %(reason)s"
+
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 msgid "Failed to update, can not found port info."
 msgstr "Не удалось обновить: не найдена информация о портах."
 
@@ -2368,6 +2399,12 @@ msgstr "Неверные аргументы \"%(fn_name)s\", должны быт
 msgid "Incorrect arguments to \"%(fn_name)s\" should be: %(example)s"
 msgstr "Неверные аргументы \"%(fn_name)s\", должны быть: %(example)s"
 
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+msgid "Incorrect arguments: Items to merge must be maps."
+msgstr "Недопустимые аргументы. Объединяемые элементы должны быть картами."
+
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 #, python-format
 msgid ""
 "Incorrect index to \"%(fn_name)s\" should be between 0 and %(max_index)s"
@@ -3006,6 +3043,18 @@ msgstr ""
 "потребуется увеличить при использовании больших маркеров (как правило, "
 "созданных API Keystone версии 3 API с большими каталогами)."
 
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+msgid ""
+"Maximum line size of message headers to be accepted. max_header_line may "
+"need to be increased when using large tokens (typically those generated by "
+"the Keystone v3 API with big service catalogs.)"
+msgstr ""
+"Максимальный размер строки заголовка сообщений. Возможно, max_header_line "
+"потребуется увеличить при использовании больших маркеров (как правило, "
+"созданных API Keystone версии 3 API с большими каталогами)."
+
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 msgid "Maximum number of instances in the group."
 msgstr "Максимальное число экземпляров в группе."
 
@@ -3024,6 +3073,14 @@ msgstr ""
 "Максимальное время ожидания соединения монитором, в секундах, до наступления "
 "тайм-аута."
 
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+msgid "Maximum number of stacks any one tenant may have active at one time."
+msgstr ""
+"Максимальное число стеков, которые могут быть активны одновременно для "
+"одного арендатора."
+
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 msgid "Maximum prefix size that can be allocated from the subnet pool."
 msgstr ""
 "Максимальный размер префикса, разрешенный при выделении подсетей из пула."
@@ -5259,6 +5316,12 @@ msgstr "Область доступности, в которой будет со
 msgid "The availability zone of shared filesystem."
 msgstr "Зона доступности для общих файловых систем."
 
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+msgid "The bay name."
+msgstr "Имя отсека."
+
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 msgid "The bit-length of the secret."
 msgstr "Число разрядов секретного ключа."
 
@@ -5730,6 +5793,12 @@ msgstr "Имя или ИД родительского объекта проек
 msgid "The name or ID of target cluster."
 msgstr "Имя или ИД целевого кластера."
 
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+msgid "The name or ID of the bay model."
+msgstr "Имя или ИД модели отсека."
+
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 msgid "The name or ID of the subnet on which to allocate the VIP address."
 msgstr "Имя или ИД подсети, в которой выделяется адрес VIP."
 
@@ -5745,6 +5814,12 @@ msgstr "Режим согласования стратегии ike."
 msgid "The next hop for the destination."
 msgstr "Следующий промежуточный узел в маршруте."
 
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+msgid "The node count for this bay."
+msgstr "Количество узлов для отсека."
+
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 msgid "The notification methods to use when an alarm state is ALARM."
 msgstr ""
 "Методы уведомления для использования с состоянием ALARM предупреждения."
@@ -5777,6 +5852,12 @@ msgstr ""
 "Число последовательных успешных выполнений тестов работоспособности, после "
 "которых экземпляр будет переведен в работоспособное состояние."
 
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+msgid "The number of master nodes for this bay."
+msgstr "Количество главных узлов для отсека."
+
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 msgid "The number of objects stored in the container."
 msgstr "Число объектов, хранимых в контейнере."
 
@@ -5807,6 +5888,16 @@ msgstr ""
 "продолжать процесс создания стека."
 
 msgid ""
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+"The optional public key. This allows users to supply the public key from a "
+"pre-existing key pair. If not supplied, a new key pair will be generated."
+msgstr ""
+"Необязательный общий ключ. Позволяет пользователям указывать общий ключ из "
+"готовой пары ключей. Если не указан, будет создана новая пара ключей. "
+
+msgid ""
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 "The owner tenant ID of the address scope. Only administrative users can "
 "specify a tenant ID other than their own."
 msgstr ""
@@ -6314,6 +6405,12 @@ msgstr ""
 "простаивает в течение этого времени, оно будет закрыто. Значение '0' "
 "означает неограниченное ожидание."
 
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+msgid "Timeout for creating the bay in minutes. Set to 0 for no timeout."
+msgstr "Тайм-аут в минутах для создания отсека. Значение 0 отменяет тайм-аут."
+
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 msgid "Timeout in seconds for stack action (ie. create or update)."
 msgstr ""
 "Тайм-аут в секундах для действия над стеком (например, создать или обновить)."
@@ -6432,6 +6529,12 @@ msgstr ""
 msgid "URL of keystone service endpoint."
 msgstr "URL конечной точки службы keystone."
 
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+msgid "URL of the Heat CloudWatch server."
+msgstr "URL сервера Heat CloudWatch."
+
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 msgid ""
 "URL of the Heat metadata server. NOTE: Setting this is only needed if you "
 "require instances to use a different endpoint than in the keystone catalog"
@@ -6574,10 +6677,24 @@ msgstr "Неизвестный ключ(и) %s"
 msgid "Unknown share_status during creation of share \"{0}\""
 msgstr "Неизвестный share_status при создании общего ресурса \"{0}\""
 
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Unknown status creating Bay '%(name)s' - %(reason)s"
+msgstr "Неизвестное состояние при создании отсека '%(name)s' - %(reason)s"
+
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 msgid "Unknown status during deleting share \"{0}\""
 msgstr "Неизвестный код состояния при удалении общего ресурса \"{0}\""
 
 #, python-format
+<<<<<<< heat/locale/ru/LC_MESSAGES/heat.po
+=======
+msgid "Unknown status updating Bay '%(name)s' - %(reason)s"
+msgstr "Неизвестное состояние при обновлении отсека '%(name)s' - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/ru/LC_MESSAGES/heat.po
 msgid "Unknown status: %s"
 msgstr "Неизвестное состояние: %s"
 
diff --git a/heat/locale/zh_CN/LC_MESSAGES/heat.po b/heat/locale/zh_CN/LC_MESSAGES/heat.po
index eaac832..dd73b2b 100644
--- a/heat/locale/zh_CN/LC_MESSAGES/heat.po
+++ b/heat/locale/zh_CN/LC_MESSAGES/heat.po
@@ -9,7 +9,11 @@ msgid ""
 msgstr ""
 "Project-Id-Version: heat VERSION\n"
 "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
 "POT-Creation-Date: 2022-11-11 06:43+0000\n"
+=======
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -394,6 +398,12 @@ msgstr "端口的安全组列表。"
 msgid "A list of security services IDs or names."
 msgstr "安全服务标识或名称的列表。"
 
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+msgid "A list of string policies to apply. Defaults to anti-affinity."
+msgstr "要应用的字符串策略列表。缺省为“反亲缘关系”。"
+
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 msgid "A login profile for the user."
 msgstr "用户的登录概要文件。"
 
@@ -1495,6 +1505,12 @@ msgstr "必须为设备映射 %s 指定 volume_id 或 snapshot_id"
 msgid "Email address of keystone user."
 msgstr "keystone 用户的电子邮件地址。"
 
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+msgid "Enable the legacy OS::Heat::CWLiteAlarm resource."
+msgstr "请启用旧的 OS::Heat::CWLiteAlarm 资源。"
+
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 msgid "Enable the preview Stack Abandon feature."
 msgstr "请启用预览“堆栈放弃”功能。"
 
@@ -1636,6 +1652,13 @@ msgid "Failed to attach volume %(vol)s to server %(srv)s - %(err)s"
 msgstr "无法将卷 %(vol)s 附加至服务器 %(srv)s - %(err)s"
 
 #, python-format
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+msgid "Failed to create Bay '%(name)s' - %(reason)s"
+msgstr "无法创建支架“%(name)s” - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 msgid "Failed to detach interface (%(port)s) from server (%(server)s)"
 msgstr "无法从服务器 (%(server)s) 拆离接口 (%(port)s)"
 
@@ -1688,6 +1711,13 @@ msgstr "未能对其他引擎 (%(engine_id)s) 将消息发送至堆栈 (%(stack_
 msgid "Failed to stop stack (%(stack_name)s) on other engine (%(engine_id)s)"
 msgstr "未能对其他引擎 (%(engine_id)s) 停止堆栈 (%(stack_name)s)"
 
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Failed to update Bay '%(name)s' - %(reason)s"
+msgstr "无法更新支架“%(name)s” - %(reason)s"
+
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 msgid "Failed to update, can not found port info."
 msgstr "未能更新，找不到端口信息。"
 
@@ -2174,6 +2204,12 @@ msgstr "针对“%(fn_name)s”的不正确自变量应该是下列其中一项
 msgid "Incorrect arguments to \"%(fn_name)s\" should be: %(example)s"
 msgstr "针对“%(fn_name)s”的不正确自变量应该如下：%(example)s"
 
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+msgid "Incorrect arguments: Items to merge must be maps."
+msgstr "自变量不正确：要合并的项必须为映射。"
+
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 #, python-format
 msgid ""
 "Incorrect index to \"%(fn_name)s\" should be between 0 and %(max_index)s"
@@ -2755,6 +2791,17 @@ msgstr ""
 "要接受的消息头的最大行大小。将大型令牌（通常是由 Keystone V3 API 生成的那些令"
 "牌）与大型服务目录配合使用时，可能需要增大 max_header_line。"
 
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+msgid ""
+"Maximum line size of message headers to be accepted. max_header_line may "
+"need to be increased when using large tokens (typically those generated by "
+"the Keystone v3 API with big service catalogs.)"
+msgstr ""
+"要接受的消息头的最大行大小。将大型令牌（通常是由 Keystone V3 API 生成的那些令"
+"牌）与大型服务目录配合使用时，可能需要增大 max_header_line。"
+
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 msgid "Maximum number of instances in the group."
 msgstr "组中的最大实例数。"
 
@@ -2769,6 +2816,12 @@ msgid ""
 "established before it times out."
 msgstr "在连接超时之前供监视器等待该连接建立的最长时间，以秒计。"
 
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+msgid "Maximum number of stacks any one tenant may have active at one time."
+msgstr "一个租户可同时持有的最大活动堆栈数。"
+
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 msgid "Maximum prefix size that can be allocated from the subnet pool."
 msgstr "可从子网池分配的最大前缀大小。"
 
@@ -4860,6 +4913,12 @@ msgstr "将在其中创建卷的可用性区域。"
 msgid "The availability zone of shared filesystem."
 msgstr "共享文件系统的可用区域。"
 
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+msgid "The bay name."
+msgstr "支架名称。"
+
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 msgid "The bit-length of the secret."
 msgstr "密钥的位长度。"
 
@@ -5289,6 +5348,12 @@ msgstr "此 keystone 项目在层次结构中的父代的名称或标识。"
 msgid "The name or ID of target cluster."
 msgstr "目标集群的名称或标识。"
 
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+msgid "The name or ID of the bay model."
+msgstr "支架模型的名称或标识。"
+
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 msgid "The name or ID of the subnet on which to allocate the VIP address."
 msgstr "要在其上分配 VIP 地址的子网的名称或标识。"
 
@@ -5304,6 +5369,12 @@ msgstr "ike 策略的协商方式。"
 msgid "The next hop for the destination."
 msgstr "目标的下一中断段。"
 
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+msgid "The node count for this bay."
+msgstr "此支架的节点计数。"
+
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 msgid "The notification methods to use when an alarm state is ALARM."
 msgstr "要在警报状态为 ALARM 时使用的通知方法。"
 
@@ -5329,6 +5400,12 @@ msgid ""
 "instance to the healthy state."
 msgstr "将实例移至正常状态之前需要的连续运行状况探测器成功次数。"
 
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+msgid "The number of master nodes for this bay."
+msgstr "此支架的主节点数。"
+
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 msgid "The number of objects stored in the container."
 msgstr "容器中存储的对象数。"
 
@@ -5357,6 +5434,16 @@ msgid ""
 msgstr "在堆栈创建过程继续之前，必须接收到的成功信号数。"
 
 msgid ""
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+"The optional public key. This allows users to supply the public key from a "
+"pre-existing key pair. If not supplied, a new key pair will be generated."
+msgstr ""
+"可选公用密钥。这允许用户提供来自预先存在的密钥对的公用密钥。如果未提供，那么"
+"将生成新密钥对。"
+
+msgid ""
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 "The owner tenant ID of the address scope. Only administrative users can "
 "specify a tenant ID other than their own."
 msgstr "地址范围的所有者租户标识。只有管理用户才能指定他们自身以外的租户标识。"
@@ -5815,6 +5902,12 @@ msgstr ""
 "客户机连接的套接字操作的超时。如果入局连接的空闲时间达到此秒数，那么该连接将"
 "被关闭。值“0”意味着永远等待。"
 
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+msgid "Timeout for creating the bay in minutes. Set to 0 for no timeout."
+msgstr "针对创建支架的超时（以分钟计）。设置为 0 表示无超时。"
+
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 msgid "Timeout in seconds for stack action (ie. create or update)."
 msgstr "堆栈操作（即，创建或更新）的超时，以秒计。"
 
@@ -5922,6 +6015,12 @@ msgstr "资源将在其中发出信号指示完成及（可选）上载数据的
 msgid "URL of keystone service endpoint."
 msgstr "keystone 服务端点的 URL。"
 
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+msgid "URL of the Heat CloudWatch server."
+msgstr "Heat CloudWatch 服务器的 URL。"
+
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 msgid ""
 "URL of the Heat metadata server. NOTE: Setting this is only needed if you "
 "require instances to use a different endpoint than in the keystone catalog"
@@ -6047,10 +6146,24 @@ msgstr "键 %s 未知"
 msgid "Unknown share_status during creation of share \"{0}\""
 msgstr "创建共享“{0}”期间出现未知 share_status"
 
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Unknown status creating Bay '%(name)s' - %(reason)s"
+msgstr "创建支架“%(name)s” 时出现未知状态 - %(reason)s"
+
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 msgid "Unknown status during deleting share \"{0}\""
 msgstr "删除共享“{0}”期间出现未知状态"
 
 #, python-format
+<<<<<<< heat/locale/zh_CN/LC_MESSAGES/heat.po
+=======
+msgid "Unknown status updating Bay '%(name)s' - %(reason)s"
+msgstr "更新支架“%(name)s” 时出现未知状态 - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/zh_CN/LC_MESSAGES/heat.po
 msgid "Unknown status: %s"
 msgstr "未知状态：%s"
 
diff --git a/heat/locale/zh_TW/LC_MESSAGES/heat.po b/heat/locale/zh_TW/LC_MESSAGES/heat.po
index 552305a..df5f9e4 100644
--- a/heat/locale/zh_TW/LC_MESSAGES/heat.po
+++ b/heat/locale/zh_TW/LC_MESSAGES/heat.po
@@ -8,7 +8,11 @@ msgid ""
 msgstr ""
 "Project-Id-Version: heat VERSION\n"
 "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
 "POT-Creation-Date: 2022-11-11 06:43+0000\n"
+=======
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -393,6 +397,12 @@ msgstr "埠的安全群組清單。"
 msgid "A list of security services IDs or names."
 msgstr "安全服務 ID 或名稱清單。"
 
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+msgid "A list of string policies to apply. Defaults to anti-affinity."
+msgstr "要套用的字串原則清單。預設為反親緣性。"
+
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 msgid "A login profile for the user."
 msgstr "使用者的登入設定檔。"
 
@@ -1496,6 +1506,12 @@ msgstr "必須給裝置對映 %s 指定 volume_id 或 snapshot_id"
 msgid "Email address of keystone user."
 msgstr "Keystone 使用者的電子郵件位址。"
 
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+msgid "Enable the legacy OS::Heat::CWLiteAlarm resource."
+msgstr "啟用舊式 OS::Heat::CWLiteAlarm 資源。"
+
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 msgid "Enable the preview Stack Abandon feature."
 msgstr "啟用預覽「堆疊放棄」功能。"
 
@@ -1639,6 +1655,13 @@ msgid "Failed to attach volume %(vol)s to server %(srv)s - %(err)s"
 msgstr "無法將磁區 %(vol)s 連接至伺服器 %(srv)s - %(err)s"
 
 #, python-format
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+msgid "Failed to create Bay '%(name)s' - %(reason)s"
+msgstr "無法建立機架 '%(name)s' - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 msgid "Failed to detach interface (%(port)s) from server (%(server)s)"
 msgstr "無法將介面 (%(port)s) 從伺服器 (%(server)s) 分離"
 
@@ -1691,6 +1714,13 @@ msgstr "無法將訊息傳送至其他引擎 (%(engine_id)s) 上的堆疊 (%(sta
 msgid "Failed to stop stack (%(stack_name)s) on other engine (%(engine_id)s)"
 msgstr "無法在其他引擎 (%(engine_id)s) 上停止堆疊 (%(stack_name)s)"
 
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Failed to update Bay '%(name)s' - %(reason)s"
+msgstr "無法更新機架 '%(name)s' - %(reason)s"
+
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 msgid "Failed to update, can not found port info."
 msgstr "無法更新，找不到埠資訊。"
 
@@ -2179,6 +2209,12 @@ msgstr "\"%(fn_name)s\" 的引數不正確，應該是下列其中一項：%(all
 msgid "Incorrect arguments to \"%(fn_name)s\" should be: %(example)s"
 msgstr "\"%(fn_name)s\" 的引數不正確，應該是：%(example)s"
 
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+msgid "Incorrect arguments: Items to merge must be maps."
+msgstr "引數不正確：要合併的項目必須是對映。"
+
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 #, python-format
 msgid ""
 "Incorrect index to \"%(fn_name)s\" should be between 0 and %(max_index)s"
@@ -2764,6 +2800,17 @@ msgstr ""
 "要接受的訊息標頭行大小上限。如果使用大記號（通常是那些由 Keystone 第 3 版 "
 "API 透過大型服務型錄所產生的記號），則可能需要增加 max_header_line 值。"
 
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+msgid ""
+"Maximum line size of message headers to be accepted. max_header_line may "
+"need to be increased when using large tokens (typically those generated by "
+"the Keystone v3 API with big service catalogs.)"
+msgstr ""
+"要接受的訊息標頭行大小上限。如果使用大記號（通常是那些由 Keystone 第 3 版 "
+"API 透過大型服務型錄所產生的記號），則可能需要增加 max_header_line 值。"
+
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 msgid "Maximum number of instances in the group."
 msgstr "群組中的實例數目上限。"
 
@@ -2778,6 +2825,12 @@ msgid ""
 "established before it times out."
 msgstr "監視器等待建立連線發生逾時之前的秒數上限。"
 
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+msgid "Maximum number of stacks any one tenant may have active at one time."
+msgstr "任何一個承租人一次可具有的作用中堆疊數目上限。"
+
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 msgid "Maximum prefix size that can be allocated from the subnet pool."
 msgstr "可從子網路儲存區配置的字首大小上限。"
 
@@ -4870,6 +4923,12 @@ msgstr "將在其中建立磁區的可用性區域。"
 msgid "The availability zone of shared filesystem."
 msgstr "共用檔案系統的可用性區域。"
 
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+msgid "The bay name."
+msgstr "機架名稱。"
+
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 msgid "The bit-length of the secret."
 msgstr "密碼的位元長度。"
 
@@ -5301,6 +5360,12 @@ msgstr "在階層中，此 Keystone 專案之母項的名稱或 ID。"
 msgid "The name or ID of target cluster."
 msgstr "目標叢集的名稱或 ID。"
 
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+msgid "The name or ID of the bay model."
+msgstr "機架型號的名稱或 ID。"
+
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 msgid "The name or ID of the subnet on which to allocate the VIP address."
 msgstr "在其中配置 VIP 位址之子網路的名稱或 ID。"
 
@@ -5316,6 +5381,12 @@ msgstr "IKE 原則的協議模式。"
 msgid "The next hop for the destination."
 msgstr "目標的下一個中繼站。"
 
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+msgid "The node count for this bay."
+msgstr "此機架的節點計數。"
+
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 msgid "The notification methods to use when an alarm state is ALARM."
 msgstr "在警示狀態為「警示」時，要使用的通知方法。"
 
@@ -5341,6 +5412,12 @@ msgid ""
 "instance to the healthy state."
 msgstr "將實例移至狀況良好狀態之前所需的連續性能探測成功次數。"
 
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+msgid "The number of master nodes for this bay."
+msgstr "此機架的主要節點數目。"
+
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 msgid "The number of objects stored in the container."
 msgstr "儲存器中所儲存的物件數。"
 
@@ -5369,6 +5446,16 @@ msgid ""
 msgstr "繼續執行堆疊建立程序之前，必須收到的成功信號數目。"
 
 msgid ""
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+"The optional public key. This allows users to supply the public key from a "
+"pre-existing key pair. If not supplied, a new key pair will be generated."
+msgstr ""
+"選用的公開金鑰。這容許使用者從預先存在的金鑰組提供公開金鑰。如果沒有提供，則"
+"將產生新金鑰組。"
+
+msgid ""
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 "The owner tenant ID of the address scope. Only administrative users can "
 "specify a tenant ID other than their own."
 msgstr "位址範圍的擁有者租戶 ID。僅管理使用者可以指定其他使用者的租戶 ID。"
@@ -5827,6 +5914,12 @@ msgstr ""
 "用戶端連線的 Socket 作業逾時。如果送入的連線處於閒置狀態的時間達到此秒數，則"
 "會將其關閉。值 '0' 表示永久等待。"
 
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+msgid "Timeout for creating the bay in minutes. Set to 0 for no timeout."
+msgstr "用於建立機架的逾時（以分鐘為單位）。設定為 0 表示沒有逾時。"
+
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 msgid "Timeout in seconds for stack action (ie. create or update)."
 msgstr "堆疊動作（例如，建立或更新）的逾時值（以秒為單位）。"
 
@@ -5933,6 +6026,12 @@ msgstr "TempURL 的 URL，資源將在該 URL 處傳送完成信號，並選擇
 msgid "URL of keystone service endpoint."
 msgstr "Keystone 服務端點的 URL。"
 
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+msgid "URL of the Heat CloudWatch server."
+msgstr "Heat CloudWatch 伺服器的 URL。"
+
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 msgid ""
 "URL of the Heat metadata server. NOTE: Setting this is only needed if you "
 "require instances to use a different endpoint than in the keystone catalog"
@@ -6058,10 +6157,24 @@ msgstr "不明的索引鍵 %s"
 msgid "Unknown share_status during creation of share \"{0}\""
 msgstr "建立共用項目 \"{0}\" 時的不明 share_status"
 
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+#, python-format
+msgid "Unknown status creating Bay '%(name)s' - %(reason)s"
+msgstr "建立機架 '%(name)s' 時的不明狀態 - %(reason)s"
+
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 msgid "Unknown status during deleting share \"{0}\""
 msgstr "刪除共用項目 \"{0}\" 時的不明狀態"
 
 #, python-format
+<<<<<<< heat/locale/zh_TW/LC_MESSAGES/heat.po
+=======
+msgid "Unknown status updating Bay '%(name)s' - %(reason)s"
+msgstr "更新機架 '%(name)s' 時的不明狀態 - %(reason)s"
+
+#, python-format
+>>>>>>> heat/locale/zh_TW/LC_MESSAGES/heat.po
 msgid "Unknown status: %s"
 msgstr "不明狀態：%s"
 
diff --git a/heat/policies/actions.py b/heat/policies/actions.py
index 1aa53fd..7c7066c 100644
--- a/heat/policies/actions.py
+++ b/heat/policies/actions.py
@@ -10,13 +10,17 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+<<<<<<< heat/policies/actions.py
 from oslo_log import versionutils
+=======
+>>>>>>> heat/policies/actions.py
 from oslo_policy import policy
 
 from heat.policies import base
 
 POLICY_ROOT = 'actions:%s'
 
+<<<<<<< heat/policies/actions.py
 DEPRECATED_REASON = """
 The actions API now supports system scope and default roles.
 """
@@ -63,12 +67,29 @@ deprecated_cancel_without_rollback = policy.DeprecatedRule(
     deprecated_reason=DEPRECATED_REASON,
     deprecated_since=versionutils.deprecated.WALLABY
 )
+=======
+
+def _action_rule(action_name, description):
+    return policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % action_name,
+        check_str='rule:%s' % (POLICY_ROOT % 'action'),
+        description=description,
+        operations=[{
+            'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/actions',
+            'method': 'POST',
+        }]
+    )
+>>>>>>> heat/policies/actions.py
 
 
 actions_policies = [
     policy.DocumentedRuleDefault(
         name=POLICY_ROOT % 'action',
+<<<<<<< heat/policies/actions.py
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+=======
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/actions.py
         description='Performs non-lifecycle operations on the stack '
         '(Snapshot, Resume, Cancel update, or check stack resources). '
         'This is the default for all actions but can be overridden by more '
@@ -77,6 +98,7 @@ actions_policies = [
             'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/actions',
             'method': 'POST',
         }],
+<<<<<<< heat/policies/actions.py
         deprecated_rule=deprecated_action
     ),
     policy.DocumentedRuleDefault(
@@ -145,6 +167,16 @@ actions_policies = [
         }],
         deprecated_rule=deprecated_cancel_without_rollback
     )
+=======
+    ),
+    _action_rule('snapshot', 'Create stack snapshot.'),
+    _action_rule('suspend', 'Suspend a stack.'),
+    _action_rule('resume', 'Resume a suspended stack.'),
+    _action_rule('check', 'Check stack resources.'),
+    _action_rule('cancel_update', 'Cancel stack operation and roll back.'),
+    _action_rule('cancel_without_rollback',
+                 'Cancel stack operation without rolling back.'),
+>>>>>>> heat/policies/actions.py
 ]
 
 
diff --git a/heat/policies/base.py b/heat/policies/base.py
index cdc3b9f..0d1df38 100644
--- a/heat/policies/base.py
+++ b/heat/policies/base.py
@@ -18,6 +18,7 @@ RULE_DENY_STACK_USER = 'rule:deny_stack_user'
 RULE_DENY_EVERYBODY = 'rule:deny_everybody'
 RULE_ALLOW_EVERYBODY = 'rule:allow_everybody'
 
+<<<<<<< heat/policies/base.py
 # Check strings that embody common personas
 SYSTEM_ADMIN = 'role:admin and system_scope:all'
 SYSTEM_READER = 'role:reader and system_scope:all'
@@ -49,14 +50,20 @@ SYSTEM_OR_PROJECT_READER_OR_STACK_USER = (
     ' or (' + PROJECT_STACK_USER + ')'
 )
 
+=======
+>>>>>>> heat/policies/base.py
 
 rules = [
     policy.RuleDefault(
         name="context_is_admin",
+<<<<<<< heat/policies/base.py
         check_str=(
             "(role:admin and is_admin_project:True) OR "
             "(" + SYSTEM_ADMIN + ")"
         ),
+=======
+        check_str="role:admin and is_admin_project:True",
+>>>>>>> heat/policies/base.py
         description="Decides what is required for the 'is_admin:True' check "
         "to succeed."),
     policy.RuleDefault(
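The two `context_is_admin` check strings in this hunk differ only in whether a system-scoped admin also passes. The standalone sketch below (illustrative wiring, not Heat's) registers the combined rule with an oslo.policy Enforcer and evaluates it for both credential styles.

    from oslo_config import cfg
    from oslo_policy import policy

    CONF = cfg.ConfigOpts()
    CONF([], project='policy-demo')          # isolated config, no policy file
    enforcer = policy.Enforcer(CONF)
    enforcer.register_default(policy.RuleDefault(
        name='context_is_admin',
        check_str='(role:admin and is_admin_project:True) or '
                  '(role:admin and system_scope:all)',
    ))

    legacy_admin = {'roles': ['admin'], 'is_admin_project': True}
    system_admin = {'roles': ['admin'], 'system_scope': 'all'}

    # Both pass the combined rule; the shorter pre-SRBAC string on the lower
    # side of the conflict would reject the system-scoped credentials.
    print(enforcer.enforce('context_is_admin', {}, legacy_admin))   # True
    print(enforcer.enforce('context_is_admin', {}, system_admin))   # True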
diff --git a/heat/policies/build_info.py b/heat/policies/build_info.py
index 5bc8e21..d50adad 100644
--- a/heat/policies/build_info.py
+++ b/heat/policies/build_info.py
@@ -10,11 +10,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+<<<<<<< heat/policies/build_info.py
 from oslo_log import versionutils
+=======
+>>>>>>> heat/policies/build_info.py
 from oslo_policy import policy
 
 from heat.policies import base
 
+<<<<<<< heat/policies/build_info.py
 DEPRECATED_REASON = """
 The build API now supports system scope and default roles.
 """
@@ -34,14 +38,26 @@ build_info_policies = [
         name=POLICY_ROOT % 'build_info',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+POLICY_ROOT = 'build_info:%s'
+
+build_info_policies = [
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'build_info',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/build_info.py
         description='Show build information.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/build_info',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/build_info.py
         ],
         deprecated_rule=deprecated_build_info
+=======
+        ]
+>>>>>>> heat/policies/build_info.py
     )
 ]
 
diff --git a/heat/policies/cloudformation.py b/heat/policies/cloudformation.py
index 2508d8d..a60581d 100644
--- a/heat/policies/cloudformation.py
+++ b/heat/policies/cloudformation.py
@@ -10,7 +10,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+<<<<<<< heat/policies/cloudformation.py
 from oslo_log import versionutils
+=======
+>>>>>>> heat/policies/cloudformation.py
 from oslo_policy import policy
 
 from heat.policies import base
@@ -18,6 +21,7 @@ from heat.policies import base
 # These policies are for AWS CloudFormation-like APIs, so we won't list out
 # the URI paths in rules.
 
+<<<<<<< heat/policies/cloudformation.py
 DEPRECATED_REASON = """
 The cloud formation API now supports system scope and default roles.
 """
@@ -182,6 +186,50 @@ cloudformation_policies = [
         scope_types=['system', 'project'],
         deprecated_rule=deprecated_list_stack_resources
     )
+=======
+POLICY_ROOT = 'cloudformation:%s'
+
+cloudformation_policies = [
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'ListStacks',
+        check_str=base.RULE_DENY_STACK_USER),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'CreateStack',
+        check_str=base.RULE_DENY_STACK_USER),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'DescribeStacks',
+        check_str=base.RULE_DENY_STACK_USER),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'DeleteStack',
+        check_str=base.RULE_DENY_STACK_USER),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'UpdateStack',
+        check_str=base.RULE_DENY_STACK_USER),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'CancelUpdateStack',
+        check_str=base.RULE_DENY_STACK_USER),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'DescribeStackEvents',
+        check_str=base.RULE_DENY_STACK_USER),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'ValidateTemplate',
+        check_str=base.RULE_DENY_STACK_USER),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'GetTemplate',
+        check_str=base.RULE_DENY_STACK_USER),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'EstimateTemplateCost',
+        check_str=base.RULE_DENY_STACK_USER),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'DescribeStackResource',
+        check_str=base.RULE_ALLOW_EVERYBODY),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'DescribeStackResources',
+        check_str=base.RULE_DENY_STACK_USER),
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'ListStackResources',
+        check_str=base.RULE_DENY_STACK_USER)
+>>>>>>> heat/policies/cloudformation.py
 ]
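The lower side of the cloudformation.py conflict lists one plain RuleDefault per CFN action, almost all with the same deny_stack_user check. Purely as an illustration (not the project's code), an equivalent list can be generated from the action names, keeping DescribeStackResource as the single allow_everybody exception the hunk shows:

    from oslo_policy import policy

    POLICY_ROOT = 'cloudformation:%s'

    _DENIED_TO_STACK_USERS = (
        'ListStacks', 'CreateStack', 'DescribeStacks', 'DeleteStack',
        'UpdateStack', 'CancelUpdateStack', 'DescribeStackEvents',
        'ValidateTemplate', 'GetTemplate', 'EstimateTemplateCost',
        'DescribeStackResources', 'ListStackResources',
    )

    cloudformation_policies = [
        policy.RuleDefault(name=POLICY_ROOT % action,
                           check_str='rule:deny_stack_user')
        for action in _DENIED_TO_STACK_USERS
    ]
    # The one action left open to everybody in the conflict text.
    cloudformation_policies.append(
        policy.RuleDefault(name=POLICY_ROOT % 'DescribeStackResource',
                           check_str='rule:allow_everybody'))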
 
 
diff --git a/heat/policies/events.py b/heat/policies/events.py
index b314e70..de256ae 100644
--- a/heat/policies/events.py
+++ b/heat/policies/events.py
@@ -10,13 +10,17 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+<<<<<<< heat/policies/events.py
 from oslo_log import versionutils
+=======
+>>>>>>> heat/policies/events.py
 from oslo_policy import policy
 
 from heat.policies import base
 
 POLICY_ROOT = 'events:%s'
 
+<<<<<<< heat/policies/events.py
 DEPRECATED_REASON = """
 The events API now supports system scope and default roles.
 """
@@ -40,6 +44,12 @@ events_policies = [
         name=POLICY_ROOT % 'index',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+events_policies = [
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'index',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/events.py
         description='List events.',
         operations=[
             {
@@ -47,6 +57,7 @@ events_policies = [
                 'events',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/events.py
         ],
         deprecated_rule=deprecated_index
     ),
@@ -54,6 +65,13 @@ events_policies = [
         name=POLICY_ROOT % 'show',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'show',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/events.py
         description='Show event.',
         operations=[
             {
@@ -61,8 +79,12 @@ events_policies = [
                 'resources/{resource_name}/events/{event_id}',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/events.py
         ],
         deprecated_rule=deprecated_show
+=======
+        ]
+>>>>>>> heat/policies/events.py
     )
 ]
 
diff --git a/heat/policies/resource.py b/heat/policies/resource.py
index 85f5821..b691665 100644
--- a/heat/policies/resource.py
+++ b/heat/policies/resource.py
@@ -10,13 +10,17 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+<<<<<<< heat/policies/resource.py
 from oslo_log import versionutils
+=======
+>>>>>>> heat/policies/resource.py
 from oslo_policy import policy
 
 from heat.policies import base
 
 POLICY_ROOT = 'resource:%s'
 
+<<<<<<< heat/policies/resource.py
 DEPRECATED_REASON = """
 The resources API now supports system scope and default roles.
 """
@@ -57,6 +61,12 @@ resource_policies = [
         name=POLICY_ROOT % 'index',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+resource_policies = [
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'index',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/resource.py
         description='List resources.',
         operations=[
             {
@@ -64,6 +74,7 @@ resource_policies = [
                 'resources',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/resource.py
         ],
         deprecated_rule=deprecated_list_resources
     ),
@@ -71,6 +82,13 @@ resource_policies = [
         name=POLICY_ROOT % 'metadata',
         check_str=base.SYSTEM_OR_PROJECT_READER_OR_STACK_USER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'metadata',
+        check_str=base.RULE_ALLOW_EVERYBODY,
+>>>>>>> heat/policies/resource.py
         description='Show resource metadata.',
         operations=[
             {
@@ -78,6 +96,7 @@ resource_policies = [
                 'resources/{resource_name}/metadata',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/resource.py
         ],
         deprecated_rule=deprecated_metadata
     ),
@@ -85,6 +104,13 @@ resource_policies = [
         name=POLICY_ROOT % 'signal',
         check_str=base.SYSTEM_OR_PROJECT_READER_OR_STACK_USER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'signal',
+        check_str=base.RULE_ALLOW_EVERYBODY,
+>>>>>>> heat/policies/resource.py
         description='Signal resource.',
         operations=[
             {
@@ -92,6 +118,7 @@ resource_policies = [
                 'resources/{resource_name}/signal',
                 'method': 'POST'
             }
+<<<<<<< heat/policies/resource.py
         ],
         deprecated_rule=deprecated_signal
     ),
@@ -99,6 +126,13 @@ resource_policies = [
         name=POLICY_ROOT % 'mark_unhealthy',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'mark_unhealthy',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/resource.py
         description='Mark resource as unhealthy.',
         operations=[
             {
@@ -106,6 +140,7 @@ resource_policies = [
                 'resources/{resource_name_or_physical_id}',
                 'method': 'PATCH'
             }
+<<<<<<< heat/policies/resource.py
         ],
         deprecated_rule=deprecated_mark_unhealthy
     ),
@@ -113,6 +148,13 @@ resource_policies = [
         name=POLICY_ROOT % 'show',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'show',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/resource.py
         description='Show resource.',
         operations=[
             {
@@ -120,8 +162,12 @@ resource_policies = [
                 'resources/{resource_name}',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/resource.py
         ],
         deprecated_rule=deprecated_show_resource
+=======
+        ]
+>>>>>>> heat/policies/resource.py
     )
 ]
 
diff --git a/heat/policies/resource_types.py b/heat/policies/resource_types.py
index 3bb3fc6..9c71e99 100644
--- a/heat/policies/resource_types.py
+++ b/heat/policies/resource_types.py
@@ -57,9 +57,12 @@ resource_types_policies = [
         name=POLICY_ROOT % 'OS::Neutron::QoSMinimumBandwidthRule',
         check_str=base.RULE_PROJECT_ADMIN),
     policy.RuleDefault(
+<<<<<<< heat/policies/resource_types.py
         name=POLICY_ROOT % 'OS::Neutron::QoSMinimumPacketRateRule',
         check_str=base.RULE_PROJECT_ADMIN),
     policy.RuleDefault(
+=======
+>>>>>>> heat/policies/resource_types.py
         name=POLICY_ROOT % 'OS::Neutron::Segment',
         check_str=base.RULE_PROJECT_ADMIN),
     policy.RuleDefault(
diff --git a/heat/policies/service.py b/heat/policies/service.py
index 3c3f422..69407ac 100644
--- a/heat/policies/service.py
+++ b/heat/policies/service.py
@@ -10,11 +10,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+<<<<<<< heat/policies/service.py
 from oslo_log import versionutils
+=======
+>>>>>>> heat/policies/service.py
 from oslo_policy import policy
 
 from heat.policies import base
 
+<<<<<<< heat/policies/service.py
 DEPRECATED_REASON = """
 The service API now supports system scope and default roles.
 """
@@ -34,6 +38,14 @@ service_policies = [
         check_str=base.SYSTEM_READER,
         deprecated_rule=deprecated_index
     )
+=======
+POLICY_ROOT = 'service:%s'
+
+service_policies = [
+    policy.RuleDefault(
+        name=POLICY_ROOT % 'index',
+        check_str=base.RULE_CONTEXT_IS_ADMIN)
+>>>>>>> heat/policies/service.py
 ]
 
 
diff --git a/heat/policies/software_configs.py b/heat/policies/software_configs.py
index be5ca04..bec01d6 100644
--- a/heat/policies/software_configs.py
+++ b/heat/policies/software_configs.py
@@ -10,11 +10,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+<<<<<<< heat/policies/software_configs.py
 from oslo_log import versionutils
+=======
+>>>>>>> heat/policies/software_configs.py
 from oslo_policy import policy
 
 from heat.policies import base
 
+<<<<<<< heat/policies/software_configs.py
 DEPRECATED_REASON = """
 The software configuration API now supports system scope and default roles.
 """
@@ -57,12 +61,21 @@ software_configs_policies = [
         name=POLICY_ROOT % 'global_index',
         check_str=base.SYSTEM_READER,
         scope_types=['system', 'project'],
+=======
+POLICY_ROOT = 'software_configs:%s'
+
+software_configs_policies = [
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'global_index',
+        check_str=base.RULE_DENY_EVERYBODY,
+>>>>>>> heat/policies/software_configs.py
         description='List configs globally.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/software_configs',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/software_configs.py
         ],
         deprecated_rule=deprecated_global_index
     ),
@@ -70,12 +83,20 @@ software_configs_policies = [
         name=POLICY_ROOT % 'index',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'index',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/software_configs.py
         description='List configs.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/software_configs',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/software_configs.py
         ],
         deprecated_rule=deprecated_index
     ),
@@ -83,12 +104,20 @@ software_configs_policies = [
         name=POLICY_ROOT % 'create',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'create',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/software_configs.py
         description='Create config.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/software_configs',
                 'method': 'POST'
             }
+<<<<<<< heat/policies/software_configs.py
         ],
         deprecated_rule=deprecated_create
     ),
@@ -96,12 +125,20 @@ software_configs_policies = [
         name=POLICY_ROOT % 'show',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'show',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/software_configs.py
         description='Show config details.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/software_configs/{config_id}',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/software_configs.py
         ],
         deprecated_rule=deprecated_show
     ),
@@ -109,14 +146,25 @@ software_configs_policies = [
         name=POLICY_ROOT % 'delete',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'delete',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/software_configs.py
         description='Delete config.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/software_configs/{config_id}',
                 'method': 'DELETE'
             }
+<<<<<<< heat/policies/software_configs.py
         ],
         deprecated_rule=deprecated_delete
+=======
+        ]
+>>>>>>> heat/policies/software_configs.py
     )
 ]
 
diff --git a/heat/policies/software_deployments.py b/heat/policies/software_deployments.py
index 08e59c6..0d1f170 100644
--- a/heat/policies/software_deployments.py
+++ b/heat/policies/software_deployments.py
@@ -10,11 +10,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+<<<<<<< heat/policies/software_deployments.py
 from oslo_log import versionutils
+=======
+>>>>>>> heat/policies/software_deployments.py
 from oslo_policy import policy
 
 from heat.policies import base
 
+<<<<<<< heat/policies/software_deployments.py
 DEPRECATED_REASON = """
 The software deployment API now supports system scope and default roles.
 """
@@ -58,12 +62,21 @@ software_deployments_policies = [
         name=POLICY_ROOT % 'index',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+POLICY_ROOT = 'software_deployments:%s'
+
+software_deployments_policies = [
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'index',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/software_deployments.py
         description='List deployments.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/software_deployments',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/software_deployments.py
         ],
         deprecated_rule=deprecated_index
     ),
@@ -71,12 +84,20 @@ software_deployments_policies = [
         name=POLICY_ROOT % 'create',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'create',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/software_deployments.py
         description='Create deployment.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/software_deployments',
                 'method': 'POST'
             }
+<<<<<<< heat/policies/software_deployments.py
         ],
         deprecated_rule=deprecated_create
     ),
@@ -84,12 +105,20 @@ software_deployments_policies = [
         name=POLICY_ROOT % 'show',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'show',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/software_deployments.py
         description='Show deployment details.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/software_deployments/{deployment_id}',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/software_deployments.py
         ],
         deprecated_rule=deprecated_show
     ),
@@ -97,12 +126,20 @@ software_deployments_policies = [
         name=POLICY_ROOT % 'update',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'update',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/software_deployments.py
         description='Update deployment.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/software_deployments/{deployment_id}',
                 'method': 'PUT'
             }
+<<<<<<< heat/policies/software_deployments.py
         ],
         deprecated_rule=deprecated_update
     ),
@@ -110,12 +147,20 @@ software_deployments_policies = [
         name=POLICY_ROOT % 'delete',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'delete',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/software_deployments.py
         description='Delete deployment.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/software_deployments/{deployment_id}',
                 'method': 'DELETE'
             }
+<<<<<<< heat/policies/software_deployments.py
         ],
         deprecated_rule=deprecated_delete
     ),
@@ -123,6 +168,13 @@ software_deployments_policies = [
         name=POLICY_ROOT % 'metadata',
         check_str=base.SYSTEM_OR_PROJECT_READER_OR_STACK_USER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'metadata',
+        check_str=base.RULE_ALLOW_EVERYBODY,
+>>>>>>> heat/policies/software_deployments.py
         description='Show server configuration metadata.',
         operations=[
             {
diff --git a/heat/policies/stacks.py b/heat/policies/stacks.py
index cebcf5a..c993d4a 100644
--- a/heat/policies/stacks.py
+++ b/heat/policies/stacks.py
@@ -10,11 +10,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+<<<<<<< heat/policies/stacks.py
 from oslo_log import versionutils
+=======
+>>>>>>> heat/policies/stacks.py
 from oslo_policy import policy
 
 from heat.policies import base
 
+<<<<<<< heat/policies/stacks.py
 DEPRECATED_REASON = """
 The stack API now supports system scope and default roles.
 """
@@ -208,6 +212,14 @@ stacks_policies = [
         name=POLICY_ROOT % 'abandon',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+POLICY_ROOT = 'stacks:%s'
+
+stacks_policies = [
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'abandon',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Abandon stack.',
         operations=[
             {
@@ -215,6 +227,7 @@ stacks_policies = [
                 'abandon',
                 'method': 'DELETE'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_abandon
     ),
@@ -222,12 +235,20 @@ stacks_policies = [
         name=POLICY_ROOT % 'create',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'create',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Create stack.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/stacks',
                 'method': 'POST'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_create
     ),
@@ -235,12 +256,20 @@ stacks_policies = [
         name=POLICY_ROOT % 'delete',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'delete',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Delete stack.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}',
                 'method': 'DELETE'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_delete
     ),
@@ -248,12 +277,20 @@ stacks_policies = [
         name=POLICY_ROOT % 'detail',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'detail',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='List stacks in detail.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/stacks',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_detail
     ),
@@ -261,6 +298,13 @@ stacks_policies = [
         name=POLICY_ROOT % 'export',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'export',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Export stack.',
         operations=[
             {
@@ -268,6 +312,7 @@ stacks_policies = [
                 'export',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_export
     ),
@@ -275,6 +320,13 @@ stacks_policies = [
         name=POLICY_ROOT % 'generate_template',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'generate_template',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Generate stack template.',
         operations=[
             {
@@ -282,6 +334,7 @@ stacks_policies = [
                 'template',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_generate_template
     ),
@@ -289,12 +342,20 @@ stacks_policies = [
         name=POLICY_ROOT % 'global_index',
         check_str=base.SYSTEM_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'global_index',
+        check_str=base.RULE_DENY_EVERYBODY,
+>>>>>>> heat/policies/stacks.py
         description='List stacks globally.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/stacks',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_global_index
     ),
@@ -302,12 +363,20 @@ stacks_policies = [
         name=POLICY_ROOT % 'index',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'index',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='List stacks.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/stacks',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_index
     ),
@@ -315,12 +384,20 @@ stacks_policies = [
         name=POLICY_ROOT % 'list_resource_types',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'list_resource_types',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='List resource types.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/resource_types',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_list_resource_types
     ),
@@ -328,12 +405,20 @@ stacks_policies = [
         name=POLICY_ROOT % 'list_template_versions',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'list_template_versions',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='List template versions.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/template_versions',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_list_template_versions
     ),
@@ -341,6 +426,13 @@ stacks_policies = [
         name=POLICY_ROOT % 'list_template_functions',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'list_template_functions',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='List template functions.',
         operations=[
             {
@@ -348,6 +440,7 @@ stacks_policies = [
                 '{template_version}/functions',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_list_template_functions
     ),
@@ -355,12 +448,20 @@ stacks_policies = [
         name=POLICY_ROOT % 'lookup',
         check_str=base.SYSTEM_OR_PROJECT_READER_OR_STACK_USER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'lookup',
+        check_str=base.RULE_ALLOW_EVERYBODY,
+>>>>>>> heat/policies/stacks.py
         description='Find stack.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/stacks/{stack_identity}',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_lookup
     ),
@@ -368,12 +469,20 @@ stacks_policies = [
         name=POLICY_ROOT % 'preview',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'preview',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Preview stack.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/stacks/preview',
                 'method': 'POST'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_preview
     ),
@@ -381,12 +490,20 @@ stacks_policies = [
         name=POLICY_ROOT % 'resource_schema',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'resource_schema',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Show resource type schema.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/resource_types/{type_name}',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_resource_schema
     ),
@@ -394,12 +511,20 @@ stacks_policies = [
         name=POLICY_ROOT % 'show',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'show',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Show stack.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/stacks/{stack_identity}',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_show
     ),
@@ -407,6 +532,13 @@ stacks_policies = [
         name=POLICY_ROOT % 'template',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'template',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Get stack template.',
         operations=[
             {
@@ -414,6 +546,7 @@ stacks_policies = [
                 'template',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_template
     ),
@@ -421,6 +554,13 @@ stacks_policies = [
         name=POLICY_ROOT % 'environment',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'environment',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Get stack environment.',
         operations=[
             {
@@ -428,6 +568,7 @@ stacks_policies = [
                 'environment',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_environment
     ),
@@ -435,6 +576,13 @@ stacks_policies = [
         name=POLICY_ROOT % 'files',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'files',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Get stack files.',
         operations=[
             {
@@ -442,6 +590,7 @@ stacks_policies = [
                 'files',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_files
     ),
@@ -449,12 +598,20 @@ stacks_policies = [
         name=POLICY_ROOT % 'update',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'update',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Update stack.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}',
                 'method': 'PUT'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_update
     ),
@@ -462,12 +619,20 @@ stacks_policies = [
         name=POLICY_ROOT % 'update_patch',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'update_patch',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Update stack (PATCH).',
         operations=[
             {
                 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}',
                 'method': 'PATCH'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_update_patch
     ),
@@ -481,12 +646,18 @@ stacks_policies = [
                 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}',
                 'method': 'PATCH'
             }
+=======
+>>>>>>> heat/policies/stacks.py
         ]
     ),
     policy.DocumentedRuleDefault(
         name=POLICY_ROOT % 'preview_update',
+<<<<<<< heat/policies/stacks.py
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Preview update stack.',
         operations=[
             {
@@ -494,6 +665,7 @@ stacks_policies = [
                 'preview',
                 'method': 'PUT'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_preview_update
     ),
@@ -501,6 +673,13 @@ stacks_policies = [
         name=POLICY_ROOT % 'preview_update_patch',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'preview_update_patch',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Preview update stack (PATCH).',
         operations=[
             {
@@ -508,6 +687,7 @@ stacks_policies = [
                 'preview',
                 'method': 'PATCH'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_preview_update_patch
     ),
@@ -515,12 +695,20 @@ stacks_policies = [
         name=POLICY_ROOT % 'validate_template',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'validate_template',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Validate template.',
         operations=[
             {
                 'path': '/v1/{tenant_id}/validate',
                 'method': 'POST'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_validate_template
     ),
@@ -528,6 +716,13 @@ stacks_policies = [
         name=POLICY_ROOT % 'snapshot',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'snapshot',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Snapshot Stack.',
         operations=[
             {
@@ -535,6 +730,7 @@ stacks_policies = [
                 'snapshots',
                 'method': 'POST'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_snapshot
     ),
@@ -542,6 +738,13 @@ stacks_policies = [
         name=POLICY_ROOT % 'show_snapshot',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'show_snapshot',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Show snapshot.',
         operations=[
             {
@@ -549,6 +752,7 @@ stacks_policies = [
                 'snapshots/{snapshot_id}',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_show_snapshot
     ),
@@ -556,6 +760,13 @@ stacks_policies = [
         name=POLICY_ROOT % 'delete_snapshot',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'delete_snapshot',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Delete snapshot.',
         operations=[
             {
@@ -563,6 +774,7 @@ stacks_policies = [
                 'snapshots/{snapshot_id}',
                 'method': 'DELETE'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_delete_snapshot
     ),
@@ -570,6 +782,13 @@ stacks_policies = [
         name=POLICY_ROOT % 'list_snapshots',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'list_snapshots',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='List snapshots.',
         operations=[
             {
@@ -577,6 +796,7 @@ stacks_policies = [
                 'snapshots',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_list_snapshots
     ),
@@ -584,6 +804,13 @@ stacks_policies = [
         name=POLICY_ROOT % 'restore_snapshot',
         check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'restore_snapshot',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Restore snapshot.',
         operations=[
             {
@@ -591,6 +818,7 @@ stacks_policies = [
                 'snapshots/{snapshot_id}/restore',
                 'method': 'POST'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_restore_snapshot
     ),
@@ -598,6 +826,13 @@ stacks_policies = [
         name=POLICY_ROOT % 'list_outputs',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'list_outputs',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='List outputs.',
         operations=[
             {
@@ -605,6 +840,7 @@ stacks_policies = [
                 'outputs',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_list_outputs
     ),
@@ -612,6 +848,13 @@ stacks_policies = [
         name=POLICY_ROOT % 'show_output',
         check_str=base.SYSTEM_OR_PROJECT_READER,
         scope_types=['system', 'project'],
+=======
+        ]
+    ),
+    policy.DocumentedRuleDefault(
+        name=POLICY_ROOT % 'show_output',
+        check_str=base.RULE_DENY_STACK_USER,
+>>>>>>> heat/policies/stacks.py
         description='Show outputs.',
         operations=[
             {
@@ -619,8 +862,12 @@ stacks_policies = [
                 'outputs/{output_key}',
                 'method': 'GET'
             }
+<<<<<<< heat/policies/stacks.py
         ],
         deprecated_rule=deprecated_show_output
+=======
+        ]
+>>>>>>> heat/policies/stacks.py
     )
 ]
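Every per-API list touched above (actions_policies, stacks_policies, and the rest) eventually has to reach oslo.policy as one flat iterable of defaults. A minimal sketch of that aggregation, assuming each module exposes the conventional list_rules() helper returning its *_policies list (the aggregator module itself is not part of this diff):

    import itertools

    from heat.policies import actions
    from heat.policies import base
    from heat.policies import cloudformation
    from heat.policies import stacks


    def list_rules():
        # oslo.policy only needs a single flat iterable of RuleDefault /
        # DocumentedRuleDefault objects; ordering is irrelevant.
        return itertools.chain(
            base.list_rules(),
            actions.list_rules(),
            cloudformation.list_rules(),
            stacks.list_rules(),
        )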
 
diff --git a/heat/tests/api/openstack_v1/test_software_configs.py b/heat/tests/api/openstack_v1/test_software_configs.py
index 110bfae..87047c1 100644
--- a/heat/tests/api/openstack_v1/test_software_configs.py
+++ b/heat/tests/api/openstack_v1/test_software_configs.py
@@ -47,6 +47,7 @@ class SoftwareConfigControllerTest(tools.ControllerTest, common.HeatTestCase):
                 {'software_configs': []}, resp)
 
     @mock.patch.object(policy.Enforcer, 'enforce')
+<<<<<<< heat/tests/api/openstack_v1/test_software_configs.py
     def test_index_limit_negative(self, mock_enforce):
         self._mock_enforce_setup(mock_enforce, 'index')
         params = {'limit': -1}
@@ -82,6 +83,8 @@ class SoftwareConfigControllerTest(tools.ControllerTest, common.HeatTestCase):
             self.assertFalse(mock_call.called)
 
     @mock.patch.object(policy.Enforcer, 'enforce')
+=======
+>>>>>>> heat/tests/api/openstack_v1/test_software_configs.py
     def test_show(self, mock_enforce):
         self._mock_enforce_setup(mock_enforce, 'show')
         config_id = 'a45559cd-8736-4375-bc39-d6a7bb62ade2'
diff --git a/heat/tests/api/openstack_v1/test_stacks.py b/heat/tests/api/openstack_v1/test_stacks.py
index ff0a575..cf0c2d6 100644
--- a/heat/tests/api/openstack_v1/test_stacks.py
+++ b/heat/tests/api/openstack_v1/test_stacks.py
@@ -338,6 +338,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
         self.assertNotIn('balrog', engine_args)
 
     @mock.patch.object(rpc_client.EngineClient, 'call')
+<<<<<<< heat/tests/api/openstack_v1/test_stacks.py
     def test_index_limit_negative(self, mock_call, mock_enforce):
         self._mock_enforce_setup(mock_enforce, 'index', True)
         params = {'limit': -1}
@@ -352,6 +353,8 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
         self.assertFalse(mock_call.called)
 
     @mock.patch.object(rpc_client.EngineClient, 'call')
+=======
+>>>>>>> heat/tests/api/openstack_v1/test_stacks.py
     def test_index_limit_not_int(self, mock_call, mock_enforce):
         self._mock_enforce_setup(mock_enforce, 'index', True)
         params = {'limit': 'not-an-int'}
@@ -485,9 +488,13 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
         mock_enforce.assert_called_with(action='global_index',
                                         scope=self.controller.REQUEST_SCOPE,
                                         is_registered_policy=True,
+<<<<<<< heat/tests/api/openstack_v1/test_stacks.py
                                         context=self.context,
                                         target={"project_id": self.tenant}
                                         )
+=======
+                                        context=self.context)
+>>>>>>> heat/tests/api/openstack_v1/test_stacks.py
 
     def test_global_index_uses_admin_context(self, mock_enforce):
         rpc_client = self.controller.rpc_client
@@ -1212,6 +1219,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
         self.assertEqual('HTTPBadRequest', resp.json['error']['type'])
         self.assertIsNotNone(resp.json['error']['traceback'])
 
+<<<<<<< heat/tests/api/openstack_v1/test_stacks.py
     def test_create_err_circulardep(self, mock_enforce):
         self._mock_enforce_setup(mock_enforce, 'create', True)
         stack_name = "foobar"
@@ -1259,6 +1267,8 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
             version='1.36'
         )
 
+=======
+>>>>>>> heat/tests/api/openstack_v1/test_stacks.py
     @mock.patch.object(rpc_client.EngineClient, 'call')
     @mock.patch.object(stacks.stacks_view, 'format_stack')
     def test_preview_stack(self, mock_format, mock_call, mock_enforce):
@@ -1738,9 +1748,13 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
             version='1.20'
         )
 
+<<<<<<< heat/tests/api/openstack_v1/test_stacks.py
     # the test_show_invalidtenant case for stacks is now covered by the
     # more generic srbac tests
     def test_deprecated_show_invalidtenant(self, mock_enforce):
+=======
+    def test_show_invalidtenant(self, mock_enforce):
+>>>>>>> heat/tests/api/openstack_v1/test_stacks.py
         identity = identifier.HeatIdentifier('wibble', 'wordpress', '6')
 
         req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
@@ -1997,6 +2011,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
             version='1.36'
         )
 
+<<<<<<< heat/tests/api/openstack_v1/test_stacks.py
     def test_update_timeout_negative(self, mock_enforce):
         self._mock_enforce_setup(mock_enforce, 'update', True)
         identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
@@ -2022,6 +2037,8 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
                          str(ex))
         self.assertFalse(mock_call.called)
 
+=======
+>>>>>>> heat/tests/api/openstack_v1/test_stacks.py
     def test_update_timeout_not_int(self, mock_enforce):
         self._mock_enforce_setup(mock_enforce, 'update', True)
         identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
@@ -2235,6 +2252,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
             version='1.36'
         )
 
+<<<<<<< heat/tests/api/openstack_v1/test_stacks.py
     def test_update_with_patch_timeout_negative(self, mock_enforce):
         self._mock_enforce_setup(mock_enforce, 'update_patch', True)
         identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
@@ -2260,6 +2278,8 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
                          str(ex))
         self.assertFalse(mock_call.called)
 
+=======
+>>>>>>> heat/tests/api/openstack_v1/test_stacks.py
     def test_update_with_patch_timeout_not_int(self, mock_enforce):
         self._mock_enforce_setup(mock_enforce, 'update_patch', True)
         identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
diff --git a/heat/tests/api/openstack_v1/test_util.py b/heat/tests/api/openstack_v1/test_util.py
index ec0ae5e..95804f0 100644
--- a/heat/tests/api/openstack_v1/test_util.py
+++ b/heat/tests/api/openstack_v1/test_util.py
@@ -88,7 +88,11 @@ class TestPolicyEnforce(common.HeatTestCase):
     def setUp(self):
         super(TestPolicyEnforce, self).setUp()
         self.req = wsgi.Request({})
+<<<<<<< heat/tests/api/openstack_v1/test_util.py
         self.req.context = context.RequestContext(project_id='foo',
+=======
+        self.req.context = context.RequestContext(tenant='foo',
+>>>>>>> heat/tests/api/openstack_v1/test_util.py
                                                   is_admin=False)
 
         class DummyController(object):
@@ -113,7 +117,11 @@ class TestPolicyEnforce(common.HeatTestCase):
 
     @mock.patch.object(policy.Enforcer, 'enforce')
     def test_policy_enforce_tenant_mismatch_is_admin(self, mock_enforce):
+<<<<<<< heat/tests/api/openstack_v1/test_util.py
         self.req.context = context.RequestContext(project_id='foo',
+=======
+        self.req.context = context.RequestContext(tenant='foo',
+>>>>>>> heat/tests/api/openstack_v1/test_util.py
                                                   is_admin=True)
         mock_enforce.return_value = True
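The test_util.py conflict is only about the keyword used to build the request context: the upper side uses project_id, the lower side the older tenant spelling. A tiny sketch with oslo.context directly (independent of Heat's own context subclass), using only the newer keyword:

    from oslo_context import context

    ctx = context.RequestContext(project_id='foo', is_admin=False)
    print(ctx.project_id)                          # 'foo'
    # to_policy_values() is the dict oslo.policy sees when a context object
    # is passed as the credentials argument.
    print(ctx.to_policy_values()['project_id'])    # 'foo'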
 
diff --git a/heat/tests/api/openstack_v1/tools.py b/heat/tests/api/openstack_v1/tools.py
index 0f5247a..5b709c5 100644
--- a/heat/tests/api/openstack_v1/tools.py
+++ b/heat/tests/api/openstack_v1/tools.py
@@ -119,7 +119,10 @@ class ControllerTest(object):
                 action=self.action,
                 context=self.context,
                 scope=self.controller.REQUEST_SCOPE,
+<<<<<<< heat/tests/api/openstack_v1/tools.py
                 target={'project_id': self.tenant},
+=======
+>>>>>>> heat/tests/api/openstack_v1/tools.py
                 is_registered_policy=mock.ANY
             )
             self.assertEqual(self.expected_request_count,
diff --git a/heat/tests/api/test_wsgi.py b/heat/tests/api/test_wsgi.py
index 97ec3b3..3aff556 100644
--- a/heat/tests/api/test_wsgi.py
+++ b/heat/tests/api/test_wsgi.py
@@ -441,12 +441,19 @@ class GetSocketTestCase(common.HeatTestCase):
                           wsgi.cfg.CONF.heat_api, 1234)
 
     def test_get_socket_with_bind_problems(self):
+<<<<<<< heat/tests/api/test_wsgi.py
         err = wsgi.socket.error(
             socket.errno.EADDRINUSE, 'Address already in use')
         self.useFixture(fixtures.MonkeyPatch(
             'heat.common.wsgi.eventlet.listen',
             mock.Mock(side_effect=(
                 [err] * 3 + [None]))))
+=======
+        self.useFixture(fixtures.MonkeyPatch(
+            'heat.common.wsgi.eventlet.listen',
+            mock.Mock(side_effect=(
+                [wsgi.socket.error(socket.errno.EADDRINUSE)] * 3 + [None]))))
+>>>>>>> heat/tests/api/test_wsgi.py
         self.useFixture(fixtures.MonkeyPatch(
             'heat.common.wsgi.ssl.wrap_socket',
             lambda *x, **y: None))
diff --git a/heat/tests/aws/test_eip.py b/heat/tests/aws/test_eip.py
index af52578..981fde4 100644
--- a/heat/tests/aws/test_eip.py
+++ b/heat/tests/aws/test_eip.py
@@ -805,7 +805,12 @@ class AllocTest(common.HeatTestCase):
         before = self.create_association(t, stack, 'IPAssoc')
         after = rsrc_defn.ResourceDefinition(before.name, before.type(),
                                              after_props)
+<<<<<<< heat/tests/aws/test_eip.py
         self.assertTrue(before._needs_update(after, before, after_props,
+=======
+        self.assertTrue(resource.UpdateReplace,
+                        before._needs_update(after, before, after_props,
+>>>>>>> heat/tests/aws/test_eip.py
                                              before_props, None))
 
     def test_update_association_needs_update_InstanceId_EIP(self):
diff --git a/heat/tests/clients/test_aodh_client.py b/heat/tests/clients/test_aodh_client.py
index 6cc6d45..61df186 100644
--- a/heat/tests/clients/test_aodh_client.py
+++ b/heat/tests/clients/test_aodh_client.py
@@ -10,7 +10,10 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+<<<<<<< heat/tests/clients/test_aodh_client.py
 from testtools import testcase
+=======
+>>>>>>> heat/tests/clients/test_aodh_client.py
 
 from heat.tests import common
 from heat.tests import utils
@@ -18,7 +21,10 @@ from heat.tests import utils
 
 class AodhClientPluginTest(common.HeatTestCase):
 
+<<<<<<< heat/tests/clients/test_aodh_client.py
     @testcase.skip('skipped till python-aodhclient fixed for pyparsing 3.0.6')
+=======
+>>>>>>> heat/tests/clients/test_aodh_client.py
     def test_create(self):
         context = utils.dummy_context()
         plugin = context.clients.client_plugin('aodh')
diff --git a/heat/tests/clients/test_barbican_client.py b/heat/tests/clients/test_barbican_client.py
index 4f07dc6..2110899 100644
--- a/heat/tests/clients/test_barbican_client.py
+++ b/heat/tests/clients/test_barbican_client.py
@@ -32,8 +32,11 @@ class BarbicanClientPluginTest(common.HeatTestCase):
         self.barbican_plugin = c.client_plugin('barbican')
         self.barbican_plugin.client = lambda: self.barbican_client
 
+<<<<<<< heat/tests/clients/test_barbican_client.py
     @mock.patch('keystoneauth1.discover.get_version_data',
                 mock.MagicMock(return_value=[{'status': "STABLE"}]))
+=======
+>>>>>>> heat/tests/clients/test_barbican_client.py
     def test_create(self):
         context = utils.dummy_context()
         plugin = context.clients.client_plugin('barbican')
diff --git a/heat/tests/clients/test_heat_client.py b/heat/tests/clients/test_heat_client.py
index 300ad5d..dbbb79d 100644
--- a/heat/tests/clients/test_heat_client.py
+++ b/heat/tests/clients/test_heat_client.py
@@ -86,7 +86,10 @@ class KeystoneClientTest(common.HeatTestCase):
             session=utils.AnyInstance(ks_session.Session),
             auth=self.mock_ks_auth,
             connect_retries=2,
+<<<<<<< heat/tests/clients/test_heat_client.py
             interface='publicURL',
+=======
+>>>>>>> heat/tests/clients/test_heat_client.py
             region_name=None)
 
     def _stubs_auth(self, method='token', trust_scoped=True,
@@ -169,7 +172,10 @@ class KeystoneClientTest(common.HeatTestCase):
             self.m_client.assert_any_call(
                 session=utils.AnyInstance(ks_session.Session),
                 connect_retries=2,
+<<<<<<< heat/tests/clients/test_heat_client.py
                 interface='publicURL',
+=======
+>>>>>>> heat/tests/clients/test_heat_client.py
                 region_name=None)
         if self.stub_admin_auth:
             self.mock_admin_ks_auth.get_user_id.assert_called_once_with(
@@ -523,6 +529,7 @@ class KeystoneClientTest(common.HeatTestCase):
         self.assertRaises(exception.AuthorizationFailure,
                           heat_keystoneclient.KeystoneClient, ctx)
 
+<<<<<<< heat/tests/clients/test_heat_client.py
     def test_regenerate_trust_context_with_no_exist_trust_id(self):
 
         """Test regenerate_trust_context."""
@@ -582,6 +589,8 @@ class KeystoneClientTest(common.HeatTestCase):
         self.mock_ks_v3_client.trusts.delete.assert_called_once_with(
             ctx.trust_id)
 
+=======
+>>>>>>> heat/tests/clients/test_heat_client.py
     def test_create_trust_context_trust_id(self):
 
         """Test create_trust_context with existing trust_id."""
@@ -1581,7 +1590,10 @@ class KeystoneClientTestDomainName(KeystoneClientTest):
             session=utils.AnyInstance(ks_session.Session),
             auth=self.mock_ks_auth,
             connect_retries=2,
+<<<<<<< heat/tests/clients/test_heat_client.py
             interface='publicURL',
+=======
+>>>>>>> heat/tests/clients/test_heat_client.py
             region_name=None)
 
     def _stub_domain_admin_client(self, domain_id='adomain123'):
diff --git a/heat/tests/clients/test_keystone_client.py b/heat/tests/clients/test_keystone_client.py
index 5affb7b..4701379 100644
--- a/heat/tests/clients/test_keystone_client.py
+++ b/heat/tests/clients/test_keystone_client.py
@@ -887,7 +887,11 @@ class KeystoneClientPluginUserTest(common.HeatTestCase):
                           self._client.client.users.get,
                           self.sample_name)
         self._client.client.users.find.assert_called_once_with(
+<<<<<<< heat/tests/clients/test_keystone_client.py
             domain_id=None, name=self.sample_name)
+=======
+            domain=None, name=self.sample_name)
+>>>>>>> heat/tests/clients/test_keystone_client.py
 
     @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
     def test_get_user_id_with_name_and_domain(self, client_keystone):
@@ -905,7 +909,11 @@ class KeystoneClientPluginUserTest(common.HeatTestCase):
                           self._client.client.users.get,
                           self.sample_name)
         self._client.client.users.find.assert_called_once_with(
+<<<<<<< heat/tests/clients/test_keystone_client.py
             domain_id=client_plugin.get_domain_id(self.sample_domain_uuid),
+=======
+            domain=client_plugin.get_domain_id(self.sample_domain_uuid),
+>>>>>>> heat/tests/clients/test_keystone_client.py
             name=self.sample_name)
 
     @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
@@ -929,7 +937,11 @@ class KeystoneClientPluginUserTest(common.HeatTestCase):
                           self._client.client.users.get,
                           self.sample_name)
         self._client.client.users.find.assert_called_once_with(
+<<<<<<< heat/tests/clients/test_keystone_client.py
             domain_id=None, name=self.sample_name)
+=======
+            domain=None, name=self.sample_name)
+>>>>>>> heat/tests/clients/test_keystone_client.py
 
     @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
     def test_get_user_id_with_name_and_domain_invalid_input(self,
diff --git a/heat/tests/clients/test_magnum_client.py b/heat/tests/clients/test_magnum_client.py
index a12310e..4bd1904 100644
--- a/heat/tests/clients/test_magnum_client.py
+++ b/heat/tests/clients/test_magnum_client.py
@@ -56,3 +56,25 @@ class ClusterTemplateConstraintTest(common.HeatTestCase):
         self.mock_cluster_template_get.side_effect = mc_exc.NotFound()
         self.assertFalse(self.constraint.validate(
             "bad_cluster_template", self.ctx))
+<<<<<<< heat/tests/clients/test_ma