=== modified file 'Makefile'
--- Makefile	2014-04-15 14:41:32 +0000
+++ Makefile	2014-06-04 19:57:51 +0000
@@ -344,8 +344,8 @@
 # has a bug and always considers apt-source tarballs before the specified
 # branch.  So instead, export to a local tarball which is always found.
 # Make sure debhelper and dh-apport packages are installed before using this.
-PACKAGING := $(CURDIR)/../packaging.trunk
-PACKAGING_BRANCH := lp:~maas-maintainers/maas/packaging
+PACKAGING := $(CURDIR)/../packaging.trusty
+PACKAGING_BRANCH := lp:~maas-maintainers/maas/packaging.trusty
 
 package_branch:
 	@echo Downloading/refreshing packaging branch...
@@ -363,11 +363,11 @@
 	@bzr export --root=maas-$(VER).orig ../build-area/$(TARBALL) $(CURDIR)
 
 package: package_export
-	bzr bd --merge $(PACKAGING) -- -uc -us
+	bzr bd --merge $(PACKAGING) --result-dir=../build-area -- -uc -us
 	@echo Binary packages built, see parent directory.
 
 source_package: package_export
-	bzr bd --merge $(PACKAGING) -- -S -uc -us
+	bzr bd --merge $(PACKAGING) --result-dir=../build-area -- -S -uc -us
 	@echo Source package built, see parent directory.
 
 #

=== modified file 'debian/changelog'
--- debian/changelog	2014-05-09 22:35:43 +0000
+++ debian/changelog	2014-06-04 19:57:51 +0000
@@ -1,3 +1,34 @@
+maas (1.5.2+bzr2282-0ubuntu0.1) trusty; urgency=medium
+
+  * New upstream release:
+    - Remove workaround for fixed Django bug 1311433 (LP: #1311433)
+    - Ensure that validation errors are returned when adding a node over
+      the API and its cluster controller is not contactable. (LP: #1305061)
+    - Hardware enablement support for PowerKVM (LP: #1325038)
+    - Shorten the time taken for a cluster to initially connect to the region
+      via RPC to around 2 seconds (LP: #1317682)
+    - Faster DHCP leases parser (LP: #1305102)
+    - Documentation fix explaining how to enable an ephemeral backdoor
+      (LP: #1321696)
+    - Use probe-and-enlist-hardware to enlist all virtual machines inside
+      a libvirt machine; allow password-based qemu+ssh connections.
+      (LP: #1315155, LP: #1315157)
+    - Rename ppc64el boot loader to PowerKVM (LP: #1315154)
+    - Fix NodeForm's is_valid() method so that it uses Django's way of setting
+      errors on forms instead of putting text in self.errors['architecture']
+      (LP: #1301465)
+    - Change BootMethods to return their own IReader per-request, update method
+      names to reflect new usage. (LP: #1315154)
+    - Return early and stop the DHCP server when the list of managed interfaces
+      of the nodegroup is empty. (LP: #1324944)
+    - Fix invalid attribute references in the VirshSSH class. Added more tests
+      for the VirshSSH class. (LP: #1324966)
+  * debian/control:
+    - Add missing dependency in maas-cluster-controller for python-pexpect
+      (LP: #1322151)
+
+ -- Greg Lutostanski <gregory.lutostanski@canonical.com>  Wed, 04 Jun 2014 14:31:41 -0500
+
 maas (1.5.1+bzr2269-0ubuntu0.1) trusty; urgency=medium
 
   * Stable Release Update (LP: #1317601):

=== modified file 'debian/control'
--- debian/control	2014-05-09 22:35:43 +0000
+++ debian/control	2014-06-04 19:57:51 +0000
@@ -157,6 +157,7 @@
          python-maas-provisioningserver (=${binary:Version}),
          python-netaddr,
          python-oauth,
+         python-pexpect,
          python-seamicroclient,
          python-tempita,
          python-twisted,

=== modified file 'docs/changelog.rst'
--- docs/changelog.rst	2014-05-09 22:24:12 +0000
+++ docs/changelog.rst	2014-06-04 19:57:51 +0000
@@ -2,6 +2,27 @@
 Changelog
 =========
 
+1.5.2
+=====
+
+Bug fix update
+--------------
+
+- Remove workaround for fixed Django bug 1311433 (LP: #1311433)
+- Ensure that validation errors are returned when adding a node over
+  the API and its cluster controller is not contactable. (LP: #1305061)
+- Hardware enablement support for PowerKVM
+- Shorten the time taken for a cluster to initially connect to the region
+  via RPC to around 2 seconds (LP: #1317682)
+- Faster DHCP leases parser (LP: #1305102)
+- Documentation fix explaining how to enable an ephemeral backdoor
+  (LP: #1321696)
+- Use probe-and-enlist-hardware to enlist all virtual machines inside
+  a libvirt machine; allow password-based qemu+ssh connections.
+  (LP: #1315155, LP: #1315157)
+- Rename ppc64el boot loader to PowerKVM (LP: #1315154)
+
+
 1.5.1
 =====
 

=== modified file 'docs/troubleshooting.rst'
--- docs/troubleshooting.rst	2014-03-28 10:43:53 +0000
+++ docs/troubleshooting.rst	2014-06-04 19:57:51 +0000
@@ -127,13 +127,13 @@
  sudo apt-get install --assume-yes bzr
  bzr branch lp:~maas-maintainers/maas/backdoor-image backdoor-image
 
- imgs=$(echo /var/lib/maas/ephemeral/*/*/*/*/*.img)
+ imgs=$(echo /var/lib/maas/boot-resources/*/*/*/*/*/root-image)
  for img in $imgs; do
      [ -f "$img.dist" ] || cp -a --sparse=always $img $img.dist
  done
 
  for img in $imgs; do
-     sudo ./backdoor-image -v --user=backdoor --password-auth --password=ubuntu $img
+     sudo ./backdoor-image/backdoor-image -v --user=backdoor --password-auth --password=ubuntu $img
  done
 
 Inside the ephemeral image

=== modified file 'etc/maas/templates/commissioning-user-data/snippets/maas_enlist.sh'
--- etc/maas/templates/commissioning-user-data/snippets/maas_enlist.sh	2014-03-28 10:43:53 +0000
+++ etc/maas/templates/commissioning-user-data/snippets/maas_enlist.sh	2014-06-04 19:57:51 +0000
@@ -61,7 +61,7 @@
 get_host_subarchitecture() {
 	local arch=$1
 	case $arch in
-	    i386|amd64)
+	    i386|amd64|ppc64el)
 		# Skip the call to archdetect as that's what
 		# get_host_architecture does
 		echo generic

=== modified file 'etc/maas/templates/power/virsh.template'
--- etc/maas/templates/power/virsh.template	2013-10-10 17:07:51 +0000
+++ etc/maas/templates/power/virsh.template	2014-06-04 19:57:51 +0000
@@ -3,50 +3,16 @@
 # Control virtual system's "power" through virsh.
 #
 
-# Parameters.
-power_change={{power_change}}
-power_address={{power_address}}
-power_id={{power_id}}
-virsh={{virsh}}
-
-
-# Choose command for virsh to make the requested power change happen.
-formulate_power_command() {
-  if [ ${power_change} = 'on' ]
-  then
-      echo 'start'
-  else
-      echo 'destroy'
-  fi
-}
-
-
-# Express system's current state as expressed by virsh as "on" or "off".
-formulate_power_state() {
-    case $1 in
-    'running') echo 'on' ;;
-    'shut off') echo 'off' ;;
-    *)
-        echo "Got unknown power state from virsh: '$1'" >&2
-        exit 1
-    esac
-}
-
-
-# Issue command to virsh, for the given system.
 issue_virsh_command() {
-    ${virsh} --connect ${power_address} $1 ${power_id}
-}
-
-
-# Get the given system's power state: 'on' or 'off'.
-get_power_state() {
-    virsh_state=$(issue_virsh_command domstate)
-    formulate_power_state ${virsh_state}
-}
-
-
-if [ "$(get_power_state)" != "${power_change}" ]
-then
-    issue_virsh_command $(formulate_power_command)
-fi
+python - << END
+from provisioningserver.custom_hardware.virsh import power_control_virsh
+power_control_virsh(
+    {{repr(power_address).decode("ascii") | safe}},
+    {{repr(power_id).decode("ascii") | safe}},
+    {{repr(power_change).decode("ascii") | safe}},
+    {{repr(power_pass).decode("ascii") | safe}},
+)
+END
+}
+
+issue_virsh_command

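The rewritten template above delegates all power control to `power_control_virsh` in `provisioningserver.custom_hardware.virsh`. The full implementation of that function is not shown in this diff (the new virsh.py is truncated below), but the `TestVirshPowerControl` cases later in the diff pin down its behaviour. The following is a minimal sketch inferred from those tests; only `VirshSSH`, `VirshVMState`, `VirshError` and the `login`/`logout`/`get_state`/`poweron`/`poweroff` methods are taken from the diff, everything else is an assumption:

    # Sketch only: a plausible shape for power_control_virsh, inferred from the
    # TestVirshPowerControl tests in this diff; not the actual implementation.
    from provisioningserver.custom_hardware.virsh import (
        VirshError,
        VirshSSH,
        VirshVMState,
        )


    def power_control_virsh_sketch(poweraddr, machine, power_change,
                                   password=None):
        conn = VirshSSH()
        if not conn.login(poweraddr, password):
            raise VirshError('Failed to login to virsh console.')
        try:
            state = conn.get_state(machine)
            if state is None:
                raise VirshError('%s: Failed to get power state' % machine)
            if power_change == 'on' and state == VirshVMState.OFF:
                if conn.poweron(machine) is False:
                    raise VirshError('%s: Failed to power on VM' % machine)
            elif power_change == 'off' and state == VirshVMState.ON:
                if conn.poweroff(machine) is False:
                    raise VirshError('%s: Failed to power off VM' % machine)
        finally:
            conn.logout()
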
=== modified file 'etc/maas/templates/uefi/config.commissioning.template'
--- etc/maas/templates/uefi/config.commissioning.template	2014-03-28 10:43:53 +0000
+++ etc/maas/templates/uefi/config.commissioning.template	2014-06-04 19:57:51 +0000
@@ -1,10 +1,8 @@
 set default="0"
 set timeout=0
 
-# Force AMD64 for commissioning as UEFI only supports AMD64 currently.
 menuentry 'Commission' {
     echo   'Booting under MAAS direction...'
-    echo   '{{kernel_params() | kernel_command}} BOOTIF=01-'${net_default_mac}
-    linux  {{kernel_params(arch="amd64") | kernel_path }} {{kernel_params(arch="amd64") | kernel_command}} BOOTIF=01-${net_default_mac}
-    initrd {{kernel_params(arch="amd64") | initrd_path }}
+    linux  {{kernel_params | kernel_path }} {{kernel_params | kernel_command}} BOOTIF=01-${net_default_mac}
+    initrd {{kernel_params | initrd_path }}
 }

=== modified file 'etc/maas/templates/uefi/config.install.template'
--- etc/maas/templates/uefi/config.install.template	2014-03-28 10:43:53 +0000
+++ etc/maas/templates/uefi/config.install.template	2014-06-04 19:57:51 +0000
@@ -3,7 +3,6 @@
 
 menuentry 'Install' {
     echo   'Booting under MAAS direction...'
-    echo   '{{kernel_params | kernel_command}} BOOTIF=01-'${net_default_mac}
     linux  {{kernel_params | kernel_path }} {{kernel_params | kernel_command}} BOOTIF=01-${net_default_mac}
     initrd {{kernel_params | initrd_path }}
 }

=== renamed file 'etc/maas/templates/uefi/config.local.template' => 'etc/maas/templates/uefi/config.local.amd64.template'
--- etc/maas/templates/uefi/config.local.template	2014-03-28 10:43:53 +0000
+++ etc/maas/templates/uefi/config.local.amd64.template	2014-06-04 19:57:51 +0000
@@ -2,7 +2,7 @@
 set timeout=0
 
 menuentry 'Local' {
-    echo 'Booting local disk ...'
+    echo 'Booting local disk...'
     search --set=root --file /efi/ubuntu/grub.cfg
     configfile /efi/ubuntu/grub.cfg
 }

=== added file 'etc/maas/templates/uefi/config.local.ppc64el.template'
--- etc/maas/templates/uefi/config.local.ppc64el.template	1970-01-01 00:00:00 +0000
+++ etc/maas/templates/uefi/config.local.ppc64el.template	2014-06-04 19:57:51 +0000
@@ -0,0 +1,8 @@
+set default="0"
+set timeout=0
+
+menuentry 'Local' {
+    echo 'Booting local disk...'
+    search --set=root --file /boot/grub/grub.cfg
+    configfile /boot/grub/grub.cfg
+}

=== modified file 'required-packages/base'
--- required-packages/base	2014-03-28 10:43:53 +0000
+++ required-packages/base	2014-06-04 19:57:51 +0000
@@ -35,6 +35,7 @@
 python-oops-datedir-repo
 python-oops-twisted
 python-oops-wsgi
+python-pexpect
 python-psycopg2
 python-pyinotify
 python-seamicroclient

=== modified file 'src/maasserver/api.py'
--- src/maasserver/api.py	2014-05-09 22:35:43 +0000
+++ src/maasserver/api.py	2014-06-04 19:57:51 +0000
@@ -1684,8 +1684,8 @@
     def probe_and_enlist_hardware(self, request, uuid):
         """Add special hardware types.
 
-        :param model: The type of special hardware, currently only
-            'seamicro15k' is supported.
+        :param model: The type of special hardware; currently 'seamicro15k'
+            and 'virsh' are supported.
         :type model: unicode
 
         The following are only required if you are probing a seamicro15k:
@@ -1702,6 +1702,17 @@
         :param power_control: The power_control to use, either ipmi (default)
             or restapi.
         :type power_control: unicode
+
+        The following are only required if you are probing a virsh:
+
+        :param power_address: The connection string to virsh.
+        :type power_address: unicode
+
+        The following are optional if you are probing a virsh:
+
+        :param power_pass: The password to use when qemu+ssh is given as the
+            connection string and SSH key authentication is not being used.
+        :type power_pass: unicode
         """
         nodegroup = get_object_or_404(NodeGroup, uuid=uuid)
 
@@ -1716,6 +1727,12 @@
 
             nodegroup.add_seamicro15k(
                 mac, username, password, power_control=power_control)
+        elif model == 'powerkvm' or model == 'virsh':
+            poweraddr = get_mandatory_param(request.data, 'power_address')
+            password = get_optional_param(
+                request.data, 'power_pass', default=None)
+
+            nodegroup.add_virsh(poweraddr, password=password)
         else:
             return HttpResponse(status=httplib.BAD_REQUEST)
 

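For the new `virsh`/`powerkvm` model, `probe_and_enlist_hardware` takes a mandatory `power_address` and an optional `power_pass`, as documented in the handler above. As a rough illustration, the form data a client might POST to the cluster's nodegroup handler is sketched below; the endpoint path and OAuth handling are assumptions based on the 1.0 API conventions, while the operation and parameter names come from this diff:

    # Sketch: request data for probing a virsh host.  The URL layout is an
    # assumption; 'op', 'model', 'power_address' and 'power_pass' come from
    # the handler above.  A real request must also be OAuth-signed with a
    # MAAS API key.
    import urllib

    cluster_uuid = 'adfd3977-f251-4f2c-8d61-745dbd690bf2'   # hypothetical
    data = urllib.urlencode({
        'op': 'probe_and_enlist_hardware',
        'model': 'virsh',
        'power_address': 'qemu+ssh://ubuntu@192.168.1.2/system',
        'power_pass': 'ubuntu',   # omit when SSH key authentication is used
    })
    url = 'http://maas.example.com/MAAS/api/1.0/nodegroups/%s/' % cluster_uuid
    # POSTing `data` to `url` makes the cluster queue an add_virsh task.
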
=== modified file 'src/maasserver/dhcp.py'
--- src/maasserver/dhcp.py	2014-03-28 10:43:53 +0000
+++ src/maasserver/dhcp.py	2014-06-04 19:57:51 +0000
@@ -22,6 +22,7 @@
 from netaddr import IPAddress
 from provisioningserver.tasks import (
     restart_dhcp_server,
+    stop_dhcp_server,
     write_dhcp_config,
     )
 
@@ -52,7 +53,15 @@
     from maasserver.dns import get_dns_server_address
 
     interfaces = get_interfaces_managed_by(nodegroup)
-    if interfaces is None:
+    if interfaces in [None, []]:
+        # interfaces being None means the cluster isn't accepted: stop
+        # the DHCP server in case it was started.
+        # interfaces being [] means no interface is configured: stop
+        # the DHCP server.  Note that a config generated with this
+        # setup would not be valid and would result in the DHCP
+        # server failing with the error: "Not configured to listen on
+        # any interfaces!"
+        stop_dhcp_server.apply_async(queue=nodegroup.work_queue)
         return
 
     # Make sure this nodegroup has a key to communicate with the dhcp

=== modified file 'src/maasserver/forms.py'
--- src/maasserver/forms.py	2014-04-09 19:02:00 +0000
+++ src/maasserver/forms.py	2014-06-04 19:57:51 +0000
@@ -118,6 +118,17 @@
 BLANK_CHOICE = ('', '-------')
 
 
+def set_form_error(form, field_name, error_value):
+    """Set an error on a form's field.
+
+    This utility method encapsulates Django's arguably awkward way
+    of setting errors inside a form's clean()/is_valid() method.  This
+    method will override any previously-registered error for 'field_name'.
+    """
+    # Hey Django devs, this is a crap API to set errors.
+    form.errors[field_name] = form.error_class([error_value])
+
+
 def remove_None_values(data):
     """Return a new dictionary without the keys corresponding to None values.
     """
@@ -249,8 +260,8 @@
     def is_valid(self):
         is_valid = super(NodeForm, self).is_valid()
         if len(list_all_usable_architectures()) == 0:
-            self.errors['architecture'] = (
-                [NO_ARCHITECTURES_AVAILABLE])
+            set_form_error(
+                self, "architecture", NO_ARCHITECTURES_AVAILABLE)
             is_valid = False
         return is_valid
 
@@ -420,12 +431,11 @@
             try:
                 get_power_types([self._get_nodegroup()])
             except ClusterUnavailable as e:
-                # Hey Django devs, this is a crap API to set errors.
-                self._errors["power_type"] = self.error_class(
-                    [CLUSTER_NOT_AVAILABLE + e.args[0]])
+                set_form_error(
+                    self, "power_type", CLUSTER_NOT_AVAILABLE + e.args[0])
         # If power_type is not set and power_parameters_skip_check is not
         # on, reset power_parameters (set it to the empty string).
-        no_power_type = cleaned_data['power_type'] == ''
+        no_power_type = cleaned_data.get('power_type', '') == ''
         if no_power_type and not skip_check:
             cleaned_data['power_parameters'] = ''
         return cleaned_data

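`set_form_error` wraps the error-setting idiom that `clean()` previously spelled out inline. A minimal, self-contained sketch of the intended usage pattern inside a Django form; the form and field below are hypothetical, while the helper body is the one added above:

    # Usage sketch for set_form_error inside a form's clean(); ExampleForm is
    # hypothetical, the helper body matches the one added in forms.py.
    from django import forms


    def set_form_error(form, field_name, error_value):
        form.errors[field_name] = form.error_class([error_value])


    class ExampleForm(forms.Form):
        power_type = forms.CharField(required=False)

        def clean(self):
            cleaned_data = super(ExampleForm, self).clean()
            if cleaned_data.get('power_type', '') == 'marker':
                # Record a validation error without raising; is_valid() now
                # returns False and the message is reported under
                # 'power_type'.
                set_form_error(self, 'power_type', "Unknown power type.")
            return cleaned_data

    # ExampleForm({'power_type': 'marker'}).is_valid()  ->  False
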
=== modified file 'src/maasserver/models/node.py'
--- src/maasserver/models/node.py	2014-05-09 22:35:43 +0000
+++ src/maasserver/models/node.py	2014-06-04 19:57:51 +0000
@@ -758,7 +758,6 @@
             power_params = {}
 
         power_params.setdefault('system_id', self.system_id)
-        power_params.setdefault('virsh', '/usr/bin/virsh')
         power_params.setdefault('fence_cdu', '/usr/sbin/fence_cdu')
         power_params.setdefault('ipmipower', '/usr/sbin/ipmipower')
         power_params.setdefault('ipmitool', '/usr/bin/ipmitool')
@@ -769,6 +768,7 @@
         power_params.setdefault('username', '')
         power_params.setdefault('power_id', self.system_id)
         power_params.setdefault('power_driver', '')
+        power_params.setdefault('power_pass', '')
 
         # The "mac" parameter defaults to the node's primary MAC
         # address, but only if not already set.

=== modified file 'src/maasserver/models/nodegroup.py'
--- src/maasserver/models/nodegroup.py	2014-05-09 22:35:43 +0000
+++ src/maasserver/models/nodegroup.py	2014-06-04 19:57:51 +0000
@@ -41,6 +41,7 @@
 from provisioningserver.tasks import (
     add_new_dhcp_host_map,
     add_seamicro15k,
+    add_virsh,
     enlist_nodes_from_ucsm,
     import_boot_images,
     report_boot_images,
@@ -287,6 +288,15 @@
         args = (mac, username, password, power_control)
         add_seamicro15k.apply_async(queue=self.uuid, args=args)
 
+    def add_virsh(self, poweraddr, password=None):
+        """ Add all of the virtual machines inside a virsh controller.
+
+        :param poweraddr: virsh connection string
+        :param password: ssh password
+        """
+        args = (poweraddr, password)
+        add_virsh.apply_async(queue=self.uuid, args=args)
+
     def enlist_nodes_from_ucsm(self, url, username, password):
         """ Add the servers from a Cicso UCS Manager.
 

=== modified file 'src/maasserver/tests/test_api_nodes.py'
--- src/maasserver/tests/test_api_nodes.py	2014-03-28 10:43:53 +0000
+++ src/maasserver/tests/test_api_nodes.py	2014-06-04 19:57:51 +0000
@@ -19,12 +19,14 @@
 import random
 
 from django.core.urlresolvers import reverse
+from maasserver import forms
 from maasserver.enum import (
     NODE_STATUS,
     NODE_STATUS_CHOICES_DICT,
     NODEGROUP_STATUS,
     NODEGROUPINTERFACE_MANAGEMENT,
     )
+from maasserver.exceptions import ClusterUnavailable
 from maasserver.fields import MAC
 from maasserver.models import Node
 from maasserver.models.user import (
@@ -175,6 +177,32 @@
             NODE_STATUS.DECLARED,
             Node.objects.get(system_id=system_id).status)
 
+    def test_POST_new_when_no_RPC_to_cluster_defaults_empty_power(self):
+        # Test for bug 1305061, if there is no cluster RPC connection
+        # then make sure that power_type is defaulted to the empty
+        # string rather than being entirely absent, which results in a
+        # crash.
+        cluster_error = factory.make_name("cluster error")
+        self.patch(forms, 'get_power_types').side_effect = (
+            ClusterUnavailable(cluster_error))
+        self.become_admin()
+        # The patching behind the scenes to avoid *real* RPC is
+        # complex and the available power types is actually a
+        # valid set, so use an invalid type to trigger the bug here.
+        power_type = factory.make_name("power_type")
+        response = self.client.post(
+            reverse('nodes_handler'),
+            {
+                'op': 'new',
+                'autodetect_nodegroup': '1',
+                'architecture': make_usable_architecture(self),
+                'mac_addresses': ['aa:bb:cc:dd:ee:ff'],
+                'power_type': power_type,
+            })
+        self.assertEqual(httplib.BAD_REQUEST, response.status_code)
+        validation_errors = json.loads(response.content)['power_type']
+        self.assertIn(cluster_error, validation_errors[0])
+
     def test_GET_list_lists_nodes(self):
         # The api allows for fetching the list of Nodes.
         node1 = factory.make_node()

=== modified file 'src/maasserver/tests/test_dhcp.py'
--- src/maasserver/tests/test_dhcp.py	2014-03-28 10:43:53 +0000
+++ src/maasserver/tests/test_dhcp.py	2014-06-04 19:57:51 +0000
@@ -33,7 +33,6 @@
 from maasserver.testing.testcase import MAASServerTestCase
 from maasserver.utils import map_enum
 from maastesting.celery import CeleryFixture
-from mock import ANY
 from netaddr import (
     IPAddress,
     IPNetwork,
@@ -72,6 +71,16 @@
             {status: None for status in unaccepted_statuses},
             managed_interfaces)
 
+    def test_configure_dhcp_stops_server_if_no_managed_interface(self):
+        self.patch(settings, "DHCP_CONNECT", True)
+        self.patch(dhcp, 'stop_dhcp_server')
+        nodegroup = factory.make_node_group(
+            status=NODEGROUP_STATUS.ACCEPTED,
+            management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED,
+            )
+        configure_dhcp(nodegroup)
+        self.assertEqual(1, dhcp.stop_dhcp_server.apply_async.call_count)
+
     def test_configure_dhcp_obeys_DHCP_CONNECT(self):
         self.patch(settings, "DHCP_CONNECT", False)
         self.patch(dhcp, 'write_dhcp_config')
@@ -205,19 +214,6 @@
         args, kwargs = task.subtask.call_args
         self.assertEqual(nodegroup.work_queue, kwargs['options']['queue'])
 
-    def test_write_dhcp_config_called_when_no_managed_interfaces(self):
-        nodegroup = factory.make_node_group(
-            status=NODEGROUP_STATUS.ACCEPTED,
-            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP)
-        [interface] = nodegroup.nodegroupinterface_set.all()
-        self.patch(settings, "DHCP_CONNECT", True)
-        self.patch(tasks, 'sudo_write_file')
-        self.patch(dhcp, 'write_dhcp_config')
-        interface.management = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED
-        interface.save()
-        dhcp.write_dhcp_config.apply_async.assert_called_once_with(
-            queue=nodegroup.work_queue, kwargs=ANY)
-
     def test_dhcp_config_gets_written_when_interface_IP_changes(self):
         nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED)
         [interface] = nodegroup.nodegroupinterface_set.all()
@@ -318,6 +314,9 @@
             factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED)
         for x in range(num_inactive_nodegroups):
             factory.make_node_group(status=NODEGROUP_STATUS.PENDING)
+        # Silence stop_dhcp_server: it will be called for the inactive
+        # nodegroups.
+        self.patch(dhcp, 'stop_dhcp_server')
 
         self.patch(settings, "DHCP_CONNECT", True)
         self.patch(dhcp, 'write_dhcp_config')

=== modified file 'src/provisioningserver/boot/__init__.py'
--- src/provisioningserver/boot/__init__.py	2014-03-28 10:43:53 +0000
+++ src/provisioningserver/boot/__init__.py	2014-06-04 19:57:51 +0000
@@ -23,6 +23,7 @@
     abstractproperty,
     )
 from errno import ENOENT
+from io import BytesIO
 from os import path
 
 from provisioningserver.boot.tftppath import compose_image_path
@@ -30,6 +31,23 @@
 from provisioningserver.utils import locate_config
 from provisioningserver.utils.registry import Registry
 import tempita
+from tftp.backend import IReader
+from zope.interface import implementer
+
+
+@implementer(IReader)
+class BytesReader:
+
+    def __init__(self, data):
+        super(BytesReader, self).__init__()
+        self.buffer = BytesIO(data)
+        self.size = len(data)
+
+    def read(self, size):
+        return self.buffer.read(size)
+
+    def finish(self):
+        self.buffer.close()
 
 
 class BootMethodError(Exception):
@@ -100,22 +118,24 @@
         """
 
     @abstractmethod
-    def match_config_path(self, path):
-        """Checks path for the configuration file that needs to be
-        generated.
+    def match_path(self, backend, path):
+        """Checks path for a file the boot method needs to handle.
 
+        :param backend: requesting backend
         :param path: requested path
         :returns: dict of match params from path, None if no match
         """
 
     @abstractmethod
-    def render_config(self, kernel_params, **extra):
-        """Render a configuration file as a unicode string.
+    def get_reader(self, backend, kernel_params, **extra):
+        """Gets the reader the backend will use for this combination of
+        boot method, kernel parameters, and extra parameters.
 
+        :param backend: requesting backend
         :param kernel_params: An instance of `KernelParameters`.
         :param extra: Allow for other arguments. This is a safety valve;
             parameters generated in another component (for example, see
-            `TFTPBackend.get_config_reader`) won't cause this to break.
+            `TFTPBackend.get_boot_method_reader`) won't cause this to break.
         """
 
     @abstractmethod
@@ -202,11 +222,13 @@
 # Import the supported boot methods after defining BootMethod.
 from provisioningserver.boot.pxe import PXEBootMethod
 from provisioningserver.boot.uefi import UEFIBootMethod
+from provisioningserver.boot.powerkvm import PowerKVMBootMethod
 
 
 builtin_boot_methods = [
     PXEBootMethod(),
     UEFIBootMethod(),
+    PowerKVMBootMethod(),
 ]
 for method in builtin_boot_methods:
     BootMethodRegistry.register_item(method.name, method)

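`BytesReader` adapts an in-memory byte string to the `IReader` interface used by the TFTP backend, so a rendered configuration can be served over TFTP without being written to disk first. A short sketch of how a caller consumes one, mirroring the chunked reads a TFTP backend performs:

    # Sketch: draining a BytesReader in TFTP-style blocks until read()
    # returns an empty string, then releasing the underlying buffer.
    from provisioningserver.boot import BytesReader

    reader = BytesReader(b"DEFAULT local\n")
    try:
        chunks = []
        while True:
            chunk = reader.read(512)
            if not chunk:
                break
            chunks.append(chunk)
        payload = b"".join(chunks)    # == b"DEFAULT local\n"
    finally:
        reader.finish()               # closes the BytesIO buffer
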
=== added file 'src/provisioningserver/boot/powerkvm.py'
--- src/provisioningserver/boot/powerkvm.py	1970-01-01 00:00:00 +0000
+++ src/provisioningserver/boot/powerkvm.py	2014-06-04 19:57:51 +0000
@@ -0,0 +1,107 @@
+# Copyright 2014 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""PowerKVM Boot Method"""
+
+from __future__ import (
+    absolute_import,
+    print_function,
+    unicode_literals,
+    )
+
+str = None
+
+__metaclass__ = type
+__all__ = [
+    'PowerKVMBootMethod',
+    ]
+
+import glob
+import os.path
+from textwrap import dedent
+
+from provisioningserver.boot import (
+    BootMethod,
+    BootMethodInstallError,
+    utils,
+    )
+from provisioningserver.boot.install_bootloader import install_bootloader
+from provisioningserver.utils import (
+    call_and_check,
+    tempdir,
+    )
+
+
+GRUB_CONFIG = dedent("""\
+    configfile (pxe)/grub/grub.cfg-${net_default_mac}
+    configfile (pxe)/grub/grub.cfg-default-ppc64el
+    """)
+
+
+class PowerKVMBootMethod(BootMethod):
+
+    name = "powerkvm"
+    template_subdir = None
+    bootloader_path = "bootppc64.bin"
+    arch_octet = "00:0C"
+
+    def match_path(self, backend, path):
+        """Doesn't need to do anything, as the UEFIBootMethod provides
+        the grub implementation needed.
+        """
+        return None
+
+    def get_reader(self, backend, kernel_params, **extra):
+        """Doesn't need to do anything, as the UEFIBootMethod provides
+        the grub implementation needed.
+        """
+        return None
+
+    def install_bootloader(self, destination):
+        """Installs the required files for PowerKVM booting into the
+        tftproot.
+        """
+        with tempdir() as tmp:
+            # Download the grub-ieee1275-bin package
+            data, filename = utils.get_updates_package(
+                'grub-ieee1275-bin', 'http://ports.ubuntu.com',
+                'main', 'ppc64el')
+            if data is None:
+                raise BootMethodInstallError(
+                    'Failed to download grub-ieee1275-bin package from '
+                    'the archive.')
+            grub_output = os.path.join(tmp, filename)
+            with open(grub_output, 'wb') as stream:
+                stream.write(data)
+
+            # Extract the package with dpkg, and install the shim
+            call_and_check(["dpkg", "-x", grub_output, tmp])
+
+            # Output the embedded config, so grub-mkimage can use it
+            config_output = os.path.join(tmp, 'grub.cfg')
+            with open(config_output, 'wb') as stream:
+                stream.write(GRUB_CONFIG.encode('utf-8'))
+
+            # Get list of grub modules
+            module_dir = os.path.join(
+                tmp, 'usr', 'lib', 'grub', 'powerpc-ieee1275')
+            modules = []
+            for module_path in glob.glob(os.path.join(module_dir, '*.mod')):
+                module_filename = os.path.basename(module_path)
+                module_name, _ = os.path.splitext(module_filename)
+                modules.append(module_name)
+
+            # Generate the grub bootloader
+            mkimage_output = os.path.join(tmp, self.bootloader_path)
+            args = [
+                'grub-mkimage',
+                '-o', mkimage_output,
+                '-O', 'powerpc-ieee1275',
+                '-d', module_dir,
+                '-c', config_output,
+                ]
+            call_and_check(args + modules)
+
+            install_bootloader(
+                mkimage_output,
+                os.path.join(destination, self.bootloader_path))

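`PowerKVMBootMethod.install_bootloader()` downloads `grub-ieee1275-bin` from ports.ubuntu.com, embeds the config above, runs `grub-mkimage`, and copies the resulting `bootppc64.bin` into the given destination. A hedged usage sketch follows; the TFTP root path is an assumption, and the call needs network access plus the dpkg and grub-mkimage tools installed:

    # Sketch: installing the PowerKVM bootloader into a TFTP root.  The
    # destination directory is hypothetical.
    from provisioningserver.boot.powerkvm import PowerKVMBootMethod

    method = PowerKVMBootMethod()
    method.install_bootloader('/var/lib/maas/tftp')
    # On success the image is at <destination>/bootppc64.bin
    # (method.bootloader_path); clients are steered to it via the
    # "00:0C" architecture octet declared on the class.
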
=== modified file 'src/provisioningserver/boot/pxe.py'
--- src/provisioningserver/boot/pxe.py	2014-03-28 10:43:53 +0000
+++ src/provisioningserver/boot/pxe.py	2014-06-04 19:57:51 +0000
@@ -22,6 +22,7 @@
 
 from provisioningserver.boot import (
     BootMethod,
+    BytesReader,
     get_parameters,
     )
 from provisioningserver.boot.install_bootloader import install_bootloader
@@ -78,10 +79,11 @@
     bootloader_path = "pxelinux.0"
     arch_octet = "00:00"
 
-    def match_config_path(self, path):
+    def match_path(self, backend, path):
         """Checks path for the configuration file that needs to be
         generated.
 
+        :param backend: requesting backend
         :param path: requested path
         :returns: dict of match params from path, None if no match
         """
@@ -90,19 +92,20 @@
             return None
         return get_parameters(match)
 
-    def render_config(self, kernel_params, **extra):
+    def get_reader(self, backend, kernel_params, **extra):
         """Render a configuration file as a unicode string.
 
+        :param backend: requesting backend
         :param kernel_params: An instance of `KernelParameters`.
         :param extra: Allow for other arguments. This is a safety valve;
             parameters generated in another component (for example, see
-            `TFTPBackend.get_config_reader`) won't cause this to break.
+            `TFTPBackend.get_boot_method_reader`) won't cause this to break.
         """
         template = self.get_template(
             kernel_params.purpose, kernel_params.arch,
             kernel_params.subarch)
         namespace = self.compose_template_namespace(kernel_params)
-        return template.substitute(namespace)
+        return BytesReader(template.substitute(namespace).encode("utf-8"))
 
     def install_bootloader(self, destination):
         """Installs the required files for PXE booting into the

=== modified file 'src/provisioningserver/boot/tests/test_boot.py'
--- src/provisioningserver/boot/tests/test_boot.py	2014-03-28 10:43:53 +0000
+++ src/provisioningserver/boot/tests/test_boot.py	2014-06-04 19:57:51 +0000
@@ -24,6 +24,7 @@
 from provisioningserver import boot
 from provisioningserver.boot import (
     BootMethod,
+    BytesReader,
     gen_template_filenames,
     )
 import tempita
@@ -36,11 +37,11 @@
     bootloader_path = "fake.efi"
     arch_octet = "00:00"
 
-    def match_config_path(self, path):
+    def match_path(self, backend, path):
         return {}
 
-    def render_config(kernel_params, **extra):
-        return ""
+    def get_reader(backend, kernel_params, **extra):
+        return BytesReader(b"")
 
     def install_bootloader():
         pass

=== added file 'src/provisioningserver/boot/tests/test_powerkvm.py'
--- src/provisioningserver/boot/tests/test_powerkvm.py	1970-01-01 00:00:00 +0000
+++ src/provisioningserver/boot/tests/test_powerkvm.py	2014-06-04 19:57:51 +0000
@@ -0,0 +1,92 @@
+# Copyright 2014 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Tests for `provisioningserver.boot.powerkvm`."""
+
+from __future__ import (
+    absolute_import,
+    print_function,
+    unicode_literals,
+    )
+
+str = None
+
+__metaclass__ = type
+__all__ = []
+
+from contextlib import contextmanager
+import os
+
+from maastesting.factory import factory
+from maastesting.matchers import MockCalledOnceWith
+from maastesting.testcase import MAASTestCase
+from provisioningserver.boot import (
+    BootMethodInstallError,
+    powerkvm as powerkvm_module,
+    utils,
+    )
+from provisioningserver.boot.powerkvm import (
+    GRUB_CONFIG,
+    PowerKVMBootMethod,
+    )
+from provisioningserver.tests.test_kernel_opts import make_kernel_parameters
+
+
+class TestPowerKVMBootMethod(MAASTestCase):
+    """Tests `provisioningserver.boot.powerkvm.PowerKVMBootMethod`."""
+
+    def test_match_path_returns_None(self):
+        method = PowerKVMBootMethod()
+        paths = [factory.getRandomString() for _ in range(3)]
+        for path in paths:
+            self.assertEqual(None, method.match_path(None, path))
+
+    def test_get_reader_returns_None(self):
+        method = PowerKVMBootMethod()
+        params = [make_kernel_parameters() for _ in range(3)]
+        for param in params:
+            self.assertEqual(None, method.get_reader(None, param))
+
+    def test_install_bootloader_get_package_raises_error(self):
+        method = PowerKVMBootMethod()
+        self.patch(utils, 'get_updates_package').return_value = (None, None)
+        self.assertRaises(
+            BootMethodInstallError, method.install_bootloader, None)
+
+    def test_install_bootloader(self):
+        method = PowerKVMBootMethod()
+        filename = factory.make_name('dpkg')
+        data = factory.getRandomString()
+        tmp = self.make_dir()
+        dest = self.make_dir()
+
+        @contextmanager
+        def tempdir():
+            try:
+                yield tmp
+            finally:
+                pass
+
+        mock_get_updates_package = self.patch(utils, 'get_updates_package')
+        mock_get_updates_package.return_value = (data, filename)
+        self.patch(powerkvm_module, 'call_and_check')
+        self.patch(powerkvm_module, 'tempdir').side_effect = tempdir
+
+        mock_install_bootloader = self.patch(
+            powerkvm_module, 'install_bootloader')
+
+        method.install_bootloader(dest)
+
+        with open(os.path.join(tmp, filename), 'rb') as stream:
+            saved_data = stream.read()
+        self.assertEqual(data, saved_data)
+
+        with open(os.path.join(tmp, 'grub.cfg'), 'rb') as stream:
+            saved_config = stream.read().decode('utf-8')
+        self.assertEqual(GRUB_CONFIG, saved_config)
+
+        mkimage_expected = os.path.join(tmp, method.bootloader_path)
+        dest_expected = os.path.join(dest, method.bootloader_path)
+        self.assertThat(
+            mock_install_bootloader,
+            MockCalledOnceWith(mkimage_expected, dest_expected))

=== modified file 'src/provisioningserver/boot/tests/test_pxe.py'
--- src/provisioningserver/boot/tests/test_pxe.py	2014-03-28 10:43:53 +0000
+++ src/provisioningserver/boot/tests/test_pxe.py	2014-06-04 19:57:51 +0000
@@ -20,6 +20,7 @@
 from maastesting.factory import factory
 from maastesting.testcase import MAASTestCase
 from provisioningserver import kernel_opts
+from provisioningserver.boot import BytesReader
 from provisioningserver.boot.pxe import (
     ARP_HTYPE,
     PXEBootMethod,
@@ -150,14 +151,15 @@
 class TestPXEBootMethodRenderConfig(MAASTestCase):
     """Tests for `provisioningserver.boot.pxe.PXEBootMethod.render_config`."""
 
-    def test_render_install(self):
+    def test_get_reader_install(self):
         # Given the right configuration options, the PXE configuration is
         # correctly rendered.
         method = PXEBootMethod()
         params = make_kernel_parameters(self, purpose="install")
-        output = method.render_config(kernel_params=params)
-        # The output is always a Unicode string.
-        self.assertThat(output, IsInstance(unicode))
+        output = method.get_reader(backend=None, kernel_params=params)
+        # The output is a BytesReader.
+        self.assertThat(output, IsInstance(BytesReader))
+        output = output.read(10000)
         # The template has rendered without error. PXELINUX configurations
         # typically start with a DEFAULT line.
         self.assertThat(output, StartsWith("DEFAULT "))
@@ -177,54 +179,58 @@
                     r'.*^\s+APPEND .+?$',
                     re.MULTILINE | re.DOTALL)))
 
-    def test_render_with_extra_arguments_does_not_affect_output(self):
-        # render_config() allows any keyword arguments as a safety valve.
+    def test_get_reader_with_extra_arguments_does_not_affect_output(self):
+        # get_reader() allows any keyword arguments as a safety valve.
         method = PXEBootMethod()
         options = {
+            "backend": None,
             "kernel_params": make_kernel_parameters(self, purpose="install"),
         }
         # Capture the output before sprinking in some random options.
-        output_before = method.render_config(**options)
+        output_before = method.get_reader(**options).read(10000)
         # Sprinkle some magic in.
         options.update(
             (factory.make_name("name"), factory.make_name("value"))
             for _ in range(10))
         # Capture the output after sprinking in some random options.
-        output_after = method.render_config(**options)
+        output_after = method.get_reader(**options).read(10000)
         # The generated template is the same.
         self.assertEqual(output_before, output_after)
 
-    def test_render_config_with_local_purpose(self):
+    def test_get_reader_with_local_purpose(self):
         # If purpose is "local", the config.localboot.template should be
         # used.
         method = PXEBootMethod()
         options = {
+            "backend": None,
             "kernel_params": make_kernel_parameters(purpose="local"),
             }
-        output = method.render_config(**options)
+        output = method.get_reader(**options).read(10000)
         self.assertIn("LOCALBOOT 0", output)
 
-    def test_render_config_with_local_purpose_i386_arch(self):
+    def test_get_reader_with_local_purpose_i386_arch(self):
         # Intel i386 is a special case and needs to use the chain.c32
         # loader as the LOCALBOOT PXE directive is unreliable.
         method = PXEBootMethod()
         options = {
+            "backend": None,
             "kernel_params": make_kernel_parameters(
                 arch="i386", purpose="local"),
         }
-        output = method.render_config(**options)
+        output = method.get_reader(**options).read(10000)
         self.assertIn("chain.c32", output)
         self.assertNotIn("LOCALBOOT", output)
 
-    def test_render_config_with_local_purpose_amd64_arch(self):
+    def test_get_reader_with_local_purpose_amd64_arch(self):
         # Intel amd64 is a special case and needs to use the chain.c32
         # loader as the LOCALBOOT PXE directive is unreliable.
         method = PXEBootMethod()
         options = {
+            "backend": None,
             "kernel_params": make_kernel_parameters(
                 arch="amd64", purpose="local"),
         }
-        output = method.render_config(**options)
+        output = method.get_reader(**options).read(10000)
         self.assertIn("chain.c32", output)
         self.assertNotIn("LOCALBOOT", output)
 
@@ -237,17 +243,18 @@
         ("xinstall", dict(purpose="xinstall")),
         ]
 
-    def test_render_config_scenarios(self):
+    def test_get_reader_scenarios(self):
         # The commissioning config uses an extra PXELINUX module to auto
         # select between i386 and amd64.
         method = PXEBootMethod()
         get_ephemeral_name = self.patch(kernel_opts, "get_ephemeral_name")
         get_ephemeral_name.return_value = factory.make_name("ephemeral")
         options = {
+            "backend": None,
             "kernel_params": make_kernel_parameters(
                 testcase=self, subarch="generic", purpose=self.purpose),
         }
-        output = method.render_config(**options)
+        output = method.get_reader(**options).read(10000)
         config = parse_pxe_config(output)
         # The default section is defined.
         default_section_label = config.header["DEFAULT"]

=== modified file 'src/provisioningserver/boot/tests/test_uefi.py'
--- src/provisioningserver/boot/tests/test_uefi.py	2014-03-28 10:43:53 +0000
+++ src/provisioningserver/boot/tests/test_uefi.py	2014-06-04 19:57:51 +0000
@@ -18,6 +18,7 @@
 
 from maastesting.factory import factory
 from maastesting.testcase import MAASTestCase
+from provisioningserver.boot import BytesReader
 from provisioningserver.boot.tftppath import compose_image_path
 from provisioningserver.boot.uefi import (
     re_config_file,
@@ -60,14 +61,15 @@
 class TestRenderUEFIConfig(MAASTestCase):
     """Tests for `provisioningserver.boot.uefi.UEFIBootMethod`."""
 
-    def test_render(self):
+    def test_get_reader(self):
         # Given the right configuration options, the UEFI configuration is
         # correctly rendered.
         method = UEFIBootMethod()
         params = make_kernel_parameters(purpose="install")
-        output = method.render_config(kernel_params=params)
-        # The output is always a Unicode string.
-        self.assertThat(output, IsInstance(unicode))
+        output = method.get_reader(backend=None, kernel_params=params)
+        # The output is a BytesReader.
+        self.assertThat(output, IsInstance(BytesReader))
+        output = output.read(10000)
         # The template has rendered without error. UEFI configurations
         # typically start with a DEFAULT line.
         self.assertThat(output, StartsWith("set default=\"0\""))
@@ -85,31 +87,34 @@
                     r'.*^\s+initrd %s/di-initrd$' % re.escape(image_dir),
                     re.MULTILINE | re.DOTALL)))
 
-    def test_render_with_extra_arguments_does_not_affect_output(self):
-        # render_config() allows any keyword arguments as a safety valve.
+    def test_get_reader_with_extra_arguments_does_not_affect_output(self):
+        # get_reader() allows any keyword arguments as a safety valve.
         method = UEFIBootMethod()
         options = {
+            "backend": None,
             "kernel_params": make_kernel_parameters(purpose="install"),
         }
         # Capture the output before sprinking in some random options.
-        output_before = method.render_config(**options)
+        output_before = method.get_reader(**options).read(10000)
         # Sprinkle some magic in.
         options.update(
             (factory.make_name("name"), factory.make_name("value"))
             for _ in range(10))
         # Capture the output after sprinking in some random options.
-        output_after = method.render_config(**options)
+        output_after = method.get_reader(**options).read(10000)
         # The generated template is the same.
         self.assertEqual(output_before, output_after)
 
-    def test_render_config_with_local_purpose(self):
+    def test_get_reader_with_local_purpose(self):
         # If purpose is "local", the config.localboot.template should be
         # used.
         method = UEFIBootMethod()
         options = {
-            "kernel_params": make_kernel_parameters(purpose="local"),
+            "backend": None,
+            "kernel_params": make_kernel_parameters(
+                purpose="local", arch="amd64"),
             }
-        output = method.render_config(**options)
+        output = method.get_reader(**options).read(10000)
         self.assertIn("configfile /efi/ubuntu/grub.cfg", output)
 
 

=== modified file 'src/provisioningserver/boot/uefi.py'
--- src/provisioningserver/boot/uefi.py	2014-03-28 10:43:53 +0000
+++ src/provisioningserver/boot/uefi.py	2014-06-04 19:57:51 +0000
@@ -25,6 +25,7 @@
 from provisioningserver.boot import (
     BootMethod,
     BootMethodInstallError,
+    BytesReader,
     get_parameters,
     utils,
     )
@@ -120,10 +121,11 @@
     bootloader_path = "bootx64.efi"
     arch_octet = "00:07"  # AMD64 EFI
 
-    def match_config_path(self, path):
+    def match_path(self, backend, path):
         """Checks path for the configuration file that needs to be
         generated.
 
+        :param backend: requesting backend
         :param path: requested path
         :returns: dict of match params from path, None if no match
         """
@@ -139,19 +141,20 @@
 
         return params
 
-    def render_config(self, kernel_params, **extra):
+    def get_reader(self, backend, kernel_params, **extra):
         """Render a configuration file as a unicode string.
 
+        :param backend: requesting backend
         :param kernel_params: An instance of `KernelParameters`.
         :param extra: Allow for other arguments. This is a safety valve;
             parameters generated in another component (for example, see
-            `TFTPBackend.get_config_reader`) won't cause this to break.
+            `TFTPBackend.get_boot_method_reader`) won't cause this to break.
         """
         template = self.get_template(
             kernel_params.purpose, kernel_params.arch,
             kernel_params.subarch)
         namespace = self.compose_template_namespace(kernel_params)
-        return template.substitute(namespace)
+        return BytesReader(template.substitute(namespace).encode("utf-8"))
 
     def install_bootloader(self, destination):
         """Installs the required files for UEFI booting into the

=== added file 'src/provisioningserver/custom_hardware/tests/test_virsh.py'
--- src/provisioningserver/custom_hardware/tests/test_virsh.py	1970-01-01 00:00:00 +0000
+++ src/provisioningserver/custom_hardware/tests/test_virsh.py	2014-06-04 19:57:51 +0000
@@ -0,0 +1,351 @@
+# Copyright 2014 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Tests for `provisioningserver.custom_hardware.virsh`.
+"""
+
+from __future__ import (
+    absolute_import,
+    print_function,
+    unicode_literals,
+    )
+
+str = None
+
+__metaclass__ = type
+__all__ = []
+
+import random
+from textwrap import dedent
+
+from maastesting.factory import factory
+from maastesting.matchers import (
+    MockCalledOnceWith,
+    MockCallsMatch,
+    )
+from maastesting.testcase import MAASTestCase
+from mock import call
+from provisioningserver.custom_hardware import (
+    utils,
+    virsh,
+    )
+
+
+SAMPLE_IFLIST = dedent("""
+    Interface  Type       Source     Model       MAC
+    -------------------------------------------------------
+    -          bridge     br0        e1000       %s
+    -          bridge     br1        e1000       %s
+    """)
+
+SAMPLE_DUMPXML = dedent("""
+    <domain type='kvm'>
+      <name>test</name>
+      <memory unit='KiB'>4096576</memory>
+      <currentMemory unit='KiB'>4096576</currentMemory>
+      <vcpu placement='static'>1</vcpu>
+      <os>
+        <type arch='%s'>hvm</type>
+        <boot dev='network'/>
+      </os>
+    </domain>
+    """)
+
+
+class TestVirshSSH(MAASTestCase):
+    """Tests for `VirshSSH`."""
+
+    def configure_virshssh_pexpect(self, inputs=None):
+        """Configures the VirshSSH class to use 'cat' process
+        for testing instead of the actual virsh."""
+        conn = virsh.VirshSSH(timeout=1)
+        self.addCleanup(conn.close)
+        self.patch(conn, '_execute')
+        conn._spawn('cat')
+        if inputs is not None:
+            for line in inputs:
+                conn.sendline(line)
+        return conn
+
+    def configure_virshssh(self, output):
+        self.patch(virsh.VirshSSH, 'run').return_value = output
+        return virsh.VirshSSH()
+
+    def test_login_prompt(self):
+        virsh_outputs = [
+            'virsh # '
+        ]
+        conn = self.configure_virshssh_pexpect(virsh_outputs)
+        self.assertTrue(conn.login(poweraddr=None))
+
+    def test_login_with_sshkey(self):
+        virsh_outputs = [
+            "The authenticity of host '127.0.0.1' can't be established.",
+            "ECDSA key fingerprint is "
+            "00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff.",
+            "Are you sure you want to continue connecting (yes/no)? ",
+        ]
+        conn = self.configure_virshssh_pexpect(virsh_outputs)
+        mock_sendline = self.patch(conn, 'sendline')
+        conn.login(poweraddr=None)
+        self.assertThat(mock_sendline, MockCalledOnceWith('yes'))
+
+    def test_login_with_password(self):
+        virsh_outputs = [
+            "ubuntu@%s's password: " % factory.getRandomIPAddress(),
+        ]
+        conn = self.configure_virshssh_pexpect(virsh_outputs)
+        fake_password = factory.make_name('password')
+        mock_sendline = self.patch(conn, 'sendline')
+        conn.login(poweraddr=None, password=fake_password)
+        self.assertThat(mock_sendline, MockCalledOnceWith(fake_password))
+
+    def test_login_missing_password(self):
+        virsh_outputs = [
+            "ubuntu@%s's password: " % factory.getRandomIPAddress(),
+        ]
+        conn = self.configure_virshssh_pexpect(virsh_outputs)
+        mock_close = self.patch(conn, 'close')
+        self.assertFalse(conn.login(poweraddr=None, password=None))
+        mock_close.assert_called()
+
+    def test_login_invalid(self):
+        virsh_outputs = [
+            factory.getRandomString(),
+        ]
+        conn = self.configure_virshssh_pexpect(virsh_outputs)
+        mock_close = self.patch(conn, 'close')
+        self.assertFalse(conn.login(poweraddr=None))
+        mock_close.assert_called()
+
+    def test_logout(self):
+        conn = self.configure_virshssh_pexpect()
+        mock_sendline = self.patch(conn, 'sendline')
+        mock_close = self.patch(conn, 'close')
+        conn.logout()
+        self.assertThat(mock_sendline, MockCalledOnceWith('quit'))
+        mock_close.assert_called()
+
+    def test_prompt(self):
+        virsh_outputs = [
+            'virsh # '
+        ]
+        conn = self.configure_virshssh_pexpect(virsh_outputs)
+        self.assertTrue(conn.prompt())
+
+    def test_invalid_prompt(self):
+        virsh_outputs = [
+            factory.getRandomString()
+        ]
+        conn = self.configure_virshssh_pexpect(virsh_outputs)
+        self.assertFalse(conn.prompt())
+
+    def test_run(self):
+        cmd = ['list', '--all', '--name']
+        expected = ' '.join(cmd)
+        names = [factory.make_name('machine') for _ in range(3)]
+        conn = self.configure_virshssh_pexpect()
+        conn.before = '\n'.join([expected] + names)
+        mock_sendline = self.patch(conn, 'sendline')
+        mock_prompt = self.patch(conn, 'prompt')
+        output = conn.run(cmd)
+        self.assertThat(mock_sendline, MockCalledOnceWith(expected))
+        mock_prompt.assert_called()
+        self.assertEqual('\n'.join(names), output)
+
+    def test_list(self):
+        names = [factory.make_name('machine') for _ in range(3)]
+        conn = self.configure_virshssh('\n'.join(names))
+        expected = conn.list()
+        self.assertItemsEqual(names, expected)
+
+    def test_get_state(self):
+        state = factory.make_name('state')
+        conn = self.configure_virshssh(state)
+        expected = conn.get_state('')
+        self.assertEqual(state, expected)
+
+    def test_get_state_error(self):
+        conn = self.configure_virshssh('error')
+        expected = conn.get_state('')
+        self.assertEqual(None, expected)
+
+    def test_mac_addresses_returns_list(self):
+        macs = [factory.getRandomMACAddress() for _ in range(2)]
+        output = SAMPLE_IFLIST % (macs[0], macs[1])
+        conn = self.configure_virshssh(output)
+        expected = conn.get_mac_addresses('')
+        for i in range(2):
+            self.assertEqual(macs[i], expected[i])
+
+    def test_get_arch_returns_valid(self):
+        arch = factory.make_name('arch')
+        output = SAMPLE_DUMPXML % arch
+        conn = self.configure_virshssh(output)
+        expected = conn.get_arch('')
+        self.assertEqual(arch, expected)
+
+    def test_get_arch_returns_valid_fixed(self):
+        arch = random.choice(virsh.ARCH_FIX.keys())
+        fixed_arch = virsh.ARCH_FIX[arch]
+        output = SAMPLE_DUMPXML % arch
+        conn = self.configure_virshssh(output)
+        expected = conn.get_arch('')
+        self.assertEqual(fixed_arch, expected)
+
+
+class TestVirsh(MAASTestCase):
+    """Tests for `probe_virsh_and_enlist`."""
+
+    def test_probe_and_enlist(self):
+        # Patch VirshSSH list so that some machines are returned
+        # with some fake architectures.
+        machines = [factory.make_name('machine') for _ in range(3)]
+        self.patch(virsh.VirshSSH, 'list').return_value = machines
+        fake_arch = factory.make_name('arch')
+        mock_arch = self.patch(virsh.VirshSSH, 'get_arch')
+        mock_arch.return_value = fake_arch
+
+        # Patch get_state so that one of the machines is on, so we
+        # can check that it will be forced off.
+        fake_states = [
+            virsh.VirshVMState.ON,
+            virsh.VirshVMState.OFF,
+            virsh.VirshVMState.OFF
+            ]
+        mock_state = self.patch(virsh.VirshSSH, 'get_state')
+        mock_state.side_effect = fake_states
+
+        # Set up the power parameters that we should expect to be
+        # the output of probe_and_enlist.
+        fake_password = factory.getRandomString()
+        poweraddr = factory.make_name('poweraddr')
+        called_params = []
+        fake_macs = []
+        for machine in machines:
+            macs = [factory.getRandomMACAddress() for _ in range(3)]
+            fake_macs.append(macs)
+            called_params.append({
+                'power_address': poweraddr,
+                'power_id': machine,
+                'power_pass': fake_password,
+                })
+
+        # Patch the get_mac_addresses so we get a known list of
+        # mac addresses for each machine.
+        mock_macs = self.patch(virsh.VirshSSH, 'get_mac_addresses')
+        mock_macs.side_effect = fake_macs
+
+        # Patch the poweroff and create as we really don't want these
+        # actions to occur, but want to also check that they are called.
+        mock_poweroff = self.patch(virsh.VirshSSH, 'poweroff')
+        mock_create = self.patch(utils, 'create_node')
+
+        # Patch login and logout so that we don't really contact
+        # a server at the fake poweraddr
+        mock_login = self.patch(virsh.VirshSSH, 'login')
+        mock_login.return_value = True
+        mock_logout = self.patch(virsh.VirshSSH, 'logout')
+
+        # Perform the probe and enlist
+        virsh.probe_virsh_and_enlist(poweraddr, password=fake_password)
+
+        # Check that login was called with the provided poweraddr and
+        # password.
+        self.assertThat(
+            mock_login, MockCalledOnceWith(poweraddr, fake_password))
+
+        # The first machine should have poweroff called on it, as it
+        # was initially in the on state.
+        self.assertThat(
+            mock_poweroff, MockCalledOnceWith(machines[0]))
+
+        # Check that the create command had the correct parameters for
+        # each machine.
+        self.assertThat(
+            mock_create, MockCallsMatch(
+                call(fake_macs[0], fake_arch, 'virsh', called_params[0]),
+                call(fake_macs[1], fake_arch, 'virsh', called_params[1]),
+                call(fake_macs[2], fake_arch, 'virsh', called_params[2])))
+        mock_logout.assert_called()
+
+    def test_probe_and_enlist_login_failure(self):
+        mock_login = self.patch(virsh.VirshSSH, 'login')
+        mock_login.return_value = False
+        self.assertRaises(
+            virsh.VirshError, virsh.probe_virsh_and_enlist,
+            factory.make_name('poweraddr'), password=factory.getRandomString())
+
+
+class TestVirshPowerControl(MAASTestCase):
+    """Tests for `power_control_virsh`."""
+
+    def test_power_control_login_failure(self):
+        mock_login = self.patch(virsh.VirshSSH, 'login')
+        mock_login.return_value = False
+        self.assertRaises(
+            virsh.VirshError, virsh.power_control_virsh,
+            factory.make_name('poweraddr'), factory.make_name('machine'),
+            'on', password=factory.getRandomString())
+
+    def test_power_control_on(self):
+        mock_login = self.patch(virsh.VirshSSH, 'login')
+        mock_login.return_value = True
+        mock_state = self.patch(virsh.VirshSSH, 'get_state')
+        mock_state.return_value = virsh.VirshVMState.OFF
+        mock_poweron = self.patch(virsh.VirshSSH, 'poweron')
+
+        poweraddr = factory.make_name('poweraddr')
+        machine = factory.make_name('machine')
+        virsh.power_control_virsh(poweraddr, machine, 'on')
+
+        self.assertThat(
+            mock_login, MockCalledOnceWith(poweraddr, None))
+        self.assertThat(
+            mock_state, MockCalledOnceWith(machine))
+        self.assertThat(
+            mock_poweron, MockCalledOnceWith(machine))
+
+    def test_power_control_off(self):
+        mock_login = self.patch(virsh.VirshSSH, 'login')
+        mock_login.return_value = True
+        mock_state = self.patch(virsh.VirshSSH, 'get_state')
+        mock_state.return_value = virsh.VirshVMState.ON
+        mock_poweroff = self.patch(virsh.VirshSSH, 'poweroff')
+
+        poweraddr = factory.make_name('poweraddr')
+        machine = factory.make_name('machine')
+        virsh.power_control_virsh(poweraddr, machine, 'off')
+
+        self.assertThat(
+            mock_login, MockCalledOnceWith(poweraddr, None))
+        self.assertThat(
+            mock_state, MockCalledOnceWith(machine))
+        self.assertThat(
+            mock_poweroff, MockCalledOnceWith(machine))
+
+    def test_power_control_bad_domain(self):
+        mock_login = self.patch(virsh.VirshSSH, 'login')
+        mock_login.return_value = True
+        mock_state = self.patch(virsh.VirshSSH, 'get_state')
+        mock_state.return_value = None
+
+        poweraddr = factory.make_name('poweraddr')
+        machine = factory.make_name('machine')
+        self.assertRaises(
+            virsh.VirshError, virsh.power_control_virsh,
+            poweraddr, machine, 'on')
+
+    def test_power_control_power_failure(self):
+        mock_login = self.patch(virsh.VirshSSH, 'login')
+        mock_login.return_value = True
+        mock_state = self.patch(virsh.VirshSSH, 'get_state')
+        mock_state.return_value = virsh.VirshVMState.ON
+        mock_poweroff = self.patch(virsh.VirshSSH, 'poweroff')
+        mock_poweroff.return_value = False
+
+        poweraddr = factory.make_name('poweraddr')
+        machine = factory.make_name('machine')
+        self.assertRaises(
+            virsh.VirshError, virsh.power_control_virsh,
+            poweraddr, machine, 'off')

=== modified file 'src/provisioningserver/custom_hardware/utils.py'
--- src/provisioningserver/custom_hardware/utils.py	2014-03-28 10:43:53 +0000
+++ src/provisioningserver/custom_hardware/utils.py	2014-06-04 19:57:51 +0000
@@ -42,3 +42,7 @@
         'autodetect_nodegroup': 'true'
     }
     return client.post('/api/1.0/nodes/', 'new', **data)
+
+
+def escape_string(data):
+    return repr(data).decode("ascii")

=== added file 'src/provisioningserver/custom_hardware/virsh.py'
--- src/provisioningserver/custom_hardware/virsh.py	1970-01-01 00:00:00 +0000
+++ src/provisioningserver/custom_hardware/virsh.py	2014-06-04 19:57:51 +0000
@@ -0,0 +1,223 @@
+# Copyright 2014 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+from __future__ import (
+    absolute_import,
+    print_function,
+    unicode_literals,
+    )
+
+str = None
+
+__metaclass__ = type
+__all__ = [
+    'probe_virsh_and_enlist',
+    ]
+
+from lxml import etree
+import pexpect
+import provisioningserver.custom_hardware.utils as utils
+
+
+XPATH_ARCH = "/domain/os/type/@arch"
+
+# Virsh stores the architecture under a different label
+# than MAAS does. This maps virsh architectures to
+# MAAS architectures.
+ARCH_FIX = {
+    'x86_64': 'amd64',
+    'ppc64': 'ppc64el',
+    }
+
+
+class VirshVMState:
+    OFF = "shut off"
+    ON = "running"
+
+
+class VirshError(Exception):
+    """Failure communicating to virsh. """
+
+
+class VirshSSH(pexpect.spawn):
+
+    PROMPT = r"virsh \#"
+    PROMPT_SSHKEY = "(?i)are you sure you want to continue connecting"
+    PROMPT_PASSWORD = "(?i)(?:password)|(?:passphrase for key)"
+    PROMPT_DENIED = "(?i)permission denied"
+    PROMPT_CLOSED = "(?i)connection closed by remote host"
+
+    PROMPTS = [
+        PROMPT_SSHKEY,
+        PROMPT_PASSWORD,
+        PROMPT,
+        PROMPT_DENIED,
+        PROMPT_CLOSED,
+        pexpect.TIMEOUT,
+        pexpect.EOF,
+    ]
+
+    I_PROMPT = PROMPTS.index(PROMPT)
+    I_PROMPT_SSHKEY = PROMPTS.index(PROMPT_SSHKEY)
+    I_PROMPT_PASSWORD = PROMPTS.index(PROMPT_PASSWORD)
+
+    def __init__(self, timeout=30, maxread=2000):
+        super(VirshSSH, self).__init__(
+            None, timeout=timeout, maxread=maxread)
+        self.name = '<virssh>'
+
+    def _execute(self, poweraddr):
+        """Spawns the pexpect command."""
+        cmd = 'virsh --connect %s' % poweraddr
+        self._spawn(cmd)
+
+    def login(self, poweraddr, password=None):
+        """Starts connection to virsh."""
+        self._execute(poweraddr)
+        i = self.expect(self.PROMPTS, timeout=10)
+        if i == self.I_PROMPT_SSHKEY:
+            # New certificate: always accept it, but if it later
+            # changes the login will fail.
+            self.sendline("yes")
+            i = self.expect(self.PROMPTS)
+        elif i == self.I_PROMPT_PASSWORD:
+            # Requesting password, give it if available.
+            if password is None:
+                self.close()
+                return False
+            self.sendline(password)
+            i = self.expect(self.PROMPTS)
+
+        if i != self.I_PROMPT:
+            # Something bad happened: a disconnect, a timeout,
+            # or a wrong password.
+            self.close()
+            return False
+        return True
+
+    def logout(self):
+        """Quits the virsh session."""
+        self.sendline("quit")
+        self.close()
+
+    def prompt(self, timeout=None):
+        """Waits for virsh prompt."""
+        if timeout is None:
+            timeout = self.timeout
+        i = self.expect([self.PROMPT, pexpect.TIMEOUT], timeout=timeout)
+        if i == 1:
+            return False
+        return True
+
+    def run(self, args):
+        cmd = ' '.join(args)
+        self.sendline(cmd)
+        self.prompt()
+        result = self.before.splitlines()
+        return '\n'.join(result[1:])
+
+    def list(self):
+        """Lists all virtual machines by name."""
+        machines = self.run(['list', '--all', '--name'])
+        return machines.strip().splitlines()
+
+    def get_state(self, machine):
+        """Gets the virtual machine state."""
+        state = self.run(['domstate', machine])
+        state = state.strip()
+        if 'error' in state:
+            return None
+        return state
+
+    def get_mac_addresses(self, machine):
+        """Gets list of mac addressess assigned to the virtual machine."""
+        output = self.run(['domiflist', machine]).strip()
+        if 'error' in output:
+            return None
+        output = output.splitlines()[2:]
+        return [line.split()[4] for line in output]
+
+    def get_arch(self, machine):
+        """Gets the virtual machine architecture."""
+        output = self.run(['dumpxml', machine]).strip()
+        if 'error' in output:
+            return None
+
+        doc = etree.XML(output)
+        evaluator = etree.XPathEvaluator(doc)
+        arch = evaluator(XPATH_ARCH)[0]
+
+        # Map architectures that virsh reports under a different
+        # name to the name that MAAS understands.
+        return ARCH_FIX.get(arch, arch)
+
+    def poweron(self, machine):
+        """Poweron a virtual machine."""
+        output = self.run(['start', machine]).strip()
+        if 'error' in output:
+            return False
+        return True
+
+    def poweroff(self, machine):
+        """Poweroff a virtual machine."""
+        output = self.run(['destroy', machine]).strip()
+        if 'error' in output:
+            return False
+        return True
+
+
+def probe_virsh_and_enlist(poweraddr, password=None):
+    """Extracts all of the virtual machines from virsh and enlists them
+    into MAAS.
+
+    :param poweraddr: virsh connection string
+    """
+    conn = VirshSSH()
+    if not conn.login(poweraddr, password):
+        raise VirshError('Failed to login to virsh console.')
+
+    for machine in conn.list():
+        arch = conn.get_arch(machine)
+        state = conn.get_state(machine)
+        macs = conn.get_mac_addresses(machine)
+
+        # Force the machine off: MAAS will control the machine and
+        # it needs to start in a known (off) state.
+        if state == VirshVMState.ON:
+            conn.poweroff(machine)
+
+        params = {
+            'power_address': poweraddr,
+            'power_id': machine,
+        }
+        if password is not None:
+            params['power_pass'] = password
+        utils.create_node(macs, arch, 'virsh', params)
+
+    conn.logout()
+
+
+def power_control_virsh(poweraddr, machine, power_change, password=None):
+    """Powers controls a virtual machine using virsh."""
+
+    # Force password to None if blank, as the power control
+    # script will send a blank password if one is not set.
+    if password == '':
+        password = None
+
+    conn = VirshSSH()
+    if not conn.login(poweraddr, password):
+        raise VirshError('Failed to login to virsh console.')
+
+    state = conn.get_state(machine)
+    if state is None:
+        raise VirshError('Failed to get domain: %s' % machine)
+
+    if state == VirshVMState.OFF:
+        if power_change == 'on':
+            if conn.poweron(machine) is False:
+                raise VirshError('Failed to power on domain: %s' % machine)
+    elif state == VirshVMState.ON:
+        if power_change == 'off':
+            if conn.poweroff(machine) is False:
+                raise VirshError('Failed to power off domain: %s' % machine)
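
As a rough usage illustration of the new driver above (a minimal sketch, not
part of this branch: the helper name dump_virsh_inventory is made up, and a
reachable libvirt URI such as qemu+ssh://user@host/system is assumed):

    from __future__ import print_function

    from provisioningserver.custom_hardware.virsh import VirshSSH

    def dump_virsh_inventory(poweraddr, password=None):
        # Connect the same way probe_virsh_and_enlist() does, then print
        # the name, state, architecture and MAC addresses of each domain.
        conn = VirshSSH()
        if not conn.login(poweraddr, password):
            raise RuntimeError("could not log in to %s" % poweraddr)
        try:
            for machine in conn.list():
                print(machine, conn.get_state(machine), conn.get_arch(machine))
                print("  MACs:", conn.get_mac_addresses(machine))
        finally:
            conn.logout()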

=== modified file 'src/provisioningserver/dhcp/leases.py'
--- src/provisioningserver/dhcp/leases.py	2013-10-04 12:33:05 +0000
+++ src/provisioningserver/dhcp/leases.py	2014-06-04 19:57:51 +0000
@@ -54,7 +54,7 @@
     get_recorded_nodegroup_uuid,
     )
 from provisioningserver.cluster_config import get_maas_url
-from provisioningserver.dhcp.leases_parser import parse_leases
+from provisioningserver.dhcp.leases_parser_fast import parse_leases
 
 
 logger = getLogger(__name__)

=== added file 'src/provisioningserver/dhcp/leases_parser_fast.py'
--- src/provisioningserver/dhcp/leases_parser_fast.py	1970-01-01 00:00:00 +0000
+++ src/provisioningserver/dhcp/leases_parser_fast.py	2014-06-04 19:57:51 +0000
@@ -0,0 +1,87 @@
+# Copyright 2013 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""A speedier version of `leases_parser`.
+
+This extracts the relevant stanzas from a leases file, keeping only the
+most recent "host" and "lease" entries, then uses the existing and
+properly defined but slow parser to parse them. This massively speeds up
+parsing a leases file that contains a modest number of unique host and
+lease entries, but has become very large because of churn.
+"""
+
+from __future__ import (
+    absolute_import,
+    print_function,
+    unicode_literals,
+    )
+
+str = None
+
+__metaclass__ = type
+__all__ = [
+    'parse_leases',
+    ]
+
+from collections import defaultdict
+from datetime import datetime
+from itertools import chain
+import re
+
+from provisioningserver.dhcp.leases_parser import (
+    get_host_mac,
+    has_expired,
+    is_host,
+    is_lease,
+    lease_parser,
+    )
+
+
+re_entry = re.compile(
+    r'''
+    ^\s*              # Ignore leading whitespace on each line.
+    (host|lease)      # Look only for host or lease stanzas.
+    \s+               # Mandatory whitespace.
+    ([0-9a-fA-F.:]+)  # Capture the IP/MAC address for this stanza.
+    \s*{              # Optional whitespace then an opening brace.
+    ''',
+    re.MULTILINE | re.DOTALL | re.VERBOSE)
+
+
+def find_lease_starts(leases_contents):
+    results = defaultdict(dict)
+    for match in re_entry.finditer(leases_contents):
+        stanza, address = match.groups()
+        results[stanza][address] = match.start()
+    return chain.from_iterable(
+        mapping.itervalues() for mapping in results.itervalues())
+
+
+def extract_leases(leases_contents):
+    starts = find_lease_starts(leases_contents)
+    for start in sorted(starts):
+        record = lease_parser.scanString(leases_contents[start:])
+        try:
+            token, _, _ = next(record)
+        except StopIteration:
+            pass
+        else:
+            yield token
+
+
+def parse_leases(leases_contents):
+    results = {}
+    now = datetime.utcnow()
+    for entry in extract_leases(leases_contents):
+        if is_lease(entry):
+            if not has_expired(entry, now):
+                results[entry.ip] = entry.hardware.mac
+        elif is_host(entry):
+            mac = get_host_mac(entry)
+            if mac is None:
+                # TODO: Test this.
+                if entry.ip in results:
+                    del results[entry.ip]
+            else:
+                results[entry.ip] = mac
+    return results
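
To make the two-stage approach above concrete, here is a minimal sketch
(illustrative only; the addresses are arbitrary) of what the fast parser
produces when an unexpired lease follows a host stanza for the same IP,
mirroring the shadowing tests added further down:

    from textwrap import dedent

    from provisioningserver.dhcp.leases_parser_fast import parse_leases

    leases = parse_leases(dedent("""\
        host 192.168.1.10 {
            dynamic;
            hardware ethernet 00:16:3e:aa:bb:cc;
            fixed-address 192.168.1.10;
        }
        lease 192.168.1.10 {
            starts 5 2010/01/01 00:00:01;
            hardware ethernet 00:16:3e:dd:ee:ff;
        }
        """))
    # The regex pass keeps only the latest "host" and "lease" stanza per
    # address; the unexpired lease then shadows the earlier host entry.
    assert leases == {'192.168.1.10': '00:16:3e:dd:ee:ff'}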

=== modified file 'src/provisioningserver/dhcp/tests/test_leases_parser.py'
--- src/provisioningserver/dhcp/tests/test_leases_parser.py	2013-10-04 12:33:05 +0000
+++ src/provisioningserver/dhcp/tests/test_leases_parser.py	2014-06-04 19:57:51 +0000
@@ -20,6 +20,10 @@
 
 from maastesting.factory import factory
 from maastesting.testcase import MAASTestCase
+from provisioningserver.dhcp import (
+    leases_parser,
+    leases_parser_fast,
+    )
 from provisioningserver.dhcp.leases_parser import (
     combine_entries,
     gather_hosts,
@@ -30,197 +34,51 @@
     is_host,
     is_lease,
     lease_parser,
-    parse_leases,
-    )
-
-
-class TestLeasesParser(MAASTestCase):
-
-    def fake_parsed_lease(self, ip=None, mac=None, ends=None,
-                          entry_type='lease'):
-        """Fake a lease as produced by the parser."""
-        if ip is None:
-            ip = factory.getRandomIPAddress()
-        if mac is None:
-            mac = factory.getRandomMACAddress()
-        Hardware = namedtuple('Hardware', ['mac'])
-        Lease = namedtuple(
-            'Lease', ['lease_or_host', 'ip', 'hardware', 'ends'])
-        return Lease(entry_type, ip, Hardware(mac), ends)
-
-    def fake_parsed_host(self, ip=None, mac=None):
-        """Fake a host declaration as produced by the parser."""
-        return self.fake_parsed_lease(ip=ip, mac=mac, entry_type='host')
-
-    def fake_parsed_rubout(self, ip=None):
-        """Fake a "rubout" host declaration."""
-        if ip is None:
-            ip = factory.getRandomIPAddress()
-        Rubout = namedtuple('Rubout', ['lease_or_host', 'ip'])
-        return Rubout('host', ip)
-
-    def test_get_expiry_date_parses_expiry_date(self):
-        lease = self.fake_parsed_lease(ends='0 2011/01/02 03:04:05')
-        self.assertEqual(
-            datetime(
-                year=2011, month=01, day=02,
-                hour=03, minute=04, second=05),
-            get_expiry_date(lease))
-
-    def test_get_expiry_date_returns_None_for_never(self):
-        self.assertIsNone(
-            get_expiry_date(self.fake_parsed_lease(ends='never')))
-
-    def test_get_expiry_date_returns_None_if_no_expiry_given(self):
-        self.assertIsNone(get_expiry_date(self.fake_parsed_lease(ends=None)))
-
-    def test_has_expired_returns_False_for_eternal_lease(self):
-        now = datetime.utcnow()
-        self.assertFalse(has_expired(self.fake_parsed_lease(ends=None), now))
-
-    def test_has_expired_returns_False_for_future_expiry_date(self):
-        now = datetime.utcnow()
-        later = '1 2035/12/31 23:59:59'
-        self.assertFalse(has_expired(self.fake_parsed_lease(ends=later), now))
-
-    def test_has_expired_returns_True_for_past_expiry_date(self):
-        now = datetime.utcnow()
-        earlier = '1 2001/01/01 00:00:00'
-        self.assertTrue(
-            has_expired(self.fake_parsed_lease(ends=earlier), now))
-
-    def test_gather_leases_finds_current_leases(self):
-        lease = self.fake_parsed_lease()
-        self.assertEqual(
-            {lease.ip: lease.hardware.mac},
-            gather_leases([lease]))
-
-    def test_gather_leases_ignores_expired_leases(self):
-        earlier = '1 2001/01/01 00:00:00'
-        lease = self.fake_parsed_lease(ends=earlier)
-        self.assertEqual({}, gather_leases([lease]))
-
-    def test_gather_leases_combines_expired_and_current_leases(self):
-        earlier = '1 2001/01/01 00:00:00'
-        ip = factory.getRandomIPAddress()
-        old_owner = factory.getRandomMACAddress()
-        new_owner = factory.getRandomMACAddress()
-        leases = [
-            self.fake_parsed_lease(ip=ip, mac=old_owner, ends=earlier),
-            self.fake_parsed_lease(ip=ip, mac=new_owner),
-            ]
-        self.assertEqual({ip: new_owner}, gather_leases(leases))
-
-    def test_gather_leases_ignores_ordering(self):
-        earlier = '1 2001/01/01 00:00:00'
-        ip = factory.getRandomIPAddress()
-        old_owner = factory.getRandomMACAddress()
-        new_owner = factory.getRandomMACAddress()
-        leases = [
-            self.fake_parsed_lease(ip=ip, mac=new_owner),
-            self.fake_parsed_lease(ip=ip, mac=old_owner, ends=earlier),
-            ]
-        self.assertEqual({ip: new_owner}, gather_leases(leases))
-
-    def test_gather_leases_ignores_host_declarations(self):
-        self.assertEqual({}, gather_leases([self.fake_parsed_host()]))
-
-    def test_gather_hosts_finds_hosts(self):
-        host = self.fake_parsed_host()
-        self.assertEqual({host.ip: host.hardware.mac}, gather_hosts([host]))
-
-    def test_gather_hosts_ignores_unaccompanied_rubouts(self):
-        self.assertEqual({}, gather_hosts([self.fake_parsed_rubout()]))
-
-    def test_gather_hosts_ignores_rubbed_out_entries(self):
-        ip = factory.getRandomIPAddress()
-        hosts = [
-            self.fake_parsed_host(ip=ip),
-            self.fake_parsed_rubout(ip=ip),
-            ]
-        self.assertEqual({}, gather_hosts(hosts))
-
-    def test_gather_hosts_follows_reassigned_host(self):
-        ip = factory.getRandomIPAddress()
-        new_owner = factory.getRandomMACAddress()
-        hosts = [
-            self.fake_parsed_host(ip=ip),
-            self.fake_parsed_rubout(ip=ip),
-            self.fake_parsed_host(ip=ip, mac=new_owner),
-            ]
-        self.assertEqual({ip: new_owner}, gather_hosts(hosts))
-
-    def test_is_lease_and_is_host_recognize_lease(self):
-        params = {
-            'ip': factory.getRandomIPAddress(),
-            'mac': factory.getRandomMACAddress(),
-        }
-        [parsed_lease] = lease_parser.searchString(dedent("""\
-            lease %(ip)s {
-                hardware ethernet %(mac)s;
-            }
-            """ % params))
-        self.assertEqual(
-            (True, False),
-            (is_lease(parsed_lease), is_host(parsed_lease)))
-
-    def test_is_lease_and_is_host_recognize_host(self):
-        params = {
-            'ip': factory.getRandomIPAddress(),
-            'mac': factory.getRandomMACAddress(),
-        }
-        [parsed_host] = lease_parser.searchString(dedent("""\
-            host %(ip)s {
-                hardware ethernet %(mac)s;
-            }
-            """ % params))
-        self.assertEqual(
-            (False, True),
-            (is_lease(parsed_host), is_host(parsed_host)))
-
-    def test_get_host_mac_returns_None_for_host(self):
-        params = {
-            'ip': factory.getRandomIPAddress(),
-            'mac': factory.getRandomMACAddress(),
-        }
-        [parsed_host] = lease_parser.searchString(dedent("""\
-            host %(ip)s {
-                hardware ethernet %(mac)s;
-            }
-            """ % params))
-        self.assertEqual(params['mac'], get_host_mac(parsed_host))
-
-    def test_get_host_mac_returns_None_for_rubout(self):
-        ip = factory.getRandomIPAddress()
-        [parsed_host] = lease_parser.searchString(dedent("""\
-            host %s {
-                deleted;
-            }
-            """ % ip))
-        self.assertIsNone(get_host_mac(parsed_host))
-
-    def test_get_host_mac_returns_None_for_rubout_even_with_mac(self):
-        params = {
-            'ip': factory.getRandomIPAddress(),
-            'mac': factory.getRandomMACAddress(),
-        }
-        [parsed_host] = lease_parser.searchString(dedent("""\
-            host %(ip)s {
-                deleted;
-                hardware ethernet %(mac)s;
-            }
-            """ % params))
-        self.assertIsNone(get_host_mac(parsed_host))
+    )
+
+
+def fake_parsed_lease(ip=None, mac=None, ends=None,
+                      entry_type='lease'):
+    """Fake a lease as produced by the parser."""
+    if ip is None:
+        ip = factory.getRandomIPAddress()
+    if mac is None:
+        mac = factory.getRandomMACAddress()
+    Hardware = namedtuple('Hardware', ['mac'])
+    Lease = namedtuple(
+        'Lease', ['lease_or_host', 'ip', 'hardware', 'ends'])
+    return Lease(entry_type, ip, Hardware(mac), ends)
+
+
+def fake_parsed_host(ip=None, mac=None):
+    """Fake a host declaration as produced by the parser."""
+    return fake_parsed_lease(ip=ip, mac=mac, entry_type='host')
+
+
+def fake_parsed_rubout(ip=None):
+    """Fake a "rubout" host declaration."""
+    if ip is None:
+        ip = factory.getRandomIPAddress()
+    Rubout = namedtuple('Rubout', ['lease_or_host', 'ip'])
+    return Rubout('host', ip)
+
+
+class TestLeasesParsers(MAASTestCase):
+
+    scenarios = (
+        ("original", dict(parse=leases_parser.parse_leases)),
+        ("fast", dict(parse=leases_parser_fast.parse_leases)),
+    )
 
     def test_parse_leases_copes_with_empty_file(self):
-        self.assertEqual({}, parse_leases(""))
+        self.assertEqual({}, self.parse(""))
 
     def test_parse_leases_parses_lease(self):
         params = {
             'ip': factory.getRandomIPAddress(),
             'mac': factory.getRandomMACAddress(),
         }
-        leases = parse_leases(dedent("""\
+        leases = self.parse(dedent("""\
             lease %(ip)s {
                 starts 5 2010/01/01 00:00:01;
                 ends never;
@@ -254,7 +112,7 @@
             'ip': factory.getRandomIPAddress(),
             'mac': factory.getRandomMACAddress(),
         }
-        leases = parse_leases(dedent("""\
+        leases = self.parse(dedent("""\
             host %(ip)s {
                 dynamic;
                 hardware ethernet %(mac)s;
@@ -263,8 +121,36 @@
             """ % params))
         self.assertEqual({params['ip']: params['mac']}, leases)
 
+    def test_parse_leases_copes_with_misleading_values(self):
+        params = {
+            'ip1': factory.getRandomIPAddress(),
+            'mac1': factory.getRandomMACAddress(),
+            'ip2': factory.getRandomIPAddress(),
+            'mac2': factory.getRandomMACAddress(),
+        }
+        leases = self.parse(dedent("""\
+            host %(ip1)s {
+                dynamic;
+              ### NOTE the following value has a closing brace, and
+              ### also looks like a host record.
+                uid "foo}host 12.34.56.78 { }";
+                hardware ethernet %(mac1)s;
+                fixed-address %(ip1)s;
+            }
+              ### NOTE the extra indent on the line below.
+                host %(ip2)s {
+                dynamic;
+                hardware ethernet %(mac2)s;
+                fixed-address %(ip2)s;
+            }
+            """ % params))
+        self.assertEqual(
+            {params['ip1']: params['mac1'],
+             params['ip2']: params['mac2']},
+            leases)
+
     def test_parse_leases_parses_host_rubout(self):
-        leases = parse_leases(dedent("""\
+        leases = self.parse(dedent("""\
             host %s {
                 deleted;
             }
@@ -277,7 +163,7 @@
             'mac': factory.getRandomMACAddress(),
             'incomplete_ip': factory.getRandomIPAddress(),
         }
-        leases = parse_leases(dedent("""\
+        leases = self.parse(dedent("""\
             lease %(ip)s {
                 hardware ethernet %(mac)s;
             }
@@ -291,7 +177,7 @@
             'ip': factory.getRandomIPAddress(),
             'mac': factory.getRandomMACAddress(),
         }
-        leases = parse_leases(dedent("""\
+        leases = self.parse(dedent("""\
             # Top comment (ignored).
             lease %(ip)s { # End-of-line comment (ignored).
                 # Comment in lease block (ignored).
@@ -306,7 +192,7 @@
             'ip': factory.getRandomIPAddress(),
             'mac': factory.getRandomMACAddress(),
         }
-        leases = parse_leases(dedent("""\
+        leases = self.parse(dedent("""\
             lease %(ip)s {
                 hardware ethernet %(mac)s;
                 ends 1 2001/01/01 00:00:00;
@@ -319,7 +205,7 @@
             'ip': factory.getRandomIPAddress(),
             'mac': factory.getRandomMACAddress(),
         }
-        leases = parse_leases(dedent("""\
+        leases = self.parse(dedent("""\
             lease %(ip)s {
                 hardware ethernet %(mac)s;
                 ends never;
@@ -332,7 +218,7 @@
             'ip': factory.getRandomIPAddress(),
             'mac': factory.getRandomMACAddress(),
         }
-        leases = parse_leases(dedent("""\
+        leases = self.parse(dedent("""\
             lease %(ip)s {
                 hardware ethernet %(mac)s;
             }
@@ -345,7 +231,7 @@
             'old_owner': factory.getRandomMACAddress(),
             'new_owner': factory.getRandomMACAddress(),
         }
-        leases = parse_leases(dedent("""\
+        leases = self.parse(dedent("""\
             lease %(ip)s {
                 hardware ethernet %(old_owner)s;
             }
@@ -360,7 +246,7 @@
             'ip': factory.getRandomIPAddress(),
             'mac': factory.getRandomMACAddress(),
         }
-        leases = parse_leases(dedent("""\
+        leases = self.parse(dedent("""\
             host %(ip)s {
                 dynamic;
                 hardware ethernet %(mac)s;
@@ -375,7 +261,7 @@
             'ip': factory.getRandomIPAddress(),
             'mac': factory.getRandomMACAddress(),
         }
-        leases = parse_leases(dedent("""\
+        leases = self.parse(dedent("""\
             host %(ip)s {
                 hardware ethernet %(mac)s;
                 fixed-address %(ip)s;
@@ -383,13 +269,235 @@
             """ % params))
         self.assertEqual({params['ip']: params['mac']}, leases)
 
+
+class TestLeasesParserFast(MAASTestCase):
+
+    def test_expired_lease_does_not_shadow_earlier_host_stanza(self):
+        params = {
+            'ip': factory.getRandomIPAddress(),
+            'mac1': factory.getRandomMACAddress(),
+            'mac2': factory.getRandomMACAddress(),
+        }
+        leases = leases_parser_fast.parse_leases(dedent("""\
+            host %(ip)s {
+                dynamic;
+                hardware ethernet %(mac1)s;
+                fixed-address %(ip)s;
+            }
+            lease %(ip)s {
+                starts 5 2010/01/01 00:00:01;
+                ends 1 2010/01/01 00:00:02;
+                hardware ethernet %(mac2)s;
+            }
+            """ % params))
+        # The lease has expired so it doesn't shadow the host stanza,
+        # and so the MAC returned is from the host stanza.
+        self.assertEqual({params["ip"]: params["mac1"]}, leases)
+
+    def test_active_lease_shadows_earlier_host_stanza(self):
+        params = {
+            'ip': factory.getRandomIPAddress(),
+            'mac1': factory.getRandomMACAddress(),
+            'mac2': factory.getRandomMACAddress(),
+        }
+        leases = leases_parser_fast.parse_leases(dedent("""\
+            host %(ip)s {
+                dynamic;
+                hardware ethernet %(mac1)s;
+                fixed-address %(ip)s;
+            }
+            lease %(ip)s {
+                starts 5 2010/01/01 00:00:01;
+                hardware ethernet %(mac2)s;
+            }
+            """ % params))
+        # The lease hasn't expired, so shadows the earlier host stanza.
+        self.assertEqual({params["ip"]: params["mac2"]}, leases)
+
+    def test_host_stanza_shadows_earlier_active_lease(self):
+        params = {
+            'ip': factory.getRandomIPAddress(),
+            'mac1': factory.getRandomMACAddress(),
+            'mac2': factory.getRandomMACAddress(),
+        }
+        leases = leases_parser_fast.parse_leases(dedent("""\
+            lease %(ip)s {
+                starts 5 2010/01/01 00:00:01;
+                hardware ethernet %(mac2)s;
+            }
+            host %(ip)s {
+                dynamic;
+                hardware ethernet %(mac1)s;
+                fixed-address %(ip)s;
+            }
+            """ % params))
+        # The lease hasn't expired, but the host entry is later, so it
+        # shadows the earlier lease stanza.
+        self.assertEqual({params["ip"]: params["mac1"]}, leases)
+
+
+class TestLeasesParserFunctions(MAASTestCase):
+
+    def test_get_expiry_date_parses_expiry_date(self):
+        lease = fake_parsed_lease(ends='0 2011/01/02 03:04:05')
+        self.assertEqual(
+            datetime(
+                year=2011, month=01, day=02,
+                hour=03, minute=04, second=05),
+            get_expiry_date(lease))
+
+    def test_get_expiry_date_returns_None_for_never(self):
+        self.assertIsNone(
+            get_expiry_date(fake_parsed_lease(ends='never')))
+
+    def test_get_expiry_date_returns_None_if_no_expiry_given(self):
+        self.assertIsNone(get_expiry_date(fake_parsed_lease(ends=None)))
+
+    def test_has_expired_returns_False_for_eternal_lease(self):
+        now = datetime.utcnow()
+        self.assertFalse(has_expired(fake_parsed_lease(ends=None), now))
+
+    def test_has_expired_returns_False_for_future_expiry_date(self):
+        now = datetime.utcnow()
+        later = '1 2035/12/31 23:59:59'
+        self.assertFalse(has_expired(fake_parsed_lease(ends=later), now))
+
+    def test_has_expired_returns_True_for_past_expiry_date(self):
+        now = datetime.utcnow()
+        earlier = '1 2001/01/01 00:00:00'
+        self.assertTrue(
+            has_expired(fake_parsed_lease(ends=earlier), now))
+
+    def test_gather_leases_finds_current_leases(self):
+        lease = fake_parsed_lease()
+        self.assertEqual(
+            {lease.ip: lease.hardware.mac},
+            gather_leases([lease]))
+
+    def test_gather_leases_ignores_expired_leases(self):
+        earlier = '1 2001/01/01 00:00:00'
+        lease = fake_parsed_lease(ends=earlier)
+        self.assertEqual({}, gather_leases([lease]))
+
+    def test_gather_leases_combines_expired_and_current_leases(self):
+        earlier = '1 2001/01/01 00:00:00'
+        ip = factory.getRandomIPAddress()
+        old_owner = factory.getRandomMACAddress()
+        new_owner = factory.getRandomMACAddress()
+        leases = [
+            fake_parsed_lease(ip=ip, mac=old_owner, ends=earlier),
+            fake_parsed_lease(ip=ip, mac=new_owner),
+            ]
+        self.assertEqual({ip: new_owner}, gather_leases(leases))
+
+    def test_gather_leases_ignores_ordering(self):
+        earlier = '1 2001/01/01 00:00:00'
+        ip = factory.getRandomIPAddress()
+        old_owner = factory.getRandomMACAddress()
+        new_owner = factory.getRandomMACAddress()
+        leases = [
+            fake_parsed_lease(ip=ip, mac=new_owner),
+            fake_parsed_lease(ip=ip, mac=old_owner, ends=earlier),
+            ]
+        self.assertEqual({ip: new_owner}, gather_leases(leases))
+
+    def test_gather_leases_ignores_host_declarations(self):
+        self.assertEqual({}, gather_leases([fake_parsed_host()]))
+
+    def test_gather_hosts_finds_hosts(self):
+        host = fake_parsed_host()
+        self.assertEqual({host.ip: host.hardware.mac}, gather_hosts([host]))
+
+    def test_gather_hosts_ignores_unaccompanied_rubouts(self):
+        self.assertEqual({}, gather_hosts([fake_parsed_rubout()]))
+
+    def test_gather_hosts_ignores_rubbed_out_entries(self):
+        ip = factory.getRandomIPAddress()
+        hosts = [
+            fake_parsed_host(ip=ip),
+            fake_parsed_rubout(ip=ip),
+            ]
+        self.assertEqual({}, gather_hosts(hosts))
+
+    def test_gather_hosts_follows_reassigned_host(self):
+        ip = factory.getRandomIPAddress()
+        new_owner = factory.getRandomMACAddress()
+        hosts = [
+            fake_parsed_host(ip=ip),
+            fake_parsed_rubout(ip=ip),
+            fake_parsed_host(ip=ip, mac=new_owner),
+            ]
+        self.assertEqual({ip: new_owner}, gather_hosts(hosts))
+
+    def test_is_lease_and_is_host_recognize_lease(self):
+        params = {
+            'ip': factory.getRandomIPAddress(),
+            'mac': factory.getRandomMACAddress(),
+        }
+        [parsed_lease] = lease_parser.searchString(dedent("""\
+            lease %(ip)s {
+                hardware ethernet %(mac)s;
+            }
+            """ % params))
+        self.assertEqual(
+            (True, False),
+            (is_lease(parsed_lease), is_host(parsed_lease)))
+
+    def test_is_lease_and_is_host_recognize_host(self):
+        params = {
+            'ip': factory.getRandomIPAddress(),
+            'mac': factory.getRandomMACAddress(),
+        }
+        [parsed_host] = lease_parser.searchString(dedent("""\
+            host %(ip)s {
+                hardware ethernet %(mac)s;
+            }
+            """ % params))
+        self.assertEqual(
+            (False, True),
+            (is_lease(parsed_host), is_host(parsed_host)))
+
+    def test_get_host_mac_returns_None_for_host(self):
+        params = {
+            'ip': factory.getRandomIPAddress(),
+            'mac': factory.getRandomMACAddress(),
+        }
+        [parsed_host] = lease_parser.searchString(dedent("""\
+            host %(ip)s {
+                hardware ethernet %(mac)s;
+            }
+            """ % params))
+        self.assertEqual(params['mac'], get_host_mac(parsed_host))
+
+    def test_get_host_mac_returns_None_for_rubout(self):
+        ip = factory.getRandomIPAddress()
+        [parsed_host] = lease_parser.searchString(dedent("""\
+            host %s {
+                deleted;
+            }
+            """ % ip))
+        self.assertIsNone(get_host_mac(parsed_host))
+
+    def test_get_host_mac_returns_None_for_rubout_even_with_mac(self):
+        params = {
+            'ip': factory.getRandomIPAddress(),
+            'mac': factory.getRandomMACAddress(),
+        }
+        [parsed_host] = lease_parser.searchString(dedent("""\
+            host %(ip)s {
+                deleted;
+                hardware ethernet %(mac)s;
+            }
+            """ % params))
+        self.assertIsNone(get_host_mac(parsed_host))
+
     def test_combine_entries_accepts_host_followed_by_expired_lease(self):
         ip = factory.getRandomIPAddress()
         mac = factory.getRandomMACAddress()
         earlier = '1 2001/01/01 00:00:00'
         entries = [
-            self.fake_parsed_host(ip=ip, mac=mac),
-            self.fake_parsed_lease(ip=ip, ends=earlier),
+            fake_parsed_host(ip=ip, mac=mac),
+            fake_parsed_lease(ip=ip, ends=earlier),
             ]
         self.assertEqual({ip: mac}, combine_entries(entries))
 
@@ -398,8 +506,8 @@
         mac = factory.getRandomMACAddress()
         earlier = '1 2001/01/01 00:00:00'
         entries = [
-            self.fake_parsed_lease(ip=ip, ends=earlier),
-            self.fake_parsed_host(ip=ip, mac=mac),
+            fake_parsed_lease(ip=ip, ends=earlier),
+            fake_parsed_host(ip=ip, mac=mac),
             ]
         self.assertEqual({ip: mac}, combine_entries(entries))
 
@@ -407,9 +515,9 @@
         ip = factory.getRandomIPAddress()
         mac = factory.getRandomMACAddress()
         entries = [
-            self.fake_parsed_host(ip=ip),
-            self.fake_parsed_rubout(ip=ip),
-            self.fake_parsed_lease(ip=ip, mac=mac),
+            fake_parsed_host(ip=ip),
+            fake_parsed_rubout(ip=ip),
+            fake_parsed_lease(ip=ip, mac=mac),
             ]
         self.assertEqual({ip: mac}, combine_entries(entries))
 
@@ -418,9 +526,9 @@
         mac = factory.getRandomMACAddress()
         earlier = '1 2001/01/01 00:00:00'
         entries = [
-            self.fake_parsed_host(ip=ip),
-            self.fake_parsed_rubout(ip=ip),
-            self.fake_parsed_lease(ip=ip, mac=mac, ends=earlier),
+            fake_parsed_host(ip=ip),
+            fake_parsed_rubout(ip=ip),
+            fake_parsed_lease(ip=ip, mac=mac, ends=earlier),
             ]
         self.assertEqual({}, combine_entries(entries))
 
@@ -429,9 +537,9 @@
         mac = factory.getRandomMACAddress()
         earlier = '1 2001/01/01 00:00:00'
         entries = [
-            self.fake_parsed_host(ip=ip),
-            self.fake_parsed_lease(ip=ip, mac=mac, ends=earlier),
-            self.fake_parsed_rubout(ip=ip),
+            fake_parsed_host(ip=ip),
+            fake_parsed_lease(ip=ip, mac=mac, ends=earlier),
+            fake_parsed_rubout(ip=ip),
             ]
         self.assertEqual({}, combine_entries(entries))
 
@@ -439,9 +547,9 @@
         ip = factory.getRandomIPAddress()
         mac = factory.getRandomMACAddress()
         entries = [
-            self.fake_parsed_host(ip=ip),
-            self.fake_parsed_lease(ip=ip, mac=mac),
-            self.fake_parsed_rubout(ip=ip),
+            fake_parsed_host(ip=ip),
+            fake_parsed_lease(ip=ip, mac=mac),
+            fake_parsed_rubout(ip=ip),
             ]
         self.assertEqual({ip: mac}, combine_entries(entries))
 
@@ -449,8 +557,8 @@
         ip = factory.getRandomIPAddress()
         mac = factory.getRandomMACAddress()
         entries = [
-            self.fake_parsed_host(ip=ip),
-            self.fake_parsed_rubout(ip=ip),
-            self.fake_parsed_host(ip=ip, mac=mac),
+            fake_parsed_host(ip=ip),
+            fake_parsed_rubout(ip=ip),
+            fake_parsed_host(ip=ip, mac=mac),
             ]
         self.assertEqual({ip: mac}, combine_entries(entries))
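
A note on the test restructuring above: TestLeasesParsers relies on
MAASTestCase's scenarios support (the testscenarios pattern), so each test
method runs once per scenario with the scenario dict's entries set as
attributes on the test instance; that is how self.parse ends up bound to
either the original or the fast parser. A minimal, hypothetical sketch of
the same mechanism:

    from maastesting.testcase import MAASTestCase

    class TestTransforms(MAASTestCase):
        # Illustrative only: test_transform runs twice, once per scenario,
        # with 'transform' and 'expected' set on the instance beforehand.
        scenarios = (
            ("upper", dict(transform=lambda s: s.upper(), expected="HI")),
            ("lower", dict(transform=lambda s: s.lower(), expected="hi")),
        )

        def test_transform(self):
            self.assertEqual(self.expected, self.transform("Hi"))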

=== modified file 'src/provisioningserver/driver/__init__.py'
--- src/provisioningserver/driver/__init__.py	2014-03-28 10:43:53 +0000
+++ src/provisioningserver/driver/__init__.py	2014-06-04 19:57:51 +0000
@@ -139,6 +139,7 @@
     Architecture(
         name="armhf/generic", description="armhf/generic",
         pxealiases=["arm"], kernel_options=["console=ttyAMA0"]),
+    Architecture(name="ppc64el/generic", description="ppc64el"),
 ]
 for arch in builtin_architectures:
     ArchitectureRegistry.register_item(arch.name, arch)

=== modified file 'src/provisioningserver/power/tests/test_poweraction.py'
--- src/provisioningserver/power/tests/test_poweraction.py	2014-04-15 14:41:32 +0000
+++ src/provisioningserver/power/tests/test_poweraction.py	2014-06-04 19:57:51 +0000
@@ -158,20 +158,6 @@
             PowerActionFail,
             pa.execute, power_change='off', mac=factory.getRandomMACAddress())
 
-    def test_virsh_checks_vm_state(self):
-        # We can't test the virsh template in detail (and it may be
-        # customized), but by making it use "echo" instead of a real
-        # virsh we can make it get a bogus answer from its status check.
-        # The bogus answer is actually the rest of the virsh command
-        # line.  It will complain about this and fail.
-        action = PowerAction('virsh')
-        script = action.render_template(
-            action.get_template(), power_change='on',
-            power_address='qemu://example.com/',
-            power_id='mysystem', virsh='echo')
-        output = action.run_shell(script)
-        self.assertIn("Got unknown power state from virsh", output)
-
     def test_fence_cdu_checks_state(self):
         # We can't test the fence_cdu template in detail (and it may be
         # customized), but by making it use "echo" instead of a real

=== modified file 'src/provisioningserver/power_schema.py'
--- src/provisioningserver/power_schema.py	2014-04-15 14:41:32 +0000
+++ src/provisioningserver/power_schema.py	2014-06-04 19:57:51 +0000
@@ -168,6 +168,9 @@
         'fields': [
             make_json_field('power_address', "Power address"),
             make_json_field('power_id', "Power ID"),
+            make_json_field(
+                'power_pass', "Power password (optional)",
+                required=False),
         ],
     },
     {

=== modified file 'src/provisioningserver/rpc/clusterservice.py'
--- src/provisioningserver/rpc/clusterservice.py	2014-03-28 10:43:53 +0000
+++ src/provisioningserver/rpc/clusterservice.py	2014-06-04 19:57:51 +0000
@@ -221,9 +221,13 @@
         instances connected to it.
     """
 
+    INTERVAL_LOW = 2  # seconds.
+    INTERVAL_MID = 10  # seconds.
+    INTERVAL_HIGH = 30  # seconds.
+
     def __init__(self, reactor):
         super(ClusterClientService, self).__init__(
-            self._get_random_interval(), self.update)
+            self._calculate_interval(None, None), self.update)
         self.connections = {}
         self.clock = reactor
 
@@ -248,9 +252,6 @@
         This obtains a list of endpoints from the region then connects
         to new ones and drops connections to those no longer used.
         """
-        # 0. Update interval.
-        self._update_interval()
-        # 1. Obtain RPC endpoints.
         try:
             info_url = self._get_rpc_info_url()
             info_page = yield getPage(info_url)
@@ -258,9 +259,13 @@
             eventloops = info["eventloops"]
             yield self._update_connections(eventloops)
         except ConnectError as error:
+            self._update_interval(None, len(self.connections))
             log.msg("Region not available: %s" % (error,))
         except:
+            self._update_interval(None, len(self.connections))
             log.err()
+        else:
+            self._update_interval(len(eventloops), len(self.connections))
 
     @staticmethod
     def _get_rpc_info_url():
@@ -270,14 +275,39 @@
         url = url.geturl()
         return ascii_url(url)
 
-    @staticmethod
-    def _get_random_interval():
-        """Return a random interval between 30 and 90 seconds."""
-        return random.randint(30, 90)
-
-    def _update_interval(self):
-        """Change the interval randomly to avoid stampedes of clusters."""
-        self._loop.interval = self.step = self._get_random_interval()
+    def _calculate_interval(self, num_eventloops, num_connections):
+        """Calculate the update interval.
+
+        The interval is `INTERVAL_LOW` seconds when there are no
+        connections, so that this can quickly obtain its first
+        connection.
+
+        The interval changes to `INTERVAL_MID` seconds when there are
+        some connections, but fewer than there are event-loops.
+
+        Once connected to all event-loops, it backs off to
+        `INTERVAL_HIGH` seconds.
+        """
+        if num_eventloops is None:
+            # The region is not available; keep trying regularly.
+            return self.INTERVAL_LOW
+        elif num_eventloops == 0:
+            # The region is coming up; keep trying regularly.
+            return self.INTERVAL_LOW
+        elif num_connections == 0:
+            # No connections to the region; keep trying regularly.
+            return self.INTERVAL_LOW
+        elif num_connections < num_eventloops:
+            # Some connections to the region, but not to all event
+            # loops; keep updating reasonably frequently.
+            return self.INTERVAL_MID
+        else:
+            # Fully connected to the region; update every so often.
+            return self.INTERVAL_HIGH
+
+    def _update_interval(self, num_eventloops, num_connections):
+        """Change the update interval."""
+        self._loop.interval = self.step = self._calculate_interval(
+            num_eventloops, num_connections)
 
     @inlineCallbacks
     def _update_connections(self, eventloops):
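
The back-off policy added above is small enough to restate on its own; the
sketch below is a distilled, standalone copy of _calculate_interval (not
MAAS code) with the three constants inlined, matching the scenarios tested
in the next file:

    INTERVAL_LOW, INTERVAL_MID, INTERVAL_HIGH = 2, 10, 30  # seconds

    def calculate_interval(num_eventloops, num_connections):
        # Region unknown or still starting up: poll quickly.
        if num_eventloops is None or num_eventloops == 0:
            return INTERVAL_LOW
        # No connections to the region yet: keep polling quickly.
        if num_connections == 0:
            return INTERVAL_LOW
        # Connected to some, but not all, event-loops.
        if num_connections < num_eventloops:
            return INTERVAL_MID
        # Fully connected: poll infrequently.
        return INTERVAL_HIGH

    assert calculate_interval(None, None) == 2   # initial
    assert calculate_interval(0, None) == 2      # region starting up
    assert calculate_interval(2, 0) == 2         # no connections
    assert calculate_interval(2, 1) == 10        # partially connected
    assert calculate_interval(3, 3) == 30        # fully connected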

=== modified file 'src/provisioningserver/rpc/tests/test_clusterservice.py'
--- src/provisioningserver/rpc/tests/test_clusterservice.py	2014-03-28 10:43:53 +0000
+++ src/provisioningserver/rpc/tests/test_clusterservice.py	2014-06-04 19:57:51 +0000
@@ -254,40 +254,6 @@
         observed_rpc_info_url = ClusterClientService._get_rpc_info_url()
         self.assertThat(observed_rpc_info_url, Equals(expected_rpc_info_url))
 
-    def test__get_random_interval(self):
-        # _get_random_interval() returns a random number between 30 and
-        # 90 inclusive.
-        is_between_30_and_90_inclusive = MatchesAll(
-            MatchesAny(GreaterThan(30), Equals(30)),
-            MatchesAny(LessThan(90), Equals(90)))
-        for _ in range(100):
-            self.assertThat(
-                ClusterClientService._get_random_interval(),
-                is_between_30_and_90_inclusive)
-
-    def test__get_random_interval_calls_into_standard_library(self):
-        # _get_random_interval() depends entirely on the standard library.
-        random = self.patch(clusterservice, "random")
-        random.randint.return_value = sentinel.randint
-        self.assertIs(
-            sentinel.randint,
-            ClusterClientService._get_random_interval())
-        self.assertThat(random.randint, MockCalledOnceWith(30, 90))
-
-    def test__update_interval(self):
-        service = ClusterClientService(Clock())
-        # ClusterClientService's superclass, TimerService, creates a
-        # LoopingCall with now=True. We neuter it here because we only
-        # want to observe the behaviour of _update_interval().
-        service.call = (lambda: None, (), {})
-        service.startService()
-        self.assertThat(service.step, MatchesAll(
-            Equals(service._loop.interval), IsInstance(int)))
-        service.step = service._loop.interval = sentinel.undefined
-        service._update_interval()
-        self.assertThat(service.step, MatchesAll(
-            Equals(service._loop.interval), IsInstance(int)))
-
     def test_update_connect_error_is_logged_tersely(self):
         getPage = self.patch(clusterservice, "getPage")
         getPage.side_effect = error.ConnectionRefusedError()
@@ -512,6 +478,53 @@
             service.getClient)
 
 
+class TestClusterClientServiceIntervals(MAASTestCase):
+
+    scenarios = (
+        ("initial", {
+            "num_eventloops": None,
+            "num_connections": None,
+            "expected": ClusterClientService.INTERVAL_LOW,
+        }),
+        ("no-event-loops", {
+            "num_eventloops": 0,
+            "num_connections": sentinel.undefined,
+            "expected": ClusterClientService.INTERVAL_LOW,
+        }),
+        ("no-connections", {
+            "num_eventloops": 1,  # anything > 1.
+            "num_connections": 0,
+            "expected": ClusterClientService.INTERVAL_LOW,
+        }),
+        ("fewer-connections-than-event-loops", {
+            "num_eventloops": 2,  # anything > num_connections.
+            "num_connections": 1,  # anything > 0.
+            "expected": ClusterClientService.INTERVAL_MID,
+        }),
+        ("default", {
+            "num_eventloops": 3,  # same as num_connections.
+            "num_connections": 3,  # same as num_eventloops.
+            "expected": ClusterClientService.INTERVAL_HIGH,
+        }),
+    )
+
+    def make_inert_client_service(self):
+        service = ClusterClientService(Clock())
+        # ClusterClientService's superclass, TimerService, creates a
+        # LoopingCall with now=True. We neuter it here to allow
+        # observation of, for example, the behaviour of
+        # _update_interval().
+        service.call = (lambda: None, (), {})
+        return service
+
+    def test__calculate_interval(self):
+        service = self.make_inert_client_service()
+        service.startService()
+        self.assertEqual(
+            self.expected, service._calculate_interval(
+                self.num_eventloops, self.num_connections))
+
+
 class TestClusterClient(MAASTestCase):
 
     run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=5)

=== modified file 'src/provisioningserver/tasks.py'
--- src/provisioningserver/tasks.py	2014-04-15 14:41:32 +0000
+++ src/provisioningserver/tasks.py	2014-06-04 19:57:51 +0000
@@ -46,6 +46,7 @@
     probe_seamicro15k_and_enlist,
     )
 from provisioningserver.custom_hardware.ucsm import probe_and_enlist_ucsm
+from provisioningserver.custom_hardware.virsh import probe_virsh_and_enlist
 from provisioningserver.dhcp import (
     config,
     detect,
@@ -384,10 +385,18 @@
     call_and_check(['sudo', '-n', 'service', 'maas-dhcp-server', 'restart'])
 
 
+# Message to put in the DHCP config file when the DHCP server gets stopped.
+DISABLED_DHCP_SERVER = "# DHCP server stopped."
+
+
 @task
 @log_exception_text
 def stop_dhcp_server():
-    """Stop a DHCP server."""
+    """Write a blank config file and stop a DHCP server."""
+    # Write an empty config file to avoid leaving an outdated config
+    # lying around.
+    sudo_write_file(
+        celery_config.DHCP_CONFIG_FILE, DISABLED_DHCP_SERVER)
     call_and_check(['sudo', '-n', 'service', 'maas-dhcp-server', 'stop'])
 
 
@@ -464,7 +473,7 @@
 @task
 @log_exception_text
 def add_seamicro15k(mac, username, password, power_control=None):
-    """ See `maasserver.api.NodeGroupsHandler.add_seamicro15k`. """
+    """ See `maasserver.api.NodeGroup.add_seamicro15k`. """
     ip = find_ip_via_arp(mac)
     if ip is not None:
         probe_seamicro15k_and_enlist(
@@ -476,6 +485,13 @@
 
 @task
 @log_exception_text
+def add_virsh(poweraddr, password=None):
+    """ See `maasserver.api.NodeGroup.add_virsh`. """
+    probe_virsh_and_enlist(poweraddr, password=password)
+
+
+@task
+@log_exception_text
 def enlist_nodes_from_ucsm(url, username, password):
     """ See `maasserver.api.NodeGroupsHandler.enlist_nodes_from_ucsm`. """
     probe_and_enlist_ucsm(url, username, password)

=== modified file 'src/provisioningserver/tests/test_tasks.py'
--- src/provisioningserver/tests/test_tasks.py	2014-04-15 14:41:32 +0000
+++ src/provisioningserver/tests/test_tasks.py	2014-06-04 19:57:51 +0000
@@ -330,11 +330,14 @@
         self.assertThat(tasks.call_and_check, MockCalledOnceWith(
             ['sudo', '-n', 'service', 'maas-dhcp-server', 'restart']))
 
-    def test_stop_dhcp_server_sends_command(self):
+    def test_stop_dhcp_server_sends_command_and_writes_empty_config(self):
         self.patch(tasks, 'call_and_check')
+        self.patch(tasks, 'sudo_write_file')
         stop_dhcp_server()
         self.assertThat(tasks.call_and_check, MockCalledOnceWith(
             ['sudo', '-n', 'service', 'maas-dhcp-server', 'stop']))
+        self.assertThat(tasks.sudo_write_file, MockCalledOnceWith(
+            celery_config.DHCP_CONFIG_FILE, tasks.DISABLED_DHCP_SERVER))
 
 
 def assertTaskRetried(runner, result, nb_retries, task_name):

=== modified file 'src/provisioningserver/tests/test_tftp.py'
--- src/provisioningserver/tests/test_tftp.py	2014-03-28 10:43:53 +0000
+++ src/provisioningserver/tests/test_tftp.py	2014-06-04 19:57:51 +0000
@@ -28,11 +28,11 @@
 from maastesting.testcase import MAASTestCase
 import mock
 from provisioningserver import tftp as tftp_module
+from provisioningserver.boot import BytesReader
 from provisioningserver.boot.pxe import PXEBootMethod
 from provisioningserver.boot.tests.test_pxe import compose_config_path
 from provisioningserver.tests.test_kernel_opts import make_kernel_parameters
 from provisioningserver.tftp import (
-    BytesReader,
     TFTPBackend,
     TFTPService,
     )
@@ -127,9 +127,9 @@
         self.assertEqual(b"", reader.read(1))
 
     @inlineCallbacks
-    def test_get_reader_config_file(self):
-        # For paths matching re_config_file, TFTPBackend.get_reader() returns
-        # a Deferred that will yield a BytesReader.
+    def test_get_render_file(self):
+        # For paths matching PXEBootMethod.match_path, TFTPBackend.get_reader()
+        # returns a Deferred that will yield a BytesReader.
         cluster_uuid = factory.getRandomUUID()
         self.patch(tftp_module, 'get_cluster_uuid').return_value = (
             cluster_uuid)
@@ -147,8 +147,8 @@
                 factory.getRandomPort()),
             }
 
-        @partial(self.patch, backend, "get_config_reader")
-        def get_config_reader(boot_method, params):
+        @partial(self.patch, backend, "get_boot_method_reader")
+        def get_boot_method_reader(boot_method, params):
             params_json = json.dumps(params)
             params_json_reader = BytesReader(params_json)
             return succeed(params_json_reader)
@@ -168,9 +168,10 @@
         self.assertEqual(expected_params, observed_params)
 
     @inlineCallbacks
-    def test_get_config_reader_returns_rendered_params(self):
-        # get_config_reader() takes a dict() of parameters and returns an
-        # `IReader` of a PXE configuration, rendered by `render_pxe_config`.
+    def test_get_boot_method_reader_returns_rendered_params(self):
+        # get_boot_method_reader() takes a dict() of parameters and returns an
+        # `IReader` of a PXE configuration, rendered by
+        # `PXEBootMethod.get_reader`.
         backend = TFTPBackend(self.make_dir(), b"http://example.com/")
         # Fake configuration parameters, as discovered from the file path.
         fake_params = {"mac": factory.getRandomMACAddress("-")}
@@ -182,15 +183,15 @@
         get_page_patch = self.patch(backend, "get_page")
         get_page_patch.return_value = succeed(fake_get_page_result)
 
-        # Stub render_config to return the render parameters.
+        # Stub get_reader to return the render parameters.
         method = PXEBootMethod()
-        fake_render_result = factory.make_name("render")
-        render_patch = self.patch(method, "render_config")
-        render_patch.return_value = fake_render_result
+        fake_render_result = factory.make_name("render").encode("utf-8")
+        render_patch = self.patch(method, "get_reader")
+        render_patch.return_value = BytesReader(fake_render_result)
 
         # Get the rendered configuration, which will actually be a JSON dump
         # of the render-time parameters.
-        reader = yield backend.get_config_reader(method, fake_params)
+        reader = yield backend.get_boot_method_reader(method, fake_params)
         self.addCleanup(reader.finish)
         self.assertIsInstance(reader, BytesReader)
         output = reader.read(10000)
@@ -198,13 +199,13 @@
         # The kernel parameters were fetched using `backend.get_page`.
         self.assertThat(backend.get_page, MockCalledOnceWith(mock.ANY))
 
-        # The result has been rendered by `backend.render_config`.
+        # The result has been rendered by `method.get_reader`.
         self.assertEqual(fake_render_result.encode("utf-8"), output)
-        self.assertThat(method.render_config, MockCalledOnceWith(
-            kernel_params=fake_kernel_params, **fake_params))
+        self.assertThat(method.get_reader, MockCalledOnceWith(
+            backend, kernel_params=fake_kernel_params, **fake_params))
 
     @inlineCallbacks
-    def test_get_config_reader_substitutes_armhf_in_params(self):
+    def test_get_boot_method_render_substitutes_armhf_in_params(self):
         # get_config_reader() should substitute "arm" for "armhf" in the
         # arch field of the parameters (mapping from pxe to maas
         # namespace).
@@ -224,8 +225,8 @@
                 factory.getRandomPort()),
             }
 
-        @partial(self.patch, backend, "get_config_reader")
-        def get_config_reader(boot_method, params):
+        @partial(self.patch, backend, "get_boot_method_reader")
+        def get_boot_method_reader(boot_method, params):
             params_json = json.dumps(params)
             params_json_reader = BytesReader(params_json)
             return succeed(params_json_reader)

=== modified file 'src/provisioningserver/tftp.py'
--- src/provisioningserver/tftp.py	2014-03-28 10:43:53 +0000
+++ src/provisioningserver/tftp.py	2014-06-04 19:57:51 +0000
@@ -18,7 +18,6 @@
     ]
 
 import httplib
-from io import BytesIO
 import json
 from urllib import urlencode
 from urlparse import (
@@ -34,10 +33,7 @@
     deferred,
     get_all_interface_addresses,
     )
-from tftp.backend import (
-    FilesystemSynchronousBackend,
-    IReader,
-    )
+from tftp.backend import FilesystemSynchronousBackend
 from tftp.errors import FileNotFound
 from tftp.protocol import TFTP
 from twisted.application import internet
@@ -45,22 +41,6 @@
 from twisted.python.context import get
 from twisted.web.client import getPage
 import twisted.web.error
-from zope.interface import implementer
-
-
-@implementer(IReader)
-class BytesReader:
-
-    def __init__(self, data):
-        super(BytesReader, self).__init__()
-        self.buffer = BytesIO(data)
-        self.size = len(data)
-
-    def read(self, size):
-        return self.buffer.read(size)
-
-    def finish(self):
-        self.buffer.close()
 
 
 class TFTPBackend(FilesystemSynchronousBackend):
@@ -118,7 +98,7 @@
     def get_boot_method(self, file_name):
         """Finds the correct boot method."""
         for _, method in BootMethodRegistry:
-            params = method.match_config_path(file_name)
+            params = method.match_path(self, file_name)
             if params is not None:
                 return method, params
         return None, None
@@ -142,21 +122,19 @@
         return d
 
     @deferred
-    def get_config_reader(self, boot_method, params):
+    def get_boot_method_reader(self, boot_method, params):
         """Return an `IReader` for a boot method.
 
         :param boot_method: Boot method that is generating the config
         :param params: Parameters so far obtained, typically from the file
             path requested.
         """
-        def generate_config(kernel_params):
-            config = boot_method.render_config(
-                kernel_params=kernel_params, **params)
-            return config.encode("utf-8")
+        def generate(kernel_params):
+            return boot_method.get_reader(
+                self, kernel_params=kernel_params, **params)
 
         d = self.get_kernel_params(params)
-        d.addCallback(generate_config)
-        d.addCallback(BytesReader)
+        d.addCallback(generate)
         return d
 
     @staticmethod
@@ -203,7 +181,7 @@
         remote_host, remote_port = get("remote", (None, None))
         params["remote"] = remote_host
         params["cluster_uuid"] = get_cluster_uuid()
-        d = self.get_config_reader(boot_method, params)
+        d = self.get_boot_method_reader(boot_method, params)
         d.addErrback(self.get_page_errback, file_name)
         return d
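
To tie the tftp.py changes above together: TFTPBackend no longer renders
configuration text itself; get_boot_method_reader() asks the matched boot
method for an IReader via get_reader(backend, kernel_params=..., **params),
and BytesReader now lives in provisioningserver.boot. Below is a minimal
sketch of what such a get_reader could look like for a template-driven boot
method (illustrative only: ExampleBootMethod and its render_config() stand-in
are assumptions, not this branch's PXE implementation):

    from provisioningserver.boot import BytesReader

    class ExampleBootMethod:
        """Hypothetical boot method of the kind looked up via
        BootMethodRegistry in tftp.py."""

        def render_config(self, kernel_params, **extra):
            # Stand-in for real template rendering.
            return "KERNEL %s" % (kernel_params,)

        def get_reader(self, backend, kernel_params, **extra):
            # Render the boot configuration to text and hand it back as an
            # in-memory IReader, which is what get_boot_method_reader()
            # now expects from a boot method.
            config = self.render_config(kernel_params=kernel_params, **extra)
            return BytesReader(config.encode("utf-8"))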
 

