diff -pruN 2015.9-1/debian/changelog 2016.3-1/debian/changelog
--- 2015.9-1/debian/changelog	2015-09-10 08:31:03.000000000 +0000
+++ 2016.3-1/debian/changelog	2016-03-04 15:03:22.000000000 +0000
@@ -1,3 +1,34 @@
+lava-dispatcher (2016.3-1) unstable; urgency=medium
+
+  * New production release.
+  * Update standards version.
+  * Update copyright file with changes from debmake.
+  * Ensure lava-slave log file is rotated
+  * Fix handling of ramdisk when combined with NFS for armmp testing.
+  * Add ConfigObj support for improved tftp config parsing.
+  * Combine compression handling for consistency.
+  * Work around broken 302 redirects.
+
+ -- Neil Williams <codehelp@debian.org>  Fri, 04 Mar 2016 15:03:01 +0000
+
+lava-dispatcher (2016.2-1) unstable; urgency=medium
+
+  * New production release 
+
+ -- Neil Williams <codehelp@debian.org>  Tue, 02 Feb 2016 08:34:13 +0000
+
+lava-dispatcher (2015.12-1) unstable; urgency=medium
+
+  * New production release
+
+ -- Neil Williams <codehelp@debian.org>  Mon, 14 Dec 2015 09:40:30 +0000
+
+lava-dispatcher (2015.11-1) unstable; urgency=medium
+
+  * New production release
+
+ -- Neil Williams <codehelp@debian.org>  Mon, 02 Nov 2015 13:35:50 +0000
+
 lava-dispatcher (2015.9-1) unstable; urgency=medium
 
   * New production release
diff -pruN 2015.9-1/debian/control 2016.3-1/debian/control
--- 2015.9-1/debian/control	2015-09-10 08:31:03.000000000 +0000
+++ 2016.3-1/debian/control	2016-03-04 15:03:22.000000000 +0000
@@ -11,8 +11,8 @@ Build-Depends: debhelper (>= 8.0.0), dh-
  python-sphinx (>= 1.0.7+dfsg) | python3-sphinx,
  python-setuptools (>= 3)
 X-Python-Version: 2.7
-XS-Testsuite: autopkgtest
-Standards-Version: 3.9.6
+Testsuite: autopkgtest
+Standards-Version: 3.9.7
 Homepage: http://www.linaro.org/projects/test-validation/
 Vcs-Git: https://github.com/Linaro/pkg-lava-dispatcher.git
 Vcs-Browser: https://github.com/Linaro/pkg-lava-dispatcher
@@ -29,6 +29,7 @@ Multi-Arch: foreign
 Recommends: ntp, bzr, git, htop, tftpd-hpa, openbsd-inetd,
  qemu-system-x86 (>= 2.0.0) [amd64], qemu-system-x86 (>= 2.0.0) [i386],
  qemu-system-arm (>= 2.0.0) [armhf],
+ android-tools-adb, android-tools-fastboot,
  libguestfs-tools [amd64], libguestfs-tools [i386],
  nfs-kernel-server, python-launchpadlib, rpcbind,
  python-setproctitle, u-boot-tools, unzip, xz-utils, lxc, bridge-utils
diff -pruN 2015.9-1/debian/copyright 2016.3-1/debian/copyright
--- 2015.9-1/debian/copyright	2015-09-10 08:31:03.000000000 +0000
+++ 2016.3-1/debian/copyright	2016-03-04 15:03:22.000000000 +0000
@@ -3,7 +3,7 @@ Upstream-Name: lava-dispatcher
 Source: https://git.linaro.org/lava/lava-dispatcher.git
 
 Files: *
-Copyright: 2010-2013, Linaro Limited
+Copyright: 2010-2016, Linaro Limited
 License: GPL-2.0+
 
 Files: linaro_dashboard_bundle/evolution.py
@@ -12,21 +12,7 @@ Files: linaro_dashboard_bundle/evolution
  linaro_dashboard_bundle/io.py
  linaro_dashboard_bundle/tests.py
 Copyright: 2010-2013, Linaro Limited
-License: LGPL-3.0+
- This package is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation.
- .
- This package is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- Lesser General Public License for more details.
- .
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
- .
- On Debian systems, the complete text of the GNU Lesser General
- Public License can be found in "/usr/share/common-licenses/LGPL-3".
+License: LGPL-3.0
 
 Files: debian/*
 Copyright: 2013 Neil Williams <codehelp@debian.org>
@@ -49,3 +35,18 @@ License: GPL-2.0+
  On Debian systems, the complete text of the GNU General
  Public License version 2 can be found in "/usr/share/common-licenses/GPL-2".
 
+License: LGPL-3.0
+ This package is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation.
+ .
+ This package is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ Lesser General Public License for more details.
+ .
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ .
+ On Debian systems, the complete text of the GNU Lesser General
+ Public License can be found in "/usr/share/common-licenses/LGPL-3".
diff -pruN 2015.9-1/debian/lava-dispatcher.lintian-overrides 2016.3-1/debian/lava-dispatcher.lintian-overrides
--- 2015.9-1/debian/lava-dispatcher.lintian-overrides	2015-09-10 08:31:03.000000000 +0000
+++ 2016.3-1/debian/lava-dispatcher.lintian-overrides	2016-03-04 15:03:22.000000000 +0000
@@ -38,6 +38,9 @@ lava-dispatcher: script-not-executable u
 lava-dispatcher: script-not-executable usr/lib/python2.7/dist-packages/lava_dispatcher/lava_test_shell/distro/ubuntu/lava-add-keys
 lava-dispatcher: script-not-executable usr/lib/python2.7/dist-packages/lava_dispatcher/lava_test_shell/distro/ubuntu/lava-add-sources
 lava-dispatcher: script-not-executable usr/lib/python2.7/dist-packages/lava_dispatcher/lava_test_shell/lava-vm-groups-setup-host
+lava-dispatcher: script-not-executable usr/lib/python2.7/dist-packages/lava_dispatcher/lava_test_shell/vland/lava-vland-self
+lava-dispatcher: script-not-executable usr/lib/python2.7/dist-packages/lava_dispatcher/lava_test_shell/vland/lava-vland-tags
+lava-dispatcher: script-not-executable usr/lib/python2.7/dist-packages/lava_dispatcher/lava_test_shell/multi_node/lava-echo-config
 
 # android specific
 #lava-dispatcher: wrong-path-for-interpreter usr/lib/python2.7/dist-packages/lava_dispatcher/lava_test_shell/distro/android/lava-test-runner (#!/system/bin/mksh != /bin/mksh)
diff -pruN 2015.9-1/debian/tests/control 2016.3-1/debian/tests/control
--- 2015.9-1/debian/tests/control	2015-09-10 08:31:03.000000000 +0000
+++ 2016.3-1/debian/tests/control	2016-03-04 15:03:22.000000000 +0000
@@ -1,5 +1,6 @@
 Tests: testsuite
 Depends: @, @builddeps@,
  python-testscenarios, pep8, git, telnet, qemu-system-x86,
- nfs-kernel-server, u-boot-tools, iproute2, tftpd-hpa
+ nfs-kernel-server, u-boot-tools, iproute2, tftpd-hpa,
+ android-tools-fastboot, android-tools-adb
 Restrictions: allow-stderr
diff -pruN 2015.9-1/etc/logrotate.d/lava-slave-log 2016.3-1/etc/logrotate.d/lava-slave-log
--- 2015.9-1/etc/logrotate.d/lava-slave-log	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/etc/logrotate.d/lava-slave-log	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,9 @@
+/var/log/lava-dispatcher/lava-slave.log {
+	weekly
+	rotate 12
+	compress
+	delaycompress
+	missingok
+	notifempty
+	create 644 root root
+}
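
Side note on the new logrotate stanza above: a dry run shows what the policy would do without touching the log. A minimal sketch in Python, assuming logrotate is installed and the file is deployed under /etc/logrotate.d/:

    import subprocess

    # Dry-run the new rotation policy; -d (debug) prints the actions logrotate
    # would take for /var/log/lava-dispatcher/lava-slave.log without rotating.
    subprocess.call(["logrotate", "-d", "/etc/logrotate.d/lava-slave-log"])
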
diff -pruN 2015.9-1/lava/dispatcher/commands.py 2016.3-1/lava/dispatcher/commands.py
--- 2015.9-1/lava/dispatcher/commands.py	2015-09-10 10:34:46.000000000 +0000
+++ 2016.3-1/lava/dispatcher/commands.py	2016-02-02 08:07:05.000000000 +0000
@@ -87,8 +87,9 @@ def get_pipeline_runner(job):
             job.validate(simulate=validate_only)
             if not validate_only:
                 exitcode = job.run()
-        except JobError as exc:
-            logging.debug("%s" % exc)
+        except (JobError, RuntimeError, TypeError, ValueError) as exc:
+            import traceback
+            traceback.print_exc()
             sys.exit(2)
         if exitcode:
             sys.exit(exitcode)
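
The hunk above widens the exception handling around the pipeline runner and replaces a one-line debug message with a full traceback. A minimal sketch of the same pattern, using a hypothetical run_job() in place of job.run():

    import sys
    import traceback

    def run_job():
        # Hypothetical stand-in for job.run(); raise to exercise the handler.
        raise ValueError("device configuration missing")

    try:
        exitcode = run_job()
    except (RuntimeError, TypeError, ValueError) as exc:
        # Print the full traceback to stderr rather than a single debug line,
        # then exit with a distinct code so callers can tell job errors apart.
        traceback.print_exc()
        sys.exit(2)
    if exitcode:
        sys.exit(exitcode)
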
diff -pruN 2015.9-1/lava/dispatcher/daemonise.py 2016.3-1/lava/dispatcher/daemonise.py
--- 2015.9-1/lava/dispatcher/daemonise.py	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava/dispatcher/daemonise.py	2015-11-30 21:58:47.000000000 +0000
@@ -39,7 +39,7 @@ child = None
 def signal_handler(sig, frame):  # pylint: disable=unused-argument
     global child  # pylint: disable=global-statement
     try:
-        logging.info("Closing daemon and child %d", child.pid)
+        logging.info("Closing daemon and child %d" % child.pid)  # pylint: disable=logging-not-lazy
         child.send_signal(sig)
         child = None
         sys.exit(os.EX_OK)
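
For context on the daemonise.py change: the two logging styles differ only in when the message string is built, which is why the eager form needs the pylint marker. An illustration (not part of the package):

    import logging

    logging.basicConfig(level=logging.INFO)
    pid = 4242  # hypothetical child pid

    # Lazy style: logging builds the string only if the record is emitted.
    logging.info("Closing daemon and child %d", pid)

    # Eager style, as in the hunk above: the string is built before the call,
    # which pylint flags as logging-not-lazy unless explicitly disabled.
    logging.info("Closing daemon and child %d" % pid)  # pylint: disable=logging-not-lazy
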
diff -pruN 2015.9-1/lava/dispatcher/lava-dispatcher-slave 2016.3-1/lava/dispatcher/lava-dispatcher-slave
--- 2015.9-1/lava/dispatcher/lava-dispatcher-slave	2015-09-02 16:44:52.000000000 +0000
+++ 2016.3-1/lava/dispatcher/lava-dispatcher-slave	2016-03-02 14:31:47.000000000 +0000
@@ -19,7 +19,15 @@
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, see <http://www.gnu.org/licenses>.
 
-"""Start the lava dispatcher and the zmq messager."""
+"""
+Start the lava dispatcher and the zmq messager.
+
+Slaves are allowed to connect over ZMQ but devices can only
+be assigned to known slaves by the admin of the instance
+(by selecting the worker_host for each pipeline device).
+Initially, the details of the workers will be configured
+via the current dispatcher support.
+"""
 
 import argparse
 import atexit
@@ -52,8 +60,9 @@ import zmq
 TIMEOUT = 5
 SEND_QUEUE = 10
 
-# FIXME: This is a temporary fix and the dir contents need to be assessed
-# whether they are useful (logs should go into /tmp or tmpfs) or not.
+# FIXME: This is a temporary fix until the overlay is sent to the master
+# The job.yaml and device.yaml are retained so that lava-dispatch can be re-run manually
+# (at least until the slave is rebooted).
 TMP_DIR = os.path.join(tempfile.gettempdir(), "lava-dispatcher/slave/")
 
 # Setup the log.
@@ -127,6 +136,15 @@ class Job(object):
         environ.update(conf.get("overrides", {}))
         return environ
 
+    def log_errors(self):
+        err_file = os.path.join(self.base_dir, "err")
+        msg = None
+        if os.stat(err_file).st_size != 0:
+            with open(err_file, 'r') as errlog:
+                msg = errlog.read()
+            LOG.exception(msg)
+        return msg
+
     def start(self):
         """Start the process."""
         out_file = os.path.join(self.base_dir, "out")
@@ -168,7 +186,6 @@ class Job(object):
             else:
                 LOG.exception(exc)
             with open(err_file, "a") as errlog:
-                # TODO: send something to the zmq LOG
                 errlog.write("%s\n%s\n" % (exc, traceback.format_exc()))
             self.cancel()
 
@@ -302,8 +319,10 @@ def main():
     jobs = {}
 
     LOG.info("Connecting to master as <%s>", host_name)
-    LOG.debug("Greeting the master => 'HELLO'")
-    sock.send_multipart(["HELLO"])
+    hello_msg = "HELLO"
+    retry_msg = "HELLO_RETRY"
+    LOG.debug("Greeting the master => '%s'", hello_msg)
+    sock.send_multipart([hello_msg])
 
     while True:
         try:
@@ -334,8 +353,8 @@ def main():
                 else:
                     LOG.info("Unexpected message from the master: %s", message)
 
-        LOG.debug("Sending new HELLO_RETRY message to the master")
-        sock.send_multipart(["HELLO_RETRY"])
+        LOG.debug("Sending new %s message to the master", retry_msg)
+        sock.send_multipart([retry_msg])
 
     # Loop for server instructions
     LOG.info("Waiting for master instructions")
@@ -359,7 +378,7 @@ def main():
             except (IndexError, TypeError):
                 LOG.error("Invalid message from the master: %s", msg)
                 continue
-            LOG.debug("Received action=%s, args=(%s)", action, msg[1:])
+            LOG.debug("Received action=%s", action)
 
             # Parse the action
             if action == "HELLO_OK":
@@ -493,6 +512,9 @@ def main():
                     job_status = jobs[job_id].proc.returncode
                     if job_status:
                         LOG.info("[%d] Job returned non-zero", job_id)
+                        errs = jobs[job_id].log_errors()
+                        if errs:
+                            sock.send_multipart(["ERROR", str(job_id), str(errs)])
 
                     jobs[job_id].running = False
                     sock.send_multipart(["END", str(job_id), str(job_status)])
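
The lava-dispatcher-slave changes above revolve around the ZMQ exchange with the master: the slave greets with HELLO, falls back to HELLO_RETRY, and reports results as multipart frames such as END or ERROR. A minimal sketch of that exchange, assuming a DEALER socket and a hypothetical master address (the real slave adds polling, timeouts and job bookkeeping):

    import zmq

    context = zmq.Context()
    sock = context.socket(zmq.DEALER)
    sock.setsockopt(zmq.RCVTIMEO, 5000)   # avoid blocking forever in a sketch
    sock.connect("tcp://localhost:5556")  # assumed address, not from the package

    sock.send_multipart([b"HELLO"])
    try:
        msg = sock.recv_multipart()
    except zmq.Again:
        # Same fallback as the hunk above when the master does not answer.
        sock.send_multipart([b"HELLO_RETRY"])
    else:
        if msg and msg[0] == b"HELLO_OK":
            # Results go back as multipart frames, e.g. ["END", job_id, status]
            # or, with the new log_errors() support, ["ERROR", job_id, message].
            sock.send_multipart([b"END", b"1234", b"0"])
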
diff -pruN 2015.9-1/lava_dispatcher/actions/deploy.py 2016.3-1/lava_dispatcher/actions/deploy.py
--- 2015.9-1/lava_dispatcher/actions/deploy.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/actions/deploy.py	2016-02-02 08:07:05.000000000 +0000
@@ -236,7 +236,7 @@ class cmd_deploy_linaro_kernel(BaseActio
             'rootfstype': {'type': 'string', 'optional': True},
             'bootloadertype': {'type': 'string', 'optional': True,
                                'default': 'u_boot'},
-            'target_type': {'type': 'string', 'enum': ['ubuntu', 'oe', 'android', 'fedora'],
+            'target_type': {'type': 'string', 'enum': ['ubuntu', 'oe', 'android', 'fedora', 'debian_installer'],
                             'optional': True, 'default': 'oe'},
             'login_prompt': {'type': 'string', 'optional': True},
             'password_prompt': {'type': 'string', 'optional': True},
diff -pruN 2015.9-1/lava_dispatcher/actions/lava_test_shell.py 2016.3-1/lava_dispatcher/actions/lava_test_shell.py
--- 2015.9-1/lava_dispatcher/actions/lava_test_shell.py	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/actions/lava_test_shell.py	2016-03-04 14:35:18.000000000 +0000
@@ -157,6 +157,7 @@ LAVA_SEND_FILE = 'lava-send'
 LAVA_SYNC_FILE = 'lava-sync'
 LAVA_WAIT_FILE = 'lava-wait'
 LAVA_WAIT_ALL_FILE = 'lava-wait-all'
+LAVA_ECHO_CONFIG_FILE = 'lava-echo-config'
 LAVA_MULTI_NODE_CACHE_FILE = '/tmp/lava_multi_node_cache.txt'
 LAVA_LMP_CACHE_FILE = '/tmp/lava_lmp_cache.txt'
 
@@ -1001,6 +1002,14 @@ class cmd_lava_test_shell(BaseAction):
                         fout.write("TARGET_ROLE='%s'\n" % self.context.test_data.metadata['role'])
                     elif foutname == LAVA_SELF_FILE:
                         fout.write("LAVA_HOSTNAME='%s'\n" % self.context.test_data.metadata['target.hostname'])
+                    elif foutname == LAVA_ECHO_CONFIG_FILE:
+                        fout.write('LAVA_SHARED_CONFIG="')
+                        if 'shared_config' in self.context.test_data.metadata:
+                            for device, data in self.context.test_data.metadata['shared_config'].iteritems():
+                                if device in self.context.group_data['roles']:
+                                    for key, value in data.iteritems():
+                                        fout.write(r"%s %s %s\n" % (device, key, value))
+                        fout.write('"\n')
                     else:
                         fout.write("LAVA_TEST_BIN='%s/bin'\n" %
                                    target.lava_test_dir)
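
To make the new lava-echo-config hunk above concrete, the loop writes a single shell assignment whose value embeds literal \n sequences. A small sketch with made-up shared_config and role data (in the dispatcher these come from the job's test_data metadata and group_data):

    # Hypothetical metadata for illustration only.
    shared_config = {"panda01": {"ip": "10.0.0.5", "iface": "eth0"}}
    roles = {"panda01": "client"}

    parts = ['LAVA_SHARED_CONFIG="']
    for device, data in shared_config.items():
        if device in roles:
            for key, value in data.items():
                parts.append(r"%s %s %s\n" % (device, key, value))
    parts.append('"')
    print("".join(parts))
    # -> LAVA_SHARED_CONFIG="panda01 ip 10.0.0.5\npanda01 iface eth0\n"
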
diff -pruN 2015.9-1/lava_dispatcher/client/base.py 2016.3-1/lava_dispatcher/client/base.py
--- 2015.9-1/lava_dispatcher/client/base.py	2015-09-10 08:06:51.000000000 +0000
+++ 2016.3-1/lava_dispatcher/client/base.py	2015-12-14 09:32:55.000000000 +0000
@@ -234,7 +234,9 @@ class AndroidTesterCommandRunner(Network
         if self._client.target_device.config.android_adb_over_tcp:
             self._setup_adb_over_tcp()
         elif self._client.target_device.config.android_adb_over_usb:
-            self._setup_adb_over_usb()
+            # Nothing to do here. If in the future we decide to dynamically
+            # determine a usb serial number, this would be the place to do it.
+            pass
         else:
             raise CriticalError('ADB not configured for TCP or USB')
 
@@ -257,12 +259,6 @@ class AndroidTesterCommandRunner(Network
         self.android_adb_over_tcp_connect()
         self.wait_until_attached()
 
-    def _setup_adb_over_usb(self):
-        self.run(
-            'getprop ro.serialno',
-            response=self._client.target_device.config.android_serialno_patterns)
-        self.dev_name = self.match.group(0)
-
     def disconnect(self):
         if self._client.target_device.config.android_adb_over_tcp:
             self.android_adb_over_tcp_disconnect()
@@ -284,6 +280,11 @@ class AndroidTesterCommandRunner(Network
         else:
             raise ADBConnectError(('Failed to connected to device with'
                                    ' command:%s') % cmd)
+        # Users can define 'android_adb_over_tcp' dynamically, so we must set
+        # the adb_command dynamically as well. This also allows us to re-use
+        # the existing code in the fastboot classes seamlessly.
+        self._client.target_device.config.adb_command = \
+            'adb -s %s:%s' % (dev_ip, adb_port)
 
     def android_adb_over_tcp_disconnect(self):
         dev_ip = self.dev_ip
@@ -314,7 +315,7 @@ class AndroidTesterCommandRunner(Network
             logging.warning(traceback.format_exc())
             return None
         ip_pattern1 = "%s: ip (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) mask" % nic_name
-        ip_pattern2 = "inet addr:(\d?\d?\d?\.\d?\d?\d?\.\d?\d?\d?\.\d?\d?\d?)"
+        ip_pattern2 = "inet addr:(\d?\d?\d?\.\d?\d?\d?\.\d?\d?\d?\.\d?\d?\d?) +Bcast:"
         try:
             self.run("ifconfig %s" % nic_name,
                      [ip_pattern1, ip_pattern2, pexpect.EOF],
@@ -322,7 +323,8 @@ class AndroidTesterCommandRunner(Network
         except Exception as e:
             raise NetworkError("ifconfig can not match ip pattern for %s:%s" % (nic_name, e))
 
-        if self.match_id == 0:
+        # keep the match_id range in line with the number of patterns.
+        if self.match_id in [0, 1]:
             match_group = self.match.groups()
             if len(match_group) > 0:
                 return match_group[0]
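
The base.py regex change anchors the second pattern on the Bcast: field so it only matches the queried interface's own inet line, and the match_id check now accepts either pattern. A small illustration with a made-up ifconfig line:

    import re

    nic_name = "eth0"
    ip_pattern1 = r"%s: ip (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) mask" % nic_name
    # Anchoring on " +Bcast:" keeps this from matching unrelated inet addr:
    # lines (for example the loopback interface, which has no Bcast field).
    ip_pattern2 = r"inet addr:(\d?\d?\d?\.\d?\d?\d?\.\d?\d?\d?\.\d?\d?\d?) +Bcast:"

    sample = "eth0  inet addr:192.168.1.20  Bcast:192.168.1.255  Mask:255.255.255.0"
    for match_id, pattern in enumerate([ip_pattern1, ip_pattern2]):
        match = re.search(pattern, sample)
        if match:
            print(match_id, match.group(1))  # either index is now accepted
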
diff -pruN 2015.9-1/lava_dispatcher/config.py 2016.3-1/lava_dispatcher/config.py
--- 2015.9-1/lava_dispatcher/config.py	2015-09-10 08:06:51.000000000 +0000
+++ 2016.3-1/lava_dispatcher/config.py	2016-03-04 14:35:18.000000000 +0000
@@ -57,6 +57,7 @@ class DeviceSchema(schema.Schema):
     boot_cmd_timeout = schema.IntOption(default=10)
     boot_options = schema.ListOption()
     boot_linaro_timeout = schema.IntOption(default=300)
+    extended_boot_timeout = schema.IntOption(default=3600)
     boot_part = schema.IntOption(fatal=True)
     boot_part_android_org = schema.IntOption()
     boot_retries = schema.IntOption(default=3)
@@ -106,6 +107,8 @@ class DeviceSchema(schema.Schema):
     pre_connect_command = schema.StringOption()
     power_on_cmd = schema.StringOption()  # for sdmux
     power_off_cmd = schema.StringOption()  # for sdmux
+    pre_os_cmd = schema.StringOption(default=None)
+    pre_power_cmd = schema.StringOption(default=None)
     reset_port_command = schema.StringOption()
     root_part = schema.IntOption()
     sata_block_device = schema.StringOption(default="sda")
@@ -297,6 +300,7 @@ class DeviceSchema(schema.Schema):
     u_load_addrs = schema.ListOption(default=None)
     z_load_addrs = schema.ListOption(default=None)
     uimage_only = schema.BoolOption(default=False)
+    uimage_arch = schema.StringOption(default='arm')
     text_offset = schema.StringOption(default=None)
     multi_image_only = schema.BoolOption(default=False)
     uimage_xip = schema.BoolOption(default=False)
@@ -333,6 +337,7 @@ class DeviceSchema(schema.Schema):
     vexpress_firmware_default = schema.StringOption(default=None)
     vexpress_flash_range_low = schema.StringOption(default=None)
     vexpress_flash_range_high = schema.StringOption(default=None)
+    vexpress_sky2_mac = schema.StringOption(default=None)
 
     # for lxc devices
     lxc_driver = schema.StringOption(default=None)
diff -pruN 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/arndale.conf 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/arndale.conf
--- 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/arndale.conf	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/arndale.conf	2015-11-30 21:58:47.000000000 +0000
@@ -33,14 +33,16 @@ possible_partitions_files =
     init.rc
     fstab.exynos5250-arndale
 
+# u-boot on Arndale loads itself at 0x43E00000. A safe location to load the ramdisk
+# is above that address so that it does not overwrite u-boot.
 u_load_addrs =
     0x40007000
-    0x42000000
+    0x45000000
     0x41f00000
 
 z_load_addrs =
     0x41000000
-    0x42000000
+    0x45000000
     0x41f00000
 
 boot_cmds_master =
diff -pruN 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/fsl-ls1021a-twr.conf 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/fsl-ls1021a-twr.conf
--- 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/fsl-ls1021a-twr.conf	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/fsl-ls1021a-twr.conf	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,47 @@
+client_type = bootloader
+
+bootloader_prompt = =>
+interrupt_boot_prompt = autoboot
+send_char = False
+
+u_load_addrs =
+    0x81000000
+    0x88000000
+    0x8f000000
+  
+z_load_addrs =
+    0x81000000
+    0x88000000
+    0x8f000000
+
+
+boot_cmds_nfs =
+     setenv autoload no,
+     setenv ethaddr           %(mac)s,
+     setenv kernel_addr_r     "'{KERNEL_ADDR}'",
+     setenv initrd_addr_r     "'{RAMDISK_ADDR}'",
+     setenv fdt_addr_r        "'{DTB_ADDR}'",
+     setenv loadkernel        "'tftp ${kernel_addr_r} {KERNEL}'",
+     setenv loadinitrd        "'tftp ${initrd_addr_r} {RAMDISK}; setenv initrd_size ${filesize}'",
+     setenv loadfdt           "'tftp ${fdt_addr_r} {DTB}'",
+     setenv nfsargs           "'setenv bootargs console=ttyS0,115200 debug root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS},tcp,hard,intr ip=dhcp'",
+     setenv bootcmd           "'dhcp; setenv serverip {SERVER_IP}; run loadkernel; run loadinitrd; run loadfdt; run nfsargs; {BOOTX}'",
+     boot
+
+boot_cmds_ramdisk =
+     setenv autoload no,
+     setenv ethaddr           %(mac)s,
+     setenv kernel_addr_r     "'{KERNEL_ADDR}'",
+     setenv initrd_addr_r     "'{RAMDISK_ADDR}'",
+     setenv fdt_addr_r        "'{DTB_ADDR}'",
+     setenv bootargs          "root=/dev/ram0 rw console=ttyS0,115200 ramdisk_size=0x2000000",
+     setenv loadinitrd        "'tftpboot  ${initrd_addr_r} {RAMDISK}; setenv initrd_size ${filesize}'",
+     setenv loadfdt           "'tftpboot  ${fdt_addr_r} {DTB}'",
+     setenv loadkernel        "'tftpboot  ${kernel_addr_r} {KERNEL}'",
+     setenv bootcmd           "'dhcp; setenv serverip {SERVER_IP}; run loadkernel; run loadinitrd; run loadfdt; {BOOTX}'",
+     boot
+boot_options =
+    boot_cmds
+
+[boot_cmds]
+default = boot_cmds
diff -pruN 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/fsl-ls2085a-rdb.conf 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/fsl-ls2085a-rdb.conf
--- 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/fsl-ls2085a-rdb.conf	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/fsl-ls2085a-rdb.conf	2015-11-30 21:58:47.000000000 +0000
@@ -0,0 +1,49 @@
+client_type = bootloader
+bootloader_prompt = =>
+interrupt_boot_prompt = autoboot
+text_offset = 80080000
+uimage_only = True
+uimage_arch = arm64
+send_char = False
+
+u_load_addrs =
+    0xa0000000
+    0x91000000
+    0x90000000
+
+z_load_addrs =
+    0xa0000000
+    0x91000000
+    0x90000000
+
+boot_cmds_nfs =
+    setenv autoload no,
+    setenv kernel_addr_r "'{KERNEL_ADDR}'",
+    setenv initrd_addr_r "'{RAMDISK_ADDR}'",
+    setenv fdt_addr_r "'{DTB_ADDR}'",
+    setenv loadkernel "'tftp ${kernel_addr_r} {KERNEL}'",
+    setenv loadinitrd "'tftp ${initrd_addr_r} {RAMDISK}; setenv initrd_size ${filesize}'",
+    setenv loadfdt "'tftp ${fdt_addr_r} {DTB}'",
+    setenv nfsargs "'console=ttyS1,115200 earlycon=uart8250,mmio,0x21c0600 debug rw root=/dev/ram0 ip=dhcp'",
+    setenv bootcmd "'dhcp; setenv serverip {SERVER_IP}; run loadkernel; run loadinitrd; run loadfdt; run nfsargs; {BOOTX}'",
+    boot
+
+boot_cmds_ramdisk =
+    setenv autoload no,
+    setenv initrd_high "'0xffffffff'",
+    setenv fdt_high "'0xffffffff'",
+    setenv kernel_addr_r "'{KERNEL_ADDR}'",
+    setenv initrd_addr_r "'{RAMDISK_ADDR}'",
+    setenv fdt_addr_r "'{DTB_ADDR}'",
+    setenv loadkernel "'tftp ${kernel_addr_r} {KERNEL}'",
+    setenv loadinitrd "'tftp ${initrd_addr_r} {RAMDISK}; setenv initrd_size ${filesize}'",
+    setenv loadfdt "'tftp ${fdt_addr_r} {DTB}'",
+    setenv bootargs "'console=ttyS1,115200 earlycon=uart8250,mmio,0x21c0600 debug rw root=/dev/ram0 ip=dhcp'",
+    setenv bootcmd "'dhcp; setenv serverip {SERVER_IP}; run loadkernel; run loadinitrd; run loadfdt; {BOOTX}'",
+    boot
+
+boot_options =
+    boot_cmds
+
+[boot_cmds]
+default = boot_cmds
\ No newline at end of file
diff -pruN 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/hi6220-hikey.conf 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/hi6220-hikey.conf
--- 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/hi6220-hikey.conf	2015-08-07 08:19:39.000000000 +0000
+++ 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/hi6220-hikey.conf	2015-11-30 21:58:47.000000000 +0000
@@ -55,30 +55,34 @@ fastboot_efi_image = http://images.valid
 
 boot_cmds = expect Start:,
             sendline 2,
-            expect Shell>,
-            sendline "Image dtb=hi6220-hikey.dtb initrd=initrd.img console=ttyAMA0,115200 earlycon=pl011,0xf8015000 root=/dev/mmcblk0p9 rw rootwait ip=dhcp",
-            sendcontrol M
+            expect Debian,
+            sendline c,
+            expect grub>,
+            sendline "linux /Image console=tty0 console=ttyAMA3,115200 root=/dev/disk/by-partlabel/system rootwait rw efi=noruntime",
+            expect grub>,
+            sendline "initrd /initrd.img",
+            expect grub>,
+            sendline "devicetree /hi6220-hikey.dtb",
+            expect grub>,
+            sendline boot
 
 boot_cmds_android = expect Start:,
-                    sendline 2,
-                    expect Shell>,
-                    sendline "Image dtb=hi6220-hikey.dtb initrd=initrd.img console=ttyAMA0,115200 earlycon=pl011,0xf8015000 root=/dev/ram0 rw rootwait ip=dhcp",
-                    sendcontrol M
+                    sendline 2
 
 boot_cmds_ramdisk = expect Start:,
-                    sendline 2,
+                    sendline 4,
                     expect Shell>,
-                    sendline "{KERNEL} dtb={DTB} initrd={RAMDISK} console=ttyAMA0,115200 earlycon=pl011,0xf8015000 root=/dev/ram0 ip=dhcp",
+                    sendline "{KERNEL} dtb={DTB} initrd={RAMDISK} console=tty0 console=ttyAMA3,115200 root=/dev/ram0 ip=dhcp efi=noruntime",
                     sendcontrol M
 
 boot_cmds_nfs = expect Start:,
                 sendline 2,
                 expect Shell>,
-                sendline "{KERNEL} dtb={DTB} initrd={RAMDISK} console=ttyAMA0,115200 earlycon=pl011,0xf8015000 root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS},tcp,hard,intr ip=dhcp",
+                sendline "{KERNEL} dtb={DTB} initrd={RAMDISK} console=tty0 console=ttyAMA3,115200 root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS},tcp,hard,intr ip=dhcp",
                 sendcontrol M
 
 boot_cmds_rootfs = expect Start:,
                    sendline 2,
                    expect Shell>,
-                   sendline "{KERNEL} dtb={DTB} initrd={RAMDISK} console=ttyAMA0,115200 earlycon=pl011,0xf8015000 root=/dev/mmcblk0p9 rw rootwait ip=dhcp",
+                   sendline "{KERNEL} dtb={DTB} initrd={RAMDISK} console=tty0 console=ttyAMA3,115200 root=/dev/mmcblk0p9 rw rootwait ip=dhcp",
                    sendcontrol M
\ No newline at end of file
diff -pruN 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/highbank.conf 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/highbank.conf
--- 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/highbank.conf	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/highbank.conf	2015-12-14 09:33:11.000000000 +0000
@@ -21,12 +21,12 @@ interrupt_boot_command = s
 
 u_load_addrs =
     0x00800000
-    0x01000000
+    0x01800000
     0x00001000
 
 z_load_addrs =
     0x00800000
-    0x01000000
+    0x01800000
     0x00001000
 
 boot_cmds =
diff -pruN 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/juno-r2.conf 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/juno-r2.conf
--- 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/juno-r2.conf	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/juno-r2.conf	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,207 @@
+client_type = vexpress
+
+master_testboot_label = testboot
+master_sdcard_label = sdcard
+vexpress_uefi_path = SOFTWARE/fip.bin
+vexpress_uefi_backup_path = SOFTWARE/backup-fip.bin
+vexpress_uefi_image_filename = juno_fip.bin
+vexpress_uefi_image_files =
+    juno_fip.bin
+    fip.bin
+vexpress_bl0_path = SOFTWARE/bl0.bin
+vexpress_bl0_backup_path = SOFTWARE/backup-bl0.bin
+vexpress_bl1_path = SOFTWARE/bl1.bin
+vexpress_bl1_backup_path = SOFTWARE/backup-bl1.bin
+vexpress_bl1_image_filename = juno_bl1.bin
+vexpress_bl1_image_files =
+    juno_bl1.bin
+    bl1.bin
+vexpress_usb_mass_storage_device = /dev/disk/by-label/WG
+vexpress_requires_trusted_firmware = True
+vexpress_uefi_default = http://images.validation.linaro.org/juno/juno-fip-2015-01-28-001.bin
+vexpress_bl1_default = http://images.validation.linaro.org/juno/bl1.bin
+
+vexpress_complete_firmware = True
+vexpress_firmware_path_hwpack = board-recovery-image
+vexpress_firmware_path_android = board_recovery_image.tar.bz2
+vexpress_firmware_default = http://images.validation.linaro.org/juno/board-recovery-image.tgz
+
+vexpress_flash_range_low = 0x0BFC0000
+vexpress_flash_range_high = 0x0BFEFFFF
+
+vexpress_sky2_mac = 0x00,0x01,0x02,0x03,0x04,0x05
+
+boot_cmd_timeout = 30
+
+interrupt_boot_prompt = The default boot selection will start in
+bootloader_prompt = Start:
+lmc_dev_arg = fastmodel
+android_orig_block_device = sda
+android_lava_block_device = sda
+partition_padding_string_android =
+partition_padding_string_org =
+sys_part_android = 5
+data_part_android = 6
+sdcard_part_android = 7
+read_boot_cmds_from_image = 0
+boot_retries = 5
+bootloader_serial_delay_ms = 30
+
+possible_partitions_files =
+    fstab.juno
+    init.partitions.rc
+    fstab.partitions
+    init.rc
+
+boot_cmds_master = expect Start:,
+                   sendline 2,
+                   expect in 5 seconds to skip,
+                   sendcontrol [,
+                   sendcontrol [,
+                   expect Shell>,
+                   sendline "MasterImage dtb=juno-r2.dtb initrd=ramdisk.img console=ttyAMA0,115200 androidboot.hardware=juno systemd.log_target=null rootwait root=/dev/sda2 sky2.mac_address={'SKY2_MAC'}",
+                   sendcontrol M
+
+boot_cmds =        expect Start:,
+                   sendline 2,
+                   expect in 5 seconds to skip,
+                   sendcontrol [,
+                   sendcontrol [,
+                   expect Shell>,
+                   sendline "Image dtb=juno-r2.dtb initrd=ramdisk.img console=ttyAMA0,115200 androidboot.hardware=juno systemd.log_target=null rootwait root=/dev/sda2 sky2.mac_address={'SKY2_MAC'}",
+                   sendcontrol M
+
+boot_cmds_oe =     expect Start:,
+                   sendline 2,
+                   expect in 5 seconds to skip,
+                   sendcontrol [,
+                   sendcontrol [,
+                   expect Shell>,
+                   sendline "Image dtb=juno-r2.dtb initrd=ramdisk.img console=ttyAMA0,115200 androidboot.hardware=juno systemd.log_target=null rootwait root=/dev/sda2 sky2.mac_address={'SKY2_MAC'}",
+                   sendcontrol M
+
+boot_cmds_android = expect Start:,
+                   sendline 2,
+                   expect in 5 seconds to skip,
+                   sendcontrol [,
+                   sendcontrol [,
+                   expect Shell>,
+                   sendline "kernel dtb=juno-r2.dtb initrd=ramdisk.img console=ttyAMA0,115200 androidboot.hardware=juno systemd.log_target=null rootwait root=/dev/sda2 sky2.mac_address={'SKY2_MAC'}",
+                   sendcontrol M
+
+boot_cmds_ramdisk = expect Start:,
+                    sendline 3,
+                    expect Choice:,
+                    sendline 3,
+                    expect Delete entry:,
+                    sendline 1,
+                    expect Choice:,
+                    sendline 1,
+                    expect Select the Boot Device:,
+                    sendcontrol [,
+                    sendcontrol M,
+                    expect Choice:,
+                    expect Choice:,
+                    sendline 1,
+                    expect Select the Boot Device:,
+                    sendline 8,
+                    expect Get the IP address from DHCP: [y/n],
+                    sendline y,
+                    expect Get the TFTP server IP address:,
+                    sendline {SERVER_IP},
+                    expect File path of the EFI Application or the kernel,
+                    sendline {KERNEL},
+                    expect Has FDT support? [y/n],
+                    sendline y,
+                    expect Add an initrd: [y/n],
+                    sendline y,
+                    expect Get the IP address from DHCP: [y/n],
+                    sendline y,
+                    expect Get the TFTP server IP address:,
+                    sendline {SERVER_IP},
+                    expect File path of the initrd,
+                    sendline {RAMDISK},
+                    expect Arguments to pass to the EFI Application:,
+                    sendline "console=ttyAMA0,115200 earlyprintk=pl011,0x7ff80000 root=/dev/ram0 verbose debug ip=dhcp",
+                    expect Description for this new Entry:,
+                    sendline LAVA Ramdisk Test Image,
+                    expect Choice:,
+                    sendline 5,
+                    expect Select the Boot Device:,
+                    sendcontrol [,
+                    sendcontrol M,
+                    expect Choice:,
+                    expect Choice:,
+                    sendline 5,
+                    expect Select the Boot Device:,
+                    sendline 8,
+                    expect Get the IP address from DHCP: [y/n],
+                    sendline y,
+                    expect Get the TFTP server IP address:,
+                    sendline {SERVER_IP},
+                    expect File path of the FDT blob,
+                    sendline {DTB},
+                    expect Choice:,
+                    sendline 7,
+                    expect Start:,
+                    sendline 1
+
+boot_cmds_nfs = expect Start:,
+                sendline 3,
+                expect Choice:,
+                sendline 3,
+                expect Delete entry:,
+                sendline 1,
+                expect Choice:,
+                sendline 1,
+                expect Select the Boot Device:,
+                sendcontrol [,
+                sendcontrol M,
+                expect Choice:,
+                expect Choice:,
+                sendline 1,
+                expect Select the Boot Device:,
+                sendline 8,
+                expect Get the IP address from DHCP: [y/n],
+                sendline y,
+                expect Get the TFTP server IP address:,
+                sendline {SERVER_IP},
+                expect File path of the EFI Application or the kernel,
+                sendline {KERNEL},
+                expect Is an EFI Application? [y/n],
+                sendline n,
+                expect Has FDT support? [y/n],
+                sendline y,
+                expect Add an initrd: [y/n],
+                sendline n,
+                expect Arguments to pass to the binary:,
+                sendline "console=ttyAMA0,115200 earlyprintk=pl011,0x7ff80000 root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS},tcp,hard,intr ip=dhcp",
+                expect Description for this new Entry:,
+                sendline LAVA NFS Test Image,
+                expect Choice:,
+                sendline 5,
+                expect Select the Boot Device:,
+                sendcontrol [,
+                sendcontrol M,
+                expect Choice:,
+                expect Choice:,
+                sendline 5,
+                expect Select the Boot Device:,
+                sendline 8,
+                expect Get the IP address from DHCP: [y/n],
+                sendline y,
+                expect Get the TFTP server IP address:,
+                sendline {SERVER_IP},
+                expect File path of the FDT blob,
+                sendline {DTB},
+                expect Choice:,
+                sendline 7,
+                expect Start:,
+                sendline 1
+
+boot_options =
+    boot_cmds
+
+[boot_cmds]
+default = boot_cmds
+
diff -pruN 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/odroid-xu3.conf 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/odroid-xu3.conf
--- 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/odroid-xu3.conf	2015-02-26 13:45:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/odroid-xu3.conf	2015-12-14 09:33:11.000000000 +0000
@@ -4,12 +4,12 @@ append_dtb = True
 
 u_load_addrs =
     0x41000000
-    0x42000000
+    0x45000000
     0x43000000
 
 z_load_addrs =
     0x41000000
-    0x42000000
+    0x45000000
     0x43000000
 
 boot_cmds_ramdisk =
@@ -38,4 +38,4 @@ boot_options =
     boot_cmds
 
 [boot_cmds]
-default = boot_cmds
\ No newline at end of file
+default = boot_cmds
diff -pruN 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/omap5-uevm.conf 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/omap5-uevm.conf
--- 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/omap5-uevm.conf	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/omap5-uevm.conf	2015-12-14 09:33:11.000000000 +0000
@@ -0,0 +1,49 @@
+client_type = bootloader
+
+#lmc_dev_arg = omap5-uevm
+bootloader_prompt = =>
+send_char = False
+
+u_load_addrs =
+    0x80200000
+    0x81600000
+    0x815f0000
+
+z_load_addrs =
+    0x81000000
+    0x82000000
+    0x81f00000
+
+boot_cmds_nfs =
+    setenv autoload no,
+    setenv initrd_high "'0xffffffff'",
+    setenv fdt_high "'0xffffffff'",
+    setenv kernel_addr_r "'{KERNEL_ADDR}'",
+    setenv initrd_addr_r "'{RAMDISK_ADDR}'",
+    setenv fdt_addr_r "'{DTB_ADDR}'",
+    setenv loadkernel "'tftp ${kernel_addr_r} {KERNEL}'",
+    setenv loadinitrd "'tftp ${initrd_addr_r} {RAMDISK}; setenv initrd_size ${filesize}'",
+    setenv loadfdt "'tftp ${fdt_addr_r} {DTB}'",
+    setenv nfsargs "'setenv bootargs console=ttyO2,115200n8 root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS},tcp,hard,intr ip=dhcp'",
+    setenv bootcmd "'usb start; dhcp; setenv serverip {SERVER_IP}; run loadkernel; run loadinitrd; run loadfdt; run nfsargs; {BOOTX}'",
+    boot
+
+boot_cmds_ramdisk =
+    setenv autoload no,
+    setenv initrd_high "'0xffffffff'",
+    setenv fdt_high "'0xffffffff'",
+    setenv kernel_addr_r "'{KERNEL_ADDR}'",
+    setenv initrd_addr_r "'{RAMDISK_ADDR}'",
+    setenv fdt_addr_r "'{DTB_ADDR}'",
+    setenv loadkernel "'tftp ${kernel_addr_r} {KERNEL}'",
+    setenv loadinitrd "'tftp ${initrd_addr_r} {RAMDISK}; setenv initrd_size ${filesize}'",
+    setenv loadfdt "'tftp ${fdt_addr_r} {DTB}'",
+    setenv bootargs "'console=ttyO2,115200n8 root=/dev/ram0 ip=dhcp'",
+    setenv bootcmd "'usb start; dhcp; setenv serverip {SERVER_IP}; run loadkernel; run loadinitrd; run loadfdt; {BOOTX}'",
+    boot
+
+boot_options =
+    boot_cmds
+
+[boot_cmds]
+default = boot_cmds
diff -pruN 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu-arm.conf 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu-arm.conf
--- 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu-arm.conf	2015-02-26 13:45:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu-arm.conf	2016-02-02 08:07:05.000000000 +0000
@@ -2,13 +2,13 @@ client_type=qemu
 
 qemu_binary = qemu-system-arm
 qemu_networking_options = -net nic,model=virtio -net user
-qemu_options = -M %(qemu_machine_type)s -no-reboot -show-cursor -no-reboot -nographic -m 128 %(qemu_networking_options)s
+qemu_options = -M %(qemu_machine_type)s -no-reboot -show-cursor -no-reboot -nographic -m 256 %(qemu_networking_options)s
 
 qemu_machine_type = versatilepb
 
-boot_cmds = root=/dev/sda rw console=ttyAMA0,115200 ip=dhcp mem=128M
+boot_cmds = root=/dev/sda rw console=ttyAMA0,115200 ip=dhcp mem=256M
 
-boot_cmds_ramdisk = root=/dev/ram0 rw console=ttyAMA0,115200 ip=dhcp mem=128M
+boot_cmds_ramdisk = root=/dev/ram0 rw console=ttyAMA0,115200 ip=dhcp mem=256M
 
 boot_options =
     boot_cmds
diff -pruN 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu-arm-cortex-a15.conf 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu-arm-cortex-a15.conf
--- 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu-arm-cortex-a15.conf	2015-02-26 13:45:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu-arm-cortex-a15.conf	2016-02-02 08:07:05.000000000 +0000
@@ -2,16 +2,16 @@ client_type=qemu
 
 qemu_binary = qemu-system-arm
 qemu_networking_options = -net user
-qemu_options = -M %(qemu_machine_type)s -no-reboot -show-cursor -no-reboot -nographic -m 128 %(qemu_networking_options)s
+qemu_options = -M %(qemu_machine_type)s -no-reboot -show-cursor -no-reboot -nographic -m 256 %(qemu_networking_options)s
 
 qemu_machine_type = vexpress-a15
 
-boot_cmds = root=/dev/sda rw console=ttyAMA0,115200 ip=dhcp mem=128M
+boot_cmds = root=/dev/sda rw console=ttyAMA0,115200 ip=dhcp mem=256M
 
-boot_cmds_ramdisk = root=/dev/ram0 rw console=ttyAMA0,115200 ip=dhcp mem=128M
+boot_cmds_ramdisk = root=/dev/ram0 rw console=ttyAMA0,115200 ip=dhcp mem=256M
 
 boot_options =
     boot_cmds
 
 [boot_cmds]
-default = boot_cmds
\ No newline at end of file
+default = boot_cmds
diff -pruN 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu-arm-cortex-a9.conf 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu-arm-cortex-a9.conf
--- 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu-arm-cortex-a9.conf	2015-02-26 13:45:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu-arm-cortex-a9.conf	2016-02-02 08:07:05.000000000 +0000
@@ -2,16 +2,16 @@ client_type=qemu
 
 qemu_binary = qemu-system-arm
 qemu_networking_options = -net user
-qemu_options = -M %(qemu_machine_type)s -no-reboot -show-cursor -no-reboot -nographic -m 128 %(qemu_networking_options)s
+qemu_options = -M %(qemu_machine_type)s -no-reboot -show-cursor -no-reboot -nographic -m 256 %(qemu_networking_options)s
 
 qemu_machine_type = vexpress-a9
 
-boot_cmds = root=/dev/sda rw console=ttyAMA0,115200 ip=dhcp mem=128M
+boot_cmds = root=/dev/sda rw console=ttyAMA0,115200 ip=dhcp mem=256M
 
-boot_cmds_ramdisk = root=/dev/ram0 rw console=ttyAMA0,115200 ip=dhcp mem=128M
+boot_cmds_ramdisk = root=/dev/ram0 rw console=ttyAMA0,115200 ip=dhcp mem=256M
 
 boot_options =
     boot_cmds
 
 [boot_cmds]
-default = boot_cmds
\ No newline at end of file
+default = boot_cmds
diff -pruN 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu.conf 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu.conf
--- 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu.conf	2015-02-26 13:45:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/qemu.conf	2016-02-02 08:07:05.000000000 +0000
@@ -2,13 +2,13 @@ client_type=qemu
 
 qemu_binary = qemu-system-arm
 qemu_networking_options = -net nic,model=virtio -net user
-qemu_options = -M %(qemu_machine_type)s -no-reboot -show-cursor -no-reboot -nographic -m 128 %(qemu_networking_options)s
+qemu_options = -M %(qemu_machine_type)s -no-reboot -show-cursor -no-reboot -nographic -m 256 %(qemu_networking_options)s
 
 qemu_machine_type = versatilepb
 
-boot_cmds = root=/dev/sda rw console=ttyAMA0,115200 ip=dhcp mem=128M
+boot_cmds = root=/dev/sda rw console=ttyAMA0,115200 ip=dhcp mem=256M
 
-boot_cmds_ramdisk = root=/dev/ram0 rw console=ttyAMA0,115200 ip=dhcp mem=128M
+boot_cmds_ramdisk = root=/dev/ram0 rw console=ttyAMA0,115200 ip=dhcp mem=256M
 
 boot_options =
     boot_cmds
diff -pruN 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/x86.conf 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/x86.conf
--- 2015.9-1/lava_dispatcher/default-config/lava-dispatcher/device-types/x86.conf	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/default-config/lava-dispatcher/device-types/x86.conf	2015-12-14 09:33:11.000000000 +0000
@@ -4,17 +4,19 @@ boot_cmd_timeout = 30
 
 boot_cmds_ramdisk =
     dhcp net0,
-    set console "console=ttyS0,115200n8 %(lava_network_info)s",
-    set extraargs "init=/sbin/init",
-    kernel http://{SERVER_IP}/tmp/{KERNEL} ${extraargs} ${console},
+    set console "console=ttyS0,115200n8",
+    set network	"ip=:::::eth0:dhcp %(lava_network_info)s",
+    set extraargs "init=/sbin/init ",
+    kernel http://{SERVER_IP}/tmp/{KERNEL} ${extraargs} ${network} ${console},
     initrd http://{SERVER_IP}/tmp/{RAMDISK},
     boot
 
 boot_cmds_nfs =
     dhcp net0,
-    set console "console=ttyS0,115200n8 %(lava_network_info)s",
-    set extraargs "root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS},tcp,hard,intr ip=eth0:dhcp",
-    kernel http://{SERVER_IP}/tmp/{KERNEL} ${extraargs} ${console},
+    set console "console=ttyS0,115200n8",
+    set network	"ip=:::::eth0:dhcp %(lava_network_info)s",
+    set extraargs "root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS},tcp,hard,intr",
+    kernel http://{SERVER_IP}/tmp/{KERNEL} ${extraargs} ${network} ${console},
     initrd http://{SERVER_IP}/tmp/{RAMDISK},
     boot
 
diff -pruN 2015.9-1/lava_dispatcher/deployment_data.py 2016.3-1/lava_dispatcher/deployment_data.py
--- 2015.9-1/lava_dispatcher/deployment_data.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/deployment_data.py	2016-02-02 08:07:05.000000000 +0000
@@ -190,3 +190,20 @@ plamo = deployment_data_dict({
     'lava_test_results_part_attr': 'root_part',
     'lava_test_results_dir': '/lava-%s',
 })
+
+debian_installer = deployment_data_dict({  # pylint: disable=invalid-name
+    'TESTER_PS1': r"Enter",
+    'TESTER_PS1_PATTERN': r"Enter",
+    'TESTER_PS1_INCLUDES_RC': False,
+    'boot_cmds': 'boot_cmds',
+    'boot_linaro_timeout': 'extended_boot_timeout',  # run the installer
+    'skip_newlines': True,
+
+    # for lava-test-shell
+    'distro': 'debian',
+    'lava_test_sh_cmd': '/bin/bash',
+    'lava_test_dir': '/lava-%s',
+    'lava_test_results_part_attr': 'root_part',
+    'lava_test_results_dir': '/lava-%s',
+    'lava_test_shell_file': '~/.bashrc',
+})
diff -pruN 2015.9-1/lava_dispatcher/device/bootloader.py 2016.3-1/lava_dispatcher/device/bootloader.py
--- 2015.9-1/lava_dispatcher/device/bootloader.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/device/bootloader.py	2015-12-14 09:33:11.000000000 +0000
@@ -187,7 +187,8 @@ class BootloaderTarget(MasterImageTarget
                         else:
                             load_addr = self.config.u_load_addrs[0]
                         kernel = create_uimage(kernel, load_addr,
-                                               self._tmpdir, self.config.uimage_xip)
+                                               self._tmpdir, self.config.uimage_xip,
+                                               self.config.uimage_arch)
                         logging.info('uImage created successfully')
                     else:
                         logging.error('Undefined u_load_addrs, aborting uImage creation')
@@ -212,8 +213,8 @@ class BootloaderTarget(MasterImageTarget
                     if not self._is_uboot_ramdisk(ramdisk):
                         ramdisk_uboot = ramdisk + ".uboot"
                         logging.info("RAMdisk needs u-boot header.  Adding.")
-                        cmd = "mkimage -A arm -T ramdisk -C none -d %s %s > /dev/null" \
-                            % (ramdisk, ramdisk_uboot)
+                        cmd = "mkimage -A %s -T ramdisk -C none -d %s %s > /dev/null" \
+                            % (self.config.uimage_arch, ramdisk, ramdisk_uboot)
                         r = subprocess.call(cmd, shell=True)
                         if r == 0:
                             ramdisk = ramdisk_uboot
@@ -316,6 +317,8 @@ class BootloaderTarget(MasterImageTarget
         if self.proc:
             if self.config.connection_command_terminate:
                 self.proc.sendline(self.config.connection_command_terminate)
+            else:
+                self._politely_close_console(self.proc)
             finalize_process(self.proc)
             self.proc = None
         self.proc = connect_to_serial(self.context)
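
The bootloader.py hunks above stop hard-coding -A arm and take the architecture from the new uimage_arch option instead. A sketch of the resulting mkimage call, with hypothetical paths:

    import subprocess

    uimage_arch = "arm64"             # from the new uimage_arch device option
    ramdisk = "/tmp/ramdisk.cpio.gz"  # hypothetical input path
    ramdisk_uboot = ramdisk + ".uboot"

    # Same command shape as the hunk above; only the -A value is configurable.
    cmd = "mkimage -A %s -T ramdisk -C none -d %s %s > /dev/null" % (
        uimage_arch, ramdisk, ramdisk_uboot)
    if subprocess.call(cmd, shell=True) == 0:
        ramdisk = ramdisk_uboot
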
diff -pruN 2015.9-1/lava_dispatcher/device/dummy_drivers.py 2016.3-1/lava_dispatcher/device/dummy_drivers.py
--- 2015.9-1/lava_dispatcher/device/dummy_drivers.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/device/dummy_drivers.py	2015-11-30 21:58:47.000000000 +0000
@@ -215,4 +215,4 @@ class lxc(BaseDriver):
 
     def finalize(self, proc):
         logging.info("Finalizing lxc session %s", self.session)
-        subprocess.check_call(['lxc-stop', '-n', self.container])
+        subprocess.check_call(['lxc-stop', '-n', self.container, '-k'])
diff -pruN 2015.9-1/lava_dispatcher/device/fastboot_drivers.py 2016.3-1/lava_dispatcher/device/fastboot_drivers.py
--- 2015.9-1/lava_dispatcher/device/fastboot_drivers.py	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/device/fastboot_drivers.py	2015-12-14 09:33:11.000000000 +0000
@@ -59,7 +59,7 @@ class FastBoot(object):
 
     def __call__(self, args, ignore_failure=False, timeout=600):
         command = self.device.config.fastboot_command + ' ' + args
-        command = "flock /var/lock/lava-fastboot.lck " + command
+        command = "flock -o /var/lock/lava-fastboot.lck " + command
         _call(self.context, command, ignore_failure, timeout)
 
     def enter(self):
@@ -69,6 +69,9 @@ class FastBoot(object):
         except subprocess.CalledProcessError:
             # Now a more brute force attempt. In this case the device is
             # probably hung.
+            if self.device.config.pre_power_cmd:
+                self.context.run_command(self.device.config.pre_power_cmd,
+                                         failok=True)
             if self.device.config.hard_reset_command:
                 logging.debug("Will hard reset the device")
                 self.context.run_command(self.device.config.hard_reset_command)
@@ -245,7 +248,8 @@ class BaseDriver(object):
                     self._kernel = create_uimage(self._kernel,
                                                  load_addr,
                                                  self.working_dir,
-                                                 self.config.uimage_xip)
+                                                 self.config.uimage_xip,
+                                                 self.config.uimage_arch)
             else:
                 raise CriticalError('Kernel load address not defined!')
         elif self.config.boot_fat_image_only:
@@ -312,17 +316,18 @@ class BaseDriver(object):
     @contextmanager
     def adb_file_system(self, partition, directory):
 
-        mount_point = self._get_partition_mount_point(partition)
+        with self.context.client.android_tester_session() as session:
+            mount_point = self._get_partition_mount_point(partition)
 
-        host_dir = '%s/mnt/%s' % (self.working_dir, directory)
-        target_dir = '%s/%s' % (mount_point, directory)
+            host_dir = '%s/mnt/%s' % (self.working_dir, directory)
+            target_dir = '%s/%s' % (mount_point, directory)
 
-        subprocess.check_call(['mkdir', '-p', host_dir])
-        self.adb('pull %s %s' % (target_dir, host_dir), ignore_failure=True)
+            subprocess.check_call(['mkdir', '-p', host_dir])
+            self.adb('pull %s %s' % (target_dir, host_dir), ignore_failure=True)
 
-        yield host_dir
+            yield host_dir
 
-        self.adb('push %s %s' % (host_dir, target_dir))
+            self.adb('push %s %s' % (host_dir, target_dir))
 
     # Private Methods
 
diff -pruN 2015.9-1/lava_dispatcher/device/fastboot.py 2016.3-1/lava_dispatcher/device/fastboot.py
--- 2015.9-1/lava_dispatcher/device/fastboot.py	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/device/fastboot.py	2015-11-30 21:58:47.000000000 +0000
@@ -162,7 +162,8 @@ class FastbootTarget(Target):
                                                  boot_tags=self.driver.get_boot_tags())
                 self._customize_bootloader(self.proc, boot_cmds)
             self._monitor_boot(self.proc, self.tester_ps1, self.tester_ps1_pattern)
-            if self.config.start_fastboot_command:
+            if self.config.start_fastboot_command and not \
+               self.config.android_adb_over_tcp:
                 self.driver.wait_for_adb()
             self._booted = True
             return self.proc
@@ -172,9 +173,12 @@ class FastbootTarget(Target):
             raise OperationFailed(msg)
 
     def power_off(self, proc):
-        super(FastbootTarget, self).power_off(proc)
-        if self.config.power_off_cmd:
+        if self.config.power_off_cmd != "":
             self.context.run_command(self.config.power_off_cmd)
+        else:
+            proc.send("~$")
+            proc.sendline("off")
+        super(FastbootTarget, self).power_off(proc)
         self.driver.finalize(proc)
 
     @contextlib.contextmanager
@@ -220,7 +224,8 @@ class FastbootTarget(Target):
                 # Connect to serial
                 self.proc = self.driver.connect()
                 # Hard reset the platform
-                if self.config.hard_reset_command:
+                if self.config.hard_reset_command or \
+                   self.config.hard_reset_command == "":
                     self._hard_reboot(self.proc)
                 else:
                     self._soft_reboot(self.proc)
diff -pruN 2015.9-1/lava_dispatcher/device/lxc.py 2016.3-1/lava_dispatcher/device/lxc.py
--- 2015.9-1/lava_dispatcher/device/lxc.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/device/lxc.py	2015-11-30 21:58:47.000000000 +0000
@@ -93,7 +93,7 @@ class LxcTarget(Target):
     def power_off(self, proc):
         if self.proc:
             try:
-                subprocess.check_call(['lxc-stop', '-n', self.name])
+                subprocess.check_call(['lxc-stop', '-n', self.name, '-k'])
                 if not self.persist:
                     subprocess.check_call(['lxc-destroy', '-n', self.name])
                     logging.info('Destroyed container %s' % self.name)
diff -pruN 2015.9-1/lava_dispatcher/device/master.py 2016.3-1/lava_dispatcher/device/master.py
--- 2015.9-1/lava_dispatcher/device/master.py	2015-09-10 08:06:51.000000000 +0000
+++ 2016.3-1/lava_dispatcher/device/master.py	2015-12-14 09:33:11.000000000 +0000
@@ -137,6 +137,8 @@ class MasterImageTarget(Target):
             proc.sendline("off")
         if self.config.connection_command_terminate:
                 self.proc.sendline(self.config.connection_command_terminate)
+        else:
+            self._politely_close_console(self.proc)
         finalize_process(self.proc)
         self.proc = None
 
@@ -514,6 +516,8 @@ class MasterImageTarget(Target):
                 if self.proc:
                     if self.config.connection_command_terminate:
                         self.proc.sendline(self.config.connection_command_terminate)
+                    else:
+                        self._politely_close_console(self.proc)
                     finalize_process(self.proc)
                     self.proc = None
                 self.proc = connect_to_serial(self.context)
diff -pruN 2015.9-1/lava_dispatcher/device/target.py 2016.3-1/lava_dispatcher/device/target.py
--- 2015.9-1/lava_dispatcher/device/target.py	2015-09-10 08:06:51.000000000 +0000
+++ 2016.3-1/lava_dispatcher/device/target.py	2016-02-02 08:07:05.000000000 +0000
@@ -333,8 +333,8 @@ class Target(object):
                     return dest
         return dest
 
-    def _wait_for_prompt(self, connection, prompt_pattern, timeout):
-        wait_for_prompt(connection, prompt_pattern, timeout)
+    def _wait_for_prompt(self, connection, prompt_pattern, timeout, skip_newlines=False):
+        wait_for_prompt(connection, prompt_pattern, timeout, skip_newlines=skip_newlines)
 
     def _is_job_defined_boot_cmds(self, boot_cmds):
         if isinstance(self.config.boot_cmds, basestring):
@@ -352,11 +352,13 @@ class Target(object):
             return True
 
     def _auto_login(self, connection, is_master=False):
+        logging.info("Starting auto_login")
+        logging.info("timeout: %s" % self.boot_linaro_timeout)
         if is_master:
             if self.config.master_login_prompt is not None:
                 self._wait_for_prompt(connection,
                                       self.config.master_login_prompt,
-                                      timeout=self.config.boot_linaro_timeout)
+                                      timeout=self.boot_linaro_timeout, skip_newlines=True)
                 connection.sendline(self.config.master_username)
             if self.config.master_password_prompt is not None:
                 self._wait_for_prompt(connection,
@@ -369,7 +371,7 @@ class Target(object):
             if self.config.login_prompt is not None:
                 self._wait_for_prompt(connection,
                                       self.config.login_prompt,
-                                      timeout=self.config.boot_linaro_timeout)
+                                      timeout=self.boot_linaro_timeout, skip_newlines=True)
                 connection.sendline(self.config.username)
             if self.config.password_prompt is not None:
                 self._wait_for_prompt(connection,
@@ -491,6 +493,9 @@ class Target(object):
 
     def _hard_reboot(self, connection):
         logging.info("Perform hard reset on the system")
+        if self.config.pre_power_cmd:
+            self.context.run_command(self.config.pre_power_cmd,
+                                     failok=True)
         if self.config.hard_reset_command != "":
             self.context.run_command(self.config.hard_reset_command)
         else:
@@ -542,6 +547,10 @@ class Target(object):
 
     def _monitor_boot(self, connection, ps1, ps1_pattern, is_master=False):
 
+        if self.config.pre_os_cmd:
+            self.context.run_command(self.config.pre_os_cmd,
+                                     failok=True)
+
         good = 'pass'
         bad = 'fail'
         if not is_master:
@@ -614,7 +623,7 @@ class Target(object):
         try:
             self._auto_login(connection, is_master)
         except pexpect.TIMEOUT:
-            msg = "Userspace Error: auto login prompt not found."
+            msg = "Userspace Error: auto login prompt not found. %s" % self.boot_linaro_timeout
             logging.error(msg)
             self.context.test_data.add_result(wait_for_login_prompt,
                                               bad, message=msg)
@@ -626,7 +635,7 @@ class Target(object):
             else:
                 pattern = self.config.test_image_prompts
 
-            self._wait_for_prompt(connection, pattern, self.config.boot_linaro_timeout)
+            self._wait_for_prompt(connection, pattern, self.boot_linaro_timeout)
             if not is_master:
                 if self.target_distro == 'android':
                     # Gain root access
@@ -855,6 +864,13 @@ class Target(object):
             if mounted:
                 runner.run('umount /mnt')
 
+    def _politely_close_console(self, connection):
+        if "telnet" in self.config.connection_command:
+            logging.debug("Telnet connection: Closing connection nicely")
+            connection.sendcontrol("]")
+            connection.expect("telnet> ", timeout=2)
+            connection.send("quit")
+
     def _start_busybox_http_server(self, runner, ip):
         runner.run('busybox httpd -f -p %d &' % self.config.busybox_http_port)
         runner.run('echo $! > /tmp/httpd.pid')
@@ -882,6 +898,31 @@ class Target(object):
         return self.deployment_data['distro']
 
     @property
+    def boot_linaro_timeout(self):
+        """
+        Reversed version of _get_from_config_or_deployment_data.
+        If deployment_data sets this key to 'extended_boot_timeout', the
+        extended timeout is retrieved from the config instead.
+        The value itself still needs to come from the config as it is
+        device dependent.
+        :return: the configured timeout, or the extended timeout if deployment_data requests it
+        """
+        key = 'boot_linaro_timeout'
+        if not self.__deployment_data__:
+            # image in particular does not set deployment_data at this point.
+            value = getattr(self.config, key.lower())
+            return value
+        extended = 'extended_boot_timeout'
+        value_str = self.deployment_data.get(key)
+        if value_str == extended:
+            extended_value = getattr(self.config, extended.lower())
+            if extended_value:
+                logging.info("Using extended boot timeout: %s" % extended_value)
+                return extended_value
+        value = getattr(self.config, key.lower())
+        return value
+
+    @property
     def tester_ps1(self):
         return self._get_from_config_or_deployment_data('tester_ps1')
 
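
A minimal sketch of the lookup implemented by the boot_linaro_timeout property above, with the device config and deployment_data reduced to plain dictionaries (the values are illustrative only; the real attributes live on the device configuration):

    # Illustrative stand-in: deployment_data may request the device's
    # extended boot timeout by naming 'extended_boot_timeout'.
    def boot_linaro_timeout(config, deployment_data):
        key = 'boot_linaro_timeout'
        extended = 'extended_boot_timeout'
        if deployment_data.get(key) == extended and config.get(extended):
            return config[extended]
        return config[key]

    config = {'boot_linaro_timeout': 300, 'extended_boot_timeout': 1200}  # example values
    print(boot_linaro_timeout(config, {}))                                              # 300
    print(boot_linaro_timeout(config, {'boot_linaro_timeout': 'extended_boot_timeout'}))  # 1200
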
diff -pruN 2015.9-1/lava_dispatcher/device/vexpress.py 2016.3-1/lava_dispatcher/device/vexpress.py
--- 2015.9-1/lava_dispatcher/device/vexpress.py	2015-09-10 08:06:51.000000000 +0000
+++ 2016.3-1/lava_dispatcher/device/vexpress.py	2016-03-04 14:35:18.000000000 +0000
@@ -99,6 +99,9 @@ class VexpressTarget(BootloaderTarget):
                     "vexpress_uefi_path, vexpress_uefi_backup_path and "
                     "vexpress_usb_mass_storage_device")
 
+        if self.config.vexpress_sky2_mac:
+            self._boot_tags['{SKY2_MAC}'] = self.config.vexpress_sky2_mac
+
     ##################################################################
     # methods inherited from BootloaderTarget and overriden here
     ##################################################################
@@ -154,7 +157,7 @@ class VexpressTarget(BootloaderTarget):
                         self.test_bl0 = download_image(self.config.vexpress_bl0_default, self.context,
                                                        self._tmpdir,
                                                        decompress=False)
-                else:
+                elif bl0 is not None:
                     self.test_bl0 = download_image(bl0, self.context,
                                                    self._tmpdir,
                                                    decompress=False)
diff -pruN 2015.9-1/lava_dispatcher/job.py 2016.3-1/lava_dispatcher/job.py
--- 2015.9-1/lava_dispatcher/job.py	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/job.py	2016-03-04 14:35:18.000000000 +0000
@@ -338,6 +338,10 @@ class LavaTestJob(object):
                 metadata['group_size'] = self.job_data['group_size']
                 self.context.test_data.add_metadata(metadata)
 
+            if 'shared_config' in self.job_data:
+                metadata['shared_config'] = self.job_data['shared_config']
+                self.context.test_data.add_metadata(metadata)
+
             logging.debug("[ACTION-B] Multi Node test!")
             logging.debug("[ACTION-B] target_group is (%s)." % self.context.test_data.metadata['target_group'])
         else:
diff -pruN 2015.9-1/lava_dispatcher/lava_test_shell/distro/debian/lava-add-sources 2016.3-1/lava_dispatcher/lava_test_shell/distro/debian/lava-add-sources
--- 2015.9-1/lava_dispatcher/lava_test_shell/distro/debian/lava-add-sources	2015-02-26 13:45:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/lava_test_shell/distro/debian/lava-add-sources	2016-03-04 14:35:18.000000000 +0000
@@ -3,7 +3,7 @@
 which add-apt-repository >/dev/null
 ret=$?
 if [ $ret -ne 0 ]; then
-    DEBIAN_FRONTEND=noninteractive apt-get update
+    DEBIAN_FRONTEND=noninteractive apt-get update -q
     DEBIAN_FRONTEND=noninteractive apt-get install -y -q "software-properties-common"
 fi
 
diff -pruN 2015.9-1/lava_dispatcher/lava_test_shell/distro/debian/lava-install-packages 2016.3-1/lava_dispatcher/lava_test_shell/distro/debian/lava-install-packages
--- 2015.9-1/lava_dispatcher/lava_test_shell/distro/debian/lava-install-packages	2015-02-26 13:45:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/lava_test_shell/distro/debian/lava-install-packages	2016-03-04 14:35:18.000000000 +0000
@@ -5,7 +5,7 @@ max_retry=4
 
 while [ 1 ]
 do
-  DEBIAN_FRONTEND=noninteractive apt-get update
+  DEBIAN_FRONTEND=noninteractive apt-get update -q
   DEBIAN_FRONTEND=noninteractive apt-get install -y -q "$@"
   return_value=$?
   [ "$return_value" = 0 ] && break
diff -pruN 2015.9-1/lava_dispatcher/lava_test_shell/distro/ubuntu/lava-add-sources 2016.3-1/lava_dispatcher/lava_test_shell/distro/ubuntu/lava-add-sources
--- 2015.9-1/lava_dispatcher/lava_test_shell/distro/ubuntu/lava-add-sources	2015-02-26 13:45:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/lava_test_shell/distro/ubuntu/lava-add-sources	2016-03-04 14:35:18.000000000 +0000
@@ -3,7 +3,7 @@
 which add-apt-repository >/dev/null
 ret=$?
 if [ $ret -ne 0 ]; then
-    DEBIAN_FRONTEND=noninteractive apt-get update
+    DEBIAN_FRONTEND=noninteractive apt-get update -q
     DEBIAN_FRONTEND=noninteractive apt-get install -y -q "software-properties-common"
 fi
 
diff -pruN 2015.9-1/lava_dispatcher/lava_test_shell/distro/ubuntu/lava-install-packages 2016.3-1/lava_dispatcher/lava_test_shell/distro/ubuntu/lava-install-packages
--- 2015.9-1/lava_dispatcher/lava_test_shell/distro/ubuntu/lava-install-packages	2015-02-26 13:45:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/lava_test_shell/distro/ubuntu/lava-install-packages	2016-03-04 14:35:18.000000000 +0000
@@ -5,7 +5,7 @@ max_retry=4
 
 while [ 1 ]
 do
-  DEBIAN_FRONTEND=noninteractive apt-get update
+  DEBIAN_FRONTEND=noninteractive apt-get update -q
   DEBIAN_FRONTEND=noninteractive apt-get install -y -q "$@"
   return_value=$?
   [ "$return_value" = 0 ] && break
diff -pruN 2015.9-1/lava_dispatcher/lava_test_shell/lava-echo-ipv4 2016.3-1/lava_dispatcher/lava_test_shell/lava-echo-ipv4
--- 2015.9-1/lava_dispatcher/lava_test_shell/lava-echo-ipv4	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/lava_test_shell/lava-echo-ipv4	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,20 @@
+#NOTE the lava_test_shell_action fills in the proper interpreter path
+# above during target deployment
+
+usage () {
+    echo "Usage: lava-echo-ipv4 INTERFACE"
+    echo ""
+    echo "Runs ifconfig for the specified interface and outputs the IPv4 address, if any."
+}
+
+_NETWORK_INTERFACE=$1
+
+if [ -z "$_NETWORK_INTERFACE" ]; then
+    echo "Specify the interface to query"
+    exit
+fi
+
+if [ -x /sbin/ifconfig ]; then
+    _RAW_STREAM_V4=`/sbin/ifconfig $_NETWORK_INTERFACE |grep -o -E '([[:xdigit:]]{1,3}\.){3}[[:xdigit:]]{1,3}'`
+    echo $_RAW_STREAM_V4 | awk '{print$1}'
+fi
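
For illustration only, a rough Python equivalent of what lava-echo-ipv4 extracts from the ifconfig output (the helper itself stays a shell script; the interface name below is an assumption):

    import re
    import subprocess

    def echo_ipv4(interface):
        # first dotted-quad in the ifconfig output, mirroring the grep above
        output = subprocess.check_output(['/sbin/ifconfig', interface])
        match = re.search(r'(\d{1,3}\.){3}\d{1,3}', output.decode('utf-8', 'replace'))
        return match.group(0) if match else ''

    print(echo_ipv4('eth0'))   # e.g. '192.168.1.20'
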
diff -pruN 2015.9-1/lava_dispatcher/lava_test_shell/lava-test-set 2016.3-1/lava_dispatcher/lava_test_shell/lava-test-set
--- 2015.9-1/lava_dispatcher/lava_test_shell/lava-test-set	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/lava_test_shell/lava-test-set	2015-11-30 21:58:47.000000000 +0000
@@ -0,0 +1,33 @@
+set_usage () {
+    echo "Usage:"
+    echo "       lava-test-set start NAME"
+    echo "       lava-test-set stop"
+    echo ""
+    echo "Start a test set with the given NAME, or stop the test set section."
+}
+
+set_start () {
+    echo "<LAVA_SIGNAL_TESTSET START $1>"
+    exit 0
+}
+
+set_stop () {
+    echo "<LAVA_SIGNAL_TESTSET STOP>"
+    exit 0
+}
+
+COMMAND="$1"
+shift
+if [ "$COMMAND" = "start" ]; then
+    SETNAME="$1"
+    if [ -z "$SETNAME" ]; then
+        set_usage
+        exit 1
+    fi
+    set_start "$SETNAME"
+elif [ "$COMMAND" = "stop" ]; then
+    set_stop
+else
+    set_usage
+    exit 1
+fi
diff -pruN 2015.9-1/lava_dispatcher/lava_test_shell/multi_node/lava-echo-config 2016.3-1/lava_dispatcher/lava_test_shell/multi_node/lava-echo-config
--- 2015.9-1/lava_dispatcher/lava_test_shell/multi_node/lava-echo-config	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/lava_test_shell/multi_node/lava-echo-config	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,30 @@
+#!/bin/sh
+#
+#This file is for Multi-Node test
+#
+#This command will print to stdout any extra device information contained in
+# /etc/lava-server/shared-device-config.yaml
+#
+#Usage: ``lava-echo-config``
+#
+
+if [ -z "${LAVA_SHARED_CONFIG}" ]
+then
+    printf "no shared config\n"
+    exit 1
+fi
+
+FILTER=$1
+if [ -z "$FILTER" ]; then
+    printf "${LAVA_SHARED_CONFIG}\n"
+    exit 0
+fi
+
+MATCH=$(printf "${LAVA_SHARED_CONFIG}" | sed "/^${FILTER} /!D" | sed "s/^${FILTER} //")
+if [ "$MATCH" = "" ] || [ -z "$MATCH" ]; then
+  # failed to find the filter in the group
+  printf "no matching devices\n"
+  exit 1
+fi
+printf "${MATCH}"
+exit 0
diff -pruN 2015.9-1/lava_dispatcher/lava_test_shell/multi_node/lava-multi-node.lib 2016.3-1/lava_dispatcher/lava_test_shell/multi_node/lava-multi-node.lib
--- 2015.9-1/lava_dispatcher/lava_test_shell/multi_node/lava-multi-node.lib	2015-09-04 07:44:33.000000000 +0000
+++ 2016.3-1/lava_dispatcher/lava_test_shell/multi_node/lava-multi-node.lib	2016-02-02 08:07:05.000000000 +0000
@@ -184,7 +184,7 @@ if [ -n "$1" ] ; then
 fi
 
 if [ ! -f $LAVA_MULTI_NODE_CACHE ] ; then
-	_lava_multi_node_debug "$FUNCNAME not cache file $LAVA_MULTI_NODE_CACHE !"
+	_lava_multi_node_debug "$FUNCNAME no cache file exists $LAVA_MULTI_NODE_CACHE !"
 	exit $LAVA_MULTI_NODE_EXIT_ERROR
 fi
 
diff -pruN 2015.9-1/lava_dispatcher/lava_test_shell/vland/lava-vland-self 2016.3-1/lava_dispatcher/lava_test_shell/vland/lava-vland-self
--- 2015.9-1/lava_dispatcher/lava_test_shell/vland/lava-vland-self	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/lava_test_shell/vland/lava-vland-self	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,9 @@
+#!/bin/sh
+#
+# This file is for Vland test
+#
+# Prints the interface details for this device
+#
+# Usage: ``lava-vland-self``
+
+echo -e ${LAVA_VLAND_SELF}
diff -pruN 2015.9-1/lava_dispatcher/lava_test_shell/vland/lava-vland-tags 2016.3-1/lava_dispatcher/lava_test_shell/vland/lava-vland-tags
--- 2015.9-1/lava_dispatcher/lava_test_shell/vland/lava-vland-tags	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/lava_test_shell/vland/lava-vland-tags	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,9 @@
+#!/bin/sh
+#
+# This file is for Vland test
+#
+# Prints the tag details for this device
+#
+# Usage: ``lava-vland-tags``
+
+echo -e ${LAVA_VLAND_TAGS}
diff -pruN 2015.9-1/lava_dispatcher/pipeline/action.py 2016.3-1/lava_dispatcher/pipeline/action.py
--- 2015.9-1/lava_dispatcher/pipeline/action.py	2015-09-10 10:34:46.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/action.py	2016-03-02 14:34:40.000000000 +0000
@@ -18,6 +18,7 @@
 # along
 # with this program; if not, see <http://www.gnu.org/licenses>.
 
+import re
 import logging
 import os
 import sys
@@ -26,6 +27,7 @@ import time
 import types
 import signal
 import datetime
+import traceback
 import subprocess
 from collections import OrderedDict
 from contextlib import contextmanager
@@ -119,6 +121,26 @@ class Pipeline(object):  # pylint: disab
         if not action:
             raise RuntimeError("Unable to add empty action to pipeline")
 
+    def _override_action_timeout(self, action, override):
+        if not isinstance(override, dict):
+            return
+        action.timeout = Timeout(
+            action.name,
+            Timeout.parse(
+                override[action.name]
+            )
+        )
+
+    def _override_connection_timeout(self, action, override):
+        if not isinstance(override, dict):
+            return
+        action.connection_timeout = Timeout(
+            action.name,
+            Timeout.parse(
+                override[action.name]
+            )
+        )
+
     def add_action(self, action, parameters=None):
         self._check_action(action)
         self.actions.append(action)
@@ -149,26 +171,36 @@ class Pipeline(object):  # pylint: disab
             parameters = self.parameters
         # if the action has an internal pipeline, initialise that here.
         action.populate(parameters)
+        if 'default_connection_timeout' in parameters:
+            # some action handlers do not need to pass all parameters to their children.
+            action.connection_timeout.duration = parameters['default_connection_timeout']
         # Set the timeout
         # FIXME: only the last test is really useful. The first ones are only
         # needed to run the tests that do not use a device and job.
-        if self.job is not None and \
-           self.job.device is not None and \
-           action.name in self.job.device.get('timeouts', {}):
-            action.timeout = Timeout(
-                action.name,
-                Timeout.parse(
-                    self.job.device['timeouts'][action.name]
-                )
-            )
+        if self.job is not None and self.job.device is not None:
+            # set device level overrides
+            overrides = self.job.device.get('timeouts', {})
+            if 'actions' in overrides and action.name in overrides['actions']:
+                self._override_action_timeout(action, overrides['actions'])
+            elif action.name in overrides:
+                self._override_action_timeout(action, overrides)
+            if 'connections' in overrides and action.name in overrides['connections']:
+                self._override_connection_timeout(action, overrides['connections'])
         # Set the parameters after populate so the sub-actions are also
         # getting the parameters.
         # Also set the parameters after the creation of the default timeout
         # so timeouts specified in the job override the defaults.
         # job overrides device timeouts:
-        # FIXME: this shouldn't be needed once the device configuration is generated using the job definition
-        if self.job and 'timeouts' in self.job.parameters and action.name in self.job.parameters['timeouts']:
-            parameters['timeout'] = self.job.parameters['timeouts'][action.name]
+        if self.job and 'timeouts' in self.job.parameters:
+            overrides = self.job.parameters['timeouts']
+            if 'actions' in overrides and action.name in overrides:
+                # set job level overrides
+                self._override_action_timeout(action, overrides['actions'])
+            elif action.name in overrides:
+                self._override_action_timeout(action, overrides)
+                parameters['timeout'] = overrides[action.name]
+            if 'connections' in overrides and action.name in overrides:
+                self._override_connection_timeout(action, overrides['connections'])
 
         action.parameters = parameters
 
@@ -300,27 +332,20 @@ class Pipeline(object):  # pylint: disab
                     signal.signal(signal.SIGINT, cancelling_handler)
                     signal.signal(signal.SIGTERM, cancelling_handler)
                 start = time.time()
-                new_connection = None
                 try:
-                    # FIXME: not sure to understand why we have two cases here?
-                    if not connection:
-                        with action.timeout.action_timeout():
-                            new_connection = action.run(connection, args)
-                    else:
+                    with action.timeout.action_timeout():
                         new_connection = action.run(connection, args)
                 # overly broad exceptions will cause issues with RetryActions
                 # always ensure the unit tests continue to pass with changes here.
-                except (ValueError, KeyError, TypeError, RuntimeError, AttributeError) as exc:
-                    exc_type, exc_value, exc_traceback = sys.exc_info()
-                    traceback_details = {
-                        'filename': exc_traceback.tb_frame.f_code.co_filename,
-                        'lineno': exc_traceback.tb_lineno,
-                        'name': exc_traceback.tb_frame.f_code.co_name,
-                        'type': exc_type.__name__,
-                        'message': exc_value.message,
-                    }
-                    action.logger.exception(traceback_details)
-                    raise RuntimeError(exc)
+                except (ValueError, KeyError, NameError, SyntaxError, OSError,
+                        TypeError, RuntimeError, AttributeError):
+                    msg = re.sub('\s+', ' ', ''.join(traceback.format_exc().split('\n')))
+                    action.logger.exception(msg)
+                    action.errors = msg
+                    action.cleanup()
+                    self.cleanup_actions(connection, None)
+                    # report action errors so that the last part of the message is the most relevant.
+                    raise RuntimeError(action.errors)
                 except KeyboardInterrupt:
                     raise KeyboardInterrupt
                 action.elapsed_time = time.time() - start
@@ -349,6 +374,8 @@ class Pipeline(object):  # pylint: disab
             except (JobError, InfrastructureError) as exc:
                 action.errors = exc.message
                 # set results including retries
+                if "boot-result" not in action.data:
+                    action.data['boot-result'] = 'failed'
                 action.results = {"fail": exc}
                 self._diagnose(connection)
                 action.cleanup()
@@ -393,6 +420,7 @@ class Action(object):  # pylint: disable
         self.diagnostics = []
         self.protocols = []  # list of protocol objects supported by this action, full list in job.protocols
         self.section = None
+        self.connection_timeout = Timeout(self.name)
 
     # public actions (i.e. those who can be referenced from a job file) must
     # declare a 'class-type' name so they can be looked up.
@@ -514,6 +542,8 @@ class Action(object):  # pylint: disable
         # Overide the duration if needed
         if 'timeout' in self.parameters:
             self.timeout.duration = Timeout.parse(self.parameters['timeout'])
+        if 'connection_timeout' in self.parameters:
+            self.connection_timeout.duration = Timeout.parse(self.parameters['connection_timeout'])
 
         # only unit tests should have actions without a pointer to the job.
         if 'failure_retry' in self.parameters and 'repeat' in self.parameters:
@@ -621,15 +651,22 @@ class Action(object):  # pylint: disable
         if 'protocols' not in self.parameters:
             return
         for protocol in self.job.protocols:
-            for params in self.parameters['protocols'][protocol.name]:
-                for call in [
-                        params for name in params
-                        if name == 'action' and params[name] == self.name]:
-                    reply = protocol(call)
-                    message = protocol.collate(reply, params)
-                    self.logger.debug(
-                        "Setting common data key %s to %s"
-                        % (message[0], message[1]))
+            if protocol.name not in self.parameters['protocols']:
+                # nothing to do for this action with this protocol
+                continue
+            params = self.parameters['protocols'][protocol.name]
+            for call_dict in [call for call in params if 'action' in call and call['action'] == self.name]:
+                del call_dict['yaml_line']
+                if 'message' in call_dict:
+                    del call_dict['message']['yaml_line']
+                if 'timeout' in call_dict:
+                    del call_dict['timeout']['yaml_line']
+                protocol.check_timeout(self.connection_timeout.duration, call_dict)
+                self.logger.info("Making protocol call for %s using %s", self.name, protocol.name)
+                reply = protocol(call_dict)
+                message = protocol.collate(reply, call_dict)
+                if message:
+                    self.logger.info("Setting common data key %s to %s", message[0], message[1])
                     self.set_common_data(protocol.name, message[0], message[1])
 
     def run(self, connection, args=None):
@@ -652,7 +689,7 @@ class Action(object):  # pylint: disable
         if self.internal_pipeline:
             return self.internal_pipeline.run_actions(connection, args)
         if connection:
-            connection.timeout = self.timeout
+            connection.timeout = self.connection_timeout
         return connection
 
     def cleanup(self):
@@ -738,7 +775,7 @@ class Action(object):  # pylint: disable
         if not connection.connected:
             self.logger.debug("Already disconnected")
             return
-        self.logger.debug("%s: Wait for prompt. %s seconds" % (self.name, int(self.timeout.duration)))
+        self.logger.debug("%s: Wait for prompt. %s seconds" % (self.name, int(self.connection_timeout.duration)))
         connection.wait()
 
 
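
The reworked override logic above (_override_action_timeout and _override_connection_timeout) accepts nested 'actions' and 'connections' blocks inside a 'timeouts' section as well as the older flat layout. A hedged sketch of the shapes involved, with made-up action names and durations, assuming Timeout.parse accepts the usual days/hours/minutes/seconds dictionaries:

    # Example override structures matching the branches above
    # (action names and values are illustrative only).
    device_timeouts = {
        'actions': {'bootloader-commands': {'minutes': 5}},        # per-action override
        'connections': {'bootloader-commands': {'seconds': 240}},  # per-connection override
        'power_off': {'seconds': 30},                              # old flat layout, still honoured
    }
    job_timeouts = {
        'actions': {'auto-login-action': {'minutes': 10}},         # job-level override wins over device
    }
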
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/boot/fastboot.py 2016.3-1/lava_dispatcher/pipeline/actions/boot/fastboot.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/boot/fastboot.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/boot/fastboot.py	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,167 @@
+# Copyright (C) 2015 Linaro Limited
+#
+# Author: Senthil Kumaran S <senthil.kumaran@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+
+import os
+import tarfile
+from lava_dispatcher.pipeline.action import (
+    Pipeline,
+    Action,
+    Timeout,
+    JobError,
+)
+from lava_dispatcher.pipeline.logical import Boot
+from lava_dispatcher.pipeline.actions.boot import BootAction
+from lava_dispatcher.pipeline.actions.boot.environment import (
+    ExportDeviceEnvironment,
+)
+from lava_dispatcher.pipeline.actions.boot import AutoLoginAction
+from lava_dispatcher.pipeline.connections.adb import (
+    ConnectAdb,
+    WaitForAdbDevice,
+)
+from lava_dispatcher.pipeline.shell import ExpectShellSession
+from lava_dispatcher.pipeline.utils.shell import infrastructure_error
+from lava_dispatcher.pipeline.utils.filesystem import mkdtemp
+from lava_dispatcher.pipeline.utils.constants import (
+    DISPATCHER_DOWNLOAD_DIR,
+    ANDROID_TMP_DIR,
+)
+
+
+class BootFastboot(Boot):
+    """
+    Expects fastboot bootloader, and boots.
+    """
+    compatibility = 1
+
+    def __init__(self, parent, parameters):
+        super(BootFastboot, self).__init__(parent)
+        self.action = BootFastbootAction()
+        self.action.section = self.action_type
+        self.action.job = self.job
+        parent.add_action(self.action, parameters)
+
+    @classmethod
+    def accepts(cls, device, parameters):
+        if 'method' in parameters:
+            if parameters['method'] == 'fastboot':
+                return True
+        return False
+
+
+class BootFastbootAction(BootAction):
+    """
+    Provide for auto_login parameters in this boot stanza and re-establish the
+    connection after boot.
+    """
+    def __init__(self):
+        super(BootFastbootAction, self).__init__()
+        self.name = "fastboot_boot"
+        self.summary = "fastboot boot"
+        self.description = "fastboot boot into the system"
+
+    def populate(self, parameters):
+        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
+        self.internal_pipeline.add_action(FastbootAction())
+        self.internal_pipeline.add_action(WaitForAdbDevice())
+        self.internal_pipeline.add_action(ConnectAdb())
+        # Add AutoLoginAction unconditionally as this action does nothing if
+        # the configuration does not contain 'auto_login'
+        self.internal_pipeline.add_action(AutoLoginAction())
+        self.internal_pipeline.add_action(ExpectShellSession())
+        self.internal_pipeline.add_action(AdbOverlayUnpack())
+
+
+class FastbootAction(Action):
+    """
+    This action calls fastboot to reboot into the system.
+    """
+
+    def __init__(self):
+        super(FastbootAction, self).__init__()
+        self.name = "boot-fastboot"
+        self.summary = "attempt to fastboot boot"
+        self.description = "fastboot boot into system"
+        self.command = ''
+
+    def validate(self):
+        super(FastbootAction, self).validate()
+        self.errors = infrastructure_error('fastboot')
+        if 'fastboot_serial_number' not in self.job.device:
+            self.errors = "device fastboot serial number missing"
+        elif self.job.device['fastboot_serial_number'] == '0000000000':
+            self.errors = "device fastboot serial number unset"
+
+    def run(self, connection, args=None):
+        connection = super(FastbootAction, self).run(connection, args)
+        serial_number = self.job.device['fastboot_serial_number']
+        fastboot_cmd = ['fastboot', '-s', serial_number, 'reboot']
+        command_output = self.run_command(fastboot_cmd)
+        if command_output and 'rebooting' not in command_output:
+            raise JobError("Unable to boot with fastboot: %s" %
+                           command_output)
+        return connection
+
+
+class AdbOverlayUnpack(Action):
+
+    def __init__(self):
+        super(AdbOverlayUnpack, self).__init__()
+        self.name = "adb-overlay-unpack"
+        self.summary = "unpack the overlay on the remote device"
+        self.description = "unpack the overlay over adb"
+
+    def validate(self):
+        super(AdbOverlayUnpack, self).validate()
+        if 'adb_serial_number' not in self.job.device:
+            self.errors = "device adb serial number missing"
+        elif self.job.device['adb_serial_number'] == '0000000000':
+            self.errors = "device adb serial number unset"
+
+    def run(self, connection, args=None):
+        connection = super(AdbOverlayUnpack, self).run(connection, args)
+        serial_number = self.job.device['adb_serial_number']
+        overlay_type = 'adb-overlay'
+        overlay_file = self.data['compress-overlay'].get('output')
+        host_dir = mkdtemp()
+        target_dir = ANDROID_TMP_DIR
+        try:
+            tar = tarfile.open(overlay_file)
+            tar.extractall(host_dir)
+            tar.close()
+        except tarfile.TarError as exc:
+            raise RuntimeError("Unable to unpack %s overlay: %s" % (
+                overlay_type, exc))
+        host_dir = os.path.join(host_dir, 'data/local/tmp')
+        adb_cmd = ['adb', '-s', serial_number, 'push', host_dir,
+                   target_dir]
+        command_output = self.run_command(adb_cmd)
+        if command_output and 'pushed' not in command_output:
+            raise JobError("Unable to push overlay files with adb: %s" %
+                           command_output)
+        adb_cmd = ['adb', '-s', serial_number, 'shell', '/system/bin/chmod',
+                   '0777', target_dir]
+        command_output = self.run_command(adb_cmd)
+        if command_output and 'pushed' not in command_output:
+            raise JobError("Unable to chmod overlay files with adb: %s" %
+                           command_output)
+        self.data['boot-result'] = 'failed' if self.errors else 'success'
+        return connection
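
A hedged sketch of the adb sequence AdbOverlayUnpack drives, shown here with subprocess and made-up serial number and paths; the action itself goes through self.run_command() and takes the target directory from ANDROID_TMP_DIR in the pipeline constants:

    import subprocess

    serial = '0123456789'                      # assumption: the device's adb_serial_number
    host_dir = '/tmp/overlay/data/local/tmp'   # unpacked overlay, as in the action above
    target_dir = '/data/local/tmp'             # assumption: value of ANDROID_TMP_DIR

    # push the unpacked overlay, then open up permissions on the target directory
    subprocess.check_call(['adb', '-s', serial, 'push', host_dir, target_dir])
    subprocess.check_call(['adb', '-s', serial, 'shell', '/system/bin/chmod', '0777', target_dir])
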
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/boot/__init__.py 2016.3-1/lava_dispatcher/pipeline/actions/boot/__init__.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/boot/__init__.py	2015-09-09 14:30:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/boot/__init__.py	2016-02-02 08:07:05.000000000 +0000
@@ -20,8 +20,12 @@
 
 from lava_dispatcher.pipeline.action import Action
 from lava_dispatcher.pipeline.logical import RetryAction
-from lava_dispatcher.pipeline.connection import wait_for_prompt
-from lava_dispatcher.pipeline.utils.constants import AUTOLOGIN_DEFAULT_TIMEOUT
+from lava_dispatcher.pipeline.utils.constants import (
+    AUTOLOGIN_DEFAULT_TIMEOUT,
+    DEFAULT_SHELL_PROMPT,
+    DISTINCTIVE_PROMPT_CHARACTERS,
+)
+from lava_dispatcher.pipeline.utils.shell import wait_for_prompt
 
 
 class BootAction(RetryAction):
@@ -48,47 +52,93 @@ class AutoLoginAction(Action):
     """
     Automatically login on the device.
     If 'auto_login' is not present in the parameters, this action does nothing.
+
+    This Action expects POSIX-compatible support of PS1 from the shell
     """
     def __init__(self):
         super(AutoLoginAction, self).__init__()
         self.name = 'auto-login-action'
         self.description = "automatically login after boot using job parameters"
         self.summary = "Auto-login after boot"
+        self.check_prompt_characters_warning = (
+            "The string '%s' does not look like a typical prompt and"
+            " could match status messages instead. Please check the"
+            " job log files and use a prompt string which matches the"
+            " actual prompt string more closely."
+        )
         # FIXME: self.timeout.duration = AUTOLOGIN_DEFAULT_TIMEOUT
 
     def validate(self):
         super(AutoLoginAction, self).validate()
         # Skip auto login if the configuration is not found
         params = self.parameters.get('auto_login', None)
-        if params is None:
-            return
-
-        if not isinstance(params, dict):
-            self.errors = "'auto_login' should be a dictionary"
-            return
-
-        if 'login_prompt' not in params:
-            self.errors = "'login_prompt' is mandatory for auto_login"
-        if 'username' not in params:
-            self.errors = "'username' is mandatory for auto_login"
-
-        if 'password_prompt' in params:
-            if 'password' not in params:
-                self.errors = "'password' is mandatory if 'password_prompt' is used in auto_login"
+        if params:
+            if not isinstance(params, dict):
+                self.errors = "'auto_login' should be a dictionary"
+                return
+
+            if 'login_prompt' not in params:
+                self.errors = "'login_prompt' is mandatory for auto_login"
+            if not params['login_prompt']:
+                self.errors = "Value for 'login_prompt' cannot be empty"
+            if 'username' not in params:
+                self.errors = "'username' is mandatory for auto_login"
+
+            if 'password_prompt' in params:
+                if 'password' not in params:
+                    self.errors = "'password' is mandatory if 'password_prompt' is used in auto_login"
+
+        prompts = self.parameters.get('prompts', None)
+        if prompts is None:
+            self.errors = "'prompts' is mandatory for AutoLoginAction"
+
+        if not isinstance(prompts, (list, str)):
+            self.errors = "'prompts' should be a list or a str"
+
+        if not prompts:
+            self.errors = "Value for 'prompts' cannot be empty"
+
+        if isinstance(prompts, list):
+            for prompt in prompts:
+                if not prompt:
+                    self.errors = "Items of 'prompts' can't be empty"
 
     def run(self, connection, args=None):
+        def check_prompt_characters(prompt):
+            if not any([True for c in DISTINCTIVE_PROMPT_CHARACTERS if c in prompt]):
+                self.logger.warning(self.check_prompt_characters_warning % prompt)
+
         # Skip auto login if the configuration is not found
         params = self.parameters.get('auto_login', None)
         if params is None:
-            self.logger.debug("Skipping auto login")
-            return connection
+            self.logger.debug("Skipping auto login")
+        else:
+            self.logger.debug("Waiting for the login prompt")
+            connection.prompt_str = params['login_prompt']
+            self.wait(connection)
+            connection.sendline(params['username'])
+
+            if 'password_prompt' in params:
+                self.logger.debug("Waiting for password prompt")
+                connection.prompt_str = params['password_prompt']
+                self.wait(connection)
+                connection.sendline(params['password'])
+        # prompt_str can be a list or str
+        connection.prompt_str = [DEFAULT_SHELL_PROMPT]
+
+        prompts = self.parameters.get('prompts', None)
+        if isinstance(prompts, list):
+            connection.prompt_str.extend(prompts)
+            for prompt in prompts:
+                check_prompt_characters(prompt)
+        else:
+            connection.prompt_str.extend([prompts])
+            check_prompt_characters(prompts)
+
+        self.logger.debug("Setting shell prompt")
+        # may need to force a prompt here.
+        wait_for_prompt(connection.raw_connection, connection.prompt_str, connection.timeout.duration, '#')
+        # self.wait(connection)
+        connection.sendline('export PS1="%s"' % DEFAULT_SHELL_PROMPT)
 
-        self.logger.debug("Waiting for the login prompt")
-        wait_for_prompt(connection.raw_connection, params['login_prompt'], self.timeout.duration)
-        connection.sendline(params['username'])
-
-        if 'password_prompt' in params:
-            self.logger.debug("Waiting for password prompt")
-            wait_for_prompt(connection.raw_connection, params['password_prompt'], self.timeout.duration)
-            connection.sendline(params['password'])
         return connection
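
The new prompt handling warns when a requested prompt string contains none of the characters that usually appear in shell prompts. A minimal sketch of that check, assuming DISTINCTIVE_PROMPT_CHARACTERS is a short string such as "$#" (the real value lives in lava_dispatcher.pipeline.utils.constants):

    DISTINCTIVE_PROMPT_CHARACTERS = "$#"   # assumption for illustration

    def looks_like_prompt(prompt):
        # mirrors check_prompt_characters() above: at least one distinctive character
        return any(c in prompt for c in DISTINCTIVE_PROMPT_CHARACTERS)

    print(looks_like_prompt('root@device:~# '))   # True: no warning
    print(looks_like_prompt('Booting Linux'))     # False: warning would be logged
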
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/boot/ipxe.py 2016.3-1/lava_dispatcher/pipeline/actions/boot/ipxe.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/boot/ipxe.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/boot/ipxe.py	2016-02-03 11:46:15.000000000 +0000
@@ -0,0 +1,314 @@
+# Copyright (C) 2014 Linaro Limited
+#
+# Author: Matthew Hart <matthew.hart@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+# List just the subclasses supported for this base strategy
+# imported by the parser to populate the list of subclasses.
+
+import os
+import re
+from lava_dispatcher.pipeline.action import (
+    Action,
+    Pipeline,
+    Timeout,
+    InfrastructureError,
+)
+from lava_dispatcher.pipeline.logical import Boot
+from lava_dispatcher.pipeline.actions.boot import BootAction, AutoLoginAction
+from lava_dispatcher.pipeline.actions.boot.environment import ExportDeviceEnvironment
+from lava_dispatcher.pipeline.shell import ExpectShellSession
+from lava_dispatcher.pipeline.connections.serial import ConnectDevice
+from lava_dispatcher.pipeline.power import ResetDevice
+from lava_dispatcher.pipeline.utils.constants import (
+    IPXE_BOOT_PROMPT,
+    BOOT_MESSAGE,
+    BOOTLOADER_DEFAULT_CMD_TIMEOUT
+)
+from lava_dispatcher.pipeline.utils.strings import substitute
+from lava_dispatcher.pipeline.utils.network import dispatcher_ip
+from lava_dispatcher.pipeline.utils.filesystem import write_bootscript
+
+
+def bootloader_accepts(device, parameters):
+    if 'method' not in parameters:
+        raise RuntimeError("method not specified in boot parameters")
+    if parameters['method'] != 'ipxe':
+        return False
+    if 'actions' not in device:
+        raise RuntimeError("Invalid device configuration")
+    if 'boot' not in device['actions']:
+        return False
+    if 'methods' not in device['actions']['boot']:
+        raise RuntimeError("Device misconfiguration")
+    return True
+
+
+class IPXE(Boot):
+    """
+    The IPXE method prepares the command to run on the dispatcher but this
+    command needs to start a new connection and then interrupt iPXE.
+    An expect shell session can then be handed over to the BootloaderAction.
+    self.run_command is a blocking call, so Boot needs to use
+    a direct spawn call via ShellCommand (which wraps pexpect.spawn) then
+    hand this pexpect wrapper to subsequent actions as a shell connection.
+    """
+
+    compatibility = 1
+
+    def __init__(self, parent, parameters):
+        super(IPXE, self).__init__(parent)
+        self.action = BootloaderAction()
+        self.action.section = self.action_type
+        self.action.job = self.job
+        parent.add_action(self.action, parameters)
+
+    @classmethod
+    def accepts(cls, device, parameters):
+        if not bootloader_accepts(device, parameters):
+            return False
+        return 'ipxe' in device['actions']['boot']['methods']
+
+
+class BootloaderAction(BootAction):
+    """
+    Wraps the Retry Action to allow for actions which precede
+    the reset, e.g. Connect.
+    """
+    def __init__(self):
+        super(BootloaderAction, self).__init__()
+        self.name = "bootloader-action"
+        self.description = "interactive bootloader action"
+        self.summary = "pass boot commands"
+
+    def populate(self, parameters):
+        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
+        # customize the device configuration for this job
+        self.internal_pipeline.add_action(BootloaderCommandOverlay())
+        self.internal_pipeline.add_action(ConnectDevice())
+        self.internal_pipeline.add_action(BootloaderRetry())
+
+
+class BootloaderRetry(BootAction):
+
+    def __init__(self):
+        super(BootloaderRetry, self).__init__()
+        self.name = "bootloader-retry"
+        self.description = "interactive uboot retry action"
+        self.summary = "uboot commands with retry"
+        self.type = "ipxe"
+
+    def populate(self, parameters):
+        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
+        # establish a new connection before trying the reset
+        self.internal_pipeline.add_action(ResetDevice())
+        self.internal_pipeline.add_action(BootloaderInterrupt())
+        # need to look for Hit any key to stop autoboot
+        self.internal_pipeline.add_action(BootloaderCommandsAction())
+        # Add AutoLoginAction unconditionally as this action does nothing if
+        # the configuration does not contain 'auto_login'
+        self.internal_pipeline.add_action(AutoLoginAction())
+        self.internal_pipeline.add_action(ExpectShellSession())  # wait
+        self.internal_pipeline.add_action(ExportDeviceEnvironment())
+
+    def validate(self):
+        super(BootloaderRetry, self).validate()
+        if 'bootloader_prompt' not in self.job.device['actions']['boot']['methods'][self.type]['parameters']:
+            self.errors = "Missing bootloader prompt for device"
+        self.set_common_data(
+            'bootloader_prompt',
+            'prompt',
+            self.job.device['actions']['boot']['methods'][self.type]['parameters']['bootloader_prompt']
+        )
+
+    def run(self, connection, args=None):
+        connection = super(BootloaderRetry, self).run(connection, args)
+        self.logger.debug("Setting default test shell prompt")
+        if not connection.prompt_str:
+            connection.prompt_str = self.parameters['prompts']
+        connection.timeout = self.connection_timeout
+        self.wait(connection)
+        self.data['boot-result'] = 'failed' if self.errors else 'success'
+        return connection
+
+
+class BootloaderInterrupt(Action):
+    """
+    Support for interrupting the bootloader.
+    """
+    def __init__(self):
+        super(BootloaderInterrupt, self).__init__()
+        self.name = "bootloader-interrupt"
+        self.description = "interrupt bootloader"
+        self.summary = "interrupt bootloader to get a prompt"
+        self.type = "ipxe"
+
+    def validate(self):
+        super(BootloaderInterrupt, self).validate()
+        hostname = self.job.device['hostname']
+        # boards which are reset manually can be supported but errors have to be handled manually too.
+        if self.job.device.power_state in ['on', 'off']:
+            # to enable power to a device, either power_on or hard_reset are needed.
+            if self.job.device.power_command is '':
+                self.errors = "Unable to power on or reset the device %s" % hostname
+            if self.job.device.connect_command is '':
+                self.errors = "Unable to connect to device %s" % hostname
+        else:
+            self.logger.debug("%s may need manual intervention to reboot" % hostname)
+        device_methods = self.job.device['actions']['boot']['methods']
+        if 'bootloader_prompt' not in device_methods[self.type]['parameters']:
+            self.errors = "Missing bootloader prompt for device"
+
+    def run(self, connection, args=None):
+        if not connection:
+            raise RuntimeError("%s started without a connection already in use" % self.name)
+        connection = super(BootloaderInterrupt, self).run(connection, args)
+        self.logger.debug("Changing prompt to '%s'", IPXE_BOOT_PROMPT)
+        # device is to be put into a reset state, either by issuing 'reboot' or power-cycle
+        connection.prompt_str = IPXE_BOOT_PROMPT
+        self.wait(connection)
+        connection.sendcontrol("b")
+        return connection
+
+
+class BootloaderCommandOverlay(Action):
+    """
+    Replace KERNEL_ADDR and DTB placeholders with the actual values for this
+    particular pipeline.
+    Addresses are read from the device configuration parameters.
+    bootloader_type is determined from the boot action method strategy.
+    bootz or bootm is determined by the boot action method type (i.e. it is up to
+    the test writer to select the correct download file for the correct boot command).
+    server_ip is calculated at runtime.
+    Filenames are determined from the download Action.
+    """
+    def __init__(self):
+        super(BootloaderCommandOverlay, self).__init__()
+        self.name = "bootloader-overlay"
+        self.summary = "replace placeholders with job data"
+        self.description = "substitute job data into bootloader command list"
+        self.commands = None
+        self.type = "ipxe"
+        self.use_bootscript = False
+        self.lava_mac = ""
+
+    def validate(self):
+        super(BootloaderCommandOverlay, self).validate()
+        device_methods = self.job.device['actions']['boot']['methods']
+        params = self.job.device['actions']['boot']['methods'][self.type]['parameters']
+        if 'method' not in self.parameters:
+            self.errors = "missing method"
+        # FIXME: allow u-boot commands in the job definition (which make this type a list)
+        elif 'commands' not in self.parameters:
+            self.errors = "missing commands"
+        elif self.parameters['commands'] not in device_methods[self.parameters['method']]:
+            self.errors = "Command not found in supported methods"
+        elif 'commands' not in device_methods[self.parameters['method']][self.parameters['commands']]:
+            self.errors = "No commands found in parameters"
+        # download_action will set ['dtb'] as tftp_path, tmpdir & filename later, in the run step.
+        if 'use_bootscript' in params:
+            self.use_bootscript = params['use_bootscript']
+        if 'lava_mac' in params:
+            if re.match("([0-9A-F]{2}[:-]){5}([0-9A-F]{2})", params['lava_mac'], re.IGNORECASE):
+                self.lava_mac = params['lava_mac']
+            else:
+                self.errors = "lava_mac is not a valid mac address"
+        self.data.setdefault(self.type, {})
+        self.data[self.type].setdefault('commands', [])
+        self.commands = device_methods[self.parameters['method']][self.parameters['commands']]['commands']
+
+    def run(self, connection, args=None):
+        """
+        Read data from the download action and replace in context
+        Use common data for all values passed into the substitutions so that
+        multiple actions can use the same code.
+        """
+        # Multiple deployments would overwrite the value if parsed in the validate step.
+        # FIXME: implement isolation for repeated steps.
+        connection = super(BootloaderCommandOverlay, self).run(connection, args)
+        try:
+            ip_addr = dispatcher_ip()
+        except InfrastructureError as exc:
+            raise RuntimeError("Unable to get dispatcher IP address: %s" % exc)
+        substitutions = {
+            '{SERVER_IP}': ip_addr
+        }
+        substitutions['{RAMDISK}'] = self.get_common_data('file', 'ramdisk')
+        substitutions['{KERNEL}'] = self.get_common_data('file', 'kernel')
+        substitutions['{LAVA_MAC}'] = self.lava_mac
+        nfs_url = self.get_common_data('nfs_url', 'nfsroot')
+        if 'download_action' in self.data and 'nfsrootfs' in self.data['download_action']:
+            substitutions['{NFSROOTFS}'] = self.get_common_data('file', 'nfsroot')
+            substitutions['{NFS_SERVER_IP}'] = ip_addr
+        elif nfs_url:
+            substitutions['{NFSROOTFS}'] = nfs_url
+            substitutions['{NFS_SERVER_IP}'] = self.get_common_data('nfs_url', 'serverip')
+
+        substitutions['{ROOT}'] = self.get_common_data('uuid', 'root')  # UUID label, not a file
+        substitutions['{ROOT_PART}'] = self.get_common_data('uuid', 'boot_part')
+        if self.use_bootscript:
+            script = "/script.ipxe"
+            bootscript = self.get_common_data('tftp', 'tftp_dir') + script
+            bootscripturi = "tftp://%s/%s" % (ip_addr, os.path.dirname(substitutions['{KERNEL}']) + script)
+            write_bootscript(substitute(self.commands, substitutions), bootscript)
+            bootscript_commands = ['dhcp net0', "chain %s" % bootscripturi]
+            self.data[self.type]['commands'] = bootscript_commands
+        else:
+            self.data[self.type]['commands'] = substitute(self.commands, substitutions)
+        self.logger.debug("Parsed boot commands: %s" % '; '.join(self.data[self.type]['commands']))
+        return connection
+
+
+class BootloaderCommandsAction(Action):
+    """
+    Send the boot commands to the bootloader
+    """
+    def __init__(self):
+        super(BootloaderCommandsAction, self).__init__()
+        self.name = "bootloader-commands"
+        self.description = "send commands to bootloader"
+        self.summary = "interactive bootloader"
+        self.params = None
+        self.timeout = Timeout(self.name, BOOTLOADER_DEFAULT_CMD_TIMEOUT)
+        self.type = "ipxe"
+
+    def validate(self):
+        super(BootloaderCommandsAction, self).validate()
+        if self.type not in self.data:
+            self.errors = "Unable to read bootloader context data"
+        # get prompt_str from device parameters
+        self.params = self.job.device['actions']['boot']['methods'][self.type]['parameters']
+
+    def run(self, connection, args=None):
+        if not connection:
+            self.errors = "%s started without a connection already in use" % self.name
+        connection = super(BootloaderCommandsAction, self).run(connection, args)
+        connection.prompt_str = self.params['bootloader_prompt']
+        self.logger.debug("Changing prompt to %s" % connection.prompt_str)
+        self.wait(connection)
+        i = 1
+        for line in self.data[self.type]['commands']:
+            connection.sendline(line, delay=100, send_char=True)
+            if i != (len(self.data[self.type]['commands'])):
+                self.wait(connection)
+                i += 1
+        # allow for auto_login
+        connection.prompt_str = self.params.get('boot_message', BOOT_MESSAGE)
+        self.logger.debug("Changing prompt to %s" % connection.prompt_str)
+        self.wait(connection)
+        return connection
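
A hedged stand-in for the placeholder substitution BootloaderCommandOverlay performs on the device's iPXE command list (the real helper is lava_dispatcher.pipeline.utils.strings.substitute; the commands and values below are examples only):

    def substitute(command_list, dictionary):
        # simplified stand-in: replace each {PLACEHOLDER} that has a value
        output = []
        for line in command_list:
            for key, value in dictionary.items():
                if value:
                    line = line.replace(key, value)
            output.append(line)
        return output

    commands = ['dhcp net0', 'kernel tftp://{SERVER_IP}/{KERNEL} ip=dhcp', 'boot']
    print(substitute(commands, {'{SERVER_IP}': '192.168.0.10', '{KERNEL}': 'tmp123/vmlinuz'}))
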
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/boot/kexec.py 2016.3-1/lava_dispatcher/pipeline/actions/boot/kexec.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/boot/kexec.py	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/boot/kexec.py	2015-11-30 21:58:47.000000000 +0000
@@ -36,6 +36,9 @@ class BootKExec(Boot):
     Expects a shell session, checks for kexec executable and
     prepares the arguments to run kexec,
     """
+
+    compatibility = 1
+
     def __init__(self, parent, parameters):
         super(BootKExec, self).__init__(parent)
         self.action = BootKexecAction()
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/boot/lxc.py 2016.3-1/lava_dispatcher/pipeline/actions/boot/lxc.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/boot/lxc.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/boot/lxc.py	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,142 @@
+# Copyright (C) 2015 Linaro Limited
+#
+# Author: Senthil Kumaran S <senthil.kumaran@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+
+import os
+import tarfile
+from lava_dispatcher.pipeline.action import (
+    Pipeline,
+    Action,
+    JobError,
+)
+from lava_dispatcher.pipeline.logical import Boot
+from lava_dispatcher.pipeline.actions.boot import BootAction
+from lava_dispatcher.pipeline.actions.boot.environment import (
+    ExportDeviceEnvironment,
+)
+from lava_dispatcher.pipeline.actions.boot import AutoLoginAction
+from lava_dispatcher.pipeline.connections.lxc import (
+    ConnectLxc,
+)
+from lava_dispatcher.pipeline.shell import ExpectShellSession
+from lava_dispatcher.pipeline.utils.shell import infrastructure_error
+from lava_dispatcher.pipeline.utils.constants import (
+    LXC_PATH,
+)
+
+
+class BootLxc(Boot):
+    """
+    Attaches to the lxc container.
+    """
+    compatibility = 1
+
+    def __init__(self, parent, parameters):
+        super(BootLxc, self).__init__(parent)
+        self.action = BootLxcAction()
+        self.action.section = self.action_type
+        self.action.job = self.job
+        parent.add_action(self.action, parameters)
+
+    @classmethod
+    def accepts(cls, device, parameters):
+        if 'method' in parameters:
+            if parameters['method'] == 'lxc':
+                return True
+        return False
+
+
+class BootLxcAction(BootAction):
+    """
+    Provide for auto_login parameters in this boot stanza and re-establish the
+    connection after boot.
+    """
+    def __init__(self):
+        super(BootLxcAction, self).__init__()
+        self.name = "lxc_boot"
+        self.summary = "lxc boot"
+        self.description = "lxc boot into the system"
+
+    def populate(self, parameters):
+        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
+        self.internal_pipeline.add_action(LxcStartAction())
+        self.internal_pipeline.add_action(ConnectLxc())
+        # Add AutoLoginAction unconditionally as this action does nothing if
+        # the configuration does not contain 'auto_login'
+        self.internal_pipeline.add_action(AutoLoginAction())
+        self.internal_pipeline.add_action(ExpectShellSession())
+        self.internal_pipeline.add_action(ExportDeviceEnvironment())
+        self.internal_pipeline.add_action(LxcOverlayUnpack())
+
+
+class LxcStartAction(Action):
+    """
+    This action calls lxc-start to get into the system.
+    """
+
+    def __init__(self):
+        super(LxcStartAction, self).__init__()
+        self.name = "boot-lxc"
+        self.summary = "attempt to boot"
+        self.description = "boot into lxc container"
+        self.command = ''
+
+    def validate(self):
+        super(LxcStartAction, self).validate()
+        self.errors = infrastructure_error('lxc-start')
+
+    def run(self, connection, args=None):
+        connection = super(LxcStartAction, self).run(connection, args)
+        lxc_cmd = ['lxc-start', '-n', self.get_common_data('lxc', 'name'),
+                   '-d']
+        command_output = self.run_command(lxc_cmd)
+        if command_output and command_output != '':
+            raise JobError("Unable to start lxc container: %s" %
+                           command_output)  # FIXME: JobError needs a unit test
+        return connection
+
+
+class LxcOverlayUnpack(Action):
+
+    def __init__(self):
+        super(LxcOverlayUnpack, self).__init__()
+        self.name = "lxc-overlay-unpack"
+        self.summary = "unpack the overlay on the container"
+        self.description = "unpack the overlay to the container by copying"
+
+    def validate(self):
+        super(LxcOverlayUnpack, self).validate()
+        self.errors = infrastructure_error('tar')
+
+    def run(self, connection, args=None):
+        connection = super(LxcOverlayUnpack, self).run(connection, args)
+        overlay_file = self.data['compress-overlay'].get('output')
+        lxc_path = os.path.join(LXC_PATH, self.get_common_data('lxc', 'name'),
+                                "rootfs")
+        if not os.path.exists(lxc_path):
+            raise JobError("Lxc container rootfs not found")
+        tar_cmd = ['tar', '--warning', 'no-timestamp', '-C', lxc_path, '-xaf',
+                   overlay_file]
+        command_output = self.run_command(tar_cmd)
+        if command_output and command_output != '':
+            raise JobError("Unable to untar overlay: %s" %
+                           command_output)  # FIXME: JobError needs a unit test
+        self.data['boot-result'] = 'failed' if self.errors else 'success'
+        return connection
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/boot/qemu.py 2016.3-1/lava_dispatcher/pipeline/actions/boot/qemu.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/boot/qemu.py	2015-09-04 07:44:33.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/boot/qemu.py	2016-03-02 14:34:40.000000000 +0000
@@ -32,8 +32,8 @@ from lava_dispatcher.pipeline.shell impo
     ShellSession
 )
 from lava_dispatcher.pipeline.utils.shell import which
+from lava_dispatcher.pipeline.utils.strings import substitute
 from lava_dispatcher.pipeline.actions.boot import AutoLoginAction
-from lava_dispatcher.pipeline.connections.ssh import ConnectDynamicSsh
 
 
 # FIXME: decide if root_partition is needed, supported or can be removed from YAML.
@@ -49,6 +49,8 @@ class BootQEMU(Boot):
     hand this pexpect wrapper to subsequent actions as a shell connection.
     """
 
+    compatibility = 1
+
     def __init__(self, parent, parameters):
         super(BootQEMU, self).__init__(parent)
         self.action = BootQEMUImageAction()
@@ -93,19 +95,6 @@ class BootQemuRetry(RetryAction):
         self.description = "boot image using QEMU command line"
         self.summary = "boot QEMU image"
 
-    def validate(self):
-        super(BootQemuRetry, self).validate()
-        try:
-            # FIXME: need a schema and do this inside the NewDevice with a QemuDevice class? (just for parsing)
-            boot = self.job.device['actions']['boot']['methods']['qemu']
-            qemu_binary = which(boot['parameters']['command'])
-            command = [qemu_binary]
-            command.extend(boot['parameters'].get('options', []))
-            self.set_common_data('qemu-command', 'command', command)
-        # FIXME: AttributeError is an InfrastructureError in fact
-        except (KeyError, TypeError, AttributeError):
-            self.errors = "Invalid parameters"
-
     def populate(self, parameters):
         self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
         self.internal_pipeline.add_action(CallQemuAction())
@@ -118,11 +107,37 @@ class CallQemuAction(Action):
         self.name = "execute-qemu"
         self.description = "call qemu to boot the image"
         self.summary = "execute qemu to boot the image"
+        self.sub_command = []
 
     def validate(self):
         super(CallQemuAction, self).validate()
-        if 'test_image_prompts' not in self.job.device:
-            self.errors = "Unable to identify test image prompts from device configuration."
+        if 'prompts' not in self.parameters:
+            self.errors = "Unable to identify boot prompts from job definition."
+        if 'download_action' not in self.data:
+            self.errors = "No download_action data"
+        try:
+            # FIXME: need a schema and do this inside the NewDevice with a QemuDevice class? (just for parsing)
+            boot = self.job.device['actions']['boot']['methods']['qemu']
+            qemu_binary = which(boot['parameters']['command'])
+            self.sub_command = [qemu_binary]
+            self.sub_command.extend(boot['parameters'].get('options', []))
+        # FIXME: AttributeError is an InfrastructureError in fact
+        except (KeyError, TypeError, AttributeError):
+            self.errors = "Invalid parameters"
+        substitutions = {}
+        commands = []
+        for action in self.data['download_action'].keys():
+            if action not in ('offset', 'available_loops'):
+                image_arg = self.data['download_action'][action]['image_arg']
+                action_arg = self.data['download_action'][action]['file']
+                if not image_arg or not action_arg:
+                    self.errors = "Missing image_arg for %s. " % action
+                    continue
+                substitutions["{%s}" % action] = action_arg
+                commands.append(image_arg)
+        self.sub_command.extend(substitute(commands, substitutions))
+        if not self.sub_command:
+            self.errors = "No QEMU command to execute"
 
     def run(self, connection, args=None):
         """
@@ -135,22 +150,16 @@ class CallQemuAction(Action):
         to run commands issued *after* the device has booted.
         pexpect.spawn is one of the raw_connection objects for a Connection class.
         """
-        if 'download_action' not in self.data:
-            raise RuntimeError("Value for download_action is missing from %s" % self.name)
-        if 'image' not in self.data['download_action']:
-            raise RuntimeError("No image file setting from the download_action")
-        command = self.get_common_data('qemu-command', 'command')
-        command.extend(["-hda", self.data['download_action']['image']['file']])
-        self.logger.info("Boot command: %s" % ' '.join(command))
-
         # initialise the first Connection object, a command line shell into the running QEMU.
-        shell = ShellCommand(' '.join(command), self.timeout)
+        self.logger.info("Boot command: %s", ' '.join(self.sub_command))
+        shell = ShellCommand(' '.join(self.sub_command), self.timeout, logger=self.logger)
         if shell.exitstatus:
-            raise JobError("%s command exited %d: %s" % (command, shell.exitstatus, shell.readlines()))
+            raise JobError("%s command exited %d: %s" % (self.sub_command, shell.exitstatus, shell.readlines()))
         self.logger.debug("started a shell command")
 
         shell_connection = ShellSession(self.job, shell)
-        shell_connection.prompt_str = self.job.device['test_image_prompts']
+        if not shell_connection.prompt_str:
+            shell_connection.prompt_str = self.parameters['prompts']
         shell_connection = super(CallQemuAction, self).run(shell_connection, args)
 
         # FIXME: tests with multiple boots need to be handled too.
@@ -158,41 +167,4 @@ class CallQemuAction(Action):
         return shell_connection
 
 
-class VirtualMachine(Boot):
-
-    def __init__(self, parent, parameters):
-        super(VirtualMachine, self).__init__(parent)
-        self.action = BootVMAction()
-        self.action.job = self.job
-        parent.add_action(self.action, parameters)
-
-    @classmethod
-    def accepts(cls, device, parameters):
-        if 'actions' not in device or 'boot' not in device['actions']:
-            return False
-        if 'methods' not in device['actions']['boot']:
-            return False
-        if 'vm' not in device['actions']['boot']['methods']:
-            return False
-        if 'vm' != parameters['method']:
-            return False
-        if 'commands' not in parameters:
-            return False
-        return True
-
-
-class BootVMAction(BootAction):
-
-    def __init__(self):
-        super(BootVMAction, self).__init__()
-        self.name = "boot-vm"
-        self.summary = "boot a VM on a host"
-        self.description = "Execute commands to boot a VM"
-
-    def populate(self, parameters):
-        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
-        self.internal_pipeline.add_action(ConnectDynamicSsh())
-
-    def validate(self):
-        super(BootVMAction, self).validate()
-        print '###### FIXME ########', self.parameters['commands']
+# FIXME: implement a QEMU protocol to monitor VM boots
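The new CallQemuAction.validate() above builds the QEMU command line from per-image 'image_arg' templates in the job definition, replacing each '{key}' placeholder with the path of the downloaded file via substitute(). A minimal sketch of that mechanism follows; the paths and image names are illustrative only, and the simplified substitute() is a stand-in for the real helper in lava_dispatcher/pipeline/utils/strings.py, which may handle more cases:

    def substitute(command_list, dictionary):
        # naive stand-in: replace every "{key}" placeholder in each template string
        parsed = []
        for line in command_list:
            for key, value in dictionary.items():
                line = line.replace(key, value)
            parsed.append(line)
        return parsed

    # hypothetical data as recorded by the download actions
    downloads = {
        'rootfs': {'file': '/var/lib/lava/dispatcher/tmp/rootfs.img',
                   'image_arg': '-drive format=raw,file={rootfs}'},
    }
    sub_command = ['/usr/bin/qemu-system-x86_64', '-nographic', '-m', '512']
    substitutions = {}
    commands = []
    for key, entry in downloads.items():
        substitutions['{%s}' % key] = entry['file']
        commands.append(entry['image_arg'])
    sub_command.extend(substitute(commands, substitutions))
    # sub_command now ends with:
    # '-drive format=raw,file=/var/lib/lava/dispatcher/tmp/rootfs.img'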
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/boot/ssh.py 2016.3-1/lava_dispatcher/pipeline/actions/boot/ssh.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/boot/ssh.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/boot/ssh.py	2016-02-02 08:07:05.000000000 +0000
@@ -18,8 +18,10 @@
 # along
 # with this program; if not, see <http://www.gnu.org/licenses>.
 
-# pylint: disable=too-many-return-statements
+# pylint: disable=too-many-return-statements,too-many-instance-attributes
 
+import os
+import yaml
 from lava_dispatcher.pipeline.action import Pipeline, Action
 from lava_dispatcher.pipeline.logical import Boot, RetryAction
 from lava_dispatcher.pipeline.actions.boot import AutoLoginAction
@@ -27,6 +29,7 @@ from lava_dispatcher.pipeline.actions.bo
 from lava_dispatcher.pipeline.utils.shell import infrastructure_error
 from lava_dispatcher.pipeline.shell import ExpectShellSession
 from lava_dispatcher.pipeline.connections.ssh import ConnectSsh
+from lava_dispatcher.pipeline.protocols.multinode import MultinodeProtocol
 
 
 class SshLogin(Boot):
@@ -34,6 +37,9 @@ class SshLogin(Boot):
     Ssh boot strategy is a login process, without actually booting a kernel
     but still needs AutoLoginAction.
     """
+
+    compatibility = 1
+
     def __init__(self, parent, parameters):
         super(SshLogin, self).__init__(parent)
         self.action = SshAction()
@@ -72,6 +78,9 @@ class SshAction(RetryAction):
 
     def populate(self, parameters):
         self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
+        scp = Scp('overlay')
+        self.internal_pipeline.add_action(scp)
+        self.internal_pipeline.add_action(PrepareSsh())
         self.internal_pipeline.add_action(ConnectSsh())
         self.internal_pipeline.add_action(AutoLoginAction())
         self.internal_pipeline.add_action(ExpectShellSession())
@@ -79,6 +88,104 @@ class SshAction(RetryAction):
         self.internal_pipeline.add_action(ScpOverlayUnpack())
 
 
+class Scp(ConnectSsh):
+    """
+    Use the SSH connection options to copy files over SSH
+    One action per scp operation, just as with download action
+    Needs the reference into the common data for each file to copy
+    This is a Deploy action. lava-start is managed by the protocol;
+    when this action starts, the device is in the "receiving" state.
+    """
+    def __init__(self, key):
+        super(Scp, self).__init__()
+        self.name = "scp-deploy"
+        self.summary = "scp over the ssh connection"
+        self.description = "copy a file to a known device using scp"
+        self.key = key
+        self.scp = []
+
+    def validate(self):
+        super(Scp, self).validate()
+        params = self._check_params()
+        self.errors = infrastructure_error('scp')
+        if 'ssh' not in self.job.device['actions']['deploy']['methods']:
+            self.errors = "Unable to use %s without ssh deployment" % self.name
+        if 'ssh' not in self.job.device['actions']['boot']['methods']:
+            self.errors = "Unable to use %s without ssh boot" % self.name
+        if self.get_common_data("prepare-scp-overlay", self.key):
+            self.primary = False
+        elif 'host' not in self.job.device['actions']['deploy']['methods']['ssh']:
+            self.errors = "Invalid device or job configuration, missing host."
+        if not self.primary and len(
+                self.get_common_data("prepare-scp-overlay", self.key)) != 1:
+            self.errors = "Invalid number of host_keys"
+        if self.valid:
+            self.scp.append('scp')
+            self.scp.extend(params['options'])
+
+    def run(self, connection, args=None):
+        path = self.get_common_data(self.name, self.key)
+        if not path:
+            self.errors = "%s: could not find details of '%s'" % (self.name, self.key)
+            self.logger.error("%s: could not find details of '%s'", self.name, self.key)
+            return connection
+        overrides = self.get_common_data("prepare-scp-overlay", self.key)
+        if not self.primary:
+            self.logger.info("Retrieving common data for prepare-scp-overlay using %s", ','.join(overrides))
+            self.host = str(self.get_common_data("prepare-scp-overlay", overrides[0]))
+            self.logger.debug("Using common data for host: %s", self.host)
+        elif not self.host:
+            self.errors = "%s: could not find host for deployment" % self.name
+            self.logger.error("%s: could not find host for deployment", self.name)
+            return connection
+        destination = "%s-%s" % (self.job.job_id, os.path.basename(path))
+        command = self.scp[:]  # local copy
+        command.extend(['-i', self.identity_file])
+        command.extend(['-v'])
+        # add the local file as source
+        command.append(path)
+        command_str = " ".join(str(item) for item in command)
+        self.logger.info("Copying %s using %s to %s", self.key, command_str, self.host)
+        # add the remote as destination, with :/ top level directory
+        command.extend(["%s@%s:/%s" % (self.ssh_user, self.host, destination)])
+        self.logger.info(yaml.dump(command))
+        self.run_command(command)
+        connection = super(Scp, self).run(connection, args)
+        self.results = {'success': 'ssh deployment'}
+        self.set_common_data('scp-overlay-unpack', 'overlay', destination)
+        self.data['boot-result'] = 'failed' if self.errors else 'success'
+        return connection
+
+
+class PrepareSsh(Action):
+    """
+    Sets the host for the ConnectSsh
+    """
+    def __init__(self):
+        super(PrepareSsh, self).__init__()
+        self.name = "prepare-ssh"
+        self.summary = "set the host address of the ssh connection"
+        self.description = "determine which address to use for primary or secondary connections"
+        self.primary = False
+
+    def validate(self):
+        if 'parameters' in self.parameters and 'hostID' in self.parameters['parameters']:
+            self.set_common_data('ssh-connection', 'host', True)
+        else:
+            self.set_common_data('ssh-connection', 'host', False)
+            self.primary = True
+
+    def run(self, connection, args=None):
+        connection = super(PrepareSsh, self).run(connection, args)
+        if not self.primary:
+            host_data = self.get_common_data(MultinodeProtocol.name, self.parameters['parameters']['hostID'])
+            self.set_common_data(
+                'ssh-connection', 'host_address',
+                host_data[self.parameters['parameters']['host_key']]
+            )
+        return connection
+
+
 class ScpOverlayUnpack(Action):
 
     def __init__(self):
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/boot/strategies.py 2016.3-1/lava_dispatcher/pipeline/actions/boot/strategies.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/boot/strategies.py	2015-09-09 14:30:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/boot/strategies.py	2016-02-02 08:07:05.000000000 +0000
@@ -23,7 +23,11 @@
 
 # pylint: disable=unused-import
 
-from lava_dispatcher.pipeline.actions.boot.qemu import BootQEMU, VirtualMachine
+from lava_dispatcher.pipeline.actions.boot.qemu import BootQEMU
 from lava_dispatcher.pipeline.actions.boot.u_boot import UBoot
 from lava_dispatcher.pipeline.actions.boot.kexec import BootKExec
 from lava_dispatcher.pipeline.actions.boot.ssh import SshLogin, Schroot
+from lava_dispatcher.pipeline.actions.boot.fastboot import BootFastboot
+from lava_dispatcher.pipeline.actions.boot.uefi_menu import UefiMenu
+from lava_dispatcher.pipeline.actions.boot.lxc import BootLxc
+from lava_dispatcher.pipeline.actions.boot.ipxe import IPXE
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/boot/u_boot.py 2016.3-1/lava_dispatcher/pipeline/actions/boot/u_boot.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/boot/u_boot.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/boot/u_boot.py	2016-03-02 14:34:40.000000000 +0000
@@ -24,7 +24,6 @@
 from lava_dispatcher.pipeline.action import (
     Action,
     Pipeline,
-    JobError,
     Timeout,
     InfrastructureError,
 )
@@ -38,7 +37,6 @@ from lava_dispatcher.pipeline.utils.cons
     UBOOT_AUTOBOOT_PROMPT,
     UBOOT_DEFAULT_CMD_TIMEOUT,
     BOOT_MESSAGE,
-    SHUTDOWN_MESSAGE,
 )
 from lava_dispatcher.pipeline.utils.strings import substitute
 from lava_dispatcher.pipeline.utils.network import dispatcher_ip
@@ -68,6 +66,8 @@ class UBoot(Boot):
     hand this pexpect wrapper to subsequent actions as a shell connection.
     """
 
+    compatibility = 1
+
     def __init__(self, parent, parameters):
         super(UBoot, self).__init__(parent)
         self.action = UBootAction()
@@ -156,9 +156,12 @@ class UBootRetry(BootAction):
     def run(self, connection, args=None):
         connection = super(UBootRetry, self).run(connection, args)
         self.logger.debug("Setting default test shell prompt")
-        connection.prompt_str = self.job.device['test_image_prompts']
-        connection.timeout = self.timeout
+        if not connection.prompt_str:
+            connection.prompt_str = self.parameters['prompts']
+        self.logger.debug(connection.prompt_str)
+        connection.timeout = self.connection_timeout
         self.wait(connection)
+        self.logger.error(self.errors)
         self.data['boot-result'] = 'failed' if self.errors else 'success'
         return connection
 
@@ -327,8 +330,14 @@ class UBootCommandOverlay(Action):
         substitutions['{KERNEL}'] = self.get_common_data('file', 'kernel')
         substitutions['{DTB}'] = self.get_common_data('file', 'dtb')
 
+        nfs_url = self.get_common_data('nfs_url', 'nfsroot')
         if 'download_action' in self.data and 'nfsrootfs' in self.data['download_action']:
             substitutions['{NFSROOTFS}'] = self.get_common_data('file', 'nfsroot')
+            substitutions['{NFS_SERVER_IP}'] = ip_addr
+        elif nfs_url:
+
+            substitutions['{NFSROOTFS}'] = nfs_url
+            substitutions['{NFS_SERVER_IP}'] = self.get_common_data('nfs_url', 'serverip')
 
         substitutions['{ROOT}'] = self.get_common_data('uuid', 'root')  # UUID label, not a file
         substitutions['{ROOT_PART}'] = self.get_common_data('uuid', 'boot_part')
@@ -366,7 +375,6 @@ class UBootCommandsAction(Action):
         for line in self.data['u-boot']['commands']:
             self.wait(connection)
             connection.sendline(line)
-            self.wait(connection)
         # allow for auto_login
         params = self.job.device['actions']['boot']['methods']['u-boot']['parameters']
         connection.prompt_str = params.get('boot_message', BOOT_MESSAGE)
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/boot/uefi_menu.py 2016.3-1/lava_dispatcher/pipeline/actions/boot/uefi_menu.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/boot/uefi_menu.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/boot/uefi_menu.py	2016-03-02 14:34:48.000000000 +0000
@@ -0,0 +1,225 @@
+# Copyright (C) 2015 Linaro Limited
+#
+# Author: Neil Williams <neil.williams@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+
+from lava_dispatcher.pipeline.action import (
+    Action,
+    Pipeline,
+    InfrastructureError,
+)
+from lava_dispatcher.pipeline.menus.menus import (
+    SelectorMenuAction,
+    MenuConnect,
+    MenuInterrupt,
+    MenuReset
+)
+from lava_dispatcher.pipeline.connections.adb import (
+    ConnectAdb,
+    WaitForAdbDevice,
+)
+from lava_dispatcher.pipeline.logical import Boot
+from lava_dispatcher.pipeline.power import ResetDevice
+from lava_dispatcher.pipeline.shell import ExpectShellSession
+from lava_dispatcher.pipeline.utils.strings import substitute
+from lava_dispatcher.pipeline.utils.network import dispatcher_ip
+from lava_dispatcher.pipeline.actions.boot import BootAction, AutoLoginAction
+from lava_dispatcher.pipeline.actions.boot.fastboot import AdbOverlayUnpack
+from lava_dispatcher.pipeline.actions.boot.environment import ExportDeviceEnvironment
+
+
+class UefiMenu(Boot):
+    """
+    The UEFI Menu strategy selects the specified options
+    and inserts relevant strings into the UEFI menu instead
+    of issuing commands over a shell-like serial connection.
+    """
+
+    def __init__(self, parent, parameters):
+        super(UefiMenu, self).__init__(parent)
+        self.action = UefiMenuAction()
+        self.action.section = self.action_type
+        self.action.job = self.job
+        parent.add_action(self.action, parameters)
+
+    @classmethod
+    def accepts(cls, device, parameters):
+        if 'method' not in parameters:
+            raise RuntimeError("method not specified in boot parameters")
+        if parameters['method'] != 'uefi-menu':
+            return False
+        if 'boot' not in device['actions']:
+            return False
+        if 'methods' not in device['actions']['boot']:
+            raise RuntimeError("Device misconfiguration")
+        if 'uefi-menu' in device['actions']['boot']['methods']:
+            params = device['actions']['boot']['methods']['uefi-menu']['parameters']
+            if 'interrupt_prompt' in params and 'interrupt_string' in params:
+                return True
+        return False
+
+
+class UEFIMenuInterrupt(MenuInterrupt):
+
+    def __init__(self):
+        super(UEFIMenuInterrupt, self).__init__()
+        self.interrupt_prompt = None
+        self.interrupt_string = None
+
+    def validate(self):
+        super(UEFIMenuInterrupt, self).validate()
+        params = self.job.device['actions']['boot']['methods']['uefi-menu']['parameters']
+        self.interrupt_prompt = params['interrupt_prompt']
+        self.interrupt_string = params['interrupt_string']
+
+    def run(self, connection, args=None):
+        if not connection:
+            self.logger.debug("%s called without active connection", self.name)
+            return
+        connection = super(UEFIMenuInterrupt, self).run(connection, args)
+        connection.prompt_str = self.interrupt_prompt
+        self.wait(connection)
+        connection.raw_connection.send(self.interrupt_string)
+        return connection
+
+
+class UefiMenuSelector(SelectorMenuAction):
+
+    def __init__(self):
+        super(UefiMenuSelector, self).__init__()
+        self.name = 'uefi-menu-selector'
+        self.summary = 'select options in the uefi menu'
+        self.description = 'select specified uefi menu items'
+        self.selector.prompt = "Start:"
+        self.boot_message = None
+
+    def validate(self):
+        """
+        Setup the items and pattern based on the parameters for this
+        specific action, then let the base class complete the validation.
+        """
+        params = self.job.device['actions']['boot']['methods']['uefi-menu']['parameters']
+        if ('item_markup' not in params or
+                'item_class' not in params or 'separator' not in params or
+                'label_class' not in params):
+            self.errors = "Missing device parameters for UEFI menu operations"
+        if 'commands' not in self.parameters:
+            self.errors = "Missing commands in action parameters"
+            return
+        if self.parameters['commands'] not in self.job.device['actions']['boot']['methods']['uefi-menu']:
+            self.errors = "Missing commands for %s" % self.parameters['commands']
+        self.selector.item_markup = params['item_markup']
+        self.selector.item_class = params['item_class']
+        self.selector.separator = params['separator']
+        self.selector.label_class = params['label_class']
+        self.selector.prompt = params['bootloader_prompt']  # initial prompt
+        self.boot_message = params['boot_message']  # final prompt
+        self.items = self.job.device['actions']['boot']['methods']['uefi-menu'][self.parameters['commands']]
+        if 'character_delay' in self.job.device['actions']['boot']['methods']['uefi-menu']['parameters']:
+            self.send_char_delay = self.job.device['actions']['boot']['methods']['uefi-menu']['parameters']['character_delay']
+        super(UefiMenuSelector, self).validate()
+
+    def run(self, connection, args=None):
+        if not connection:
+            return connection
+        connection.prompt_str = self.selector.prompt
+        self.logger.debug("Looking for %s", self.selector.prompt)
+        self.wait(connection)
+        connection = super(UefiMenuSelector, self).run(connection, args)
+        self.logger.debug("Looking for %s", self.boot_message)
+        connection.prompt_str = self.boot_message
+        self.wait(connection)
+        self.data['boot-result'] = 'failed' if self.errors else 'success'
+        return connection
+
+
+class UefiSubstituteCommands(Action):
+
+    def __init__(self):
+        super(UefiSubstituteCommands, self).__init__()
+        self.name = 'uefi-commands'
+        self.summary = 'substitute job values into uefi commands'
+        self.description = 'set job-specific variables into the uefi menu commands'
+        self.items = None
+
+    def validate(self):
+        super(UefiSubstituteCommands, self).validate()
+        if self.parameters['commands'] not in self.job.device['actions']['boot']['methods']['uefi-menu']:
+            self.errors = "Missing commands for %s" % self.parameters['commands']
+        self.items = self.job.device['actions']['boot']['methods']['uefi-menu'][self.parameters['commands']]
+        for item in self.items:
+            if 'select' not in item:
+                self.errors = "Invalid device configuration for %s: %s" % (self.name, item)
+
+    def run(self, connection, args=None):
+        connection = super(UefiSubstituteCommands, self).run(connection, args)
+        try:
+            ip_addr = dispatcher_ip()
+        except InfrastructureError as exc:
+            raise RuntimeError("Unable to get dispatcher IP address: %s" % exc)
+        substitution_dictionary = {
+            '{SERVER_IP}': ip_addr,
+            '{RAMDISK}': self.get_common_data('file', 'ramdisk'),
+            '{KERNEL}': self.get_common_data('file', 'kernel'),
+            '{DTB}': self.get_common_data('file', 'dtb'),
+            'TEST_MENU_NAME': "LAVA %s test image" % self.parameters['commands']
+        }
+        if 'download_action' in self.data and 'nfsrootfs' in self.data['download_action']:
+            substitution_dictionary['{NFSROOTFS}'] = self.get_common_data('file', 'nfsroot')
+        for item in self.items:
+            if 'enter' in item['select']:
+                item['select']['enter'] = substitute([item['select']['enter']], substitution_dictionary)
+            if 'items' in item['select']:
+                # items is already a list, so pass without wrapping in []
+                item['select']['items'] = substitute(item['select']['items'], substitution_dictionary)
+        return connection
+
+
+class UefiMenuAction(BootAction):
+
+    def __init__(self):
+        super(UefiMenuAction, self).__init__()
+        self.name = 'uefi-menu-action'
+        self.summary = 'interact with uefi menu'
+        self.description = 'interrupt and select uefi menu items'
+
+    def validate(self):
+        super(UefiMenuAction, self).validate()
+        self.set_common_data(
+            'bootloader_prompt',
+            'prompt',
+            self.job.device['actions']['boot']['methods']['uefi-menu']['parameters']['bootloader_prompt']
+        )
+
+    def populate(self, parameters):
+        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
+        self.internal_pipeline.add_action(UefiSubstituteCommands())
+        self.internal_pipeline.add_action(MenuConnect())
+        self.internal_pipeline.add_action(ResetDevice())
+        self.internal_pipeline.add_action(UEFIMenuInterrupt())
+        self.internal_pipeline.add_action(UefiMenuSelector())
+        self.internal_pipeline.add_action(MenuReset())
+        if 'adb_serial_number' in self.job.device:
+            self.internal_pipeline.add_action(WaitForAdbDevice())
+            self.internal_pipeline.add_action(ConnectAdb())
+        self.internal_pipeline.add_action(AutoLoginAction())
+        if 'adb_serial_number' in self.job.device:
+            self.internal_pipeline.add_action(ExpectShellSession())
+            self.internal_pipeline.add_action(AdbOverlayUnpack())
+        self.internal_pipeline.add_action(ExportDeviceEnvironment())
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/deploy/apply_overlay.py 2016.3-1/lava_dispatcher/pipeline/actions/deploy/apply_overlay.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/deploy/apply_overlay.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/deploy/apply_overlay.py	2016-03-04 14:35:18.000000000 +0000
@@ -19,9 +19,7 @@
 # with this program; if not, see <http://www.gnu.org/licenses>.
 
 import os
-import lzma
-import tarfile
-import contextlib
+import shutil
 import subprocess
 from lava_dispatcher.pipeline.action import (
     Action,
@@ -31,12 +29,16 @@ from lava_dispatcher.pipeline.action imp
 )
 from lava_dispatcher.pipeline.actions.deploy.overlay import OverlayAction
 from lava_dispatcher.pipeline.utils.constants import (
-    RAMDISK_COMPRESSED_FNAME,
     RAMDISK_FNAME,
     DISPATCHER_DOWNLOAD_DIR,
 )
 from lava_dispatcher.pipeline.utils.filesystem import mkdtemp
-from lava_dispatcher.pipeline.utils.shell import which
+from lava_dispatcher.pipeline.utils.shell import infrastructure_error
+from lava_dispatcher.pipeline.utils.compression import (
+    compress_file,
+    decompress_file,
+    untar_file
+)
 
 
 class ApplyOverlayImage(Action):
@@ -58,12 +60,7 @@ class ApplyOverlayImage(Action):
                                self.data['loop_mount']['mntdir'])
         connection = super(ApplyOverlayImage, self).run(connection, args)
         # use tarfile module - no SELinux support here yet
-        try:
-            tar = tarfile.open(self.data['compress-overlay'].get('output'))
-            tar.extractall(self.data['loop_mount']['mntdir'])
-            tar.close()
-        except tarfile.TarError as exc:
-            raise RuntimeError("Unable to unpack overlay: %s" % exc)
+        untar_file(self.data['compress-overlay'].get('output'), self.data['loop_mount']['mntdir'])
         return connection
 
 
@@ -94,9 +91,13 @@ class PrepareOverlayTftp(Action):
         return connection
 
 
-class ApplyOverlayTftp(Action):  # FIXME: generic to more than just tftp
+class ApplyOverlayTftp(Action):
     """
     Unpacks the overlay on top of the ramdisk or nfsrootfs
+    Implicit default order: the overlay goes into the NFS rootfs by preference,
+    and only into the ramdisk if NFS is not specified.
+    Other actions applying overlays for other deployments need their
+    own logic.
     """
     def __init__(self):
         super(ApplyOverlayTftp, self).__init__()
@@ -106,34 +107,41 @@ class ApplyOverlayTftp(Action):  # FIXME
 
     def run(self, connection, args=None):
         connection = super(ApplyOverlayTftp, self).run(connection, args)
-        overlay_type = ''
         overlay_file = None
         directory = None
-        if self.parameters.get('ramdisk', None) is not None:
-            overlay_type = 'ramdisk'
-            overlay_file = self.data['compress-overlay'].get('output')
-            directory = self.data['extract-overlay-ramdisk']['extracted_ramdisk']
-        elif self.parameters.get('nfsrootfs', None) is not None:
-            overlay_type = 'nfsrootfs'
+        nfs_url = None
+        if self.parameters.get('nfsrootfs', None) is not None:
             overlay_file = self.data['compress-overlay'].get('output')
             directory = self.get_common_data('file', 'nfsroot')
+            self.logger.info("Applying overlay to NFS")
+        elif self.parameters.get('nfs_url', None) is not None:
+            nfs_url = self.parameters.get('nfs_url')
+            overlay_file = self.data['compress-overlay'].get('output')
+            self.logger.info("Applying overlay to persistent NFS")
+            # need to mount the persistent NFS here.
+            directory = mkdtemp(autoremove=False)
+            try:
+                subprocess.check_output(['mount', '-t', 'nfs', nfs_url, directory])
+            except subprocess.CalledProcessError as exc:
+                raise JobError(exc)
+        elif self.parameters.get('ramdisk', None) is not None:
+            overlay_file = self.data['compress-overlay'].get('output')
+            directory = self.data['extract-overlay-ramdisk']['extracted_ramdisk']
+            self.logger.info("Applying overlay to ramdisk")
         elif self.parameters.get('rootfs', None) is not None:
-            overlay_type = 'rootfs'
             overlay_file = self.data['compress-overlay'].get('output')
             directory = self.get_common_data('file', 'root')
         else:
             self.logger.debug("No overlay directory")
             self.logger.debug(self.parameters)
-        try:
-            tar = tarfile.open(overlay_file)
-            tar.extractall(directory)
-            tar.close()
-        except tarfile.TarError as exc:
-            raise RuntimeError("Unable to unpack %s overlay: %s" % (overlay_type, exc))
+        untar_file(overlay_file, directory)
+        if nfs_url:
+            subprocess.check_output(['umount', directory])
+            os.rmdir(directory)  # fails if the umount fails
         return connection
 
 
-class ExtractRootfs(Action):
+class ExtractRootfs(Action):  # pylint: disable=too-many-instance-attributes
     """
     Unpacks the rootfs and applies the overlay to it
     """
@@ -152,20 +160,6 @@ class ExtractRootfs(Action):
         super(ExtractRootfs, self).validate()
         if not self.parameters.get(self.param_key, None):  # idempotency
             return
-        if 'rootfs_compression' not in self.parameters:
-            self.errors = "Missing compression value for the rootfs"
-        valid = tarfile.TarFile
-        compression = self.parameters['rootfs_compression']
-        # tarfile in 2.7 lacks xz support, it is present in 3.4
-        if compression not in valid.__dict__['OPEN_METH'].keys():
-            if compression not in self.extra_compression:
-                self.errors = "Unsupported compression method: %s" % compression
-            elif compression == 'xz':
-                self.use_lzma = True
-                self.use_tarfile = False
-            else:
-                self.use_tarfile = False
-                self.errors = "Unrecognised compression method: %s" % compression
 
     def run(self, connection, args=None):
         if not self.parameters.get(self.param_key, None):  # idempotency
@@ -173,24 +167,9 @@ class ExtractRootfs(Action):
         connection = super(ExtractRootfs, self).run(connection, args)
         root = self.data['download_action'][self.param_key]['file']
         root_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
-        if self.use_tarfile:
-            try:
-                tar = tarfile.open(root)
-                tar.extractall(root_dir)
-                tar.close()
-            except tarfile.TarError as exc:
-                raise JobError("Unable to unpack %s: '%s' - %s" % (self.param_key, os.path.basename(root), exc))
-        elif self.use_lzma:
-            with contextlib.closing(lzma.LZMAFile(root)) as xz:
-                with tarfile.open(fileobj=xz) as tarball:
-                    try:
-                        tarball.extractall(root_dir)
-                    except tarfile.TarError as exc:
-                        raise JobError("Unable to unpack %s: '%s' - %s" % (self.param_key, os.path.basename(root), exc))
-        else:
-            raise RuntimeError("Unable to decompress %s: '%s'" % (self.param_key, os.path.basename(root)))
+        untar_file(root, root_dir)
         self.set_common_data('file', self.file_key, root_dir)
-        self.logger.debug("Extracted %s to %s" % (self.file_key, root_dir))
+        self.logger.debug("Extracted %s to %s", self.file_key, root_dir)
         return connection
 
 
@@ -237,23 +216,22 @@ class ExtractModules(Action):
     def run(self, connection, args=None):
         if not self.parameters.get('modules', None):  # idempotency
             return connection
-        self.logger.info("extracting")
         connection = super(ExtractModules, self).run(connection, args)
+        modules = self.data['download_action']['modules']['file']
         if not self.parameters.get('ramdisk', None):
             if not self.parameters.get('nfsrootfs', None):
                 raise JobError("Unable to identify a location for the unpacked modules")
-            else:
-                root = self.get_common_data('file', 'nfsroot')
-        else:
+        # if both NFS and ramdisk are specified, apply modules to both
+        # as the kernel may need some modules to bring up the network and
+        # will need other modules to support operations within the NFS
+        if self.parameters.get('nfsrootfs', None):
+            root = self.get_common_data('file', 'nfsroot')
+            self.logger.info("extracting modules file %s to %s", modules, root)
+            untar_file(modules, root)
+        if self.parameters.get('ramdisk', None):
             root = self.data['extract-overlay-ramdisk']['extracted_ramdisk']
-
-        modules = self.data['download_action']['modules']['file']
-        try:
-            tar = tarfile.open(modules)
-            tar.extractall(root)
-            tar.close()
-        except tarfile.TarError:
-            raise RuntimeError('Unable to extract tarball: %s to %s' % (modules, root))
+            self.logger.info("extracting modules file %s to %s", modules, root)
+            untar_file(modules, root)
         try:
             os.unlink(modules)
         except OSError as exc:
@@ -288,8 +266,12 @@ class ExtractRamdisk(Action):
         ramdisk_dir = mkdtemp()
         extracted_ramdisk = os.path.join(ramdisk_dir, 'ramdisk')
         os.mkdir(extracted_ramdisk)
-        ramdisk_compressed_data = os.path.join(ramdisk_dir, RAMDISK_COMPRESSED_FNAME)
-        if self.parameters.get('ramdisk-type', None) == 'u-boot':
+        compression = self.parameters['ramdisk'].get('compression', None)
+        suffix = ""
+        if compression:
+            suffix = ".%s" % compression
+        ramdisk_compressed_data = os.path.join(ramdisk_dir, RAMDISK_FNAME + suffix)
+        if self.parameters['ramdisk'].get('header', None) == 'u-boot':
             # TODO: 64 bytes is empirical - may need to be configurable in the future
             cmd = ('dd if=%s of=%s ibs=64 skip=1' % (ramdisk, ramdisk_compressed_data)).split(' ')
             try:
@@ -298,13 +280,9 @@ class ExtractRamdisk(Action):
                 raise RuntimeError('Unable to remove uboot header: %s' % ramdisk)
         else:
             # give the file a predictable name
-            os.rename(ramdisk, ramdisk_compressed_data)
+            shutil.move(ramdisk, ramdisk_compressed_data)
         self.logger.debug(os.system("file %s" % ramdisk_compressed_data))
-        cmd = ('gzip -d -f %s' % ramdisk_compressed_data).split(' ')
-        if self.run_command(cmd) is not '':
-            raise JobError('Unable to uncompress: %s - missing ramdisk-type?' % ramdisk_compressed_data)
-        # filename has been changed by gzip
-        ramdisk_data = os.path.join(ramdisk_dir, RAMDISK_FNAME)
+        ramdisk_data = decompress_file(ramdisk_compressed_data, compression)
         pwd = os.getcwd()
         os.chdir(extracted_ramdisk)
         cmd = ('cpio -i -F %s' % ramdisk_data).split(' ')
@@ -326,15 +304,20 @@ class CompressRamdisk(Action):
         self.name = "compress-ramdisk"
         self.summary = "compress ramdisk with overlay"
         self.description = "recreate a ramdisk with the overlay applied."
+        self.mkimage_arch = None
 
     def validate(self):
         super(CompressRamdisk, self).validate()
         if not self.parameters.get('ramdisk', None):  # idempotency
             return
-        try:
-            which('mkimage')
-        except InfrastructureError:
-            raise InfrastructureError("Unable to find mkimage - is u-boot-tools installed?")
+        if self.parameters['ramdisk'].get('add-header', None) == 'u-boot':
+            self.errors = infrastructure_error('mkimage')
+            if 'mkimage_arch' not in self.job.device['actions']['boot']['methods']['u-boot']['parameters']:
+                self.errors = "Missing architecture string for uboot mkimage support"
+                return
+            self.mkimage_arch = self.job.device['actions']['boot']['methods']['u-boot']['parameters']['mkimage_arch']
+        if not self.parameters['ramdisk'].get('compression', None):
+            self.errors = "No ramdisk compression method specified."
 
     def run(self, connection, args=None):
         if not self.parameters.get('ramdisk', None):  # idempotency
@@ -348,36 +331,34 @@ class CompressRamdisk(Action):
         ramdisk_data = self.data['extract-overlay-ramdisk']['ramdisk_file']
         pwd = os.getcwd()
         os.chdir(ramdisk_dir)
-        self.logger.debug("Building ramdisk %s containing %s" % (
-            ramdisk_data, ramdisk_dir
-        ))
+        self.logger.debug("Building ramdisk %s containing %s",
+                          ramdisk_data, ramdisk_dir)
         cmd = "find . | cpio --create --format='newc' > %s" % ramdisk_data
         try:
             # safe to use shell=True here, no external arguments
             log = subprocess.check_output(cmd, shell=True)
         except OSError as exc:
             raise RuntimeError('Unable to create cpio filesystem: %s' % exc)
-        self.logger.debug("%s\n%s" % (cmd, log))
-        os.chdir(os.path.dirname(ramdisk_data))
-        if self.run_command(("gzip %s" % ramdisk_data).split(' ')) is not '':
-            raise RuntimeError('Unable to compress cpio filesystem')
+        # lazy-logging would mean that the quoting of cmd causes invalid YAML
+        self.logger.debug("%s\n%s" % (cmd, log))  # pylint: disable=logging-not-lazy
+
+        # we need to compress the ramdisk with the same method it was submitted with
+        compression = self.parameters['ramdisk'].get('compression', None)
+        final_file = compress_file(ramdisk_data, compression)
         os.chdir(pwd)
-        final_file = os.path.join(os.path.dirname(ramdisk_data), 'ramdisk.cpio.gz')
         tftp_dir = os.path.dirname(self.data['download_action']['ramdisk']['file'])
 
-        if self.parameters.get('ramdisk-type', None) == 'u-boot':
+        if self.parameters['ramdisk'].get('add-header', None) == 'u-boot':
             ramdisk_uboot = final_file + ".uboot"
             self.logger.debug("Adding RAMdisk u-boot header.")
-            # FIXME: hidden architecture assumption
-            cmd = ("mkimage -A arm -T ramdisk -C none -d %s %s" % (final_file, ramdisk_uboot)).split(' ')
+            cmd = ("mkimage -A %s -T ramdisk -C none -d %s %s" % (self.mkimage_arch, final_file, ramdisk_uboot)).split(' ')
             if not self.run_command(cmd):
                 raise RuntimeError("Unable to add uboot header to ramdisk")
             final_file = ramdisk_uboot
 
-        os.rename(final_file, os.path.join(tftp_dir, os.path.basename(final_file)))
-        self.logger.debug("rename %s to %s" % (
-            final_file, os.path.join(tftp_dir, os.path.basename(final_file))
-        ))
+        shutil.move(final_file, os.path.join(tftp_dir, os.path.basename(final_file)))
+        self.logger.debug("rename %s to %s",
+                          final_file, os.path.join(tftp_dir, os.path.basename(final_file)))
         if self.parameters['to'] == 'tftp':
             suffix = self.data['tftp-deploy'].get('suffix', '')
             self.set_common_data('file', 'ramdisk', os.path.join(suffix, os.path.basename(final_file)))
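The repeated tarfile/lzma handling removed above is replaced by helpers imported from lava_dispatcher/pipeline/utils/compression.py, which this diff does not show. A rough sketch of what untar_file() and decompress_file() could look like, assuming tarfile autodetection and external gunzip/bunzip2/unxz tools; the real implementations may differ:

    import subprocess
    import tarfile

    def untar_file(infile, destdir):
        # 'r:*' autodetects gzip/bzip2 tarballs (xz needs an external tool on Python 2.7)
        try:
            with tarfile.open(infile, 'r:*') as tar:
                tar.extractall(destdir)
        except tarfile.TarError as exc:
            raise RuntimeError("Unable to unpack %s: %s" % (infile, exc))

    def decompress_file(infile, compression):
        # decompress in place and return the name of the decompressed file
        tools = {'gz': 'gunzip', 'bz2': 'bunzip2', 'xz': 'unxz'}
        if not compression:
            return infile
        if compression not in tools:
            raise RuntimeError("Unsupported compression: %s" % compression)
        subprocess.check_call([tools[compression], '-f', infile])
        return infile[:-(len(compression) + 1)]  # drop the ".gz"/".bz2"/".xz" suffix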
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/deploy/download.py 2016.3-1/lava_dispatcher/pipeline/actions/deploy/download.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/deploy/download.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/deploy/download.py	2016-03-04 14:35:18.000000000 +0000
@@ -33,6 +33,7 @@ import bz2
 import contextlib
 import lzma
 import zlib
+import shutil
 from lava_dispatcher.pipeline.action import (
     Action,
     JobError,
@@ -68,7 +69,10 @@ class DownloaderAction(RetryAction):
         self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
 
         # Find the right action according to the url
-        url = urlparse.urlparse(parameters[self.key])
+        if 'images' in parameters:
+            url = urlparse.urlparse(parameters['images'][self.key]['url'])
+        else:
+            url = urlparse.urlparse(parameters[self.key]['url'])
         if url.scheme == 'scp':
             action = ScpDownloadAction(self.key, self.path, url)
         elif url.scheme == 'http' or url.scheme == 'https':
@@ -122,9 +126,22 @@ class DownloadHandler(Action):  # pylint
     @contextlib.contextmanager
     def _decompressor_stream(self):
         dwnld_file = None
-        compression = self.parameters.get('compression', False)
+        compression = False
+        if 'images' in self.parameters:
+            compression = self.parameters['images'][self.key].get('compression', False)
+        else:
+            if self.key == 'ramdisk':
+                self.logger.debug("Not decompressing ramdisk as can be used compressed.")
+            else:
+                compression = self.parameters[self.key].get('compression', False)
+
         fname, _ = self._url_to_fname_suffix(self.path, compression)
 
+        if os.path.exists(fname):
+            nested_tmp_dir = os.path.join(self.path, self.key)
+            os.makedirs(nested_tmp_dir)
+            fname = os.path.join(nested_tmp_dir, os.path.basename(fname))
+
         decompressor = None
         if compression:
             if compression == 'gz':
@@ -139,7 +156,11 @@ class DownloadHandler(Action):  # pylint
 
         def write(buff):
             if decompressor:
-                buff = decompressor.decompress(buff)
+                try:
+                    buff = decompressor.decompress(buff)
+                except zlib.error as exc:
+                    self.logger.exception(exc)
+                    raise JobError(exc)
             dwnld_file.write(buff)
 
         try:
@@ -151,13 +172,27 @@ class DownloadHandler(Action):  # pylint
 
     def validate(self):
         super(DownloadHandler, self).validate()
-        self.url = urlparse.urlparse(self.parameters[self.key])
-        compression = self.parameters.get('compression', False)
-        fname, _ = self._url_to_fname_suffix(self.path, compression)
-
         self.data.setdefault('download_action', {self.key: {}})
-        self.data['download_action'][self.key] = {'file': fname}
+        if 'images' in self.parameters:
+            image = self.parameters['images'][self.key]
+            self.url = urlparse.urlparse(image['url'])
+            compression = image.get('compression', None)
+            image_name, _ = self._url_to_fname_suffix(self.path, compression)
+            image_arg = image.get('image_arg', None)
+            overlay = image.get('overlay', False)
+
+            self.data['download_action'].setdefault(self.key, {})
+            self.data['download_action'][self.key]['file'] = image_name
+            self.data['download_action'][self.key]['image_arg'] = image_arg
+        else:
+            self.url = urlparse.urlparse(self.parameters[self.key]['url'])
+            compression = self.parameters[self.key].get('compression', False)
+            overlay = self.parameters.get('overlay', False)
+            fname, _ = self._url_to_fname_suffix(self.path, compression)
+            self.data['download_action'][self.key] = {'file': fname}
 
+        if overlay:
+            self.data['download_action'][self.key]['overlay'] = overlay
         if compression:
             if compression not in ['gz', 'bz2', 'xz']:
                 self.errors = "Unknown 'compression' format '%s'" % compression
@@ -181,7 +216,24 @@ class DownloadHandler(Action):  # pylint
         md5 = hashlib.md5()
         sha256 = hashlib.sha256()
         with self._decompressor_stream() as (writer, fname):
-            self.logger.info("downloading %s as %s" % (self.parameters[self.key], fname))
+            md5sum = None
+            sha256sum = None
+
+            if 'images' in self.parameters:
+                remote = self.parameters['images'][self.key]
+
+                md5sum = remote.get('md5sum', None)
+                sha256sum = remote.get('sha256sum', None)
+            else:
+                remote = self.parameters[self.key]
+
+                if 'md5sum' in self.parameters:
+                    md5sum = self.parameters['md5sum'].get(self.key, None)
+
+                if 'sha256sum' in self.parameters:
+                    sha256sum = self.parameters['sha256sum'].get(self.key, None)
+
+            self.logger.info("downloading %s as %s" % (remote, fname))
 
             downloaded_size = 0
             beginning = time.time()
@@ -213,19 +265,28 @@ class DownloadHandler(Action):  # pylint
                               round(downloaded_size / (1024 * 1024 * (ending - beginning)), 2)))
 
         # set the dynamic data into the context
-        self.data['download_action'][self.key] = {
-            'file': fname,
-            'md5': md5.hexdigest(),
-            'sha256': sha256.hexdigest()
-        }
+        self.data['download_action'][self.key]['file'] = fname
+        self.data['download_action'][self.key]['md5'] = md5.hexdigest()
+        self.data['download_action'][self.key]['sha256'] = sha256.hexdigest()
+
+        if md5sum and md5sum != self.data['download_action'][self.key]['md5']:
+            self.logger.error("md5sum of downloaded content: %s" % (self.data['download_action'][self.key]['md5']))
+            self.logger.error("sha256sum of downloaded content: %s" % (self.data['download_action'][self.key]['sha256']))
+            raise JobError("MD5 checksum for '%s' does not match." % fname)
+
+        if sha256sum and sha256sum != self.data['download_action'][self.key]['sha256']:
+            self.logger.error("md5sum of downloaded content: %s" % (self.data['download_action'][self.key]['md5']))
+            self.logger.error("sha256sum of downloaded content: %s" % (self.data['download_action'][self.key]['sha256']))
+            raise JobError("SHA256 checksum for '%s' does not match." % fname)
+
         # certain deployments need prefixes set
         if self.parameters['to'] == 'tftp':
             suffix = self.data['tftp-deploy'].get('suffix', '')
             self.set_common_data('file', self.key, os.path.join(suffix, os.path.basename(fname)))
         else:
             self.set_common_data('file', self.key, fname)
-        self.logger.info("md5sum of downloaded content: %s" % (md5.hexdigest()))
-        self.logger.info("sha256sum of downloaded content: %s" % (sha256.hexdigest()))
+        self.logger.info("md5sum of downloaded content: %s" % (self.data['download_action'][self.key]['md5']))
+        self.logger.info("sha256sum of downloaded content: %s" % (self.data['download_action'][self.key]['sha256']))
         return connection
 
 
@@ -279,7 +340,12 @@ class HttpDownloadAction(DownloadHandler
         try:
             res = requests.head(self.url.geturl(), allow_redirects=True, timeout=HTTP_DOWNLOAD_TIMEOUT)
             if res.status_code != requests.codes.OK:  # pylint: disable=no-member
-                self.errors = "Resources not available at '%s'" % (self.url.geturl())
+                # try using (the slower) get for services with broken redirect support
+                res = requests.get(
+                    self.url.geturl(), allow_redirects=True, stream=True,
+                    timeout=HTTP_DOWNLOAD_TIMEOUT)
+                if res.status_code != requests.codes.OK:  # pylint: disable=no-member
+                    self.errors = "Resources not available at '%s'" % (self.url.geturl())
             else:
                 self.size = int(res.headers.get('content-length', -1))
         except requests.Timeout:
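The redirect workaround added to HttpDownloadAction.validate() above amounts to falling back from HEAD to a streamed GET before declaring the resource unavailable, since some servers mishandle HEAD on 302 redirects. A standalone sketch of that check; the function name and timeout value here are illustrative only:

    import requests

    HTTP_DOWNLOAD_TIMEOUT = 15  # assumed value; the real constant lives in utils/constants.py

    def remote_resource_available(url):
        # prefer the cheap HEAD request, then retry with a streamed GET
        res = requests.head(url, allow_redirects=True, timeout=HTTP_DOWNLOAD_TIMEOUT)
        if res.status_code == requests.codes.OK:
            return True
        res = requests.get(url, allow_redirects=True, stream=True,
                           timeout=HTTP_DOWNLOAD_TIMEOUT)
        return res.status_code == requests.codes.OK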
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/deploy/fastboot.py 2016.3-1/lava_dispatcher/pipeline/actions/deploy/fastboot.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/deploy/fastboot.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/deploy/fastboot.py	2016-03-02 14:34:40.000000000 +0000
@@ -0,0 +1,433 @@
+# Copyright (C) 2015 Linaro Limited
+#
+# Author: Senthil Kumaran S <senthil.kumaran@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+from lava_dispatcher.pipeline.logical import Deployment
+from lava_dispatcher.pipeline.connections.serial import ConnectDevice
+from lava_dispatcher.pipeline.power import ResetDevice
+from lava_dispatcher.pipeline.action import (
+    Pipeline,
+    JobError,
+)
+from lava_dispatcher.pipeline.actions.deploy import DeployAction
+from lava_dispatcher.pipeline.actions.deploy.overlay import OverlayAction
+from lava_dispatcher.pipeline.actions.deploy.download import (
+    DownloaderAction,
+)
+from lava_dispatcher.pipeline.utils.shell import infrastructure_error
+from lava_dispatcher.pipeline.utils.filesystem import mkdtemp
+from lava_dispatcher.pipeline.utils.constants import (
+    DISPATCHER_DOWNLOAD_DIR,
+    FASTBOOT_REBOOT_TIMEOUT,
+)
+
+
+def fastboot_accept(device, parameters):
+    """
+    Each fastboot deployment strategy uses these checks
+    as a base, then makes the final decision on the
+    style of fastboot deployment.
+    """
+    if 'to' not in parameters:
+        return False
+    if parameters['to'] != 'fastboot':
+        return False
+    if not device:
+        return False
+    if 'actions' not in device:
+        raise RuntimeError("Invalid device configuration")
+    if 'deploy' not in device['actions']:
+        return False
+    if 'adb_serial_number' not in device:
+        return False
+    if 'fastboot_serial_number' not in device:
+        return False
+    if 'methods' not in device['actions']['deploy']:
+        raise RuntimeError("Device misconfiguration")
+    return True
+
+
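+# A sketch (placeholder values) of the device dictionary fragment that the
+# checks in fastboot_accept() and Fastboot.accepts() expect:
+#
+#   adb_serial_number: <serial>
+#   fastboot_serial_number: <serial>
+#   actions:
+#     deploy:
+#       methods: [fastboot]
+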
+class Fastboot(Deployment):
+    """
+    Strategy class for a fastboot deployment.
+    Downloads the relevant parts and flashes them to the device using fastboot.
+    """
+    compatibility = 1
+
+    def __init__(self, parent, parameters):
+        super(Fastboot, self).__init__(parent)
+        self.action = FastbootAction()
+        self.action.section = self.action_type
+        self.action.job = self.job
+        parent.add_action(self.action, parameters)
+
+    @classmethod
+    def accepts(cls, device, parameters):
+        if not fastboot_accept(device, parameters):
+            return False
+        if 'fastboot' in device['actions']['deploy']['methods']:
+            return True
+        return False
+
+
+class FastbootAction(DeployAction):  # pylint:disable=too-many-instance-attributes
+
+    def __init__(self):
+        super(FastbootAction, self).__init__()
+        self.name = "fastboot-deploy"
+        self.description = "download files and deploy using fastboot"
+        self.summary = "fastboot deployment"
+        self.fastboot_dir = DISPATCHER_DOWNLOAD_DIR
+        try:
+            self.fastboot_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
+        except OSError:
+            pass
+
+    def validate(self):
+        super(FastbootAction, self).validate()
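+        # both the adb and fastboot binaries must be available on the dispatcher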
+        self.errors = infrastructure_error('adb')
+        self.errors = infrastructure_error('fastboot')
+        lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
+        self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id
+
+    def populate(self, parameters):
+        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
+        self.internal_pipeline.add_action(OverlayAction())
+        if hasattr(self.job.device, 'power_state'):
+            if self.job.device.power_state in ['on', 'off']:
+                self.internal_pipeline.add_action(ConnectDevice())
+                self.internal_pipeline.add_action(ResetDevice())
+        self.internal_pipeline.add_action(EnterFastbootAction())
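+        # One DownloaderAction per image key under parameters['images']
+        # ('yaml_line' is parser metadata, not an image), followed by the
+        # flash action matching that key.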
+        for image in parameters['images'].keys():
+            if image != 'yaml_line':
+                download = DownloaderAction(image, self.fastboot_dir)
+                download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
+                self.internal_pipeline.add_action(download)
+            if image == 'image':
+                self.internal_pipeline.add_action(FastbootUpdateAction())
+            if image == 'ptable':
+                self.internal_pipeline.add_action(ApplyPtableAction())
+            if image == 'boot':
+                self.internal_pipeline.add_action(ApplyBootAction())
+            if image == 'cache':
+                self.internal_pipeline.add_action(ApplyCacheAction())
+            if image == 'userdata':
+                self.internal_pipeline.add_action(ApplyUserdataAction())
+            if image == 'system':
+                self.internal_pipeline.add_action(ApplySystemAction())
+
+
+class EnterFastbootAction(DeployAction):
+    """
+    Enters fastboot bootloader.
+    """
+
+    def __init__(self):
+        super(EnterFastbootAction, self).__init__()
+        self.name = "enter_fastboot_action"
+        self.description = "enter fastboot bootloader"
+        self.summary = "enter fastboot"
+        self.retries = 10
+        self.sleep = 10
+
+    def validate(self):
+        super(EnterFastbootAction, self).validate()
+        if 'adb_serial_number' not in self.job.device:
+            self.errors = "device adb serial number missing"
+        elif self.job.device['adb_serial_number'] == '0000000000':
+            self.errors = "device adb serial number unset"
+        if 'fastboot_serial_number' not in self.job.device:
+            self.errors = "device fastboot serial number missing"
+        elif self.job.device['fastboot_serial_number'] == '0000000000':
+            self.errors = "device fastboot serial number unset"
+
+    def run(self, connection, args=None):
+        connection = super(EnterFastbootAction, self).run(connection, args)
+        adb_serial_number = self.job.device['adb_serial_number']
+        fastboot_serial_number = self.job.device['fastboot_serial_number']
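+        # If the device answers over adb it is booted into Android, so use adb
+        # to reboot into the bootloader; otherwise, if it is already visible to
+        # fastboot, issue 'reboot-bootloader' via fastboot.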
+        adb_cmd = ['adb', '-s', adb_serial_number, 'get-serialno']
+        command_output = self.run_command(adb_cmd)
+        if command_output and adb_serial_number in command_output:
+            adb_cmd = ['adb', '-s', adb_serial_number, 'reboot', 'bootloader']
+            command_output = self.run_command(adb_cmd)
+            if command_output and command_output != '':
+                raise JobError("Unable to enter fastboot using adb: %s" %
+                               command_output)  # FIXME: JobError needs a unit test
+        else:
+            fastboot_cmd = ['fastboot', '-s', fastboot_serial_number,
+                            'devices']
+            command_output = self.run_command(fastboot_cmd)
+            if command_output and fastboot_serial_number in command_output:
+                self.logger.debug("Device is in fastboot: %s" % command_output)
+                fastboot_cmd = ['fastboot', '-s', fastboot_serial_number,
+                                'reboot-bootloader']
+                command_output = self.run_command(fastboot_cmd)
+                if command_output and 'OKAY' not in command_output:
+                    raise JobError("Unable to enter fastboot: %s" %
+                                   command_output)  # FIXME: JobError needs a unit test
+        return connection
+
+
+class FastbootUpdateAction(DeployAction):
+    """
+    Fastboot update image.
+    """
+
+    def __init__(self):
+        super(FastbootUpdateAction, self).__init__()
+        self.name = "fastboot_update_action"
+        self.description = "fastboot update image"
+        self.summary = "fastboot update"
+        self.retries = 3
+        self.sleep = 10
+
+    def validate(self):
+        super(FastbootUpdateAction, self).validate()
+        if 'download_action' not in self.data:
+            raise RuntimeError("download-action missing: %s" % self.name)
+        if 'file' not in self.data['download_action']['image']:
+            self.errors = "no file specified for fastboot"
+        if 'fastboot_serial_number' not in self.job.device:
+            self.errors = "device fastboot serial number missing"
+        elif self.job.device['fastboot_serial_number'] == '0000000000':
+            self.errors = "device fastboot serial number unset"
+
+    def run(self, connection, args=None):
+        connection = super(FastbootUpdateAction, self).run(connection, args)
+        serial_number = self.job.device['fastboot_serial_number']
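+        # 'fastboot -w update <package>' flashes the update package and wipes
+        # the userdata and cache partitions.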
+        fastboot_cmd = ['fastboot', '-s', serial_number, '-w', 'update',
+                        self.data['download_action']['image']['file']]
+        command_output = self.run_command(fastboot_cmd)
+        if command_output and 'error' in command_output:
+            raise JobError("Unable to update image using fastboot: %s" %
+                           command_output)  # FIXME: JobError needs a unit test
+        return connection
+
+
+class FastbootRebootAction(DeployAction):
+    """
+    Fastboot Reboot.
+    """
+
+    def __init__(self):
+        super(FastbootRebootAction, self).__init__()
+        self.name = "fastboot_reboot_action"
+        self.description = "fastboot reboot"
+        self.summary = "fastboot reboot"
+        self.retries = 3
+        self.sleep = FASTBOOT_REBOOT_TIMEOUT
+
+    def validate(self):
+        super(FastbootRebootAction, self).validate()
+        if 'fastboot_serial_number' not in self.job.device:
+            self.errors = "device fastboot serial number missing"
+        elif self.job.device['fastboot_serial_number'] == '0000000000':
+            self.errors = "device fastboot serial number unset"
+
+    def run(self, connection, args=None):
+        connection = super(FastbootRebootAction, self).run(connection, args)
+        serial_number = self.job.device['fastboot_serial_number']
+        fastboot_cmd = ['fastboot', '-s', serial_number, 'reboot']
+        command_output = self.run_command(fastboot_cmd)
+        if command_output and 'error' in command_output:
+            raise JobError("Unable to reboot using fastboot: %s" %
+                           command_output)  # FIXME: JobError needs a unit test
+        return connection
+
+
+class ApplyPtableAction(DeployAction):
+    """
+    Fastboot deploy ptable image.
+    """
+
+    def __init__(self):
+        super(ApplyPtableAction, self).__init__()
+        self.name = "fastboot_apply_ptable_action"
+        self.description = "fastboot apply ptable image"
+        self.summary = "fastboot apply ptable"
+        self.retries = 3
+        self.sleep = 10
+
+    def validate(self):
+        super(ApplyPtableAction, self).validate()
+        if 'download_action' not in self.data:
+            raise RuntimeError("download-action missing: %s" % self.name)
+        if 'file' not in self.data['download_action']['ptable']:
+            self.errors = "no file specified for fastboot ptable image"
+        if 'fastboot_serial_number' not in self.job.device:
+            self.errors = "device fastboot serial number missing"
+        elif self.job.device['fastboot_serial_number'] == '0000000000':
+            self.errors = "device fastboot serial number unset"
+
+    def run(self, connection, args=None):
+        connection = super(ApplyPtableAction, self).run(connection, args)
+        serial_number = self.job.device['fastboot_serial_number']
+        fastboot_cmd = ['fastboot', '-s', serial_number, 'flash', 'ptable',
+                        self.data['download_action']['ptable']['file']]
+        command_output = self.run_command(fastboot_cmd)
+        if command_output and 'error' in command_output:
+            raise JobError("Unable to apply ptable image using fastboot: %s" %
+                           command_output)  # FIXME: JobError needs a unit test
+        return connection
+
+
+class ApplyBootAction(DeployAction):
+    """
+    Fastboot deploy boot image.
+    """
+
+    def __init__(self):
+        super(ApplyBootAction, self).__init__()
+        self.name = "fastboot_apply_boot_action"
+        self.description = "fastboot apply boot image"
+        self.summary = "fastboot apply boot"
+        self.retries = 3
+        self.sleep = 10
+
+    def validate(self):
+        super(ApplyBootAction, self).validate()
+        if 'download_action' not in self.data:
+            raise RuntimeError("download-action missing: %s" % self.name)
+        if 'file' not in self.data['download_action']['boot']:
+            self.errors = "no file specified for fastboot boot image"
+        if 'fastboot_serial_number' not in self.job.device:
+            self.errors = "device fastboot serial number missing"
+        elif self.job.device['fastboot_serial_number'] == '0000000000':
+            self.errors = "device fastboot serial number unset"
+
+    def run(self, connection, args=None):
+        connection = super(ApplyBootAction, self).run(connection, args)
+        serial_number = self.job.device['fastboot_serial_number']
+        fastboot_cmd = ['fastboot', '-s', serial_number, 'flash', 'boot',
+                        self.data['download_action']['boot']['file']]
+        command_output = self.run_command(fastboot_cmd)
+        if command_output and 'error' in command_output:
+            raise JobError("Unable to apply boot image using fastboot: %s" %
+                           command_output)  # FIXME: JobError needs a unit test
+        return connection
+
+
+class ApplyCacheAction(DeployAction):
+    """
+    Fastboot deploy cache image.
+    """
+
+    def __init__(self):
+        super(ApplyCacheAction, self).__init__()
+        self.name = "fastboot_apply_cache_action"
+        self.description = "fastboot apply cache image"
+        self.summary = "fastboot apply cache"
+        self.retries = 3
+        self.sleep = 10
+
+    def validate(self):
+        super(ApplyCacheAction, self).validate()
+        if 'download_action' not in self.data:
+            raise RuntimeError("download-action missing: %s" % self.name)
+        if 'file' not in self.data['download_action']['cache']:
+            self.errors = "no file specified for fastboot cache image"
+        if 'fastboot_serial_number' not in self.job.device:
+            self.errors = "device fastboot serial number missing"
+        elif self.job.device['fastboot_serial_number'] == '0000000000':
+            self.errors = "device fastboot serial number unset"
+
+    def run(self, connection, args=None):
+        connection = super(ApplyCacheAction, self).run(connection, args)
+        serial_number = self.job.device['fastboot_serial_number']
+        fastboot_cmd = ['fastboot', '-s', serial_number, 'flash', 'cache',
+                        self.data['download_action']['cache']['file']]
+        command_output = self.run_command(fastboot_cmd)
+        if command_output and 'error' in command_output:
+            raise JobError("Unable to apply cache image using fastboot: %s" %
+                           command_output)  # FIXME: JobError needs a unit test
+        return connection
+
+
+class ApplyUserdataAction(DeployAction):
+    """
+    Fastboot deploy userdata image.
+    """
+
+    def __init__(self):
+        super(ApplyUserdataAction, self).__init__()
+        self.name = "fastboot_apply_userdata_action"
+        self.description = "fastboot apply userdata image"
+        self.summary = "fastboot apply userdata"
+        self.retries = 3
+        self.sleep = 10
+
+    def validate(self):
+        super(ApplyUserdataAction, self).validate()
+        if 'download_action' not in self.data:
+            raise RuntimeError("download-action missing: %s" % self.name)
+        if 'file' not in self.data['download_action']['userdata']:
+            self.errors = "no file specified for fastboot userdata image"
+        if 'fastboot_serial_number' not in self.job.device:
+            self.errors = "device fastboot serial number missing"
+        elif self.job.device['fastboot_serial_number'] == '0000000000':
+            self.errors = "device fastboot serial number unset"
+
+    def run(self, connection, args=None):
+        connection = super(ApplyUserdataAction, self).run(connection, args)
+        serial_number = self.job.device['fastboot_serial_number']
+        fastboot_cmd = ['fastboot', '-s', serial_number,
+                        'flash', 'userdata',
+                        self.data['download_action']['userdata']['file']]
+        command_output = self.run_command(fastboot_cmd)
+        if command_output and 'error' in command_output:
+            raise JobError("Unable to apply userdata image using fastboot: %s" %
+                           command_output)  # FIXME: JobError needs a unit test
+        return connection
+
+
+class ApplySystemAction(DeployAction):
+    """
+    Fastboot deploy system image.
+    """
+
+    def __init__(self):
+        super(ApplySystemAction, self).__init__()
+        self.name = "fastboot_apply_system_action"
+        self.description = "fastboot apply system image"
+        self.summary = "fastboot apply system"
+        self.retries = 3
+        self.sleep = 10
+
+    def validate(self):
+        super(ApplySystemAction, self).validate()
+        if 'download_action' not in self.data:
+            raise RuntimeError("download-action missing: %s" % self.name)
+        if 'file' not in self.data['download_action']['system']:
+            self.errors = "no file specified for fastboot system image"
+        if 'fastboot_serial_number' not in self.job.device:
+            self.errors = "device fastboot serial number missing"
+        elif self.job.device['fastboot_serial_number'] == '0000000000':
+            self.errors = "device fastboot serial number unset"
+
+    def run(self, connection, args=None):
+        connection = super(ApplySystemAction, self).run(connection, args)
+        serial_number = self.job.device['fastboot_serial_number']
+        fastboot_cmd = ['fastboot', '-s', serial_number,
+                        'flash', 'system',
+                        self.data['download_action']['system']['file']]
+        command_output = self.run_command(fastboot_cmd)
+        if command_output and 'error' in command_output:
+            raise JobError("Unable to apply system image using fastboot: %s" %
+                           command_output)  # FIXME: JobError needs a unit test
+        return connection
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/deploy/image.py 2016.3-1/lava_dispatcher/pipeline/actions/deploy/image.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/deploy/image.py	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/deploy/image.py	2016-03-02 14:34:40.000000000 +0000
@@ -38,36 +38,38 @@ from lava_dispatcher.pipeline.actions.de
 from lava_dispatcher.pipeline.utils.filesystem import mkdtemp
 
 
-class DeployImageAction(DeployAction):
+class DeployImagesAction(DeployAction):
 
     def __init__(self):
-        super(DeployImageAction, self).__init__()
-        self.name = 'deployimage'
-        self.description = "deploy image using loopback mounts"
-        self.summary = "deploy image"
+        super(DeployImagesAction, self).__init__()
+        self.name = 'deployimages'
+        self.description = "deploy images using loopback mounts"
+        self.summary = "deploy images"
 
     def validate(self):
         # Nothing to do at this stage. Everything is done by internal actions
-        super(DeployImageAction, self).validate()
+        super(DeployImagesAction, self).validate()
 
     def populate(self, parameters):
         self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
         path = mkdtemp()
-        download = DownloaderAction('image', path)
-        download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
-        self.internal_pipeline.add_action(download)
-        if parameters.get('format', '') == 'qcow2':
-            self.internal_pipeline.add_action(QCowConversionAction('image'))
-        self.internal_pipeline.add_action(MountAction())
-        self.internal_pipeline.add_action(CustomisationAction())
-        self.internal_pipeline.add_action(OverlayAction())  # idempotent, includes testdef
-        self.internal_pipeline.add_action(ApplyOverlayImage())  # specific to image deployments
-        self.internal_pipeline.add_action(DeployDeviceEnvironment())
-        self.internal_pipeline.add_action(UnmountAction())
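+        # One download/mount/customise/overlay/unmount sequence is added per
+        # image listed under parameters['images']; 'yaml_line' is parser
+        # metadata, not an image.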
+        for image in parameters['images'].keys():
+            if image != 'yaml_line':
+                download = DownloaderAction(image, path)
+                download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
+                self.internal_pipeline.add_action(download)
+                if parameters['images'][image].get('format', '') == 'qcow2':
+                    self.internal_pipeline.add_action(QCowConversionAction(image))
+                self.internal_pipeline.add_action(MountAction(image))
+                self.internal_pipeline.add_action(CustomisationAction())
+                self.internal_pipeline.add_action(OverlayAction())  # idempotent, includes testdef
+                self.internal_pipeline.add_action(ApplyOverlayImage())  # specific to image deployments
+                self.internal_pipeline.add_action(DeployDeviceEnvironment())
+                self.internal_pipeline.add_action(UnmountAction())
 
 
 # FIXME: may need to be renamed if it can only deal with QEMU image deployment
-class DeployImage(Deployment):
+class DeployImages(Deployment):
     """
     Strategy class for an Image based Deployment.
     Accepts parameters to deploy a QEMU
@@ -83,10 +85,11 @@ class DeployImage(Deployment):
             test_definitions_action
         umount action
     """
+    compatibility = 1
 
     def __init__(self, parent, parameters):
-        super(DeployImage, self).__init__(parent)
-        self.action = DeployImageAction()
+        super(DeployImages, self).__init__(parent)
+        self.action = DeployImagesAction()
         self.action.section = self.action_type
         self.action.job = self.job
         parent.add_action(self.action, parameters)
@@ -102,7 +105,7 @@ class DeployImage(Deployment):
         if device['device_type'] != 'qemu':
             return False
         # lookup if the job parameters match the available device methods
-        if 'image' not in parameters:
+        if 'images' not in parameters:
             # python3 compatible
             # FIXME: too broad
             print("Parameters %s have not been implemented yet." % parameters.keys())  # pylint: disable=superfluous-parens
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/deploy/lxc.py 2016.3-1/lava_dispatcher/pipeline/actions/deploy/lxc.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/deploy/lxc.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/deploy/lxc.py	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,132 @@
+# Copyright (C) 2015 Linaro Limited
+#
+# Author: Senthil Kumaran S <senthil.kumaran@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+from lava_dispatcher.pipeline.logical import Deployment
+from lava_dispatcher.pipeline.action import (
+    Pipeline,
+    JobError,
+)
+from lava_dispatcher.pipeline.actions.deploy import DeployAction
+from lava_dispatcher.pipeline.actions.deploy.overlay import OverlayAction
+from lava_dispatcher.pipeline.utils.shell import infrastructure_error
+from lava_dispatcher.pipeline.protocols.lxc import LxcProtocol
+
+
+def lxc_accept(device, parameters):
+    """
+    Each lxc deployment strategy uses these checks as a base, then makes the
+    final decision on the style of lxc deployment.
+    """
+    if 'to' not in parameters:
+        return False
+    if 'os' not in parameters:
+        return False
+    if parameters['to'] != 'lxc':
+        return False
+    if not device:
+        return False
+    if 'actions' not in device:
+        raise RuntimeError("Invalid device configuration")
+    if 'deploy' not in device['actions']:
+        return False
+    if 'methods' not in device['actions']['deploy']:
+        raise RuntimeError("Device misconfiguration")
+    return True
+
+
+class Lxc(Deployment):
+    """
+    Strategy class for an LXC deployment.
+    Prepares the overlay and creates the container using lxc.
+    """
+    compatibility = 1
+
+    def __init__(self, parent, parameters):
+        super(Lxc, self).__init__(parent)
+        self.action = LxcAction()
+        self.action.section = self.action_type
+        self.action.job = self.job
+        parent.add_action(self.action, parameters)
+
+    @classmethod
+    def accepts(cls, device, parameters):
+        if not lxc_accept(device, parameters):
+            return False
+        if 'lxc' in device['actions']['deploy']['methods']:
+            return True
+        return False
+
+
+class LxcAction(DeployAction):  # pylint:disable=too-many-instance-attributes
+
+    def __init__(self):
+        super(LxcAction, self).__init__()
+        self.name = "lxc-deploy"
+        self.description = "download files and deploy using lxc"
+        self.summary = "lxc deployment"
+
+    def validate(self):
+        super(LxcAction, self).validate()
+        if LxcProtocol.name not in [protocol.name for protocol in self.job.protocols]:
+            self.errors = "Invalid job - missing protocol"
+        self.errors = infrastructure_error('lxc-create')
+        lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
+        self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id
+
+    def populate(self, parameters):
+        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
+        self.protocols = [protocol for protocol in self.job.protocols if protocol.name == LxcProtocol.name]
+        self.set_common_data('lxc', 'name', self.protocols[0].lxc_name)
+        self.set_common_data('lxc', 'distribution', self.protocols[0].lxc_dist)
+        self.set_common_data('lxc', 'release', self.protocols[0].lxc_release)
+        self.set_common_data('lxc', 'arch', self.protocols[0].lxc_arch)
+        self.internal_pipeline.add_action(OverlayAction())
+        self.internal_pipeline.add_action(LxcCreateAction())
+
+
+class LxcCreateAction(DeployAction):
+    """
+    Creates an LXC container.
+    """
+
+    def __init__(self):
+        super(LxcCreateAction, self).__init__()
+        self.name = "lxc_create_action"
+        self.description = "create lxc action"
+        self.summary = "create lxc"
+        self.retries = 10
+        self.sleep = 10
+
+    def validate(self):
+        super(LxcCreateAction, self).validate()
+
+    def run(self, connection, args=None):
+        connection = super(LxcCreateAction, self).run(connection, args)
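+        # Equivalent shell command (values come from the lxc protocol settings
+        # stored in common data by LxcAction.populate):
+        #   lxc-create -t download -n <name> -- --dist <dist> --release <release> --arch <arch>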
+        lxc_cmd = ['lxc-create', '-t', 'download',
+                   '-n', self.get_common_data('lxc', 'name'), '--',
+                   '--dist', self.get_common_data('lxc', 'distribution'),
+                   '--release', self.get_common_data('lxc', 'release'),
+                   '--arch', self.get_common_data('lxc', 'arch')]
+        command_output = self.run_command(lxc_cmd)
+        if command_output and 'Unpacking the rootfs' not in command_output:
+            raise JobError("Unable to create lxc container: %s" %
+                           command_output)  # FIXME: JobError needs a unit test
+        return connection
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/deploy/mount.py 2016.3-1/lava_dispatcher/pipeline/actions/deploy/mount.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/deploy/mount.py	2015-09-07 10:25:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/deploy/mount.py	2015-11-30 21:58:47.000000000 +0000
@@ -51,13 +51,13 @@ class OffsetAction(DeployAction):
     def validate(self):
         if 'download_action' not in self.data:
             self.errors = "missing download_action in parameters"
-        elif 'file' not in self.data['download_action']['image']:
+        elif 'file' not in self.data['download_action'][self.key]:
             self.errors = "no file specified to calculate offset"
 
     def run(self, connection, args=None):
         if 'download_action' not in self.data:
             raise RuntimeError("Missing download action")
-        if 'offset' in self.data['download_action']:
+        if 'offset' in self.data['download_action'][self.key]:
             # idempotency
             return connection
         connection = super(OffsetAction, self).run(connection, args)
@@ -82,8 +82,8 @@ class OffsetAction(DeployAction):
         for line in part_data.splitlines():
             found = re.match(pattern, line)
             if found:
-                self.data['download_action']['offset'] = found.group(1)
-        if 'offset' not in self.data['download_action']:
+                self.data['download_action'][self.key]['offset'] = found.group(1)
+        if 'offset' not in self.data['download_action'][self.key]:
             raise JobError(  # FIXME: JobError needs a unit test
                 "Unable to determine offset for %s" % image
             )
@@ -92,11 +92,12 @@ class OffsetAction(DeployAction):
 
 class LoopCheckAction(DeployAction):
 
-    def __init__(self):
+    def __init__(self, key):
         super(LoopCheckAction, self).__init__()
         self.name = "loop_check"
         self.description = "ensure a loop back mount operation is possible"
         self.summary = "check available loop back support"
+        self.key = key
 
     def validate(self):
         if 'download_action' not in self.data:
@@ -106,16 +107,16 @@ class LoopCheckAction(DeployAction):
             raise InfrastructureError("Could not mount the image without loopback devices. "
                                       "Is the 'loop' kernel module activated?")
         available_loops = len(glob.glob('/sys/block/loop*'))
-        self.data['download_action']['available_loops'] = available_loops
+        self.data['download_action'][self.key]['available_loops'] = available_loops
 
     def run(self, connection, args=None):
         connection = super(LoopCheckAction, self).run(connection, args)
-        if 'available_loops' not in self.data['download_action']:
+        if 'available_loops' not in self.data['download_action'][self.key]:
             raise RuntimeError("Unable to check available loop devices")
         args = ['/sbin/losetup', '-a']
         pro = self.run_command(args)
         mounted_loops = len(pro.strip().split("\n")) if pro else 0
-        available_loops = self.data['download_action']['available_loops']
+        available_loops = self.data['download_action'][self.key]['available_loops']
         # FIXME: we should retry as this can happen and be fixed automatically
         # when one is unmounted
         if mounted_loops >= available_loops:
@@ -133,7 +134,7 @@ class LoopMountAction(RetryAction):
     again in the test shell.
     """
 
-    def __init__(self):
+    def __init__(self, key):
         super(LoopMountAction, self).__init__()
         self.name = "loop_mount"
         self.description = "Mount using a loopback device and offset"
@@ -141,6 +142,7 @@ class LoopMountAction(RetryAction):
         self.retries = 10
         self.sleep = 10
         self.mntdir = None
+        self.key = key
 
     def validate(self):
         self.data[self.name] = {}
@@ -149,7 +151,7 @@ class LoopMountAction(RetryAction):
             raise RuntimeError("download-action missing: %s" % self.name)
         lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
         self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id
-        if 'file' not in self.data['download_action']['image']:
+        if 'file' not in self.data['download_action'][self.key]:
             self.errors = "no file specified to mount"
 
     def run(self, connection, args=None):
@@ -160,8 +162,8 @@ class LoopMountAction(RetryAction):
         mount_cmd = [
             'mount',
             '-o',
-            'loop,offset=%s' % self.data['download_action']['offset'],
-            self.data['download_action']['image']['file'],
+            'loop,offset=%s' % self.data['download_action'][self.key]['offset'],
+            self.data['download_action'][self.key]['file'],
             self.data[self.name]['mntdir']
         ]
         command_output = self.run_command(mount_cmd)
@@ -188,11 +190,12 @@ class MountAction(DeployAction):
     an OffsetAction, LoopCheckAction, LoopMountAction
     """
 
-    def __init__(self):
+    def __init__(self, key):
         super(MountAction, self).__init__()
         self.name = "mount_action"
         self.description = "mount with offset"
         self.summary = "mount loop"
+        self.key = key
 
     def validate(self):
         if not self.job:
@@ -209,10 +212,10 @@ class MountAction(DeployAction):
             raise RuntimeError("No job object supplied to action")
         # FIXME: not all mount operations will need these actions
         self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
-        self.internal_pipeline.add_action(OffsetAction('image'))
+        self.internal_pipeline.add_action(OffsetAction(self.key))
         # FIXME: LoopCheckAction and LoopMountAction should be in only one Action
-        self.internal_pipeline.add_action(LoopCheckAction())
-        self.internal_pipeline.add_action(LoopMountAction())
+        self.internal_pipeline.add_action(LoopCheckAction(self.key))
+        self.internal_pipeline.add_action(LoopMountAction(self.key))
 
 
 class UnmountAction(RetryAction):
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/deploy/overlay.py 2016.3-1/lava_dispatcher/pipeline/actions/deploy/overlay.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/deploy/overlay.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/deploy/overlay.py	2016-03-02 14:34:40.000000000 +0000
@@ -27,7 +27,10 @@ from lava_dispatcher.pipeline.actions.de
 from lava_dispatcher.pipeline.action import Action, Pipeline
 from lava_dispatcher.pipeline.actions.deploy.testdef import TestDefinitionAction
 from lava_dispatcher.pipeline.utils.filesystem import mkdtemp, check_ssh_identity_file
+from lava_dispatcher.pipeline.utils.shell import infrastructure_error
+from lava_dispatcher.pipeline.utils.network import rpcinfo_nfs
 from lava_dispatcher.pipeline.protocols.multinode import MultinodeProtocol
+from lava_dispatcher.pipeline.protocols.vland import VlandProtocol
 
 
 class CustomisationAction(DeployAction):
@@ -96,9 +99,11 @@ class OverlayAction(DeployAction):
         if any('ssh' in data for data in self.job.device['actions']['deploy']['methods']):
             # only devices supporting ssh deployments add this action.
             self.internal_pipeline.add_action(SshAuthorize())
+        self.internal_pipeline.add_action(VlandOverlayAction())
         self.internal_pipeline.add_action(MultinodeOverlayAction())
         self.internal_pipeline.add_action(TestDefinitionAction())
         self.internal_pipeline.add_action(CompressOverlay())
+        self.internal_pipeline.add_action(PersistentNFSOverlay())  # idempotent
 
     def run(self, connection, args=None):
         """
@@ -107,7 +112,7 @@ class OverlayAction(DeployAction):
         * copy runners into test runner directories
         """
         self.data[self.name].setdefault('location', mkdtemp())
-        self.logger.debug("Preparing overlay tarball in %s" % self.data[self.name]['location'])
+        self.logger.debug("Preparing overlay tarball in %s", self.data[self.name]['location'])
         if 'lava_test_results_dir' not in self.data:
             self.logger.error("Unable to identify lava test results directory - missing OS type?")
             return connection
@@ -117,11 +122,11 @@ class OverlayAction(DeployAction):
             path = os.path.abspath("%s/%s" % (lava_path, runner_dir))
             if not os.path.exists(path):
                 os.makedirs(path, 0755)
-                self.logger.debug("makedir: %s" % path)
+                self.logger.debug("makedir: %s", path)
         for fname in self.scripts_to_copy:
             with open(fname, 'r') as fin:
                 output_file = '%s/bin/%s' % (lava_path, os.path.basename(fname))
-                self.logger.debug("Creating %s" % output_file)
+                self.logger.debug("Creating %s", output_file)
                 with open(output_file, 'w') as fout:
                     fout.write("#!%s\n\n" % self.parameters['deployment_data']['lava_test_sh_cmd'])
                     fout.write(fin.read())
@@ -166,7 +171,7 @@ class MultinodeOverlayAction(OverlayActi
 
     def run(self, connection, args=None):
         if self.role is None:
-            self.logger.debug("skipped %s" % self.name)
+            self.logger.debug("skipped %s", self.name)
             return connection
         if 'location' not in self.data['lava-overlay']:
             raise RuntimeError("Missing lava overlay location")
@@ -184,13 +189,13 @@ class MultinodeOverlayAction(OverlayActi
         lava_path = os.path.abspath("%s/%s" % (location, self.data['lava_test_results_dir']))
         scripts_to_copy = glob.glob(os.path.join(self.lava_multi_node_test_dir, 'lava-*'))
         self.logger.debug(self.lava_multi_node_test_dir)
-        self.logger.debug("lava_path:%s scripts:%s" % (lava_path, scripts_to_copy))
+        self.logger.debug({"lava_path": lava_path, "scripts": scripts_to_copy})
 
         for fname in scripts_to_copy:
             with open(fname, 'r') as fin:
                 foutname = os.path.basename(fname)
                 output_file = '%s/bin/%s' % (lava_path, foutname)
-                self.logger.debug("Creating %s" % output_file)
+                self.logger.debug("Creating %s", output_file)
                 with open(output_file, 'w') as fout:
                     fout.write("#!%s\n\n" % shell)
                     # Target-specific scripts (add ENV to the generic ones)
@@ -200,7 +205,7 @@ class MultinodeOverlayAction(OverlayActi
                             if client_name == 'yaml_line':
                                 continue
                             role_line = self.job.parameters['protocols'][self.protocol]['roles'][client_name]
-                            self.logger.debug("group roles:\t%s\t%s" % (client_name, role_line))
+                            self.logger.debug("group roles:\t%s\t%s", client_name, role_line)
                             fout.write(r"\t%s\t%s\n" % (client_name, role_line))
                         fout.write('"\n')
                     elif foutname == 'lava-role':
@@ -214,6 +219,111 @@ class MultinodeOverlayAction(OverlayActi
                         fout.write("LAVA_MULTI_NODE_DEBUG='yes'\n")
                     fout.write(fin.read())
                     os.fchmod(fout.fileno(), self.xmod)
+        self.call_protocols()
+        return connection
+
+
+class VlandOverlayAction(OverlayAction):
+    """
+    Adds data for vland interface locations, MAC addresses and vlan names
+    """
+    def __init__(self):
+        super(VlandOverlayAction, self).__init__()
+        self.name = "lava-vland-overlay"
+        self.summary = "Add files detailing vlan configuration."
+        self.description = "Populate specific vland scripts for tests to lookup vlan data."
+
+        # vland-only
+        self.lava_vland_test_dir = os.path.realpath(
+            '%s/../../../lava_test_shell/vland' % os.path.dirname(__file__))
+        self.lava_vland_cache_file = '/tmp/lava_vland_cache.txt'
+        self.params = {}
+        self.sysfs = []
+        self.tags = []
+        self.protocol = VlandProtocol.name
+
+    def populate(self, parameters):
+        # override the populate function of overlay action which provides the
+        # lava test directory settings etc.
+        pass
+
+    def validate(self):
+        super(VlandOverlayAction, self).validate()
+        # idempotency
+        if 'actions' not in self.job.parameters:
+            return
+        if 'protocols' not in self.job.parameters:
+            return
+        if self.protocol not in [protocol.name for protocol in self.job.protocols]:
+            return
+        if 'parameters' not in self.job.device:
+            self.errors = "Device lacks parameters"
+        elif 'interfaces' not in self.job.device['parameters']:
+            self.errors = "Device lacks vland interfaces data."
+        if not self.valid:
+            return
+        # same as the parameters of the protocol itself.
+        self.params = self.job.parameters['protocols'][self.protocol]
+        device_params = self.job.device['parameters']['interfaces']
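+        # Flatten the per-interface data for the overlay scripts: sysfs path,
+        # MAC address and interface name go into self.sysfs; (interface, tag)
+        # pairs go into self.tags.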
+        for interface in device_params:
+            self.sysfs.extend(
+                [
+                    device_params[interface]['sysfs'],
+                    device_params[interface]['mac'],
+                    interface
+                ]
+            )
+        for interface in device_params:
+            for tag in device_params[interface]['tags']:
+                self.tags.extend([interface, tag])
+
+    # pylint: disable=anomalous-backslash-in-string
+    def run(self, connection, args=None):
+        """
+        Writes out file contents from lists, across multiple lines
+        VAR="VAL1\n\
+        VAL2\n\
+        "
+        The \n and \ are used to avoid unwanted whitespace, so are escaped.
+        \n becomes \\n, \ becomes \\, which itself then needs \n to output:
+        VAL1
+        VAL2
+        """
+        if not self.params:
+            self.logger.debug("skipped %s", self.name)
+            return connection
+        if 'location' not in self.data['lava-overlay']:
+            raise RuntimeError("Missing lava overlay location")
+        if not os.path.exists(self.data['lava-overlay']['location']):
+            raise RuntimeError("Unable to find overlay location")
+        location = self.data['lava-overlay']['location']
+        shell = self.parameters['deployment_data']['lava_test_sh_cmd']
+
+        lava_path = os.path.abspath("%s/%s" % (location, self.data['lava_test_results_dir']))
+        scripts_to_copy = glob.glob(os.path.join(self.lava_vland_test_dir, 'lava-*'))
+        self.logger.debug(self.lava_vland_test_dir)
+        self.logger.debug({"lava_path": lava_path, "scripts": scripts_to_copy})
+
+        for fname in scripts_to_copy:
+            with open(fname, 'r') as fin:
+                foutname = os.path.basename(fname)
+                output_file = '%s/bin/%s' % (lava_path, foutname)
+                self.logger.debug("Creating %s", output_file)
+                with open(output_file, 'w') as fout:
+                    fout.write("#!%s\n\n" % shell)
+                    # Target-specific scripts (add ENV to the generic ones)
+                    if foutname == 'lava-vland-self':
+                        fout.write(r'LAVA_VLAND_SELF="')
+                        for line in self.sysfs:
+                            fout.write(r"%s\n" % line)
+                    elif foutname == 'lava-vland-tags':
+                        fout.write(r'LAVA_VLAND_TAGS="')
+                        for line in self.tags:
+                            fout.write(r"%s\n" % line)
+                    fout.write('"\n\n')
+                    fout.write(fin.read())
+                    os.fchmod(fout.fileno(), self.xmod)
+        self.call_protocols()
         return connection
 
 
@@ -324,7 +434,34 @@ class SshAuthorize(Action):
         # the key exists in the lava_test_results_dir to allow test writers to work around this
         # after logging in via the identity_file set here
         authorize = os.path.join(user_sshdir, 'authorized_keys')
-        self.logger.debug("Copying %s to %s" % ("%s.pub" % self.identity_file, authorize))
+        self.logger.debug("Copying %s to %s", "%s.pub" % self.identity_file, authorize)
         shutil.copyfile("%s.pub" % self.identity_file, authorize)
         os.chmod(authorize, 0600)
         return connection
+
+
+class PersistentNFSOverlay(Action):
+    """
+    Instead of extracting, just populate the location of the persistent NFS
+    so that it can be mounted later when the overlay is applied.
+    """
+
+    def __init__(self):
+        super(PersistentNFSOverlay, self).__init__()
+        self.name = "persistent-nfs-overlay"
+        self.section = 'deploy'
+        self.summary = "add test overlay to NFS"
+        self.description = "unpack overlay into persistent NFS"
+
+    def validate(self):
+        super(PersistentNFSOverlay, self).validate()
+        if 'nfs_url' not in self.parameters:
+            return None
+        if ':' not in self.parameters['nfs_url']:
+            self.errors = "Unrecognised NFS URL: '%s'" % self.parameters['nfs_url']
+            return
+        nfs_server, dirname = self.parameters['nfs_url'].split(':')
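+        # e.g. a (hypothetical) nfs_url of '10.0.0.1:/srv/nfs/rootfs' gives
+        # serverip '10.0.0.1' and nfsroot '/srv/nfs/rootfs'.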
+        self.errors = infrastructure_error('rpcinfo')
+        self.errors = rpcinfo_nfs(nfs_server)
+        self.set_common_data('nfs_url', 'nfsroot', dirname)
+        self.set_common_data('nfs_url', 'serverip', nfs_server)
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/deploy/removable.py 2016.3-1/lava_dispatcher/pipeline/actions/deploy/removable.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/deploy/removable.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/deploy/removable.py	2015-11-30 21:58:47.000000000 +0000
@@ -52,6 +52,9 @@ class Removable(Deployment):
     configuration and specifying a restricted UUID will invalidate the job to protect the bootloader.
 
     """
+
+    compatibility = 1
+
     def __init__(self, parent, parameters):
         super(Removable, self).__init__(parent)
         self.action = MassStorage()
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/deploy/ssh.py 2016.3-1/lava_dispatcher/pipeline/actions/deploy/ssh.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/deploy/ssh.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/deploy/ssh.py	2016-03-02 14:34:40.000000000 +0000
@@ -21,7 +21,6 @@
 
 import os
 from lava_dispatcher.pipeline.logical import Deployment
-from lava_dispatcher.pipeline.connections.ssh import Scp
 from lava_dispatcher.pipeline.action import Pipeline, Action
 from lava_dispatcher.pipeline.utils.filesystem import mkdtemp
 from lava_dispatcher.pipeline.actions.deploy import DeployAction
@@ -30,6 +29,7 @@ from lava_dispatcher.pipeline.actions.de
 from lava_dispatcher.pipeline.actions.deploy.overlay import OverlayAction
 from lava_dispatcher.pipeline.actions.deploy.download import DownloaderAction
 from lava_dispatcher.pipeline.utils.constants import DISPATCHER_DOWNLOAD_DIR
+from lava_dispatcher.pipeline.protocols.multinode import MultinodeProtocol
 
 # Deploy SSH can mean a few options:
 # for a primary connection, the device might need to be powered_on
@@ -47,6 +47,8 @@ class Ssh(Deployment):
     typically the overlay.
     """
 
+    compatibility = 1
+
     def __init__(self, parent, parameters):
         super(Ssh, self).__init__(parent)
         self.action = ScpOverlay()
@@ -89,7 +91,6 @@ class ScpOverlay(DeployAction):
             'firmware', 'kernel', 'dtb', 'rootfs', 'modules'
         ]
         lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
-        # FIXME: apply job_id to other overlay classes when settings lava_test_results_dir
         self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id
 
     def populate(self, parameters):
@@ -105,8 +106,6 @@ class ScpOverlay(DeployAction):
         self.internal_pipeline.add_action(PrepareOverlayScp())
         # prepare the device environment settings in common data for enabling in the boot step
         self.internal_pipeline.add_action(DeployDeviceEnvironment())
-        scp = Scp('overlay')
-        self.internal_pipeline.add_action(scp)
 
 
 class PrepareOverlayScp(Action):
@@ -120,6 +119,7 @@ class PrepareOverlayScp(Action):
         self.name = "prepare-scp-overlay"
         self.summary = "scp the overlay to the remote device"
         self.description = "copy the overlay over an existing ssh connection"
+        self.host_keys = []
 
     def validate(self):
         super(PrepareOverlayScp, self).validate()
@@ -130,6 +130,22 @@ class PrepareOverlayScp(Action):
             environment = {}
         environment.update({"LC_ALL": "C.UTF-8", "LANG": "C"})
         self.set_common_data('environment', 'env_dict', environment)
+        if 'protocols' in self.parameters:
+            # set run to call the protocol, retrieve the data and store.
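+            # Each entry should name an action and a messageID; entries addressed
+            # to this action are collected so that run() can substitute the data
+            # received for each messageID.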
+            for params in self.parameters['protocols'][MultinodeProtocol.name]:
+                if isinstance(params, str):
+                    self.errors = "Invalid protocol action setting - needs to be a list."
+                    continue
+                if 'action' not in params or params['action'] != self.name:
+                    continue
+                if 'messageID' not in params:
+                    self.errors = "Invalid protocol block: %s" % params
+                    return
+                if 'message' not in params or not isinstance(params['message'], dict):
+                    self.errors = "Missing message block for scp deployment"
+                    return
+                self.host_keys.append(params['messageID'])
+        self.set_common_data(self.name, 'overlay', self.host_keys)
 
     def populate(self, parameters):
         self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
@@ -138,6 +154,18 @@ class PrepareOverlayScp(Action):
 
     def run(self, connection, args=None):
         connection = super(PrepareOverlayScp, self).run(connection, args)
-        self.logger.info("Preparing to copy: %s" % os.path.basename(self.data['compress-overlay'].get('output')))
+        self.logger.info("Preparing to copy: %s", os.path.basename(self.data['compress-overlay'].get('output')))
         self.set_common_data('scp-deploy', 'overlay', self.data['compress-overlay'].get('output'))
+        for host_key in self.host_keys:
+            data = self.get_common_data(MultinodeProtocol.name, host_key)
+            if not data:
+                self.logger.warning("Missing data for host_key %s", host_key)
+                continue
+            for params in self.parameters['protocols'][MultinodeProtocol.name]:
+                replacement_key = [key for key, _ in params['message'].items() if key != 'yaml_line'][0]
+                if replacement_key not in data:
+                    self.logger.error("Mismatched replacement key %s and received data %s", replacement_key, data.keys())
+                    continue
+                self.set_common_data(self.name, host_key, str(data[replacement_key]))
+                self.logger.info("data %s replacement key is %s", host_key, self.get_common_data(self.name, host_key))
         return connection
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/deploy/strategies.py 2016.3-1/lava_dispatcher/pipeline/actions/deploy/strategies.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/deploy/strategies.py	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/deploy/strategies.py	2016-02-02 08:07:05.000000000 +0000
@@ -23,7 +23,9 @@
 
 # pylint: disable=unused-import
 
-from lava_dispatcher.pipeline.actions.deploy.image import DeployImage
+from lava_dispatcher.pipeline.actions.deploy.image import DeployImages
 from lava_dispatcher.pipeline.actions.deploy.tftp import Tftp
 from lava_dispatcher.pipeline.actions.deploy.removable import MassStorage
 from lava_dispatcher.pipeline.actions.deploy.ssh import Ssh
+from lava_dispatcher.pipeline.actions.deploy.fastboot import Fastboot
+from lava_dispatcher.pipeline.actions.deploy.lxc import Lxc
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/deploy/testdef.py 2016.3-1/lava_dispatcher/pipeline/actions/deploy/testdef.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/deploy/testdef.py	2015-09-10 08:06:51.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/deploy/testdef.py	2016-03-02 14:34:40.000000000 +0000
@@ -25,6 +25,7 @@ import yaml
 import base64
 import hashlib
 import tarfile
+import shutil
 from uuid import uuid4
 from collections import OrderedDict
 from nose.tools import nottest
@@ -260,6 +261,11 @@ class GitRepoAction(RepoAction):  # pyli
         runner_path = self.data['test'][self.uuid]['overlay_path'][self.parameters['test_name']]
         if os.path.exists(runner_path) and os.listdir(runner_path) == []:
             raise RuntimeError("Directory already exists and is not empty - duplicate Action?")
+
+        # Clear out any existing checkout so the clone starts from an empty directory
+        if os.path.exists(runner_path):
+            shutil.rmtree(runner_path)
+
         commit_id = self.vcs.clone(runner_path, self.parameters.get('revision', None))
         if commit_id is None:
             raise RuntimeError("Unable to get test definition from %s (%s)" % (self.vcs.binary, self.parameters))
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/deploy/tftp.py 2016.3-1/lava_dispatcher/pipeline/actions/deploy/tftp.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/deploy/tftp.py	2015-09-10 10:34:46.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/deploy/tftp.py	2016-02-02 08:07:05.000000000 +0000
@@ -1,4 +1,4 @@
-# Copyright (C) 2014 Linaro Limited
+# Copyright (C) 2014,2015 Linaro Limited
 #
 # Author: Neil Williams <neil.williams@linaro.org>
 #
@@ -22,13 +22,13 @@
 # imported by the parser to populate the list of subclasses.
 
 import os
-from lava_dispatcher.pipeline.action import Pipeline, InfrastructureError
+from lava_dispatcher.pipeline.action import Pipeline
 from lava_dispatcher.pipeline.logical import Deployment
 from lava_dispatcher.pipeline.actions.deploy import DeployAction
 from lava_dispatcher.pipeline.actions.deploy.download import DownloaderAction
 from lava_dispatcher.pipeline.actions.deploy.apply_overlay import PrepareOverlayTftp
 from lava_dispatcher.pipeline.actions.deploy.environment import DeployDeviceEnvironment
-from lava_dispatcher.pipeline.utils.shell import which
+from lava_dispatcher.pipeline.utils.shell import infrastructure_error
 from lava_dispatcher.pipeline.utils.filesystem import mkdtemp, tftpd_dir
 from lava_dispatcher.pipeline.utils.constants import DISPATCHER_DOWNLOAD_DIR
 
@@ -61,6 +61,9 @@ class Tftp(Deployment):
     Limited to what the bootloader can deploy which means ramdisk or nfsrootfs.
     rootfs deployments would format the device and create a single partition for the rootfs.
     """
+
+    compatibility = 1
+
     def __init__(self, parent, parameters):
         super(Tftp, self).__init__(parent)
         self.action = TftpAction()
@@ -83,7 +86,7 @@ class TftpAction(DeployAction):  # pylin
         super(TftpAction, self).__init__()
         self.name = "tftp-deploy"
         self.description = "download files and deploy using tftp"
-        self.summary = "tftp deploment"
+        self.summary = "tftp deployment"
         self.tftp_dir = tftpd_dir()
         self.suffix = None
         try:
@@ -103,18 +106,18 @@ class TftpAction(DeployAction):  # pylin
             self.errors = "%s needs a kernel to deploy" % self.name
         if not self.valid:
             return
+        if 'nfsrootfs' in self.parameters and 'nfs_url' in self.parameters:
+            self.errors = "Only one of nfsrootfs or nfs_url can be specified"
         lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
         self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id
         if self.suffix:
             self.data[self.name].setdefault('suffix', self.suffix)
         self.data[self.name].setdefault('suffix', os.path.basename(self.tftp_dir))
-        try:
-            which("in.tftpd")
-        except InfrastructureError as exc:
-            self.errors = str(exc)
+        self.errors = infrastructure_error('in.tftpd')
 
     def populate(self, parameters):
         self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
+        self.set_common_data('tftp', 'tftp_dir', self.tftp_dir)
         if 'ramdisk' in parameters:
             download = DownloaderAction('ramdisk', path=self.tftp_dir)
             download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/test/multinode.py 2016.3-1/lava_dispatcher/pipeline/actions/test/multinode.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/test/multinode.py	2015-09-09 14:30:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/test/multinode.py	2016-03-02 14:34:40.000000000 +0000
@@ -78,11 +78,11 @@ class MultinodeTestAction(TestShellActio
         self.protocols = [protocol for protocol in self.job.protocols if protocol.name == MultinodeProtocol.name]
         self.signal_director = self.SignalDirector(self.protocols[0])
 
-    def check_patterns(self, event, test_connection):
+    def check_patterns(self, event, test_connection, check_char):
         """
         Calls the parent check_patterns first, then checks for subclass pattern.
         """
-        ret = super(MultinodeTestAction, self).check_patterns(event, test_connection)
+        ret = super(MultinodeTestAction, self).check_patterns(event, test_connection, check_char)
         if event == 'multinode':
             name, params = test_connection.match.groups()
             self.logger.debug("Received Multi_Node API <LAVA_%s>" % name)
diff -pruN 2015.9-1/lava_dispatcher/pipeline/actions/test/shell.py 2016.3-1/lava_dispatcher/pipeline/actions/test/shell.py
--- 2015.9-1/lava_dispatcher/pipeline/actions/test/shell.py	2015-09-09 14:31:19.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/actions/test/shell.py	2016-03-02 14:34:40.000000000 +0000
@@ -18,17 +18,31 @@
 # along
 # with this program; if not, see <http://www.gnu.org/licenses>.
 
+import copy
 import logging
 import pexpect
 from collections import OrderedDict
-from lava_dispatcher.pipeline.actions.test import handle_testcase, TestAction
+
+from lava_dispatcher.pipeline.actions.test import (
+    TestAction,
+    handle_testcase
+)
 from lava_dispatcher.pipeline.action import (
     InfrastructureError,
     Pipeline,
     JobError,
 )
-from lava_dispatcher.pipeline.logical import LavaTest, RetryAction
-from lava_dispatcher.pipeline.connection import BaseSignalHandler, SignalMatch
+from lava_dispatcher.pipeline.logical import (
+    LavaTest,
+    RetryAction
+)
+from lava_dispatcher.pipeline.connection import (
+    BaseSignalHandler,
+    SignalMatch
+)
+from lava_dispatcher.pipeline.utils.constants import DEFAULT_SHELL_PROMPT
+
+# pylint: disable=too-many-branches,too-many-statements,too-many-instance-attributes
 
 
 class TestShell(LavaTest):
@@ -70,13 +84,11 @@ class TestShellAction(TestAction):
         self.signal_director = self.SignalDirector(None)  # no default protocol
         self.patterns = {}
         self.match = SignalMatch()
-        self.suite = None
-        self.testset = None
+        self.definition = None
+        self.testset_name = None  # FIXME
         self.report = {}
 
     def validate(self):
-        if "test_image_prompts" not in self.job.device:
-            self.errors = "Unable to identify test image prompts from device configuration."
         if "definitions" in self.parameters:
             for testdef in self.parameters["definitions"]:
                 if "repository" not in testdef:
@@ -114,9 +126,10 @@ class TestShellAction(TestAction):
 
         self.logger.info("Executing test definitions using %s" % connection.name)
         self.logger.debug("Setting default test shell prompt")
-        connection.prompt_str = self.job.device["test_image_prompts"]
+        if not connection.prompt_str:
+            connection.prompt_str = [DEFAULT_SHELL_PROMPT]
         self.logger.debug("Setting default timeout: %s" % self.timeout.duration)
-        connection.timeout = self.timeout
+        connection.timeout = self.connection_timeout
         self.wait(connection)
 
         # FIXME: a predictable UID could be calculated from existing data here.
@@ -141,13 +154,13 @@ class TestShellAction(TestAction):
             if self.timeout:
                 test_connection.timeout = self.timeout.duration
 
-            while self._keep_running(test_connection, test_connection.timeout):
+            while self._keep_running(test_connection, test_connection.timeout, connection.check_char):
                 pass
 
         self.logger.debug(self.report)
         return connection
 
-    def check_patterns(self, event, test_connection):
+    def check_patterns(self, event, test_connection, check_char):
         """
         Defines the base set of pattern responses.
         Stores the results of testcases inside the TestAction
@@ -156,16 +169,19 @@ class TestShellAction(TestAction):
         ret_val = False
         if event == "exit":
             self.logger.info("ok: lava_test_shell seems to have completed")
+            self.testset_name = None
 
         elif event == "eof":
             self.logger.warning("err: lava_test_shell connection dropped")
             self.errors = "lava_test_shell connection dropped"
+            self.testset_name = None
 
         elif event == "timeout":
             # if target.is_booted():
             #    target.reset_boot()
             self.logger.warning("err: lava_test_shell has timed out")
             self.errors = "lava_test_shell has timed out"
+            self.testset_name = None
 
         elif event == "signal":
             name, params = test_connection.match.groups()
@@ -173,8 +189,8 @@ class TestShellAction(TestAction):
             params = params.split()
             if name == "STARTRUN":
                 self.signal_director.test_uuid = params[1]
-                self.suite = params[0]
-                self.logger.debug("Starting test suite: %s" % self.suite)
+                self.definition = params[0]
+                self.logger.debug("Starting test definition: %s" % self.definition)
             #    self._handle_testrun(params)
             elif name == "TESTCASE":
                 data = handle_testcase(params)
@@ -187,21 +203,50 @@ class TestShellAction(TestAction):
                 # prevent losing data in the update
                 # FIXME: support parameters and retries
                 if res["test_case_id"] in p_res:
-                    raise JobError("Duplicate test_case_id in results: %s", res["test_case_id"])
-                # turn the result dict inside out to get the unique test_case_id as key and result as value
-                self.logger.results({
-                    'testsuite': self.suite,
-                    res["test_case_id"]: res["result"]})
-                self.report.update({
-                    res["test_case_id"]: res["result"]
-                })
+                    raise JobError(
+                        "Duplicate test_case_id in results: %s",
+                        res["test_case_id"])
+
+                # turn the result dict inside out to get the unique
+                # test_case_id/testset_name as key and result as value
+                if self.testset_name:
+                    self.logger.debug("result: %s" % res)
+                    self.logger.results({
+                        'test_definition': self.definition,
+                        'test_set': self.testset_name,
+                        res["test_case_id"]: res["result"]})
+                    self.report.update({
+                        'test_set': self.testset_name,
+                        res["test_case_id"]: res["result"]
+                    })
+                else:
+                    self.logger.debug("result: %s" % res)
+                    self.logger.results({
+                        'test_definition': self.definition,
+                        res["test_case_id"]: res["result"]})
+                    self.report.update({
+                        res["test_case_id"]: res["result"]
+                    })
+            elif name == "TESTSET":
+                action = params.pop(0)
+                if action == "START":
+                    name = "testset_" + action.lower()
+                    try:
+                        self.testset_name = params[0]
+                    except IndexError:
+                        raise JobError("Test set declared without a name")
+                    self.logger.info("Starting test_set %s", self.testset_name)
+                elif action == "STOP":
+                    self.logger.info("Closing test_set %s", self.testset_name)
+                    self.testset_name = None
+                    name = "testset_" + action.lower()
 
             try:
                 self.signal_director.signal(name, params)
             except KeyboardInterrupt:
                 raise KeyboardInterrupt
             # force output in case there was none but minimal content to increase speed.
-            test_connection.sendline("#")
+            test_connection.sendline(check_char)
             ret_val = True
 
         elif event == "test_case":
@@ -221,10 +266,10 @@ class TestShellAction(TestAction):
 
         return ret_val
 
-    def _keep_running(self, test_connection, timeout):
+    def _keep_running(self, test_connection, timeout, check_char):
         self.logger.debug("test shell timeout: %d seconds" % timeout)
         retval = test_connection.expect(list(self.patterns.values()), timeout=timeout)
-        return self.check_patterns(list(self.patterns.keys())[retval], test_connection)
+        return self.check_patterns(list(self.patterns.keys())[retval], test_connection, check_char)
 
     class SignalDirector(object):
 
@@ -261,7 +306,7 @@ class TestShellAction(TestAction):
                     # Without python support for switch, this gets harder to read than using
                     # a getattr lookup for the callable (codehelp). So disable checkers:
                     # noinspection PyCallingNonCallable
-                    handler(*params)  # pylint: disable=star-args
+                    handler(*params)
                 except KeyboardInterrupt:
                     raise KeyboardInterrupt
                 except TypeError as exc:
@@ -275,6 +320,12 @@ class TestShellAction(TestAction):
         def postprocess_bundle(self, bundle):
             pass
 
+        def _on_testset_start(self, set_name):
+            pass
+
+        def _on_testset_stop(self):
+            pass
+
         def _on_startrun(self, test_run_id, uuid):  # pylint: disable=unused-argument
             """
             runsh.write('echo "<LAVA_SIGNAL_STARTRUN $TESTRUN_ID $UUID>"\n')
diff -pruN 2015.9-1/lava_dispatcher/pipeline/connection.py 2016.3-1/lava_dispatcher/pipeline/connection.py
--- 2015.9-1/lava_dispatcher/pipeline/connection.py	2015-09-10 10:34:46.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/connection.py	2016-02-02 08:07:05.000000000 +0000
@@ -125,10 +125,20 @@ class Connection(object):
         self.results = {}
         self.match = None
         self.connected = True
+        self.check_char = '#'
 
-    def sendline(self, line):
+    def corruption_check(self):
+        self.sendline(self.check_char)
+
+    def sendline(self, line, delay=0, send_char=True):
+        if self.connected:
+            self.raw_connection.sendline(line, delay, send_char)
+        else:
+            raise RuntimeError()  # FIXME:
+
+    def sendcontrol(self, char):
         if self.connected:
-            self.raw_connection.sendline(line)
+            self.raw_connection.sendcontrol(char)
         else:
             raise RuntimeError()  # FIXME:
 
@@ -172,8 +182,8 @@ class CommandRunner(object):
         # self.logger.debug("Changing prompt to %s" % string)
         self._prompt_str = string
 
-    def wait_for_prompt(self, timeout=-1):
-        wait_for_prompt(self._connection, self._prompt_str, timeout)
+    def wait_for_prompt(self, timeout=-1, check_char='#'):
+        wait_for_prompt(self._connection, self._prompt_str, timeout, check_char)
 
     def get_connection(self):
         return self._connection
@@ -234,16 +244,22 @@ class Protocol(object):  # pylint: disab
     or adding a main function in the protocol python file and including a demo server script which
     can be run on the command line - using a different port to the default. However, this is likely
     to be of limited use because testing the actual API calls will need a functional test.
+
+    If a Protocol requires another Protocol to be available in order to run, the depending
+    Protocol *must* specify a higher level. All Protocol objects of a lower level are setup and
+    run before Protocol objects of a higher level. Protocols with the same level can be setup or run
+    in an arbitrary order (as the original source data is a dictionary).
     """
     name = 'protocol'
+    level = 0
 
     def __init__(self, parameters):
-        # FIXME: allow the bare logger to use the zmq socket
-        self.logger = logging.getLogger("root")
+        self.logger = logging.getLogger("dispatcher")
         self.poll_timeout = Timeout(self.name)
         self.parameters = None
         self.__errors__ = []
         self.parameters = parameters
+        self.configured = False
 
     @classmethod
     def select_all(cls, parameters):
@@ -252,7 +268,7 @@ class Protocol(object):  # pylint: disab
         Jobs may have zero or more protocols selected.
         """
         candidates = cls.__subclasses__()  # pylint: disable=no-member
-        return [c for c in candidates if c.accepts(parameters)]
+        return [(c, c.level) for c in candidates if c.accepts(parameters)]
 
     @property
     def errors(self):
@@ -269,9 +285,23 @@ class Protocol(object):  # pylint: disab
     def set_up(self):
         raise NotImplementedError()
 
+    def configure(self, device, job):
+        self.configured = True
+
     def finalise_protocol(self):
         raise NotImplementedError()
 
+    def check_timeout(self, duration, data):
+        """
+        Use if particular protocol calls can require a connection timeout
+        larger than the default_connection_duration.
+        :param duration: A minimum number of seconds
+        :param data: the API call
+        :return: True if checked, False if no limit is specified by the protocol.
+        raises JobError if the API call is invalid.
+        """
+        return False
+
     def _api_select(self, data):
         if not data:
             return None
diff -pruN 2015.9-1/lava_dispatcher/pipeline/connections/adb.py 2016.3-1/lava_dispatcher/pipeline/connections/adb.py
--- 2015.9-1/lava_dispatcher/pipeline/connections/adb.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/connections/adb.py	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,137 @@
+# Copyright (C) 2015 Linaro Limited
+#
+# Author: Senthil Kumaran S <senthil.kumaran@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+import signal
+from lava_dispatcher.pipeline.utils.shell import infrastructure_error
+from lava_dispatcher.pipeline.action import (
+    Action,
+    JobError,
+)
+from lava_dispatcher.pipeline.shell import ShellCommand, ShellSession
+
+# pylint: disable=too-many-public-methods
+
+
+class ConnectAdb(Action):
+    """
+    Class to use the device commands to make a adb shell connection to the
+    device.
+    """
+    def __init__(self):
+        super(ConnectAdb, self).__init__()
+        self.name = "connect-adb"
+        self.summary = "run connection command"
+        self.description = "use the configured command to connect adb to the device"
+        self.session_class = ShellSession
+        self.shell_class = ShellCommand
+
+    def validate(self):
+        super(ConnectAdb, self).validate()
+        if 'connect' not in self.job.device['commands']:
+            self.errors = "Unable to connect to device %s - missing connect command." % self.job.device.hostname
+            return
+        if 'prompts' not in self.parameters:
+            self.errors = "Unable to identify test image prompts from parameters."
+        command = self.job.device['commands']['connect']
+        exe = ''
+        try:
+            exe = command.split(' ')[0]
+        except AttributeError:
+            self.errors = "Unable to parse the connection command %s" % command
+        self.errors = infrastructure_error(exe)
+
+    def run(self, connection, args=None):
+        if connection:
+            self.logger.debug("Already connected")
+            connection.prompt_str = self.parameters['prompts']
+            return connection
+        command = self.job.device['commands']['connect'][:]  # local copy to retain idempotency.
+        self.logger.info("%s Connecting to device using '%s'", self.name, command)
+        signal.alarm(0)  # clear the timeouts used without connections.
+        # ShellCommand executes the connection command
+        shell = self.shell_class("%s\n" % command, self.timeout,
+                                 logger=self.logger)
+        if shell.exitstatus:
+            raise JobError("%s command exited %d: %s" % (command,
+                                                         shell.exitstatus,
+                                                         shell.readlines()))
+        # ShellSession monitors the pexpect
+        connection = self.session_class(self.job, shell)
+        connection.connected = True
+        connection = super(ConnectAdb, self).run(connection, args)
+        connection.prompt_str = self.parameters['prompts']
+        self.data['boot-result'] = 'failed' if self.errors else 'success'
+        return connection
+
+
+class WaitForAdbDevice(Action):
+    """
+    Waits for device that gets connected using adb.
+    """
+
+    def __init__(self):
+        super(WaitForAdbDevice, self).__init__()
+        self.name = "wait-for-adb-device"
+        self.summary = "Waits for adb device"
+        self.description = "Waits for availability of adb device"
+        self.prompts = []
+
+    def validate(self):
+        super(WaitForAdbDevice, self).validate()
+        if 'adb_serial_number' not in self.job.device:
+            self.errors = "device adb serial number missing"
+        elif self.job.device['adb_serial_number'] == '0000000000':
+            self.errors = "device adb serial number unset"
+
+    def run(self, connection, args=None):
+        connection = super(WaitForAdbDevice, self).run(connection, args)
+        serial_number = self.job.device['adb_serial_number']
+        adb_cmd = ['adb', '-s', serial_number, 'wait-for-device']
+        self.run_command(adb_cmd)
+        self.logger.debug("%s: Waiting for device", serial_number)
+        return connection
+
+
+class WaitForFastbootDevice(Action):
+    """
+    Waits for device that gets connected using fastboot.
+    """
+
+    def __init__(self):
+        super(WaitForFastbootDevice, self).__init__()
+        self.name = "wait-for-fastboot-device"
+        self.summary = "Waits for fastboot device"
+        self.description = "Waits for availability of fastboot device"
+        self.prompts = []
+
+    def validate(self):
+        super(WaitForFastbootDevice, self).validate()
+        if 'fastboot_serial_number' not in self.job.device:
+            self.errors = "device fastboot serial number missing"
+        elif self.job.device['fastboot_serial_number'] == '0000000000':
+            self.errors = "device fastboot serial number unset"
+
+    def run(self, connection, args=None):
+        connection = super(WaitForFastbootDevice, self).run(connection, args)
+        serial_number = self.job.device['fastboot_serial_number']
+        fastboot_cmd = ['fastboot', '-s', serial_number, 'wait-for-device']
+        self.run_command(fastboot_cmd)
+        self.logger.debug("%s: Waiting for device", serial_number)
+        return connection
diff -pruN 2015.9-1/lava_dispatcher/pipeline/connections/lxc.py 2016.3-1/lava_dispatcher/pipeline/connections/lxc.py
--- 2015.9-1/lava_dispatcher/pipeline/connections/lxc.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/connections/lxc.py	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,71 @@
+# Copyright (C) 2015 Linaro Limited
+#
+# Author: Senthil Kumaran S <senthil.kumaran@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+import signal
+from lava_dispatcher.pipeline.utils.shell import infrastructure_error
+from lava_dispatcher.pipeline.action import (
+    Action,
+    JobError,
+)
+from lava_dispatcher.pipeline.shell import ShellCommand, ShellSession
+
+# pylint: disable=too-many-public-methods
+
+
+class ConnectLxc(Action):
+    """
+    Class to make an lxc shell connection to the container.
+    """
+    def __init__(self):
+        super(ConnectLxc, self).__init__()
+        self.name = "connect-lxc"
+        self.summary = "run connection command"
+        self.description = "connect to the lxc container"
+        self.session_class = ShellSession
+        self.shell_class = ShellCommand
+
+    def validate(self):
+        super(ConnectLxc, self).validate()
+        self.errors = infrastructure_error('lxc-attach')
+        if 'prompts' not in self.parameters:
+            self.errors = "Unable to identify test image prompts from parameters."
+
+    def run(self, connection, args=None):
+        if connection:
+            self.logger.debug("Already connected")
+            connection.prompt_str = self.parameters['prompts']
+            return connection
+        cmd = "lxc-attach -n {0}".format(self.get_common_data('lxc', 'name'))
+        self.logger.info("%s Connecting to device using '%s'", self.name, cmd)
+        signal.alarm(0)  # clear the timeouts used without connections.
+        # ShellCommand executes the connection command
+        shell = self.shell_class("%s\n" % cmd, self.timeout,
+                                 logger=self.logger)
+        if shell.exitstatus:
+            raise JobError("%s command exited %d: %s" % (cmd,
+                                                         shell.exitstatus,
+                                                         shell.readlines()))
+        # ShellSession monitors the pexpect
+        connection = self.session_class(self.job, shell)
+        connection.connected = True
+        connection = super(ConnectLxc, self).run(connection, args)
+        connection.prompt_str = self.parameters['prompts']
+        self.data['boot-result'] = 'failed' if self.errors else 'success'
+        return connection
diff -pruN 2015.9-1/lava_dispatcher/pipeline/connections/serial.py 2016.3-1/lava_dispatcher/pipeline/connections/serial.py
--- 2015.9-1/lava_dispatcher/pipeline/connections/serial.py	2015-09-09 14:30:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/connections/serial.py	2016-02-02 08:07:05.000000000 +0000
@@ -20,12 +20,12 @@
 
 import signal
 from lava_dispatcher.pipeline.utils.shell import infrastructure_error
+from lava_dispatcher.pipeline.utils.constants import DEFAULT_SHELL_PROMPT
 from lava_dispatcher.pipeline.action import (
     Action,
     JobError,
-    TestError,
 )
-from lava_dispatcher.pipeline.shell import ShellCommand, ShellSession
+from lava_dispatcher.pipeline.shell import ShellCommand, SimpleSession
 
 # pylint: disable=too-many-public-methods
 
@@ -34,20 +34,22 @@ class ConnectDevice(Action):
     """
     General purpose class to use the device commands to
     make a serial connection to the device. e.g. using ser2net
+    Inherit from this class and change the session_class and/or shell_class for different behaviour.
     """
+
     def __init__(self):
         super(ConnectDevice, self).__init__()
         self.name = "connect-device"
         self.summary = "run connection command"
         self.description = "use the configured command to connect serial to the device"
+        self.session_class = SimpleSession  # wraps the pexpect and provides prompt_str access
+        self.shell_class = ShellCommand  # runs the command to initiate the connection
 
     def validate(self):
         super(ConnectDevice, self).validate()
         if 'connect' not in self.job.device['commands']:
             self.errors = "Unable to connect to device %s - missing connect command." % self.job.device.hostname
             return
-        if 'test_image_prompts' not in self.job.device:
-            self.errors = "Unable to identify test image prompts from device configuration."
         command = self.job.device['commands']['connect']
         exe = ''
         try:
@@ -55,28 +57,27 @@ class ConnectDevice(Action):
         except AttributeError:
             self.errors = "Unable to parse the connection command %s" % command
         self.errors = infrastructure_error(exe)
-        # FIXME: this improves speed but relies on using ser2net
-        # self.job.device['test_image_prompts'].append('ser2net port')
 
     def run(self, connection, args=None):
         if connection:
             self.logger.debug("Already connected")
-            connection.prompt_str = self.job.device['test_image_prompts']
+            if not connection.prompt_str:
+                # prompt_str can be a list or str
+                connection.prompt_str = [DEFAULT_SHELL_PROMPT]
             return connection
         command = self.job.device['commands']['connect'][:]  # local copy to retain idempotency.
         self.logger.info("%s Connecting to device using '%s'", self.name, command)
         signal.alarm(0)  # clear the timeouts used without connections.
         # ShellCommand executes the connection command
-        shell = ShellCommand("%s\n" % command, self.timeout)
+        shell = self.shell_class("%s\n" % command, self.timeout, logger=self.logger)
         if shell.exitstatus:
             raise JobError("%s command exited %d: %s" % (command, shell.exitstatus, shell.readlines()))
         # ShellSession monitors the pexpect
-        connection = ShellSession(self.job, shell)
+        connection = self.session_class(self.job, shell)
         connection.connected = True
         connection = super(ConnectDevice, self).run(connection, args)
-        # append ser2net port to the prompt_str
-        # FIXME: this improves speed but relies on using ser2net
-        connection.prompt_str = self.job.device['test_image_prompts'].append('ser2net port')
+        if not connection.prompt_str:
+            connection.prompt_str = [DEFAULT_SHELL_PROMPT]
         return connection
         # # if the board is running, wait for a prompt - if not, skip.
         # if self.job.device.power_state is 'off':
diff -pruN 2015.9-1/lava_dispatcher/pipeline/connections/ssh.py 2016.3-1/lava_dispatcher/pipeline/connections/ssh.py
--- 2015.9-1/lava_dispatcher/pipeline/connections/ssh.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/connections/ssh.py	2016-03-04 14:35:18.000000000 +0000
@@ -20,14 +20,18 @@
 
 
 import os
+import json
 import signal
 from lava_dispatcher.pipeline.action import JobError
 from lava_dispatcher.pipeline.utils.filesystem import check_ssh_identity_file
 from lava_dispatcher.pipeline.utils.shell import infrastructure_error
 from lava_dispatcher.pipeline.action import Action
 from lava_dispatcher.pipeline.shell import ShellCommand, ShellSession
+from lava_dispatcher.pipeline.protocols.multinode import MultinodeProtocol
+from lava_dispatcher.pipeline.utils.constants import DEFAULT_SHELL_PROMPT
 
-# pylint: disable=too-many-public-methods
+
+# pylint: disable=too-many-public-methods,too-many-instance-attributes
 
 
 class SShSession(ShellSession):
@@ -51,9 +55,8 @@ class ConnectSsh(Action):
     """
     Initiate an SSH connection from the dispatcher to a device.
     Connections from test images can be done in test definitions.
-    This class reads the destination data directly from the device configuration.
-    For SSH connections based on protocols and dynamic data from a test image,
-    use ConnectDynamicSsh.
+    If hostID and host_key are not specified as parameters,
+    this class reads the destination data directly from the device configuration.
     This is a Boot action with Retry support.
 
     Note the syntax requirements of methods:
@@ -66,8 +69,8 @@ class ConnectSsh(Action):
 
     def __init__(self):
         super(ConnectSsh, self).__init__()
-        self.name = "primary-ssh"
-        self.summary = "make an ssh connection to a known device"
+        self.name = "ssh-connection"
+        self.summary = "make an ssh connection to a device"
         self.description = "login to a known device using ssh"
         self.command = None
         self.host = None
@@ -75,6 +78,8 @@ class ConnectSsh(Action):
         self.scp_port = ["-P", "22"]
         self.identity_file = None
         self.ssh_user = 'root'
+        self.primary = False
+        self.scp_prompt = None
 
     def _check_params(self):
         # the deployment strategy ensures that this key exists
@@ -91,8 +96,8 @@ class ConnectSsh(Action):
         if 'ssh' not in params:
             self.errors = "Empty ssh parameter list in device configuration %s" % params
             return
-        if any([option for option in params['ssh']['options'] if type(option) != str]):
-            msg = [(option, type(option)) for option in params['ssh']['options'] if type(option) != str]
+        if any([option for option in params['ssh']['options'] if not isinstance(option, str)]):
+            msg = [(option, type(option)) for option in params['ssh']['options'] if not isinstance(option, str)]
             self.errors = "[%s] Invalid device configuration: all options must be only strings: %s" % (self.name, msg)
             return
         if 'port' in params['ssh']:
@@ -111,6 +116,9 @@ class ConnectSsh(Action):
         super(ConnectSsh, self).validate()
         params = self._check_params()
         self.errors = infrastructure_error('ssh')
+        if 'host' in self.job.device['actions']['deploy']['methods']['ssh']:
+            self.primary = True
+            self.host = self.job.device['actions']['deploy']['methods']['ssh']['host']
         if self.valid:
             self.command = ['ssh']
             self.command.extend(params['options'])
@@ -129,89 +137,33 @@ class ConnectSsh(Action):
         command = self.command[:]  # local copy for idempotency
         command.extend(['-i', self.identity_file])
 
-        if self.host:
+        overrides = self.get_common_data("prepare-scp-overlay", self.key)
+        host_address = None
+        if overrides:
+            host_address = str(self.get_common_data("prepare-scp-overlay", overrides[0]))
+        if host_address:
+            self.logger.info("Using common data to retrieve host_address for secondary connection.")
+            command_str = " ".join(str(item) for item in command)
+            self.logger.info("%s Connecting to device %s using '%s'", self.name, host_address, command_str)
+            command.append("%s@%s" % (self.ssh_user, host_address))
+        elif self.host and self.primary:
+            self.logger.info("Using device data host_address for primary connection.")
+            command_str = " ".join(str(item) for item in command)
+            self.logger.info("%s Connecting to device %s using '%s'", self.name, self.host, command_str)
             command.append("%s@%s" % (self.ssh_user, self.host))
         else:
-            # get from the protocol
-            pass
+            raise JobError("Unable to identify host address. Primary? %s", self.primary)
         command_str = " ".join(str(item) for item in command)
-        # use device data for destination
-        self.logger.info("%s Connecting to device %s using '%s'", self.name, self.host, command_str)
-        shell = ShellCommand("%s\n" % command_str, self.timeout)
+        shell = ShellCommand("%s\n" % command_str, self.timeout, logger=self.logger)
         if shell.exitstatus:
             raise JobError("%s command exited %d: %s" % (
                 self.command, shell.exitstatus, shell.readlines()))
         # SshSession monitors the pexpect
         connection = SShSession(self.job, shell)
         connection = super(ConnectSsh, self).run(connection, args)
-        connection.prompt_str = self.job.device['test_image_prompts']
+        connection.sendline('export PS1="%s"' % DEFAULT_SHELL_PROMPT)
+        connection.prompt_str = [DEFAULT_SHELL_PROMPT]
         connection.connected = True
         self.wait(connection)
-        self.data["boot-result"] = 'success'
+        self.data["boot-result"] = 'failed' if self.errors else 'success'
         return connection
-
-
-class Scp(ConnectSsh):
-    """
-    Use the SSH connection options to copy files over SSH
-    One action per scp operation, just as with download action
-    Needs the reference into the common data for each file to copy
-    This is a Deploy action. lava-start is managed by the protocol,
-    when this action starts, the device is in the "receiving" state.
-    """
-    def __init__(self, key):
-        super(Scp, self).__init__()
-        self.name = "scp-deploy"  # FIXME: confusing name as this is in the connections folder, not actions/deploy.
-        self.summary = "scp over the ssh connection"
-        self.description = "copy a file to a known device using scp"
-        self.key = key
-        self.scp = []
-
-    def validate(self):
-        super(Scp, self).validate()
-        params = self._check_params()
-        self.errors = infrastructure_error('scp')
-        if self.valid:
-            self.scp.append('scp')
-            self.scp.extend(params['options'])
-
-    def run(self, connection, args=None):
-        path = self.get_common_data(self.name, self.key)
-        if not path:
-            self.errors = "%s: could not find details of '%s'" % (self.name, self.key)
-            self.logger.error("%s: could not find details of '%s'" % (self.name, self.key))
-            return connection
-        destination = "%s-%s" % (self.job.job_id, os.path.basename(path))
-        command = self.scp[:]  # local copy
-        command.extend(['-i', self.identity_file])
-        # add the local file as source
-        command.append(path)
-        # add the remote as destination, with :/ top level directory
-        command.append("%s:/%s" % (self.host, destination))
-        command_str = " ".join(str(item) for item in command)
-        self.logger.info("Copying %s using %s" % (self.key, command_str))
-        self.run_command(command)
-        connection = super(Scp, self).run(connection, args)
-        self.wait(connection)
-        self.set_common_data('scp-overlay-unpack', 'overlay', destination)
-        self.wait(connection)
-        return connection
-
-
-class ConnectDynamicSsh(ConnectSsh):
-    """
-    Adaptation to read the destination from common data / protocol
-    Connect from the dispatcher to a dynamically provisioned ssh server
-    Returns a new Connection.
-    """
-    def __init__(self):
-        super(ConnectDynamicSsh, self).__init__()
-        self.name = "ssh-connect"
-        self.summary = "connect to a test image using ssh"
-        self.description = "login to a test image using a declared IP address"
-
-    def run(self, connection, args=None):
-        if connection:
-            self.logger.debug("Already connected")
-            return connection
-        # FIXME
diff -pruN 2015.9-1/lava_dispatcher/pipeline/deployment_data.py 2016.3-1/lava_dispatcher/pipeline/deployment_data.py
--- 2015.9-1/lava_dispatcher/pipeline/deployment_data.py	2015-08-07 08:19:39.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/deployment_data.py	2016-03-02 14:34:40.000000000 +0000
@@ -68,9 +68,9 @@ android = deployment_data_dict({  # pyli
     # for lava-test-shell
     'distro': 'android',
     'lava_test_sh_cmd': '/system/bin/sh',
-    'lava_test_dir': '/data/lava-%s',
+    'lava_test_dir': '/data/local/tmp/lava-%s',
     'lava_test_results_part_attr': 'data_part_android_org',
-    'lava_test_results_dir': '/lava-%s',
+    'lava_test_results_dir': '/data/local/tmp/lava-%s',
     'lava_test_shell_file': None,
 })
 
diff -pruN 2015.9-1/lava_dispatcher/pipeline/device.py 2016.3-1/lava_dispatcher/pipeline/device.py
--- 2015.9-1/lava_dispatcher/pipeline/device.py	2015-09-09 14:30:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/device.py	2016-02-02 08:07:05.000000000 +0000
@@ -53,6 +53,12 @@ class PipelineDevice(dict):
         return ''
 
     @property
+    def soft_reset_command(self):
+        if 'commands' in self and 'soft_reset' in self['commands']:
+            return self['commands']['soft_reset']
+        return ''
+
+    @property
     def power_command(self):
         if 'commands' in self and 'power_on' in self['commands']:
             return self['commands']['power_on']
@@ -98,7 +104,7 @@ class NewDevice(PipelineDevice):
             with open(target) as f_in:
                 self.update(yaml.load(f_in))
         except yaml.parser.ParserError:
-            raise RuntimeError("%s could not be parsed" % device_file)
+            raise RuntimeError("%s could not be parsed" % target)
 
         # Get the device name (/path/to/kvm01.yaml => kvm01)
         self.target = os.path.splitext(os.path.basename(target))[0]
diff -pruN 2015.9-1/lava_dispatcher/pipeline/devices/bbb-01.yaml 2016.3-1/lava_dispatcher/pipeline/devices/bbb-01.yaml
--- 2015.9-1/lava_dispatcher/pipeline/devices/bbb-01.yaml	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/devices/bbb-01.yaml	2016-03-02 14:34:40.000000000 +0000
@@ -14,6 +14,21 @@ parameters:
       SanDisk_Ultra:
         uuid: usb-SanDisk_Ultra_20060775320F43006019-0:0
         boot_part: 0:1
+  interfaces:
+    eth0: # fake values just for the vland unit test
+      sysfs: "/sys/devices/pci0000:00/0000:00:19.0/net/eth0"
+      mac: "f0:de:f1:46:8c:21"
+      switch: 192.168.0.1
+      port: 5
+      tags:
+      - 1G
+    eth1:
+      sysfs: "/sys/devices/pci0000:00/0000:00:1c.1/0000:03:00.0/net/eth1"
+      mac: "00:24:d7:9b:c0:8c"
+      tags:
+      - 1G
+      switch: 192.168.0.1
+      port: 7
 commands:
   connect: telnet localhost 6000
   hard_reset: /usr/bin/pduclient --daemon localhost --hostname pdu --command reboot --port 08
@@ -56,6 +71,7 @@ actions:
           bootloader_prompt: U-Boot
           boot_message: Booting Linux
           send_char: False
+          mkimage_arch: arm # string to pass to mkimage -A when adding UBoot headers
           # interrupt: # character needed to interrupt u-boot, single whitespace by default
           # method specific stanza
         oe:
@@ -123,19 +139,11 @@ actions:
           - boot
 
 timeouts:
-  call-kexec:
-    seconds: 45
-
-test_image_prompts:
-  - '(initramfs)'
-  - 'linaro-test'
-  - '/ #'
-  - 'root@android'
-  - 'root@linaro'
-  - 'root@master'
-  - 'root@debian'
-  - 'root@linaro-nano:~#'
-  - 'root@linaro-developer:~#'
-  - 'root@linaro-server:~#'
-  - 'root@genericarmv7a:~#'
-  - 'root@genericarmv8:~#'
+  actions:
+    call-kexec:
+      seconds: 45
+    uboot-retry:
+      seconds: 90
+  connections:
+    uboot-retry:
+      seconds: 45
diff -pruN 2015.9-1/lava_dispatcher/pipeline/devices/cubie1.yaml 2016.3-1/lava_dispatcher/pipeline/devices/cubie1.yaml
--- 2015.9-1/lava_dispatcher/pipeline/devices/cubie1.yaml	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/devices/cubie1.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -103,18 +103,3 @@ actions:
           - "setenv nfsargs 'setenv bootargs console=ttyS0,115200n8 root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS},tcp,hard,intr ip=dhcp'"
           - setenv bootcmd 'dhcp; setenv serverip {SERVER_IP}; run loadkernel; run loadinitrd; run loadfdt; run nfsargs; {BOOTX}'
           - boot
-
-test_image_prompts:
-  # TODO: we should'nt need such a long list
-  - '\(initramfs\)'
-  - '/ #'
-  - 'root@android'
-  - 'root@linaro'
-  - 'root@master'
-  - 'root@debian'
-  - 'root@linaro-nano:~#'
-  - 'root@linaro-developer:~#'
-  - 'root@linaro-server:~#'
-  - 'root@genericarmv7a:~#'
-  - 'root@genericarmv8:~#'
-  - 'linaro-test'
diff -pruN 2015.9-1/lava_dispatcher/pipeline/devices/hi6220-hikey-01.yaml 2016.3-1/lava_dispatcher/pipeline/devices/hi6220-hikey-01.yaml
--- 2015.9-1/lava_dispatcher/pipeline/devices/hi6220-hikey-01.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/devices/hi6220-hikey-01.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,53 @@
+commands:
+    connect: telnet 192.168.1.200 8001
+    hard_reset: /home/stylesen/work/pdu/pdu-control-reset.sh 0 5 1 5
+    soft_reset: fastboot -s usb:2-1.2 reboot
+    power_off: /home/stylesen/work/pdu/pdu-control-off.sh 5
+    power_on: /home/stylesen/work/pdu/pdu-control-on.sh 5
+    adb_command: adb -s 0123456789
+    fastboot_command: fastboot -s usb:2-1.2
+device_type: hi6220-hikey
+adb_serial_number: 0123456789
+fastboot_serial_number: usb:2-1.2
+
+
+actions:
+  deploy:
+    methods:
+      fastboot:
+    connections:
+      serial:
+  boot:
+    connections:
+      adb:
+      serial:
+    methods:
+      uefi-menu:
+        parameters:
+          interrupt_prompt: "Android Fastboot mode"
+          interrupt_string: ' '
+          item_markup:
+            - "["
+            - "]"
+          item_class: '0-9'
+          separator: ' '
+          label_class: 'a-zA-Z0-9\s\:'
+          bootloader_prompt: 'Start:'
+          boot_message: "Booting Linux Kernel..."
+          send_char: True
+          character_delay: 10
+        fastboot:
+        - select:
+            items:
+             - 'boot from eMMC'
+
+timeouts:
+  actions:
+    apply-overlay-image:
+      seconds: 120
+    umount-retry:
+      seconds: 45
+    lava-test-shell:
+      seconds: 600
+    power_off:
+      seconds: 10
diff -pruN 2015.9-1/lava_dispatcher/pipeline/devices/kvm01.yaml 2016.3-1/lava_dispatcher/pipeline/devices/kvm01.yaml
--- 2015.9-1/lava_dispatcher/pipeline/devices/kvm01.yaml	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/devices/kvm01.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -32,18 +32,3 @@ timeouts:
     seconds: 57
   lava-test-shell:
     seconds: 30
-
-
-test_image_prompts:
-  # TODO: we should'nt need such a long list
-  - '\(initramfs\)'
-  - '/ #'
-  - 'root@android'
-  - 'root@linaro'
-  - 'root@master'
-  - 'root@debian'
-  - 'root@linaro-nano:~#'
-  - 'root@linaro-developer:~#'
-  - 'root@linaro-server:~#'
-  - 'root@genericarmv7a:~#'
-  - 'root@genericarmv8:~#'
diff -pruN 2015.9-1/lava_dispatcher/pipeline/devices/kvm02.yaml 2016.3-1/lava_dispatcher/pipeline/devices/kvm02.yaml
--- 2015.9-1/lava_dispatcher/pipeline/devices/kvm02.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/devices/kvm02.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -0,0 +1,32 @@
+device_type: qemu
+
+actions:
+  deploy:
+    methods:
+      image:
+        parameters:
+          root_part: 1
+  boot:
+    methods:
+      qemu:
+        parameters:
+          command:
+            qemu-system-arm
+          boot_options:
+            root: /dev/ram0
+            console: console=ttyAMA0 115200 androidboot.hardware=vexpress qemu=1
+          options:
+            - -nographic
+            - -M vexpress-a15
+            - -smp 2
+            - -net nic -net user
+            - -m 1024
+            - -no-reboot
+
+timeouts:
+  apply-overlay-image:
+    minutes: 2
+  umount-retry:
+    seconds: 57
+  lava-test-shell:
+    seconds: 30
diff -pruN 2015.9-1/lava_dispatcher/pipeline/devices/lxc-01.yaml 2016.3-1/lava_dispatcher/pipeline/devices/lxc-01.yaml
--- 2015.9-1/lava_dispatcher/pipeline/devices/lxc-01.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/devices/lxc-01.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,23 @@
+device_type: lxc
+
+actions:
+  deploy:
+    methods:
+      lxc:
+    connections:
+      lxc:
+  boot:
+    connections:
+      lxc:
+    methods:
+      lxc:
+
+timeouts:
+  apply-overlay-image:
+    seconds: 120
+  umount-retry:
+    seconds: 45
+  lava-test-shell:
+    seconds: 30
+  power_off:
+    seconds: 5
diff -pruN 2015.9-1/lava_dispatcher/pipeline/devices/mustang-uefi.yaml 2016.3-1/lava_dispatcher/pipeline/devices/mustang-uefi.yaml
--- 2015.9-1/lava_dispatcher/pipeline/devices/mustang-uefi.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/devices/mustang-uefi.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -0,0 +1,122 @@
+device_type: mustang-uefi
+
+commands:
+  connect: telnet localhost 6002
+  #hard_reset: /usr/bin/pduclient --daemon services --hostname pdu09 --command reboot --port 05
+  #power_off: /usr/bin/pduclient --daemon services --hostname pdu09 --command off --port 05
+  #power_on: /usr/bin/pduclient --daemon services --hostname pdu09 --command on --port 05
+
+
+actions:
+  deploy:
+    methods:
+      tftp:
+      usb:
+      ssh:
+        options:
+          - '-o'
+          - 'Compression=yes'
+          - '-o'
+          - 'UserKnownHostsFile=/dev/null'
+          - '-o'
+          - 'PasswordAuthentication=no'
+          - '-o'
+          - 'StrictHostKeyChecking=no'
+          - '-o'
+          - 'LogLevel=FATAL'
+          - '-l'
+          - 'root '
+          - '-p'
+          - '22'
+        host: 172.16.200.165
+        identity_file: device/dynamic_vm_keys/lava
+  boot:
+    connections:
+      serial:
+      ssh:
+    methods:
+      kexec:
+      ssh:
+      uefi-shell:
+        parameters:
+        master:
+          items:
+          - 'Shell'
+      uefi-menu:
+        parameters:
+          interrupt_prompt: The default boot selection will start in
+          interrupt_string: ' '
+          item_markup:
+            - "["
+            - "]"
+          item_class: '0-9'
+          separator: ' '
+          label_class: 'a-zA-Z0-9\s\:'
+          bootloader_prompt: 'Start:'
+          boot_message: "Loaded: LinuxImage"
+          send_char: True  # redefine to take an integer? or drop and set True if the delay is defined?
+          character_delay: 10
+          # interrupt: # character needed to interrupt u-boot, single whitespace by default
+          # method specific stanza
+        nfs:
+        - select:
+            items:
+             - 'Boot Manager'
+            wait: "Choice:"
+        - select:
+            items:
+             - 'Remove Boot Device Entry'
+            fallback: Return to Main Menu
+            wait: Delete entry
+        - select:
+            items:
+             - '{TEST_MENU_NAME}'
+            wait: "Choice:"
+        - select:
+            items:
+               - 'Add Boot Device Entry'
+            wait: "Select the Boot Device:"
+        - select:
+            items:
+               - 'TFTP on MAC Address: 00:01:73:69:5A:EF'  # substitute the MAC in the template
+            wait: "Get the IP address from DHCP:"
+        - select:
+            enter: y
+            wait: "Get the TFTP server IP address:"
+        - select:
+            enter: '{SERVER_IP}'
+            wait: "File path of the EFI Application or the kernel :"
+        - select:
+            enter: '{KERNEL}'
+            wait: 'Is an EFI Application?'
+        - select:
+            enter: n
+            wait: "Boot Type:"
+        - select:
+            enter: f
+            wait: "Add an initrd:"
+        - select:
+            enter: n
+            wait: "Get the IP address from DHCP:"
+        - select:
+            enter: y
+            wait: "Get the TFTP server IP address:"
+        - select:
+            enter: '{SERVER_IP}'
+            wait: "File path of the FDT :"
+        - select:
+            enter: '{DTB}'
+            wait: 'Arguments to pass to the binary:'
+        - select:
+            enter: "console=ttyS0,115200 earlyprintk=uart8250-32bit,0x1c020000 debug root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS},tcp,hard,intr ip=dhcp"
+            wait: 'Description for this new Entry:'
+        - select:
+            enter: '{TEST_MENU_NAME}'
+            wait: "Choice:"
+        - select:
+            items:
+              - 'Return to main menu'
+            wait: "Start:"
+        - select:
+            items:
+              - LAVA NFS Test Image
diff -pruN 2015.9-1/lava_dispatcher/pipeline/devices/nexus10-01.yaml 2016.3-1/lava_dispatcher/pipeline/devices/nexus10-01.yaml
--- 2015.9-1/lava_dispatcher/pipeline/devices/nexus10-01.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/devices/nexus10-01.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,36 @@
+commands:
+    connect: adb -s R32D300FRYP shell
+    soft_reboot: adb -s R32D300FRYP reboot bootloader
+    adb_command: adb -s R32D300FRYP
+    fastboot_command: fastboot -s R32D300FRYP
+device_type: nexus10
+adb_serial_number: R32D300FRYP
+fastboot_serial_number: R32D300FRYP
+
+
+actions:
+  deploy:
+    methods:
+      fastboot:
+    connections:
+      serial:
+      adb:
+  boot:
+    connections:
+      adb:
+    methods:
+      fastboot:
+
+timeouts:
+  actions:
+    apply-overlay-image:
+      seconds: 120
+    umount-retry:
+      seconds: 45
+    lava-test-shell:
+      seconds: 30
+    power_off:
+      seconds: 5
+  connections:
+    uboot-retry:
+      seconds: 60
diff -pruN 2015.9-1/lava_dispatcher/pipeline/devices/nexus4-01.yaml 2016.3-1/lava_dispatcher/pipeline/devices/nexus4-01.yaml
--- 2015.9-1/lava_dispatcher/pipeline/devices/nexus4-01.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/devices/nexus4-01.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,36 @@
+commands:
+    connect: adb -s 04f228d1d9c76f39 shell
+    soft_reboot: adb -s 04f228d1d9c76f39 reboot bootloader
+    adb_command: adb -s 04f228d1d9c76f39
+    fastboot_command: fastboot -s 04f228d1d9c76f39
+device_type: nexus4
+adb_serial_number: 04f228d1d9c76f39
+fastboot_serial_number: 04f228d1d9c76f39
+
+
+actions:
+  deploy:
+    methods:
+      fastboot:
+    connections:
+      serial:
+      adb:
+  boot:
+    connections:
+      adb:
+    methods:
+      fastboot:
+
+timeouts:
+  actions:
+    apply-overlay-image:
+      seconds: 120
+    umount-retry:
+      seconds: 45
+    lava-test-shell:
+      seconds: 30
+    power_off:
+      seconds: 5
+  connections:
+    uboot-retry:
+      seconds: 60
diff -pruN 2015.9-1/lava_dispatcher/pipeline/devices/ssh-host-01.yaml 2016.3-1/lava_dispatcher/pipeline/devices/ssh-host-01.yaml
--- 2015.9-1/lava_dispatcher/pipeline/devices/ssh-host-01.yaml	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/devices/ssh-host-01.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -34,26 +34,11 @@ actions:
           - '-u'
           - 'root'
         name:
-          - unstable
-          - sid
+          - raring
+          - ringtail
       ssh:
       vm:
 
 timeouts:
   lava-test-shell:
     seconds: 30
-
-
-test_image_prompts:
-  # TODO: we should'nt need such a long list
-  - '(initramfs)'
-  - '/ #'
-  - 'root@android'
-  - 'root@linaro'
-  - 'root@master'
-  - 'root@debian'
-  - 'root@linaro-nano:~#'
-  - 'root@linaro-developer:~#'
-  - 'root@linaro-server:~#'
-  - 'root@genericarmv7a:~#'
-  - 'root@genericarmv8:~#'
diff -pruN 2015.9-1/lava_dispatcher/pipeline/devices/tk1-01.yaml 2016.3-1/lava_dispatcher/pipeline/devices/tk1-01.yaml
--- 2015.9-1/lava_dispatcher/pipeline/devices/tk1-01.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/devices/tk1-01.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,69 @@
+device_type: tk1
+
+parameters:
+  bootm:
+   kernel: '0x81000000'
+   ramdisk: '0x83000000'
+   dtb: '0x82000000'
+  bootz:
+   kernel: '0x81000000'
+   ramdisk: '0x83000000'
+   dtb: '0x82000000'
+
+commands:
+  connect: telnet bumblebee 8000
+  hard_reset: /usr/bin/pduclient --hostname pdu02 --daemon ironhide --port 03 --command reboot --delay 5
+  power_off: /usr/bin/pduclient --hostname pdu02 --daemon ironhide --port 03 --command off
+  power_on: /usr/bin/pduclient --hostname pdu02 --daemon ironhide --port 03 --command on
+
+timeouts:
+  actions:
+    call-kexec:
+      seconds: 45
+    uboot-retry:
+      seconds: 90
+  connections:
+    uboot-retry:
+      seconds: 45
+
+actions:
+  deploy:
+    methods:
+      tftp:
+  boot:
+    connections:
+      serial:
+      ssh:
+    methods:
+      u-boot:
+        ramdisk:
+            commands:
+            - setenv autoload no
+            - setenv initrd_high '0xffffffff'
+            - setenv fdt_high '0xffffffff'
+            - setenv kernel_addr_r '{KERNEL_ADDR}'
+            - setenv initrd_addr_r '{RAMDISK_ADDR}'
+            - setenv fdt_addr_r '{DTB_ADDR}'
+            - setenv loadkernel 'tftp ${kernel_addr_r} {KERNEL}'
+            - setenv loadinitrd 'tftp ${initrd_addr_r} {RAMDISK}; setenv initrd_size ${filesize}'
+            - setenv loadfdt 'tftp ${fdt_addr_r} {DTB}'
+            - setenv bootargs 'console=ttyS0,115200n8 earlyprintk root=/dev/ram0 ip=dhcp'
+            - setenv bootcmd 'dhcp; setenv serverip {SERVER_IP}; run loadkernel; run loadinitrd; run loadfdt; {BOOTX}'
+            - boot
+        nfs:
+            commands:
+            - setenv autoload no
+            - setenv initrd_high '0xffffffff'
+            - setenv fdt_high '0xffffffff'
+            - setenv kernel_addr_r '{KERNEL_ADDR}'
+            - setenv fdt_addr_r '{DTB_ADDR}'
+            - setenv loadkernel 'tftp ${kernel_addr_r} {KERNEL}'
+            - setenv loadfdt 'tftp ${fdt_addr_r} {DTB}'
+            - "setenv bootargs 'console=ttyS0,115200n8 earlyprintk root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS},tcp,hard,intr ip=dhcp'"
+            - setenv bootcmd 'dhcp; setenv serverip {SERVER_IP}; run loadkernel; run loadinitrd; run loadfdt; {BOOTX}'
+            - boot
+        parameters:
+          bootloader_prompt: Jetson TK1
+          boot_message: Booting Linux
+          send_char: False
+          mkimage_arch: arm
diff -pruN 2015.9-1/lava_dispatcher/pipeline/devices/x86-01.yaml 2016.3-1/lava_dispatcher/pipeline/devices/x86-01.yaml
--- 2015.9-1/lava_dispatcher/pipeline/devices/x86-01.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/devices/x86-01.yaml	2016-02-03 11:46:15.000000000 +0000
@@ -0,0 +1,64 @@
+device_type: x86
+parameters:
+  stuff:
+    - junk
+commands:
+  connect: telnet bumblebee 8003
+  hard_reset: /usr/bin/pduclient --hostname pdu02 --daemon ironhide --port 08 --command reboot --delay 5
+  power_off: /usr/bin/pduclient --hostname pdu02 --daemon ironhide --port 08 --command off
+  power_on: /usr/bin/pduclient --hostname pdu02 --daemon ironhide --port 08 --command on
+
+
+actions:
+  deploy:
+    methods:
+      tftp:
+  boot:
+    connections:
+      serial:
+    methods:
+      ipxe:
+        parameters:
+          bootloader_prompt: iPXE>
+          boot_message: Linux version
+          send_char: True
+          use_bootscript: True
+          lava_mac: 00:90:05:af:00:7d
+        nfs:
+          bootscript_commands:
+          - dhcp net0
+          - chain {BOOTSCRIPT}
+          commands:
+          - dhcp net0
+          - set console console=ttyS0,115200n8 lava_mac={LAVA_MAC}
+          - "set extraargs root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS},tcp,hard,intr ip=eth0:dhcp"
+          - kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}
+          - initrd tftp://{SERVER_IP}/{RAMDISK}
+          - boot
+        ramdisk:
+          bootscript_commands:
+          - dhcp net0
+          - chain {BOOTSCRIPT}
+          commands:
+          - dhcp net0
+          - set console console=ttyS0,115200n8 lava_mac={LAVA_MAC}
+          - set extraargs init=/sbin/init ip=dhcp
+          - kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}
+          - initrd tftp://{SERVER_IP}/{RAMDISK}
+          - boot
+
+timeouts:
+  actions:
+    bootloader-retry:
+      seconds: 120
+    bootloader-interrupt:
+      seconds: 300
+    bootloader-commands:
+      seconds: 120
+  connections:
+    bootloader-retry:
+      seconds: 120
+    bootloader-interrupt:
+      seconds: 300
+    bootloader-commands:
+      seconds: 120
diff -pruN 2015.9-1/lava_dispatcher/pipeline/device_types/beaglebone-black.conf 2016.3-1/lava_dispatcher/pipeline/device_types/beaglebone-black.conf
--- 2015.9-1/lava_dispatcher/pipeline/device_types/beaglebone-black.conf	2015-09-03 13:34:38.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/device_types/beaglebone-black.conf	2015-12-14 09:33:11.000000000 +0000
@@ -26,6 +26,7 @@ actions:
           parameters:
             bootloader_prompt: U-Boot
             boot_message: Booting Linux
+            mkimage_arch: arm # string to pass to mkimage -A when adding UBoot headers
             send_char: False
             # interrupt: # character needed to interrupt u-boot, single whitespace by default
           # method specific stanza
@@ -57,7 +58,7 @@ actions:
             - setenv loadinitrd 'tftp ${initrd_addr_r} {RAMDISK}; setenv initrd_size ${filesize}'
             - setenv loadfdt 'tftp ${fdt_addr_r} {DTB}'
             # Always quote the entire string if the command includes a colon to support correct YAML.
-            - "setenv nfsargs 'setenv bootargs console=ttyO0,115200n8 root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS},tcp,hard,intr ip=dhcp'"
+            - "setenv nfsargs 'setenv bootargs console=ttyO0,115200n8 root=/dev/nfs rw nfsroot={NFS_SERVER_IP}:{NFSROOTFS},tcp,hard,intr ip=dhcp'"
             - setenv bootcmd 'dhcp; setenv serverip {SERVER_IP}; run loadkernel; run loadinitrd; run loadfdt; run nfsargs; {BOOTX}'
             - boot
           ramdisk:
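
The new mkimage_arch parameter is the architecture string handed to mkimage -A when the dispatcher adds U-Boot headers. For illustration only, an equivalent manual invocation (file paths invented for the example) would be:

    # Illustrative only: wrap a ramdisk with a U-Boot legacy header,
    # passing the architecture taken from mkimage_arch. Paths are examples.
    import subprocess

    mkimage_arch = 'arm'
    subprocess.check_call([
        'mkimage',
        '-A', mkimage_arch,
        '-O', 'linux',
        '-T', 'ramdisk',
        '-C', 'none',
        '-d', '/tmp/ramdisk.cpio.gz',
        '/tmp/uInitrd',
    ])
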
diff -pruN 2015.9-1/lava_dispatcher/pipeline/device_types/hi6220-hikey.conf 2016.3-1/lava_dispatcher/pipeline/device_types/hi6220-hikey.conf
--- 2015.9-1/lava_dispatcher/pipeline/device_types/hi6220-hikey.conf	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/device_types/hi6220-hikey.conf	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,10 @@
+actions:
+  deploy:
+    # list of deployment methods which this device supports
+    methods:
+      - fastboot
+
+  boot:
+    # list of boot methods which this device supports.
+    methods:
+      - uefi-menu
diff -pruN 2015.9-1/lava_dispatcher/pipeline/device_types/lxc.conf 2016.3-1/lava_dispatcher/pipeline/device_types/lxc.conf
--- 2015.9-1/lava_dispatcher/pipeline/device_types/lxc.conf	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/device_types/lxc.conf	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,10 @@
+actions:
+  deploy:
+    # list of deployment methods which this device supports
+    methods:
+      - lxc
+
+  boot:
+    # list of boot methods which this device supports.
+    methods:
+      - lxc
diff -pruN 2015.9-1/lava_dispatcher/pipeline/device_types/nexus4.conf 2016.3-1/lava_dispatcher/pipeline/device_types/nexus4.conf
--- 2015.9-1/lava_dispatcher/pipeline/device_types/nexus4.conf	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/device_types/nexus4.conf	2015-11-30 21:58:47.000000000 +0000
@@ -0,0 +1,10 @@
+actions:
+  deploy:
+    # list of deployment methods which this device supports
+    methods:
+      - fastboot
+
+  boot:
+    # list of boot methods which this device supports.
+    methods:
+      - fastboot
diff -pruN 2015.9-1/lava_dispatcher/pipeline/device_types/panda-es.conf 2016.3-1/lava_dispatcher/pipeline/device_types/panda-es.conf
--- 2015.9-1/lava_dispatcher/pipeline/device_types/panda-es.conf	2015-09-03 13:34:38.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/device_types/panda-es.conf	2015-12-14 09:33:11.000000000 +0000
@@ -48,7 +48,7 @@ actions:
             - setenv loadkernel 'tftp ${kernel_addr_r} {KERNEL}'
             - setenv loadinitrd 'tftp ${initrd_addr_r} {RAMDISK}; setenv initrd_size ${filesize}'
             - setenv loadfdt 'tftp ${fdt_addr_r} {DTB}'
-            - "setenv nfsargs 'setenv bootargs console=ttyO2,115200n8 root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS}
+            - "setenv nfsargs 'setenv bootargs console=ttyO2,115200n8 root=/dev/nfs rw nfsroot={NFS_SERVER_IP}:{NFSROOTFS}
               ip=dhcp fixrtc nocompcache vram=48M omapfb.vram=0:24M mem=456M@0x80000000 mem=512M@0xA0000000 init=init'"
             - setenv bootcmd 'usb start; dhcp; setenv serverip {SERVER_IP}; run loadkernel; run loadinitrd; run loadfdt; run nfsargs; {BOOTX}'
             - boot
diff -pruN 2015.9-1/lava_dispatcher/pipeline/device_types/tk1.conf 2016.3-1/lava_dispatcher/pipeline/device_types/tk1.conf
--- 2015.9-1/lava_dispatcher/pipeline/device_types/tk1.conf	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/device_types/tk1.conf	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,43 @@
+# replacement device_type config for the tk1 type
+
+parameters:
+  bootm:
+   kernel: '0x81000000'
+   ramdisk: '0x83000000'
+   dtb: '0x82000000'
+  bootz:
+   kernel: '0x81000000'
+   ramdisk: '0x83000000'
+   dtb: '0x82000000'
+
+actions:
+  deploy:
+    # list of deployment methods which this device supports
+    methods:
+      # - image # not ready yet
+      - tftp
+
+  boot:
+    # list of boot methods which this device supports.
+    methods:
+      - u-boot:
+          parameters:
+            bootloader_prompt: Jetson TK1
+            boot_message: Booting Linux
+            mkimage_arch: arm # string to pass to mkimage -A when adding UBoot headers
+            send_char: False
+            # interrupt: # character needed to interrupt u-boot, single whitespace by default
+          ramdisk:
+            commands:
+            - setenv autoload no,
+            - setenv initrd_high "'0xffffffff'",
+            - setenv fdt_high "'0xffffffff'",
+            - setenv kernel_addr_r "'{KERNEL_ADDR}'",
+            - setenv initrd_addr_r "'{RAMDISK_ADDR}'",
+            - setenv fdt_addr_r "'{DTB_ADDR}'",
+            - setenv loadkernel "'tftp ${kernel_addr_r} {KERNEL}'",
+            - setenv loadinitrd "'tftp ${initrd_addr_r} {RAMDISK}; setenv initrd_size ${filesize}'",
+            - setenv loadfdt "'tftp ${fdt_addr_r} {DTB}'",
+            - setenv bootargs "'console=ttyS0,115200n8 earlyprintk root=/dev/ram0 ip=dhcp'",
+            - setenv bootcmd "'dhcp; setenv serverip {SERVER_IP}; run loadkernel; run loadinitrd; run loadfdt; {BOOTX}'",
+            - boot
diff -pruN 2015.9-1/lava_dispatcher/pipeline/device_types/x86.conf 2016.3-1/lava_dispatcher/pipeline/device_types/x86.conf
--- 2015.9-1/lava_dispatcher/pipeline/device_types/x86.conf	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/device_types/x86.conf	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,29 @@
+# replacement device_type config for the x86 type
+
+parameters:
+actions:
+  boot:
+    # list of boot methods which this device supports.
+    methods:
+      ipxe:
+        parameters:
+          bootloader_prompt: iPXE>
+          boot_message: Linux version
+          send_char: True
+          use_bootscript: False
+        nfs:
+          commands:
+          - dhcp net0,
+          - set console console=ttyS0,115200n8
+          - "set extraargs root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS},tcp,hard,intr ip=eth0:dhcp"
+          - kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}
+          - initrd tftp://{SERVER_IP}/{RAMDISK}
+          - boot
+        ramdisk:
+          commands:
+          - dhcp net0
+          - set console console=ttyS0,115200n8
+          - set extraargs init=/sbin/init ip=dhcp
+          - kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}
+          - initrd tftp://{SERVER_IP}/{RAMDISK}
+          - boot
diff -pruN 2015.9-1/lava_dispatcher/pipeline/job.py 2016.3-1/lava_dispatcher/pipeline/job.py
--- 2015.9-1/lava_dispatcher/pipeline/job.py	2015-09-09 13:54:16.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/job.py	2016-03-02 14:31:44.000000000 +0000
@@ -60,6 +60,7 @@ class Job(object):  # pylint: disable=to
         ]
         self.timeout = None
         self.protocols = []
+        self.compatibility = 2
         # TODO: we are now able to create the logger when the job is started,
         # allowing the functions that are called before run() to log.
         # Do we want to do something with this?
@@ -101,6 +102,7 @@ class Job(object):  # pylint: disable=to
     def describe(self):
         return {'device': self.device,
                 'job': self.parameters,
+                'compatibility': self.compatibility,
                 'pipeline': self.pipeline.describe()}
 
     def validate(self, simulate=False):
@@ -109,6 +111,19 @@ class Job(object):  # pylint: disable=to
         Then needs to validate the context
         Finally expose the context so that actions can see it.
         """
+        for protocol in self.protocols:
+            try:
+                protocol.configure(self.device, self)
+            except KeyboardInterrupt:
+                self.pipeline.cleanup_actions(connection=None, message="Canceled")
+                self.logger.info("Canceled")
+                return 1  # equivalent to len(self.pipeline.errors)
+            except (JobError, RuntimeError, KeyError, TypeError) as exc:
+                raise JobError(exc)
+            if not protocol.valid:
+                msg = "protocol %s has errors: %s" % (protocol.name, protocol.errors)
+                self.logger.exception(msg)
+                raise JobError(msg)
         if simulate:
             # output the content and then any validation errors (python3 compatible)
             print(yaml.dump(self.describe()))  # pylint: disable=superfluous-parens
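
The new block in Job.validate() calls configure() on every protocol before the pipeline itself is validated and turns any protocol error into a JobError. A simplified stand-in for the contract a protocol must satisfy for that loop (not the real Protocol base class):

    # Simplified stand-in: configure() may raise, and valid/errors report
    # accumulated problems, matching how Job.validate() uses protocols.
    class FakeProtocol(object):
        name = 'fake'

        def __init__(self):
            self.errors = []

        @property
        def valid(self):
            return not self.errors

        def configure(self, device, job):
            if 'hostname' not in device:
                self.errors.append('device has no hostname')

    proto = FakeProtocol()
    proto.configure({'hostname': 'staging-device-01'}, job=None)
    assert proto.valid, proto.errors
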
diff -pruN 2015.9-1/lava_dispatcher/pipeline/logical.py 2016.3-1/lava_dispatcher/pipeline/logical.py
--- 2015.9-1/lava_dispatcher/pipeline/logical.py	2015-09-10 08:06:51.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/logical.py	2016-03-02 14:34:40.000000000 +0000
@@ -1,4 +1,4 @@
-# Copyright (C) 2014 Linaro Limited
+# Copyright (C) 2014,2015 Linaro Limited
 #
 # Author: Neil Williams <neil.williams@linaro.org>
 #
@@ -67,6 +67,8 @@ class RetryAction(Action):
                 time.sleep(self.sleep)
         if not self.valid:
             self.errors = "%s retries failed for %s" % (self.retries, self.name)
+            if "boot-result" not in self.data:
+                self.data['boot-result'] = 'failed'
         return connection
 
     # FIXME: needed?
@@ -152,11 +154,14 @@ class Deployment(object):  # pylint: dis
 
     priority = 0
     action_type = 'deploy'
+    compatibility = 0
 
     def __init__(self, parent):
         self.__parameters__ = {}
         self.pipeline = parent
         self.job = parent.job
+        if self.compatibility > self.job.compatibility:
+            self.job.compatibility = self.compatibility
 
     @property
     def parameters(self):
@@ -213,11 +218,14 @@ class Boot(object):
 
     priority = 0
     action_type = 'boot'
+    compatibility = 0
 
     def __init__(self, parent):
         self.__parameters__ = {}
         self.pipeline = parent
         self.job = parent.job
+        if self.compatibility > self.job.compatibility:
+            self.job.compatibility = self.compatibility
 
     @classmethod
     def accepts(cls, device, parameters):  # pylint: disable=unused-argument
@@ -252,11 +260,14 @@ class LavaTest(object):  # pylint: disab
 
     priority = 1
     action_type = 'test'
+    compatibility = 1  # used directly
 
     def __init__(self, parent):
         self.__parameters__ = {}
         self.pipeline = parent
         self.job = parent.job
+        if self.compatibility > self.job.compatibility:
+            self.job.compatibility = self.compatibility
 
     @classmethod
     def accepts(cls, device, parameters):  # pylint: disable=unused-argument
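
Each strategy class now declares a compatibility level and raises the job's level to the highest value seen; the parser later compares that against any compatibility requested in the job definition. A toy sketch of the "highest level wins" behaviour:

    # Toy sketch of the compatibility handling above: the job keeps the
    # highest compatibility of the selected strategies.
    class FakeJob(object):
        compatibility = 2      # baseline, as set in Job.__init__

    def select(job, strategy_compatibility):
        if strategy_compatibility > job.compatibility:
            job.compatibility = strategy_compatibility

    job = FakeJob()
    for level in (0, 1, 0):    # e.g. Deployment, LavaTest, Boot
        select(job, level)
    print(job.compatibility)   # still 2: nothing exceeded the baseline
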
diff -pruN 2015.9-1/lava_dispatcher/pipeline/menus/menus.py 2016.3-1/lava_dispatcher/pipeline/menus/menus.py
--- 2015.9-1/lava_dispatcher/pipeline/menus/menus.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/menus/menus.py	2016-03-02 14:34:48.000000000 +0000
@@ -0,0 +1,241 @@
+# Copyright (C) 2015 Linaro Limited
+#
+# Author: Neil Williams <neil.williams@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+import re
+import pexpect
+from lava_dispatcher.pipeline.shell import ShellSession
+from lava_dispatcher.pipeline.action import Action, JobError
+from lava_dispatcher.pipeline.connections.serial import ConnectDevice
+
+# pylint: disable=too-few-public-methods,too-many-branches
+
+
+class MovementMenu(object):
+
+    def __init__(self):
+        self.start_pos = 0
+        self.label = None
+        self.down_command = None
+
+
+class MenuInterrupt(Action):
+
+    def __init__(self):
+        super(MenuInterrupt, self).__init__()
+        self.name = "menu-interrupt"
+        self.summary = "base menu interrupt action"
+        self.description = "interrupt the bootloader to start the menu handling"
+        self.interrupt_prompt = None
+        self.interrupt_string = None
+
+
+class SelectorMenu(object):
+
+    def __init__(self):
+        self.item_markup = None
+        self.item_class = None
+        self.separator = None
+        self.label_class = None
+        self.prompt = None  # initial prompt
+
+    @property
+    def pattern(self):
+        """
+        This particular pattern property assumes something like:
+        [2] Shell
+        where Shell would be the label and 2 the selector to return.
+        Derive a new class if you have Shell [2]
+        :return: A regex pattern to identify the selector for the matching label.
+        """
+        return "%s([%s]+)%s%s([%s]*)" % (
+            re.escape(self.item_markup[0]),
+            self.item_class,
+            re.escape(self.item_markup[1]),
+            self.separator,
+            self.label_class
+        )
+
+    def select(self, output, label):
+        output_list = output.split('\n')
+        for line in output_list[::-1]:  # start from the end of the list to catch the latest menu first.
+            line = line.strip()
+            match = re.search(self.pattern, line)
+            if match:
+                if label == match.group(2):
+                    return match.group(1)
+        return None
+
+
+class MenuSession(ShellSession):
+
+    def wait(self):
+        """
+        Simple wait without sending blank lines, as that causes the menu
+        to advance without data, which can produce blank entries and can
+        cause the menu to exit to an unrecognised prompt.
+        """
+        while True:
+            try:
+                self.raw_connection.expect(self.prompt_str, timeout=self.timeout.duration)
+            except pexpect.TIMEOUT:
+                raise JobError("wait for prompt timed out")
+            except KeyboardInterrupt:
+                raise KeyboardInterrupt
+            else:
+                break
+
+
+class MenuConnect(ConnectDevice):
+    """
+    Connect actions should not wait for a prompt - ResetDevice needs an active
+    connection and the device could be powered off when Connect is called.
+    """
+
+    def __init__(self):
+        super(MenuConnect, self).__init__()
+        self.session_class = MenuSession
+        self.name = "menu-connect"
+        self.summary = "Customise connection for menu operations"
+        self.description = "change into a menu session"
+
+    def validate(self):
+        hostname = self.job.device['hostname']
+        if self.job.device.power_state in ['on', 'off']:
+            # to enable power to a device, either power_on or hard_reset are needed.
+            if self.job.device.power_command is '':
+                self.errors = "Unable to power on or reset the device %s" % hostname
+            if self.job.device.connect_command is '':
+                self.errors = "Unable to connect to device %s" % hostname
+        else:
+            self.logger.warning("%s may need manual intervention to reboot", hostname)
+
+    def run(self, connection, args=None):
+        connection = super(MenuConnect, self).run(connection, args)
+        if not connection:
+            raise RuntimeError("%s needs a Connection" % self.name)
+        connection.check_char = '\n'
+        connection.sendline('\n')  # to catch the first prompt (remove for PDU?)
+        connection.prompt_str = self.parameters['prompts']
+        if self.job.device.power_state not in ['on', 'off']:
+            self.wait(connection)
+        return connection
+
+
+class MenuReset(ConnectDevice):
+
+    def __init__(self):
+        super(MenuReset, self).__init__()
+        self.session_class = ShellSession
+        self.name = "menu-reset"
+        self.summary = "reset to shell connection"
+        self.description = "change out of menu session to a shell session"
+
+    def run(self, connection, args=None):
+        connection = super(MenuReset, self).run(connection, args)
+        if not connection:
+            raise RuntimeError("%s needs a Connection" % self.name)
+
+        connection.check_char = '\n'
+        connection.sendline('\n')  # to catch the first prompt (remove for PDU?)
+        return connection
+
+
+class SelectorMenuAction(Action):
+
+    def __init__(self):
+        super(SelectorMenuAction, self).__init__()
+        self.name = 'menu-selector'
+        self.summary = 'select options in a menu'
+        self.description = 'select specified menu items'
+        self.selector = SelectorMenu()
+        self.items = []
+        self.send_char_delay = 0
+
+    def validate(self):
+        super(SelectorMenuAction, self).validate()
+        # check for allowed items, error if any are unrecognised
+        item_keys = {}
+        if not isinstance(self.items, list):
+            self.errors = "menu sequence must be a list"
+        for item in self.items:
+            if 'select' in item:
+                for _ in item['select']:
+                    item_keys[item['select'].keys()[0]] = None
+        disallowed = set(item_keys) - {'items', 'prompt', 'enter', 'escape'}
+        if disallowed:
+            self.errors = "Unable to recognise item %s" % disallowed
+
+    def _change_prompt(self, connection, change):
+        if change:
+            self.logger.debug("Changing menu prompt to '%s'", connection.prompt_str)
+            connection.wait()  # call MenuSession::wait directly for a tighter loop
+
+    def run(self, connection, args=None):
+        """
+        iterate through the menu sequence:
+        items: select
+        prompt: prompt_str
+        enter: <str> & Ctrl-M
+        escape: Ctrl-[ through pexpect.sendcontrol
+
+        :param menu: list of menus
+        :param connection: Connection to use to interact with the menu
+        :param logger: Action logger
+        :return: connection
+        """
+        if not connection:
+            self.logger.error("%s called without a Connection", self.name)
+            return connection
+        for block in self.items:
+            if 'select' in block:
+                change_prompt = False
+                # ensure the prompt is changed just before sending the action to allow it to be matched.
+                if 'wait' in block['select']:
+                    connection.prompt_str = block['select']['wait']
+                    change_prompt = True
+                if 'items' in block['select']:
+                    for selector in block['select']['items']:
+                        menu_text = connection.raw_connection.before
+                        action = self.selector.select(menu_text, selector)
+                        if action:
+                            self.logger.debug("Selecting option %s", action)
+                        elif 'fallback' in block['select']:
+                            action = self.selector.select(menu_text, block['select']['fallback'])
+                        connection.sendline(action)
+                        self._change_prompt(connection, change_prompt)
+                if 'escape' in block['select']:
+                    self.logger.debug("Sending escape")
+                    connection.raw_connection.sendcontrol('[')
+                    self._change_prompt(connection, change_prompt)
+                if 'enter' in block['select']:
+                    self.logger.debug("Sending %s Ctrl-M", block['select']['enter'])
+                    connection.raw_connection.send(block['select']['enter'], delay=self.send_char_delay)
+                    connection.raw_connection.sendcontrol('M')
+                    self._change_prompt(connection, change_prompt)
+            else:
+                raise JobError("Unable to recognise selection %s" % block)
+        return connection
+
+
+class DebianInstallerMenu(MovementMenu):
+
+    def __init__(self):
+        super(DebianInstallerMenu, self).__init__()
+        self.down_command = '[1B'
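
SelectorMenu.pattern turns the configured markup into a regex so that a label such as "Shell" in a menu line like "[2] Shell" can be mapped back to the selector "2". A standalone illustration of that matching; the markup, class and separator values are example choices, not dispatcher defaults:

    # Standalone illustration of the SelectorMenu matching idea.
    import re

    item_markup = ('[', ']')
    item_class = '0-9'
    separator = ' '
    label_class = 'a-zA-Z0-9 '

    pattern = "%s([%s]+)%s%s([%s]*)" % (
        re.escape(item_markup[0]), item_class,
        re.escape(item_markup[1]), separator, label_class)

    menu = "[1] Boot Manager\n[2] Shell\n[3] Reboot"
    for line in menu.splitlines():
        match = re.search(pattern, line.strip())
        if match and match.group(2) == 'Shell':
            print("send selector: %s" % match.group(1))   # -> 2
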
diff -pruN 2015.9-1/lava_dispatcher/pipeline/parser.py 2016.3-1/lava_dispatcher/pipeline/parser.py
--- 2015.9-1/lava_dispatcher/pipeline/parser.py	2015-09-10 10:34:46.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/parser.py	2016-03-02 14:34:40.000000000 +0000
@@ -26,6 +26,7 @@ from lava_dispatcher.pipeline.action imp
     Pipeline,
     Action,
     Timeout,
+    JobError,
 )
 from lava_dispatcher.pipeline.logical import (
     Deployment,
@@ -109,10 +110,18 @@ class JobParser(object):
                 job.timeout = Timeout(data['job_name'], duration)
             if 'action' in data['timeouts']:
                 self.context['default_action_duration'] = Timeout.parse(data['timeouts']['action'])
+            if 'connection' in data['timeouts']:
+                self.context['default_connection_duration'] = Timeout.parse(data['timeouts']['connection'])
             if 'test' in data['timeouts']:
                 self.context['default_test_duration'] = Timeout.parse(data['timeouts']['test'])
 
-    # FIXME: add a validate() function which checks against a Schema as a completely separate step.
+    def _map_context_defaults(self):
+        return {
+            'default_action_timeout': self.context['default_action_duration'],
+            'default_test_timeout': self.context['default_test_duration'],
+            'default_connection_timeout': self.context['default_connection_duration']
+        }
+
     # pylint: disable=too-many-locals,too-many-statements
     def parse(self, content, device, job_id, socket_addr, output_dir=None,
               env_dut=None):
@@ -123,14 +132,16 @@ class JobParser(object):
 
         self.context['default_action_duration'] = Timeout.default_duration()
         self.context['default_test_duration'] = Timeout.default_duration()
+        self.context['default_connection_duration'] = Timeout.default_duration()
         job = Job(job_id, socket_addr, data)
         counts = {}
         job.device = device
         job.parameters['output_dir'] = output_dir
         job.parameters['env_dut'] = env_dut
         job.parameters['target'] = device.target
-        for instance in Protocol.select_all(job.parameters):
-            job.protocols.append(instance(job.parameters))
+        level_tuple = Protocol.select_all(job.parameters)
+        # sort the list of protocol objects by the protocol class level.
+        job.protocols = [item[0](job.parameters) for item in sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
         pipeline = Pipeline(job=job)
         self._timeouts(data, job)
 
@@ -140,8 +151,7 @@ class JobParser(object):
             action_data.pop('yaml_line', None)
             for name in action_data:
                 if type(action_data[name]) is dict:  # FIXME: commands are not fully implemented & may produce a list
-                    action_data[name]['default_action_timeout'] = self.context['default_action_duration']
-                    action_data[name]['default_test_timeout'] = self.context['default_test_duration']
+                    action_data[name].update(self._map_context_defaults())
                 counts.setdefault(name, 1)
                 if name == 'deploy' or name == 'boot' or name == 'test':
                     # reset the context before adding a second deployment and again before third etc.
@@ -177,11 +187,22 @@ class JobParser(object):
                             action.parameters = param
                     action.summary = name
                     action.timeout = Timeout(action.name, self.context['default_action_duration'])
+                    action.connection_timeout = Timeout(action.name, self.context['default_connection_duration'])
                     pipeline.add_action(action)
                 counts[name] += 1
 
         # there's always going to need to be a finalize_process action
-        pipeline.add_action(FinalizeAction())
+        finalize = FinalizeAction()
+        pipeline.add_action(finalize)
+        finalize.populate(self._map_context_defaults())
         data['output_dir'] = output_dir
         job.set_pipeline(pipeline)
+        if 'compatibility' in data:
+            try:
+                job_c = int(job.compatibility)
+                data_c = int(data['compatibility'])
+            except ValueError as exc:
+                raise JobError('invalid compatibility value: %s' % exc)
+            if job_c < data_c:
+                raise JobError('Dispatcher unable to meet job compatibility requirement. %d > %d' % (data_c, job_c))
         return job
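
Protocol.select_all() now returns (class, level) tuples and the parser instantiates them in ascending level order, so a protocol such as vland (level 5) is created after the protocols it depends on. A small sketch of the same ordering with stand-in classes and levels:

    # Sketch only: instantiate protocol classes sorted by a class-level
    # 'level' attribute; the classes and levels here are stand-ins.
    class FakeMultinode(object):
        level = 1
        def __init__(self, params):
            self.params = params

    class FakeVland(object):
        level = 5
        def __init__(self, params):
            self.params = params

    selected = [(FakeVland, FakeVland.level), (FakeMultinode, FakeMultinode.level)]
    protocols = [cls({}) for cls, _ in sorted(selected, key=lambda item: item[1])]
    print([type(p).__name__ for p in protocols])   # ['FakeMultinode', 'FakeVland']
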
diff -pruN 2015.9-1/lava_dispatcher/pipeline/power.py 2016.3-1/lava_dispatcher/pipeline/power.py
--- 2015.9-1/lava_dispatcher/pipeline/power.py	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/power.py	2016-03-02 14:31:44.000000000 +0000
@@ -31,6 +31,7 @@ from lava_dispatcher.pipeline.action imp
 )
 from lava_dispatcher.pipeline.logical import AdjuvantAction
 from lava_dispatcher.pipeline.utils.constants import SHUTDOWN_MESSAGE
+from lava_dispatcher.pipeline.utils.shell import infrastructure_error
 
 
 class ResetDevice(Action):
@@ -69,12 +70,19 @@ class RebootDevice(Action):
             raise RuntimeError("Called %s without an active Connection" % self.name)
         if self.job.device.power_state is 'off' and self.job.device.power_command is not '':  # power on action used instead
             return connection
-        connection = super(RebootDevice, self).run(connection, args)
-        connection.prompt_str = self.parameters.get('parameters', {}).get('shutdown-message', SHUTDOWN_MESSAGE)
-        connection.sendline("reboot")
-        # FIXME: possibly deployment data, possibly separate actions, possibly adjuvants.
-        connection.sendline("reboot -n")  # initramfs may require -n for *now*
-        connection.sendline("reboot -n -f")  # initrd may require -n for *now* and -f for *force*
+        if self.job.device.power_state is 'on' and self.job.device.soft_reset_command is not '':
+            command = self.job.device['commands']['soft_reset']
+            if not self.run_command(command.split(' ')):
+                raise InfrastructureError("%s command failed" % command)
+            self.results = {'status': "success"}
+        else:
+            connection = super(RebootDevice, self).run(connection, args)
+            connection.prompt_str = self.parameters.get('parameters', {}).get('shutdown-message', SHUTDOWN_MESSAGE)
+            connection.timeout = self.connection_timeout
+            connection.sendline("reboot")
+            # FIXME: possibly deployment data, possibly separate actions, possibly adjuvants.
+            connection.sendline("reboot -n")  # initramfs may require -n for *now*
+            connection.sendline("reboot -n -f")  # initrd may require -n for *now* and -f for *force*
         self.results = {'status': "success"}
         self.data[PDUReboot.key()] = False
         if 'bootloader_prompt' in self.data['common']:
@@ -150,6 +158,62 @@ class PowerOn(Action):
         return connection
 
 
+# FIXME: Unused action, but can give fine grained control.
+class LxcStop(Action):
+    """
+    Stops the lxc container at the end of a job
+    """
+    def __init__(self):
+        super(LxcStop, self).__init__()
+        self.name = "lxc_stop"
+        self.summary = "send stop command"
+        self.description = "stop the lxc container"
+
+    def validate(self):
+        super(LxcStop, self).validate()
+        self.errors = infrastructure_error('lxc-stop')
+
+    def run(self, connection, args=None):
+        connection = super(LxcStop, self).run(connection, args)
+        lxc_name = self.get_common_data('lxc', 'name')
+        if not lxc_name:
+            return connection
+        lxc_cmd = ['lxc-stop', '-n', lxc_name, '-k']
+        command_output = self.run_command(lxc_cmd)
+        if command_output and command_output is not '':
+            raise JobError("Unable to stop lxc container: %s" %
+                           command_output)  # FIXME: JobError needs a unit test
+        return connection
+
+
+# FIXME: Unused action, but can give fine grained control.
+class LxcDestroy(Action):
+    """
+    Destroys the lxc container at the end of a job
+    """
+    def __init__(self):
+        super(LxcDestroy, self).__init__()
+        self.name = "lxc_destroy"
+        self.summary = "send destroy command"
+        self.description = "destroy the lxc container"
+
+    def validate(self):
+        super(LxcDestroy, self).validate()
+        self.errors = infrastructure_error('lxc-destroy')
+
+    def run(self, connection, args=None):
+        connection = super(LxcDestroy, self).run(connection, args)
+        lxc_name = self.get_common_data('lxc', 'name')
+        if not lxc_name:
+            return connection
+        lxc_cmd = ['lxc-destroy', '-n', lxc_name]
+        command_output = self.run_command(lxc_cmd)
+        if command_output and command_output is not '':
+            raise JobError("Unable to destroy lxc container: %s" %
+                           command_output)  # FIXME: JobError needs a unit test
+        return connection
+
+
 class PowerOff(Action):
     """
     Turns power off at the end of a job
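
The two new (currently unused) LxcStop and LxcDestroy actions wrap the lxc-stop and lxc-destroy command line tools. Outside the pipeline, the equivalent cleanup is roughly the following; the container name is invented for the example:

    # Illustrative only: the same cleanup the LxcStop/LxcDestroy actions
    # perform, done directly with subprocess.
    import subprocess

    lxc_name = 'lxc-example-job-1234'       # invented example name
    subprocess.check_call(['lxc-stop', '-n', lxc_name, '-k'])
    subprocess.check_call(['lxc-destroy', '-n', lxc_name])
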
diff -pruN 2015.9-1/lava_dispatcher/pipeline/protocols/lxc.py 2016.3-1/lava_dispatcher/pipeline/protocols/lxc.py
--- 2015.9-1/lava_dispatcher/pipeline/protocols/lxc.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/protocols/lxc.py	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,81 @@
+# Copyright (C) 2015 Linaro Limited
+#
+# Author: Senthil Kumaran S <senthil.kumaran@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+
+import pexpect
+import logging
+from lava_dispatcher.pipeline.connection import Protocol
+from lava_dispatcher.pipeline.action import (
+    Timeout,
+    JobError,
+)
+from lava_dispatcher.pipeline.shell import ShellCommand
+from lava_dispatcher.pipeline.utils.constants import LAVA_LXC_TIMEOUT
+
+
+class LxcProtocol(Protocol):
+    """
+    Lxc API protocol.
+    """
+    name = "lava-lxc"
+
+    def __init__(self, parameters):
+        super(LxcProtocol, self).__init__(parameters)
+        self.system_timeout = Timeout('system', LAVA_LXC_TIMEOUT)
+        self.lxc_name = parameters['protocols'][self.name]['name']
+        self.lxc_dist = parameters['protocols'][self.name]['distribution']
+        self.lxc_release = parameters['protocols'][self.name]['release']
+        self.lxc_arch = parameters['protocols'][self.name]['arch']
+        self.logger = logging.getLogger('dispatcher')
+
+    @classmethod
+    def accepts(cls, parameters):
+        if 'protocols' not in parameters:
+            return False
+        if 'lava-lxc' not in parameters['protocols']:
+            return False
+        if 'name' not in parameters['protocols']['lava-lxc']:
+            return False
+        if 'distribution' not in parameters['protocols']['lava-lxc']:
+            return False
+        if 'release' not in parameters['protocols']['lava-lxc']:
+            return False
+        if 'arch' not in parameters['protocols']['lava-lxc']:
+            return False
+        return True
+
+    def set_up(self):
+        """
+        Called from the job at the start of the run step.
+        """
+        pass
+
+    def finalise_protocol(self):
+        # ShellCommand executes the destroy command
+        cmd = "lxc-destroy -n {0} -f".format(self.lxc_name)
+        self.logger.debug("%s protocol: executing '%s'", self.name, cmd)
+        shell = ShellCommand("%s\n" % cmd, self.system_timeout,
+                             logger=self.logger)
+        # execute the command.
+        shell.expect(pexpect.EOF)
+        if shell.exitstatus:
+            raise JobError("%s command exited %d: %s" % (cmd, shell.exitstatus,
+                                                         shell.readlines()))
+        self.logger.debug("%s protocol finalised." % self.name)
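
LxcProtocol.accepts() only checks that the job definition carries a complete lava-lxc protocol block. A job parameters fragment that would satisfy it, with invented values, and the equivalent check:

    # Example parameters that would make LxcProtocol.accepts() return True;
    # the values are invented for illustration.
    parameters = {
        'protocols': {
            'lava-lxc': {
                'name': 'lxc-example-job-1234',
                'distribution': 'debian',
                'release': 'sid',
                'arch': 'amd64',
            }
        }
    }

    required = ('name', 'distribution', 'release', 'arch')
    lxc = parameters.get('protocols', {}).get('lava-lxc', {})
    print(all(key in lxc for key in required))   # True -> protocol selected
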
diff -pruN 2015.9-1/lava_dispatcher/pipeline/protocols/multinode.py 2016.3-1/lava_dispatcher/pipeline/protocols/multinode.py
--- 2015.9-1/lava_dispatcher/pipeline/protocols/multinode.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/protocols/multinode.py	2016-03-02 14:34:40.000000000 +0000
@@ -19,9 +19,11 @@
 # with this program; if not, see <http://www.gnu.org/licenses>.
 
 
+import re
 import copy
 import json
 import logging
+import traceback
 import os
 import socket
 import time
@@ -84,6 +86,7 @@ class MultinodeProtocol(Protocol):
             "poll_delay": 1,
             "coordinator_hostname": "localhost"
         }
+        self.logger = logging.getLogger('dispatcher')
         json_default = {}
         with open(filename) as stream:
             jobdata = stream.read()
@@ -132,7 +135,7 @@ class MultinodeProtocol(Protocol):
                 self.logger.debug("zero bytes sent for message - connection closed?")
                 return False
         except socket.error as exc:
-            self.logger.exception("socket error '%d' on send" % exc.message)
+            self.logger.exception("socket error '%s' on send" % exc.message)
             self.sock.close()
             return False
         return True
@@ -164,6 +167,10 @@ class MultinodeProtocol(Protocol):
         """
         if not timeout:
             timeout = self.poll_timeout.duration
+        if isinstance(timeout, float):
+            timeout = int(timeout)
+        elif not isinstance(timeout, int):
+            raise RuntimeError("Invalid timeout duration type: %s %s" % (type(timeout), timeout))
         msg_len = len(message)
         if msg_len > 0xFFFE:
             raise JobError("Message was too long to send!")
@@ -239,7 +246,7 @@ class MultinodeProtocol(Protocol):
                 "waitrole": expect_role,
                 "messageID": 'lava_start'}
             self._send(sync_msg, True)
-            self.logger.debug("sent %s" % sync_msg)
+            self.logger.debug("sent %s" % json.dumps(sync_msg))
         else:
             self.logger.debug("%s protocol initialised" % self.name)
 
@@ -303,7 +310,14 @@ class MultinodeProtocol(Protocol):
         if "poll_delay" in json_data:
             self.settings['poll_delay'] = int(json_data["poll_delay"])
         if 'timeout' in json_data:
-            self.poll_timeout = Timeout(self.name, json_data['timeout'])
+            if isinstance(json_data['timeout'], dict):
+                self.poll_timeout.duration = Timeout.parse(json_data['timeout'])
+            elif isinstance(json_data['timeout'], int) or isinstance(json_data['timeout'], float):
+                self.poll_timeout.duration = json_data['timeout']
+            else:
+                self.logger.debug(json_data['timeout'])
+                raise JobError("Invalid timeout request")
+            self.logger.debug("Setting poll timeout of %s seconds", int(self.poll_timeout.duration))
         if 'messageID' not in json_data:
             raise JobError("Missing messageID")
         # handle conversion of api calls to internal functions
@@ -344,10 +358,10 @@ class MultinodeProtocol(Protocol):
                 send_msg = json_data['message']
                 if type(send_msg) is not dict:
                     send_msg = {json_data['message']: None}
-                self.logger.debug("message: %s", send_msg)
+                self.logger.debug("message: %s", json.dumps(send_msg))
                 if 'yaml_line' in send_msg:
                     del send_msg['yaml_line']
-                self.logger.debug("requesting lava_send %s with args %s" % (message_id, send_msg))
+                self.logger.debug("requesting lava_send %s with args %s" % (message_id, json.dumps(send_msg)))
                 reply_str = self.request_send(message_id, send_msg)
             else:
                 self.logger.debug("requesting lava_send %s without args" % message_id)
@@ -365,6 +379,9 @@ class MultinodeProtocol(Protocol):
         try:
             return self._api_select(json.dumps(args))
         except (ValueError, TypeError) as exc:
+            msg = re.sub('\s+', ' ', ''.join(traceback.format_exc().split('\n')))
+            logger = logging.getLogger("dispatcher")
+            logger.exception(msg)
             raise JobError("Invalid call to %s %s" % (self.name, exc))
 
     def collate(self, reply, params):
@@ -378,15 +395,33 @@ class MultinodeProtocol(Protocol):
         and the second value is the collated data from the call to the protocol.
         """
         retval = {}
-        if 'message' in params and 'message' in reply:
+        if reply == {} or not isinstance(reply, dict):
+            msg = "Unable to identify replaceable values in the parameters: %s" % params
+            self.logger.error(msg)
+            raise JobError(msg)
+        self.logger.debug({
+            "Retrieving replaceable values from": "%s" % json.dumps(reply),
+            "params": "%s" % json.dumps(params)})
+        if 'message' in params and reply:
             replaceables = [key for key, value in params['message'].items()
                             if key != 'yaml_line' and value.startswith('$')]
             for item in replaceables:
-                data = [val for val in reply['message'].items() if self.parameters['target'] in val][0][1]
+                if 'message' in reply:
+                    data = [val for val in reply['message'].items() if self.parameters['target'] in val][0][1]
+                else:
+                    data = [val for val in reply.items()][0][1]
+                if item not in data:
+                    self.logger.warning("Skipping %s - not found in %s", item, data)
+                    continue
                 retval.setdefault(params['messageID'], {item: data[item]})
-        ret_key = params['messageID']
-        ret_value = retval[ret_key]
-        return ret_key, ret_value
+        if 'messageID' in params:
+            ret_key = params['messageID']
+            if ret_key in retval:
+                ret_value = retval[ret_key]
+                return ret_key, ret_value
+        msg = "Unable to identify replaceable values in the parameters: %s" % params
+        self.logger.error(msg)
+        raise JobError(msg)
 
     def _send(self, msg, system=False):
         """ Internal call to perform the API call via the Poller.
diff -pruN 2015.9-1/lava_dispatcher/pipeline/protocols/strategies.py 2016.3-1/lava_dispatcher/pipeline/protocols/strategies.py
--- 2015.9-1/lava_dispatcher/pipeline/protocols/strategies.py	2015-09-07 08:13:04.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/protocols/strategies.py	2016-02-02 08:07:05.000000000 +0000
@@ -21,3 +21,5 @@
 # pylint: disable=unused-import
 
 from lava_dispatcher.pipeline.protocols.multinode import MultinodeProtocol
+from lava_dispatcher.pipeline.protocols.lxc import LxcProtocol
+from lava_dispatcher.pipeline.protocols.vland import VlandProtocol
diff -pruN 2015.9-1/lava_dispatcher/pipeline/protocols/vland.py 2016.3-1/lava_dispatcher/pipeline/protocols/vland.py
--- 2015.9-1/lava_dispatcher/pipeline/protocols/vland.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/protocols/vland.py	2016-03-02 14:34:40.000000000 +0000
@@ -0,0 +1,483 @@
+# Copyright (C) 2015 Linaro Limited
+#
+# Author: Neil Williams <neil.williams@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+
+import copy
+import time
+import json
+import socket
+import logging
+from lava_dispatcher.pipeline.connection import Protocol
+from lava_dispatcher.pipeline.action import JobError, TestError
+from lava_dispatcher.pipeline.protocols.multinode import MultinodeProtocol
+from lava_dispatcher.pipeline.utils.constants import VLAND_DEPLOY_TIMEOUT
+
+
+# pylint: disable=too-many-instance-attributes
+
+
+class VlandProtocol(Protocol):
+    """
+    VLANd protocol - multiple vlans are possible per group
+    Can only run *after* the multinode protocol is ready
+    Using the VLANd protocol has hardware and lab topology requirements.
+    All workers *must* be able to see a single vland daemon for this instance.
+    All workers and all devices *must* be on a single set of managed switches
+    which are already configured in that vland daemon. All switches in the set
+    *must* be able to setup a vlan that could potentially use ports on any switch
+    in the configured set - so each needs to be able to see all of the others.
+    """
+    name = "lava-vland"
+    level = 5
+
+    def __init__(self, parameters):
+        super(VlandProtocol, self).__init__(parameters=parameters)
+        self.logger = logging.getLogger('dispatcher')
+        self.vlans = {}
+        self.ports = []
+        self.names = {}
+        self.base_group = parameters['protocols'][MultinodeProtocol.name]['target_group'].replace('-', '')[:10]
+        self.sub_id = parameters['protocols'][MultinodeProtocol.name]['sub_id']
+        self.fake_run = False
+        self.settings = None
+        self.blocks = 4 * 1024
+        self.sock = None
+        self.base_message = {}
+        self.params = {}
+        self.nodes_seen = []  # node == combination of switch & port
+        self.multinode_protocol = None
+
+    @classmethod
+    def accepts(cls, parameters):
+        if 'protocols' not in parameters:
+            return False
+        if 'lava-multinode' not in parameters['protocols']:
+            return False
+        if 'target_group' not in parameters['protocols'][MultinodeProtocol.name]:
+            return False
+        if 'lava-vland' not in parameters['protocols']:
+            return False
+        return True
+
+    def read_settings(self):  # pylint: disable=no-self-use
+        # FIXME: support config file
+        settings = {
+            "port": 3080,
+            "poll_delay": 1,
+            "vland_hostname": "localhost"
+        }
+        return settings
+
+    def _connect(self, delay):
+        """
+        create socket and connect
+        """
+        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        try:
+            self.sock.connect((self.settings['vland_hostname'], self.settings['port']))
+            return True
+        except socket.error as exc:
+            self.logger.exception(
+                "socket error on connect: %d %s %s",
+                exc.errno, self.settings['vland_hostname'], self.settings['port'])
+            time.sleep(delay)
+            self.sock.close()
+            return False
+
+    def _send_message(self, message):
+        msg_len = len(message)
+        try:
+            # send the length as 32bit hexadecimal
+            ret_bytes = self.sock.send("%08X" % msg_len)
+            if ret_bytes == 0:
+                self.logger.debug("zero bytes sent for length - connection closed?")
+                return False
+            ret_bytes = self.sock.send(message)
+            if ret_bytes == 0:
+                self.logger.debug("zero bytes sent for message - connection closed?")
+                return False
+        except socket.error as exc:
+            self.logger.exception("socket error '%s' on send", exc.message)
+            self.sock.close()
+            return False
+        return True
+
+    def _recv_message(self):
+        try:
+            header = self.sock.recv(8)  # 32bit limit as a hexadecimal
+            if not header or header == '':
+                self.logger.debug("empty header received?")
+                return json.dumps({"response": "wait"})
+            msg_count = int(header, 16)
+            recv_count = 0
+            response = ''
+            while recv_count < msg_count:
+                response += self.sock.recv(self.blocks)
+                recv_count += self.blocks
+        except socket.error as exc:
+            self.logger.exception("socket error '%d' on response", exc.errno)
+            self.sock.close()
+            return json.dumps({"response": "wait"})
+        return response
+
+    def poll(self, message, timeout=None):
+        """
+        Blocking, synchronous polling of VLANd on the configured port.
+        Single send operations greater than 0xFFFF are rejected to prevent truncation.
+        :param msg_str: The message to send to VLAND, as a JSON string.
+        :return: a JSON string of the response to the poll
+        """
+        if not timeout:
+            timeout = self.poll_timeout.duration
+        msg_len = len(message)
+        if msg_len > 0xFFFE:
+            raise JobError("Message was too long to send!")
+        c_iter = 0
+        response = None
+        delay = self.settings['poll_delay']
+        self.logger.debug("Connecting to VLANd on %s:%s timeout=%d seconds.",
+                          self.settings['vland_hostname'], self.settings['port'], timeout)
+        while True:
+            c_iter += self.settings['poll_delay']
+            if self._connect(delay):
+                delay = self.settings['poll_delay']
+            else:
+                delay += 2
+                continue
+            if not c_iter % int(10 * self.settings['poll_delay']):
+                self.logger.debug("sending message: %s waited %s of %s seconds",
+                                  json.loads(message)['request'], c_iter, int(timeout))
+            # blocking synchronous call
+            if not self._send_message(message):
+                continue
+            self.sock.shutdown(socket.SHUT_WR)
+            response = self._recv_message()
+            self.sock.close()
+            try:
+                json_data = json.loads(response)
+            except ValueError:
+                self.logger.debug("response starting '%s' was not JSON", response[:42])
+                self.finalise_protocol()
+                break
+            if json_data['response'] != 'wait':
+                break
+            else:
+                time.sleep(delay)
+            # apply the default timeout to each poll operation.
+            if c_iter > timeout:
+                self.finalise_protocol()
+                raise JobError("protocol %s timed out" % self.name)
+        return response
+
+    def _call_vland(self, msg):
+        """ Internal call to perform the API call via the Poller.
+        :param msg: The call-specific message to be wrapped in the base_msg primitive.
+        :param timeout: Number of seconds for this call.
+        :return: Python object of the reply dict.
+        """
+        new_msg = copy.deepcopy(self.base_message)
+        new_msg.update(msg)
+        self.logger.debug("final message: %s", json.dumps(new_msg))
+        return self.poll(json.dumps(new_msg))
+
+    def _create_vlan(self, friendly_name):
+        """
+        Ask vland to create a vlan which we will track using the friendly_name
+        but which vland knows as a generated string in self.names which is
+        known to be safe to use on the supported switches.
+        Passes -1 as the tag so that vland allocates the next available tag.
+        :param friendly_name: user-specified string used to lookup vlan data
+        :return: a tuple containing the internal name used by vland and the vland tag.
+        """
+        msg = {
+            'type': 'vlan_update',
+            'command': 'api.create_vlan',
+            'data': {
+                'name': self.names[friendly_name],
+                'tag': -1,
+                'is_base_vlan': False
+            }
+        }
+        self.logger.debug({"create_vlan": msg})
+        response = self._call_vland(msg)
+        if not response or response == '':
+            return (None, None)
+        reply = json.loads(response)
+        if 'data' in reply:
+            return reply['data']
+        raise RuntimeError(reply)
+
+    def _declare_created(self, friendly_name, tag):
+        if not self.configured:
+            return False
+        send_msg = {
+            'request': 'lava_send',
+            'timeout': VLAND_DEPLOY_TIMEOUT,
+            'messageID': friendly_name,
+            'message': {
+                'vlan_name': self.vlans[friendly_name],
+                'vlan_tag': tag
+            }
+        }
+        self.multinode_protocol(send_msg)
+
+    def _wait_on_create(self, friendly_name):
+        if not self.configured:
+            return False
+        wait_msg = {
+            'request': 'lava_wait',
+            'timeout': VLAND_DEPLOY_TIMEOUT,
+            'messageID': friendly_name,
+        }
+        ret = self.multinode_protocol(wait_msg)
+        if ret:
+            values = ret.values()[0]
+            return (values['vlan_name'], values['vlan_tag'],)
+        raise JobError("Waiting for vlan creation failed: %s", ret)
+
+    def _delete_vlan(self, friendly_name, vlan_id):
+        msg = {
+            'type': 'vlan_update',
+            'command': 'api.delete_vlan',
+            'data': {
+                'vlan_id': vlan_id,
+            }
+        }
+        self.logger.debug({"delete_vlan": msg})
+        self._call_vland(msg)
+        # FIXME detect a failure
+        del self.vlans[friendly_name]
+
+    def _lookup_switch_id(self, switch_name):
+        msg = {
+            'type': 'db_query',
+            'command': 'db.get_switch_id_by_name',
+            'data': {
+                'name': switch_name
+            }
+        }
+        self.logger.debug({"lookup_switch": msg})
+        response = self._call_vland(msg)
+        if not response or response == '':
+            return None
+        reply = json.loads(response)
+        return reply['data']
+
+    def _lookup_port_id(self, switch_id, port):
+        msg = {
+            'type': 'db_query',
+            'command': 'db.get_port_by_switch_and_number',
+            'data': {
+                'switch_id': switch_id,
+                'number': port
+            }
+        }
+        self.logger.debug({"lookup_port_id": msg})
+        response = self._call_vland(msg)
+        if not response or response == '':
+            return None
+        reply = json.loads(response)
+        return reply['data']
+
+    def _set_port_onto_vlan(self, vlan_id, port_id):
+        msg = {
+            'type': 'vlan_update',
+            'command': 'api.set_current_vlan',
+            'data': {
+                'port_id': port_id,
+                'vlan_id': vlan_id
+            }
+        }
+        self.logger.debug({"set_port_onto_vlan": msg})
+        self._call_vland(msg)
+        # FIXME detect a failure
+
+    def _restore_port(self, port_id):
+        msg = {
+            'type': 'vlan_update',
+            'command': 'api.restore_base_vlan',
+            'data': {
+                'port_id': port_id,
+            }
+        }
+        self.logger.debug({"restore_port": msg})
+        self._call_vland(msg)
+        # FIXME detect a failure
+
+    def set_up(self):
+        """
+        Called by Job.run() to initialise the protocol itself.
+        The vlan is not setup at the start of the job as the job will likely need networking
+        to make the deployment.
+        """
+        self.settings = self.read_settings()
+        self.base_message = {
+            "port": self.settings['port'],
+            "poll_delay": self.settings["poll_delay"],
+            "host": self.settings['vland_hostname'],
+            "client_name": socket.gethostname(),
+        }
+
+    def configure(self, device, job):  # pylint: disable=too-many-branches
+        """
+        Called by job.validate() to populate internal data
+        Configures the vland protocol for this job for the assigned device.
+        Returns True if configuration completed.
+        """
+        if self.configured:
+            return True
+        if not device:
+            self.errors = "Unable to configure protocol without a device"
+        elif 'parameters' not in device:
+            self.errors = "Invalid device configuration, no parameters given."
+        elif 'interfaces' not in device['parameters']:
+            self.errors = "Device lacks interfaces information."
+        elif not isinstance(device['parameters']['interfaces'], dict):
+            self.errors = "Invalid interfaces dictionary for device"
+        protocols = [protocol for protocol in job.protocols if protocol.name == MultinodeProtocol.name]
+        if not protocols:
+            self.errors = "Unable to determine Multinode protocol object"
+        self.multinode_protocol = protocols[0]
+        if not self.valid:
+            return False
+        interfaces = [interface for interface, _ in device['parameters']['interfaces'].iteritems()]
+        available = []
+        for iface in interfaces:
+            available.extend(device['parameters']['interfaces'][iface]['tags'])
+        requested = []
+        count = 0
+        for friendly_name in self.parameters['protocols'][self.name]:
+            if friendly_name == 'yaml_line':
+                continue
+            self.names[friendly_name] = "%s%s%02d" % (self.base_group, self.sub_id, count)
+            count += 1
+        self.params = copy.deepcopy(self.parameters['protocols'][self.name])
+        for vlan_name in self.params:
+            if vlan_name == 'yaml_line':
+                continue
+            if 'tags' not in self.params[vlan_name]:
+                self.errors = "%s already configured for %s" % (device['hostname'], self.name)
+            else:
+                requested.extend(self.params[vlan_name]['tags'])
+        if not any(set(requested).intersection(available)):
+            self.errors = "Requested link speeds %s are not available %s for %s" % (
+                requested, available, device['hostname'])
+        if not self.valid:
+            return False
+
+        # one vlan_name, one combination of switch & port, one interface, any supported link speed.
+        # this may need more work with more complex vlan jobs
+        for vlan_name in self.params:
+            if vlan_name == 'yaml_line':
+                continue
+            for iface in interfaces:
+                device_info = device['parameters']['interfaces'][iface]
+                if ' '.join([device_info['switch'], str(device_info['port'])]) in self.nodes_seen:
+                    # combination of switch & port already processed for this device
+                    continue
+                if any(set(device_info['tags']).intersection(self.params[vlan_name]['tags'])):
+                    self.params[vlan_name]['switch'] = device_info['switch']
+                    self.params[vlan_name]['port'] = device_info['port']
+                    self.nodes_seen.append(' '.join([device_info['switch'], str(device_info['port'])]))
+                    break
+        self.logger.debug("[%s] vland params: %s", device['hostname'], self.params)
+        super(VlandProtocol, self).configure(device, job)
+        return True
+
+    def deploy_vlans(self):
+        """
+        Calls vland to create a vlan. Passes -1 to get the next available vlan tag.
+        Always passes False to is_base_vlan.
+        friendly_name is the name specified by the test writer and is not sent to vland.
+        self.names maps the friendly names to unique names for the VLANs, usable on the switches themselves.
+        Some switches have limits on the allowed characters and length of the name, so this
+        string is controlled by the protocol and differs from the friendly name supplied by the
+        test writer. Each VLAN also has an ID which is used to identify the VLAN to vland; this
+        ID is stored in self.vlans for each friendly_name for use with vland.
+        The vlan tag is also stored but not used by the protocol itself.
+        """
+        # FIXME implement a fake daemon to test the calls
+        # create vlans by iterating and appending to self.base_group for the vlan name
+        # run_admin_command --create_vlan test30 -1 false
+        if self.sub_id != 0:
+            for friendly_name, _ in self.names.items():
+                self.vlans[friendly_name], tag = self._wait_on_create(friendly_name)
+                self.logger.debug("vlan name: %s vlan tag: %s", self.vlans[friendly_name], tag)
+        else:
+            for friendly_name, _ in self.names.items():
+                self.logger.info("Deploying vlan %s : %s", friendly_name, self.names[friendly_name])
+                try:
+                    self.vlans[friendly_name], tag = self._create_vlan(friendly_name)
+                except RuntimeError as exc:
+                    raise JobError("Deploy vlans failed for %s: %s" % (friendly_name, exc))
+                self.logger.debug("vlan name: %s vlan tag: %s", self.vlans[friendly_name], tag)
+                if not tag:  # error state from create_vlan
+                    raise JobError("Unable to create vlan %s", friendly_name)
+                self._declare_created(friendly_name, tag)
+        for friendly_name, _ in self.names.items():
+            params = self.params[friendly_name]
+            switch_id = self._lookup_switch_id(params['switch'])
+            port_id = self._lookup_port_id(switch_id, params['port'])
+            self.logger.info("Setting switch %s port %s to vlan %s", params['switch'], params['port'], friendly_name)
+            self._set_port_onto_vlan(self.vlans[friendly_name], port_id)
+            self.ports.append(port_id)
+
+    def __call__(self, args):
+        try:
+            return self._api_select(args)
+        except (ValueError, TypeError) as exc:
+            msg = "Invalid call to %s %s" % (self.name, exc)
+            self.logger.exception(msg)
+            raise JobError(msg)
+
+    def _api_select(self, data):
+        if not data:
+            raise TestError("Protocol called without any data")
+        if 'request' not in data:
+            raise JobError("Bad API call over protocol - missing request")
+        if data['request'] == 'deploy_vlans':
+            self.deploy_vlans()
+        else:
+            raise JobError("Unrecognised API call in request.")
+        return None
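+    # Illustrative usage (assumption, not part of the module): calling the protocol
+    # instance as protocol({'request': 'deploy_vlans'}) routes through __call__ to
+    # _api_select and runs deploy_vlans(); any other request string raises JobError.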
+
+    def check_timeout(self, duration, data):
+        if not data:
+            raise TestError("Protocol called without any data")
+        if 'request' not in data:
+            raise JobError("Bad API call over protocol - missing request")
+        if data['request'] == 'deploy_vlans':
+            if duration < VLAND_DEPLOY_TIMEOUT:
+                raise JobError("Timeout of %s is insufficient for deploy_vlans", duration)
+            self.logger.info("Setting vland base timeout to %s seconds", duration)
+            self.poll_timeout.duration = duration
+            return True
+        return False
+
+    def finalise_protocol(self):
+        # restore any ports to base_vlan
+        for port_id in self.ports:
+            self.logger.info("Finalizing port %s", port_id)
+            self._restore_port(port_id)
+        # then delete any vlans
+        for friendly_name, vlan_id in self.vlans.items():
+            self.logger.info("Finalizing vlan %s", vlan_id)
+            self._delete_vlan(friendly_name, vlan_id)
diff -pruN 2015.9-1/lava_dispatcher/pipeline/shell.py 2016.3-1/lava_dispatcher/pipeline/shell.py
--- 2015.9-1/lava_dispatcher/pipeline/shell.py	2015-09-09 14:31:01.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/shell.py	2016-03-02 14:34:44.000000000 +0000
@@ -19,12 +19,12 @@
 # with this program; if not, see <http://www.gnu.org/licenses>.
 
 import contextlib
-import logging
 import os
+import yaml
 import pexpect
-import signal
 import sys
 import time
+import logging
 from lava_dispatcher.pipeline.action import (
     Action,
     JobError,
@@ -34,13 +34,17 @@ from lava_dispatcher.pipeline.action imp
 )
 from lava_dispatcher.pipeline.connection import Connection, CommandRunner
 from lava_dispatcher.pipeline.utils.constants import SHELL_SEND_DELAY
-from lava_dispatcher.pipeline.utils.shell import which
 
 
 class ShellLogger(object):
-    def __init__(self):
+    """
+    Builds a YAML log message out of the incremental output of the pexpect.spawn
+    using the logfile support built into pexpect.
+    """
+
+    def __init__(self, logger):
         self.line = ''
-        self.logger = logging.getLogger('dispatcher')
+        self.logger = logger
 
     def write(self, new_line):
         replacements = {
@@ -61,7 +65,7 @@ class ShellLogger(object):
             self.line += new_line
         return
 
-    def flush(self):
+    def flush(self):  # pylint: disable=no-self-use
         sys.stdout.flush()
         sys.stderr.flush()
 
@@ -75,16 +79,19 @@ class ShellCommand(pexpect.spawn):  # py
     A ShellCommand is a raw_connection for a ShellConnection instance.
     """
 
-    def __init__(self, command, lava_timeout, cwd=None):
-        if not lava_timeout or type(lava_timeout) is not Timeout:
+    def __init__(self, command, lava_timeout, logger=None, cwd=None):
+        if not lava_timeout or not isinstance(lava_timeout, Timeout):
             raise RuntimeError("ShellCommand needs a timeout set by the calling Action")
+        if not logger:
+            raise RuntimeError("ShellCommand needs a logger")
         pexpect.spawn.__init__(
             self, command,
             timeout=lava_timeout.duration,
             cwd=cwd,
-            logfile=ShellLogger(),
+            logfile=ShellLogger(logger),
         )
         self.name = "ShellCommand"
+        self.logger = logger
         # serial can be slow, races do funny things, so allow for a delay
         self.delaybeforesend = SHELL_SEND_DELAY
         self.lava_timeout = lava_timeout
@@ -99,12 +106,21 @@ class ShellCommand(pexpect.spawn):  # py
         :param delay: delay in milliseconds between sending each character
         :param send_char: send one character or entire string
         """
+        if delay:
+            self.logger.debug({
+                "sending with %s millisecond delay" % delay: yaml.dump(
+                    s, default_style='"', width=1000)})
+        else:
+            self.logger.debug({
+                "sending": "%s" % yaml.dump(s, default_style='"', width=1000)})
         self.send(s, delay, send_char)
         self.send(os.linesep, delay)
 
     def sendcontrol(self, char):
+        self.logger.debug("sendcontrol: %s", char)
         return super(ShellCommand, self).sendcontrol(char)
 
+    # FIXME: no sense in sending delay and send_char - if delay is non-zero, send_char needs to be True
     def send(self, string, delay=0, send_char=True):  # pylint: disable=arguments-differ
         """
         Extends pexpect.send to support extra arguments, delay and send by character flags.
@@ -158,6 +174,7 @@ class ShellSession(Connection):
         self.__runner__ = None
         self.name = "ShellSession"
         self.data = job.context
+        # FIXME: rename __prompt_str__ to indicate it can be a list or str
         self.__prompt_str__ = None
         self.spawn = shell_command
         self.timeout = shell_command.lava_timeout
@@ -166,6 +183,7 @@ class ShellSession(Connection):
         # FIXME
         pass
 
+    # FIXME: rename prompt_str to indicate it can be a list or str
     @property
     def prompt_str(self):
         return self.__prompt_str__
@@ -182,7 +200,7 @@ class ShellSession(Connection):
             # device = self.device
             spawned_shell = self.raw_connection  # ShellCommand(pexpect.spawn)
             # FIXME: the prompts should not be needed here, only kvm uses these. Remove.
-            # prompt_str = device['test_image_prompts']  # FIXME: deployment_data?
+            # prompt_str = parameters['prompts']
             prompt_str_includes_rc = True  # FIXME - parameters['deployment_data']['TESTER_PS1_INCLUDES_RC']?
 #            prompt_str_includes_rc = device.config.tester_ps1_includes_rc
             # The Connection for a CommandRunner in the pipeline needs to be a ShellCommand, not logging_spawn
@@ -198,9 +216,8 @@ class ShellSession(Connection):
         Yields the actual connection which can be used to interact inside this shell.
         """
         if self.__runner__ is None:
-            # device = self.device
             spawned_shell = self.raw_connection  # ShellCommand(pexpect.spawn)
-            # prompt_str = device['test_image_prompts']
+            # prompt_str = parameters['prompts']
             prompt_str_includes_rc = True  # FIXME - do we need this?
 #            prompt_str_includes_rc = device.config.tester_ps1_includes_rc
             # The Connection for a CommandRunner in the pipeline needs to be a ShellCommand, not logging_spawn
@@ -209,102 +226,56 @@ class ShellSession(Connection):
         yield self.__runner__.get_connection()
 
     def wait(self):
-        self.raw_connection.sendline("#")
         if not self.prompt_str:
-            self.prompt_str = '#'
+            self.prompt_str = self.check_char
         try:
-            self.runner.wait_for_prompt(self.timeout.duration)
+            self.runner.wait_for_prompt(self.timeout.duration, self.check_char)
         except pexpect.TIMEOUT:
             raise JobError("wait for prompt timed out")
 
 
+class SimpleSession(ShellSession):
+
+    def wait(self):
+        """
+        Simple wait without sending blank lines, as that causes the menu
+        to advance without data, which can create blank entries and can cause
+        the menu to exit to an unrecognised prompt.
+        """
+        while True:
+            try:
+                self.raw_connection.expect(self.prompt_str, timeout=self.timeout.duration)
+            except pexpect.TIMEOUT:
+                raise JobError("wait for prompt timed out")
+            except KeyboardInterrupt:
+                raise KeyboardInterrupt
+            else:
+                break
+
+
 class ExpectShellSession(Action):
     """
     Waits for a shell connection to the device for the current job.
     The shell connection can be over any particular connection,
     all that is needed is a prompt.
     """
+    compatibility = 2
 
     def __init__(self):
         super(ExpectShellSession, self).__init__()
         self.name = "expect-shell-connection"
         self.summary = "Expect a shell prompt"
         self.description = "Wait for a shell"
-        self.prompts = []
 
     def validate(self):
         super(ExpectShellSession, self).validate()
-        if 'test_image_prompts' not in self.job.device:
-            self.errors = "Unable to identify test image prompts from device configuration."
-        self.prompts = self.job.device['test_image_prompts']
-        if 'parameters' in self.parameters:
-            if 'boot_prompt' in self.parameters['parameters']:
-                self.prompts.append(self.parameters['parameters']['boot_prompt'])
+        if 'prompts' not in self.parameters:
+            self.errors = "Unable to identify test image prompts from parameters."
 
     def run(self, connection, args=None):
         connection = super(ExpectShellSession, self).run(connection, args)
-        connection.prompt_str = self.job.device['test_image_prompts']
+        if not connection.prompt_str:
+            connection.prompt_str = self.parameters['prompts']
         self.logger.debug("%s: Waiting for prompt", self.name)
         self.wait(connection)  # FIXME: should this be a regular RetryAction operation?
         return connection
-
-
-class ConnectDevice(Action):
-    """
-    General purpose class to use the device commands to
-    make a connection to the device. e.g. using ser2net
-    """
-    def __init__(self):
-        super(ConnectDevice, self).__init__()
-        self.name = "connect-device"
-        self.summary = "run connection command"
-        self.description = "use the configured command to connect to the device"
-
-    def validate(self):
-        super(ConnectDevice, self).validate()
-        if 'connect' not in self.job.device['commands']:
-            self.errors = "Unable to connect to device %s - missing connect command." % self.job.device.hostname
-            return
-        if 'test_image_prompts' not in self.job.device:
-            self.errors = "Unable to identify test image prompts from device configuration."
-        command = self.job.device['commands']['connect']
-        exe = ''
-        try:
-            exe = command.split(' ')[0]
-        except AttributeError:
-            self.errors = "Unable to parse the connection command %s" % command
-        try:
-            which(exe)
-        except InfrastructureError:
-            if exe != '':
-                self.errors = "Unable to find %s - is it installed?" % exe
-        # FIXME: check the executable is safe to call?
-        # from stat import S_IXUSR
-        # import os
-        # os.stat(exe).st_mode & S_IXUSR == S_IXUSR  # should be True
-        # does require that telnet is always installed.
-
-    def run(self, connection, args=None):
-        if connection:
-            self.logger.debug("Already connected")
-            connection.prompt_str = self.job.device['test_image_prompts']
-            return connection
-        command = self.job.device['commands']['connect']
-        self.logger.info("%s Connecting to device using '%s'", self.name, command)
-        signal.alarm(0)  # clear the timeouts used without connections.
-        shell = ShellCommand("%s\n" % command, self.timeout)
-        if shell.exitstatus:
-            raise JobError("%s command exited %d: %s" % (command, shell.exitstatus, shell.readlines()))
-        connection = ShellSession(self.job, shell)
-        connection = super(ConnectDevice, self).run(connection, args)
-        connection.prompt_str = self.job.device['test_image_prompts']
-        # if the board is running, wait for a prompt - if not, skip.
-        if self.job.device.power_state is 'off':
-            return connection
-        try:
-            self.wait(connection)
-        except TestError:
-            self.errors = "%s wait expired", self.name
-            self.logger.debug("wait expired %s", self.elapsed_time)
-        self.logger.debug("matched %s", connection.match)
-        return connection
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/bbb-group-vland-alpha.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/bbb-group-vland-alpha.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/bbb-group-vland-alpha.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/bbb-group-vland-alpha.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,51 @@
+- class: actions.deploy.tftp.TftpAction
+  name: tftp-deploy
+  pipeline:
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - class: actions.deploy.apply_overlay.PrepareOverlayTftp
+    name: prepare-tftp-overlay
+    pipeline:
+    - {class: actions.deploy.apply_overlay.ExtractNfsRootfs, name: extract-nfsrootfs}
+    - class: actions.deploy.overlay.OverlayAction
+      name: lava-overlay
+      pipeline:
+      - {class: actions.deploy.overlay.SshAuthorize, name: ssh-authorize}
+      - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
+      - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
+      - class: actions.deploy.testdef.TestDefinitionAction
+        name: test-definition
+        pipeline:
+        - {class: actions.deploy.testdef.GitRepoAction, name: git-repo-action}
+        - {class: actions.deploy.testdef.TestOverlayAction, name: test-overlay}
+        - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
+        - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
+        - {class: actions.deploy.testdef.GitRepoAction, name: git-repo-action}
+        - {class: actions.deploy.testdef.TestOverlayAction, name: test-overlay}
+        - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
+        - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
+      - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+      - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
+    - {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
+    - {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
+    - {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
+    - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
+  - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
+- class: actions.test.shell.TestShellRetry
+  name: lava-test-retry
+  pipeline:
+  - {class: actions.test.shell.TestShellAction, name: lava-test-shell}
+- class: power.FinalizeAction
+  name: finalize
+  pipeline:
+  - {class: power.PowerOff, name: power_off}
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/bbb-nfs-url.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/bbb-nfs-url.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/bbb-nfs-url.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/bbb-nfs-url.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -0,0 +1,67 @@
+- class: actions.deploy.tftp.TftpAction
+  name: tftp-deploy
+  pipeline:
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - class: actions.deploy.apply_overlay.PrepareOverlayTftp
+    name: prepare-tftp-overlay
+    pipeline:
+    - {class: actions.deploy.apply_overlay.ExtractNfsRootfs, name: extract-nfsrootfs}
+    - class: actions.deploy.overlay.OverlayAction
+      name: lava-overlay
+      pipeline:
+      - {class: actions.deploy.overlay.SshAuthorize, name: ssh-authorize}
+      - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
+      - class: actions.deploy.testdef.TestDefinitionAction
+        name: test-definition
+        pipeline:
+        - {class: actions.deploy.testdef.GitRepoAction, name: git-repo-action}
+        - {class: actions.deploy.testdef.TestOverlayAction, name: test-overlay}
+        - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
+        - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
+        - {class: actions.deploy.testdef.GitRepoAction, name: git-repo-action}
+        - {class: actions.deploy.testdef.TestOverlayAction, name: test-overlay}
+        - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
+        - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
+      - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+      - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
+    - {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
+    - {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
+    - {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
+    - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
+  - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
+- class: actions.boot.u_boot.UBootAction
+  name: uboot-action
+  pipeline:
+  - {class: actions.boot.u_boot.UBootSecondaryMedia, name: uboot-from-media}
+  - {class: actions.boot.u_boot.UBootCommandOverlay, name: uboot-overlay}
+  - {class: connections.serial.ConnectDevice, name: connect-device}
+  - class: actions.boot.u_boot.UBootRetry
+    name: uboot-retry
+    pipeline:
+    - class: power.ResetDevice
+      name: reboot-device
+      pipeline:
+      - {class: power.RebootDevice, name: soft-reboot}
+      - {class: power.PDUReboot, name: pdu_reboot}
+      - {class: power.PowerOn, name: power_on}
+    - {class: actions.boot.u_boot.UBootInterrupt, name: u-boot-interrupt}
+    - {class: actions.boot.u_boot.ExpectBootloaderSession, name: expect-bootloader-connection}
+    - {class: actions.boot.u_boot.UBootCommandsAction, name: u-boot-commands}
+    - {class: actions.boot.AutoLoginAction, name: auto-login-action}
+    - {class: shell.ExpectShellSession, name: expect-shell-connection}
+    - {class: actions.boot.environment.ExportDeviceEnvironment, name: export-device-env}
+- class: actions.test.shell.TestShellRetry
+  name: lava-test-retry
+  pipeline:
+  - {class: actions.test.shell.TestShellAction, name: lava-test-shell}
+- class: power.FinalizeAction
+  name: finalize
+  pipeline:
+  - {class: power.PowerOff, name: power_off}
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/bbb-ssh-guest.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/bbb-ssh-guest.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/bbb-ssh-guest.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/bbb-ssh-guest.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,47 @@
+- class: actions.deploy.ssh.ScpOverlay
+  name: scp-overlay
+  pipeline:
+  - class: actions.deploy.overlay.OverlayAction
+    name: lava-overlay
+    pipeline:
+    - {class: actions.deploy.overlay.SshAuthorize, name: ssh-authorize}
+    - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
+    - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
+    - class: actions.deploy.testdef.TestDefinitionAction
+      name: test-definition
+      pipeline:
+      - {class: actions.deploy.testdef.GitRepoAction, name: git-repo-action}
+      - {class: actions.deploy.testdef.TestOverlayAction, name: test-overlay}
+      - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
+      - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
+      - {class: actions.deploy.testdef.GitRepoAction, name: git-repo-action}
+      - {class: actions.deploy.testdef.TestOverlayAction, name: test-overlay}
+      - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
+      - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
+      - {class: actions.deploy.testdef.InlineRepoAction, name: inline-repo-action}
+      - {class: actions.deploy.testdef.TestOverlayAction, name: test-overlay}
+      - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
+      - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
+    - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+    - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
+  - class: actions.deploy.ssh.PrepareOverlayScp
+    name: prepare-scp-overlay
+    pipeline:
+    - {class: actions.deploy.apply_overlay.ExtractRootfs, name: extract-rootfs}
+    - {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
+  - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
+- class: actions.boot.ssh.SshAction
+  name: login-ssh
+  pipeline:
+  - {class: actions.boot.ssh.Scp, name: scp-deploy}
+  - {class: actions.boot.ssh.PrepareSsh, name: prepare-ssh}
+  - {class: connections.ssh.ConnectSsh, name: ssh-connection}
+  - {class: actions.boot.AutoLoginAction, name: auto-login-action}
+  - {class: shell.ExpectShellSession, name: expect-shell-connection}
+  - {class: actions.boot.environment.ExportDeviceEnvironment, name: export-device-env}
+  - {class: actions.boot.ssh.ScpOverlayUnpack, name: scp-overlay-unpack}
+- {class: actions.test.multinode.MultinodeTestAction, name: multinode-test}
+- class: power.FinalizeAction
+  name: finalize
+  pipeline:
+  - {class: power.PowerOff, name: power_off}
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/cubietruck-removable.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/cubietruck-removable.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/cubietruck-removable.yaml	2015-09-03 13:41:15.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/cubietruck-removable.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -1,52 +1,43 @@
-  [{'class': 'actions.deploy.removable.MassStorage',
-    'name': 'storage-deploy',
-    'pipeline': [{'class': 'actions.deploy.download.DownloaderAction',
-                  'name': 'download_retry',
-                  'pipeline': [{'class': 'actions.deploy.download.HttpDownloadAction',
-                                'name': 'http_download'}]},
-                 {'class': 'actions.deploy.removable.DDAction',
-                  'name': 'dd-image'},
-                 {'class': 'actions.deploy.overlay.CustomisationAction',
-                  'name': 'customise'},
-                 {'class': 'actions.deploy.overlay.OverlayAction',
-                  'name': 'lava-overlay',
-                  'pipeline': [{'class': 'actions.deploy.overlay.MultinodeOverlayAction',
-                                'name': 'lava-multinode-overlay'},
-                               {'class': 'actions.deploy.testdef.TestDefinitionAction',
-                                'name': 'test-definition'},
-                               {'class': 'actions.deploy.overlay.CompressOverlay',
-                               'name': 'compress-overlay'}]},
-                 {'class': 'actions.deploy.environment.DeployDeviceEnvironment',
-                  'name': 'deploy-device-env'}]},
-   {'class': 'actions.boot.u_boot.UBootAction',
-    'name': 'uboot-action',
-    'pipeline': [{'class': 'actions.boot.u_boot.UBootSecondaryMedia',
-                  'name': 'uboot-from-media'},
-                 {'class': 'actions.boot.u_boot.UBootCommandOverlay',
-                  'name': 'uboot-overlay'},
-                 {'class': 'connections.serial.ConnectDevice', 'name': 'connect-device'},
-                 {'class': 'actions.boot.u_boot.UBootRetry',
-                  'name': 'uboot-retry',
-                  'pipeline': [{'class': 'power.ResetDevice',
-                                'name': 'reboot-device',
-                                'pipeline': [{'class': 'power.RebootDevice',
-                                              'name': 'soft-reboot'},
-                                             {'class': 'power.PDUReboot',
-                                              'name': 'pdu_reboot'},
-                                             {'class': 'power.PowerOn',
-                                              'name': 'power_on'}]},
-                               {'class': 'actions.boot.u_boot.UBootInterrupt',
-                                'name': 'u-boot-interrupt'},
-                               {'class': 'actions.boot.u_boot.ExpectBootloaderSession',
-                                'name': 'expect-bootloader-connection'},
-                               {'class': 'actions.boot.u_boot.UBootCommandsAction',
-                                'name': 'u-boot-commands'},
-                               {'class': 'actions.boot.AutoLoginAction',
-                                'name': 'auto-login-action'},
-                               {'class': 'shell.ExpectShellSession',
-                                'name': 'expect-shell-connection'},
-                               {'class': 'actions.boot.environment.ExportDeviceEnvironment',
-                                'name': 'export-device-env'}]}]},
-   {'class': 'power.FinalizeAction',
-    'name': 'finalize',
-    'pipeline': [{'class': 'power.PowerOff', 'name': 'power_off'}]}]
+- class: actions.deploy.removable.MassStorage
+  name: storage-deploy
+  pipeline:
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - {class: actions.deploy.removable.DDAction, name: dd-image}
+  - {class: actions.deploy.overlay.CustomisationAction, name: customise}
+  - class: actions.deploy.overlay.OverlayAction
+    name: lava-overlay
+    pipeline:
+    - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
+    - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
+    - {class: actions.deploy.testdef.TestDefinitionAction, name: test-definition}
+    - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+    - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
+  - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
+- class: actions.boot.u_boot.UBootAction
+  name: uboot-action
+  pipeline:
+  - {class: actions.boot.u_boot.UBootSecondaryMedia, name: uboot-from-media}
+  - {class: actions.boot.u_boot.UBootCommandOverlay, name: uboot-overlay}
+  - {class: connections.serial.ConnectDevice, name: connect-device}
+  - class: actions.boot.u_boot.UBootRetry
+    name: uboot-retry
+    pipeline:
+    - class: power.ResetDevice
+      name: reboot-device
+      pipeline:
+      - {class: power.RebootDevice, name: soft-reboot}
+      - {class: power.PDUReboot, name: pdu_reboot}
+      - {class: power.PowerOn, name: power_on}
+    - {class: actions.boot.u_boot.UBootInterrupt, name: u-boot-interrupt}
+    - {class: actions.boot.u_boot.ExpectBootloaderSession, name: expect-bootloader-connection}
+    - {class: actions.boot.u_boot.UBootCommandsAction, name: u-boot-commands}
+    - {class: actions.boot.AutoLoginAction, name: auto-login-action}
+    - {class: shell.ExpectShellSession, name: expect-shell-connection}
+    - {class: actions.boot.environment.ExportDeviceEnvironment, name: export-device-env}
+- class: power.FinalizeAction
+  name: finalize
+  pipeline:
+  - {class: power.PowerOff, name: power_off}
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/fastboot.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/fastboot.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/fastboot.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/fastboot.yaml	2016-03-02 14:34:40.000000000 +0000
@@ -0,0 +1,50 @@
+- class: actions.deploy.fastboot.FastbootAction
+  name: fastboot-deploy
+  pipeline:
+  - class: actions.deploy.overlay.OverlayAction
+    name: lava-overlay
+    pipeline:
+    - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
+    - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
+    - class: actions.deploy.testdef.TestDefinitionAction
+      name: test-definition
+      pipeline:
+      - {class: actions.deploy.testdef.GitRepoAction, name: git-repo-action}
+      - {class: actions.deploy.testdef.TestOverlayAction, name: test-overlay}
+      - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
+      - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
+    - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+    - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
+  - {class: actions.deploy.fastboot.EnterFastbootAction, name: enter_fastboot_action}
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - {class: actions.deploy.fastboot.ApplyUserdataAction, name: fastboot_apply_userdata_action}
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - {class: actions.deploy.fastboot.ApplyBootAction, name: fastboot_apply_boot_action}
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - {class: actions.deploy.fastboot.ApplySystemAction, name: fastboot_apply_system_action}
+- class: actions.boot.fastboot.BootFastbootAction
+  name: fastboot_boot
+  pipeline:
+  - {class: actions.boot.fastboot.FastbootAction, name: boot-fastboot}
+  - {class: connections.adb.WaitForAdbDevice, name: wait-for-adb-device}
+  - {class: connections.adb.ConnectAdb, name: connect-adb}
+  - {class: actions.boot.AutoLoginAction, name: auto-login-action}
+  - {class: shell.ExpectShellSession, name: expect-shell-connection}
+  - {class: actions.boot.fastboot.AdbOverlayUnpack, name: adb-overlay-unpack}
+- class: actions.test.shell.TestShellRetry
+  name: lava-test-retry
+  pipeline:
+  - {class: actions.test.shell.TestShellAction, name: lava-test-shell}
+- class: power.FinalizeAction
+  name: finalize
+  pipeline:
+  - {class: power.PowerOff, name: power_off}
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/ipxe.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/ipxe.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/ipxe.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/ipxe.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,69 @@
+- class: actions.deploy.tftp.TftpAction
+  name: tftp-deploy
+  pipeline:
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - class: actions.deploy.apply_overlay.PrepareOverlayTftp
+    name: prepare-tftp-overlay
+    pipeline:
+    - {class: actions.deploy.apply_overlay.ExtractNfsRootfs, name: extract-nfsrootfs}
+    - class: actions.deploy.overlay.OverlayAction
+      name: lava-overlay
+      pipeline:
+      - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
+      - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
+      - class: actions.deploy.testdef.TestDefinitionAction
+        name: test-definition
+        pipeline:
+        - {class: actions.deploy.testdef.GitRepoAction, name: git-repo-action}
+        - {class: actions.deploy.testdef.TestOverlayAction, name: test-overlay}
+        - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
+        - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
+        - {class: actions.deploy.testdef.GitRepoAction, name: git-repo-action}
+        - {class: actions.deploy.testdef.TestOverlayAction, name: test-overlay}
+        - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
+        - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
+      - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+      - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
+    - {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
+    - {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
+    - {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
+    - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
+  - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
+- class: actions.boot.ipxe.BootloaderAction
+  name: bootloader-action
+  pipeline:
+  - {class: actions.boot.ipxe.BootloaderCommandOverlay, name: bootloader-overlay}
+  - {class: connections.serial.ConnectDevice, name: connect-device}
+  - class: actions.boot.ipxe.BootloaderRetry
+    name: bootloader-retry
+    pipeline:
+    - class: power.ResetDevice
+      name: reboot-device
+      pipeline:
+      - {class: power.RebootDevice, name: soft-reboot}
+      - {class: power.PDUReboot, name: pdu_reboot}
+      - {class: power.PowerOn, name: power_on}
+    - {class: actions.boot.ipxe.BootloaderInterrupt, name: bootloader-interrupt}
+    - {class: actions.boot.ipxe.BootloaderCommandsAction, name: bootloader-commands}
+    - {class: actions.boot.AutoLoginAction, name: auto-login-action}
+    - {class: shell.ExpectShellSession, name: expect-shell-connection}
+    - {class: actions.boot.environment.ExportDeviceEnvironment, name: export-device-env}
+- class: actions.test.shell.TestShellRetry
+  name: lava-test-retry
+  pipeline:
+  - {class: actions.test.shell.TestShellAction, name: lava-test-shell}
+- class: power.FinalizeAction
+  name: finalize
+  pipeline:
+  - {class: power.PowerOff, name: power_off}
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/kexec.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/kexec.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/kexec.yaml	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/kexec.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -21,6 +21,7 @@
       name: lava-overlay
       pipeline:
       - {'class': actions.deploy.overlay.SshAuthorize, 'name': ssh-authorize}
+      - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
       - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
       - class: actions.deploy.testdef.TestDefinitionAction
         name: test-definition
@@ -30,6 +31,7 @@
         - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
         - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
       - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+      - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
     - {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
     - {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
     - {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-inline.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-inline.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-inline.yaml	2015-09-03 13:41:15.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-inline.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -1,5 +1,5 @@
-- class: actions.deploy.image.DeployImageAction
-  name: deployimage
+- class: actions.deploy.image.DeployImagesAction
+  name: deployimages
   pipeline:
   - class: actions.deploy.download.DownloaderAction
     name: download_retry
@@ -15,6 +15,7 @@
   - class: actions.deploy.overlay.OverlayAction
     name: lava-overlay
     pipeline:
+    - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
     - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
     - class: actions.deploy.testdef.TestDefinitionAction
       name: test-definition
@@ -24,6 +25,7 @@
       - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
       - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
     - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+    - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
   - {class: actions.deploy.apply_overlay.ApplyOverlayImage, name: apply-overlay-image}
   - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
   - class: actions.deploy.mount.UnmountAction
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-local.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-local.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-local.yaml	2015-09-03 13:41:15.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-local.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -1,5 +1,5 @@
-- class: actions.deploy.image.DeployImageAction
-  name: deployimage
+- class: actions.deploy.image.DeployImagesAction
+  name: deployimages
   pipeline:
   - class: actions.deploy.download.DownloaderAction
     name: download_retry
@@ -15,6 +15,7 @@
   - class: actions.deploy.overlay.OverlayAction
     name: lava-overlay
     pipeline:
+    - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
     - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
     - class: actions.deploy.testdef.TestDefinitionAction
       name: test-definition
@@ -28,6 +29,7 @@
       - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
       - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
     - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+    - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
   - {class: actions.deploy.apply_overlay.ApplyOverlayImage, name: apply-overlay-image}
   - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
   - class: actions.deploy.mount.UnmountAction
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-qcow2.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-qcow2.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-qcow2.yaml	2015-09-03 13:41:15.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-qcow2.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -1,5 +1,5 @@
-- class: actions.deploy.image.DeployImageAction
-  name: deployimage
+- class: actions.deploy.image.DeployImagesAction
+  name: deployimages
   pipeline:
   - class: actions.deploy.download.DownloaderAction
     name: download_retry
@@ -16,6 +16,7 @@
   - class: actions.deploy.overlay.OverlayAction
     name: lava-overlay
     pipeline:
+    - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
     - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
     - class: actions.deploy.testdef.TestDefinitionAction
       name: test-definition
@@ -29,6 +30,7 @@
       - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
       - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
     - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+    - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
   - {class: actions.deploy.apply_overlay.ApplyOverlayImage, name: apply-overlay-image}
   - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
   - class: actions.deploy.mount.UnmountAction
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-repeat.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-repeat.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-repeat.yaml	2015-09-03 13:41:15.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm-repeat.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -1,5 +1,5 @@
-- class: actions.deploy.image.DeployImageAction
-  name: deployimage
+- class: actions.deploy.image.DeployImagesAction
+  name: deployimages
   pipeline:
   - class: actions.deploy.download.DownloaderAction
     name: download_retry
@@ -15,6 +15,7 @@
   - class: actions.deploy.overlay.OverlayAction
     name: lava-overlay
     pipeline:
+    - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
     - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
     - class: actions.deploy.testdef.TestDefinitionAction
       name: test-definition
@@ -44,6 +45,7 @@
       - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
       - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
     - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+    - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
   - {class: actions.deploy.apply_overlay.ApplyOverlayImage, name: apply-overlay-image}
   - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
   - class: actions.deploy.mount.UnmountAction
@@ -229,8 +231,8 @@
   - {class: shell.ExpectShellSession, name: expect-shell-connection}
   - {class: actions.boot.environment.ExportDeviceEnvironment, name: export-device-env}
 - {class: job.ResetContext, name: reset-context}
-- class: actions.deploy.image.DeployImageAction
-  name: deployimage
+- class: actions.deploy.image.DeployImagesAction
+  name: deployimages
   pipeline:
   - class: actions.deploy.download.DownloaderAction
     name: download_retry
@@ -246,6 +248,7 @@
   - class: actions.deploy.overlay.OverlayAction
     name: lava-overlay
     pipeline:
+    - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
     - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
     - class: actions.deploy.testdef.TestDefinitionAction
       name: test-definition
@@ -275,6 +278,7 @@
       - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
       - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
     - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+    - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
   - {class: actions.deploy.apply_overlay.ApplyOverlayImage, name: apply-overlay-image}
   - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
   - class: actions.deploy.mount.UnmountAction
@@ -299,4 +303,3 @@
   name: finalize
   pipeline:
   - {class: power.PowerOff, name: power_off}
-
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm.yaml	2015-09-03 13:41:15.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/kvm.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -1,5 +1,5 @@
-- class: actions.deploy.image.DeployImageAction
-  name: deployimage
+- class: actions.deploy.image.DeployImagesAction
+  name: deployimages
   pipeline:
   - class: actions.deploy.download.DownloaderAction
     name: download_retry
@@ -15,6 +15,7 @@
   - class: actions.deploy.overlay.OverlayAction
     name: lava-overlay
     pipeline:
+    - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
     - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
     - class: actions.deploy.testdef.TestDefinitionAction
       name: test-definition
@@ -28,6 +29,42 @@
       - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
       - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
     - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+    - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
+  - {class: actions.deploy.apply_overlay.ApplyOverlayImage, name: apply-overlay-image}
+  - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
+  - class: actions.deploy.mount.UnmountAction
+    name: umount-retry
+    pipeline:
+    - {class: actions.deploy.mount.Unmount, name: umount}
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - class: actions.deploy.mount.MountAction
+    name: mount_action
+    pipeline:
+    - {class: actions.deploy.mount.OffsetAction, name: offset_action}
+    - {class: actions.deploy.mount.LoopCheckAction, name: loop_check}
+    - {class: actions.deploy.mount.LoopMountAction, name: loop_mount}
+  - {class: actions.deploy.overlay.CustomisationAction, name: customise}
+  - class: actions.deploy.overlay.OverlayAction
+    name: lava-overlay
+    pipeline:
+    - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
+    - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
+    - class: actions.deploy.testdef.TestDefinitionAction
+      name: test-definition
+      pipeline:
+      - {class: actions.deploy.testdef.GitRepoAction, name: git-repo-action}
+      - {class: actions.deploy.testdef.TestOverlayAction, name: test-overlay}
+      - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
+      - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
+      - {class: actions.deploy.testdef.GitRepoAction, name: git-repo-action}
+      - {class: actions.deploy.testdef.TestOverlayAction, name: test-overlay}
+      - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
+      - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
+    - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+    - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
   - {class: actions.deploy.apply_overlay.ApplyOverlayImage, name: apply-overlay-image}
   - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
   - class: actions.deploy.mount.UnmountAction
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/mustang-uefi.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/mustang-uefi.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/mustang-uefi.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/mustang-uefi.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,67 @@
+- class: actions.deploy.tftp.TftpAction
+  name: tftp-deploy
+  pipeline:
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - class: actions.deploy.download.DownloaderAction
+    name: download_retry
+    pipeline:
+    - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+  - class: actions.deploy.apply_overlay.PrepareOverlayTftp
+    name: prepare-tftp-overlay
+    pipeline:
+    - {class: actions.deploy.apply_overlay.ExtractNfsRootfs, name: extract-nfsrootfs}
+    - class: actions.deploy.overlay.OverlayAction
+      name: lava-overlay
+      pipeline:
+      - {class: actions.deploy.overlay.SshAuthorize, name: ssh-authorize}
+      - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
+      - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
+      - class: actions.deploy.testdef.TestDefinitionAction
+        name: test-definition
+        pipeline:
+        - {class: actions.deploy.testdef.GitRepoAction, name: git-repo-action}
+        - {class: actions.deploy.testdef.TestOverlayAction, name: test-overlay}
+        - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
+        - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
+        - {class: actions.deploy.testdef.GitRepoAction, name: git-repo-action}
+        - {class: actions.deploy.testdef.TestOverlayAction, name: test-overlay}
+        - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
+        - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
+      - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+      - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
+    - {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
+    - {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
+    - {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
+    - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
+  - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
+- class: actions.boot.uefi_menu.UefiMenuAction
+  name: uefi-menu-action
+  pipeline:
+  - {class: actions.boot.uefi_menu.UefiSubstituteCommands, name: uefi-commands}
+  - {class: menus.menus.MenuConnect, name: menu-connect}
+  - class: power.ResetDevice
+    name: reboot-device
+    pipeline:
+    - {class: power.RebootDevice, name: soft-reboot}
+    - {class: power.PDUReboot, name: pdu_reboot}
+    - {class: power.PowerOn, name: power_on}
+  - {class: actions.boot.uefi_menu.UEFIMenuInterrupt, name: menu-interrupt}
+  - {class: actions.boot.uefi_menu.UefiMenuSelector, name: uefi-menu-selector}
+  - {class: menus.menus.MenuReset, name: menu-reset}
+  - {class: actions.boot.AutoLoginAction, name: auto-login-action}
+  - {class: actions.boot.environment.ExportDeviceEnvironment, name: export-device-env}
+- class: actions.test.shell.TestShellRetry
+  name: lava-test-retry
+  pipeline:
+  - {class: actions.test.shell.TestShellAction, name: lava-test-shell}
+- class: power.FinalizeAction
+  name: finalize
+  pipeline:
+  - {class: power.PowerOff, name: power_off}
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/ssh-deploy.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/ssh-deploy.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/ssh-deploy.yaml	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/ssh-deploy.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -5,6 +5,7 @@
     name: lava-overlay
     pipeline:
     - {class: actions.deploy.overlay.SshAuthorize, name: ssh-authorize}
+    - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
     - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
     - class: actions.deploy.testdef.TestDefinitionAction
       name: test-definition
@@ -18,17 +19,19 @@
       - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
       - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
     - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+    - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
   - class: actions.deploy.ssh.PrepareOverlayScp
     name: prepare-scp-overlay
     pipeline:
     - {class: actions.deploy.apply_overlay.ExtractRootfs, name: extract-rootfs}
     - {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
   - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
-  - {class: connections.ssh.Scp, name: scp-deploy}
 - class: actions.boot.ssh.SshAction
   name: login-ssh
   pipeline:
-  - {class: connections.ssh.ConnectSsh, name: primary-ssh}
+  - {class: actions.boot.ssh.Scp, name: scp-deploy}
+  - {class: actions.boot.ssh.PrepareSsh, name: prepare-ssh}
+  - {class: connections.ssh.ConnectSsh, name: ssh-connection}
   - {class: actions.boot.AutoLoginAction, name: auto-login-action}
   - {class: shell.ExpectShellSession, name: expect-shell-connection}
   - {class: actions.boot.environment.ExportDeviceEnvironment, name: export-device-env}
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/ssh-guest.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/ssh-guest.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/ssh-guest.yaml	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/ssh-guest.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -5,6 +5,7 @@
     name: lava-overlay
     pipeline:
     - {class: actions.deploy.overlay.SshAuthorize, name: ssh-authorize}
+    - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
     - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
     - class: actions.deploy.testdef.TestDefinitionAction
       name: test-definition
@@ -22,17 +23,19 @@
       - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
       - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
     - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+    - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
   - class: actions.deploy.ssh.PrepareOverlayScp
     name: prepare-scp-overlay
     pipeline:
     - {class: actions.deploy.apply_overlay.ExtractRootfs, name: extract-rootfs}
     - {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
   - {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
-  - {class: connections.ssh.Scp, name: scp-deploy}
 - class: actions.boot.ssh.SshAction
   name: login-ssh
   pipeline:
-  - {class: connections.ssh.ConnectSsh, name: primary-ssh}
+  - {class: actions.boot.ssh.Scp, name: scp-deploy}
+  - {class: actions.boot.ssh.PrepareSsh, name: prepare-ssh}
+  - {class: connections.ssh.ConnectSsh, name: ssh-connection}
   - {class: actions.boot.AutoLoginAction, name: auto-login-action}
   - {class: shell.ExpectShellSession, name: expect-shell-connection}
   - {class: actions.boot.environment.ExportDeviceEnvironment, name: export-device-env}
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/uboot-multiple.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/uboot-multiple.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/uboot-multiple.yaml	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/uboot-multiple.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -21,9 +21,11 @@
       name: lava-overlay
       pipeline:
       - {'class': actions.deploy.overlay.SshAuthorize, 'name': ssh-authorize}
+      - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
       - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
       - {class: actions.deploy.testdef.TestDefinitionAction, name: test-definition}
       - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+      - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
     - {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
     - {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
     - {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
@@ -74,9 +76,11 @@
       name: lava-overlay
       pipeline:
       - {'class': actions.deploy.overlay.SshAuthorize, 'name': ssh-authorize}
+      - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
       - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
       - {class: actions.deploy.testdef.TestDefinitionAction, name: test-definition}
       - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+      - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
     - {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
     - {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
     - {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/uboot.yaml 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/uboot.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/pipeline_refs/uboot.yaml	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/pipeline_refs/uboot.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -21,6 +21,7 @@
       name: lava-overlay
       pipeline:
       - {'class': actions.deploy.overlay.SshAuthorize, 'name': ssh-authorize}
+      - {class: actions.deploy.overlay.VlandOverlayAction, name: lava-vland-overlay}
       - {class: actions.deploy.overlay.MultinodeOverlayAction, name: lava-multinode-overlay}
       - class: actions.deploy.testdef.TestDefinitionAction
         name: test-definition
@@ -34,6 +35,7 @@
         - {class: actions.deploy.testdef.TestInstallAction, name: test-install-overlay}
         - {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
       - {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
+      - {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
     - {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
     - {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
     - {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/basics.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/basics.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/basics.yaml	2015-09-09 14:31:19.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/basics.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -28,7 +28,10 @@ actions:
           minutes: 20
           seconds: 0
         to: sata
-        image: http://myimage.com/target.img.gz
+        images:
+          rootfs:
+            url: http://myimage.com/target.img.gz
+            compression: gz
         os: debian
         # if root_partition partition is not present:
         # - look for a partitions labelled "root" or "ROOT" or "Root" (i.e. case insensitive)
@@ -62,6 +65,9 @@ actions:
         kernel: http://myimage.com/zImage
         kernel_args: "bla bla bla"
         failure_retry: 10
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - test:
         failure_retry: 6
@@ -69,10 +75,6 @@ actions:
         definitions:
             - from: url
               path: file:///path/to/testdef.yaml
-              # name: if not present, use the name from the YAML. The name can
-              # also be overriden from the actualy commands being run by
-              # calling the lava-test-suite-name API call (e.g.
-              # `lava-test-suite-name FOO`).
               name: ptest-dbus
             - from: url
               path: http:///url.to/testdef.yaml
@@ -87,6 +89,9 @@ actions:
           minutes: 20
         method: block
         media: sata
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - boot:
         timeout:
@@ -95,10 +100,16 @@ actions:
         media: usb
         kernel: http://myimage.com/zImage
         boot_cmds: “blah blah blah”
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - boot:
         method: fastboot
         media: whatever
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - repeat:
         count: 10
@@ -107,6 +118,9 @@ actions:
         - boot:
             method: block
             media: sata
+            prompts:
+              - 'linaro-test'
+              - 'root@debian:~#'
         - test:
             definitions:
               - from: url
@@ -117,6 +131,9 @@ actions:
         - boot:
             method: fastboot
             media: whatever
+            prompts:
+              - 'linaro-test'
+              - 'root@debian:~#'
 
 
     # includes other YAML test definitions at this point
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-group-vland-alpha.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-group-vland-alpha.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-group-vland-alpha.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-group-vland-alpha.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,60 @@
+device_type: beaglebone-black
+job_name: bbb-vland-test
+timeouts:
+  job:
+    minutes: 30
+  action:
+    minutes: 3
+
+protocols:
+  lava-multinode:
+    target_group: arbitrary-group-id  # only for unittest support
+    role: alpha
+    roles:
+      bbb1: alpha
+      bbb2: beta
+    group_size: 2
+    sub_id: 0
+    timeout:
+      minutes: 10
+  # interface, switch and port come from device config
+  lava-vland:
+    vlan_one:
+      tags:
+      - 1G
+    vlan_two:
+      tags:
+      - 1G
+
+priority: medium
+visibility: public
+
+actions:
+  - deploy:
+     to: tftp
+     kernel:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/zImage
+     nfsrootfs:
+         url: http://images.validation.linaro.org/debian-jessie-rootfs.tar.gz
+         compression: gz
+     os: debian
+     dtb:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
+     protocols:
+       lava-vland:
+       - action: prepare-vland
+         request: deploy_vlans
+  - test:
+     failure_retry: 3
+     name: kvm-basic-singlenode
+     timeout:
+       minutes: 5
+     definitions:
+         - repository: git://git.linaro.org/qa/test-definitions.git
+           from: git
+           path: ubuntu/smoke-tests-basic.yaml
+           name: smoke-tests
+         - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
+           from: git
+           path: lava-test-shell/single-node/singlenode03.yaml
+           name: singlenode-advanced
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-group-vland-beta.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-group-vland-beta.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-group-vland-beta.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-group-vland-beta.yaml	2016-03-02 14:34:40.000000000 +0000
@@ -0,0 +1,50 @@
+device_type: beaglebone-black
+job_name: bbb-vland-test
+timeouts:
+  job:
+    minutes: 30
+  action:
+    minutes: 3
+
+protocols:
+  lava-multinode:
+    target_group: arbitrary-group-id  # only for unittest support
+    role: alpha
+    roles:
+      bbb1: alpha
+      bbb2: beta
+    group_size: 2
+    sub_id: 1
+    timeout:
+      minutes: 10
+  lava-vland:
+    vlan_one:
+      tags:
+      - 1G
+    vlan_two:
+      tags:
+      - 1G
+
+priority: medium
+visibility: public
+
+actions:
+  - deploy:
+      protocols:
+        lava-vland:
+        - action: prepare-vland
+          request: deploy_vlans
+  - test:
+     failure_retry: 3
+     name: kvm-basic-singlenode
+     timeout:
+       minutes: 5
+     definitions:
+         - repository: git://git.linaro.org/qa/test-definitions.git
+           from: git
+           path: ubuntu/smoke-tests-basic.yaml
+           name: smoke-tests
+         - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
+           from: git
+           path: lava-test-shell/single-node/singlenode03.yaml
+           name: singlenode-advanced
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-nfs-url.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-nfs-url.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-nfs-url.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-nfs-url.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,58 @@
+# Sample JOB definition for a u-boot job
+
+device_type: beaglebone-black
+
+job_name: uboot-persistent-nfs
+timeouts:
+  job:
+    minutes: 15
+  action:
+    minutes: 5
+priority: medium
+
+actions:
+  - deploy:
+     timeout:
+       minutes: 2
+     to: tftp
+     kernel:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/zImage
+     dtb:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
+     nfs_url: "127.0.0.1:/var/lib/lava/dispatcher/tmp/armhf/jessie"
+     os: debian
+
+  - boot:
+     method: u-boot
+     commands: nfs
+     type: bootz
+     prompts:
+     - 'linaro-test'
+     - 'root@debian:~#'
+     auto_login:
+       login_prompt: 'login:'
+       username: root
+       password_prompt: 'Password:'
+       password: root
+     parameters:
+      shutdown-message: "reboot: Restarting system"
+
+  - test:
+     failure_retry: 3
+     name: kvm-basic-singlenode  # if not present, use "test $N"
+     # only s, m & h are supported.
+     timeout:
+       minutes: 5 # uses install:deps, so takes longer than singlenode01
+     definitions:
+         - repository: git://git.linaro.org/qa/test-definitions.git
+           from: git
+           path: ubuntu/smoke-tests-basic.yaml
+           # name: if not present, use the name from the YAML. The name can
+           # also be overriden from the actual commands being run by
+           # calling the lava-test-suite-name API call (e.g.
+           # `lava-test-suite-name FOO`).
+           name: smoke-tests
+         - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
+           from: git
+           path: lava-test-shell/single-node/singlenode03.yaml
+           name: singlenode-advanced
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-ramdisk-nfs.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-ramdisk-nfs.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-ramdisk-nfs.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-ramdisk-nfs.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,48 @@
+device_type: beaglebone-black
+
+job_name: bbb-armmp-standard-nfs
+timeouts:
+  job:
+    minutes: 15
+  action:
+    minutes: 5
+  connection:
+    minutes: 2
+priority: medium
+visibility: public
+
+actions:
+- deploy:
+    timeout:
+      minutes: 4
+    to: tftp
+    kernel:
+      url: file:///tmp/armhf/vmlinuz
+    ramdisk: 
+      url: file:///tmp/armhf/initramfs.cpio.gz
+      compression: gz
+      # the bootloader needs a u-boot header on the modified ramdisk
+      add-header: u-boot
+    modules:
+      url: file:///tmp/armhf/modules.tar.gz
+      compression: gz
+    nfsrootfs:
+      url: file:///tmp/armhf/jessie-armhf-nfs.tar.gz
+      compression: gz
+    os: oe
+    dtb:
+      url: file:///tmp/armhf/dtbs/am335x-boneblack.dtb
+
+- boot:
+    method: u-boot
+    commands: nfs
+    type: bootz
+    auto_login:
+      login_prompt: 'login:'
+      username: root
+    parameters:
+      shutdown-message: "reboot: Restarting system"
+    prompts:
+    - 'root@jessie:'
+    timeout:
+      minutes: 2
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-ssh-guest.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-ssh-guest.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-ssh-guest.yaml	2015-09-04 07:44:33.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/bbb-ssh-guest.yaml	2016-02-02 08:07:05.000000000 +0000
@@ -4,17 +4,25 @@ actions:
     os: debian
     protocols:
       lava-multinode:
-        api: lava-wait
-        id: ipv4
-        key: ipaddr
-        timeout: {minutes: 5}
+        - action: prepare-scp-overlay
+          request: lava-wait
+          messageID: ipv4
+          message:
+              ipaddr: $ipaddr
+          timeout: {minutes: 5}
     role: guest
     timeout: {seconds: 30}
     to: ssh
 - boot:
     method: ssh
     role: guest
+    parameters:
+      hostID: ipv4
+      host_key: ipaddr
     timeout: {minutes: 3}
+    prompts:
+      - 'linaro-test'
+      - 'root@debian:~#'
 - test:
     definitions:
     - {from: git, name: smoke-tests, path: ubuntu/smoke-tests-basic.yaml, repository: 'git://git.linaro.org/qa/test-definitions.git'}
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/cubietruck-removable.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/cubietruck-removable.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/cubietruck-removable.yaml	2015-07-30 09:30:47.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/cubietruck-removable.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -18,7 +18,9 @@ actions:
           minutes: 10
         to: usb
         os: debian
-        image: http://releases.linaro.org/12.02/ubuntu/leb-panda/panda-ubuntu-desktop.img.gz
+        image:
+            url: http://images.validation.linaro.org/functional-test-images/panda/panda-raring_developer_20130723-408.img.gz
+            compression: gz
         device: SanDisk_Ultra # needs to be exposed in the device-specific UI
         download: /usr/bin/wget
 
@@ -34,3 +36,6 @@ actions:
         root_uuid: UUID=159d17cc-697c-4125-95a0-a3775e1deabe  # comes from the supplied image.
         boot_part: 1  # the partition on the media from which the bootloader can read the kernel, ramdisk & dtb
         type: bootz
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/fastboot.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/fastboot.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/fastboot.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/fastboot.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -0,0 +1,43 @@
+# Sample JOB definition for a fastboot device such as Nexus4
+device_type: nexus4
+job_name: nexus4-pipeline
+timeouts:
+  job:
+    minutes: 60            # timeout for the whole job (default: ??h)
+  action:
+    minutes: 15         # default timeout applied for each action; can be overridden in the action itself (default: ?h)
+priority: medium
+
+actions:
+  - deploy:
+      timeout:
+        minutes: 15
+      to: fastboot
+      images:
+        boot:
+          url: http://images.validation.linaro.org/functional-test-images/nexus4/boot.img
+
+        userdata:
+          url: http://images.validation.linaro.org/functional-test-images/nexus4/userdata.img
+        system:
+          url: http://images.validation.linaro.org/functional-test-images/nexus4/system.img
+      os: android
+
+  - boot:
+      timeout:
+        minutes: 15
+      method: fastboot
+      prompts:
+        - 'shell@mako:/ $'
+        - 'shell@mako'
+
+  - test:
+      failure_retry: 3
+      name: nexus4-basic
+      timeout:
+        minutes: 5
+      definitions:
+        - repository: git://git.linaro.org/people/senthil.kumaran/test-definitions.git
+          from: git
+          path: android/echo-pass-fail-error-test.yaml
+          name: echo-pass-fail-error-test
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe-nfs.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe-nfs.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe-nfs.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe-nfs.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,53 @@
+# Sample JOB definition for an ipxe job
+
+device_type: x86
+
+job_name: x86-pipeline
+timeouts:
+  job:
+    minutes: 15            # timeout for the whole job (default: ??h)
+  action:
+   minutes: 5         # default timeout applied for each action; can be overridden in the action itself (default: ?h)
+priority: medium
+
+actions:
+
+  # needs to be a list of hashes to retain the order
+  - deploy:
+     timeout:
+       minutes: 2
+     to: tftp
+     kernel:
+         url: http://ironhide.bounceme.net/x86-32/bzImage
+     nfsrootfs:
+         url: http://totaljunk/fail.tar.xz
+         compression: xz
+     modules:
+         url: http://ironhide.bounceme.net/x86-32/modules.tar
+     os: oe
+
+  - boot:
+     method: ipxe
+     commands: nfs
+     parameters:
+       shutdown-message: "reboot: Restarting system"
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
+       - '/ #'
+
+  - test:
+     failure_retry: 3
+     name: kvm-basic-singlenode  # if not present, use "test $N"
+     # only s, m & h are supported.
+     timeout:
+       minutes: 5 # uses install:deps, so takes longer than singlenode01
+     definitions:
+         - repository: git://git.linaro.org/qa/test-definitions.git
+           from: git
+           path: ubuntu/smoke-tests-basic.yaml
+           name: smoke-tests
+         - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
+           from: git
+           path: lava-test-shell/single-node/singlenode03.yaml
+           name: singlenode-advanced
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe-ramdisk-bootscript.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe-ramdisk-bootscript.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe-ramdisk-bootscript.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe-ramdisk-bootscript.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,58 @@
+# Sample JOB definition for an ipxe job
+
+device_type: x86
+
+job_name: x86-pipeline
+timeouts:
+  job:
+    minutes: 15            # timeout for the whole job (default: ??h)
+  action:
+   minutes: 5         # default timeout applied for each action; can be overridden in the action itself (default: ?h)
+priority: medium
+
+# example old-style job: https://staging.validation.linaro.org/scheduler/job/113682/definition
+
+actions:
+
+  # needs to be a list of hashes to retain the order
+  - deploy:
+     timeout:
+       minutes: 2
+     parameters:
+       use_bootscript: True
+     to: tftp
+     kernel:
+         url: http://ironhide.bounceme.net/x86-32/bzImage
+     ramdisk:
+         url: http://ironhide.bounceme.net/x86-32/rootfs.cpio.gz
+         compression: gz
+     modules:
+         url: http://ironhide.bounceme.net/x86-32/modules.tar.xz
+         compression: xz
+     os: oe
+
+  - boot:
+     method: ipxe
+     commands: ramdisk
+     parameters:
+       shutdown-message: "reboot: Restarting system"
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
+       - '/ #'
+
+  - test:
+     failure_retry: 3
+     name: kvm-basic-singlenode  # if not present, use "test $N"
+     # only s, m & h are supported.
+     timeout:
+       minutes: 5 # uses install:deps, so takes longer than singlenode01
+     definitions:
+         - repository: git://git.linaro.org/qa/test-definitions.git
+           from: git
+           path: ubuntu/smoke-tests-basic.yaml
+           name: smoke-tests
+         - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
+           from: git
+           path: lava-test-shell/single-node/singlenode03.yaml
+           name: singlenode-advanced
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe-ramdisk.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe-ramdisk.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe-ramdisk.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe-ramdisk.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,56 @@
+# Sample JOB definition for an ipxe job
+
+device_type: x86
+
+job_name: x86-pipeline
+timeouts:
+  job:
+    minutes: 15            # timeout for the whole job (default: ??h)
+  action:
+   minutes: 5         # default timeout applied for each action; can be overridden in the action itself (default: ?h)
+priority: medium
+
+# example old-style job: https://staging.validation.linaro.org/scheduler/job/113682/definition
+
+actions:
+
+  # needs to be a list of hashes to retain the order
+  - deploy:
+     timeout:
+       minutes: 2
+     to: tftp
+     kernel:
+         url: http://ironhide.bounceme.net/x86-32/bzImage
+     ramdisk:
+         url: http://ironhide.bounceme.net/x86-32/rootfs.cpio.gz
+         compression: gz
+     modules:
+         url: http://ironhide.bounceme.net/x86-32/modules.tar.xz
+         compression: xz
+     os: oe
+
+  - boot:
+     method: ipxe
+     commands: ramdisk
+     parameters:
+       shutdown-message: "reboot: Restarting system"
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
+       - '/ #'
+
+  - test:
+     failure_retry: 3
+     name: kvm-basic-singlenode  # if not present, use "test $N"
+     # only s, m & h are supported.
+     timeout:
+       minutes: 5 # uses install:deps, so takes longer than singlenode01
+     definitions:
+         - repository: git://git.linaro.org/qa/test-definitions.git
+           from: git
+           path: ubuntu/smoke-tests-basic.yaml
+           name: smoke-tests
+         - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
+           from: git
+           path: lava-test-shell/single-node/singlenode03.yaml
+           name: singlenode-advanced
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/ipxe.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,58 @@
+# Sample JOB definition for an ipxe job
+
+device_type: x86
+
+job_name: x86-pipeline
+timeouts:
+  job:
+    minutes: 15            # timeout for the whole job (default: ??h)
+  action:
+   minutes: 5
+  extract-nfsrootfs:
+    seconds: 90         # default timeout applied for each action; can be overridden in the action itself (default: ?h)
+priority: medium
+
+# example old-style job: https://staging.validation.linaro.org/scheduler/job/113682/definition
+
+actions:
+
+  # needs to be a list of hashes to retain the order
+  - deploy:
+     timeout:
+       minutes: 2
+     modules_compression: xz
+     to: tftp
+     kernel:
+         url: http://ironhide.bounceme.net/x86-32/bzImage
+     ramdisk:
+         url: http://ironhide.bounceme.net/x86-32/rootfs.cpio.gz
+         compression: gz
+     modules:
+         url: http://ironhide.bounceme.net/x86-32/modules.tar
+     os: oe
+
+  - boot:
+     method: ipxe
+     commands: ramdisk
+     parameters:
+       shutdown-message: "reboot: Restarting system"
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
+       - '/ #'
+
+  - test:
+     failure_retry: 3
+     name: kvm-basic-singlenode  # if not present, use "test $N"
+     # only s, m & h are supported.
+     timeout:
+       minutes: 5 # uses install:deps, so takes longer than singlenode01
+     definitions:
+         - repository: git://git.linaro.org/qa/test-definitions.git
+           from: git
+           path: ubuntu/smoke-tests-basic.yaml
+           name: smoke-tests
+         - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
+           from: git
+           path: lava-test-shell/single-node/singlenode03.yaml
+           name: singlenode-advanced
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kexec.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kexec.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kexec.yaml	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kexec.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -21,17 +21,23 @@ actions:
      to: tftp
      # a real job would need different files, these do *not* support kexec at runtime
      # this YAML is just an example.
-     kernel: http://images.validation.linaro.org/functional-test-images/bbb/zImage
+     kernel:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/zImage
      # ramdisk: some file somewhere ...
-     nfsrootfs: http://images.validation.linaro.org/debian-jessie-rootfs.tar.gz
-     rootfs_compression: gz
+     nfsrootfs:
+         url: http://images.validation.linaro.org/debian-jessie-rootfs.tar.gz
+         compression: gz
      os: debian
-     dtb: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
+     dtb:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
 
   - boot:
      method: u-boot
      commands: nfs
      type: bootz
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
 
   - test:
       failure_retry: 3
@@ -69,3 +75,6 @@ actions:
      boot_message: Booting Linux
      options:
        - "--reuse-cmdline"
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-android.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-android.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-android.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-android.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -0,0 +1,58 @@
+# Sample JOB definition for a KVM
+
+device_type: qemu
+
+job_name: qemu-arm-android-pipeline
+timeouts:
+  job:
+    minutes: 10            # timeout for the whole job (default: ??h)
+  action:
+    minutes: 2         # default timeout applied for each action; can be overridden in the action itself (default: ?h)
+
+priority: medium
+
+actions:
+
+    - deploy:
+        timeout:
+          minutes: 2
+        to: tmpfs
+        os: android
+        # if root_partition partition is not present:
+        # - look for a partition labelled "root" or "ROOT" or "Root" (i.e. case insensitive)
+        # - look into device configuration
+        root_partition: 1
+        images:
+          kernel:
+            url: http://images.validation.linaro.org/pipeline/vexpress/zImage
+            image_arg: -kernel {kernel} -append "root=/dev/ram0 console=ttyAMA0 115200 androidboot.hardware=vexpress qemu=1"
+          ramdisk:
+            url: http://images.validation.linaro.org/pipeline/vexpress/ramdisk.img
+            image_arg: -initrd {ramdisk}
+          dtb:
+            url: http://images.validation.linaro.org/pipeline/vexpress/vexpress-v2p-ca15-tc1.dtb
+            image_arg: -dtb {dtb}
+          system:
+            url: http://images.validation.linaro.org/pipeline/vexpress/system.img
+            image_arg: -device virtio-blk-device,drive=system -drive if=mtd,id=system,file={system}
+          userdata:
+            url: http://images.validation.linaro.org/pipeline/vexpress/userdata.img
+            image_arg: -device virtio-blk-device,drive=userdata -drive if=mtd,id=userdata,file={userdata}
+          cache:
+            url: http://images.validation.linaro.org/pipeline/vexpress/cache.img
+            image_arg: -device virtio-blk-device,drive=cache -drive if=mtd,id=cache,file={cache}
+
+    - boot:
+        method: qemu
+        media: tmpfs
+        timeout:
+          minutes: 5
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
+
+    - submit_results:
+        stream: /anonymous/codehelp/
+
+context:
+  arch: arm
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-inline.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-inline.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-inline.yaml	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-inline.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -18,8 +18,11 @@ actions:
         timeout:
           minutes: 5
         to: tmpfs
-        image: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
-        compression: gz
+        images:
+          rootfs:
+            url: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
+            image_arg: -hda {rootfs} 
+            compression: gz
         os: debian
         # if root_partition partition is not present:
         # - look for a partitions labelled "root" or "ROOT" or "Root" (i.e. case insensitive)
@@ -30,6 +33,9 @@ actions:
         method: qemu
         media: tmpfs
         failure_retry: 2
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - test:
         failure_retry: 3
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-local.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-local.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-local.yaml	2015-09-09 14:31:19.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-local.yaml	2016-01-18 13:22:34.000000000 +0000
@@ -16,8 +16,11 @@ actions:
         timeout:
           minutes: 20
         to: tmpfs
-        image: file:///home/lava/kvm-debian-wheezy.img.bz2
-        compression: bz2
+        images:
+          rootfs:
+            url: file:///home/lava/kvm-debian-wheezy.img.bz2
+            image_arg: -hda {rootfs}
+            compression: bz2
         os: debian
         # if root_partition partition is not present:
         # - look for a partitions labelled "root" or "ROOT" or "Root" (i.e. case insensitive)
@@ -28,6 +31,9 @@ actions:
         method: qemu
         media: tmpfs
         failure_retry: 2
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - test:
         failure_retry: 3
@@ -39,10 +45,6 @@ actions:
             - repository: git://git.linaro.org/qa/test-definitions.git
               from: git
               path: ubuntu/smoke-tests-basic.yaml
-              # name: if not present, use the name from the YAML. The name can
-              # also be overriden from the actual commands being run by
-              # calling the lava-test-suite-name API call (e.g.
-              # `lava-test-suite-name FOO`).
               name: smoke-tests
             - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
               from: git
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-multinode-client.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-multinode-client.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-multinode-client.yaml	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-multinode-client.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -35,8 +35,11 @@ actions:
         timeout:
           minutes: 2
         to: tmpfs
-        image: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
-        compression: gz
+        images:
+          rootfs:
+            url: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
+            image_arg: -hda {rootfs}
+            compression: gz
         os: debian
         root_partition: 1
         role: client
@@ -53,6 +56,9 @@ actions:
         media: tmpfs
         failure_retry: 2
         role: client
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
         protocols:
           lava-multinode:
             - action: execute-qemu
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-multinode-server.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-multinode-server.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-multinode-server.yaml	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-multinode-server.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -33,8 +33,11 @@ actions:
         timeout:
           minutes: 2
         to: tmpfs
-        image: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
-        compression: gz
+        images:
+          rootfs:
+            url: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
+            image_arg: -hda {rootfs}
+            compression: gz
         os: debian
         root_partition: 1
         role: server
@@ -44,6 +47,9 @@ actions:
         media: tmpfs
         failure_retry: 2
         role: server
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - test:
         failure_retry: 3
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-multi.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-multi.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-multi.yaml	2015-09-09 14:31:19.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-multi.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -16,7 +16,11 @@ actions:
         timeout:
           minutes: 20
         to: tmpfs
-        image: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
+        images:
+          rootfs:
+            url: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
+            image_arg: -hda {rootfs}
+            compression: gz
         os: debian
         # if root_partition partition is not present:
         # - look for a partitions labelled "root" or "ROOT" or "Root" (i.e. case insensitive)
@@ -27,6 +31,9 @@ actions:
         method: qemu
         media: tmpfs
         failure_retry: 2
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - test:
         failure_retry: 3
@@ -38,10 +45,6 @@ actions:
             - repository: git://git.linaro.org/qa/test-definitions.git
               from: git
               path: ubuntu/smoke-tests-basic.yaml
-              # name: if not present, use the name from the YAML. The name can
-              # also be overriden from the actual commands being run by
-              # calling the lava-test-suite-name API call (e.g.
-              # `lava-test-suite-name FOO`).
               name: smoke-tests
             - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
               from: git
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-notest.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-notest.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-notest.yaml	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-notest.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -14,7 +14,11 @@ actions:
 
     - deploy:
         to: tmpfs
-        image: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
+        images: 
+          rootfs:
+            url: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
+            image_arg: -hda {rootfs}
+            compression: gz
         os: debian
         # if root_partition partition is not present:
         # - look for a partitions labelled "root" or "ROOT" or "Root" (i.e. case insensitive)
@@ -25,6 +29,9 @@ actions:
         method: qemu
         media: tmpfs
         failure_retry: 2
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
 context:
   arch: amd64
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-params.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-params.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-params.yaml	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-params.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -15,8 +15,11 @@ actions:
         timeout:
           minutes: 20
         to: tmpfs
-        image: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
-        compression: gz
+        images:
+          rootfs:
+            url: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
+            image_arg: -hda {rootfs}
+            compression: gz
         os: debian
         # if root_partition partition is not present:
         # - look for a partitions labelled "root" or "ROOT" or "Root" (i.e. case insensitive)
@@ -27,6 +30,9 @@ actions:
         method: qemu
         media: tmpfs
         failure_retry: 2
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - test:
         failure_retry: 3
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-qcow2.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-qcow2.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-qcow2.yaml	2015-09-09 14:31:19.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-qcow2.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -16,9 +16,12 @@ actions:
         timeout:
           minutes: 20
         to: tmpfs
-        image: http://images.validation.linaro.org/kvm/debian-sid-2014_08_21-amd64.qcow2.xz
-        format: qcow2
-        compression: xz
+        images:
+          rootfs:
+            url: http://images.validation.linaro.org/kvm/debian-sid-2014_08_21-amd64.qcow2.xz
+            image_arg: -hda {rootfs}
+            format: qcow2
+            compression: xz
         os: debian
         # if root_partition partition is not present:
         # - look for a partitions labelled "root" or "ROOT" or "Root" (i.e. case insensitive)
@@ -32,6 +35,9 @@ actions:
           minutes: 5
         media: tmpfs
         failure_retry: 2
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - test:
         failure_retry: 3
@@ -43,10 +49,6 @@ actions:
             - repository: http://git.linaro.org/qa/test-definitions.git
               from: git
               path: ubuntu/smoke-tests-basic.yaml
-              # name: if not present, use the name from the YAML. The name can
-              # also be overriden from the actual commands being run by
-              # calling the lava-test-suite-name API call (e.g.
-              # `lava-test-suite-name FOO`).
               name: smoke-tests
             - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
               from: git
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-repeat.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-repeat.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-repeat.yaml	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm-repeat.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -7,8 +7,11 @@ actions:
         timeout:
           minutes: 20
         to: tmpfs
-        image: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
-        compression: gz
+        images:
+          rootfs:
+            url: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
+            image_arg: -hda {rootfs}
+            compression: gz
         os: debian
         root_partition: 1
 
@@ -19,6 +22,9 @@ actions:
         - boot:
             method: qemu
             media: tmpfs
+            prompts:
+              - 'linaro-test'
+              - 'root@debian:~#'
 
         - test:
             failure_retry: 3
@@ -40,6 +46,9 @@ actions:
             media: tmpfs
             failure_retry: 2  # deliberately invalid action,
             # combines failure_retry and repeat
+            prompts:
+              - 'linaro-test'
+              - 'root@debian:~#'
 
         - test:
            name: kvm-intermediate-singlenode
@@ -59,13 +68,19 @@ actions:
         method: qemu
         media: tmpfs
         repeat: 4
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - deploy:
         timeout:
           minutes: 20
         to: tmpfs
-        image: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
-        compression: gz
+        images: 
+          rootfs:
+            url: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
+            image_arg: -hda {rootfs}
+            compression: gz
         os: debian
         root_partition: 1
 
@@ -73,6 +88,9 @@ actions:
         method: qemu
         media: tmpfs
         repeat: 4
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - test:
         failure_retry: 3
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/kvm.yaml	2015-09-09 14:31:19.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/kvm.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -18,31 +18,41 @@ actions:
         timeout:
           minutes: 2
         to: tmpfs
-        image: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
-        compression: gz
         os: debian
         # if root_partition partition is not present:
         # - look for a partitions labelled "root" or "ROOT" or "Root" (i.e. case insensitive)
         # - look into device configuration
         root_partition: 1
+        images:
+          disk1:
+            url: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
+            compression: gz
+            image_arg: -hda {disk1}
+            overlay: True
+          disk2:
+            url: http://images.validation.linaro.org/kvm-debian-wheezy.img.gz
+            compression: gz
+            image_arg: -hdb {disk2}
 
     - boot:
         method: qemu
         media: tmpfs
         connection: serial
         failure_retry: 2
+        auto_login: {login_prompt: 'login:', username: root}
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - test:
         failure_retry: 3
+        timeout:
+          minutes: 10
         name: kvm-basic-singlenode  # is not present, use "test $N"
         definitions:
             - repository: git://git.linaro.org/qa/test-definitions.git
               from: git
               path: ubuntu/smoke-tests-basic.yaml
-              # name: if not present, use the name from the YAML. The name can
-              # also be overriden from the actual commands being run by
-              # calling the lava-test-suite-name API call (e.g.
-              # `lava-test-suite-name FOO`).
               name: smoke-tests
             - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
               from: git
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/mustang-menu-ramdisk.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/mustang-menu-ramdisk.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/mustang-menu-ramdisk.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/mustang-menu-ramdisk.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,56 @@
+device_type: mustang-uefi
+
+job_name: uefi-menu-mustang
+timeouts:
+  job:
+    minutes: 15            # timeout for the whole job (default: ??h)
+  action:
+    minutes: 5         # default timeout applied for each action; can be overridden in the action itself (default: ?h)
+  menu-interrupt:
+    minutes: 3
+priority: medium
+
+actions:
+
+  # needs to be a list of hashes to retain the order
+  - deploy:
+     timeout:
+       minutes: 2
+     to: tftp
+     dtb:
+         url: http://images.validation.linaro.org/mustang/mustang.dtb_1.11
+     kernel:
+         url: http://images.validation.linaro.org/mustang/uImage_1.11
+     nfsrootfs:
+         url: http://people.linaro.org/~neil.williams/arm64/debian-jessie-arm64-rootfs.tar.gz
+         compression: gz
+     os: debian
+     timeout: {minutes: 5}
+     to: tftp
+
+  - boot:
+     method: uefi-menu
+     auto_login: {login_prompt: 'login:', username: root}
+     commands: nfs
+     type: bootm
+     prompts:
+     - 'linaro-test'
+     - 'root@debian:~#'
+     parameters:
+       shutdown-message: "reboot: Restarting system"
+
+  - test:
+     failure_retry: 3
+     name: kvm-basic-singlenode  # if not present, use "test $N"
+     # only s, m & h are supported.
+     timeout:
+       minutes: 5 # uses install:deps, so takes longer than singlenode01
+     definitions:
+         - repository: git://git.linaro.org/qa/test-definitions.git
+           from: git
+           path: ubuntu/smoke-tests-basic.yaml
+           name: smoke-tests
+         - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
+           from: git
+           path: lava-test-shell/single-node/singlenode02.yaml
+           name: singlenode-intermediate
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/panda-ramdisk.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/panda-ramdisk.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/panda-ramdisk.yaml	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/panda-ramdisk.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -16,15 +16,23 @@ actions:
   - deploy:
      timeout: 2m
      to: tftp
-     kernel: http://images.validation.linaro.org/functional-test-images/panda/uImage
-     ramdisk: http://images.validation.linaro.org/functional-test-images/common/linaro-image-minimal-initramfs-genericarmv7a.cpio.gz.u-boot
-     ramdisk-type: u-boot
-     dtb: http://images.validation.linaro.org/functional-test-images/panda/omap4-panda-es.dtb
+     kernel:
+         url: http://images.validation.linaro.org/functional-test-images/panda/uImage
+     ramdisk:
+         url: http://images.validation.linaro.org/functional-test-images/common/linaro-image-minimal-initramfs-genericarmv7a.cpio.gz.u-boot
+         compression: gz
+         header: u-boot
+         add-header: u-boot
+     dtb:
+         url: http://images.validation.linaro.org/functional-test-images/panda/omap4-panda-es.dtb
 
   - boot:
      method: u-boot
      commands: ramdisk
      type: bootm
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
 
   - test:
      failure_retry: 3
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/panda-usb.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/panda-usb.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/panda-usb.yaml	2015-02-26 13:45:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/panda-usb.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -19,19 +19,29 @@ actions:
      timeout:
        minutes: 2
      to: tftp
-     kernel: http://images.validation.linaro.org/functional-test-images/panda/uImage
-     nfsrootfs: file:///home/linaro/chroots/jessie.tar.gz
-     dtb: http://images.validation.linaro.org/functional-test-images/panda/omap4-panda-es.dtb
+     kernel:
+         url: http://images.validation.linaro.org/functional-test-images/panda/uImage
+     nfsrootfs:
+         url: file:///home/linaro/chroots/jessie.tar.gz
+         compression: gz
+     dtb:
+         url: http://images.validation.linaro.org/functional-test-images/panda/omap4-panda-es.dtb
 
   - boot:
      method: u-boot
      commands: nfs
      type: bootm
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
 
   - deploy:
       timeout:
         minutes: 10
       to: usb
-      image: http://releases.linaro.org/12.02/ubuntu/leb-panda/panda-ubuntu-desktop.img.gz
+      images:
+        rootfs:
+          url: http://images.validation.linaro.org/functional-test-images/panda/panda-raring_developer_20130723-408.img.gz
+          compression: gz
       os: debian
       device: SanDisk_Ultra # needs to be exposed in the device-specific UI
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/ssh-deploy.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/ssh-deploy.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/ssh-deploy.yaml	2015-09-01 08:36:11.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/ssh-deploy.yaml	2015-12-14 09:33:11.000000000 +0000
@@ -24,6 +24,9 @@ actions:
         method: ssh
         connection: ssh
         failure_retry: 2
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - boot:
         method: schroot
@@ -31,6 +34,9 @@ actions:
         failure_retry: 2
         schroot: unstable
         os: debian
+        prompts:
+          - 'linaro-test'
+          - 'root@debian:~#'
 
     - test:
         failure_retry: 3
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/tk1-nfs.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/tk1-nfs.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/tk1-nfs.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/tk1-nfs.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,58 @@
+# Sample JOB definition for a u-boot job
+
+device_type: tk1
+
+job_name: tk1-beserk
+timeouts:
+  job:
+    minutes: 10            # timeout for the whole job (default: ??h)
+  action:
+   minutes: 5         # default timeout applied for each action; can be overridden in the action itself (default: ?h)
+priority: medium
+
+# example old-style job: https://staging.validation.linaro.org/scheduler/job/113682/definition
+
+actions:
+
+  # needs to be a list of hashes to retain the order
+  - deploy:
+     timeout:
+       minutes: 2
+     to: tftp
+     kernel:
+         url: http://storage.kernelci.org/next/next-20160120/arm-multi_v7_defconfig+CONFIG_LKDTM=y/zImage
+     nfsrootfs:
+         url: http://ironhide.bounceme.net/debian-jessie-arm64-rootfs.tar.gz
+         compression: gz
+     modules:
+         url: http://ironhide.bounceme.net/tk1/modules.tar.xz
+         compression: xz
+
+     os: oe
+     dtb:
+         url: http://storage.kernelci.org/next/next-20160120/arm-multi_v7_defconfig+CONFIG_LKDTM=y/dtbs/tegra124-jetson-tk1.dtb
+
+  - boot:
+     method: u-boot
+     commands: nfs
+     type: bootz
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
+       - '/ #'
+
+  - test:
+     failure_retry: 3
+     name: smoke-tests  # if not present, use "test $N"
+     # only s, m & h are supported.
+     timeout:
+       minutes: 5 # uses install:deps, so takes longer than singlenode01
+     definitions:
+         - repository: git://git.linaro.org/qa/test-definitions.git
+           from: git
+           path: ubuntu/smoke-tests-basic.yaml
+           name: smoke-tests
+         - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
+           from: git
+           path: lava-test-shell/single-node/singlenode03.yaml
+           name: singlenode-advanced
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/tk1-ramdisk.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/tk1-ramdisk.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/tk1-ramdisk.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/tk1-ramdisk.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,61 @@
+# Sample JOB definition for a u-boot job
+
+device_type: tk1
+
+job_name: tk1-beserk
+timeouts:
+  job:
+    minutes: 10            # timeout for the whole job (default: ??h)
+  action:
+   minutes: 5         # default timeout applied for each action; can be overridden in the action itself (default: ?h)
+priority: medium
+
+# example old-style job: https://staging.validation.linaro.org/scheduler/job/113682/definition
+
+actions:
+
+  # needs to be a list of hashes to retain the order
+  - deploy:
+     timeout:
+       minutes: 2
+     to: tftp
+     kernel:
+         url: http://storage.kernelci.org/mainline/v4.4-8855-ga200dcb34693/arm-multi_v7_defconfig/zImage
+     ramdisk:
+         url: http://ironhide.bounceme.net/common/kernelcirootfs.cpio.bz2
+         #url: http://images.validation.linaro.org/functional-test-images/common/linaro-image-minimal-initramfs-genericarmv7a.cpio.gz.u-boot
+         compression: bz2
+         #header: u-boot
+         add-header: u-boot
+     modules:
+         url: http://storage.kernelci.org/mainline/v4.4-8855-ga200dcb34693/arm-multi_v7_defconfig/modules.tar.xz
+         compression: xz
+
+     os: oe
+     dtb:
+         url: http://storage.kernelci.org/mainline/v4.4-8855-ga200dcb34693/arm-multi_v7_defconfig/dtbs/tegra124-jetson-tk1.dtb
+
+  - boot:
+     method: u-boot
+     commands: ramdisk
+     type: bootz
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
+       - '/ #'
+
+  - test:
+     failure_retry: 3
+     name: smoke-tests  # if not present, use "test $N"
+     # only s, m & h are supported.
+     timeout:
+       minutes: 5 # uses install:deps, so takes longer than singlenode01
+     definitions:
+         - repository: git://git.linaro.org/qa/test-definitions.git
+           from: git
+           path: ubuntu/smoke-tests-basic.yaml
+           name: smoke-tests
+         - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
+           from: git
+           path: lava-test-shell/single-node/singlenode03.yaml
+           name: singlenode-advanced
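
These two tk1 samples are new in 2016.3 and use the reworked deploy syntax that the uboot-* updates below migrate to: each downloadable resource (kernel, ramdisk, nfsrootfs, modules, dtb) becomes a mapping with a url key plus optional compression and add-header keys, replacing the flat kernel: / ramdisk-type: / rootfs_compression: keys. A condensed before/after sketch of the syntax only, using placeholder URLs:

    # old flat form (removed from the uboot-* samples below)
    ramdisk: http://example.com/initramfs.cpio.gz.u-boot
    ramdisk-type: u-boot
    nfsrootfs: http://example.com/rootfs.tar.gz
    rootfs_compression: gz

    # new nested form (as used in the tk1 samples above)
    ramdisk:
        url: http://example.com/initramfs.cpio.gz.u-boot
        compression: gz
        add-header: u-boot
    nfsrootfs:
        url: http://example.com/rootfs.tar.gz
        compression: gz
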
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-multiple.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-multiple.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-multiple.yaml	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-multiple.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -21,29 +21,41 @@ actions:
      timeout:
        minutes: 2
      to: tftp
-     kernel: http://images.validation.linaro.org/functional-test-images/bbb/zImage
-     nfsrootfs: http://images.validation.linaro.org/debian-jessie-rootfs.tar.gz
-     rootfs_compression: gz
+     kernel:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/zImage
+     nfsrootfs:
+         url: http://images.validation.linaro.org/debian-jessie-rootfs.tar.gz
+         compression: gz
      os: debian
-     dtb: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
+     dtb:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
 
   - boot:
      method: u-boot
      commands: nfs
      type: bootz
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
 
   # needs to be a list of hashes to retain the order
   - deploy:
      timeout:
        minutes: 4
      to: tftp
-     kernel: http://images.validation.linaro.org/functional-test-images/bbb/zImage
-     nfsrootfs: http://images.validation.linaro.org/debian-jessie-rootfs.tar.gz
-     rootfs_compression: gz
+     kernel:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/zImage
+     nfsrootfs:
+         url: http://images.validation.linaro.org/debian-jessie-rootfs.tar.gz
+         compression: gz
      os: oe
-     dtb: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
+     dtb:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
 
   - boot:
      method: u-boot
      commands: ramdisk
      type: bootz
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-nfs.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-nfs.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-nfs.yaml	2015-09-09 14:31:19.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-nfs.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -21,16 +21,22 @@ actions:
      timeout:
        minutes: 2
      to: tftp
-     kernel: http://images.validation.linaro.org/functional-test-images/bbb/zImage
-     nfsrootfs: http://images.validation.linaro.org/no-such-file-rootfs.tar.xz
-     rootfs_compression: xz
+     kernel:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/zImage
+     nfsrootfs:
+         url: http://images.validation.linaro.org/no-such-file-rootfs.tar.xz
+         compression: xz
      os: debian
-     dtb: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
+     dtb:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
 
   - boot:
      method: u-boot
      commands: nfs
      type: bootz
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
 
   - test:
      failure_retry: 3
@@ -42,10 +48,6 @@ actions:
          - repository: git://git.linaro.org/qa/test-definitions.git
            from: git
            path: ubuntu/smoke-tests-basic.yaml
-           # name: if not present, use the name from the YAML. The name can
-           # also be overriden from the actual commands being run by
-           # calling the lava-test-suite-name API call (e.g.
-           # `lava-test-suite-name FOO`).
            name: smoke-tests
          - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
            from: git
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-persistent.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-persistent.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-persistent.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-persistent.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,46 @@
+# Sample JOB definition for a u-boot job
+
+device_type: beaglebone-black
+
+job_name: uboot-persistent-nfs
+timeouts:
+  job:
+    minutes: 15
+  action:
+    minutes: 5
+priority: medium
+
+actions:
+  - deploy:
+     timeout:
+       minutes: 2
+     to: tftp
+     kernel:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/zImage
+     nfs_url: "127.0.0.1:/nfs/debian-jessie.armhf"
+     os: debian
+     dtb:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
+
+  - boot:
+     method: u-boot
+     commands: nfs
+     type: bootz
+     prompts:
+     - 'linaro-test'
+     - 'root@debian:~#'
+
+  - test:
+     failure_retry: 3
+     name: kvm-basic-singlenode
+     timeout:
+       minutes: 5
+     definitions:
+         - repository: git://git.linaro.org/qa/test-definitions.git
+           from: git
+           path: ubuntu/smoke-tests-basic.yaml
+           name: smoke-tests
+         - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
+           from: git
+           path: lava-test-shell/single-node/singlenode03.yaml
+           name: singlenode-advanced
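
uboot-persistent.yaml is also new and shows the alternative to downloading a root filesystem: the deploy action names an existing NFS export via nfs_url instead of carrying an nfsrootfs download block. A condensed comparison (the download URL is a placeholder; the export path is taken from the sample above):

    # download, unpack and export a rootfs for this job
    nfsrootfs:
        url: http://example.com/rootfs.tar.gz
        compression: gz

    # or point at a persistent export already available to the dispatcher
    nfs_url: "127.0.0.1:/nfs/debian-jessie.armhf"
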
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-ramdisk.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-ramdisk.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-ramdisk.yaml	2015-09-09 14:31:19.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/uboot-ramdisk.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -19,11 +19,16 @@ actions:
      timeout:
        minutes: 2
      to: tftp
-     kernel: http://images.validation.linaro.org/functional-test-images/bbb/zImage
-     ramdisk: http://images.validation.linaro.org/functional-test-images/common/linaro-image-minimal-initramfs-genericarmv7a.cpio.gz.u-boot
-     ramdisk-type: u-boot
+     kernel:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/zImage
+     ramdisk:
+         url: http://images.validation.linaro.org/functional-test-images/common/linaro-image-minimal-initramfs-genericarmv7a.cpio.gz.u-boot
+         header: u-boot
+         add-header: u-boot
+         compression: gz
      os: oe
-     dtb: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
+     dtb:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
 
   - boot:
      method: u-boot
@@ -31,6 +36,9 @@ actions:
      type: bootz
      parameters:
        shutdown-message: "reboot: Restarting system"
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
 
   - test:
      failure_retry: 3
@@ -42,10 +50,6 @@ actions:
          - repository: git://git.linaro.org/qa/test-definitions.git
            from: git
            path: ubuntu/smoke-tests-basic.yaml
-           # name: if not present, use the name from the YAML. The name can
-           # also be overriden from the actual commands being run by
-           # calling the lava-test-suite-name API call (e.g.
-           # `lava-test-suite-name FOO`).
            name: smoke-tests
          - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
            from: git
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/uboot.yaml 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/uboot.yaml
--- 2015.9-1/lava_dispatcher/pipeline/test/sample_jobs/uboot.yaml	2015-09-09 14:31:19.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/sample_jobs/uboot.yaml	2016-03-04 14:35:18.000000000 +0000
@@ -21,16 +21,22 @@ actions:
      timeout:
        minutes: 2
      to: tftp
-     kernel: http://images.validation.linaro.org/functional-test-images/bbb/zImage
-     nfsrootfs: http://images.validation.linaro.org/debian-jessie-rootfs.tar.gz
-     rootfs_compression: gz
+     kernel:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/zImage
+     nfsrootfs:
+         url: http://images.validation.linaro.org/debian-jessie-rootfs.tar.gz
+         compression: gz
      os: debian
-     dtb: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
+     dtb:
+         url: http://images.validation.linaro.org/functional-test-images/bbb/am335x-bone.dtb
 
   - boot:
      method: u-boot
      commands: nfs
      type: bootz
+     prompts:
+       - 'linaro-test'
+       - 'root@debian:~#'
 
   - test:
      failure_retry: 3
@@ -42,10 +48,6 @@ actions:
          - repository: git://git.linaro.org/qa/test-definitions.git
            from: git
            path: ubuntu/smoke-tests-basic.yaml
-           # name: if not present, use the name from the YAML. The name can
-           # also be overriden from the actual commands being run by
-           # calling the lava-test-suite-name API call (e.g.
-           # `lava-test-suite-name FOO`).
            name: smoke-tests
          - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
            from: git
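
A change common to all of these sample jobs: each boot action now lists the shell prompts the dispatcher should expect once the device has booted, rather than relying on built-in defaults. The minimal form, copied from the samples above, is:

    - boot:
       method: u-boot
       commands: nfs
       type: bootz
       prompts:
         - 'linaro-test'
         - 'root@debian:~#'

The new autologin tests in test_kvm.py further down treat an empty prompts list or an empty prompt string as a validation failure (JobError).
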
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/test_basic.py 2016.3-1/lava_dispatcher/pipeline/test/test_basic.py
--- 2015.9-1/lava_dispatcher/pipeline/test/test_basic.py	2015-09-04 07:44:33.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/test_basic.py	2016-03-02 14:34:40.000000000 +0000
@@ -19,16 +19,18 @@
 # with this program; if not, see <http://www.gnu.org/licenses>.
 
 import os
+import glob
 import time
 import unittest
 import simplejson
 import yaml
 
 from lava_dispatcher.pipeline.utils.filesystem import mkdtemp
-from lava_dispatcher.pipeline.action import Pipeline, Action
+from lava_dispatcher.pipeline.action import Pipeline, Action, JobError
 from lava_dispatcher.pipeline.parser import JobParser
 from lava_dispatcher.pipeline.job import Job
 from lava_dispatcher.pipeline.device import NewDevice
+from lava_dispatcher.pipeline.shell import ExpectShellSession
 
 
 class TestAction(unittest.TestCase):  # pylint: disable=too-many-public-methods
@@ -285,6 +287,58 @@ class TestPipeline(unittest.TestCase):
                 for element in item['pipeline']:
                     self.assertNotIn('match', element)
 
+    def test_compatibility(self):
+        """
+        Test compatibility support.
+
+        The class used in the comparison will change according to whichever class
+        relates to the change that caused the compatibility level to be modified.
+        """
+        factory = Factory()
+        job = factory.create_kvm_job('sample_jobs/kvm.yaml', mkdtemp())
+        pipe = job.describe()
+        self.assertEqual(pipe['compatibility'], ExpectShellSession.compatibility)
+        self.assertEqual(job.compatibility, ExpectShellSession.compatibility)
+        kvm_yaml = os.path.join(os.path.dirname(__file__), 'sample_jobs/kvm.yaml')
+        job_def = yaml.load(open(kvm_yaml, 'r'))
+        job_def['compatibility'] = job.compatibility
+        parser = JobParser()
+        device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/kvm01.yaml'))
+        try:
+            job = parser.parse(yaml.dump(job_def), device, 4212, None, output_dir=mkdtemp())
+        except NotImplementedError:
+            # some deployments listed in basics.yaml are not implemented yet
+            pass
+        self.assertIsNotNone(job)
+        job_def['compatibility'] = job.compatibility + 1
+        self.assertRaises(
+            JobError, parser.parse, yaml.dump(job_def), device, 4212, None, mkdtemp()
+        )
+        job_def['compatibility'] = 0
+        try:
+            job = parser.parse(yaml.dump(job_def), device, 4212, None, output_dir=mkdtemp())
+        except NotImplementedError:
+            # some deployments listed in basics.yaml are not implemented yet
+            pass
+        self.assertIsNotNone(job)
+
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
+    def test_common_data(self):
+        factory = Factory()
+        job = factory.create_kvm_job('sample_jobs/kvm.yaml', mkdtemp())
+        self.assertIsNotNone(job)
+        test_action = job.pipeline.actions[0]
+        test_action.validate()
+        test_action.set_common_data('ns', 'simple', 1)
+        self.assertEqual(test_action.get_common_data('ns', 'simple'), 1)
+        test_action.set_common_data('ns', 'dict', {'key': False})
+        self.assertEqual(test_action.get_common_data('ns', 'dict'), {'key': False})
+        test_action.set_common_data('ns', 'list', [1, 2, 3, '4'])
+        self.assertEqual(test_action.get_common_data('ns', 'list'), [1, 2, 3, '4'])
+        test_action.set_common_data('ns', 'dict2', {'key': {'nest': True}})
+        self.assertEqual(test_action.get_common_data('ns', 'dict2'), {'key': {'nest': True}})
+        self.assertNotEqual(test_action.get_common_data('unknown', 'simple'), 1)
+
 
 class TestFakeActions(unittest.TestCase):  # pylint: disable=too-many-public-methods
 
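
test_compatibility exercises the new top-level compatibility field in submitted job definitions: parsing succeeds while the job's declared compatibility is less than or equal to the dispatcher's (taken here from ExpectShellSession.compatibility) and raises JobError when it is higher. In YAML terms it is a single integer at the top level of the job; a sketch with an illustrative value:

    # top of an otherwise normal job definition
    job_name: kvm-compatibility-check   # illustrative name
    compatibility: 4                    # illustrative value; JobError if this exceeds the dispatcher's compatibility
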
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/test_connections.py 2016.3-1/lava_dispatcher/pipeline/test/test_connections.py
--- 2015.9-1/lava_dispatcher/pipeline/test/test_connections.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/test_connections.py	2016-03-02 14:34:40.000000000 +0000
@@ -20,17 +20,18 @@
 
 
 import os
+import yaml
 import unittest
 from lava_dispatcher.pipeline.action import JobError
 from lava_dispatcher.pipeline.utils.filesystem import mkdtemp
 from lava_dispatcher.pipeline.device import NewDevice
+from lava_dispatcher.pipeline.action import Timeout
 from lava_dispatcher.pipeline.parser import JobParser
 from lava_dispatcher.pipeline.actions.boot.ssh import SchrootAction
-from lava_dispatcher.pipeline.actions.boot.qemu import BootVMAction
-from lava_dispatcher.pipeline.connections.ssh import ConnectDynamicSsh
 from lava_dispatcher.pipeline.utils.shell import infrastructure_error
 from lava_dispatcher.pipeline.test.test_basic import pipeline_reference
 from lava_dispatcher.pipeline.utils.filesystem import check_ssh_identity_file
+from lava_dispatcher.pipeline.protocols.multinode import MultinodeProtocol
 
 
 class Factory(object):  # pylint: disable=too-few-public-methods
@@ -114,8 +115,10 @@ class TestConnection(unittest.TestCase):
             '-p', '8022']
         self.job.validate()
         login = [action for action in self.job.pipeline.actions if action.name == 'login-ssh'][0]
-        self.assertIn('primary-ssh', [action.name for action in login.internal_pipeline.actions])
-        primary = [action for action in login.internal_pipeline.actions if action.name == 'primary-ssh'][0]
+        self.assertIn('ssh-connection', [action.name for action in login.internal_pipeline.actions])
+        primary = [action for action in login.internal_pipeline.actions if action.name == 'ssh-connection'][0]
+        prepare = [action for action in login.internal_pipeline.actions if action.name == 'prepare-ssh'][0]
+        self.assertTrue(prepare.primary)
         self.assertEqual(identity, primary.identity_file)
         self.assertEqual(primary.host, params['ssh']['host'])
         self.assertEqual(test_command, primary.command)
@@ -138,16 +141,12 @@ class TestConnection(unittest.TestCase):
     @unittest.skipIf(infrastructure_error('schroot'), "schroot not installed")
     def test_scp_command(self):
         self.job.validate()
-        overlay = [action for action in self.job.pipeline.actions if action.name == 'scp-overlay'][0]
-        deploy = [action for action in overlay.internal_pipeline.actions if action.name == 'scp-deploy'][0]
-        scp = [action for action in overlay.internal_pipeline.actions if action.name == 'prepare-scp-overlay'][0]
+        login = [action for action in self.guest_job.pipeline.actions if action.name == 'login-ssh'][0]
+        scp = [action for action in login.internal_pipeline.actions if action.name == 'scp-deploy'][0]
         self.assertIsNotNone(scp)
-        self.assertIn('scp', deploy.scp)
-        self.assertNotIn('ssh', deploy.scp)
-        self.assertIn('ssh', deploy.command)
-        self.assertNotIn('scp', deploy.command)
-        self.assertIn('lava_test_results_dir', deploy.data)
-        self.assertIn('/lava-', deploy.data['lava_test_results_dir'])
+        # FIXME: schroot needs to make use of scp
+        self.assertNotIn('ssh', scp.scp)
+        self.assertFalse(scp.primary)
 
     @unittest.skipIf(infrastructure_error('schroot'), "schroot not installed")
     def test_schroot_params(self):
@@ -171,17 +170,133 @@ class TestConnection(unittest.TestCase):
 
     def test_guest_ssh(self):
         self.assertIsNotNone(self.guest_job)
+        description_ref = pipeline_reference('bbb-ssh-guest.yaml')
+        self.assertEqual(description_ref, self.guest_job.pipeline.describe(False))
         self.guest_job.validate()
+        multinode = [protocol for protocol in self.guest_job.protocols if protocol.name == MultinodeProtocol.name][0]
+        self.assertEqual(int(multinode.system_timeout.duration), 900)
         self.assertEqual([], self.guest_job.pipeline.errors)
-        scp_overlay = [item for item in self.guest_job.pipeline.actions if item.name == 'scp-overlay']
-        environment = scp_overlay[0].get_common_data('environment', 'env_dict')
+        self.assertEqual(len([item for item in self.guest_job.pipeline.actions if item.name == 'scp-overlay']), 1)
+        scp_overlay = [item for item in self.guest_job.pipeline.actions if item.name == 'scp-overlay'][0]
+        prepare = [item for item in scp_overlay.internal_pipeline.actions if item.name == 'prepare-scp-overlay'][0]
+        self.assertEqual(prepare.host_keys, ['ipv4'])
+        self.assertEqual(prepare.get_common_data(prepare.name, 'overlay'), prepare.host_keys)
+        params = prepare.parameters['protocols'][MultinodeProtocol.name]
+        for call_dict in [call for call in params if 'action' in call and call['action'] == prepare.name]:
+            del call_dict['yaml_line']
+            if 'message' in call_dict:
+                del call_dict['message']['yaml_line']
+            if 'timeout' in call_dict:
+                del call_dict['timeout']['yaml_line']
+            self.assertEqual(
+                call_dict, {
+                    'action': 'prepare-scp-overlay',
+                    'message': {'ipaddr': '$ipaddr'},
+                    'messageID': 'ipv4', 'request': 'lava-wait',
+                    'timeout': {'minutes': 5}
+                },
+            )
+        login = [action for action in self.guest_job.pipeline.actions if action.name == 'login-ssh'][0]
+        scp = [action for action in login.internal_pipeline.actions if action.name == 'scp-deploy'][0]
+        self.assertFalse(scp.primary)
+        ssh = [action for action in login.internal_pipeline.actions if action.name == 'prepare-ssh'][0]
+        self.assertFalse(ssh.primary)
+        self.assertIsNotNone(scp.scp)
+        self.assertFalse(scp.primary)
+        self.assertIn('host_key', login.parameters['parameters'])
+        self.assertIn('hostID', login.parameters['parameters'])
+        self.assertIn(  # ipv4
+            login.parameters['parameters']['hostID'],
+            prepare.host_keys)
+        prepare.set_common_data(MultinodeProtocol.name, 'ipv4', {'ipaddr': u'172.16.200.165'})
+        self.assertEqual(prepare.get_common_data(prepare.name, 'overlay'), prepare.host_keys)
+        self.assertIn(
+            login.parameters['parameters']['host_key'],
+            prepare.get_common_data(MultinodeProtocol.name, login.parameters['parameters']['hostID']))
+        host_data = prepare.get_common_data(MultinodeProtocol.name, login.parameters['parameters']['hostID'])
+        self.assertEqual(
+            host_data[login.parameters['parameters']['host_key']],
+            u'172.16.200.165'
+        )
+        data = scp_overlay.get_common_data(MultinodeProtocol.name, 'ipv4')
+        if 'protocols' in scp_overlay.parameters:
+            for params in scp_overlay.parameters['protocols'][MultinodeProtocol.name]:
+                (replacement_key, placeholder) = [(key, value) for key, value in params['message'].items() if key != 'yaml_line'][0]
+                self.assertEqual(data[replacement_key], u'172.16.200.165')
+                self.assertEqual(placeholder, '$ipaddr')
+        environment = scp_overlay.get_common_data('environment', 'env_dict')
         self.assertIsNotNone(environment)
         self.assertIn('LANG', environment.keys())
         self.assertIn('C', environment.values())
-        self.assertEqual(len(scp_overlay), 1)
-        overlay = [item for item in scp_overlay[0].internal_pipeline.actions if item.name == 'lava-overlay']
+        overlay = [item for item in scp_overlay.internal_pipeline.actions if item.name == 'lava-overlay']
+        self.assertIn('action', overlay[0].parameters['protocols'][MultinodeProtocol.name][0])
+        self.assertIn('message', overlay[0].parameters['protocols'][MultinodeProtocol.name][0])
+        self.assertIn('timeout', overlay[0].parameters['protocols'][MultinodeProtocol.name][0])
+        msg_dict = overlay[0].parameters['protocols'][MultinodeProtocol.name][0]['message']
+        for key, value in msg_dict.items():
+            if 'yaml_line' == key:
+                continue
+            self.assertTrue(value.startswith('$'))
+            self.assertFalse(key.startswith('$'))
+        self.assertIn('request', overlay[0].parameters['protocols'][MultinodeProtocol.name][0])
         multinode = [item for item in overlay[0].internal_pipeline.actions if item.name == 'lava-multinode-overlay']
         self.assertEqual(len(multinode), 1)
         # Check Pipeline
         description_ref = pipeline_reference('ssh-guest.yaml')
         self.assertEqual(description_ref, self.guest_job.pipeline.describe(False))
+
+
+class TestTimeouts(unittest.TestCase):
+    """
+    Test action and connection timeout parsing.
+    """
+
+    def create_custom_job(self, data, output_dir='/tmp/'):  # pylint: disable=no-self-use
+        device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/bbb-01.yaml'))
+        parser = JobParser()
+        job = parser.parse(data, device, 4212, None, output_dir=output_dir)
+        return job
+
+    def test_action_timeout(self):
+        factory = Factory()
+        job = factory.create_bbb_job('sample_jobs/uboot-ramdisk.yaml')
+        deploy = [action for action in job.pipeline.actions if action.name == 'tftp-deploy'][0]
+        test_action = [action for action in job.pipeline.actions if action.name == 'lava-test-retry'][0]
+        self.assertEqual(deploy.timeout.duration, 120)  # job specifies 2 minutes
+        self.assertEqual(deploy.connection_timeout.duration, Timeout.default_duration())
+        self.assertEqual(test_action.timeout.duration, 300)
+        self.assertEqual(test_action.connection_timeout.duration, Timeout.default_duration())
+
+    def test_job_connection_timeout(self):
+        """
+        Test connection timeout specified in the submission YAML
+        """
+        data = yaml.load(
+            open(os.path.join(
+                os.path.dirname(__file__), './sample_jobs/uboot-ramdisk.yaml'), 'r'))
+        data['timeouts']['connection'] = {'seconds': 20}
+        job = self.create_custom_job(yaml.dump(data))
+        for action in job.pipeline.actions:
+            if action.internal_pipeline:
+                for action in action.internal_pipeline.actions:
+                    if action.connection_timeout and action.name != 'uboot-retry':
+                        # uboot-retry has an override in this sample job
+                        self.assertEqual(action.connection_timeout.duration, 20)
+
+    def test_action_connection_timeout(self):
+        """
+        Test connection timeout specified for a particular action
+        """
+        data = yaml.load(
+            open(os.path.join(
+                os.path.dirname(__file__), './sample_jobs/uboot-ramdisk.yaml'), 'r'))
+        data['timeouts']['connections'] = {'uboot-retry': {}}
+        data['timeouts']['connections']['uboot-retry'] = {'seconds': 20}
+        job = self.create_custom_job(yaml.dump(data))
+        boot = [action for action in job.pipeline.actions if action.name == 'uboot-action'][0]
+        retry = [action for action in boot.internal_pipeline.actions if action.name == 'uboot-retry'][0]
+        self.assertEqual(retry.timeout.duration, Timeout.parse(job.device['timeouts']['actions'][retry.name]))
+        self.assertEqual(
+            Timeout.parse(job.device['timeouts']['connections'][retry.name]),
+            retry.connection_timeout.duration
+        )
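
The new TestTimeouts class shows how connection timeouts are expressed in the submission YAML alongside the existing job and action timeouts: timeouts.connection sets a default for every action, and timeouts.connections.<action-name> overrides it for a single action (uboot-retry in these tests). A sketch of such a block, with illustrative durations:

    timeouts:
      job:
        minutes: 15
      action:
        minutes: 5
      connection:           # default connection timeout for all actions
        seconds: 20
      connections:
        uboot-retry:        # per-action override, keyed by action name
          seconds: 20
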
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/test_defs.py 2016.3-1/lava_dispatcher/pipeline/test/test_defs.py
--- 2015.9-1/lava_dispatcher/pipeline/test/test_defs.py	2015-09-09 14:31:19.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/test_defs.py	2016-03-02 14:34:40.000000000 +0000
@@ -56,8 +56,8 @@ class TestDefinitionHandlers(unittest.Te
             self.assertIsNotNone(action.name)
             if isinstance(action, DeployAction):
                 overlay = action.pipeline.children[action.pipeline][3]
-                testdef = overlay.internal_pipeline.actions[1]
-        self.assertEqual(len(overlay.internal_pipeline.actions), 3)
+                testdef = overlay.internal_pipeline.actions[2]
+        self.assertEqual(len(overlay.internal_pipeline.actions), 5)
         self.assertIsInstance(testdef, TestDefinitionAction)
         testdef.validate()
         if not testdef.valid:
@@ -90,6 +90,7 @@ class TestDefinitionHandlers(unittest.Te
         script_list = [
             'lava-test-case',
             'lava-add-keys',
+            'lava-echo-ipv4',
             'lava-install-packages',
             'lava-test-case-attach',
             'lava-os-build',
@@ -106,7 +107,8 @@ class TestDefinitionHandlers(unittest.Te
             'lava-installed-packages',
             'lava-add-sources',
             'lava-background-process-start',
-            'lava-background-process-stop'
+            'lava-background-process-stop',
+            'lava-test-set'
         ]
 
         overlay = None
@@ -135,7 +137,7 @@ class TestDefinitionSimple(unittest.Test
         factory = Factory()
         self.job = factory.create_kvm_job('sample_jobs/kvm-notest.yaml')
 
-    @unittest.skipIf(not os.path.exists('/dev/loop0'), "loopback support not found")
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
     def test_job_without_tests(self):
         deploy = boot = finalize = None
         self.job.pipeline.validate_actions()
@@ -159,7 +161,7 @@ class TestDefinitionParams(unittest.Test
         factory = Factory()
         self.job = factory.create_kvm_job('sample_jobs/kvm-params.yaml')
 
-    @unittest.skipIf(not os.path.exists('/dev/loop0'), "loopback support not found")
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
     def test_job_without_tests(self):
         deploy = boot = finalize = overlay = test = None
         self.job.pipeline.validate_actions()
@@ -171,7 +173,7 @@ class TestDefinitionParams(unittest.Test
             finalize = self.job.pipeline.actions[3]
             overlay = deploy.internal_pipeline.actions[3]
         self.assertIsInstance(overlay, OverlayAction)
-        testdef = overlay.internal_pipeline.actions[1]
+        testdef = overlay.internal_pipeline.actions[2]
         self.assertIsInstance(testdef, TestDefinitionAction)
         test = testdef.internal_pipeline.actions[1]
         install = testdef.internal_pipeline.actions[2]
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/test_fastboot.py 2016.3-1/lava_dispatcher/pipeline/test/test_fastboot.py
--- 2015.9-1/lava_dispatcher/pipeline/test/test_fastboot.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/test_fastboot.py	2016-02-09 16:48:28.000000000 +0000
@@ -0,0 +1,107 @@
+# Copyright (C) 2015 Linaro Limited
+#
+# Author: Senthil Kumaran S <senthil.kumaran@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+import os
+import glob
+import unittest
+
+from lava_dispatcher.pipeline.device import NewDevice
+from lava_dispatcher.pipeline.parser import JobParser
+from lava_dispatcher.pipeline.utils.filesystem import mkdtemp
+from lava_dispatcher.pipeline.action import JobError
+from lava_dispatcher.pipeline.test.test_basic import pipeline_reference
+from lava_dispatcher.pipeline.actions.deploy import DeployAction
+from lava_dispatcher.pipeline.actions.boot.fastboot import BootAction
+
+
+class Factory(object):  # pylint: disable=too-few-public-methods
+    """
+    Not Model based, this is not a Django factory.
+    Factory objects are dispatcher based classes, independent
+    of any database objects.
+    """
+    def create_fastboot_job(self, filename, output_dir='/tmp/'):  # pylint: disable=no-self-use
+        device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/nexus4-01.yaml'))
+        fastboot_yaml = os.path.join(os.path.dirname(__file__), filename)
+        with open(fastboot_yaml) as sample_job_data:
+            parser = JobParser()
+            job = parser.parse(sample_job_data, device, 4212, None, output_dir=output_dir)
+        return job
+
+
+class TestFastbootDeploy(unittest.TestCase):  # pylint: disable=too-many-public-methods
+
+    def setUp(self):
+        super(TestFastbootDeploy, self).setUp()
+        factory = Factory()
+        self.job = factory.create_fastboot_job('sample_jobs/fastboot.yaml',
+                                               mkdtemp())
+
+    def test_deploy_job(self):
+        self.assertEqual(self.job.pipeline.job, self.job)
+        for action in self.job.pipeline.actions:
+            if isinstance(action, DeployAction):
+                self.assertEqual(action.job, self.job)
+
+    def test_pipeline(self):
+        description_ref = pipeline_reference('fastboot.yaml')
+        self.assertEqual(description_ref, self.job.pipeline.describe(False))
+
+    def test_validate(self):
+        try:
+            self.job.pipeline.validate_actions()
+        except JobError as exc:
+            self.fail(exc)
+        for action in self.job.pipeline.actions:
+            self.assertEqual([], action.errors)
+
+    def test_overlay(self):
+        overlay = None
+        for action in self.job.pipeline.actions:
+            self.assertIsNotNone(action.name)
+            if isinstance(action, DeployAction):
+                overlay = action.pipeline.children[action.pipeline][0]
+        self.assertIsNotNone(overlay)
+        # these tests require that lava-dispatcher itself is installed, not just running tests from a git clone
+        self.assertTrue(os.path.exists(overlay.lava_test_dir))
+        self.assertIsNot(overlay.lava_test_dir, '/')
+        self.assertNotIn('lava_multi_node_test_dir', dir(overlay))
+        self.assertNotIn('lava_multi_node_cache_file', dir(overlay))
+        self.assertNotIn('lava_lmp_test_dir', dir(overlay))
+        self.assertNotIn('lava_lmp_cache_file', dir(overlay))
+        self.assertIsNotNone(overlay.parameters['deployment_data']['lava_test_results_dir'])
+        self.assertIsNotNone(overlay.parameters['deployment_data']['lava_test_sh_cmd'])
+        self.assertEqual(overlay.parameters['deployment_data']['distro'], 'android')
+        self.assertIsNotNone(overlay.parameters['deployment_data']['lava_test_results_part_attr'])
+        self.assertIsNotNone(glob.glob(os.path.join(overlay.lava_test_dir, 'lava-*')))
+
+    def test_boot(self):
+        for action in self.job.pipeline.actions:
+            if isinstance(action, BootAction):
+                # get the action & populate it
+                self.assertEqual(action.parameters['method'], 'fastboot')
+                self.assertEqual(action.parameters['prompts'],
+                                 ['shell@mako:/ $', 'shell@mako'])
+
+    def test_testdefinitions(self):
+        for action in self.job.pipeline.actions:
+            if action.name == 'test':
+                # get the action & populate it
+                self.assertEqual(len(action.parameters['definitions']), 2)
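
test_fastboot.py parses sample_jobs/fastboot.yaml against a nexus4 device; that sample job is not included in this diff, but the assertions constrain its shape: an Android deployment (deployment_data distro 'android'), a boot action with method fastboot and prompts ['shell@mako:/ $', 'shell@mako'], and a test action with two definitions. A hypothetical fragment consistent with those assertions (the two definitions are copied from other samples in this diff purely as placeholders, and the deploy stanza is omitted because none of its keys are visible here):

      - boot:
         method: fastboot
         prompts:
           - 'shell@mako:/ $'
           - 'shell@mako'

      - test:
         definitions:
             - repository: git://git.linaro.org/qa/test-definitions.git
               from: git
               path: ubuntu/smoke-tests-basic.yaml
               name: smoke-tests
             - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
               from: git
               path: lava-test-shell/single-node/singlenode03.yaml
               name: singlenode-advanced
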
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/test_ipxe.py 2016.3-1/lava_dispatcher/pipeline/test/test_ipxe.py
--- 2015.9-1/lava_dispatcher/pipeline/test/test_ipxe.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/test_ipxe.py	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,290 @@
+# Copyright (C) 2014 Linaro Limited
+#
+# Author: Matthew Hart <matthew.hart@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+
+import os
+import yaml
+import tarfile
+import unittest
+from lava_dispatcher.pipeline.device import NewDevice
+from lava_dispatcher.pipeline.parser import JobParser
+from lava_dispatcher.pipeline.actions.boot.ipxe import (
+    BootloaderAction,
+    BootloaderCommandOverlay
+)
+from lava_dispatcher.pipeline.actions.deploy.apply_overlay import CompressRamdisk
+from lava_dispatcher.pipeline.actions.deploy.tftp import TftpAction
+from lava_dispatcher.pipeline.job import Job
+from lava_dispatcher.pipeline.action import Pipeline, InfrastructureError, JobError
+from lava_dispatcher.pipeline.test.test_basic import pipeline_reference
+from lava_dispatcher.pipeline.utils.network import dispatcher_ip
+from lava_dispatcher.pipeline.utils.shell import infrastructure_error
+from lava_dispatcher.pipeline.utils.filesystem import mkdtemp, tftpd_dir
+from lava_dispatcher.pipeline.utils.strings import substitute
+from lava_dispatcher.pipeline.utils.constants import (
+    SHUTDOWN_MESSAGE,
+    BOOT_MESSAGE,
+)
+
+
+class Factory(object):  # pylint: disable=too-few-public-methods
+    """
+    Not Model based, this is not a Django factory.
+    Factory objects are dispatcher based classes, independent
+    of any database objects.
+    """
+    def create_job(self, filename, output_dir='/tmp/'):  # pylint: disable=no-self-use
+        device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/x86-01.yaml'))
+        yaml = os.path.join(os.path.dirname(__file__), filename)
+        with open(yaml) as sample_job_data:
+            parser = JobParser()
+            job = parser.parse(sample_job_data, device, 4212, None, output_dir=output_dir)
+        return job
+
+
+class TestBootloaderAction(unittest.TestCase):  # pylint: disable=too-many-public-methods
+
+    def test_simulated_action(self):
+        factory = Factory()
+        job = factory.create_job('sample_jobs/ipxe-ramdisk.yaml')
+        self.assertIsNotNone(job)
+
+        description_ref = pipeline_reference('ipxe.yaml')
+        self.assertEqual(description_ref, job.pipeline.describe(False))
+
+        self.assertIsNone(job.validate())
+        self.assertEqual(job.device['device_type'], 'x86')
+
+    def test_tftp_pipeline(self):
+        factory = Factory()
+        job = factory.create_job('sample_jobs/ipxe-ramdisk.yaml')
+        self.assertEqual(
+            [action.name for action in job.pipeline.actions],
+            ['tftp-deploy', 'bootloader-action', 'lava-test-retry', 'finalize']
+        )
+        tftp = [action for action in job.pipeline.actions if action.name == 'tftp-deploy'][0]
+        self.assertTrue(tftp.get_common_data('tftp', 'ramdisk'))
+        self.assertIsNotNone(tftp.internal_pipeline)
+        self.assertEqual(
+            [action.name for action in tftp.internal_pipeline.actions],
+            ['download_retry', 'download_retry', 'download_retry', 'prepare-tftp-overlay', 'deploy-device-env']
+        )
+        self.assertIn('ramdisk', [action.key for action in tftp.internal_pipeline.actions if hasattr(action, 'key')])
+        self.assertIn('kernel', [action.key for action in tftp.internal_pipeline.actions if hasattr(action, 'key')])
+        # allow root to compare the path (with the mkdtemp added)
+        paths = {action.path for action in tftp.internal_pipeline.actions if hasattr(action, 'path')}
+        self.assertIn(
+            tftpd_dir(),
+            [item for item in paths][0]
+        )
+
+    def test_device_x86(self):
+        factory = Factory()
+        job = factory.create_job('sample_jobs/ipxe-ramdisk.yaml')
+        self.assertEqual(
+            job.device['commands']['connect'],
+            'telnet bumblebee 8003'
+        )
+        self.assertEqual(job.device['commands'].get('interrupt', ' '), ' ')
+        methods = job.device['actions']['boot']['methods']
+        self.assertIn('ipxe', methods)
+        self.assertEqual(methods['ipxe']['parameters'].get('bootloader_prompt', None), 'iPXE>')
+
+    def test_bootloader_action(self):
+        factory = Factory()
+        job = factory.create_job('sample_jobs/ipxe-ramdisk.yaml')
+        job.validate()
+        self.assertEqual(job.pipeline.errors, [])
+        self.assertIn('ipxe', job.device['actions']['boot']['methods'])
+        params = job.device['actions']['boot']['methods']['ipxe']['parameters']
+        boot_message = params.get('boot_message', BOOT_MESSAGE)
+        self.assertIsNotNone(boot_message)
+        for action in job.pipeline.actions:
+            action.validate()
+            if isinstance(action, BootloaderAction):
+                self.assertIn('method', action.parameters)
+                self.assertEqual('ipxe', action.parameters['method'])
+                self.assertEqual(
+                    'reboot: Restarting system',
+                    action.parameters.get('parameters', {}).get('shutdown-message', SHUTDOWN_MESSAGE)
+                )
+            if isinstance(action, TftpAction):
+                self.assertIn('ramdisk', action.parameters)
+                self.assertIn('kernel', action.parameters)
+                self.assertIn('to', action.parameters)
+                self.assertEqual('tftp', action.parameters['to'])
+            self.assertTrue(action.valid)
+
+    def test_overlay_action(self):  # pylint: disable=too-many-locals
+        parameters = {
+            'device_type': 'x86',
+            'job_name': 'ipxe-pipeline',
+            'job_timeout': '15m',
+            'action_timeout': '5m',
+            'priority': 'medium',
+            'output_dir': mkdtemp(),
+            'actions': {
+                'boot': {
+                    'method': 'ipxe',
+                    'commands': 'ramdisk',
+                    'prompts': ['linaro-test', 'root@debian:~#']
+                },
+                'deploy': {
+                    'ramdisk': 'initrd.gz',
+                    'kernel': 'zImage',
+                }
+            }
+        }
+        device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/x86-01.yaml'))
+        job = Job(4212, None, parameters)
+        job.device = device
+        pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
+        job.set_pipeline(pipeline)
+        overlay = BootloaderCommandOverlay()
+        pipeline.add_action(overlay)
+        try:
+            ip_addr = dispatcher_ip()
+        except InfrastructureError as exc:
+            raise RuntimeError("Unable to get dispatcher IP address: %s" % exc)
+        parsed = []
+        kernel = parameters['actions']['deploy']['kernel']
+        ramdisk = parameters['actions']['deploy']['ramdisk']
+
+        substitution_dictionary = {
+            '{SERVER_IP}': ip_addr,
+            '{RAMDISK}': ramdisk,
+            '{KERNEL}': kernel,
+            '{LAVA_MAC}': "00:00:00:00:00:00"
+        }
+        params = device['actions']['boot']['methods']
+        params['ipxe']['ramdisk']['commands'] = substitute(params['ipxe']['ramdisk']['commands'], substitution_dictionary)
+
+        commands = params['ipxe']['ramdisk']['commands']
+        self.assertIs(type(commands), list)
+        self.assertIn("dhcp net0", commands)
+        self.assertIn("set console console=ttyS0,115200n8 lava_mac=00:00:00:00:00:00", commands)
+        self.assertIn("set extraargs init=/sbin/init ip=dhcp", commands)
+        self.assertNotIn("kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}", commands)
+        self.assertNotIn("initrd tftp://{SERVER_IP}/{RAMDISK}", commands)
+        self.assertIn("boot", commands)
+
+    @unittest.skipIf(not os.path.exists('/dev/loop0'), "loopback support not found")
+    def test_download_action(self):
+        factory = Factory()
+        job = factory.create_job('sample_jobs/ipxe.yaml')
+        for action in job.pipeline.actions:
+            action.validate()
+            self.assertTrue(action.valid)
+        job.validate()
+        self.assertEqual(job.pipeline.errors, [])
+        deploy = None
+        overlay = None
+        extract = None
+        for action in job.pipeline.actions:
+            if action.name == 'tftp-deploy':
+                deploy = action
+        if deploy:
+            for action in deploy.internal_pipeline.actions:
+                if action.name == 'prepare-tftp-overlay':
+                    overlay = action
+        if overlay:
+            for action in overlay.internal_pipeline.actions:
+                if action.name == 'extract-nfsrootfs':
+                    extract = action
+        self.assertIn('lava_test_results_dir', overlay.data)
+        self.assertIn('/lava-', overlay.data['lava_test_results_dir'])
+        self.assertIsNotNone(extract)
+        self.assertEqual(extract.timeout.duration, job.parameters['timeouts'][extract.name]['seconds'])
+
+    @unittest.skipIf(not os.path.exists('/dev/loop0'), "loopback support not found")
+    def test_reset_actions(self):
+        factory = Factory()
+        job = factory.create_job('sample_jobs/ipxe.yaml')
+        bootloader_action = None
+        bootloader_retry = None
+        reset_action = None
+        for action in job.pipeline.actions:
+            action.validate()
+            self.assertTrue(action.valid)
+            if action.name == 'bootloader-action':
+                bootloader_action = action
+        names = [r_action.name for r_action in bootloader_action.internal_pipeline.actions]
+        self.assertIn('connect-device', names)
+        self.assertIn('bootloader-retry', names)
+        for action in bootloader_action.internal_pipeline.actions:
+            if action.name == 'bootloader-retry':
+                bootloader_retry = action
+        names = [r_action.name for r_action in bootloader_retry.internal_pipeline.actions]
+        self.assertIn('reboot-device', names)
+        self.assertIn('bootloader-interrupt', names)
+        self.assertIn('expect-shell-connection', names)
+        self.assertIn('bootloader-commands', names)
+        for action in bootloader_retry.internal_pipeline.actions:
+            if action.name == 'reboot-device':
+                reset_action = action
+        names = [r_action.name for r_action in reset_action.internal_pipeline.actions]
+        self.assertIn('soft-reboot', names)
+        self.assertIn('pdu_reboot', names)
+        self.assertIn('power_on', names)
+
+    @unittest.skipIf(infrastructure_error('telnet'), "telnet not installed")
+    def test_prompt_from_job(self):
+        """
+        Support setting the prompt after login via the job
+
+        Loads a known YAML, adds a prompt to the dict and re-parses the job.
+        Checks that the prompt is available in the expect_shell_connection action.
+        """
+        factory = Factory()
+        job = factory.create_job('sample_jobs/ipxe-ramdisk.yaml')
+        job.validate()
+        bootloader = [action for action in job.pipeline.actions if action.name == 'bootloader-action'][0]
+        retry = [action for action in bootloader.internal_pipeline.actions
+                 if action.name == 'bootloader-retry'][0]
+        expect = [action for action in retry.internal_pipeline.actions
+                  if action.name == 'expect-shell-connection'][0]
+        check = expect.parameters
+        device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/x86-01.yaml'))
+        extra_yaml = os.path.join(os.path.dirname(__file__), 'sample_jobs/ipxe.yaml')
+        with open(extra_yaml) as data:
+            sample_job_string = data.read()
+        parser = JobParser()
+        sample_job_data = yaml.load(sample_job_string)
+        boot = [item['boot'] for item in sample_job_data['actions'] if 'boot' in item][0]
+        sample_job_string = yaml.dump(sample_job_data)
+        job = parser.parse(sample_job_string, device, 4212, None, output_dir='/tmp')
+        job.validate()
+        bootloader = [action for action in job.pipeline.actions if action.name == 'bootloader-action'][0]
+        retry = [action for action in bootloader.internal_pipeline.actions
+                 if action.name == 'bootloader-retry'][0]
+        expect = [action for action in retry.internal_pipeline.actions
+                  if action.name == 'expect-shell-connection'][0]
+        self.assertNotEqual(check, expect.parameters)
+
+    def test_xz_nfs(self):
+        factory = Factory()
+        job = factory.create_job('sample_jobs/ipxe-nfs.yaml')
+        # this job won't validate as the .xz nfsrootfs URL is a fiction
+        self.assertRaises(JobError, job.validate)
+        tftp_deploy = [action for action in job.pipeline.actions if action.name == 'tftp-deploy'][0]
+        prepare = [action for action in tftp_deploy.internal_pipeline.actions if action.name == 'prepare-tftp-overlay'][0]
+        nfs = [action for action in prepare.internal_pipeline.actions if action.name == 'extract-nfsrootfs'][0]
+        self.assertIn('compression', nfs.parameters['nfsrootfs'])
+        self.assertEqual(nfs.parameters['nfsrootfs']['compression'], 'xz')
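
test_ipxe.py also pins down how the x86 device dictionary describes the ipxe boot method: a bootloader_prompt of 'iPXE>' and a ramdisk command list containing {SERVER_IP}, {KERNEL}, {RAMDISK} and {LAVA_MAC} placeholders, which BootloaderCommandOverlay substitutes at run time. A sketch of that device fragment, reconstructed from the assertions above (the command ordering and any commands not asserted on are assumptions):

    actions:
      boot:
        methods:
          ipxe:
            parameters:
              bootloader_prompt: 'iPXE>'
            ramdisk:
              commands:
              - dhcp net0
              - set console console=ttyS0,115200n8 lava_mac={LAVA_MAC}
              - set extraargs init=/sbin/init ip=dhcp
              - kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}
              - initrd tftp://{SERVER_IP}/{RAMDISK}
              - boot
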
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/test_kvm.py 2016.3-1/lava_dispatcher/pipeline/test/test_kvm.py
--- 2015.9-1/lava_dispatcher/pipeline/test/test_kvm.py	2015-09-09 14:28:12.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/test_kvm.py	2016-03-04 14:35:18.000000000 +0000
@@ -25,8 +25,9 @@ import unittest
 import yaml
 
 from lava_dispatcher.pipeline.utils.filesystem import mkdtemp
-from lava_dispatcher.pipeline.action import Pipeline, Action, JobError
+from lava_dispatcher.pipeline.action import Pipeline, Action, JobError, Timeout
 from lava_dispatcher.pipeline.test.test_basic import Factory, pipeline_reference
+from lava_dispatcher.pipeline.shell import ShellSession
 from lava_dispatcher.pipeline.job import Job
 from lava_dispatcher.pipeline.actions.deploy import DeployAction
 from lava_dispatcher.pipeline.actions.boot.qemu import BootAction
@@ -152,9 +153,12 @@ class TestKVMBasicDeploy(unittest.TestCa
 
     def test_pipeline(self):
         description_ref = pipeline_reference('kvm.yaml')
+        deploy = [action for action in self.job.pipeline.actions if action.name == 'deployimages'][0]
+        overlay = [action for action in deploy.internal_pipeline.actions if action.name == 'lava-overlay'][0]
+        self.assertIn('persistent-nfs-overlay', [action.name for action in overlay.internal_pipeline.actions])
         self.assertEqual(description_ref, self.job.pipeline.describe(False))
 
-    @unittest.skipIf(not os.path.exists('/dev/loop0'), "loopback support not found")
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
     def test_validate(self):
         try:
             self.job.pipeline.validate_actions()
@@ -188,6 +192,13 @@ class TestKVMBasicDeploy(unittest.TestCa
             if isinstance(action, BootAction):
                 # get the action & populate it
                 self.assertEqual(action.parameters['method'], 'qemu')
+                self.assertEqual(action.parameters['prompts'], ['linaro-test', 'root@debian:~#'])
+                params = action.parameters.get('auto_login', None)
+
+                if 'login_prompt' in params:
+                    self.assertEqual(params['login_prompt'], 'login:')
+                if 'username' in params:
+                    self.assertEqual(params['username'], 'root')
 
     def test_testdefinitions(self):
         for action in self.job.pipeline.actions:
@@ -213,7 +224,7 @@ class TestKVMQcow2Deploy(unittest.TestCa
         description_ref = pipeline_reference('kvm-qcow2.yaml')
         self.assertEqual(description_ref, self.job.pipeline.describe(False))
 
-    @unittest.skipIf(not os.path.exists('/dev/loop0'), "loopback support not found")
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
     def test_validate(self):
         try:
             self.job.pipeline.validate_actions()
@@ -254,7 +265,7 @@ class TestKVMInlineTestDeploy(unittest.T
             if isinstance(action, DeployAction):
                 self.assertEqual(action.job, self.job)
 
-    @unittest.skipIf(not os.path.exists('/dev/loop0'), "loopback support not found")
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
     def test_validate(self):
         try:
             self.job.pipeline.validate_actions()
@@ -271,7 +282,7 @@ class TestKVMInlineTestDeploy(unittest.T
         for action in self.job.pipeline.actions:
             if isinstance(action, DeployAction):
                 overlay = action.pipeline.children[action.pipeline][3]
-                testdef = overlay.internal_pipeline.actions[1]
+                testdef = overlay.internal_pipeline.actions[2]
                 inline_repo = testdef.internal_pipeline.actions[0]
                 break
 
@@ -303,3 +314,222 @@ class TestKVMInlineTestDeploy(unittest.T
                                         'yaml_line': 53},
                                 'yaml_line': 38}
             self.assertEqual(testdef, expected_testdef)
+
+    def test_autologin_prompt_patterns(self):
+        self.assertEqual(len(self.job.pipeline.describe()), 4)
+
+        bootaction = [action for action in self.job.pipeline.actions if action.name == 'boot_image_retry'][0]
+        autologinaction = [action for action in bootaction.internal_pipeline.actions if action.name == 'auto-login-action'][0]
+
+        autologinaction.parameters.update({'auto_login': {'login_prompt': 'login:',
+                                                          'username': 'root'},
+                                           'prompts': ['root@debian:~#']})
+
+        # initialise the first Connection object, a command line shell
+        shell_command = FakeCommand(autologinaction.timeout)
+        shell_connection = ShellSession(self.job, shell_command)
+
+        # Test the AutoLoginAction directly
+        conn = autologinaction.run(shell_connection)
+
+        self.assertEqual(conn.prompt_str, ['lava-test: # ', 'root@debian:~#'])
+
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
+    def test_autologin_void_login_prompt(self):
+        self.assertEqual(len(self.job.pipeline.describe()), 4)
+
+        bootaction = [action for action in self.job.pipeline.actions if action.name == 'boot_image_retry'][0]
+        autologinaction = [action for action in bootaction.internal_pipeline.actions if action.name == 'auto-login-action'][0]
+
+        autologinaction.parameters.update({'auto_login': {'login_prompt': '',
+                                                          'username': 'root'},
+                                           'prompts': ['root@debian:~#']})
+
+        self.assertRaises(JobError, self.job.validate)
+
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
+    def test_missing_autologin_void_prompts_list(self):
+        self.assertEqual(len(self.job.pipeline.describe()), 4)
+
+        bootaction = [action for action in self.job.pipeline.actions if action.name == 'boot_image_retry'][0]
+        autologinaction = [action for action in bootaction.internal_pipeline.actions if action.name == 'auto-login-action'][0]
+
+        autologinaction.parameters.update({'prompts': []})
+
+        self.assertRaises(JobError, self.job.validate)
+
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
+    def test_missing_autologin_void_prompts_list_item(self):
+        self.assertEqual(len(self.job.pipeline.describe()), 4)
+
+        bootaction = [action for action in self.job.pipeline.actions if action.name == 'boot_image_retry'][0]
+        autologinaction = [action for action in bootaction.internal_pipeline.actions if action.name == 'auto-login-action'][0]
+
+        autologinaction.parameters.update({'prompts': ['']})
+
+        self.assertRaises(JobError, self.job.validate)
+
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
+    def test_missing_autologin_void_prompts_list_item2(self):
+        self.assertEqual(len(self.job.pipeline.describe()), 4)
+
+        bootaction = [action for action in self.job.pipeline.actions if action.name == 'boot_image_retry'][0]
+        autologinaction = [action for action in bootaction.internal_pipeline.actions if action.name == 'auto-login-action'][0]
+
+        autologinaction.parameters.update({'prompts': ['root@debian:~#', '']})
+
+        self.assertRaises(JobError, self.job.validate)
+
+    def test_missing_autologin_prompts_list(self):
+        self.assertEqual(len(self.job.pipeline.describe()), 4)
+
+        bootaction = [action for action in self.job.pipeline.actions if action.name == 'boot_image_retry'][0]
+        autologinaction = [action for action in bootaction.internal_pipeline.actions if action.name == 'auto-login-action'][0]
+
+        autologinaction.parameters.update({'prompts': ['root@debian:~#']})
+
+        # initialise the first Connection object, a command line shell
+        shell_command = FakeCommand(autologinaction.timeout)
+        shell_connection = ShellSession(self.job, shell_command)
+
+        # Test the AutoLoginAction directly
+        conn = autologinaction.run(shell_connection)
+
+        self.assertEqual(conn.prompt_str, ['lava-test: # ', 'root@debian:~#'])
+
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
+    def test_missing_autologin_void_prompts_str(self):
+        self.assertEqual(len(self.job.pipeline.describe()), 4)
+
+        bootaction = [action for action in self.job.pipeline.actions if action.name == 'boot_image_retry'][0]
+        autologinaction = [action for action in bootaction.internal_pipeline.actions if action.name == 'auto-login-action'][0]
+
+        autologinaction.parameters.update({'prompts': ''})
+
+        self.assertRaises(JobError, self.job.validate)
+
+    def test_missing_autologin_prompts_str(self):
+        self.assertEqual(len(self.job.pipeline.describe()), 4)
+
+        bootaction = [action for action in self.job.pipeline.actions if action.name == 'boot_image_retry'][0]
+        autologinaction = [action for action in bootaction.internal_pipeline.actions if action.name == 'auto-login-action'][0]
+
+        autologinaction.parameters.update({'prompts': 'root@debian:~#'})
+
+        # initialise the first Connection object, a command line shell
+        shell_command = FakeCommand(autologinaction.timeout)
+        shell_connection = ShellSession(self.job, shell_command)
+
+        # Test the AutoLoginAction directly
+        conn = autologinaction.run(shell_connection)
+
+        self.assertEqual(conn.prompt_str, ['lava-test: # ', 'root@debian:~#'])
+
+    def test_download_checksum_match_success(self):
+        self.assertEqual(len(self.job.pipeline.describe()), 4)
+
+        deployimagesaction = [action for action in self.job.pipeline.actions if action.name == 'deployimages'][0]
+        downloadretryaction = [action for action in deployimagesaction.internal_pipeline.actions if action.name == 'download_retry'][0]
+        httpdownloadaction = [action for action in downloadretryaction.internal_pipeline.actions if action.name == 'http_download'][0]
+
+        # Just a small image
+        httpdownloadaction.url = 'http://images.validation.linaro.org/unit-tests/rootfs.gz'
+        httpdownloadaction.parameters.update({'images': {'rootfs': {
+            'url': httpdownloadaction.url,
+            'md5sum': '6ea432ac3c23210c816551782346ed1c',
+            'sha256sum': '1a76b17701b9fdf6346b88eb49b0143a9c6912701b742a6e5826d6856edccd21'}}})
+        httpdownloadaction.validate()
+        httpdownloadaction.run(None)
+
+    def test_download_checksum_match_fail(self):
+        self.assertEqual(len(self.job.pipeline.describe()), 4)
+
+        deployimagesaction = [action for action in self.job.pipeline.actions if action.name == 'deployimages'][0]
+        downloadretryaction = [action for action in deployimagesaction.internal_pipeline.actions if action.name == 'download_retry'][0]
+        httpdownloadaction = [action for action in downloadretryaction.internal_pipeline.actions if action.name == 'http_download'][0]
+
+        # Just a small image
+        httpdownloadaction.url = 'http://images.validation.linaro.org/unit-tests/rootfs.gz'
+        httpdownloadaction.parameters.update({'images': {'rootfs': {
+            'url': httpdownloadaction.url,
+            'md5sum': 'df1bd1598699e7a89d2e111111111111',
+            'sha256sum': '92d6ff900d0c3656ab3f214ce6efd708f898fc5e259111111111111111111111'}}})
+        httpdownloadaction.validate()
+
+        self.assertRaises(JobError, httpdownloadaction.run, None)
+
+    def test_download_no_images_no_checksum(self):
+        self.assertEqual(len(self.job.pipeline.describe()), 4)
+
+        deployimagesaction = [action for action in self.job.pipeline.actions if action.name == 'deployimages'][0]
+        downloadretryaction = [action for action in deployimagesaction.internal_pipeline.actions if action.name == 'download_retry'][0]
+        httpdownloadaction = [action for action in downloadretryaction.internal_pipeline.actions if action.name == 'http_download'][0]
+
+        # Just a small image
+        httpdownloadaction.url = 'http://images.validation.linaro.org/unit-tests/rootfs.gz'
+        del httpdownloadaction.parameters['images']
+        httpdownloadaction.parameters.update({'rootfs': {'url': httpdownloadaction.url}})
+        httpdownloadaction.validate()
+        httpdownloadaction.run(None)
+
+    def test_download_no_images_match_success(self):
+        self.assertEqual(len(self.job.pipeline.describe()), 4)
+
+        deployimagesaction = [action for action in self.job.pipeline.actions if action.name == 'deployimages'][0]
+        downloadretryaction = [action for action in deployimagesaction.internal_pipeline.actions if action.name == 'download_retry'][0]
+        httpdownloadaction = [action for action in downloadretryaction.internal_pipeline.actions if action.name == 'http_download'][0]
+
+        # Just a small image
+        httpdownloadaction.url = 'http://images.validation.linaro.org/unit-tests/rootfs.gz'
+        del httpdownloadaction.parameters['images']
+        httpdownloadaction.parameters.update({
+            'rootfs': {'url': httpdownloadaction.url},
+            'md5sum': {'rootfs': '6ea432ac3c23210c816551782346ed1c'},
+            'sha256sum': {'rootfs': '1a76b17701b9fdf6346b88eb49b0143a9c6912701b742a6e5826d6856edccd21'}})
+        httpdownloadaction.validate()
+        httpdownloadaction.run(None)
+
+    def test_download_no_images_match_fail(self):
+        self.assertEqual(len(self.job.pipeline.describe()), 4)
+
+        deployimagesaction = [action for action in self.job.pipeline.actions if action.name == 'deployimages'][0]
+        downloadretryaction = [action for action in deployimagesaction.internal_pipeline.actions if action.name == 'download_retry'][0]
+        httpdownloadaction = [action for action in downloadretryaction.internal_pipeline.actions if action.name == 'http_download'][0]
+
+        # Just a small image
+        httpdownloadaction.url = 'http://images.validation.linaro.org/unit-tests/rootfs.gz'
+        del httpdownloadaction.parameters['images']
+        httpdownloadaction.parameters.update({
+            'rootfs': {'url': httpdownloadaction.url},
+            'md5sum': {'rootfs': '6ea432ac3c232122222221782346ed1c'},
+            'sha256sum': {'rootfs': '1a76b17701b9fdf63444444444444444446912701b742a6e5826d6856edccd21'}})
+        httpdownloadaction.validate()
+        self.assertRaises(JobError, httpdownloadaction.run, None)
+
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
+    def test_no_test_action_validate(self):
+        self.assertEqual(len(self.job.pipeline.describe()), 4)
+
+        del self.job.pipeline.actions[2]
+
+        try:
+            self.job.pipeline.validate_actions()
+        except JobError as exc:
+            self.fail(exc)
+        for action in self.job.pipeline.actions:
+            self.assertEqual([], action.errors)
+
+
+class FakeCommand(object):
+
+    def __init__(self, lava_timeout):
+        if not lava_timeout or not isinstance(lava_timeout, Timeout):
+            raise RuntimeError("FakeCommand needs a timeout set by the calling Action")
+        self.name = "FakeCommand"
+        self.lava_timeout = lava_timeout
+
+    def sendline(self, s='', delay=0, send_char=True):  # pylint: disable=invalid-name
+        pass
+
+    def expect(self, *args, **kw):
+        pass
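
The checksum tests earlier in this file pass literal md5sum/sha256sum values through the job parameters and expect http_download to raise JobError on a mismatch. As a rough standalone illustration of that kind of comparison (not the dispatcher's own implementation), hashing a downloaded file and checking it against the expected digest might look like this; the local path is hypothetical and the expected value is the one used in the matching test above:

    import hashlib

    def sha256_of(path, chunk_size=4096):
        # stream the file so large images do not need to fit in memory
        digest = hashlib.sha256()
        with open(path, 'rb') as handle:
            for block in iter(lambda: handle.read(chunk_size), b''):
                digest.update(block)
        return digest.hexdigest()

    expected = '1a76b17701b9fdf6346b88eb49b0143a9c6912701b742a6e5826d6856edccd21'
    matches = sha256_of('/tmp/rootfs.gz') == expected  # hypothetical local copy
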
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/test_lavashell.py 2016.3-1/lava_dispatcher/pipeline/test/test_lavashell.py
--- 2015.9-1/lava_dispatcher/pipeline/test/test_lavashell.py	2015-09-09 14:30:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/test_lavashell.py	2015-12-14 09:33:11.000000000 +0000
@@ -68,9 +68,9 @@ class TestDefinitionHandlers(unittest.Te
                 testshell = action.pipeline.children[action.pipeline][0]
                 break
         self.assertTrue(testshell.valid)
-        self.assertFalse(testshell.check_patterns('exit', None))
-        self.assertFalse(testshell.check_patterns('eof', None))
-        self.assertFalse(testshell.check_patterns('timeout', None))
+        self.assertFalse(testshell.check_patterns('exit', None, ''))
+        self.assertFalse(testshell.check_patterns('eof', None, ''))
+        self.assertFalse(testshell.check_patterns('timeout', None, ''))
 
 
 class TestShellResults(unittest.TestCase):   # pylint: disable=too-many-public-methods
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/test_menus.py 2016.3-1/lava_dispatcher/pipeline/test/test_menus.py
--- 2015.9-1/lava_dispatcher/pipeline/test/test_menus.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/test_menus.py	2016-02-02 08:07:05.000000000 +0000
@@ -0,0 +1,177 @@
+# Copyright (C) 2015 Linaro Limited
+#
+# Author: Neil Williams <neil.williams@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+
+import os
+import re
+import logging
+import unittest
+from lava_dispatcher.pipeline.utils.filesystem import mkdtemp
+from lava_dispatcher.pipeline.device import NewDevice
+from lava_dispatcher.pipeline.parser import JobParser
+from lava_dispatcher.pipeline.action import Timeout, JobError
+from lava_dispatcher.pipeline.shell import ShellSession, ShellCommand
+from lava_dispatcher.pipeline.test.test_basic import pipeline_reference
+from lava_dispatcher.pipeline.utils.strings import substitute
+from lava_dispatcher.pipeline.menus.menus import SelectorMenu
+
+# pylint: disable=too-many-public-methods
+
+
+class TestSelectorMenu(unittest.TestCase):
+
+    def setUp(self):
+        self.menu = SelectorMenu()
+        self.menu.item_markup = (r'\[', r'\]')
+        self.menu.item_class = '0-9'
+        self.menu.separator = ' '
+        self.menu.label_class = 'a-zA-Z0-9'
+        self.menu.prompt = None
+
+    def test_menu_parser(self):
+        pattern = "%s([%s]+)%s%s([%s]*)" % (
+            re.escape(self.menu.item_markup[0]),
+            self.menu.item_class,
+            re.escape(self.menu.item_markup[1]),
+            self.menu.separator,
+            self.menu.label_class
+        )
+        serial_input = """
+    [1] debian
+    [2] tester
+    [3] Shell
+    [4] Boot Manager
+    [5] Reboot
+    [6] Shutdown
+    Start:
+            """
+        selection = self.menu.select(serial_input, 'Shell')
+        self.assertEqual(
+            self.menu.pattern,
+            pattern
+        )
+        for line in serial_input.split('\n'):
+            match = re.search(pattern, line)
+            if match:
+                if match.group(2) == "Shell":
+                    self.assertEqual(match.group(1), selection)
+
+
+class Factory(object):  # pylint: disable=too-few-public-methods
+    """
+    Not Model based, this is not a Django factory.
+    Factory objects are dispatcher based classes, independent
+    of any database objects.
+    """
+    def create_uefi_job(self, filename, output_dir=None):  # pylint: disable=no-self-use
+        device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/mustang-uefi.yaml'))
+        mustang_yaml = os.path.join(os.path.dirname(__file__), filename)
+        with open(mustang_yaml) as sample_job_data:
+            parser = JobParser()
+            job = parser.parse(sample_job_data, device, 0, socket_addr=None, output_dir=output_dir)
+        return job
+
+
+class TestUefi(unittest.TestCase):  # pylint: disable=too-many-public-methods
+
+    def setUp(self):
+        super(TestUefi, self).setUp()
+        factory = Factory()
+        self.job = factory.create_uefi_job('sample_jobs/mustang-menu-ramdisk.yaml', mkdtemp())
+
+    def test_check_char(self):
+        shell = ShellCommand("%s\n" % 'ls', Timeout('fake', 30), logger=logging.getLogger())
+        if shell.exitstatus:
+            raise JobError("%s command exited %d: %s" % ('ls', shell.exitstatus, shell.readlines()))
+        connection = ShellSession(self.job, shell)
+        self.assertFalse(hasattr(shell, 'check_char'))
+        self.assertTrue(hasattr(connection, 'check_char'))
+        self.assertIsNotNone(connection.check_char)
+
+    def test_selector(self):
+        self.assertIsNotNone(self.job)
+        self.job.validate()
+        uefi_menu = [action for action in self.job.pipeline.actions if action.name == 'uefi-menu-action'][0]
+        selector = [action for action in uefi_menu.internal_pipeline.actions if action.name == 'uefi-menu-selector'][0]
+        params = self.job.device['actions']['boot']['methods']['uefi-menu']['parameters']
+        self.assertEqual(selector.selector.item_markup, params['item_markup'])
+        self.assertEqual(selector.selector.item_class, params['item_class'])
+        self.assertEqual(selector.selector.separator, params['separator'])
+        self.assertEqual(selector.selector.label_class, params['label_class'])
+        self.assertEqual(selector.selector.prompt, params['bootloader_prompt'])  # initial prompt
+        self.assertEqual(selector.boot_message, params['boot_message'])  # final prompt
+        self.assertEqual(
+            selector.send_char_delay,
+            self.job.device['actions']['boot']['methods']['uefi-menu']['parameters']['character_delay'])
+
+    def test_uefi_job(self):
+        self.assertIsNotNone(self.job)
+        self.job.validate()
+        uefi_menu = [action for action in self.job.pipeline.actions if action.name == 'uefi-menu-action'][0]
+        selector = [action for action in uefi_menu.internal_pipeline.actions if action.name == 'uefi-menu-selector'][0]
+        self.assertEqual(
+            selector.selector.prompt,
+            "Start:"
+        )
+        self.assertIsInstance(selector.items, list)
+        description_ref = pipeline_reference('mustang-uefi.yaml')
+        self.assertEqual(description_ref, self.job.pipeline.describe(False))
+        # just dummy strings
+        substitution_dictionary = {
+            '{SERVER_IP}': '10.4.0.1',
+            '{RAMDISK}': None,
+            '{KERNEL}': 'uImage',
+            '{DTB}': 'mustang.dtb',
+            '{NFSROOTFS}': 'tmp/tmp21dfed/',
+            '{TEST_MENU_NAME}': 'LAVA NFS Test Image'
+        }
+        for block in selector.items:
+            if 'select' in block:
+                if 'enter' in block['select']:
+                    block['select']['enter'] = substitute([block['select']['enter']], substitution_dictionary)
+                if 'items' in block['select']:
+                    block['select']['items'] = substitute(block['select']['items'], substitution_dictionary)
+        count = 0
+        check_block = [
+            {'items': ['Boot Manager'], 'wait': 'Choice:'},
+            {'items': ['Remove Boot Device Entry'], 'fallback': 'Return to Main Menu', 'wait': 'Delete entry'},
+            {'items': ['LAVA NFS Test Image'], 'wait': 'Choice:'},
+            {'items': ['Add Boot Device Entry'], 'wait': 'Select the Boot Device:'},
+            {'items': ['TFTP on MAC Address: 00:01:73:69:5A:EF'], 'wait': 'Get the IP address from DHCP:'},
+            {'enter': ['y'], 'wait': 'Get the TFTP server IP address:'},
+            {'enter': ['10.4.0.1'], 'wait': 'File path of the EFI Application or the kernel :'},
+            {'enter': ['uImage'], 'wait': 'Is an EFI Application?'},
+            {'enter': ['n'], 'wait': 'Boot Type:'},
+            {'enter': ['f'], 'wait': 'Add an initrd:'},
+            {'enter': ['n'], 'wait': 'Get the IP address from DHCP:'},
+            {'enter': ['y'], 'wait': 'Get the TFTP server IP address:'},
+            {'enter': ['10.4.0.1'], 'wait': 'File path of the FDT :'},
+            {'enter': ['mustang.dtb'], 'wait': 'Arguments to pass to the binary:'},
+            {'enter': ['console=ttyS0,115200 earlyprintk=uart8250-32bit,0x1c020000 debug root=/dev/nfs rw '
+                       'nfsroot=10.4.0.1:tmp/tmp21dfed/,tcp,hard,intr ip=dhcp'], 'wait': 'Description for this new Entry:'},
+            {'enter': ['LAVA NFS Test Image'], 'wait': 'Choice:'},
+            {'items': ['Return to main menu'], 'wait': 'Start:'},
+            {'items': ['LAVA NFS Test Image']},
+        ]
+        for item in selector.items:
+            self.assertEqual(
+                item['select'],
+                check_block[count])
+            count += 1
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/test_multinode.py 2016.3-1/lava_dispatcher/pipeline/test/test_multinode.py
--- 2015.9-1/lava_dispatcher/pipeline/test/test_multinode.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/test_multinode.py	2016-03-02 14:34:40.000000000 +0000
@@ -20,13 +20,14 @@
 
 
 import os
+import glob
 import yaml
 import uuid
 import json
 import unittest
 from lava_dispatcher.pipeline.test.fake_coordinator import TestCoordinator
 from lava_dispatcher.pipeline.test.test_basic import Factory
-from lava_dispatcher.pipeline.actions.deploy.image import DeployImageAction
+from lava_dispatcher.pipeline.actions.deploy.image import DeployImagesAction
 from lava_dispatcher.pipeline.actions.deploy.overlay import OverlayAction, MultinodeOverlayAction, CustomisationAction
 from lava_dispatcher.pipeline.actions.boot.qemu import BootQemuRetry, CallQemuAction
 from lava_dispatcher.pipeline.actions.boot import BootAction
@@ -80,7 +81,7 @@ class TestMultinode(unittest.TestCase):
             msg.update(self.base_message)
             return json.dumps(self.coord.dataReceived(msg))
 
-    @unittest.skipIf(not os.path.exists('/dev/loop0'), "loopback support not found")
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
     def test_multinode_jobs(self):
         self.assertIsNotNone(self.client_job)
         self.assertIsNotNone(self.server_job)
@@ -89,7 +90,7 @@ class TestMultinode(unittest.TestCase):
         self.server_job.validate()
         self.assertEqual(self.server_job.pipeline.errors, [])
 
-    @unittest.skipIf(not os.path.exists('/dev/loop0'), "loopback support not found")
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
     def test_protocol(self):
         self.assertEqual(
             ['lava-multinode'],
@@ -119,7 +120,7 @@ class TestMultinode(unittest.TestCase):
         self.assertIn("coordinator_hostname", settings)
 
     def test_multinode_pipeline(self):
-        deploy = [action for action in self.client_job.pipeline.actions if isinstance(action, DeployImageAction)][0]
+        deploy = [action for action in self.client_job.pipeline.actions if isinstance(action, DeployImagesAction)][0]
         self.assertIsNotNone(deploy)
         overlay = [action for action in deploy.internal_pipeline.actions if isinstance(action, OverlayAction)][0]
         self.assertIsNotNone(overlay)
@@ -130,7 +131,7 @@ class TestMultinode(unittest.TestCase):
         client_multinode.validate()
         self.assertEqual(client_multinode.role, 'client')
 
-        deploy = [action for action in self.server_job.pipeline.actions if isinstance(action, DeployImageAction)][0]
+        deploy = [action for action in self.server_job.pipeline.actions if isinstance(action, DeployImagesAction)][0]
         self.assertIsNotNone(deploy)
         overlay = [action for action in deploy.internal_pipeline.actions if isinstance(action, OverlayAction)][0]
         self.assertIsNotNone(overlay)
@@ -176,7 +177,7 @@ class TestMultinode(unittest.TestCase):
             self.assertIs(True, protocol.valid)
         self.assertIsNone(self.coord.dataReceived({}))
 
-    @unittest.skipIf(not os.path.exists('/dev/loop0'), "loopback support not found")
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
     def test_multinode_description(self):
         self.assertIsNotNone(self.client_job)
         self.client_job.validate()
@@ -385,7 +386,7 @@ class TestMultinode(unittest.TestCase):
         }))
 
     def test_protocol_action(self):
-        deploy = [action for action in self.client_job.pipeline.actions if isinstance(action, DeployImageAction)][0]
+        deploy = [action for action in self.client_job.pipeline.actions if isinstance(action, DeployImagesAction)][0]
         customise = [action for action in deploy.internal_pipeline.actions if isinstance(action, CustomisationAction)][0]
         self.assertIn('protocols', deploy.parameters)
         self.assertIn('protocols', customise.parameters)
@@ -405,10 +406,10 @@ class TestMultinode(unittest.TestCase):
                 'action': customise.name,
                 'request': 'lava-send',
                 'messageID': 'test',
-                'yaml_line': 45,
+                'yaml_line': 48,
                 'message': {
                     'key': 'value',
-                    'yaml_line': 47
+                    'yaml_line': 50
                 },
             }
         )
@@ -426,11 +427,11 @@ class TestMultinode(unittest.TestCase):
                 'action': 'customise',
                 'message': {
                     'key': 'value',
-                    'yaml_line': 47
+                    'yaml_line': 50
                 },
                 'messageID': 'test',
                 'request': 'lava-send',
-                'yaml_line': 45
+                'yaml_line': 48
             }
         )
 
@@ -452,11 +453,11 @@ class TestMultinode(unittest.TestCase):
                 'action': 'execute-qemu',
                 'message': {
                     'ipv4': '$IPV4',
-                    'yaml_line': 61
+                    'yaml_line': 67
                 },
                 'messageID': 'test',
                 'request': 'lava-wait',
-                'yaml_line': 58
+                'yaml_line': 64
             }])
         client_calls = {}
         for action in retry.internal_pipeline.actions:
@@ -470,6 +471,12 @@ class TestMultinode(unittest.TestCase):
 
         # now pretend that another job has called lava-send with the same messageID, this would be the reply to the
         # :lava-wait
+        reply = {"/tmp/lava-dispatcher/slave/8833/device.yaml": {"ipaddr": "10.15.206.133"}}
+        cparams = {'timeout': {'minutes': 5, 'yaml_line': 11}, 'messageID': 'ipv4', 'action': 'prepare-scp-overlay', 'message': {'ipaddr': '$ipaddr'}, 'request': 'lava-wait'}
+        self.assertEqual(
+            ('ipv4', {'ipaddr': '10.15.206.133'}),
+            mn_protocol.collate(reply, cparams)
+        )
         reply = {
             "message": {
                 "kvm01": {
@@ -494,9 +501,9 @@ class TestMultinode(unittest.TestCase):
                 'action': 'execute-qemu',
                 'message': {
                     'ipv4': reply['message'][self.client_job.device.target]['ipv4'],
-                    'yaml_line': 61
+                    'yaml_line': 67
                 },
-                'yaml_line': 58,
+                'yaml_line': 64,
                 'request': 'lava-wait',
                 'messageID': 'test'
             }
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/test_removable.py 2016.3-1/lava_dispatcher/pipeline/test/test_removable.py
--- 2015.9-1/lava_dispatcher/pipeline/test/test_removable.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/test_removable.py	2016-03-02 14:34:40.000000000 +0000
@@ -106,7 +106,7 @@ class TestRemovable(unittest.TestCase):
         self.assertIn('/lava-', deploy_action.data['lava_test_results_dir'])
         self.assertIsInstance(deploy_action, MassStorage)
         self.assertIn('image', deploy_action.parameters.keys())
-        dd_action = deploy_action.internal_pipeline.actions[1]
+        dd_action = [action for action in deploy_action.internal_pipeline.actions if action.name == 'dd-image'][0]
         self.assertEqual(
             dd_action.boot_params[dd_action.parameters['device']]['uuid'],
             'usb-SanDisk_Ultra_20060775320F43006019-0:0')
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/test_repeat.py 2016.3-1/lava_dispatcher/pipeline/test/test_repeat.py
--- 2015.9-1/lava_dispatcher/pipeline/test/test_repeat.py	2015-08-07 08:19:39.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/test_repeat.py	2016-03-02 14:34:40.000000000 +0000
@@ -20,6 +20,7 @@
 
 
 import os
+import glob
 import unittest
 from lava_dispatcher.pipeline.actions.boot.qemu import BootQEMUImageAction
 from lava_dispatcher.pipeline.actions.test.shell import TestShellRetry
@@ -37,7 +38,7 @@ class TestRepeatBootTest(unittest.TestCa
         factory = Factory()
         self.job = factory.create_kvm_job('sample_jobs/kvm-repeat.yaml', mkdtemp())
 
-    @unittest.skipIf(not os.path.exists('/dev/loop0'), "loopback support not found")
+    @unittest.skipIf(len(glob.glob('/sys/block/loop*')) <= 0, "loopback support not found")
     def test_basic_structure(self):
         self.assertIsNotNone(self.job)
         self.job.validate()
@@ -54,12 +55,13 @@ class TestRepeatBootTest(unittest.TestCa
         self.assertIn('repeat', self.job.parameters['actions'][1])
         repeat_block = self.job.parameters['actions'][1]['repeat']
         self.assertIn('count', repeat_block)
-        actions = [action for action in repeat_block if 'count' not in action]
+        # params is a list of default params for the actions, not a list of actions.
+        params = [param for param in repeat_block if 'count' not in param]
         self.assertIn('boot', repeat_block['actions'][0])
         self.assertIn('test', repeat_block['actions'][1])
         self.assertIn('boot', repeat_block['actions'][2])
         self.assertIn('test', repeat_block['actions'][3])
-        self.assertEqual(len(actions), 4)
+        self.assertEqual(len(params), 5)
 
     def test_nested_structure(self):
         self.assertIn(['repeat'], [actions.keys() for actions in self.job.parameters['actions']])
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/test_uboot.py 2016.3-1/lava_dispatcher/pipeline/test/test_uboot.py
--- 2015.9-1/lava_dispatcher/pipeline/test/test_uboot.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/test_uboot.py	2016-03-04 14:35:18.000000000 +0000
@@ -21,7 +21,6 @@
 
 import os
 import yaml
-import tarfile
 import unittest
 from lava_dispatcher.pipeline.device import NewDevice
 from lava_dispatcher.pipeline.parser import JobParser
@@ -30,6 +29,7 @@ from lava_dispatcher.pipeline.actions.bo
     UBootCommandOverlay,
     UBootSecondaryMedia
 )
+from lava_dispatcher.pipeline.actions.deploy.apply_overlay import CompressRamdisk
 from lava_dispatcher.pipeline.actions.deploy.tftp import TftpAction
 from lava_dispatcher.pipeline.job import Job
 from lava_dispatcher.pipeline.action import Pipeline, InfrastructureError, JobError
@@ -52,8 +52,8 @@ class Factory(object):  # pylint: disabl
     """
     def create_bbb_job(self, filename, output_dir='/tmp/'):  # pylint: disable=no-self-use
         device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/bbb-01.yaml'))
-        kvm_yaml = os.path.join(os.path.dirname(__file__), filename)
-        with open(kvm_yaml) as sample_job_data:
+        bbb_yaml = os.path.join(os.path.dirname(__file__), filename)
+        with open(bbb_yaml) as sample_job_data:
             parser = JobParser()
             job = parser.parse(sample_job_data, device, 4212, None, output_dir=output_dir)
         return job
@@ -93,6 +93,7 @@ class TestUbootAction(unittest.TestCase)
         self.assertIn('dtb', [action.key for action in tftp.internal_pipeline.actions if hasattr(action, 'key')])
         # allow root to compare the path (with the mkdtemp added)
         paths = {action.path for action in tftp.internal_pipeline.actions if hasattr(action, 'path')}
+        self.assertNotIn('=', tftpd_dir())
         self.assertIn(
             tftpd_dir(),
             [item for item in paths][0]
@@ -118,6 +119,7 @@ class TestUbootAction(unittest.TestCase)
         self.assertEqual(job.pipeline.errors, [])
         self.assertIn('u-boot', job.device['actions']['boot']['methods'])
         params = job.device['actions']['boot']['methods']['u-boot']['parameters']
+        self.assertIn('mkimage_arch', params)
         boot_message = params.get('boot_message', BOOT_MESSAGE)
         self.assertIsNotNone(boot_message)
         for action in job.pipeline.actions:
@@ -134,6 +136,8 @@ class TestUbootAction(unittest.TestCase)
                 self.assertIn('kernel', action.parameters)
                 self.assertIn('to', action.parameters)
                 self.assertEqual('tftp', action.parameters['to'])
+            if isinstance(action, CompressRamdisk):
+                self.assertEqual(action.mkimage_arch, 'arm')
             self.assertTrue(action.valid)
 
     def test_overlay_action(self):  # pylint: disable=too-many-locals
@@ -148,7 +152,8 @@ class TestUbootAction(unittest.TestCase)
                 'boot': {
                     'method': 'u-boot',
                     'commands': 'ramdisk',
-                    'type': 'bootz'
+                    'type': 'bootz',
+                    'prompts': ['linaro-test', 'root@debian:~#']
                 },
                 'deploy': {
                     'ramdisk': 'initrd.gz',
@@ -304,7 +309,7 @@ class TestUbootAction(unittest.TestCase)
         self.assertEqual(part_reference, "0:1")
 
     @unittest.skipIf(infrastructure_error('telnet'), "telnet not installed")
-    def test_prompt_from_job(self):
+    def test_prompt_from_job(self):  # pylint: disable=too-many-locals
         """
         Support setting the prompt after login via the job
 
@@ -327,7 +332,7 @@ class TestUbootAction(unittest.TestCase)
         parser = JobParser()
         sample_job_data = yaml.load(sample_job_string)
         boot = [item['boot'] for item in sample_job_data['actions'] if 'boot' in item][0]
-        boot.update({'parameters': {'boot_prompt': 'root@bbb'}})
+        self.assertIsNotNone(boot)
         sample_job_string = yaml.dump(sample_job_data)
         job = parser.parse(sample_job_string, device, 4212, None, output_dir='/tmp')
         job.validate()
@@ -337,7 +342,6 @@ class TestUbootAction(unittest.TestCase)
         expect = [action for action in retry.internal_pipeline.actions
                   if action.name == 'expect-shell-connection'][0]
         self.assertNotEqual(check, expect.parameters)
-        self.assertIn('root@bbb', expect.prompts)
 
     def test_xz_nfs(self):
         factory = Factory()
@@ -347,13 +351,22 @@ class TestUbootAction(unittest.TestCase)
         tftp_deploy = [action for action in job.pipeline.actions if action.name == 'tftp-deploy'][0]
         prepare = [action for action in tftp_deploy.internal_pipeline.actions if action.name == 'prepare-tftp-overlay'][0]
         nfs = [action for action in prepare.internal_pipeline.actions if action.name == 'extract-nfsrootfs'][0]
-        self.assertIn('rootfs_compression', nfs.parameters)
-        self.assertEqual(nfs.parameters['rootfs_compression'], 'xz')
-        valid = tarfile.TarFile
-        if 'xz' not in valid.__dict__['OPEN_METH'].keys():
-            self.assertTrue(nfs.use_lzma)
-            self.assertFalse(nfs.use_tarfile)
-        else:
-            # python3 has xz support in tarfile.
-            self.assertFalse(nfs.use_lzma)
-            self.assertTrue(nfs.use_tarfile)
+        self.assertIn('compression', nfs.parameters['nfsrootfs'])
+        self.assertEqual(nfs.parameters['nfsrootfs']['compression'], 'xz')
+
+
+class TestOverlayCommands(unittest.TestCase):  # pylint: disable=too-many-public-methods
+
+    def test_combined_ramdisk_nfs(self):
+        factory = Factory()
+        job = factory.create_bbb_job('sample_jobs/bbb-ramdisk-nfs.yaml')
+        tftp_deploy = [action for action in job.pipeline.actions if action.name == 'tftp-deploy'][0]
+        prepare = [action for action in tftp_deploy.internal_pipeline.actions if action.name == 'prepare-tftp-overlay'][0]
+        nfs = [action for action in prepare.internal_pipeline.actions if action.name == 'extract-nfsrootfs'][0]
+        modules = [action for action in prepare.internal_pipeline.actions if action.name == 'extract-modules'][0]
+        overlay = [action for action in prepare.internal_pipeline.actions if action.name == 'apply-overlay-tftp'][0]
+        self.assertIsNotNone(modules.parameters.get('ramdisk', None))
+        self.assertIsNotNone(modules.parameters.get('nfsrootfs', None))
+        self.assertIsNotNone(nfs.parameters.get('nfsrootfs', None))
+        self.assertIsNotNone(overlay.parameters.get('nfsrootfs', None))
+        self.assertIsNotNone(overlay.parameters.get('ramdisk', None))
diff -pruN 2015.9-1/lava_dispatcher/pipeline/test/test_vland.py 2016.3-1/lava_dispatcher/pipeline/test/test_vland.py
--- 2015.9-1/lava_dispatcher/pipeline/test/test_vland.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/test/test_vland.py	2016-03-02 14:34:40.000000000 +0000
@@ -0,0 +1,238 @@
+# Copyright (C) 2015 Linaro Limited
+#
+# Author: Neil Williams <neil.williams@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+
+import os
+import yaml
+import socket
+import unittest
+from lava_dispatcher.pipeline.device import NewDevice
+from lava_dispatcher.pipeline.parser import JobParser
+from lava_dispatcher.pipeline.action import JobError
+from lava_dispatcher.pipeline.connection import Protocol
+from lava_dispatcher.pipeline.protocols.vland import VlandProtocol
+from lava_dispatcher.pipeline.protocols.multinode import MultinodeProtocol
+from lava_dispatcher.pipeline.test.test_basic import pipeline_reference
+
+
+class TestVland(unittest.TestCase):  # pylint: disable=too-many-public-methods
+
+    def setUp(self):
+        super(TestVland, self).setUp()
+        self.filename = os.path.join(os.path.dirname(__file__), 'sample_jobs/bbb-group-vland-alpha.yaml')
+        self.device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/bbb-01.yaml'))
+
+    def test_file_structure(self):
+        with open(self.filename) as yaml_data:
+            alpha_data = yaml.load(yaml_data)
+        self.assertIn('protocols', alpha_data)
+        self.assertTrue(VlandProtocol.accepts(alpha_data))
+        level_tuple = Protocol.select_all(alpha_data)
+        self.assertEqual(len(level_tuple), 2)
+        self.assertEqual(
+            VlandProtocol,
+            [
+                item[0] for item in sorted(level_tuple, key=lambda data: data[1])
+            ][1]
+        )
+        vprotocol = VlandProtocol(alpha_data)
+        self.assertIn(
+            'arbit',
+            vprotocol.base_group,
+        )
+        self.assertNotIn(
+            'group',
+            vprotocol.base_group,
+        )
+        vprotocol.set_up()
+        self.assertIn('port', vprotocol.settings)
+        self.assertIn('poll_delay', vprotocol.settings)
+        self.assertIn('vland_hostname', vprotocol.settings)
+        self.assertEqual(
+            vprotocol.base_message,
+            {
+                "port": vprotocol.settings['port'],
+                "poll_delay": vprotocol.settings["poll_delay"],
+                "host": vprotocol.settings['vland_hostname'],
+                "client_name": socket.gethostname(),
+            }
+        )
+        for name in vprotocol.names:
+            vlan = vprotocol.params[name]
+            self.assertIn('tags', vlan)
+
+    def test_device(self):
+        self.assertIsNotNone(self.device)
+        self.assertIn('eth0', self.device['parameters']['interfaces'])
+        self.assertIn('eth1', self.device['parameters']['interfaces'])
+        self.assertIn('sysfs', self.device['parameters']['interfaces']['eth0'])
+        self.assertIn('mac', self.device['parameters']['interfaces']['eth0'])
+        self.assertIn('switch', self.device['parameters']['interfaces']['eth0'])
+        self.assertIn('port', self.device['parameters']['interfaces']['eth0'])
+        self.assertIn('tags', self.device['parameters']['interfaces']['eth0'])
+        self.assertIn('sysfs', self.device['parameters']['interfaces']['eth1'])
+        self.assertIn('mac', self.device['parameters']['interfaces']['eth1'])
+        self.assertIn('switch', self.device['parameters']['interfaces']['eth1'])
+        self.assertIn('port', self.device['parameters']['interfaces']['eth1'])
+        self.assertIn('tags', self.device['parameters']['interfaces']['eth1'])
+        self.assertIsInstance(self.device['parameters']['interfaces']['eth1']['tags'], list)
+        self.assertIsInstance(self.device['parameters']['interfaces']['eth0']['tags'], list)
+        csv_list = []
+        for interface in self.device['parameters']['interfaces']:
+            csv_list.extend(
+                [
+                    self.device['parameters']['interfaces'][interface]['sysfs'],
+                    self.device['parameters']['interfaces'][interface]['mac'],
+                    interface
+                ]
+            )
+        self.assertEqual(
+            csv_list,
+            [
+                '/sys/devices/pci0000:00/0000:00:1c.1/0000:03:00.0/net/eth1', '00:24:d7:9b:c0:8c', 'eth1',
+                '/sys/devices/pci0000:00/0000:00:19.0/net/eth0', 'f0:de:f1:46:8c:21', 'eth0'
+            ]
+        )
+        tag_list = []
+        for interface in self.device['parameters']['interfaces']:
+            for tag in self.device['parameters']['interfaces'][interface]['tags']:
+                tag_list.extend([interface, tag])
+        self.assertEqual(tag_list, ['eth1', '1G', 'eth0', '1G'])
+
+    def test_configure(self):
+        with open(self.filename) as yaml_data:
+            alpha_data = yaml.load(yaml_data)
+        self.assertIn('protocols', alpha_data)
+        self.assertTrue(VlandProtocol.accepts(alpha_data))
+        vprotocol = VlandProtocol(alpha_data)
+        vprotocol.set_up()
+        with open(self.filename) as sample_job_data:
+            parser = JobParser()
+            job = parser.parse(sample_job_data, self.device, 4212, None, output_dir='/tmp/')
+        ret = vprotocol.configure(self.device, job)
+        if not ret:
+            print vprotocol.errors
+        self.assertTrue(ret)
+        nodes = {}
+        for name in vprotocol.names:
+            vlan = vprotocol.params[name]
+            # self.assertNotIn('tags', vlan)
+            uid = ' '.join([vlan['switch'], str(vlan['port'])])
+            nodes[uid] = name
+        self.assertEqual(len(nodes.keys()), len(vprotocol.names))
+        self.assertIn('vlan_one', vprotocol.names)
+        self.assertIn('vlan_two', vprotocol.names)
+        # self.assertNotIn('tags', vprotocol.params['vlan_one'])
+        self.assertIn('switch', vprotocol.params['vlan_one'])
+        self.assertIn('port', vprotocol.params['vlan_one'])
+        self.assertIn('switch', vprotocol.params['vlan_two'])
+        self.assertIn('port', vprotocol.params['vlan_two'])
+        self.assertIsNotNone(vprotocol.multinode_protocol)
+
+        bbb2 = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/bbb-01.yaml'))
+        bbb2['hostname'] = 'bbb2'
+        bbb2['parameters']['interfaces']['eth0']['switch'] = '192.168.0.2'
+        bbb2['parameters']['interfaces']['eth0']['port'] = '6'
+        bbb2['parameters']['interfaces']['eth1']['switch'] = '192.168.0.2'
+        bbb2['parameters']['interfaces']['eth1']['port'] = '4'
+        self.assertEqual(
+            vprotocol.params,
+            {'vlan_one': {'switch': '192.168.0.1', 'port': 7, 'tags': ['1G']}, 'vlan_two': {'switch': '192.168.0.1', 'port': 5, 'tags': ['1G']}}
+        )
+        # already configured the vland protocol in the same job
+        self.assertTrue(vprotocol.configure(bbb2, job))
+        self.assertEqual(
+            vprotocol.params,
+            {'vlan_one': {'switch': '192.168.0.1', 'port': 7, 'tags': ['1G']}, 'vlan_two': {'switch': '192.168.0.1', 'port': 5, 'tags': ['1G']}}
+        )
+        self.assertTrue(vprotocol.valid)
+        self.assertEqual(vprotocol.names, {'vlan_one': 'arbitraryg000', 'vlan_two': 'arbitraryg001'})
+
+    def test_job(self):
+        with open(self.filename) as yaml_data:
+            alpha_data = yaml.load(yaml_data)
+        self.assertIn('protocols', alpha_data)
+        self.assertIn(VlandProtocol.name, alpha_data['protocols'])
+        with open(self.filename) as sample_job_data:
+            parser = JobParser()
+            job = parser.parse(sample_job_data, self.device, 4212, None, output_dir='/tmp/')
+        description_ref = pipeline_reference('bbb-group-vland-alpha.yaml')
+        self.assertEqual(description_ref, job.pipeline.describe(False))
+        job.validate()
+        self.assertNotEqual([], [protocol.name for protocol in job.protocols if protocol.name == MultinodeProtocol.name])
+        ret = {"message": {"kvm01": {"vlan_name": "name", "vlan_tag": 6}}, "response": "ack"}
+        self.assertEqual(('name', 6), (ret['message']['kvm01']['vlan_name'], ret['message']['kvm01']['vlan_tag'],))
+        self.assertIn('protocols', job.parameters)
+        self.assertIn(VlandProtocol.name, job.parameters['protocols'])
+        self.assertIn(MultinodeProtocol.name, job.parameters['protocols'])
+        vprotocol = [vprotocol for vprotocol in job.protocols if vprotocol.name == VlandProtocol.name][0]
+        self.assertTrue(vprotocol.valid)
+        self.assertEqual(vprotocol.names, {'vlan_one': 'arbitraryg000', 'vlan_two': 'arbitraryg001'})
+        self.assertFalse(vprotocol.check_timeout(120, {'request': 'no call'}))
+        self.assertRaises(JobError, vprotocol.check_timeout, 60, 'deploy_vlans')
+        self.assertRaises(JobError, vprotocol.check_timeout, 60, {'request': 'deploy_vlans'})
+        self.assertTrue(vprotocol.check_timeout(120, {'request': 'deploy_vlans'}))
+        for vlan_name in job.parameters['protocols'][VlandProtocol.name]:
+            if vlan_name == 'yaml_line':
+                continue
+            self.assertIn(vlan_name, vprotocol.params)
+            self.assertIn('switch', vprotocol.params[vlan_name])
+            self.assertIn('port', vprotocol.params[vlan_name])
+
+    # pylint: disable=protected-access
+    def demo(self):
+        with open(self.filename) as yaml_data:
+            alpha_data = yaml.load(yaml_data)
+        vprotocol = VlandProtocol(alpha_data)
+        vprotocol.settings = vprotocol.read_settings()
+        self.assertIn('port', vprotocol.settings)
+        self.assertIn('poll_delay', vprotocol.settings)
+        self.assertIn('vland_hostname', vprotocol.settings)
+        vprotocol.base_message = {
+            "port": vprotocol.settings['port'],
+            "poll_delay": vprotocol.settings["poll_delay"],
+            "host": vprotocol.settings['vland_hostname'],
+            "client_name": socket.gethostname(),
+        }
+        count = 0
+        print "\nTesting vland live using connections."
+        for friendly_name in vprotocol.parameters['protocols'][vprotocol.name]:
+            print "Processing VLAN: %s" % friendly_name
+            vprotocol.names[friendly_name] = vprotocol.base_group + '%02d' % count
+            count += 1
+            vprotocol.vlans[friendly_name], tag = vprotocol._create_vlan(friendly_name)
+            print "[%s] Created vlan with id %s" % (friendly_name, vprotocol.vlans[friendly_name])
+            print "[%s] tag: %s" % (friendly_name, tag)
+            for hostname in vprotocol.parameters['protocols'][vprotocol.name][friendly_name]:
+                params = vprotocol.parameters['protocols'][vprotocol.name][friendly_name][hostname]
+                print "[%s] to use switch %s and port %s" % (friendly_name, params['switch'], params['port'])
+                self.assertIn('switch', params)
+                self.assertIn('port', params)
+                self.assertIsNotNone(params['switch'])
+                self.assertIsNotNone(params['port'])
+                switch_id = vprotocol._lookup_switch_id(params['switch'])
+                self.assertIsNotNone(switch_id)
+                print "[%s] Using switch ID %s" % (friendly_name, switch_id)
+                port_id = vprotocol._lookup_port_id(switch_id, params['port'])
+                print "%s Looked up port ID %s for %s" % (friendly_name, port_id, params['port'])
+                vprotocol._set_port_onto_vlan(vprotocol.vlans[friendly_name], port_id)
+                vprotocol.ports.append(port_id)
+        print "Finalising - tearing down vlans"
+        vprotocol.finalise_protocol()
diff -pruN 2015.9-1/lava_dispatcher/pipeline/utils/compression.py 2016.3-1/lava_dispatcher/pipeline/utils/compression.py
--- 2015.9-1/lava_dispatcher/pipeline/utils/compression.py	1970-01-01 00:00:00.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/utils/compression.py	2016-03-04 14:35:18.000000000 +0000
@@ -0,0 +1,76 @@
+# Copyright (C) 2016 Linaro Limited
+#
+# Author: Matthew Hart <matthew.hart@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+
+# ramdisk, always cpio, comp: gz,xz
+# rootfs, always tar, comp: gz,xz,bzip2
+# android images: tar + xz,bz2,gz, or just gz,xz,bzip2
+
+import os
+import subprocess
+import tarfile
+
+from lava_dispatcher.pipeline.action import (
+    JobError
+)
+
+# https://www.kernel.org/doc/Documentation/xz.txt
+compress_command_map = {'xz': 'xz --check=crc32', 'gz': 'gzip', 'bz2': 'bzip2'}
+decompress_command_map = {'xz': 'unxz', 'gz': 'gunzip', 'bz2': 'bunzip2'}
+
+
+def compress_file(infile, compression):
+    if compression not in compress_command_map.keys():
+        raise JobError("Cannot find shell command to compress: %s" % compression)
+    pwd = os.getcwd()
+    os.chdir(os.path.dirname(infile))
+    cmd = "%s %s" % (compress_command_map[compression], infile)
+    try:
+        # safe to use shell=True here, no external arguments
+        log = subprocess.check_output(cmd, shell=True)
+        os.chdir(pwd)
+        return "%s.%s" % (infile, compression)
+    except OSError as exc:
+        raise RuntimeError('unable to compress file %s: %s' % (infile, exc))
+
+
+def decompress_file(infile, compression):
+    if compression not in decompress_command_map.keys():
+        raise JobError("Cannot find shell command to decompress: %s" % compression)
+    os.chdir(os.path.dirname(infile))
+    cmd = "%s %s" % (decompress_command_map[compression], infile)
+    outfile = infile
+    if infile.endswith(compression):
+        outfile = infile[:-(len(compression) + 1)]
+    try:
+        # safe to use shell=True here, no external arguments
+        log = subprocess.check_output(cmd, shell=True)
+        return outfile
+    except OSError as exc:
+        raise RuntimeError('unable to decompress file %s: %s' % (infile, exc))
+
+
+def untar_file(infile, outdir):
+    try:
+        tar = tarfile.open(infile)
+        tar.extractall(outdir)
+        tar.close()
+    except tarfile.TarError as exc:
+        raise JobError("Unable to unpack %s" % infile)
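
A minimal usage sketch for the helpers introduced above, chaining decompression and unpacking; the paths are hypothetical and error handling is left to the JobError/RuntimeError raised by the helpers themselves:

    from lava_dispatcher.pipeline.utils.compression import decompress_file, untar_file
    from lava_dispatcher.pipeline.utils.filesystem import mkdtemp

    rootfs = '/tmp/lava/rootfs.tar.xz'        # hypothetical downloaded artefact
    tarball = decompress_file(rootfs, 'xz')   # runs unxz, returns '/tmp/lava/rootfs.tar'
    untar_file(tarball, mkdtemp())            # unpack into a fresh temporary directory
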
diff -pruN 2015.9-1/lava_dispatcher/pipeline/utils/constants.py 2016.3-1/lava_dispatcher/pipeline/utils/constants.py
--- 2015.9-1/lava_dispatcher/pipeline/utils/constants.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/utils/constants.py	2016-03-04 14:35:18.000000000 +0000
@@ -41,7 +41,6 @@ UBOOT_AUTOBOOT_PROMPT = "Hit any key to
 UBOOT_DEFAULT_CMD_TIMEOUT = 90
 
 # Ramdisk default filenames
-RAMDISK_COMPRESSED_FNAME = 'ramdisk.cpio.gz'
 RAMDISK_FNAME = 'ramdisk.cpio'
 
 # Size of the chunks when copying file
@@ -71,8 +70,37 @@ SHUTDOWN_MESSAGE = 'The system is going
 # Kernel starting message
 BOOT_MESSAGE = 'Booting Linux'
 
+# Default shell prompt for AutoLogin
+DEFAULT_SHELL_PROMPT = 'lava-test: # '
+
+# Distinctive prompt characters which can
+# help distinguish status messages from shell prompts.
+DISTINCTIVE_PROMPT_CHARACTERS = "\\:"
+
 # LAVA Coordinator setup and finalize timeout
 LAVA_MULTINODE_SYSTEM_TIMEOUT = 90
 
 # Default Action timeout
 ACTION_TIMEOUT = 30
+
+# Android tmp directory
+ANDROID_TMP_DIR = '/data/local/tmp'
+
+# Default timeout for fastboot reboot
+FASTBOOT_REBOOT_TIMEOUT = 10
+
+# LXC container path
+LXC_PATH = "/var/lib/lxc"
+
+# LXC finalize timeout
+LAVA_LXC_TIMEOUT = 30
+
+# Timeout used by the vland protocol when waiting for vland to
+# respond to the api.create_vlan request, in seconds.
+VLAND_DEPLOY_TIMEOUT = 120
+
+# ipxe boot interrupting
+IPXE_BOOT_PROMPT = "Press Ctrl-B for the iPXE command line"
+
+# bootloader default timeout for commands
+BOOTLOADER_DEFAULT_CMD_TIMEOUT = 90
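
DEFAULT_SHELL_PROMPT ties back to the AutoLoginAction tests earlier in this diff, which assert that the default prompt appears ahead of the prompts supplied by the job. A small sketch of the resulting list, assuming that prepend behaviour:

    from lava_dispatcher.pipeline.utils.constants import DEFAULT_SHELL_PROMPT

    prompts = [DEFAULT_SHELL_PROMPT] + ['root@debian:~#']
    assert prompts == ['lava-test: # ', 'root@debian:~#']  # matches the test assertion
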
diff -pruN 2015.9-1/lava_dispatcher/pipeline/utils/filesystem.py 2016.3-1/lava_dispatcher/pipeline/utils/filesystem.py
--- 2015.9-1/lava_dispatcher/pipeline/utils/filesystem.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/utils/filesystem.py	2016-03-04 14:35:18.000000000 +0000
@@ -22,6 +22,7 @@ import atexit
 import os
 import shutil
 import tempfile
+from configobj import ConfigObj
 
 
 def rmtree(directory):
@@ -87,10 +88,17 @@ def tftpd_dir():
     subdirectory of it. Default installation value: /srv/tftp/
     :return: real path to the TFTP directory or raises RuntimeError
     """
+    var_name = 'TFTP_DIRECTORY'
     if os.path.exists('/etc/default/tftpd-hpa'):
-        with open('/etc/default/tftpd-hpa', 'r') as tftpd:
-            lines = tftpd.read()
-        for line in lines.split('\n'):
-            if 'TFTP_DIRECTORY' in line:
-                return os.path.realpath(line[15:].replace('"', ''))  # remove quote markers
+        config = ConfigObj('/etc/default/tftpd-hpa')
+        value = config.get(var_name)
+        return os.path.realpath(value)
     raise RuntimeError("Unable to identify tftpd directory")
+
+
+def write_bootscript(commands, filename):
+    with open(filename, 'w') as bootscript:
+        bootscript.write("#!ipxe\n\n")
+        for line in commands:
+            bootscript.write(line + "\n")
+        bootscript.close()
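
The ConfigObj change above replaces manual string slicing of /etc/default/tftpd-hpa. That file is a set of shell-style key="value" assignments, which ConfigObj parses with the quote markers already stripped; a quick check against a typical Debian default (contents may vary per installation):

    # TFTP_USERNAME="tftp"
    # TFTP_DIRECTORY="/srv/tftp"
    # TFTP_ADDRESS="0.0.0.0:69"
    # TFTP_OPTIONS="--secure"
    from configobj import ConfigObj

    config = ConfigObj('/etc/default/tftpd-hpa')
    print(config.get('TFTP_DIRECTORY'))  # e.g. /srv/tftp, without the quotes
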
diff -pruN 2015.9-1/lava_dispatcher/pipeline/utils/network.py 2016.3-1/lava_dispatcher/pipeline/utils/network.py
--- 2015.9-1/lava_dispatcher/pipeline/utils/network.py	2015-09-03 13:34:38.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/utils/network.py	2015-12-14 09:33:11.000000000 +0000
@@ -21,7 +21,9 @@
 # List just the subclasses supported for this base strategy
 # imported by the parser to populate the list of subclasses.
 
+import os
 import netifaces
+import subprocess
 from lava_dispatcher.pipeline.action import InfrastructureError
 
 
@@ -49,3 +51,18 @@ def dispatcher_ip():
     iface = gateways['default'][netifaces.AF_INET][1]
     addr = netifaces.ifaddresses(iface)
     return addr[netifaces.AF_INET][0]['addr']
+
+
+def rpcinfo_nfs(server):
+    """
+    Calls rpcinfo nfs on the specified server.
+    Only stderr matters
+    :param server: the NFS server to check
+    :return: None if success, message if fail
+    """
+    with open(os.devnull, 'w') as devnull:
+        proc = subprocess.Popen(['/usr/sbin/rpcinfo', '-u', server, 'nfs'], stdout=devnull, stderr=subprocess.PIPE)
+        msg = proc.communicate()
+        if msg[1]:
+            return "%s %s" % (server, msg[1])
+    return None
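
A short usage sketch for rpcinfo_nfs as added above: it returns None when the server answers the NFS RPC query and an error string otherwise, so the result can feed straight into an action's error list; the address is only illustrative:

    from lava_dispatcher.pipeline.utils.network import rpcinfo_nfs

    error = rpcinfo_nfs('10.4.0.1')  # illustrative NFS server address
    if error:
        print("NFS server check failed: %s" % error)
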
diff -pruN 2015.9-1/lava_dispatcher/pipeline/utils/shell.py 2016.3-1/lava_dispatcher/pipeline/utils/shell.py
--- 2015.9-1/lava_dispatcher/pipeline/utils/shell.py	2015-09-09 14:30:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/pipeline/utils/shell.py	2016-02-02 08:07:05.000000000 +0000
@@ -64,13 +64,12 @@ def infrastructure_error(path):
     return None
 
 
-def wait_for_prompt(connection, prompt_pattern, timeout):
+def wait_for_prompt(connection, prompt_pattern, timeout, check_char):
     # One of the challenges we face is that kernel log messages can appear
     # half way through a shell prompt.  So, if things are taking a while,
     # we send a newline along to maybe provoke a new prompt.  We wait for
     # half the timeout period and then wait for one tenth of the timeout
     # 6 times (so we wait for 1.1 times the timeout period overall).
-    logger = logging.getLogger('dispatcher')
     prompt_wait_count = 0
     if timeout == -1:
         timeout = connection.timeout
@@ -81,10 +80,12 @@ def wait_for_prompt(connection, prompt_p
         except TestError as exc:
             if prompt_wait_count < 6:
                 logger = logging.getLogger('dispatcher')
-                logger.warning('%s: Sending newline in case of corruption.', exc)
+                logger.warning('%s: Sending %s in case of corruption. connection timeout %s, retry in %s',
+                               exc, check_char, timeout, partial_timeout)
+                logger.debug("pattern: %s" % prompt_pattern)
                 prompt_wait_count += 1
                 partial_timeout = timeout / 10
-                connection.sendline('#')
+                connection.sendline(check_char)
                 continue
             else:
                 raise
diff -pruN 2015.9-1/lava_dispatcher/tests/__init__.py 2016.3-1/lava_dispatcher/tests/__init__.py
--- 2015.9-1/lava_dispatcher/tests/__init__.py	2015-09-09 14:30:53.000000000 +0000
+++ 2016.3-1/lava_dispatcher/tests/__init__.py	2016-02-02 08:07:05.000000000 +0000
@@ -21,6 +21,10 @@ def test_suite():
         'lava_dispatcher.pipeline.test.test_connections',
         #  'lava_dispatcher.pipeline.test.test_utils',
         'lava_dispatcher.pipeline.test.test_repeat',
+        'lava_dispatcher.pipeline.test.test_menus',
+        'lava_dispatcher.pipeline.test.test_fastboot',
+        'lava_dispatcher.pipeline.test.test_vland',
+        'lava_dispatcher.pipeline.test.test_ipxe'
     ]
     loader = unittest.TestLoader()
     return loader.loadTestsFromNames(module_names)
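
With the four new modules registered above, the suite can be driven directly with the stock unittest runner; a minimal sketch, independent of any packaging or CI tooling:

    import unittest
    from lava_dispatcher.tests import test_suite

    unittest.TextTestRunner(verbosity=2).run(test_suite())
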
diff -pruN 2015.9-1/lava_dispatcher/utils.py 2016.3-1/lava_dispatcher/utils.py
--- 2015.9-1/lava_dispatcher/utils.py	2015-09-10 08:06:51.000000000 +0000
+++ 2016.3-1/lava_dispatcher/utils.py	2016-02-02 08:07:05.000000000 +0000
@@ -216,7 +216,7 @@ def is_uimage(kernel, context):
         return False
 
 
-def create_uimage(kernel, load_addr, tmp_dir, xip, arch='arm'):
+def create_uimage(kernel, load_addr, tmp_dir, xip, arch):
     load_addr = int(load_addr, 16)
     uimage_path = '%s/uImage' % tmp_dir
     if xip:
@@ -570,7 +570,7 @@ def connect_to_serial(context):
     raise CriticalError('could not execute connection_command successfully')
 
 
-def wait_for_prompt(connection, prompt_pattern, timeout):
+def wait_for_prompt(connection, prompt_pattern, timeout, skip_newlines=False):
     # One of the challenges we face is that kernel log messages can appear
     # half way through a shell prompt.  So, if things are taking a while,
     # we send a newline along to maybe provoke a new prompt.  We wait for
@@ -585,10 +585,11 @@ def wait_for_prompt(connection, prompt_p
             connection.expect(prompt_pattern, timeout=partial_timeout)
         except pexpect.TIMEOUT:
             if prompt_wait_count < 6:
-                logging.warning('Sending newline in case of corruption.')
                 prompt_wait_count += 1
                 partial_timeout = timeout / 10
-                connection.sendline('')
+                if not skip_newlines:
+                    logging.warning('Sending newline in case of corruption.')
+                    connection.sendline('')
                 continue
             else:
                 raise
diff -pruN 2015.9-1/lava_dispatcher.egg-info/pbr.json 2016.3-1/lava_dispatcher.egg-info/pbr.json
--- 2015.9-1/lava_dispatcher.egg-info/pbr.json	2015-09-10 10:34:52.000000000 +0000
+++ 2016.3-1/lava_dispatcher.egg-info/pbr.json	2015-10-14 08:03:30.000000000 +0000
@@ -1 +1 @@
-{"is_release": true, "git_version": "5dc0a11"}
\ No newline at end of file
+{"is_release": false, "git_version": "1503aeb"}
\ No newline at end of file
diff -pruN 2015.9-1/lava_dispatcher.egg-info/PKG-INFO 2016.3-1/lava_dispatcher.egg-info/PKG-INFO
--- 2015.9-1/lava_dispatcher.egg-info/PKG-INFO	2015-09-10 10:34:52.000000000 +0000
+++ 2016.3-1/lava_dispatcher.egg-info/PKG-INFO	2016-03-04 14:38:58.000000000 +0000
@@ -1,6 +1,6 @@
 Metadata-Version: 1.0
 Name: lava-dispatcher
-Version: 2015.9
+Version: 2016.3
 Summary: Part of the LAVA framework for dispatching test jobs
 Home-page: http://git.linaro.org/git/lava/lava-dispatcher.git
 Author: Linaro Validation Team
diff -pruN 2015.9-1/lava_dispatcher.egg-info/requires.txt 2016.3-1/lava_dispatcher.egg-info/requires.txt
--- 2015.9-1/lava_dispatcher.egg-info/requires.txt	2015-09-10 10:34:52.000000000 +0000
+++ 2016.3-1/lava_dispatcher.egg-info/requires.txt	2016-03-04 14:38:58.000000000 +0000
@@ -9,3 +9,4 @@ requests
 netifaces >= 0.10.0
 nose
 pyzmq
+configobj
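
configobj becomes a hard dependency here and in setup.py's install_requires below. As a sketch of what ConfigObj offers over hand-rolled parsing: it round-trips a flat KEY="value" file while preserving comments. The filename and key below are illustrative (in the style of a Debian tftpd-hpa defaults file), not the dispatcher's actual usage:

    from configobj import ConfigObj

    # Parse a flat KEY="value" file, change one entry, write it back.
    config = ConfigObj('/etc/default/tftpd-hpa')
    config['TFTP_DIRECTORY'] = '/srv/tftp'
    config.write()
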
diff -pruN 2015.9-1/lava_dispatcher.egg-info/SOURCES.txt 2016.3-1/lava_dispatcher.egg-info/SOURCES.txt
--- 2015.9-1/lava_dispatcher.egg-info/SOURCES.txt	2015-09-10 10:34:52.000000000 +0000
+++ 2016.3-1/lava_dispatcher.egg-info/SOURCES.txt	2016-03-04 14:38:58.000000000 +0000
@@ -14,6 +14,7 @@ etc/lava-options.conf
 etc/lava-slave.init
 etc/lava-slave.service
 etc/tftpd-hpa
+etc/logrotate.d/lava-slave-log
 lava/__init__.py
 lava/dispatcher/__init__.py
 lava/dispatcher/commands.py
@@ -92,6 +93,8 @@ lava_dispatcher/default-config/lava-disp
 lava_dispatcher/default-config/lava-dispatcher/device-types/dummy-schroot.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/dummy-ssh.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/dynamic-vm.conf
+lava_dispatcher/default-config/lava-dispatcher/device-types/fsl-ls1021a-twr.conf
+lava_dispatcher/default-config/lava-dispatcher/device-types/fsl-ls2085a-rdb.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/galaxy-nexus.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/hi3716cv200.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/hi6220-hikey.conf
@@ -100,6 +103,7 @@ lava_dispatcher/default-config/lava-disp
 lava_dispatcher/default-config/lava-dispatcher/device-types/ifc6540.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/imx6q-wandboard.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/jetson-tk1.conf
+lava_dispatcher/default-config/lava-dispatcher/device-types/juno-r2.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/juno.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/k3v2.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/keystone.conf
@@ -118,6 +122,7 @@ lava_dispatcher/default-config/lava-disp
 lava_dispatcher/default-config/lava-dispatcher/device-types/nexus9.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/odroid-u2.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/odroid-xu3.conf
+lava_dispatcher/default-config/lava-dispatcher/device-types/omap5-uevm.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/optimus-a80.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/origen.conf
 lava_dispatcher/default-config/lava-dispatcher/device-types/panda-es.conf
@@ -175,6 +180,7 @@ lava_dispatcher/lava_test_shell/lava-add
 lava_dispatcher/lava_test_shell/lava-add-sources
 lava_dispatcher/lava_test_shell/lava-background-process-start
 lava_dispatcher/lava_test_shell/lava-background-process-stop
+lava_dispatcher/lava_test_shell/lava-echo-ipv4
 lava_dispatcher/lava_test_shell/lava-install-packages
 lava_dispatcher/lava_test_shell/lava-installed-packages
 lava_dispatcher/lava_test_shell/lava-os-build
@@ -183,6 +189,7 @@ lava_dispatcher/lava_test_shell/lava-tes
 lava_dispatcher/lava_test_shell/lava-test-case-metadata
 lava_dispatcher/lava_test_shell/lava-test-run-attach
 lava_dispatcher/lava_test_shell/lava-test-runner
+lava_dispatcher/lava_test_shell/lava-test-set
 lava_dispatcher/lava_test_shell/lava-test-shell
 lava_dispatcher/lava_test_shell/lava-vm-groups-setup-host
 lava_dispatcher/lava_test_shell/distro/android/lava-test-runner
@@ -208,6 +215,7 @@ lava_dispatcher/lava_test_shell/lmp/lava
 lava_dispatcher/lava_test_shell/lmp/lava-lmp-sata
 lava_dispatcher/lava_test_shell/lmp/lava-lmp-usb
 lava_dispatcher/lava_test_shell/lmp/lava-lmp.lib
+lava_dispatcher/lava_test_shell/multi_node/lava-echo-config
 lava_dispatcher/lava_test_shell/multi_node/lava-group
 lava_dispatcher/lava_test_shell/multi_node/lava-multi-node.lib
 lava_dispatcher/lava_test_shell/multi_node/lava-network
@@ -219,6 +227,8 @@ lava_dispatcher/lava_test_shell/multi_no
 lava_dispatcher/lava_test_shell/multi_node/lava-wait
 lava_dispatcher/lava_test_shell/multi_node/lava-wait-all
 lava_dispatcher/lava_test_shell/multi_node/lava-wait-for-vms
+lava_dispatcher/lava_test_shell/vland/lava-vland-self
+lava_dispatcher/lava_test_shell/vland/lava-vland-tags
 lava_dispatcher/pipeline/__init__.py
 lava_dispatcher/pipeline/action.py
 lava_dispatcher/pipeline/connection.py
@@ -236,16 +246,22 @@ lava_dispatcher/pipeline/actions/command
 lava_dispatcher/pipeline/actions/logic.py
 lava_dispatcher/pipeline/actions/boot/__init__.py
 lava_dispatcher/pipeline/actions/boot/environment.py
+lava_dispatcher/pipeline/actions/boot/fastboot.py
+lava_dispatcher/pipeline/actions/boot/ipxe.py
 lava_dispatcher/pipeline/actions/boot/kexec.py
+lava_dispatcher/pipeline/actions/boot/lxc.py
 lava_dispatcher/pipeline/actions/boot/qemu.py
 lava_dispatcher/pipeline/actions/boot/ssh.py
 lava_dispatcher/pipeline/actions/boot/strategies.py
 lava_dispatcher/pipeline/actions/boot/u_boot.py
+lava_dispatcher/pipeline/actions/boot/uefi_menu.py
 lava_dispatcher/pipeline/actions/deploy/__init__.py
 lava_dispatcher/pipeline/actions/deploy/apply_overlay.py
 lava_dispatcher/pipeline/actions/deploy/download.py
 lava_dispatcher/pipeline/actions/deploy/environment.py
+lava_dispatcher/pipeline/actions/deploy/fastboot.py
 lava_dispatcher/pipeline/actions/deploy/image.py
+lava_dispatcher/pipeline/actions/deploy/lxc.py
 lava_dispatcher/pipeline/actions/deploy/mount.py
 lava_dispatcher/pipeline/actions/deploy/overlay.py
 lava_dispatcher/pipeline/actions/deploy/removable.py
@@ -258,28 +274,50 @@ lava_dispatcher/pipeline/actions/test/mu
 lava_dispatcher/pipeline/actions/test/shell.py
 lava_dispatcher/pipeline/actions/test/strategies.py
 lava_dispatcher/pipeline/connections/__init__.py
+lava_dispatcher/pipeline/connections/adb.py
+lava_dispatcher/pipeline/connections/lxc.py
 lava_dispatcher/pipeline/connections/serial.py
 lava_dispatcher/pipeline/connections/ssh.py
 lava_dispatcher/pipeline/device_types/beaglebone-black.conf
 lava_dispatcher/pipeline/device_types/cubietruck.conf
+lava_dispatcher/pipeline/device_types/hi6220-hikey.conf
 lava_dispatcher/pipeline/device_types/kvm.conf
+lava_dispatcher/pipeline/device_types/lxc.conf
+lava_dispatcher/pipeline/device_types/nexus4.conf
 lava_dispatcher/pipeline/device_types/panda-es.conf
+lava_dispatcher/pipeline/device_types/tk1.conf
+lava_dispatcher/pipeline/device_types/x86.conf
 lava_dispatcher/pipeline/devices/bbb-01.yaml
 lava_dispatcher/pipeline/devices/cubie1.yaml
+lava_dispatcher/pipeline/devices/hi6220-hikey-01.yaml
 lava_dispatcher/pipeline/devices/kvm01.yaml
+lava_dispatcher/pipeline/devices/kvm02.yaml
+lava_dispatcher/pipeline/devices/lxc-01.yaml
+lava_dispatcher/pipeline/devices/mustang-uefi.yaml
+lava_dispatcher/pipeline/devices/nexus10-01.yaml
+lava_dispatcher/pipeline/devices/nexus4-01.yaml
 lava_dispatcher/pipeline/devices/ssh-host-01.yaml
+lava_dispatcher/pipeline/devices/tk1-01.yaml
+lava_dispatcher/pipeline/devices/x86-01.yaml
+lava_dispatcher/pipeline/menus/__init__.py
+lava_dispatcher/pipeline/menus/menus.py
 lava_dispatcher/pipeline/protocols/__init__.py
+lava_dispatcher/pipeline/protocols/lxc.py
 lava_dispatcher/pipeline/protocols/multinode.py
 lava_dispatcher/pipeline/protocols/strategies.py
+lava_dispatcher/pipeline/protocols/vland.py
 lava_dispatcher/pipeline/test/__init__.py
 lava_dispatcher/pipeline/test/fake_coordinator.py
 lava_dispatcher/pipeline/test/test_basic.py
 lava_dispatcher/pipeline/test/test_connections.py
 lava_dispatcher/pipeline/test/test_defs.py
 lava_dispatcher/pipeline/test/test_devices.py
+lava_dispatcher/pipeline/test/test_fastboot.py
+lava_dispatcher/pipeline/test/test_ipxe.py
 lava_dispatcher/pipeline/test/test_kexec.py
 lava_dispatcher/pipeline/test/test_kvm.py
 lava_dispatcher/pipeline/test/test_lavashell.py
+lava_dispatcher/pipeline/test/test_menus.py
 lava_dispatcher/pipeline/test/test_multi.py
 lava_dispatcher/pipeline/test/test_multinode.py
 lava_dispatcher/pipeline/test/test_removable.py
@@ -287,21 +325,38 @@ lava_dispatcher/pipeline/test/test_repea
 lava_dispatcher/pipeline/test/test_retries.py
 lava_dispatcher/pipeline/test/test_uboot.py
 lava_dispatcher/pipeline/test/test_utils.py
+lava_dispatcher/pipeline/test/test_vland.py
+lava_dispatcher/pipeline/test/pipeline_refs/bbb-group-vland-alpha.yaml
+lava_dispatcher/pipeline/test/pipeline_refs/bbb-nfs-url.yaml
+lava_dispatcher/pipeline/test/pipeline_refs/bbb-ssh-guest.yaml
 lava_dispatcher/pipeline/test/pipeline_refs/cubietruck-removable.yaml
+lava_dispatcher/pipeline/test/pipeline_refs/fastboot.yaml
+lava_dispatcher/pipeline/test/pipeline_refs/ipxe.yaml
 lava_dispatcher/pipeline/test/pipeline_refs/kexec.yaml
 lava_dispatcher/pipeline/test/pipeline_refs/kvm-inline.yaml
 lava_dispatcher/pipeline/test/pipeline_refs/kvm-local.yaml
 lava_dispatcher/pipeline/test/pipeline_refs/kvm-qcow2.yaml
 lava_dispatcher/pipeline/test/pipeline_refs/kvm-repeat.yaml
 lava_dispatcher/pipeline/test/pipeline_refs/kvm.yaml
+lava_dispatcher/pipeline/test/pipeline_refs/mustang-uefi.yaml
 lava_dispatcher/pipeline/test/pipeline_refs/ssh-deploy.yaml
 lava_dispatcher/pipeline/test/pipeline_refs/ssh-guest.yaml
 lava_dispatcher/pipeline/test/pipeline_refs/uboot-multiple.yaml
 lava_dispatcher/pipeline/test/pipeline_refs/uboot.yaml
 lava_dispatcher/pipeline/test/sample_jobs/basics.yaml
+lava_dispatcher/pipeline/test/sample_jobs/bbb-group-vland-alpha.yaml
+lava_dispatcher/pipeline/test/sample_jobs/bbb-group-vland-beta.yaml
+lava_dispatcher/pipeline/test/sample_jobs/bbb-nfs-url.yaml
+lava_dispatcher/pipeline/test/sample_jobs/bbb-ramdisk-nfs.yaml
 lava_dispatcher/pipeline/test/sample_jobs/bbb-ssh-guest.yaml
 lava_dispatcher/pipeline/test/sample_jobs/cubietruck-removable.yaml
+lava_dispatcher/pipeline/test/sample_jobs/fastboot.yaml
+lava_dispatcher/pipeline/test/sample_jobs/ipxe-nfs.yaml
+lava_dispatcher/pipeline/test/sample_jobs/ipxe-ramdisk-bootscript.yaml
+lava_dispatcher/pipeline/test/sample_jobs/ipxe-ramdisk.yaml
+lava_dispatcher/pipeline/test/sample_jobs/ipxe.yaml
 lava_dispatcher/pipeline/test/sample_jobs/kexec.yaml
+lava_dispatcher/pipeline/test/sample_jobs/kvm-android.yaml
 lava_dispatcher/pipeline/test/sample_jobs/kvm-inline.yaml
 lava_dispatcher/pipeline/test/sample_jobs/kvm-local.yaml
 lava_dispatcher/pipeline/test/sample_jobs/kvm-multi.yaml
@@ -312,14 +367,19 @@ lava_dispatcher/pipeline/test/sample_job
 lava_dispatcher/pipeline/test/sample_jobs/kvm-qcow2.yaml
 lava_dispatcher/pipeline/test/sample_jobs/kvm-repeat.yaml
 lava_dispatcher/pipeline/test/sample_jobs/kvm.yaml
+lava_dispatcher/pipeline/test/sample_jobs/mustang-menu-ramdisk.yaml
 lava_dispatcher/pipeline/test/sample_jobs/panda-ramdisk.yaml
 lava_dispatcher/pipeline/test/sample_jobs/panda-usb.yaml
 lava_dispatcher/pipeline/test/sample_jobs/ssh-deploy.yaml
+lava_dispatcher/pipeline/test/sample_jobs/tk1-nfs.yaml
+lava_dispatcher/pipeline/test/sample_jobs/tk1-ramdisk.yaml
 lava_dispatcher/pipeline/test/sample_jobs/uboot-multiple.yaml
 lava_dispatcher/pipeline/test/sample_jobs/uboot-nfs.yaml
+lava_dispatcher/pipeline/test/sample_jobs/uboot-persistent.yaml
 lava_dispatcher/pipeline/test/sample_jobs/uboot-ramdisk.yaml
 lava_dispatcher/pipeline/test/sample_jobs/uboot.yaml
 lava_dispatcher/pipeline/utils/__init__.py
+lava_dispatcher/pipeline/utils/compression.py
 lava_dispatcher/pipeline/utils/constants.py
 lava_dispatcher/pipeline/utils/filesystem.py
 lava_dispatcher/pipeline/utils/network.py
diff -pruN 2015.9-1/MANIFEST.in 2016.3-1/MANIFEST.in
--- 2015.9-1/MANIFEST.in	2015-07-30 09:30:47.000000000 +0000
+++ 2016.3-1/MANIFEST.in	2016-03-04 14:35:18.000000000 +0000
@@ -15,6 +15,7 @@ include lava_dispatcher/tests/test-confi
 include requirements.txt
 include man/*
 include etc/*
+include etc/logrotate.d/*
 include version.py
 include linaro_dashboard_bundle/schemas/*.json
 include linaro_dashboard_bundle/test_documents/*.json
diff -pruN 2015.9-1/PKG-INFO 2016.3-1/PKG-INFO
--- 2015.9-1/PKG-INFO	2015-09-10 10:34:52.000000000 +0000
+++ 2016.3-1/PKG-INFO	2016-03-04 14:38:58.000000000 +0000
@@ -1,6 +1,6 @@
 Metadata-Version: 1.0
 Name: lava-dispatcher
-Version: 2015.9
+Version: 2016.3
 Summary: Part of the LAVA framework for dispatching test jobs
 Home-page: http://git.linaro.org/git/lava/lava-dispatcher.git
 Author: Linaro Validation Team
diff -pruN 2015.9-1/setup.py 2016.3-1/setup.py
--- 2015.9-1/setup.py	2015-09-09 14:31:19.000000000 +0000
+++ 2016.3-1/setup.py	2016-03-04 14:35:18.000000000 +0000
@@ -41,6 +41,7 @@ setup(
             'device/dynamic_vm_keys/lava*',
             'lava_test_shell/lava-background-process-start',
             'lava_test_shell/lava-background-process-stop',
+            'lava_test_shell/lava-echo-ipv4',
             'lava_test_shell/lava-vm-groups-setup-host',
             'lava_test_shell/lava-installed-packages',
             'lava_test_shell/lava-os-build',
@@ -49,8 +50,10 @@ setup(
             'lava_test_shell/lava-test-case-metadata',
             'lava_test_shell/lava-test-run-attach',
             'lava_test_shell/lava-test-runner',
+            'lava_test_shell/lava-test-set',
             'lava_test_shell/lava-test-shell',
             'lava_test_shell/multi_node/*',
+            'lava_test_shell/vland/*',
             'lava_test_shell/lmp/*',
             'lava_test_shell/distro/fedora/*',
             'lava_test_shell/distro/android/*',
@@ -74,7 +77,8 @@ setup(
         'requests',
         'netifaces >= 0.10.0',
         'nose',
-        'pyzmq'
+        'pyzmq',
+        'configobj'
     ],
     tests_require=[
         'pep8 >= 1.4.6',
@@ -89,6 +93,8 @@ setup(
             ['etc/lava-options.conf']),
         ('/etc/modules-load.d/',
             ['etc/lava-modules.conf']),
+        ('/etc/logrotate.d/',
+            ['etc/logrotate.d/lava-slave-log']),
         ('/etc/init.d/',
             ['etc/lava-slave.init']),
         ('/usr/share/lava-dispatcher/',
diff -pruN 2015.9-1/version.py 2016.3-1/version.py
--- 2015.9-1/version.py	2015-09-09 14:30:35.000000000 +0000
+++ 2016.3-1/version.py	2016-02-02 08:07:05.000000000 +0000
@@ -26,6 +26,8 @@
 import subprocess
 import os
 
+# pylint: disable=superfluous-parens,too-many-locals
+
 
 def version_tag():
     """
@@ -70,13 +72,11 @@ def version_tag():
         # use the rev-list count to always ensure that we are building
         # a newer version to cope with date changes at month end.
         # use short git hash for reference.
-        bits = tag_name.split('.')
-        tag_month = int(bits[1])
         dev_stamp = ['git', 'rev-list', '--count', 'HEAD']
         dev_count = subprocess.check_output(dev_stamp).strip()
         dev_short = ['git', 'rev-parse', '--short', 'HEAD']
         dev_hash = subprocess.check_output(dev_short).strip()
-        return "%s.%s.%s" % (tag_name, dev_count, dev_hash)
+        return "%s+%s.%s" % (tag_name, dev_count, dev_hash)
 
 
 def main():
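
The reworked return value swaps the second '.' separator for '+', so the developer-build suffix reads as a PEP 440-style local version label while still embedding the commit count and short hash. A condensed, Python 3-friendly sketch of the same computation, assuming it runs inside a git checkout:

    import subprocess

    def dev_version(tag_name):
        # The rev-list count keeps developer builds monotonically increasing
        # even when the date-based tag is unchanged; the short hash pins the
        # exact revision.
        count = subprocess.check_output(
            ['git', 'rev-list', '--count', 'HEAD']).strip().decode()
        sha = subprocess.check_output(
            ['git', 'rev-parse', '--short', 'HEAD']).strip().decode()
        return "%s+%s.%s" % (tag_name, count, sha)
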
