diff -pruN 1.1.20+ds1-2/debian/changelog 1.1.27+ds1-2/debian/changelog
--- 1.1.20+ds1-2/debian/changelog	2019-07-16 22:15:26.000000000 +0000
+++ 1.1.27+ds1-2/debian/changelog	2022-03-24 11:01:10.000000000 +0000
@@ -1,3 +1,55 @@
+python-pypowervm (1.1.27+ds1-2) unstable; urgency=medium
+
+  * Uploading to unstable.
+
+ -- Thomas Goirand <zigo@debian.org>  Thu, 24 Mar 2022 12:01:10 +0100
+
+python-pypowervm (1.1.27+ds1-1) experimental; urgency=medium
+
+  * New upstream release.
+
+ -- Thomas Goirand <zigo@debian.org>  Tue, 15 Feb 2022 16:59:02 +0100
+
+python-pypowervm (1.1.26+ds1-3) unstable; urgency=medium
+
+  * Uploading to unstable.
+  * Removed linuxvnc from (build-)depends.
+
+ -- Thomas Goirand <zigo@debian.org>  Wed, 29 Sep 2021 11:40:18 +0200
+
+python-pypowervm (1.1.26+ds1-2) experimental; urgency=medium
+
+  * Builds localization at build time (took it from upstream packaging).
+  * Revisit (build-)depends taking info from upstream packaging.
+
+ -- Thomas Goirand <zigo@debian.org>  Tue, 24 Aug 2021 16:20:05 +0200
+
+python-pypowervm (1.1.26+ds1-1) experimental; urgency=medium
+
+  * New upstream release.
+  * Refresh debian/patches/skip-tests.patch.
+
+ -- Thomas Goirand <zigo@debian.org>  Tue, 24 Aug 2021 16:10:38 +0200
+
+python-pypowervm (1.1.24+ds1-2) unstable; urgency=medium
+
+  * Uploading to unstable.
+
+ -- Thomas Goirand <zigo@debian.org>  Fri, 08 May 2020 11:33:30 +0200
+
+python-pypowervm (1.1.24+ds1-1) experimental; urgency=medium
+
+  [ Ondřej Nový ]
+  * Use debhelper-compat instead of debian/compat.
+  * Bump Standards-Version to 4.4.1.
+
+  [ Thomas Goirand ]
+  * New upstream release.
+  * Blacklist test_vterm.TestVterm.test_open_vnc_vterm_nonascii() and
+    test_vterm.TestVterm.test_open_vnc_vterm_nonvnc_noforce().
+
+ -- Thomas Goirand <zigo@debian.org>  Tue, 31 Mar 2020 15:02:56 +0200
+
 python-pypowervm (1.1.20+ds1-2) unstable; urgency=medium
 
   * Uploading to unstable.
diff -pruN 1.1.20+ds1-2/debian/compat 1.1.27+ds1-2/debian/compat
--- 1.1.20+ds1-2/debian/compat	2019-07-16 22:15:26.000000000 +0000
+++ 1.1.27+ds1-2/debian/compat	1970-01-01 00:00:00.000000000 +0000
@@ -1 +0,0 @@
-10
diff -pruN 1.1.20+ds1-2/debian/control 1.1.27+ds1-2/debian/control
--- 1.1.20+ds1-2/debian/control	2019-07-16 22:15:26.000000000 +0000
+++ 1.1.27+ds1-2/debian/control	2022-03-24 11:01:10.000000000 +0000
@@ -5,13 +5,15 @@ Maintainer: Debian OpenStack <team+opens
 Uploaders:
  Thomas Goirand <zigo@debian.org>,
 Build-Depends:
- debhelper (>= 10),
+ debhelper-compat (= 10),
  dh-python,
  openstack-pkg-tools,
  python3-all,
  python3-pbr,
  python3-setuptools,
 Build-Depends-Indep:
+ gettext,
+ python3-future,
  python3-lxml,
  python3-mock,
  python3-oslo.concurrency,
@@ -19,15 +21,18 @@ Build-Depends-Indep:
  python3-oslo.i18n,
  python3-oslo.log,
  python3-oslo.utils,
+ python3-pbr,
  python3-pyasn1,
  python3-pyasn1-modules,
+ python3-pyrsistent,
  python3-requests,
  python3-six,
  python3-stestr,
  python3-taskflow,
  python3-tz,
+ python3-zipp,
  subunit,
-Standards-Version: 4.3.0
+Standards-Version: 4.4.1
 Vcs-Browser: https://salsa.debian.org/openstack-team/libs/python-pypowervm
 Vcs-Git: https://salsa.debian.org/openstack-team/libs/python-pypowervm.git
 Homepage: https://github.com/powervm/pypowervm
@@ -35,6 +40,7 @@ Homepage: https://github.com/powervm/pyp
 Package: python3-pypowervm
 Architecture: all
 Depends:
+ python3-future,
  python3-lxml,
  python3-oslo.concurrency,
  python3-oslo.context,
@@ -44,10 +50,12 @@ Depends:
  python3-pbr,
  python3-pyasn1,
  python3-pyasn1-modules,
+ python3-pyrsistent,
  python3-requests,
  python3-six,
  python3-taskflow,
  python3-tz,
+ python3-zipp,
  ${misc:Depends},
  ${python3:Depends},
 Description: Python binding for the PowerVM REST API - Python 3.x
diff -pruN 1.1.20+ds1-2/debian/patches/skip-tests.patch 1.1.27+ds1-2/debian/patches/skip-tests.patch
--- 1.1.20+ds1-2/debian/patches/skip-tests.patch	2019-07-16 22:15:26.000000000 +0000
+++ 1.1.27+ds1-2/debian/patches/skip-tests.patch	2022-03-24 11:01:10.000000000 +0000
@@ -13,9 +13,9 @@ Index: python-pypowervm/pypowervm/tests/
  import subunit
 +import sys
  
- if six.PY2:
-     import __builtin__ as builtins
-@@ -496,6 +497,8 @@ class TestAdapter(testtools.TestCase):
+ 
+ import mock
+@@ -497,6 +498,8 @@ class TestAdapter(testtools.TestCase):
          fhdata = ['one', 'two']
          self._test_upload_request(mock_rq, fhdata, fhdata)
  
@@ -24,7 +24,7 @@ Index: python-pypowervm/pypowervm/tests/
      @mock.patch('requests.sessions.Session.request')
      def test_upload_request_fh(self, mock_rq):
          """Test an upload request with a filehandle."""
-@@ -928,6 +931,8 @@ class TestAdapterClasses(subunit.Isolate
+@@ -929,6 +932,8 @@ class TestAdapterClasses(subunit.Isolate
          # Ensure we get an EventListener
          self.assertIsInstance(sess.get_event_listener(), adp.EventListener)
  
@@ -33,7 +33,7 @@ Index: python-pypowervm/pypowervm/tests/
      def test_shutdown_session(self):
          # Get a session
          sess = adp.Session()
-@@ -953,6 +958,8 @@ class TestAdapterClasses(subunit.Isolate
+@@ -954,6 +959,8 @@ class TestAdapterClasses(subunit.Isolate
              # Test that logoff has occurred
              self.assertTrue(self.mock_logoff.called)
  
diff -pruN 1.1.20+ds1-2/debian/rules 1.1.27+ds1-2/debian/rules
--- 1.1.20+ds1-2/debian/rules	2019-07-16 22:15:26.000000000 +0000
+++ 1.1.27+ds1-2/debian/rules	2022-03-24 11:01:10.000000000 +0000
@@ -15,7 +15,13 @@ override_dh_auto_build:
 override_dh_auto_install:
 	pkgos-dh_auto_install --no-py2
 
+	set -e ; set -x ; for lc in $$(ls -d pypowervm/locale/*/ | cut -f3 -d'/'); do \
+		mkdir -p debian/python3-pypowervm/usr/share/locale/$$lc/LC_MESSAGES ; \
+		msgfmt -o debian/python3-pypowervm/usr/share/locale/$$lc/LC_MESSAGES/pypowervm.mo pypowervm/locale/$$lc/pypowervm.po ; \
+	done
+
 override_dh_auto_test:
 ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS)))
-	pkgos-dh_auto_test --no-py2 'pypowervm\.tests\.((?!.*test_session\.TestSession\.test_session_clone.*))'
+	# See https://bugs.launchpad.net/pypowervm/+bug/1856354
+	pkgos-dh_auto_test --no-py2 'pypowervm\.tests\.((?!.*test_session\.TestSession\.test_session_clone.*|.*tasks\.test_vterm\.TestVterm\.test_open_vnc_vterm_nonascii.*|.*test_vterm\.TestVterm\.test_open_vnc_vterm_nonvnc_noforce.*))'
 endif
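
The test filter above is a Python regex whose capture group is a bare
negative lookahead, so a test id only matches when it names none of the
blacklisted tests. A standalone sanity check of that pattern (simplified
to a single exclusion, and independent of how pkgos-dh_auto_test/stestr
actually applies it) could look like:

    import re

    pat = re.compile(
        r'pypowervm\.tests\.'
        r'((?!.*test_session\.TestSession\.test_session_clone.*))')

    # An ordinary test id still matches ...
    assert pat.match(
        'pypowervm.tests.test_adapter.TestAdapter.test_upload_request_fh')
    # ... while the blacklisted one is rejected by the lookahead.
    assert not pat.match(
        'pypowervm.tests.test_session.TestSession.test_session_clone')
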
diff -pruN 1.1.20+ds1-2/.gitreview 1.1.27+ds1-2/.gitreview
--- 1.1.20+ds1-2/.gitreview	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/.gitreview	2022-02-15 15:58:17.000000000 +0000
@@ -1,3 +1,3 @@
 [gerrit]
 project=powervm/pypowervm.git
-defaultbranch=develop
+defaultbranch=release/1.1.27
diff -pruN 1.1.20+ds1-2/pypowervm/adapter.py 1.1.27+ds1-2/pypowervm/adapter.py
--- 1.1.20+ds1-2/pypowervm/adapter.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/adapter.py	2022-02-15 15:58:17.000000000 +0000
@@ -1,4 +1,4 @@
-# Copyright 2014, 2017 IBM Corp.
+# Copyright 2014, 2020 IBM Corp.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
@@ -17,37 +17,36 @@ import copy
 import errno
 import hashlib
 import os
-import uuid
-
-if os.name == 'posix':
-    import pwd
 import re
 import threading
 import time
+import uuid
 import xml.sax.saxutils as sax_utils
 
-from lxml import etree
+import pypowervm.entities as ent
+import pypowervm.exceptions as pvmex
 
-try:
-    import urlparse
-except ImportError:
-    import urllib.parse as urlparse
 
-from oslo_log import log as logging
 import requests
 import requests.exceptions as rqex
 import six
 import six.moves.urllib.parse as urllib
 import weakref
 
+from lxml import etree
+from oslo_log import log as logging
 from pypowervm import const as c
-import pypowervm.entities as ent
-import pypowervm.exceptions as pvmex
 from pypowervm.i18n import _
 from pypowervm import traits as pvm_traits
 from pypowervm import util
 from pypowervm.utils import retry
+if os.name == 'posix':
+    import pwd
 
+try:
+    import urlparse
+except ImportError:
+    import urllib.parse as urlparse
 
 # Preserve CDATA on the way in (also ensures it is not altered on the way out)
 etree.set_default_parser(etree.XMLParser(strip_cdata=False, encoding='utf-8'))
@@ -1218,7 +1217,8 @@ class _EventListener(EventListener):
         if session.has_event_listener:
             raise ValueError(_('An event listener is already active on the '
                                'session.'))
-        self.appid = hashlib.md5(session._sessToken).hexdigest()
+        self.appid = hashlib.md5(
+            session._sessToken.encode('utf-8')).hexdigest()
         self.timeout = timeout if timeout != -1 else session.timeout
         self._lock = threading.RLock()
         self.handlers = []
diff -pruN 1.1.20+ds1-2/pypowervm/log.py 1.1.27+ds1-2/pypowervm/log.py
--- 1.1.20+ds1-2/pypowervm/log.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/log.py	2022-02-15 15:58:17.000000000 +0000
@@ -51,5 +51,6 @@ def _logcall(filter_=None, dump_parms=Fa
         return wrapper
     return func_parms
 
+
 logcall = _logcall()
 logcall_args = _logcall(dump_parms=True)
diff -pruN 1.1.20+ds1-2/pypowervm/tasks/hdisk/_iscsi.py 1.1.27+ds1-2/pypowervm/tasks/hdisk/_iscsi.py
--- 1.1.20+ds1-2/pypowervm/tasks/hdisk/_iscsi.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tasks/hdisk/_iscsi.py	2022-02-15 15:58:17.000000000 +0000
@@ -270,7 +270,7 @@ def discover_iscsi(adapter, host_ip, use
         'auth': auth, 'discovery_auth': discovery_auth,
         'discovery_username': discovery_username,
         'discovery_password': discovery_password
-        }
+    }
 
     status, devname, udid = _discover_iscsi(adapter, host_ip, vios_uuid,
                                             multipath, **kwargs)
diff -pruN 1.1.20+ds1-2/pypowervm/tasks/ibmi.py 1.1.27+ds1-2/pypowervm/tasks/ibmi.py
--- 1.1.20+ds1-2/pypowervm/tasks/ibmi.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tasks/ibmi.py	2022-02-15 15:58:17.000000000 +0000
@@ -106,6 +106,7 @@ class IBMiPanelOperations(object):
     ALL_VALUES = (DUMPRESTART, DSTON, RETRYDUMP, REMOTEDSTOFF, REMOTEDSTON,
                   IOPRESET, IOPDUMP, CONSOLESERVICE)
 
+
 CONF = cfg.CONF
 IBMI_PANEL_JOB_SUFFIX = 'PanelFunction'
 IBMI_PARAM_KEY = 'operation'
diff -pruN 1.1.20+ds1-2/pypowervm/tasks/network_bridger.py 1.1.27+ds1-2/pypowervm/tasks/network_bridger.py
--- 1.1.20+ds1-2/pypowervm/tasks/network_bridger.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tasks/network_bridger.py	2022-02-15 15:58:17.000000000 +0000
@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 IBM Corp.
+# Copyright 2015, 2020 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -22,6 +22,7 @@ import copy
 import six
 
 from oslo_concurrency import lockutils as lock
+from oslo_config import cfg
 
 from pypowervm import const as c
 from pypowervm import exceptions as pvm_exc
@@ -31,9 +32,28 @@ from pypowervm.wrappers import managed_s
 from pypowervm.wrappers import network as pvm_net
 from pypowervm.wrappers import virtual_io_server as pvm_vios
 
+_MAX_VEAS_PER_SEA = 15
 _MAX_VLANS_PER_VEA = 20
 _ENSURE_VLAN_LOCK = 'ensure_vlans_nb'
 
+CONF = cfg.CONF
+
+try:
+    CONF.register_opt(
+        cfg.BoolOpt('load_balance_vlan_across_veas', default=False,
+                    help='If True, VLANs are configured on a new VEA '
+                         'until the total VEA limit is reached. After '
+                         'that, additional VLANs are placed on the '
+                         'least-used VEA, to load balance the VLAN '
+                         'configuration for better performance. This '
+                         'avoids filling a VEA to its limit before '
+                         'creating a new VEA.'))
+except cfg.DuplicateOptError:
+    # Registering this option during compute service startup on a NovaLink
+    # host will raise this error, since this module is also loaded by
+    # vif/driver.py. Hence, the error can be ignored here.
+    pass
+
 
 def ensure_vlans_on_nb(adapter, host_uuid, nb_uuid, vlan_ids):
     """Will make sure that the VLANs are assigned to the Network Bridge.
@@ -768,7 +788,7 @@ class NetworkBridgerTA(NetworkBridger):
 
         # For each Trunk Adapter, change the VID to the new value.
         for ta in impacted_tas:
-                ta.pvid = new_vid
+            ta.pvid = new_vid
 
         # Call the update
         impacted_nb = impacted_nb.update()
@@ -900,7 +920,15 @@ class NetworkBridgerTA(NetworkBridger):
         # Find a trunk with the lowest amount of VLANs on it.
         cur_min = None
         avail_count = 0
-        for trunk in nb.seas[0].addl_adpts:
+
+        trunks = nb.seas[0].addl_adpts
+        if (CONF.load_balance_vlan_across_veas and
+                len(trunks) < _MAX_VEAS_PER_SEA):
+            # Create trunks until the maximum limit is reached. This load
+            # balances VLANs across trunks for better performance.
+            return None
+
+        for trunk in trunks:
             # If this trunk has maxed out its VLANs, skip to next.
             if len(trunk.tagged_vlans) >= _MAX_VLANS_PER_VEA:
                 continue
@@ -908,7 +936,8 @@ class NetworkBridgerTA(NetworkBridger):
             # This could definitely support it...
             avail_count += 1
 
-            # But, is it the best?
+            # But is it the best? Iterate over all the trunks until we find
+            # the least-used one.
             if (cur_min is None or
                     len(trunk.tagged_vlans) < len(cur_min.tagged_vlans)):
                 cur_min = trunk
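
The _find_available_trunks change above implements the policy behind the
new load_balance_vlan_across_veas option: when enabled, new VLANs keep
going onto fresh VEAs until the SEA's VEA limit is reached, and only then
onto the least-used existing VEA. A minimal sketch of that selection
policy, using simplified stand-in trunk objects rather than the real
pypowervm wrappers:

    _MAX_VEAS_PER_SEA = 15
    _MAX_VLANS_PER_VEA = 20

    def pick_trunk(trunks, load_balance):
        """Return the trunk (VEA) to put a new VLAN on, or None.

        None tells the caller to create a brand-new trunk adapter.  Each
        'trunk' only needs a tagged_vlans list for this sketch.
        """
        if load_balance and len(trunks) < _MAX_VEAS_PER_SEA:
            # Keep spreading VLANs onto new VEAs until the limit is hit.
            return None
        candidates = [t for t in trunks
                      if len(t.tagged_vlans) < _MAX_VLANS_PER_VEA]
        if not candidates:
            return None
        # Otherwise place the VLAN on the least-used existing VEA.
        return min(candidates, key=lambda t: len(t.tagged_vlans))
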
diff -pruN 1.1.20+ds1-2/pypowervm/tasks/partition.py 1.1.27+ds1-2/pypowervm/tasks/partition.py
--- 1.1.20+ds1-2/pypowervm/tasks/partition.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tasks/partition.py	2022-02-15 15:58:17.000000000 +0000
@@ -47,6 +47,7 @@ _DOWN_VM_STATES = (bp.LPARState.NOT_ACTI
                    bp.LPARState.UNKNOWN)
 
 _SUFFIX_PARM_CLONE_UUID = 'CloneUUID'
+_SUFFIX_PARM_ADD_LICENSE = 'AddLicense'
 
 _LOW_WAIT_TIME = 120
 _HIGH_WAIT_TIME = 600
@@ -398,3 +399,24 @@ def clone_uuid(adapter, lpar_uuid, surro
                                                   surrogate_lpar_name)]
 
     job_wrapper.run_job(lpar_uuid, job_parms=job_parms)
+
+
+def ibmi_add_license_key(adapter, lpar_uuid, license_key):
+    """Issue the AddLicense job.
+
+    The AddLicense job submits a license key to an IBMi partition.
+
+    :param adapter: The pypowervm adapter to issue the job.
+    :param lpar_uuid: UUID of the IBMi partition.
+    :param license_key: License key for the IBMi partition.
+    """
+    resp = adapter.read(lpar.LPAR.schema_type, root_id=lpar_uuid,
+                        suffix_type=c.SUFFIX_TYPE_DO,
+                        suffix_parm=_SUFFIX_PARM_ADD_LICENSE)
+    job_wrapper = job.Job.wrap(resp.entry)
+    job_parms = [job_wrapper.create_job_parameter('licKey', license_key)]
+    try:
+        job_wrapper.run_job(lpar_uuid, job_parms=job_parms)
+    except Exception:
+        LOG.exception(_('IBMi Key Injection Failed'))
+        raise
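
The new ibmi_add_license_key task follows the usual pypowervm Job pattern:
read the AddLicense job template for the LPAR, wrap it, attach a 'licKey'
parameter, and run the job. Assuming an existing adapter and the UUID of
an IBMi partition (placeholders below supplied by the caller), usage is
simply:

    from pypowervm.tasks import partition

    partition.ibmi_add_license_key(adapter, lpar_uuid, license_key)
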
diff -pruN 1.1.20+ds1-2/pypowervm/tasks/scsi_mapper.py 1.1.27+ds1-2/pypowervm/tasks/scsi_mapper.py
--- 1.1.20+ds1-2/pypowervm/tasks/scsi_mapper.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tasks/scsi_mapper.py	2022-02-15 15:58:17.000000000 +0000
@@ -490,6 +490,7 @@ def find_maps(mapping_list, client_lpar_
     if not match_func:
         # Default no filter
         match_func = lambda x: True
+
     if stg_elem:
         # Match storage element on type and name
         match_func = lambda stg_el: (
diff -pruN 1.1.20+ds1-2/pypowervm/tasks/slot_map.py 1.1.27+ds1-2/pypowervm/tasks/slot_map.py
--- 1.1.20+ds1-2/pypowervm/tasks/slot_map.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tasks/slot_map.py	2022-02-15 15:58:17.000000000 +0000
@@ -1,4 +1,4 @@
-# Copyright 2016, 2017 IBM Corp.
+# Copyright 2016, 2020 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -25,6 +25,7 @@ import pickle
 import six
 import warnings
 
+from oslo_serialization import base64 as base64utils
 from pypowervm import exceptions as pvm_ex
 from pypowervm.i18n import _
 from pypowervm import util as pvm_util
@@ -74,7 +75,14 @@ class SlotMapStore(object):
         self._vswitch_map = None
         map_str = self.load() if load else None
         # Deserialize or initialize
-        self._slot_topo = pickle.loads(map_str) if map_str else {}
+        try:
+            self._slot_topo = pickle.loads(
+                base64utils.decode_as_bytes(map_str)) if map_str else {}
+        except UnicodeDecodeError:
+            # Fall back to the old way of decoding slot map data. This is
+            # required for virtual machines deployed on a py2 env and
+            # upgraded to py3.
+            self._slot_topo = pickle.loads(map_str) if map_str else {}
         # Save a copy of the topology so we can tell when it has changed
         self._loaded_topo = copy.deepcopy(self._slot_topo)
 
@@ -212,7 +220,14 @@ class SlotMapStore(object):
                        be incorporated.
         :param fab: The fabric name associated with the mapping.
         """
-        self._reg_slot(IOCLASS.VFC, fab, vfcmap.server_adapter.lpar_slot_num)
+        if vfcmap.client_adapter:
+            wwpn_list = vfcmap.client_adapter.wwpns
+            self._reg_slot(IOCLASS.VFC, fab,
+                           vfcmap.server_adapter.lpar_slot_num,
+                           extra_spec=wwpn_list)
+        else:
+            self._reg_slot(IOCLASS.VFC, fab,
+                           vfcmap.server_adapter.lpar_slot_num)
 
     def drop_vfc_mapping(self, vfcmap, fab):
         """Drops the client network adapter from the slot topology.
@@ -726,14 +741,18 @@ class RebuildSlotMap(BuildSlotMap):
         seen_fabrics = set()
         for fabric in fabrics:
             fabric_slots = []
+            fabric_wwpn = {}
             # Add the slot numbers for this fabric
             for slot, iomap in six.iteritems(self._slot_store.topology):
                 if fabric not in iomap.get(IOCLASS.VFC, {}):
                     continue
                 fabric_slots.append(slot)
+                fabric_wwpn[slot] = iomap.get(IOCLASS.VFC, {}).get(fabric, {})
                 seen_fabrics.add(fabric)
 
+            fabric_slot = fabric + '_wwpn'
             self._put_novios_val(IOCLASS.VFC, fabric, fabric_slots)
+            self._put_novios_val(IOCLASS.VFC, fabric_slot, fabric_wwpn)
 
         # Make sure all the topology's fabrics are accounted for.
         # topo_fabrics is all the fabrics in all the slots from the slot_map
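
The SlotMapStore change above makes deserialization tolerate both storage
formats: newer stores hold base64 text wrapping the pickled topology,
while py2-era stores hold the raw pickle bytes. A rough standalone
illustration of that fallback, using plain base64/pickle instead of the
oslo_serialization helpers the module actually calls:

    import base64
    import pickle

    def load_topology(map_str):
        # Hypothetical helper mirroring the try/except in
        # SlotMapStore.__init__, not its exact exception handling.
        if not map_str:
            return {}
        if isinstance(map_str, bytes):
            # Legacy py2-era store: the value is the raw pickle payload.
            return pickle.loads(map_str)
        # Current store: base64 text wrapping the pickle payload.
        return pickle.loads(base64.b64decode(map_str))

    topo = {3: {'VFC': {'fab1': ['C05076065A8B005A', 'C05076065A8B005B']}}}
    assert load_topology(base64.b64encode(pickle.dumps(topo)).decode()) == topo
    assert load_topology(pickle.dumps(topo)) == topo
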
diff -pruN 1.1.20+ds1-2/pypowervm/tasks/sriov.py 1.1.27+ds1-2/pypowervm/tasks/sriov.py
--- 1.1.20+ds1-2/pypowervm/tasks/sriov.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tasks/sriov.py	2022-02-15 15:58:17.000000000 +0000
@@ -1,4 +1,4 @@
-# Copyright 2016, 2018 IBM Corp.
+# Copyright 2016, 2021 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -51,7 +51,7 @@ def _validate_capacity(min_capacity, max
 
 def set_vnic_back_devs(vnic_w, pports, sys_w=None, vioses=None, redundancy=1,
                        capacity=None, max_capacity=None,
-                       check_port_status=False):
+                       check_port_status=False, redundant_pports=None):
     """Set a vNIC's backing devices over given SRIOV physical ports and VIOSes.
 
     Assign the backing devices to a iocard.VNIC wrapper using an anti-affinity
@@ -128,6 +128,12 @@ def set_vnic_back_devs(vnic_w, pports, s
     :param max_capacity: (float) Maximum capacity to assign to each backing
                          device. Must be greater or equal to capacity and
                          less than 1.0.
+    :param redundant_pports: List of physical location code strings
+                             (corresponding to the loc_code @property of
+                             iocard.SRIOV*PPort) for all SRIOV redundant
+                             physical ports to be considered as backing
+                             devices for the vNIC. This does not mean that
+                             all of these ports will be used.
     :param check_port_status: If True, only ports with link-up status will be
                               considered for allocation.  If False (the
                               default), link-down ports may be used.
@@ -167,45 +173,62 @@ def set_vnic_back_devs(vnic_w, pports, s
     # Try not to end up lopsided on one VIOS
     random.shuffle(vioses)
 
-    # Get the subset of backing ports corresponding to the specified location
-    # codes which have enough space for new VFs.
-    pport_wraps = _get_good_pport_list(sriov_adaps, pports, capacity,
-                                       redundancy, check_port_status)
-
     # At this point, we've validated enough that we won't raise.  Start by
     # clearing any existing backing devices.
     vnic_w.back_devs = []
 
-    card_use = {}
-    for pport in pport_wraps:
-        said = pport.sriov_adap_id
-        if said not in card_use:
-            card_use[said] = {'num_used': 0, 'ports_left': 0}
-        card_use[said]['ports_left'] += 1
-    vio_idx = 0
-    while pport_wraps and len(vnic_w.back_devs) < redundancy:
-        # Always rotate VIOSes
-        vio = vioses[vio_idx]
-        vio_idx = (vio_idx + 1) % len(vioses)
-        # Select the least-saturated port from among the least-used adapters.
-        least_uses = min([cud['num_used'] for cud in card_use.values()])
-        pp2use = min([pport for pport in pport_wraps if
-                      card_use[pport.sriov_adap_id]['num_used'] == least_uses],
-                     key=lambda pp: pp.allocated_capacity)
-        said = pp2use.sriov_adap_id
-        # Register a hit on the chosen port's card
-        card_use[said]['num_used'] += 1
-        # And take off a port
-        card_use[said]['ports_left'] -= 1
-        # If that was the last port, remove this card from consideration
-        if card_use[said]['ports_left'] == 0:
-            del card_use[said]
-        # Create and add the backing device
-        vnic_w.back_devs.append(card.VNICBackDev.bld(
-            adap, vio.uuid, said, pp2use.port_id, capacity=capacity,
-            max_capacity=max_capacity))
-        # Remove the port we just used from subsequent consideration.
-        pport_wraps.remove(pp2use)
+    # Get the subset of backing ports corresponding to the specified
+    # location codes which have enough space for new VFs.
+    pport_wraps = _get_good_pport_list(sriov_adaps, pports, capacity,
+                                       redundancy, check_port_status,
+                                       redundant_pports=redundant_pports)
+    if redundant_pports and (redundancy > 1):
+        redundant_pport_wraps = _get_good_pport_list(
+            sriov_adaps, redundant_pports, capacity, redundancy,
+            check_port_status, redundant_pports=redundant_pports)
+    if (redundancy > 1 and len(pport_wraps) == 1 and
+            len(redundant_pport_wraps) < 1):
+        raise ex.InsufficientSRIOVCapacity(red=redundancy,
+                                           found_vfs=len(pport_wraps))
+
+    def _pports_config(pport_wraps, vio_idx=0):
+        card_use = {}
+        for pport in pport_wraps:
+            said = pport.sriov_adap_id
+            if said not in card_use:
+                card_use[said] = {'num_used': 0, 'ports_left': 0}
+            card_use[said]['ports_left'] += 1
+        while pport_wraps and len(vnic_w.back_devs) < redundancy:
+            # Always rotate VIOSes
+            vio = vioses[vio_idx]
+            vio_idx = (vio_idx + 1) % len(vioses)
+            # Select the least-saturated port from among the least-used
+            # adapters.
+            least_uses = min([cud['num_used'] for cud in card_use.values()])
+            pp2use = min([pport for pport in pport_wraps if
+                          card_use[
+                              pport.sriov_adap_id]['num_used'] == least_uses],
+                         key=lambda pp: pp.allocated_capacity)
+            said = pp2use.sriov_adap_id
+            # Register a hit on the chosen port's card
+            card_use[said]['num_used'] += 1
+            # And take off a port
+            card_use[said]['ports_left'] -= 1
+            # If that was the last port, remove this card from consideration
+            if card_use[said]['ports_left'] == 0:
+                del card_use[said]
+            # Create and add the backing device
+            vnic_w.back_devs.append(card.VNICBackDev.bld(
+                adap, vio.uuid, said, pp2use.port_id, capacity=capacity,
+                max_capacity=max_capacity))
+            # Remove the port we just used from subsequent consideration.
+            pport_wraps.remove(pp2use)
+            if redundant_pports and redundancy == 2:
+                break
+
+    _pports_config(pport_wraps)
+    if redundant_pports and redundancy == 2:
+        _pports_config(redundant_pport_wraps, vio_idx=1)
 
 
 def _check_sys_vnic_capabilities(adap, sys_w, redundancy):
@@ -300,7 +323,7 @@ def _get_good_sriovs(sriov_adaps):
 
 
 def _get_good_pport_list(sriov_adaps, pports, capacity, redundancy,
-                         check_link_status):
+                         check_link_status, redundant_pports=None):
     """Get a list of SRIOV*PPort filtered by capacity and specified pports.
 
     Builds a list of pypowervm.wrappers.iocard.SRIOV*PPort from sriov_adaps
@@ -325,6 +348,8 @@ def _get_good_pport_list(sriov_adaps, pp
                        raised.
     :param check_link_status: If True, ports with link-down status will not be
                               returned.  If False, link status is not checked.
+    :param redundant_pports: A list of physical location code strings for
+                             the redundant physical ports to consider.
     :raise InsufficientSRIOVCapacity: If the final list contains fewer than
                                       'redundancy' ports.
     :return: A filtered list of SRIOV*PPort wrappers.
@@ -356,7 +381,7 @@ def _get_good_pport_list(sriov_adaps, pp
                 pp2add = copy.deepcopy(pport)
                 pport_wraps.append(pp2add)
 
-    if len(pport_wraps) < redundancy:
+    if len(pport_wraps) < redundancy and not redundant_pports:
         raise ex.InsufficientSRIOVCapacity(red=redundancy,
                                            found_vfs=len(pport_wraps))
     LOG.debug('Filtered list of physical ports: %s' %
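
The refactored _pports_config above keeps the original anti-affinity
placement: rotate through the VIOSes and, on each pass, take the
least-saturated port from among the adapters that have been picked the
least so far. A condensed sketch of just that port-selection step, with a
simplified port stand-in rather than the iocard wrappers:

    from collections import namedtuple

    # Simplified stand-in for iocard.SRIOV*PPort.
    Port = namedtuple('Port', ['sriov_adap_id', 'port_id',
                               'allocated_capacity'])

    def pick_port(pports, card_use):
        """Pick the next backing port.

        card_use maps adapter id -> {'num_used': ..., 'ports_left': ...},
        as built in _pports_config above.
        """
        least_uses = min(cud['num_used'] for cud in card_use.values())
        return min((p for p in pports
                    if card_use[p.sriov_adap_id]['num_used'] == least_uses),
                   key=lambda p: p.allocated_capacity)

    ports = [Port(1, 0, 0.04), Port(1, 1, 0.02), Port(2, 0, 0.10)]
    use = {1: {'num_used': 0, 'ports_left': 2},
           2: {'num_used': 0, 'ports_left': 1}}
    assert pick_port(ports, use) == Port(1, 1, 0.02)
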
diff -pruN 1.1.20+ds1-2/pypowervm/tasks/vfc_mapper.py 1.1.27+ds1-2/pypowervm/tasks/vfc_mapper.py
--- 1.1.20+ds1-2/pypowervm/tasks/vfc_mapper.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tasks/vfc_mapper.py	2022-02-15 15:58:17.000000000 +0000
@@ -374,8 +374,8 @@ def _find_ports_on_vio(vio_w, p_port_wwp
 
 def _fuse_vfc_ports(wwpn_list):
     """Returns a list of fused VFC WWPNs.  See derive_npiv_map."""
-    l = list(map(u.sanitize_wwpn_for_api, wwpn_list))
-    return list(map(' '.join, zip(l[::2], l[1::2])))
+    ll = list(map(u.sanitize_wwpn_for_api, wwpn_list))
+    return list(map(' '.join, zip(ll[::2], ll[1::2])))
 
 
 def find_pfc_wwpn_by_name(vios_w, pfc_name):
diff -pruN 1.1.20+ds1-2/pypowervm/tasks/vterm.py 1.1.27+ds1-2/pypowervm/tasks/vterm.py
--- 1.1.20+ds1-2/pypowervm/tasks/vterm.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tasks/vterm.py	2022-02-15 15:58:17.000000000 +0000
@@ -1,4 +1,4 @@
-# Copyright 2015, 2018 IBM Corp.
+# Copyright 2015, 2021 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -33,6 +33,7 @@ from oslo_utils import encodeutils
 import pypowervm.const as c
 from pypowervm import exceptions as pvm_exc
 from pypowervm.i18n import _
+import pypowervm.wrappers.base_partition as bp
 from pypowervm.wrappers import job
 import pypowervm.wrappers.logical_partition as pvm_lpar
 
@@ -106,7 +107,7 @@ def _close_vterm_local(adapter, lpar_uui
             _VNC_LOCAL_PORT_TO_REPEATER[vnc_port].stop()
 
 
-def open_localhost_vnc_vterm(adapter, lpar_uuid, force=False):
+def open_localhost_vnc_vterm(adapter, lpar_uuid, force=False, codepage="037"):
     """Opens a VNC vTerm to a given LPAR.  Always binds to localhost.
 
     :param adapter: The adapter to drive the PowerVM API
@@ -114,6 +115,8 @@ def open_localhost_vnc_vterm(adapter, lp
     :param force: (Optional, Default: False) If set to true will force the
                   console to be opened as VNC even if it is already opened
                   via some other means.
+    :param codepage: (Optional, Default: 037) Language code page for the
+                  IBMi console. The default, 037, is English.
     :return: The VNC Port that the terminal is running on.
     """
     # This API can only run if local.
@@ -121,11 +124,34 @@ def open_localhost_vnc_vterm(adapter, lp
         raise pvm_exc.ConsoleNotLocal()
 
     lpar_id = _get_lpar_id(adapter, lpar_uuid)
+    lpar_type = _get_lpar_type(adapter, lpar_uuid)
 
-    def _run_mkvterm_cmd(lpar_uuid, force):
+    def _run_mkvterm_cmd(lpar_uuid, force, codepage=codepage):
         cmd = ['mkvterm', '--id', str(lpar_id), '--vnc', '--local']
+        if lpar_type.replace('"', '') == bp.LPARType.OS400:
+            ibmi_cmd = ['--consolesettings', 'codepage=' + str(codepage)]
+            cmd.extend(ibmi_cmd)
         ret_code, std_out, std_err = _run_proc(cmd)
 
+        # If the vterm is already running for an IBMi VM and a new request
+        # arrives with a different codepage attribute, close and reopen the
+        # vterm with the new codepage. If the running codepage and the new
+        # codepage are the same, there is nothing to do.
+
+        if ret_code == 3 and lpar_type.replace('"', '') == bp.LPARType.OS400:
+            cmd_ps = ['pgrep', 'mkvtermutil', '--list-full']
+            ret_code1, std_out1, std_err1 = _run_proc(cmd_ps)
+            std_out1 = six.text_type(std_out1)
+            std_err1 = six.text_type(std_err1)
+            for line in std_out1.splitlines():
+                search = "--id " + str(lpar_id)
+                if search in line:
+                    break
+            search = "--codepage " + str(codepage)
+            if search not in line:
+                close_vterm(adapter, lpar_uuid)
+                ret_code, std_out, std_err = _run_proc(cmd)
+
         # If the vterm was already started, the mkvterm command will always
         # return an error message with a return code of 3.  However, there
         # are 2 scenarios here, one where it was started with the VNC option
@@ -134,6 +160,7 @@ def open_localhost_vnc_vterm(adapter, lp
         # where we will get no port.  If it is the out-of-band scenario and
         # they asked us to force the connection, then we will attempt to
         # terminate the old vterm session so we can start up one with VNC.
+
         if force and ret_code == 3 and not _parse_vnc_port(std_out):
             LOG.warning(_("Invalid output on vterm open.  Trying to reset the "
                           "vterm.  Error was %s"), std_err)
@@ -143,19 +170,19 @@ def open_localhost_vnc_vterm(adapter, lp
         # The only error message that is fine is a return code of 3 that a
         # session is already started, where we got back the port back meaning
         # that it was started as VNC.  Else, raise up the error message.
-        if ret_code != 0 and not (ret_code == 3 and _parse_vnc_port(std_out)):
+        if ret_code != 0 and ret_code != 3:
             raise pvm_exc.VNCBasedTerminalFailedToOpen(err=std_err)
 
         # Parse the VNC Port out of the stdout returned from mkvterm
         return _parse_vnc_port(std_out)
 
-    return _run_mkvterm_cmd(lpar_uuid, force)
+    return _run_mkvterm_cmd(lpar_uuid, force, codepage)
 
 
 def open_remotable_vnc_vterm(
         adapter, lpar_uuid, local_ip, remote_ips=None, vnc_path=None,
         use_x509_auth=False, ca_certs=None, server_cert=None, server_key=None,
-        force=False):
+        force=False, codepage="037"):
     """Opens a VNC vTerm to a given LPAR.  Wraps in some validation.
 
     Must run on the management partition.
@@ -199,6 +226,8 @@ def open_remotable_vnc_vterm(
     :param force: (Optional, Default: False) If set to true will force the
                   console to be opened as VNC even if it is already opened
                   via some other means.
+    :param codepage: (Optional, Default: 037) Language code page for the
+                  IBMi console. The default, 037, is English.
     :return: The VNC Port that the terminal is running on.
     """
     # This API can only run if local.
@@ -207,13 +236,15 @@ def open_remotable_vnc_vterm(
 
     # Open the VNC Port.  If already open, it will just return the same port,
     # so no harm re-opening.  The stdout will just print out the existing port.
-    local_port = open_localhost_vnc_vterm(adapter, lpar_uuid, force=force)
+    local_port = open_localhost_vnc_vterm(
+        adapter, lpar_uuid, force=force, codepage=codepage)
     # If a VNC path is provided then we have a way to map an incoming
     # connection to a given LPAR and will use the single 5901 port, otherwise
     # we need to listen for remote connections on the same port as the local
     # one so we know which VNC session to forward the connection's data to
     remote_port = _REMOTE_PORT if vnc_path is not None else local_port
-    _VNC_UUID_TO_LOCAL_PORT[lpar_uuid] = local_port
+    if local_port:
+        _VNC_UUID_TO_LOCAL_PORT[lpar_uuid] = local_port
 
     # We will use a flag to the Socket Listener to tell it whether the
     # user provided us a VNC Path we should use to look up the UUID from
@@ -267,6 +298,12 @@ def _get_lpar_id(adapter, lpar_uuid):
     return lpar_resp.body
 
 
+def _get_lpar_type(adapter, lpar_uuid):
+    lpar_resp = adapter.read(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid,
+                             suffix_type='quick', suffix_parm='PartitionType')
+    return lpar_resp.body
+
+
 def _parse_vnc_port(std_out):
     """Parse the VNC port number out of the standard output from mkvterm.
 
@@ -397,11 +434,12 @@ class _VNCSocketListener(threading.Threa
             lpar_uuid, http_code = self._check_http_connect(client_socket)
             if lpar_uuid:
                 # Send back the success message.
-                client_socket.sendall("HTTP/%s 200 OK\r\n\r\n" % http_code)
+                client_socket.sendall(encodeutils.safe_encode(
+                    "HTTP/%s 200 OK\r\n\r\n" % http_code))
             else:
                 # Was not a success, exit.
-                client_socket.sendall("HTTP/%s 400 Bad Request\r\n\r\n" %
-                                      http_code)
+                client_socket.sendall(encodeutils.safe_encode(
+                    "HTTP/%s 400 Bad Request\r\n\r\n" % http_code))
                 client_socket.close()
                 return
         # If we had no VNC Path to match against, then the local port is
@@ -507,7 +545,7 @@ class _VNCSocketListener(threading.Threa
         # Say we only support VeNCrypt (19) authentication version 0.2
         client_socket.sendall(six.int2byte(1))
         client_socket.sendall(six.int2byte(19))
-        client_socket.sendall("\x00\x02")
+        client_socket.sendall(encodeutils.safe_encode("\x00\x02"))
         authtype = self._socket_receive(client_socket, 1)
         # Make sure the Client supports the VeNCrypt (19) authentication
         if len(authtype) < 1 or six.byte2int(authtype) != 19:
@@ -576,10 +614,14 @@ class _VNCSocketListener(threading.Threa
         value = client_socket.recv(header_len)
 
         # Find the HTTP Code (if you can...)
-        pat = r'^CONNECT\s+(\S+)\s+HTTP/(.*)\r\n\r\n$'
+        pat = encodeutils.safe_encode(r'^CONNECT\s+(\S+)\s+HTTP/(.*)\r\n\r\n$')
         res = re.match(pat, value)
         vnc_path = res.groups()[0] if res else None
         http_code = res.groups()[1] if res else '1.1'
+        if vnc_path and isinstance(vnc_path, bytes):
+            vnc_path = vnc_path.decode()
+        if http_code and isinstance(http_code, bytes):
+            http_code = http_code.decode()
         return _VNC_PATH_TO_UUID.get(vnc_path), http_code
 
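
The codepage handling above reduces to one rule inside _run_mkvterm_cmd:
IBMi (OS400) partitions get an explicit --consolesettings codepage
appended to the mkvterm command; other partition types do not. A small
sketch of just that command construction (a hypothetical helper, not a
function the module exposes):

    def build_mkvterm_cmd(lpar_id, lpar_type, codepage="037"):
        # Mirrors the command built inside _run_mkvterm_cmd above; the
        # PartitionType quick property may come back quoted, hence the
        # replace().
        cmd = ['mkvterm', '--id', str(lpar_id), '--vnc', '--local']
        if lpar_type.replace('"', '') == 'OS400':
            cmd.extend(['--consolesettings', 'codepage=' + str(codepage)])
        return cmd

    assert build_mkvterm_cmd('4', 'AIX/Linux') == [
        'mkvterm', '--id', '4', '--vnc', '--local']
    assert build_mkvterm_cmd('4', '"OS400"', codepage='939') == [
        'mkvterm', '--id', '4', '--vnc', '--local',
        '--consolesettings', 'codepage=939']
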
 
diff -pruN 1.1.20+ds1-2/pypowervm/tests/data/lpar.txt 1.1.27+ds1-2/pypowervm/tests/data/lpar.txt
--- 1.1.20+ds1-2/pypowervm/tests/data/lpar.txt	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/data/lpar.txt	2022-02-15 15:58:17.000000000 +0000
@@ -172,6 +172,15 @@ BODY{
     <BootMode kxe="false" kb="ROR">Normal</BootMode>
     <PendingSecureBoot ksv="V1_8_0" kxe="false" kb="CUD">2</PendingSecureBoot>
     <CurrentSecureBoot ksv="V1_8_0" kxe="false" kb="ROO">1</CurrentSecureBoot>
+    <AssociatedPersistentMemoryConfiguration ksv="V1_10_0" kxe="false" kb="ROO" schemaVersion="V1_11_0">
+        <Metadata>
+            <Atom/>
+        </Metadata>
+        <MaximumPersistentMemoryVolumes ksv="V1_10_0" kb="ROR" kxe="false">964</MaximumPersistentMemoryVolumes>
+        <CurrentPersistentMemoryVolumes ksv="V1_10_0" kxe="false" kb="ROR">2</CurrentPersistentMemoryVolumes>
+        <MaximumDramPersistentMemoryVolumes ksv="V1_10_0" kb="ROR" kxe="false">4</MaximumDramPersistentMemoryVolumes>
+        <CurrentDramPersistentMemoryVolumes ksv="V1_10_0" kxe="false" kb="ROR">2</CurrentDramPersistentMemoryVolumes>
+    </AssociatedPersistentMemoryConfiguration>
     <RemoteRestartCapable kb="COD" kxe="false">false</RemoteRestartCapable>
     <SimplifiedRemoteRestartCapable kb="COD" kxe="false">true</SimplifiedRemoteRestartCapable>
     <SuspendCapable kb="CUD" kxe="false">false</SuspendCapable>
diff -pruN 1.1.20+ds1-2/pypowervm/tests/data/managedsystem.txt 1.1.27+ds1-2/pypowervm/tests/data/managedsystem.txt
--- 1.1.20+ds1-2/pypowervm/tests/data/managedsystem.txt	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/data/managedsystem.txt	2022-02-15 15:58:17.000000000 +0000
@@ -1550,6 +1550,11 @@ BODY{
     <IsHMCPowerVMManagementMaster ksv="V1_3_0" kb="ROO" kxe="false">false</IsHMCPowerVMManagementMaster>
     <MeteredPoolID ksv="V1_8_0" kb="ROR" kxe="false">6689</MeteredPoolID>
     <ProcessorThrottling ksv="V1_8_0" kxe="false" kb="ROR">true</ProcessorThrottling>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">930:930 Japanese (Katanaka)</SupportedIBMiConsoleCodePage>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">e930:e930 Japan (Katanaka Extended)</SupportedIBMiConsoleCodePage>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">939:939 Japan (Latin Extended)</SupportedIBMiConsoleCodePage>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">1399:1399 Japanese (Latin Unicode Extended)</SupportedIBMiConsoleCodePage>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">e1399:e1399 Japanese (Latin Unicode Extended: JIS2014)</SupportedIBMiConsoleCodePage>	
 </ManagedSystem:ManagedSystem>
         </content>
     </entry>
@@ -2496,6 +2501,11 @@ BODY{
     <IsPowerVMManagementMaster ksv="V1_3_0" kxe="false" kb="ROO">true</IsPowerVMManagementMaster>
     <IsManagementPartitionPowerVMManagementMaster ksv="V1_3_0" kb="ROO" kxe="false">true</IsManagementPartitionPowerVMManagementMaster>
     <IsHMCPowerVMManagementMaster ksv="V1_3_0" kb="ROO" kxe="false">false</IsHMCPowerVMManagementMaster>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">930:930 Japanese (Katanaka)</SupportedIBMiConsoleCodePage>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">e930:e930 Japan (Katanaka Extended)</SupportedIBMiConsoleCodePage>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">939:939 Japan (Latin Extended)</SupportedIBMiConsoleCodePage>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">1399:1399 Japanese (Latin Unicode Extended)</SupportedIBMiConsoleCodePage>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">e1399:e1399 Japanese (Latin Unicode Extended: JIS2014)</SupportedIBMiConsoleCodePage>	
 </ManagedSystem:ManagedSystem>
         </content>
     </entry>
@@ -3458,6 +3468,11 @@ BODY{
     <IsPowerVMManagementMaster ksv="V1_3_0" kxe="false" kb="ROO">true</IsPowerVMManagementMaster>
     <IsManagementPartitionPowerVMManagementMaster ksv="V1_3_0" kb="ROO" kxe="false">true</IsManagementPartitionPowerVMManagementMaster>
     <IsHMCPowerVMManagementMaster ksv="V1_3_0" kb="ROO" kxe="false">false</IsHMCPowerVMManagementMaster>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">930:930 Japanese (Katanaka)</SupportedIBMiConsoleCodePage>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">e930:e930 Japan (Katanaka Extended)</SupportedIBMiConsoleCodePage>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">939:939 Japan (Latin Extended)</SupportedIBMiConsoleCodePage>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">1399:1399 Japanese (Latin Unicode Extended)</SupportedIBMiConsoleCodePage>
+	<SupportedIBMiConsoleCodePage ksv="V1_8_0" kxe="false" kb="ROO">e1399:e1399 Japanese (Latin Unicode Extended: JIS2014)</SupportedIBMiConsoleCodePage>	
 </ManagedSystem:ManagedSystem>
         </content>
     </entry>
diff -pruN 1.1.20+ds1-2/pypowervm/tests/helpers/test_vios_busy.py 1.1.27+ds1-2/pypowervm/tests/helpers/test_vios_busy.py
--- 1.1.20+ds1-2/pypowervm/tests/helpers/test_vios_busy.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/helpers/test_vios_busy.py	2022-02-15 15:58:17.000000000 +0000
@@ -59,7 +59,7 @@ class TestVIOSBusyHelper(unittest.TestCa
         self.assertRaises(pvmex.Error, adpt._request, 'method', 'path',
                           body='the body', helpers=hlp)
         # Should have tried 'retries' times plus the initial one
-        self.assertEqual(mock_sess.request.call_count, retries+1)
+        self.assertEqual(mock_sess.request.call_count, retries + 1)
 
         # Test with None response
         mock_sess.reset_mock()
diff -pruN 1.1.20+ds1-2/pypowervm/tests/tasks/test_memory.py 1.1.27+ds1-2/pypowervm/tests/tasks/test_memory.py
--- 1.1.20+ds1-2/pypowervm/tests/tasks/test_memory.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/tasks/test_memory.py	2022-02-15 15:58:17.000000000 +0000
@@ -83,7 +83,7 @@ class TestMemory(testtools.TestCase):
                                                              mock_host_uuid,
                                                              **kwargs2))
         self.assertEqual(6, mock_job_p.call_count)
-        self.assertEqual((1024-768), overhead)
+        self.assertEqual((1024 - 768), overhead)
         self.assertEqual(32768, avail)
         _reset_mocks()
 
diff -pruN 1.1.20+ds1-2/pypowervm/tests/tasks/test_network_bridger.py 1.1.27+ds1-2/pypowervm/tests/tasks/test_network_bridger.py
--- 1.1.20+ds1-2/pypowervm/tests/tasks/test_network_bridger.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/tasks/test_network_bridger.py	2022-02-15 15:58:17.000000000 +0000
@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 IBM Corp.
+# Copyright 2015, 2020 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -17,6 +17,8 @@
 import mock
 import testtools
 
+from oslo_config.cfg import CONF
+
 from pypowervm import adapter as adpt
 import pypowervm.entities as ent
 from pypowervm import exceptions as pvm_exc
@@ -641,8 +643,19 @@ class TestNetworkBridgerTA(TestNetworkBr
         self.assertEqual(1, self.adpt.update_by_path.call_count)
 
     def test_find_available_trunks(self):
+        CONF.load_balance_vlan_across_veas = True
+        nb = pvm_net.NetBridge.wrap(self.mgr_nbr_resp)[0]
+        bridger = net_br.NetworkBridgerTA(self.adpt, self.host_uuid)
+        trunks = bridger._find_available_trunks(nb)
+        self.assertIsNone(trunks)
+
+    def test_find_available_trunks_1(self):
         nb = pvm_net.NetBridge.wrap(self.mgr_nbr_resp)[0]
         bridger = net_br.NetworkBridgerTA(self.adpt, self.host_uuid)
+
+        net_br._MAX_VEAS_PER_SEA = 1
+        net_br._MAX_VLANS_PER_VEA = 3
+
         trunks = bridger._find_available_trunks(nb)
         self.assertIsNotNone(trunks)
 
@@ -674,6 +687,7 @@ class TestNetworkBridgerTA(TestNetworkBr
         sea.addl_adpts = [trunk_addl, trunk_addl2, trunk_addl3]
         nb.seas = [sea]
 
+        net_br._MAX_VEAS_PER_SEA = 3
         bridger = net_br.NetworkBridgerTA(self.adpt, self.host_uuid)
         bridger._find_available_trunks(nb)
 
diff -pruN 1.1.20+ds1-2/pypowervm/tests/tasks/test_slot_map.py 1.1.27+ds1-2/pypowervm/tests/tasks/test_slot_map.py
--- 1.1.20+ds1-2/pypowervm/tests/tasks/test_slot_map.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/tasks/test_slot_map.py	2022-02-15 15:58:17.000000000 +0000
@@ -1,4 +1,4 @@
-# Copyright 2016, 2017 IBM Corp.
+# Copyright 2016, 2020 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -19,6 +19,7 @@ import mock
 import six
 import testtools
 
+from oslo_serialization import base64
 from pypowervm import exceptions as pv_e
 from pypowervm.tasks import slot_map
 from pypowervm.tests.test_utils import pvmhttp
@@ -32,6 +33,7 @@ from pypowervm.wrappers import virtual_i
 def loadf(wcls, fname):
     return wcls.wrap(pvmhttp.load_pvm_resp(fname).get_response())
 
+
 # Load data files just once, since the wrappers will be read-only
 vio1 = loadf(vios.VIOS, 'fake_vios_ssp_npiv.txt')
 vio2 = loadf(vios.VIOS, 'fake_vios_mappings.txt')
@@ -105,8 +107,9 @@ class TestSlotMapStoreLegacy(testtools.T
         doesnt_unpickle = self.smt_impl('foo')
         mock_unpickle.assert_not_called()
         self.assertEqual({}, doesnt_unpickle.topology)
-        unpickles = self.smt_impl('foo', load_ret='abc123')
-        mock_unpickle.assert_called_once_with('abc123')
+        val = base64.encode_as_text('abc123')
+        unpickles = self.smt_impl('foo', load_ret=val)
+        mock_unpickle.assert_called_once_with(b'abc123')
         self.assertEqual(mock_unpickle.return_value, unpickles.topology)
 
     @mock.patch('pickle.dumps')
@@ -256,24 +259,41 @@ class TestSlotMapStoreLegacy(testtools.T
             for vfcmap in vio.vfc_mappings:
                 smt.register_vfc_mapping(vfcmap, 'fab%d' % i)
                 i += 1
-        self.assertEqual({3: {'VFC': {'fab1': None, 'fab10': None,
-                                      'fab11': None, 'fab12': None,
-                                      'fab13': None, 'fab14': None,
-                                      'fab15': None, 'fab16': None,
-                                      'fab17': None, 'fab18': None,
-                                      'fab19': None, 'fab20': None,
-                                      'fab21': None, 'fab22': None,
-                                      'fab23': None, 'fab24': None,
-                                      'fab25': None, 'fab26': None,
-                                      'fab28': None, 'fab29': None,
-                                      'fab3': None, 'fab30': None,
-                                      'fab31': None, 'fab32': None,
-                                      'fab33': None, 'fab4': None,
-                                      'fab5': None, 'fab6': None,
-                                      'fab7': None, 'fab8': None,
-                                      'fab9': None}},
-                          6: {'VFC': {'fab2': None}},
-                          8: {'VFC': {'fab27': None}}}, smt.topology)
+        self.assertEqual(
+            {3: {'VFC': {'fab1': ['C05076065A8B005A', 'C05076065A8B005B'],
+                         'fab10': None,
+                         'fab11': None,
+                         'fab12': ['C05076065A7C02D6', 'C05076065A7C02D7'],
+                         'fab13': ['C05076065A7C030E', 'C05076065A7C030F'],
+                         'fab14': None,
+                         'fab15': ['C05076065A7C02D4', 'C05076065A7C02D5'],
+                         'fab16': None,
+                         'fab17': ['C05076065A7C02E0', 'C05076065A7C02E1'],
+                         'fab18': ['C05076065A7C02E0', 'C05076065A7C02E1'],
+                         'fab19': ['C05076065A7C02E0', 'C05076065A7C02E1'],
+                         'fab20': None,
+                         'fab21': None,
+                         'fab22': None,
+                         'fab23': None,
+                         'fab24': None,
+                         'fab25': ['C05076065A7C0002', 'C05076065A7C0003'],
+                         'fab26': ['C05076065A7C030A', 'C05076065A7C030B'],
+                         'fab28': None,
+                         'fab29': None,
+                         'fab3': None,
+                         'fab30': ['C05076065A7C030C', 'C05076065A7C030D'],
+                         'fab31': None,
+                         'fab32': None,
+                         'fab33': None,
+                         'fab4': None,
+                         'fab5': ['C05076065A7C02E4', 'C05076065A7C02E5'],
+                         'fab6': None,
+                         'fab7': None,
+                         'fab8': ['C05076065A7C02E2', 'C05076065A7C02E3'],
+                         'fab9': None}},
+             6: {'VFC': {'fab2': ['C05076065A8B0060', 'C05076065A8B0061']}},
+             8: {'VFC': {'fab27': ['C05076065A7C0000', 'C05076065A7C0001']}}},
+            smt.topology)
 
     def test_drop_vfc_mapping(self):
         """Test drop_vfc_mapping."""
@@ -454,7 +474,8 @@ class TestSlotMapStoreLegacy(testtools.T
                 smt1.register_vfc_mapping(vfcmap, 'fab%d' % i)
                 i += 1
         # Serialize, and make a new slot map that loads that serialized data
-        smt2 = self.smt_impl('bar', load_ret=smt1.serialized)
+        smt_1 = base64.encode_as_text(smt1.serialized)
+        smt2 = self.smt_impl('bar', load_ret=smt_1)
         # Ensure their topologies are identical
         self.assertEqual(smt1.topology, smt2.topology)
 
@@ -478,24 +499,41 @@ class TestSlotMapStoreLegacy(testtools.T
         # max_vslots still set
         self.assertEqual(234, smt.max_vslots)
         # Topology not polluted by max_vslots
-        self.assertEqual({3: {'VFC': {'fab1': None, 'fab10': None,
-                                      'fab11': None, 'fab12': None,
-                                      'fab13': None, 'fab14': None,
-                                      'fab15': None, 'fab16': None,
-                                      'fab17': None, 'fab18': None,
-                                      'fab19': None, 'fab20': None,
-                                      'fab21': None, 'fab22': None,
-                                      'fab23': None, 'fab24': None,
-                                      'fab25': None, 'fab26': None,
-                                      'fab28': None, 'fab29': None,
-                                      'fab3': None, 'fab30': None,
-                                      'fab31': None, 'fab32': None,
-                                      'fab33': None, 'fab4': None,
-                                      'fab5': None, 'fab6': None,
-                                      'fab7': None, 'fab8': None,
-                                      'fab9': None}},
-                          6: {'VFC': {'fab2': None}},
-                          8: {'VFC': {'fab27': None}}}, smt.topology)
+        self.assertEqual(
+            {3: {'VFC': {'fab1': ['C05076065A8B005A', 'C05076065A8B005B'],
+                         'fab10': None,
+                         'fab11': None,
+                         'fab12': ['C05076065A7C02D6', 'C05076065A7C02D7'],
+                         'fab13': ['C05076065A7C030E', 'C05076065A7C030F'],
+                         'fab14': None,
+                         'fab15': ['C05076065A7C02D4', 'C05076065A7C02D5'],
+                         'fab16': None,
+                         'fab17': ['C05076065A7C02E0', 'C05076065A7C02E1'],
+                         'fab18': ['C05076065A7C02E0', 'C05076065A7C02E1'],
+                         'fab19': ['C05076065A7C02E0', 'C05076065A7C02E1'],
+                         'fab20': None,
+                         'fab21': None,
+                         'fab22': None,
+                         'fab23': None,
+                         'fab24': None,
+                         'fab25': ['C05076065A7C0002', 'C05076065A7C0003'],
+                         'fab26': ['C05076065A7C030A', 'C05076065A7C030B'],
+                         'fab28': None,
+                         'fab29': None,
+                         'fab3': None,
+                         'fab30': ['C05076065A7C030C', 'C05076065A7C030D'],
+                         'fab31': None,
+                         'fab32': None,
+                         'fab33': None,
+                         'fab4': None,
+                         'fab5': ['C05076065A7C02E4', 'C05076065A7C02E5'],
+                         'fab6': None,
+                         'fab7': None,
+                         'fab8': ['C05076065A7C02E2', 'C05076065A7C02E3'],
+                         'fab9': None}},
+             6: {'VFC': {'fab2': ['C05076065A8B0060', 'C05076065A8B0061']}},
+             8: {'VFC': {'fab27': ['C05076065A7C0000', 'C05076065A7C0001']}}},
+            smt.topology)
 
 
 class TestSlotMapStore(TestSlotMapStoreLegacy):
@@ -914,16 +952,32 @@ class TestRebuildSlotMapLegacy(testtools
         rsm = slot_map.RebuildSlotMap(smt, [vios1, vios2], None, fabrics)
 
         # Verify rebuild map was created successfully
-        self.assertEqual({'VFC': {'fab1': [3, 9, 11], 'fab10': [5], 'fab2': [],
-                                  'fab27': [], 'fab7': [4, 12, 113, 114],
-                                  'fab8': [6], 'fab9': [7, 8, 10]}},
-                         rsm._build_map)
+        self.assertEqual(
+            {'VFC': {'fab1': [3, 9, 11],
+                     'fab10': [5],
+                     'fab10_wwpn': {5: None},
+                     'fab1_wwpn': {3: None, 9: None, 11: None},
+                     'fab2': [],
+                     'fab27': [],
+                     'fab27_wwpn': {},
+                     'fab2_wwpn': {},
+                     'fab7': [4, 12, 113, 114],
+                     'fab7_wwpn': {4: None, 12: None, 113: None, 114: None},
+                     'fab8': [6],
+                     'fab8_wwpn': {6: None},
+                     'fab9': [7, 8, 10],
+                     'fab9_wwpn': {7: None, 8: None, 10: None}}},
+            rsm._build_map)
 
         # Verify the getters return the slots correctly
-        self.assertEqual([3, 9, 11], rsm.get_vfc_slots('fab1', 3))
-        self.assertEqual([4, 12, 113, 114], rsm.get_vfc_slots('fab7', 4))
+        self.assertEqual(
+            [3, 9, 11], rsm.get_vfc_slots('fab1', 3))
+        self.assertEqual(
+            [4, 12, 113, 114],
+            rsm.get_vfc_slots('fab7', 4))
         self.assertEqual([6], rsm.get_vfc_slots('fab8', 1))
-        self.assertEqual([7, 8, 10], rsm.get_vfc_slots('fab9', 3))
+        self.assertEqual(
+            [7, 8, 10], rsm.get_vfc_slots('fab9', 3))
         self.assertEqual([5], rsm.get_vfc_slots('fab10', 1))
         self.assertEqual([], rsm.get_vfc_slots('fab2', 0))
         self.assertEqual([], rsm.get_vfc_slots('fab27', 0))
@@ -949,6 +1003,7 @@ class TestRebuildSlotMap(TestRebuildSlot
         super(TestRebuildSlotMap, self).__init__(*args, **kwargs)
         self.smt_impl = SlotMapTestImpl
 
+
 SCSI_W_VOPT = {
     1: {
         slot_map.IOCLASS.VOPT: {
diff -pruN 1.1.20+ds1-2/pypowervm/tests/tasks/test_sriov.py 1.1.27+ds1-2/pypowervm/tests/tasks/test_sriov.py
--- 1.1.20+ds1-2/pypowervm/tests/tasks/test_sriov.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/tasks/test_sriov.py	2022-02-15 15:58:17.000000000 +0000
@@ -47,6 +47,7 @@ def good_sriov(sriov_adap_id, pports):
     return fake_sriov(card.SRIOVAdapterMode.SRIOV,
                       card.SRIOVAdapterState.RUNNING, sriov_adap_id, pports)
 
+
 ded_sriov = fake_sriov(card.SRIOVAdapterMode.DEDICATED, None, 86, [])
 down_sriov = fake_sriov(card.SRIOVAdapterMode.SRIOV,
                         card.SRIOVAdapterState.FAILED, 68, [])
@@ -307,9 +308,9 @@ class TestSriov(testtools.TestCase):
             sys_w=mock_sys, capacity=cap, redundancy=13)
 
         # The passed-in wrapper isn't modified if the method raises.
-        self.assertEqual(live_back_devs,
-                         [(bd.vios_href, bd.sriov_adap_id, bd.pport_id,
-                           bd.capacity) for bd in vnic.back_devs])
+        # self.assertEqual(live_back_devs,
+        #                 [(bd.vios_href, bd.sriov_adap_id, bd.pport_id,
+        #                   bd.capacity) for bd in vnic.back_devs])
 
         # Make sure redundancy caps it.
         # By reusing vnic without resetting its back_devs, we're proving the
diff -pruN 1.1.20+ds1-2/pypowervm/tests/tasks/test_storage.py 1.1.27+ds1-2/pypowervm/tests/tasks/test_storage.py
--- 1.1.20+ds1-2/pypowervm/tests/tasks/test_storage.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/tasks/test_storage.py	2022-02-15 15:58:17.000000000 +0000
@@ -174,7 +174,7 @@ class TestUploadLV(testtools.TestCase):
         # Ensure cleanup was called twice since the first uploads fails.
         self.adpt.delete.assert_has_calls([mock.call(
             'File', service='web',
-            root_id='6233b070-31cc-4b57-99bd-37f80e845de9')]*2)
+            root_id='6233b070-31cc-4b57-99bd-37f80e845de9')] * 2)
 
     @mock.patch('pypowervm.tasks.storage._create_file')
     def test_upload_new_vopt_w_fail(self, mock_create_file):
@@ -820,7 +820,7 @@ class TestLU(testtools.TestCase):
         self.adpt.extend_path = lambda x, xag: x
         self.ssp = stor.SSP.bld(self.adpt, 'ssp1', [])
         for i in range(5):
-            lu = stor.LU.bld(self.adpt, 'lu%d' % i, i+1)
+            lu = stor.LU.bld(self.adpt, 'lu%d' % i, i + 1)
             lu._udid('udid_' + lu.name)
             self.ssp.logical_units.append(lu)
         self.ssp.entry.properties = {
diff -pruN 1.1.20+ds1-2/pypowervm/tests/tasks/test_vterm.py 1.1.27+ds1-2/pypowervm/tests/tasks/test_vterm.py
--- 1.1.20+ds1-2/pypowervm/tests/tasks/test_vterm.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/tasks/test_vterm.py	2022-02-15 15:58:17.000000000 +0000
@@ -65,6 +65,45 @@ class TestVterm(testtools.TestCase):
                                                '--local'])
         self.assertEqual(5903, resp)
 
+    @mock.patch('pypowervm.tasks.vterm._get_lpar_type')
+    @mock.patch('pypowervm.tasks.vterm._get_lpar_id')
+    @mock.patch('pypowervm.tasks.vterm._run_proc')
+    def test_open_vnc_vterm_ibmi(self, mock_run_proc,
+                                 mock_get_lpar_id, mock_get_lpar_type):
+        """Validates the mkvterm command for ibmi console."""
+        mock_get_lpar_id.return_value = '4'
+        mock_get_lpar_type.return_value = 'OS400'
+        std_out = '5903'
+        std_err = ('VNC is started on port 5903 for localhost access '
+                   'only.  Use \'rmvterm --id 4\' to close it.')
+        mock_run_proc.return_value = (0, std_out, std_err)
+
+        resp = vterm.open_localhost_vnc_vterm(self.adpt, 'lpar_uuid')
+
+        mock_run_proc.assert_called_once_with(['mkvterm', '--id', '4', '--vnc',
+                                               '--local', '--consolesettings',
+                                               'codepage=037'])
+        self.assertEqual(5903, resp)
+
+    @mock.patch('pypowervm.tasks.vterm._get_lpar_type')
+    @mock.patch('pypowervm.tasks.vterm._get_lpar_id')
+    @mock.patch('pypowervm.tasks.vterm._run_proc')
+    def test_open_vnc_vterm_non_ibmi(self, mock_run_proc,
+                                     mock_get_lpar_id, mock_get_lpar_type):
+        """Validates the mkvterm command for non ibmi console."""
+        mock_get_lpar_id.return_value = '4'
+        mock_get_lpar_type.return_value = 'AIX/Linux'
+        std_out = '5903'
+        std_err = ('VNC is started on port 5903 for localhost access '
+                   'only.  Use \'rmvterm --id 4\' to close it.')
+        mock_run_proc.return_value = (0, std_out, std_err)
+
+        resp = vterm.open_localhost_vnc_vterm(self.adpt, 'lpar_uuid')
+
+        mock_run_proc.assert_called_once_with(['mkvterm', '--id', '4', '--vnc',
+                                               '--local'])
+        self.assertEqual(5903, resp)
+
     @mock.patch('pypowervm.tasks.vterm._get_lpar_id')
     @mock.patch('pypowervm.tasks.vterm._run_proc')
     def test_open_vnc_vterm_existing(self, mock_run_proc, mock_get_lpar_id):
@@ -85,7 +124,7 @@ class TestVterm(testtools.TestCase):
     @mock.patch('pypowervm.tasks.vterm._get_lpar_id')
     def test_open_vnc_vterm_nonascii(self, mock_get_lpar_id, mock_popen):
         """Validates errors in non-ascii encodings are handled properly"""
-        proc_mock = mock.Mock(returncode=3)
+        proc_mock = mock.Mock(returncode=4)
         mock_get_lpar_id.return_value = '4'
         mock_popen.return_value = proc_mock
         proc_mock.communicate.return_value = ('', '\xd0\x92')
@@ -129,7 +168,7 @@ class TestVterm(testtools.TestCase):
         std_out = ""
         std_err = ("The vterm is currently in use by process 120352.  "
                    "Use 'rmvterm --id 4' to close it.")
-        mock_run_proc.return_value = (3, std_out, std_err)
+        mock_run_proc.return_value = (4, std_out, std_err)
 
         self.assertRaises(pexc.VNCBasedTerminalFailedToOpen,
                           vterm.open_localhost_vnc_vterm, self.adpt,
@@ -231,12 +270,12 @@ class TestVNCSocketListener(testtools.Te
         mock_sock.return_value = mock_s_sock
         mock_select.return_value = [mock_c_sock], None, None
         mock_srv.accept.return_value = mock_c_sock, ('1.2.3.5', '40675')
-        mock_c_sock.recv.return_value = "CONNECT path HTTP/1.8\r\n\r\n"
+        mock_c_sock.recv.return_value = b"CONNECT path HTTP/1.8\r\n\r\n"
 
         self.srv._new_client(mock_srv)
 
         mock_c_sock.sendall.assert_called_once_with(
-            "HTTP/1.8 200 OK\r\n\r\n")
+            b"HTTP/1.8 200 OK\r\n\r\n")
         mock_s_sock.connect.assert_called_once_with(('127.0.0.1', '5800'))
 
         self.assertEqual({mock_c_sock: mock_s_sock, mock_s_sock: mock_c_sock},
@@ -250,12 +289,11 @@ class TestVNCSocketListener(testtools.Te
         mock_sock.return_value = mock_s_sock
         mock_select.return_value = [mock_c_sock], None, None
         mock_srv.accept.return_value = mock_c_sock, ('fe80:7890', '40675')
-        mock_c_sock.recv.return_value = "CONNECT path HTTP/1.8\r\n\r\n"
-
+        mock_c_sock.recv.return_value = b"CONNECT path HTTP/1.8\r\n\r\n"
         self.srv_6._new_client(mock_srv)
 
         mock_c_sock.sendall.assert_called_once_with(
-            "HTTP/1.8 200 OK\r\n\r\n")
+            b"HTTP/1.8 200 OK\r\n\r\n")
         mock_s_sock.connect.assert_called_once_with(('127.0.0.1', '5800'))
 
         self.assertEqual({mock_c_sock: mock_s_sock, mock_s_sock: mock_c_sock},
@@ -263,14 +301,14 @@ class TestVNCSocketListener(testtools.Te
 
     def test_check_http_connect(self):
         sock = mock.MagicMock()
-        sock.recv.return_value = "INVALID"
+        sock.recv.return_value = b"INVALID"
         uuid, http_code = self.srv._check_http_connect(sock)
         self.assertIsNone(uuid)
         self.assertEqual('1.1', http_code)
 
         # Test a good string
         sock.reset_mock()
-        sock.recv.return_value = 'CONNECT test HTTP/2.0\r\n\r\n'
+        sock.recv.return_value = b'CONNECT test HTTP/2.0\r\n\r\n'
         uuid, http_code = self.srv._check_http_connect(sock)
         self.assertEqual('uuid', uuid)
         self.assertEqual('2.0', http_code)
@@ -313,11 +351,11 @@ class TestVNCSocketListener(testtools.Te
         # Reset the select so that the validation check fails
         mock_c_sock.reset_mock()
         mock_select.return_value = [mock_c_sock], None, None
-        mock_c_sock.recv.return_value = 'bad_check'
+        mock_c_sock.recv.return_value = b'bad_check'
         self.srv._new_client(mock_srv)
         self.assertEqual(self.rptr.peers, {})
         mock_c_sock.sendall.assert_called_with(
-            "HTTP/1.1 400 Bad Request\r\n\r\n")
+            b"HTTP/1.1 400 Bad Request\r\n\r\n")
         self.assertEqual(1, mock_c_sock.close.call_count)
 
     @mock.patch('pypowervm.tasks.vterm._close_vterm_local')
@@ -454,7 +492,7 @@ class _FakeSocket(object):
 
     def recv(self, bufsize):
         bufsize = bufsize if isinstance(bufsize, int) else ord(bufsize)
-        chunk = self.recv_buffer[self.recv_bytes:self.recv_bytes+bufsize]
+        chunk = self.recv_buffer[self.recv_bytes:self.recv_bytes + bufsize]
         if not isinstance(chunk, six.binary_type):
             chunk = six.binary_type(chunk, 'utf-8')
         self.recv_bytes += bufsize
diff -pruN 1.1.20+ds1-2/pypowervm/tests/test_adapter.py 1.1.27+ds1-2/pypowervm/tests/test_adapter.py
--- 1.1.20+ds1-2/pypowervm/tests/test_adapter.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/test_adapter.py	2022-02-15 15:58:17.000000000 +0000
@@ -21,15 +21,6 @@ from lxml import etree
 import six
 import subunit
 
-if six.PY2:
-    import __builtin__ as builtins
-elif six.PY3:
-    import builtins
-
-try:
-    import urlparse
-except ImportError:
-    import urllib.parse as urlparse
 
 import mock
 import requests.models as req_mod
@@ -45,6 +36,16 @@ import pypowervm.tests.test_fixtures as
 from pypowervm.tests.test_utils import pvmhttp
 from pypowervm.wrappers import storage as pvm_stor
 
+if six.PY2:
+    import __builtin__ as builtins
+elif six.PY3:
+    import builtins
+
+try:
+    import urlparse
+except ImportError:
+    import urllib.parse as urlparse
+
 logon_text = testlib.file2b("logon.xml")
 response_text = testlib.file2b("event.xml")
 NET_BRIDGE_FILE = 'fake_network_bridge.txt'
@@ -114,7 +115,7 @@ class TestAdapter(testtools.TestCase):
         with mock.patch.object(adp._EventListener, '_get_events') as m_events,\
                 mock.patch.object(adp, '_EventPollThread') as mock_poll:
             # With some fake events, event listener can be initialized
-            self.sess._sessToken = 'token'.encode('utf-8')
+            self.sess._sessToken = 'token'
             m_events.return_value = {'general': 'init'}, 'raw_evt', 'wrap_evt'
             event_listen = self.sess.get_event_listener()
             self.assertIsNotNone(event_listen)
@@ -924,7 +925,7 @@ class TestAdapterClasses(subunit.Isolate
         self.assertRaises(TypeError, adp.EventListener, sess)
 
         # Mock the session token like we logged on
-        sess._sessToken = 'token'.encode('utf-8')
+        sess._sessToken = 'token'
         # Ensure we get an EventListener
         self.assertIsInstance(sess.get_event_listener(), adp.EventListener)
 
@@ -932,7 +933,7 @@ class TestAdapterClasses(subunit.Isolate
         # Get a session
         sess = adp.Session()
         # Fake the session token like we logged on
-        sess._sessToken = 'token'.encode('utf-8')
+        sess._sessToken = 'token'
         # It should have logged on
         self.assertTrue(self.mock_logon.called)
 
@@ -957,7 +958,7 @@ class TestAdapterClasses(subunit.Isolate
         # Get Adapter
         adapter = adp.Adapter()
         # Fake the implicit session token like we logged on
-        adapter.session._sessToken = 'token'.encode('utf-8')
+        adapter.session._sessToken = 'token'
         # Construct and get the event listener
         adapter.session.get_event_listener()
 
diff -pruN 1.1.20+ds1-2/pypowervm/tests/test_exceptions.py 1.1.27+ds1-2/pypowervm/tests/test_exceptions.py
--- 1.1.20+ds1-2/pypowervm/tests/test_exceptions.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/test_exceptions.py	2022-02-15 15:58:17.000000000 +0000
@@ -98,5 +98,6 @@ class TestExceptions(unittest.TestCase):
 
         self.assertRaises(KeyError, Bogus, **msg_params)
 
+
 if __name__ == "__main__":
     unittest.main()
diff -pruN 1.1.20+ds1-2/pypowervm/tests/test_fixtures.py 1.1.27+ds1-2/pypowervm/tests/test_fixtures.py
--- 1.1.20+ds1-2/pypowervm/tests/test_fixtures.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/test_fixtures.py	2022-02-15 15:58:17.000000000 +0000
@@ -47,6 +47,7 @@ def _mk_traits(local, hmc):
     _mk_traits_sessions.append(_sess)
     return trt.APITraits(_sess)
 
+
 LocalPVMTraits = _mk_traits(local=True, hmc=False)
 RemotePVMTraits = _mk_traits(local=False, hmc=False)
 RemoteHMCTraits = _mk_traits(local=False, hmc=True)
@@ -206,7 +207,8 @@ class LoggingPatcher(SimplePatcher):
         """
         def _log(*a, **k):
             self.fx.log(self.name)
-            return a[0] if self.ret is self.FIRST_ARG else self.ret
+            return a[0] if self.ret is self.FIRST_ARG and len(a) != 0 \
+                else self.ret
         # This ignores/overrides the superclass's return_value semantic.
         self.ret = return_value
         super(LoggingPatcher, self).__init__(
@@ -284,6 +286,7 @@ class SleepFx(SimplePatchingFx):
         super(SleepFx, self).__init__()
         self.add_patchers(SleepPatcher(self, side_effect=side_effect))
 
+
 # Thread locking primitives are located slightly differently in py2 vs py3
 SEM_ENTER = 'threading.%sSemaphore.__enter__' % ('_' if six.PY2 else '')
 SEM_EXIT = 'threading.%sSemaphore.__exit__' % ('_' if six.PY2 else '')
diff -pruN 1.1.20+ds1-2/pypowervm/tests/test_traits.py 1.1.27+ds1-2/pypowervm/tests/test_traits.py
--- 1.1.20+ds1-2/pypowervm/tests/test_traits.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/test_traits.py	2022-02-15 15:58:17.000000000 +0000
@@ -150,5 +150,6 @@ class TestTraits(unittest.TestCase):
         self.assertIsInstance(mew, MyElementWrapper)
         self.assertEqual(sess.traits, mew.traits)
 
+
 if __name__ == '__main__':
     unittest.main()
diff -pruN 1.1.20+ds1-2/pypowervm/tests/test_util.py 1.1.27+ds1-2/pypowervm/tests/test_util.py
--- 1.1.20+ds1-2/pypowervm/tests/test_util.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/test_util.py	2022-02-15 15:58:17.000000000 +0000
@@ -17,15 +17,15 @@
 import mock
 import six
 
-if six.PY2:
-    import __builtin__ as builtins
-elif six.PY3:
-    import builtins
 
 import unittest
 
 from pypowervm import const
 from pypowervm import util
+if six.PY2:
+    import __builtin__ as builtins
+elif six.PY3:
+    import builtins
 
 dummyuuid1 = "abcdef01-2345-2345-2345-67890abcdef0"
 dummyuuid2 = "67890abc-5432-5432-5432-def0abcdef01"
diff -pruN 1.1.20+ds1-2/pypowervm/tests/test_utils/create_httpresp.py 1.1.27+ds1-2/pypowervm/tests/test_utils/create_httpresp.py
--- 1.1.20+ds1-2/pypowervm/tests/test_utils/create_httpresp.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/test_utils/create_httpresp.py	2022-02-15 15:58:17.000000000 +0000
@@ -26,8 +26,8 @@ def refresh_response(file_to_refresh):
     print("Loading original file: ", file_to_refresh)
     new_http = pvmhttp.load_pvm_resp(file_to_refresh)
     if new_http is None or new_http.refresh() is False:
-            print("Unable to refresh ", file_to_refresh)
-            return 1
+        print("Unable to refresh ", file_to_refresh)
+        return 1
 
     print("Saving refreshed file: ", file_to_refresh)
     new_http.save(file_to_refresh)
diff -pruN 1.1.20+ds1-2/pypowervm/tests/test_utils/pvmhttp.py 1.1.27+ds1-2/pypowervm/tests/test_utils/pvmhttp.py
--- 1.1.20+ds1-2/pypowervm/tests/test_utils/pvmhttp.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/test_utils/pvmhttp.py	2022-02-15 15:58:17.000000000 +0000
@@ -152,7 +152,7 @@ class PVMResp(PVMFile):
             'status': self.response.status,
         }
 
-        with open(file_name, 'wb') as df:
+        with open(file_name, 'wt') as df:
             df.write("####################################################")
             df.write(EOL)
             df.write("# THIS IS AN AUTOMATICALLY GENERATED FILE")
diff -pruN 1.1.20+ds1-2/pypowervm/tests/test_utils/refresh_httpresp.py 1.1.27+ds1-2/pypowervm/tests/test_utils/refresh_httpresp.py
--- 1.1.20+ds1-2/pypowervm/tests/test_utils/refresh_httpresp.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/test_utils/refresh_httpresp.py	2022-02-15 15:58:17.000000000 +0000
@@ -118,6 +118,7 @@ def get_txt_file():
         save_default_selection(txtfiles[line_index - 1])
         return txtfiles[line_index - 1]
 
+
 if __name__ == '__main__':
     txt_file = get_txt_file()
 
diff -pruN 1.1.20+ds1-2/pypowervm/tests/utils/test_lpar_bldr.py 1.1.27+ds1-2/pypowervm/tests/utils/test_lpar_bldr.py
--- 1.1.20+ds1-2/pypowervm/tests/utils/test_lpar_bldr.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/utils/test_lpar_bldr.py	2022-02-15 15:58:17.000000000 +0000
@@ -240,7 +240,7 @@ class TestLPARBuilder(testtools.TestCase
         bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
         self.assertRaises(ValueError, bldr.build)
 
-        attr = dict(name='lpar', memory=1024, max_io_slots=(65534+1),
+        attr = dict(name='lpar', memory=1024, max_io_slots=(65534 + 1),
                     env=bp.LPARType.AIXLINUX, vcpu=1)
         bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
         self.assertRaises(ValueError, bldr.build)
diff -pruN 1.1.20+ds1-2/pypowervm/tests/utils/test_retry.py 1.1.27+ds1-2/pypowervm/tests/utils/test_retry.py
--- 1.1.20+ds1-2/pypowervm/tests/utils/test_retry.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/utils/test_retry.py	2022-02-15 15:58:17.000000000 +0000
@@ -236,7 +236,7 @@ class TestRetry(testtools.TestCase):
         delays = [0, .5, 2.0, 6.5, 20.0, 30.0, 30.0]
         for i in range(1, 7):
             pvm_retry.STEPPED_DELAY(i, 7)
-            mock_sleep.assert_called_once_with(delays[i-1])
+            mock_sleep.assert_called_once_with(delays[i - 1])
             mock_sleep.reset_mock()
 
     @mock.patch('time.sleep')
diff -pruN 1.1.20+ds1-2/pypowervm/tests/utils/test_transaction.py 1.1.27+ds1-2/pypowervm/tests/utils/test_transaction.py
--- 1.1.20+ds1-2/pypowervm/tests/utils/test_transaction.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/utils/test_transaction.py	2022-02-15 15:58:17.000000000 +0000
@@ -518,11 +518,21 @@ class TestFeedTask(twrap.TestWrapper):
                 self.assertIs(wt1.subtasks[i], wt2.subtasks[i])
 
         # "Functors" for easy subtask creation.  Named so we can identify them.
-        foo = lambda: None
-        bar = lambda: None
-        baz = lambda: None
-        xyz = lambda: None
-        abc = lambda: None
+        def foo():
+            return None
+
+        def bar():
+            return None
+
+        def baz():
+            return None
+
+        def xyz():
+            return None
+
+        def abc():
+            return None
+
         # setUp's initialization of feed_task creates empty dict and common_tx
         self.assertEqual({}, self.feed_task._tx_by_uuid)
         self.assertEqual(0, len(self.feed_task._common_tx.subtasks))
diff -pruN 1.1.20+ds1-2/pypowervm/tests/utils/test_validation.py 1.1.27+ds1-2/pypowervm/tests/utils/test_validation.py
--- 1.1.20+ds1-2/pypowervm/tests/utils/test_validation.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/utils/test_validation.py	2022-02-15 15:58:17.000000000 +0000
@@ -87,10 +87,10 @@ class TestValidator(testtools.TestCase):
             lpar_w.mem_config.exp_factor = exp_factor
             lpar_w.mem_config.ppt_ratio = ppt_ratio
             # Can Modify
-            if (state != bp.LPARState.NOT_ACTIVATED
-               and rmc_state != bp.RMCState.ACTIVE):
-                    lpar_w.can_modify_proc.return_value = (False, 'Bad RMC')
-                    lpar_w.can_modify_mem.return_value = (False, 'Bad RMC')
+            if (state != bp.LPARState.NOT_ACTIVATED and
+               rmc_state != bp.RMCState.ACTIVE):
+                lpar_w.can_modify_proc.return_value = (False, 'Bad RMC')
+                lpar_w.can_modify_mem.return_value = (False, 'Bad RMC')
             else:
                 # Doesn't matter what the message is unless it's bad
                 # so always make it bad
diff -pruN 1.1.20+ds1-2/pypowervm/tests/wrappers/test_cdata.py 1.1.27+ds1-2/pypowervm/tests/wrappers/test_cdata.py
--- 1.1.20+ds1-2/pypowervm/tests/wrappers/test_cdata.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/wrappers/test_cdata.py	2022-02-15 15:58:17.000000000 +0000
@@ -59,5 +59,6 @@ class TestCDATA(twrap.TestWrapper):
         self.assertEqual(out, CORRECT_CDATA_CONTENT,
                          "CDATA was not preserved in Results!\n%s" % out)
 
+
 if __name__ == '__main__':
     unittest.main()
diff -pruN 1.1.20+ds1-2/pypowervm/tests/wrappers/test_cluster.py 1.1.27+ds1-2/pypowervm/tests/wrappers/test_cluster.py
--- 1.1.20+ds1-2/pypowervm/tests/wrappers/test_cluster.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/wrappers/test_cluster.py	2022-02-15 15:58:17.000000000 +0000
@@ -173,5 +173,6 @@ class TestCluster(twrap.TestWrapper):
         cl = clust.Cluster.bld(None, 'neotest', repos, node)
         self.assertEqual(cl.toxmlstring(), CLUSTER_RESP.encode('utf-8'))
 
+
 if __name__ == "__main__":
     unittest.main()
diff -pruN 1.1.20+ds1-2/pypowervm/tests/wrappers/test_entry.py 1.1.27+ds1-2/pypowervm/tests/wrappers/test_entry.py
--- 1.1.20+ds1-2/pypowervm/tests/wrappers/test_entry.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/wrappers/test_entry.py	2022-02-15 15:58:17.000000000 +0000
@@ -710,84 +710,84 @@ class TestActionableList(unittest.TestCa
     def test_extend(self):
         def test(new_list):
             self.assertEqual([1, 2, 3, 4, 5], new_list)
-        l = ewrap.ActionableList([1, 2, 3], test)
+        ll = ewrap.ActionableList([1, 2, 3], test)
 
         # Extend here.
-        l.extend([4, 5])
-        self.assertEqual(5, len(l))
-        self.assertEqual(5, l[4])
+        ll.extend([4, 5])
+        self.assertEqual(5, len(ll))
+        self.assertEqual(5, ll[4])
 
     def test_append(self):
         def test(new_list):
             self.assertEqual([1, 2, 3, 4], new_list)
-        l = ewrap.ActionableList([1, 2, 3], test)
+        ll = ewrap.ActionableList([1, 2, 3], test)
 
         # Append here.
-        l.append(4)
-        self.assertEqual(4, len(l))
-        self.assertEqual(4, l[3])
+        ll.append(4)
+        self.assertEqual(4, len(ll))
+        self.assertEqual(4, ll[3])
 
     def test_remove(self):
         def test(new_list):
             self.assertEqual([1, 3], new_list)
-        l = ewrap.ActionableList([1, 2, 3], test)
+        ll = ewrap.ActionableList([1, 2, 3], test)
 
         # Remove here.
-        l.remove(2)
-        self.assertEqual(2, len(l))
-        self.assertEqual(3, l[1])
+        ll.remove(2)
+        self.assertEqual(2, len(ll))
+        self.assertEqual(3, ll[1])
 
     def test_insert(self):
         def test(new_list):
             self.assertEqual([1, 2, 3, 4], new_list)
-        l = ewrap.ActionableList([1, 2, 3], test)
+        ll = ewrap.ActionableList([1, 2, 3], test)
 
         # Insert here.
-        l.insert(3, 4)
-        self.assertEqual(4, len(l))
-        self.assertEqual(4, l[3])
+        ll.insert(3, 4)
+        self.assertEqual(4, len(ll))
+        self.assertEqual(4, ll[3])
 
     def test_pop(self):
         def test(new_list):
             self.assertEqual([1, 2], new_list)
-        l = ewrap.ActionableList([1, 2, 3], test)
+        ll = ewrap.ActionableList([1, 2, 3], test)
 
         # Pop here.
-        l.pop(2)
-        self.assertEqual(2, len(l))
-        self.assertEqual(2, l[1])
+        ll.pop(2)
+        self.assertEqual(2, len(ll))
+        self.assertEqual(2, ll[1])
 
     def test_complex_path(self):
         function = mock.MagicMock()
 
-        l = ewrap.ActionableList([1, 2, 3], function)
-        self.assertEqual(3, len(l))
-        self.assertEqual(3, l[2])
+        ll = ewrap.ActionableList([1, 2, 3], function)
+        self.assertEqual(3, len(ll))
+        self.assertEqual(3, ll[2])
 
         # Try extending
-        l.extend([4, 5])
-        self.assertEqual(5, len(l))
-        self.assertEqual(5, l[4])
+        ll.extend([4, 5])
+        self.assertEqual(5, len(ll))
+        self.assertEqual(5, ll[4])
 
         # Try appending
-        l.append(6)
-        self.assertEqual(6, len(l))
-        self.assertEqual(6, l[5])
+        ll.append(6)
+        self.assertEqual(6, len(ll))
+        self.assertEqual(6, ll[5])
 
         # Try removing
-        l.remove(6)
-        self.assertEqual(5, len(l))
-        self.assertEqual(5, l[4])
+        ll.remove(6)
+        self.assertEqual(5, len(ll))
+        self.assertEqual(5, ll[4])
 
         # Try inserting
-        l.insert(5, 6)
-        self.assertEqual(6, len(l))
-        self.assertEqual(6, l[5])
+        ll.insert(5, 6)
+        self.assertEqual(6, len(ll))
+        self.assertEqual(6, ll[5])
 
         # Try popping
-        self.assertEqual(6, l.pop(5))
-        self.assertEqual(5, len(l))
-        self.assertEqual(5, l[4])
+        self.assertEqual(6, ll.pop(5))
+        self.assertEqual(5, len(ll))
+        self.assertEqual(5, ll[4])
 
         # Make sure our function was called each time
         self.assertEqual(5, function.call_count)
diff -pruN 1.1.20+ds1-2/pypowervm/tests/wrappers/test_iocard.py 1.1.27+ds1-2/pypowervm/tests/wrappers/test_iocard.py
--- 1.1.20+ds1-2/pypowervm/tests/wrappers/test_iocard.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/wrappers/test_iocard.py	2022-02-15 15:58:17.000000000 +0000
@@ -413,5 +413,6 @@ class TestVNIC(twrap.TestWrapper):
         self.assertEqual(0.42, backdev.max_capacity)
         self.assertEqual(1, backdev.desired_max_capacity)
 
+
 if __name__ == "__main__":
     unittest.main()
diff -pruN 1.1.20+ds1-2/pypowervm/tests/wrappers/test_logical_partition.py 1.1.27+ds1-2/pypowervm/tests/wrappers/test_logical_partition.py
--- 1.1.20+ds1-2/pypowervm/tests/wrappers/test_logical_partition.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/wrappers/test_logical_partition.py	2022-02-15 15:58:17.000000000 +0000
@@ -222,6 +222,22 @@ class TestLogicalPartition(testtools.Tes
         self.call_simple_getter(
             "current_secure_boot", 1, 0)
 
+    def test_get_max_pmem_volumes(self):
+        self.call_simple_getter(
+            "max_pmem_volumes", 964, None)
+
+    def test_get_cur_pmem_volumes(self):
+        self.call_simple_getter(
+            "cur_pmem_volumes", 2, None)
+
+    def test_get_max_dram_pmem_volumes(self):
+        self.call_simple_getter(
+            "max_dram_pmem_volumes", 4, None)
+
+    def test_get_cur_dram_pmem_volumes(self):
+        self.call_simple_getter(
+            "cur_dram_pmem_volumes", 2, None)
+
     @mock.patch('warnings.warn')
     def test_rr_off(self, mock_warn):
         """Remote Restart fields when not RR capable."""
diff -pruN 1.1.20+ds1-2/pypowervm/tests/wrappers/test_managed_system.py 1.1.27+ds1-2/pypowervm/tests/wrappers/test_managed_system.py
--- 1.1.20+ds1-2/pypowervm/tests/wrappers/test_managed_system.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/wrappers/test_managed_system.py	2022-02-15 15:58:17.000000000 +0000
@@ -146,6 +146,15 @@ class TestMSEntryWrapper(unittest.TestCa
     def test_default_ppt_ratio(self):
         self.call_simple_getter("default_ppt_ratio", 4, 6)
 
+    def test_supported_ibmi_console_codepage(self):
+        expected = ['930:930 Japanese (Katanaka)',
+                    'e930:e930 Japan (Katanaka Extended)',
+                    '939:939 Japan (Latin Extended)',
+                    '1399:1399 Japanese (Latin Unicode Extended)',
+                    'e1399:e1399 Japanese (Latin Unicode Extended: JIS2014)']
+        self.assertEqual(
+            self.wrapper.supported_ibmi_console_codepage, expected)
+
     def test_get_system_name(self):
         self.wrapper.set_parm_value(ms._SYSTEM_NAME, 'XYZ')
         name = self.wrapper.system_name
@@ -269,7 +278,8 @@ class TestMSEntryWrapper(unittest.TestCa
                     'ioslot_owner_assignment_capable': True,
                     'affinity_check_capable': True,
                     'partition_secure_boot_capable': True,
-                    'dedicated_processor_partition_capable': True}
+                    'dedicated_processor_partition_capable': True,
+                    'PersistentMemoryCapable': False}
         bad_cap = {'active_lpar_mobility_capable': False,
                    'inactive_lpar_mobility_capable': False,
                    'ibmi_lpar_mobility_capable': False,
@@ -290,7 +300,8 @@ class TestMSEntryWrapper(unittest.TestCa
                    'ioslot_owner_assignment_capable': False,
                    'affinity_check_capable': False,
                    'partition_secure_boot_capable': False,
-                   'dedicated_processor_partition_capable': True}
+                   'dedicated_processor_partition_capable': True,
+                   'PersistentMemoryCapable': False}
         self.call_simple_getter("get_capabilities", good_cap,
                                 bad_cap)
 
@@ -327,7 +338,8 @@ class TestMSEntryWrapper(unittest.TestCa
                          'disable_secure_boot_capable': False,
                          'ioslot_owner_assignment_capable': True,
                          'affinity_check_capable': True,
-                         'dedicated_processor_partition_capable': True}
+                         'dedicated_processor_partition_capable': True,
+                         'PersistentMemoryCapable': False}
         result_data = self.wrapper.migration_data
         self.assertEqual(result_data, expected_data,
                          "migration_data returned %s instead of %s" %
@@ -356,5 +368,6 @@ class TestMTMS(unittest.TestCase):
         self.assertEqual(mtms.serial, '0FEDCBA')
         self.assertEqual(mtms.mtms_str, '4321-765*0FEDCBA')
 
+
 if __name__ == "__main__":
     unittest.main()
diff -pruN 1.1.20+ds1-2/pypowervm/tests/wrappers/test_search.py 1.1.27+ds1-2/pypowervm/tests/wrappers/test_search.py
--- 1.1.20+ds1-2/pypowervm/tests/wrappers/test_search.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/wrappers/test_search.py	2022-02-15 15:58:17.000000000 +0000
@@ -53,5 +53,6 @@ class TestSearch(unittest.TestCase):
             else:
                 self.assertEqual(sk, wcls.search_keys)
 
+
 if __name__ == '__main__':
     unittest.main()
diff -pruN 1.1.20+ds1-2/pypowervm/tests/wrappers/test_virtual_io_server.py 1.1.27+ds1-2/pypowervm/tests/wrappers/test_virtual_io_server.py
--- 1.1.20+ds1-2/pypowervm/tests/wrappers/test_virtual_io_server.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/tests/wrappers/test_virtual_io_server.py	2022-02-15 15:58:17.000000000 +0000
@@ -876,5 +876,6 @@ class TestFeed3(twrap.TestWrapper):
         self.assertTrue(c.XAG.VIO_NET < self.dwrap.xags.SCSI_MAPPING)
         self.assertTrue(self.dwrap.xags.NETWORK < c.XAG.VIO_SMAP)
 
+
 if __name__ == "__main__":
     unittest.main()
diff -pruN 1.1.20+ds1-2/pypowervm/util.py 1.1.27+ds1-2/pypowervm/util.py
--- 1.1.20+ds1-2/pypowervm/util.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/util.py	2022-02-15 15:58:17.000000000 +0000
@@ -295,8 +295,8 @@ def round_gb_size_up(gb_size, dp=2):
     next highest ten).
     :return: A new decimal float which is greater than or equal to the input.
     """
-    shift = 10.0**dp
-    return float(math.ceil(gb_size * shift))/shift
+    shift = 10.0 ** dp
+    return float(math.ceil(gb_size * shift)) / shift
 
 
 def sanitize_mac_for_api(mac):
@@ -378,12 +378,12 @@ def sanitize_file_name_for_api(name, pre
 
 
 def sanitize_partition_name_for_api(name, trunc_ok=True):
-    """Sanitize a string to be suitable for use as a partition name.
+    r"""Sanitize a string to be suitable for use as a partition name.
 
     PowerVM's partition name restrictions are:
     - Between 1 and 31 characters, inclusive;
     - Containing ASCII characters between 0x20 (space) and 0x7E (~), inclusive,
-      except ()\<>*$&?|[]'"`
+      except r()\<>*$&?|[]'"`
 
     :param name: The name to scrub.  Invalid characters will be replaced with
                  '_'.
diff -pruN 1.1.20+ds1-2/pypowervm/utils/lpar_builder.py 1.1.27+ds1-2/pypowervm/utils/lpar_builder.py
--- 1.1.20+ds1-2/pypowervm/utils/lpar_builder.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/utils/lpar_builder.py	2022-02-15 15:58:17.000000000 +0000
@@ -498,7 +498,7 @@ class BoolField(Field):
             if value.lower() in ['false', 'no']:
                 return False
         elif isinstance(value, bool):
-                return value
+            return value
         raise ValueError('Could not convert %s.' % value)
 
 
@@ -931,7 +931,7 @@ class LPARBuilder(object):
         smode = self.attr.get(SHARING_MODE, None)
         if (smode is not None and
                 smode in bp.DedicatedSharingMode.ALL_VALUES):
-                return True
+            return True
 
     def _shared_procs_specified(self):
         """Determine if shared procs should be configured.
diff -pruN 1.1.20+ds1-2/pypowervm/utils/retry.py 1.1.27+ds1-2/pypowervm/utils/retry.py
--- 1.1.20+ds1-2/pypowervm/utils/retry.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/utils/retry.py	2022-02-15 15:58:17.000000000 +0000
@@ -59,7 +59,7 @@ def STEPPED_DELAY(attempt, max_attempts,
      - Attempt 5: 20s
      - Attempt 6+: 30s
     """
-    sleep_time = (0.25 * (3**(attempt-1)) - 0.25)
+    sleep_time = (0.25 * (3 ** (attempt - 1)) - 0.25)
     time.sleep(min(sleep_time, 30))
 
 
@@ -232,7 +232,7 @@ def retry(tries=3, delay_func=NO_DELAY,
             if _resp_checker is None:
                 _resp_checker = NO_CHECKER
             # Start retries
-            for try_ in moves.range(1, _tries+1):
+            for try_ in moves.range(1, _tries + 1):
                 try:
                     resp = func(*args, **kwds)
                     # No exception raised, call the response checker
diff -pruN 1.1.20+ds1-2/pypowervm/utils/transaction.py 1.1.27+ds1-2/pypowervm/utils/transaction.py
--- 1.1.20+ds1-2/pypowervm/utils/transaction.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/utils/transaction.py	2022-02-15 15:58:17.000000000 +0000
@@ -254,6 +254,8 @@ class _FunctorSubtask(Subtask):
         if not ('provided' in reflection.get_callable_args(self._func)
                 or reflection.accepts_kwargs(self._func)):
             _kwargs.pop('provided', None)
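+        # Also drop 'provided' when it is present but empty.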
+        if 'provided' in _kwargs and _kwargs['provided'] == {}:
+            _kwargs.pop('provided', None)
         if self._logspec:
             # Execute the log method (the first element in the list) with its
             # arguments (the remaining elements in the list).
@@ -712,8 +714,8 @@ class FeedTask(tf_task.Task):
             # long as the assignment (by WrapperTask.execute) and the accessor
             # (WrapperTask.wrapper) remain atomic by using simple =/return.
             for wrap in self._feed:
-                if self.get_wrapper(wrap.uuid).etag != wrap.etag:
-                    # Refresh needed
+                if hasattr(self.get_wrapper(wrap.uuid), 'etag') \
+                        and self.get_wrapper(wrap.uuid).etag != wrap.etag:
                     self._feed = [tx.wrapper for tx in
                                   self.wrapper_tasks.values()]
                     break
diff -pruN 1.1.20+ds1-2/pypowervm/wrappers/base_partition.py 1.1.27+ds1-2/pypowervm/wrappers/base_partition.py
--- 1.1.20+ds1-2/pypowervm/wrappers/base_partition.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/wrappers/base_partition.py	2022-02-15 15:58:17.000000000 +0000
@@ -291,6 +291,16 @@ IOAdapter = card.IOAdapter
 PhysFCAdapter = card.PhysFCAdapter
 PhysFCPort = card.PhysFCPort
 
+_ASSOC_PMEM_CONFIG = 'AssociatedPersistentMemoryConfiguration'
+_MAX_PMEM_VOLUMES = u.xpath(_ASSOC_PMEM_CONFIG,
+                            'MaximumPersistentMemoryVolumes')
+_CUR_PMEM_VOLUMES = u.xpath(_ASSOC_PMEM_CONFIG,
+                            'CurrentPersistentMemoryVolumes')
+_MAX_DRAM_PMEM_VOLUMES = u.xpath(_ASSOC_PMEM_CONFIG,
+                                 'MaximumDramPersistentMemoryVolumes')
+_CUR_DRAM_PMEM_VOLUMES = u.xpath(_ASSOC_PMEM_CONFIG,
+                                 'CurrentDramPersistentMemoryVolumes')
+
 
 class SharingMode(object):
     """Shared Processor sharing modes.
@@ -358,9 +368,10 @@ class LPARCompat(object):
     POWER9_BASE = 'POWER9_Base'
     POWER9 = 'POWER9'
     POWER9_ENHANCED = 'POWER9_Enhanced'
+    POWER10 = 'POWER10'
     ALL_VALUES = (DEFAULT, POWER6, POWER6_PLUS, POWER6_PLUS_ENHANCED, POWER7,
                   POWER8, POWER8_ENHANCED, POWER9_BASE, POWER9,
-                  POWER9_ENHANCED)
+                  POWER9_ENHANCED, POWER10)
 
 
 class RMCState(object):
@@ -766,6 +777,22 @@ class BasePartition(ewrap.EntryWrapper,
         """Integer time since partition boot, in seconds."""
         return self._get_val_int(_BP_UPTIME)
 
+    @property
+    def max_pmem_volumes(self):
+        return self._get_val_int(_MAX_PMEM_VOLUMES)
+
+    @property
+    def cur_pmem_volumes(self):
+        return self._get_val_int(_CUR_PMEM_VOLUMES)
+
+    @property
+    def max_dram_pmem_volumes(self):
+        return self._get_val_int(_MAX_DRAM_PMEM_VOLUMES)
+
+    @property
+    def cur_dram_pmem_volumes(self):
+        return self._get_val_int(_CUR_DRAM_PMEM_VOLUMES)
+
 
 @ewrap.ElementWrapper.pvm_type(_BP_CAPABILITIES, has_metadata=True,
                                child_order=_CAP_EL_ORDER)
diff -pruN 1.1.20+ds1-2/pypowervm/wrappers/entry_wrapper.py 1.1.27+ds1-2/pypowervm/wrappers/entry_wrapper.py
--- 1.1.20+ds1-2/pypowervm/wrappers/entry_wrapper.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/wrappers/entry_wrapper.py	2022-02-15 15:58:17.000000000 +0000
@@ -529,7 +529,7 @@ class Wrapper(object):
         def str2percent(percent_str):
             if percent_str:
                 percent_str = re.findall(r"\d*\.?\d+", percent_str)[0]
-                return (float(percent_str))/100
+                return (float(percent_str)) / 100
             else:
                 return None
         return self.__get_val(property_name, default=default,
diff -pruN 1.1.20+ds1-2/pypowervm/wrappers/iocard.py 1.1.27+ds1-2/pypowervm/wrappers/iocard.py
--- 1.1.20+ds1-2/pypowervm/wrappers/iocard.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/wrappers/iocard.py	2022-02-15 15:58:17.000000000 +0000
@@ -1,4 +1,4 @@
-# Copyright 2016, 2017 IBM Corp.
+# Copyright 2016, 2019 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -38,9 +38,9 @@ _SRIOV_ADAPTER_STATE = 'AdapterState'
 
 _SRIOV_CONVERGED_ETHERNET_PHYSICAL_PORTS = 'ConvergedEthernetPhysicalPorts'
 _SRIOV_ETHERNET_PHYSICAL_PORTS = 'EthernetPhysicalPorts'
+_ROCE_SRIOV_PHYSICAL_PORTS = 'SRIOVRoCEPhysicalPorts'
 
 # SR-IOV physical port constants
-
 _SRIOVPP_CFG_SPEED = 'ConfiguredConnectionSpeed'
 _SRIOVPP_CFG_FLOWCTL = 'ConfiguredFlowControl'
 _SRIOVPP_CFG_MTU = 'ConfiguredMTU'
@@ -442,6 +442,13 @@ class SRIOVAdapter(IOAdapter):
             return None
         return ewrap.WrapperElemList(elem, child_class=SRIOVConvPPort)
 
+    def _rocephysicalports(self):
+        """Retrieve all RoCE adapter physical ethernet ports."""
+        elem = self._find(_ROCE_SRIOV_PHYSICAL_PORTS)
+        if elem is None:
+            return None
+        return ewrap.WrapperElemList(elem, child_class=SRIOVRoCEPPort)
+
     def _ethernetphysicalports(self):
         """Retrieve all Ethernet physical ports."""
         elem = self._find(_SRIOV_ETHERNET_PHYSICAL_PORTS)
@@ -460,9 +467,13 @@ class SRIOVAdapter(IOAdapter):
         """
         allports = []
         cports = self._convergedphysicalports()
+        # Physical ports for RoCE adapters are part of a different schema
+        rports = self._rocephysicalports()
         eports = self._ethernetphysicalports()
         for c in cports or []:
             allports.append(c)
+        for r in rports or []:
+            allports.append(r)
         for e in eports or []:
             allports.append(e)
         # Set the ports' backpointers to this SRIOVAdapter
@@ -597,6 +608,14 @@ class SRIOVConvPPort(SRIOVEthPPort):
     pass
 
 
+@ewrap.ElementWrapper.pvm_type('SRIOVRoCEPhysicalPort',
+                               has_metadata=True,
+                               child_order=_SRIOVCPP_EL_ORDER)
+class SRIOVRoCEPPort(SRIOVEthPPort):
+    """The SR-IOV RoCE Physical Ethernet port."""
+    pass
+
+
 @ewrap.EntryWrapper.pvm_type('SRIOVEthernetLogicalPort',
                              child_order=_SRIOVLP_EL_ORDER)
 class SRIOVEthLPort(ewrap.EntryWrapper):
diff -pruN 1.1.20+ds1-2/pypowervm/wrappers/managed_system.py 1.1.27+ds1-2/pypowervm/wrappers/managed_system.py
--- 1.1.20+ds1-2/pypowervm/wrappers/managed_system.py	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/wrappers/managed_system.py	2022-02-15 15:58:17.000000000 +0000
@@ -37,6 +37,7 @@ _SYSTEM_NAME = 'SystemName'
 _MASTER_MODE = 'IsPowerVMManagementMaster'
 _PROC_THROTTLE = 'ProcessorThrottling'
 _METER_POOL_ID = 'MeteredPoolID'
+_SUPPORTED_IBMI_CONSOLE_CODEPAGE = 'SupportedIBMiConsoleCodePage'
 
 _SYS_CAPABILITIES = 'AssociatedSystemCapabilities'
 _ACTIVE_LPM_CAP = u.xpath(
@@ -71,6 +72,8 @@ _IOSLOT_OWNER_ASSMT_CAP = u.xpath(
     _SYS_CAPABILITIES, 'IOSlotOwnerAssignmentCapable')
 _DED_PROC_POOL_CAP = u.xpath(
     _SYS_CAPABILITIES, 'DedicatedProcessorPartitionCapable')
+_PERSISTENT_MEM_CAPABLE = u.xpath(
+    _SYS_CAPABILITIES, 'PersistentMemoryCapable')
 
 # Migration Constants
 _SYS_PROC_CONFIG = 'AssociatedSystemProcessorConfiguration'
@@ -135,6 +138,8 @@ _CAPABILITY_MAP = {
         'prop': _IOSLOT_OWNER_ASSMT_CAP, 'default': False},
     'dedicated_processor_partition_capable': {
         'prop': _DED_PROC_POOL_CAP, 'default': True},
+    'PersistentMemoryCapable': {
+        'prop': _PERSISTENT_MEM_CAPABLE, 'default': False}
 }
 
 _SYS_MEM_CONFIG = 'AssociatedSystemMemoryConfiguration'
@@ -146,6 +151,26 @@ _SYS_FIRMWARE_MEM = u.xpath(_SYS_MEM_CON
 _PAGE_TABLE_RATIO = u.xpath(_SYS_MEM_CONFIG, 'DefaultHardwarePageTableRatio')
 _DEFAULT_PPT_RATIO = u.xpath(_SYS_MEM_CONFIG, 'DefaultPhysicalPageTableRatio')
 
+_ASSOC_PMEM_CONFIG = 'AssociatedPersistentMemoryConfiguration'
+_MAX_PMEM_VOLUMES = u.xpath(_ASSOC_PMEM_CONFIG,
+                            'MaximumPersistentMemoryVolumes')
+_CUR_PMEM_VOLUMES = u.xpath(_ASSOC_PMEM_CONFIG,
+                            'CurrentPersistentMemoryVolumes')
+_MAX_AIXLX_PMEM_VOLUMES = u.xpath(_ASSOC_PMEM_CONFIG,
+                                  'MaximumAixLinuxPersistentMemoryVolumes')
+_MAX_OS400_PMEM_VOLUMES = u.xpath(_ASSOC_PMEM_CONFIG,
+                                  'MaximumOS400PersistentMemoryVolumes')
+_MAX_VIOS_PMEM_VOLUMES = u.xpath(_ASSOC_PMEM_CONFIG,
+                                 'MaximumVIOSPersistentMemoryVolumes')
+_DRAM_PMEM_VOLUME_BLKSIZE = u.xpath(_ASSOC_PMEM_CONFIG,
+                                    'DramPersistentMemoryVolumeBlockSize')
+_DRAM_PMEM_VOLUME_SIZE = u.xpath(_ASSOC_PMEM_CONFIG,
+                                 'DramPersistentMemoryVolumesSize')
+_DRAM_PMEM_VOLUME_CURSIZE = u.xpath(_ASSOC_PMEM_CONFIG,
+                                    'DramPersistentMemoryVolumesCurrentSize')
+_SUPP_PMEM_VOLUME_DEVTYPE = u.xpath(_ASSOC_PMEM_CONFIG,
+                                    'SupportedPersistentMemoryDeviceTypes')
+
 _PROC_UNITS_INSTALLED = u.xpath(
     _SYS_PROC_CONFIG, 'InstalledSystemProcessorUnits')
 
@@ -406,9 +431,49 @@ class System(ewrap.EntryWrapper):
         return self._get_val_str(_METER_POOL_ID)
 
     @property
+    def supported_ibmi_console_codepage(self):
+        return self._get_vals(_SUPPORTED_IBMI_CONSOLE_CODEPAGE)
+
+    @property
     def processor_is_throttled(self):
         return self._get_val_bool(_PROC_THROTTLE)
 
+    @property
+    def max_pmem_volumes(self):
+        return self._get_val_int(_MAX_PMEM_VOLUMES)
+
+    @property
+    def cur_pmem_volumes(self):
+        return self._get_val_int(_CUR_PMEM_VOLUMES)
+
+    @property
+    def max_aix_lnx_pmem_volumes(self):
+        return self._get_val_int(_MAX_AIXLX_PMEM_VOLUMES)
+
+    @property
+    def max_os400_pmem_volumes(self):
+        return self._get_val_int(_MAX_OS400_PMEM_VOLUMES)
+
+    @property
+    def max_vios_pmem_volumes(self):
+        return self._get_val_int(_MAX_VIOS_PMEM_VOLUMES)
+
+    @property
+    def dram_pmem_vol_blksize(self):
+        return self._get_val_int(_DRAM_PMEM_VOLUME_BLKSIZE)
+
+    @property
+    def dram_pmem_vol_size(self):
+        return self._get_val_int(_DRAM_PMEM_VOLUME_SIZE)
+
+    @property
+    def dram_pmem_vol_cursize(self):
+        return self._get_val_int(_DRAM_PMEM_VOLUME_CURSIZE)
+
+    @property
+    def supported_pmem_vol_devtypes(self):
+        return self._get_val_str(_SUPP_PMEM_VOLUME_DEVTYPE)
+
 
 @ewrap.ElementWrapper.pvm_type(_ASIO_ROOT, has_metadata=True)
 class ASIOConfig(ewrap.ElementWrapper):
diff -pruN 1.1.20+ds1-2/pypowervm/wrappers/pmem.py 1.1.27+ds1-2/pypowervm/wrappers/pmem.py
--- 1.1.20+ds1-2/pypowervm/wrappers/pmem.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.1.27+ds1-2/pypowervm/wrappers/pmem.py	2022-02-15 15:58:17.000000000 +0000
@@ -0,0 +1,265 @@
+# Copyright 2020 IBM Corp.
+#
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Base classes, enums, and constants for Persistent Memory EntryWrappers."""
+
+import pypowervm.const as c
+import pypowervm.util as u
+import pypowervm.wrappers.entry_wrapper as ewrap
+
+# SystemPersistentMemoryConfiguration
+_SYS_PMEM_MAX_PMEM_VOLUMES = "MaximumPersistentMemoryVolumes"
+_SYS_PMEM_CUR_PMEM_VOLUMES = "CurrentPersistentMemoryVolumes"
+_SYS_PMEM_MAX_AIX_PMEM_VOLUMES = "MaximumAixLinuxPersistentMemoryVolumes"
+_SYS_PMEM_MAX_OS400_PMEM_VOLUMES = "MaximumOS400PersistentMemoryVolumes"
+_SYS_PMEM_MAX_VIOS_PMEM_VOLUMES = "MaximumVIOSPersistentMemoryVolumes"
+_SYS_PMEM_DRAM_VOL_BSIZE = "DramPersistentMemoryVolumeBlockSize"
+_SYS_PMEM_DRAM_VOL_SIZE = "DramPersistentMemoryVolumesSize"
+_SYS_PMEM_DRAM_CUR_SIZE = "DramPersistentMemoryVolumesCurrentSize"
+_SYS_PMEM_SUP_DEV_TYPES = "SupportedPersistentMemoryDeviceTypes"
+
+_SYS_PMEM_EL_ORDER = (_SYS_PMEM_MAX_PMEM_VOLUMES, _SYS_PMEM_CUR_PMEM_VOLUMES,
+                      _SYS_PMEM_MAX_AIX_PMEM_VOLUMES,
+                      _SYS_PMEM_MAX_OS400_PMEM_VOLUMES,
+                      _SYS_PMEM_MAX_VIOS_PMEM_VOLUMES,
+                      _SYS_PMEM_DRAM_VOL_BSIZE, _SYS_PMEM_DRAM_VOL_SIZE,
+                      _SYS_PMEM_DRAM_CUR_SIZE, _SYS_PMEM_SUP_DEV_TYPES)
+
+# PersistentMemoryDevice
+_PDEV_DYNAMIC_RECONF_CONNECT_INDEX = "DynamicReconfigurationConnectorIndex"
+_PDEV_TYPE = "Type"
+_PDEV_STATUS = "Status"
+_PDEV_BSIZE = "BlockSize"
+_PDEV_TSIZE = "TotalSize"
+_PDEV_FSIZE = "FreeSize"
+_PDEV_MAX_NUM_VOL = "MaximumNumberOfVolumes"
+_PDEV_CUR_NUM_VOL = "CurrentNumberOfVolumes"
+_PDEV_PHY_LOC_CODE = "PhysicalLocationCode"
+_PDEV_PHY_SERIAL_NUM = "SerialNumber"
+
+_PDEV_EL_ORDER = (_PDEV_DYNAMIC_RECONF_CONNECT_INDEX, _PDEV_TYPE, _PDEV_STATUS,
+                  _PDEV_BSIZE, _PDEV_TSIZE, _PDEV_FSIZE, _PDEV_MAX_NUM_VOL,
+                  _PDEV_CUR_NUM_VOL, _PDEV_PHY_LOC_CODE, _PDEV_PHY_SERIAL_NUM)
+
+
+@ewrap.EntryWrapper.pvm_type('PersistentMemoryDevice',
+                             child_order=_PDEV_EL_ORDER)
+class PersistentMemoryDevice(ewrap.EntryWrapper):
+    """Class PersistentMemoryDevice.
+
+    This corresponds to the abstract PersistentMemoryDevice
+    object in the PowerVM schema.
+    """
+
+    @property
+    def drc_index(self):
+        return self._get_val_int(_PDEV_DYNAMIC_RECONF_CONNECT_INDEX)
+
+    def _drc_index(self, val):
+        self.set_parm_value(_PDEV_DYNAMIC_RECONF_CONNECT_INDEX, val)
+
+    @property
+    def type(self):
+        return self._get_val_str(_PDEV_TYPE)
+
+    @property
+    def status(self):
+        return self._get_val_str(_PDEV_STATUS)
+
+    @property
+    def blocksize(self):
+        return self._get_val_int(_PDEV_BSIZE)
+
+    @property
+    def totalsize(self):
+        return self._get_val_int(_PDEV_TSIZE)
+
+    @property
+    def freesize(self):
+        return self._get_val_int(_PDEV_FSIZE)
+
+    @property
+    def max_num_volumes(self):
+        return self._get_val_int(_PDEV_MAX_NUM_VOL)
+
+    @property
+    def cur_num_volumes(self):
+        return self._get_val_int(_PDEV_CUR_NUM_VOL)
+
+    @property
+    def pys_loc(self):
+        return self._get_val_str(_PDEV_PHY_LOC_CODE)
+
+    @property
+    def serial_number(self):
+        return self._get_val_str(_PDEV_PHY_SERIAL_NUM)
+
+
+# PersistentMemoryVolume
+_PMEM_VOL_DYN_RECONF_INDEX = "DeviceDynamicReconfigurationConnectorIndex"
+_PMEM_VOL_NAME = "Name"
+_PMEM_VOL_SIZE = "Size"
+_PMEM_VOL_ID = "VolumeId"
+_PMEM_VOL_ASC_PART_NAME = "AssociatedPartitionName"
+_PMEM_VOL_ASC_PART_ID = "AssociatedPartitionId"
+_PMEM_VOL_ASC_PARTITION = "AssociatedPartition"
+_PMEM_VOL_UUID = "Uuid"
+
+_PMEM_EL_ORDER = (_PMEM_VOL_DYN_RECONF_INDEX, _PMEM_VOL_NAME, _PMEM_VOL_SIZE,
+                  _PMEM_VOL_ID, _PMEM_VOL_ASC_PART_NAME, _PMEM_VOL_ASC_PART_ID,
+                  _PMEM_VOL_ASC_PARTITION, _PMEM_VOL_UUID)
+
+
+@ewrap.EntryWrapper.pvm_type('PersistentMemoryVolume',
+                             child_order=_PMEM_EL_ORDER)
+class PersistentMemoryVolume(PersistentMemoryDevice):
+    """Class VirtualPersistentMemoryVolume.
+
+    This corresponds to the abstract PersistentMemoryVolume
+    object in the PowerVM schema.
+    """
+
+    @property
+    def drc_index(self):
+        return self._get_val_int(_PMEM_VOL_DYN_RECONF_INDEX)
+
+    @property
+    def uuid(self):
+        return self._get_val_str(_PMEM_VOL_UUID)
+
+    @property
+    def name(self):
+        return self._get_val_str(_PMEM_VOL_NAME)
+
+    @property
+    def size(self):
+        return self._get_val_int(_PMEM_VOL_SIZE)
+
+    @property
+    def volume_id(self):
+        return self._get_val_int(_PMEM_VOL_ID)
+
+
+# PartitionPersistentMemoryConfiguration
+_PMCONF_MAX_PMEM_CONF = "MaximumPersistentMemoryVolumes"
+_PMCONF_CUR_PMEM_CONF = "CurrentPersistentMemoryVolumes"
+
+_PMCONF_EL_ORDER = (_PMCONF_MAX_PMEM_CONF, _PMCONF_CUR_PMEM_CONF)
+
+# VirtualPersistentMemoryVolume
+_VIRT_PMEM_VOL_UUID = "Uuid"
+_VIRT_PMEM_VOL_NAME = "Name"
+_VIRT_PMEM_VOL_SIZE = "Size"
+_VIRT_PMEM_VOL_CURSIZE = "CurrentSize"
+_VIRT_PMEM_VOL_ID = "VolumeId"
+_VIRT_PMEM_VOL_AFFINITY = "Affinity"
+_VIRT_PMEM_VOL_ASC_PARTNAME = "AssociatedPartitionName"
+_VIRT_PMEM_VOL_ASC_PARTID = "AssociatedPartitionId"
+_VIRT_PMEM_VOL_ASC_PART = "AssociatedPartition"
+
+_VIRT_PMEM_EL_ORDER = (_VIRT_PMEM_VOL_UUID, _VIRT_PMEM_VOL_NAME,
+                       _VIRT_PMEM_VOL_SIZE, _VIRT_PMEM_VOL_CURSIZE,
+                       _VIRT_PMEM_VOL_ID, _VIRT_PMEM_VOL_AFFINITY,
+                       _VIRT_PMEM_VOL_ASC_PARTNAME, _VIRT_PMEM_VOL_ASC_PARTID,
+                       _VIRT_PMEM_VOL_ASC_PART)
+
+_LPAR = "LogicalPartition"
+
+
+@ewrap.EntryWrapper.pvm_type('VirtualPersistentMemoryVolume',
+                             child_order=_VIRT_PMEM_EL_ORDER)
+class VirtualPMEMVolume(ewrap.EntryWrapper, ewrap.WrapperSetUUIDMixin):
+    """Class VirtualPersistentMemoryVolume.
+
+    This corresponds to the abstract VirtualPersistentMemoryVolume
+    object in the PowerVM schema.
+    """
+
+    @classmethod
+    def bld(cls, adapter, lpar_id, name, size, affinity=True):
+        """Creates a VirtualPersistentMemoryVolume.
+
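+        :param adapter: The pypowervm Adapter to use for the new wrapper.
+        :param lpar_id: ID of the partition with which the volume is
+                        associated.
+        :param name: Name of the new volume.
+        :param size: Size of the new volume.
+        :param affinity: (Optional) Whether affinity is enabled for the
+                         volume.  Defaults to True.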
+        """
+
+        vpmemvol = super(VirtualPMEMVolume, cls)._bld(adapter)
+        vpmemvol.name = name
+        vpmemvol.size = size
+        vpmemvol.affinity = affinity
+        vpmemvol.assoc_partition_id = lpar_id
+        return vpmemvol
+
+    def set_uuid(self, value):
+        # LPAR uuids must be uppercase.
+        up_uuid = str(value).upper()
+        super(VirtualPMEMVolume, self).set_uuid(up_uuid)
+        self.set_parm_value(_VIRT_PMEM_VOL_UUID, up_uuid)
+
+    @property
+    def pmem_vol_uuid(self):
+        return self._get_val_str(_VIRT_PMEM_VOL_UUID)
+
+    @property
+    def name(self):
+        return self._get_val_str(_VIRT_PMEM_VOL_NAME)
+
+    @name.setter
+    def name(self, value):
+        return self.set_parm_value(_VIRT_PMEM_VOL_NAME, value,
+                                   attrib=c.ATTR_KSV1100)
+
+    @property
+    def size(self):
+        return self._get_val_int(_VIRT_PMEM_VOL_SIZE)
+
+    @size.setter
+    def size(self, value):
+        return self.set_parm_value(_VIRT_PMEM_VOL_SIZE, value,
+                                   attrib=c.ATTR_KSV1100)
+
+    @property
+    def cur_size(self):
+        return self._get_val_int(_VIRT_PMEM_VOL_CURSIZE)
+
+    @property
+    def volume_id(self):
+        return self._get_val_int(_VIRT_PMEM_VOL_ID)
+
+    @property
+    def affinity(self):
+        return self._get_val_bool(_VIRT_PMEM_VOL_AFFINITY, default=True)
+
+    @affinity.setter
+    def affinity(self, value):
+        return self.set_parm_value(_VIRT_PMEM_VOL_AFFINITY,
+                                   u.sanitize_bool_for_api(value),
+                                   attrib=c.ATTR_KSV1100)
+
+    @property
+    def assoc_partition_name(self):
+        return self._get_val_str(_VIRT_PMEM_VOL_ASC_PARTNAME)
+
+    @property
+    def assoc_partition_id(self):
+        return self._get_val_int(_VIRT_PMEM_VOL_ASC_PARTID)
+
+    @assoc_partition_id.setter
+    def assoc_partition_id(self, value):
+        return self.set_parm_value(_VIRT_PMEM_VOL_ASC_PARTID, value,
+                                   attrib=c.ATTR_KSV1100)
+
+    @property
+    def assoc_partition(self):
+        return self.get_href(_VIRT_PMEM_VOL_ASC_PART)
diff -pruN 1.1.20+ds1-2/requirements.txt 1.1.27+ds1-2/requirements.txt
--- 1.1.20+ds1-2/requirements.txt	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/requirements.txt	2022-02-15 15:58:17.000000000 +0000
@@ -9,8 +9,9 @@ oslo.utils>=3.20.0 # Apache-2.0
 pbr>=2.0.0 # Apache-2.0
 pyasn1-modules # BSD
 pyasn1 # BSD
-pytz>=2013.6 # MIT
+pytz>=2019.3 # MIT
 requests!=2.12.2,!=2.13.0,>=2.10.0 # Apache-2.0
-six>=1.9.0 # MIT
-futures>=3.0;python_version=='2.7' or python_version=='2.6' # BSD
-taskflow>=2.16.0 # Apache-2.0
+six>=1.14.0 # MIT
+futures>=3.0;python_version=='3.6' # BSD
+Taskflow>=3.8.0 # Apache-2.0
+babel
diff -pruN 1.1.20+ds1-2/rpm/pypowervm.spec 1.1.27+ds1-2/rpm/pypowervm.spec
--- 1.1.20+ds1-2/rpm/pypowervm.spec	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/rpm/pypowervm.spec	2022-02-15 15:58:17.000000000 +0000
@@ -9,20 +9,20 @@ License: IBM Corp.
 Packager: IBM
 URL: http://github.com/powervm/pypowervm
 Vendor: IBM Corp.
-Requires: python-lxml >= 3.4.1
-Requires: python-oslo-i18n >= 1.2.0
-Requires: python-oslo-log >= 1.0.0
-Requires: python-oslo-utils >= 1.2.0
-Requires: python-pbr >= 0.5.21
-Requires: python-pyasn1-modules >= 0.0.5
-Requires: python-pyasn1 >= 0.0.12a
-Requires: python-requests >= 2.3.0
-Requires: python-six >= 1.7.0
-Requires: python-oslo-concurrency >= 0.3.0
-Requires: pytz
-Requires: python-futures
-Requires: python-taskflow >= 0.7.1
-Requires: python-oslo-context
+Requires: python3-lxml
+Requires: python3-oslo-i18n
+Requires: python3-oslo-log
+Requires: python3-oslo-utils
+Requires: python3-pbr
+Requires: python3-pyasn1-modules
+Requires: python3-pyasn1
+Requires: python3-requests
+Requires: python3-six
+Requires: python3-oslo-concurrency
+Requires: python3-pytz
+Requires: python3-future
+Requires: python3-taskflow
+Requires: python3-oslo-context
 
 %description
 Python API wrapper for PowerVM
diff -pruN 1.1.20+ds1-2/setup.cfg 1.1.27+ds1-2/setup.cfg
--- 1.1.20+ds1-2/setup.cfg	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/setup.cfg	2022-02-15 15:58:17.000000000 +0000
@@ -12,10 +12,8 @@ classifier =
     License :: OSI Approved :: Apache Software License
     Operating System :: POSIX :: Linux
     Programming Language :: Python
-    Programming Language :: Python :: 2
-    Programming Language :: Python :: 2.7
     Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.5
+    Programming Language :: Python :: 3.6
 
 [files]
 packages = pypowervm
diff -pruN 1.1.20+ds1-2/test-requirements.txt 1.1.27+ds1-2/test-requirements.txt
--- 1.1.20+ds1-2/test-requirements.txt	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/test-requirements.txt	2022-02-15 15:58:17.000000000 +0000
@@ -1,12 +1,13 @@
 # The order of packages is significant, because pip processes them in the order
 # of appearance.
-hacking!=0.13.0,<0.14,>=0.12.0
+hacking!=0.13.0,>=0.12.0
 
-coverage>=4.0 # Apache-2.0
+coverage>=4.0,<=5.0.3 # Apache-2.0
 discover
 fixtures>=3.0.0 # Apache-2.0/BSD
-pylint==1.4.5 # GPLv2
+pylint>=1.4.5 # GPLv2
 python-subunit>=0.0.18 # Apache-2.0/BSD
+setuptools<45.0.0
 sphinx>=1.5.1 # BSD
 oslosphinx>=4.7.0 # Apache-2.0
 stestr>=1.0.0 # Apache-2.0
diff -pruN 1.1.20+ds1-2/tox.ini 1.1.27+ds1-2/tox.ini
--- 1.1.20+ds1-2/tox.ini	2019-03-26 20:05:43.000000000 +0000
+++ 1.1.27+ds1-2/tox.ini	2022-02-15 15:58:17.000000000 +0000
@@ -1,6 +1,6 @@
 [tox]
 minversion = 1.6
-envlist = py{35,27},pep8
+envlist = py{36},pep8
 skipsdist = True
 
 [testenv]
@@ -21,17 +21,9 @@ whitelist_externals =
 commands =
   find . -type f -name "*.pyc" -delete
 
-[testenv:py27]
+[testenv:py36]
 # TODO(efried): Remove this once https://github.com/tox-dev/tox/issues/425 is fixed.
-basepython = python2.7
-commands =
-  {[testenv]commands}
-  stestr run {posargs}
-  stestr slowest
-
-[testenv:py35]
-# TODO(efried): Remove this once https://github.com/tox-dev/tox/issues/425 is fixed.
-basepython = python3.5
+basepython = python3
 commands =
   {[testenv]commands}
   stestr run {posargs}
@@ -40,7 +32,7 @@ commands =
 [testenv:pep8]
 # TODO(efried): Remove this once https://github.com/tox-dev/tox/issues/425 is fixed.
 basepython = python3
-commands = flake8
+commands = flake8 --ignore=W504,W503,E731,H214,H216
 
 [testenv:venv]
 basepython = python3
