From a50b897459d4bba70f1f76ba2a4b3ef0197bcb10 Mon Sep 17 00:00:00 2001 From: inori Date: Tue, 9 Jan 2024 22:38:04 -0500 Subject: [PATCH] Fujitsu Driver: Add parameter fujitsu_use_cli_copy Add a parameter fujitsu_use_cli_copy. If ``fujitsu_use_cli_copy`` is set to ``False``, create a Snapshot using the conventional SMI-S method. If ``fujitsu_use_cli_copy`` is set to ``True``, create a Snapshot using the CLI method "start copy-snap-opc" with specified "-source-lba" and "-destination-lba". "Copy Scope" will be set to "Extent", allowing volume extension of the source volume. Change-Id: I0e0753d11a5f9dec1978f0a8956e788b6d3c6324 --- .../unit/volume/drivers/test_fujitsu_dx.py | 117 ++++++- .../fujitsu/eternus_dx/eternus_dx_cli.py | 5 + .../fujitsu/eternus_dx/eternus_dx_common.py | 318 +++++++++++++----- .../drivers/fujitsu-eternus-dx-driver.rst | 68 +++- ...fujitsu-add-cli-copy-1647fb54970a186d.yaml | 24 ++ 5 files changed, 427 insertions(+), 105 deletions(-) create mode 100644 releasenotes/notes/fujitsu-add-cli-copy-1647fb54970a186d.yaml diff --git a/cinder/tests/unit/volume/drivers/test_fujitsu_dx.py b/cinder/tests/unit/volume/drivers/test_fujitsu_dx.py index 37b6adba848..48488c6e2c3 100644 --- a/cinder/tests/unit/volume/drivers/test_fujitsu_dx.py +++ b/cinder/tests/unit/volume/drivers/test_fujitsu_dx.py @@ -48,6 +48,7 @@ CONF = """ abcd1234_TPP abcd1234_RG abcd1234_OSVD +abcd1234_TPP """ TEST_VOLUME = { @@ -180,7 +181,7 @@ FAKE_POOLS = [{ }] FAKE_STATS = { - 'driver_version': '1.4.5', + 'driver_version': '1.4.6', 'storage_protocol': 'iSCSI', 'vendor_name': 'FUJITSU', 'QoS_support': True, @@ -190,7 +191,7 @@ FAKE_STATS = { 'pools': FAKE_POOLS, } FAKE_STATS2 = { - 'driver_version': '1.4.5', + 'driver_version': '1.4.6', 'storage_protocol': 'FC', 'vendor_name': 'FUJITSU', 'QoS_support': True, @@ -307,6 +308,15 @@ FAKE_SNAP_META = { 'FJ_Pool_Name': 'abcd1234_OSVD', 'FJ_SDV_Name': u'FJosv_OgEZj1mSvKRvIKOExKktlg==', 'FJ_SDV_No': FAKE_SDV_NO, + 'FJ_Pool_Type': 2 +} + +# Snapshot created on controller@113#abcd1234_TPP +FAKE_SNAP_META2 = { + 'FJ_Pool_Name': 'abcd1234_TPP', + 'FJ_SDV_Name': 'FJosv_OgEZj1mSvKRvIKOExKktlg==', + 'FJ_SDV_No': FAKE_SDV_NO, + 'FJ_Pool_Type': 5 } FAKE_SNAP_INFO = { @@ -314,6 +324,12 @@ FAKE_SNAP_INFO = { 'provider_location': str(FAKE_LOCATION2) } +# Snapshot created on controller@113#abcd1234_TPP +FAKE_SNAP_INFO2 = { + 'metadata': FAKE_SNAP_META2, + 'provider_location': str(FAKE_LOCATION2) +} + FAKE_LUN_META2 = { 'FJ_Pool_Type': 'Thinporvisioning_POOL', 'FJ_Volume_No': FAKE_LUN_NO1, @@ -418,7 +434,10 @@ class FakeEternusConnection(object): if InPool.get('InstanceID') == 'FUJITSU:RSP0005': job = {'TheElement': vol[1].path} else: - job = {'TheElement': vol[0].path} + if ElementName == 'FJosv_OgEZj1mSvKRvIKOExKktlg==': + job = {'TheElement': vol[3].path} + else: + job = {'TheElement': vol[0].path} elif MethodName == 'ReturnToStoragePool': VOL_STAT = '0' rc = 0 @@ -874,6 +893,30 @@ class FakeEternusConnection(object): snap_vol['provider_location'] = str(name2) volumes.append(snap_vol) + snap_vol2 = FJ_StorageVolume() + snap_vol2['name'] = TEST_SNAP['name'] + snap_vol2['poolpath'] = 'FUJITSU:TPP0004' + snap_vol2['CreationClassName'] = 'FUJITSU_StorageVolume' + snap_vol2['Name'] = FAKE_LUN_ID2 + snap_vol2['DeviceID'] = FAKE_LUN_ID2 + snap_vol2['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' + snap_vol2['SystemName'] = STORAGE_SYSTEM + snap_vol2['ElementName'] = 'FJosv_OgEZj1mSvKRvIKOExKktlg==' + snap_vol2.path = snap_vol + snap_vol2.path.classname = 
snap_vol['CreationClassName'] + + name4 = { + 'classname': 'FUJITSU_StorageVolume', + 'keybindings': { + 'CreationClassName': 'FUJITSU_StorageVolume', + 'SystemName': STORAGE_SYSTEM, + 'DeviceID': snap_vol['DeviceID'], + 'SystemCreationClassName': 'FUJITSU_StorageComputerSystem', + }, + } + snap_vol2['provider_location'] = str(name4) + volumes.append(snap_vol2) + clone_vol = FJ_StorageVolume() clone_vol['name'] = TEST_CLONE['name'] clone_vol['poolpath'] = 'FUJITSU:TPP0004' @@ -996,6 +1039,7 @@ class FJFCDriverTestCase(test.TestCase): self.configuration.cinder_eternus_config_file = self.config_file.name self.configuration.safe_get = self.fake_safe_get self.configuration.max_over_subscription_ratio = '20.0' + self.configuration.fujitsu_use_cli_copy = False self.mock_object(dx_common.FJDXCommon, '_get_eternus_connection', self.fake_eternus_connection) @@ -1102,6 +1146,8 @@ class FJFCDriverTestCase(test.TestCase): ret = '%s\r\n00\r\n0001\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('stop copy-session'): ret = '%s\r\n00\r\nCLI> ' % exec_cmdline + elif exec_cmdline.startswith('start copy-snap-opc'): + ret = '%s\r\n00\r\n0019\r\nCLI> ' % exec_cmdline else: ret = None return ret @@ -1170,7 +1216,7 @@ class FJFCDriverTestCase(test.TestCase): self.driver.delete_volume(TEST_VOLUME) - def test_create_and_delete_snapshot(self): + def test_create_and_delete_snapshot_using_smis(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) @@ -1182,6 +1228,25 @@ class FJFCDriverTestCase(test.TestCase): self.driver.delete_snapshot(TEST_SNAP) self.driver.delete_volume(TEST_VOLUME) + @mock.patch.object(dx_common, 'LOG') + def test_create_and_delete_snapshot_using_cli(self, mock_log): + self.configuration.fujitsu_use_cli_copy = True + driver = dx_fc.FJDXFCDriver(configuration=self.configuration) + self.driver = driver + + model_info = self.driver.create_volume(TEST_VOLUME) + self.volume_update(TEST_VOLUME, model_info) + self.assertEqual(FAKE_MODEL_INFO1, model_info) + + warning_msg = '_create_snapshot, Can not create SDV by SMI-S.' 
+ snap_info = self.driver.create_snapshot(TEST_SNAP) + self.volume_update(TEST_SNAP, snap_info) + self.assertEqual(FAKE_SNAP_INFO2, snap_info) + mock_log.warning.assert_called_with(warning_msg) + + self.driver.delete_snapshot(TEST_SNAP) + self.driver.delete_volume(TEST_VOLUME) + def test_create_volume_from_snapshot(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) @@ -1272,6 +1337,7 @@ class FJISCSIDriverTestCase(test.TestCase): self.configuration.cinder_eternus_config_file = self.config_file.name self.configuration.safe_get = self.fake_safe_get self.configuration.max_over_subscription_ratio = '20.0' + self.configuration.fujitsu_use_cli_copy = False self.mock_object(dx_common.FJDXCommon, '_get_eternus_connection', self.fake_eternus_connection) @@ -1379,6 +1445,8 @@ class FJISCSIDriverTestCase(test.TestCase): '\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('set qos-bandwidth-limit'): ret = '%s\r\n00\r\n0001\r\nCLI> ' % exec_cmdline + elif exec_cmdline.startswith('start copy-snap-opc'): + ret = '%s\r\n00\r\n0019\r\nCLI> ' % exec_cmdline else: ret = None return ret @@ -1448,7 +1516,7 @@ class FJISCSIDriverTestCase(test.TestCase): None) self.driver.delete_volume(TEST_VOLUME) - def test_create_and_delete_snapshot(self): + def test_create_and_delete_snapshot_using_smis(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) @@ -1460,6 +1528,25 @@ class FJISCSIDriverTestCase(test.TestCase): self.driver.delete_snapshot(TEST_SNAP) self.driver.delete_volume(TEST_VOLUME) + @mock.patch.object(dx_common, 'LOG') + def test_create_and_delete_snapshot_using_cli(self, mock_log): + self.configuration.fujitsu_use_cli_copy = True + driver = dx_fc.FJDXFCDriver(configuration=self.configuration) + self.driver = driver + + model_info = self.driver.create_volume(TEST_VOLUME) + self.volume_update(TEST_VOLUME, model_info) + self.assertEqual(FAKE_MODEL_INFO1, model_info) + + warning_msg = '_create_snapshot, Can not create SDV by SMI-S.' 
+ snap_info = self.driver.create_snapshot(TEST_SNAP) + self.volume_update(TEST_SNAP, snap_info) + self.assertEqual(FAKE_SNAP_INFO2, snap_info) + mock_log.warning.assert_called_with(warning_msg) + + self.driver.delete_snapshot(TEST_SNAP) + self.driver.delete_volume(TEST_VOLUME) + def test_create_volume_from_snapshot(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) @@ -1612,6 +1699,8 @@ class FJCLITestCase(test.TestCase): ret = '%s\r\n00\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('delete volume'): ret = '%s\r\n00\r\nCLI> ' % exec_cmdline + elif exec_cmdline.startswith('start copy-snap-opc'): + ret = '%s\r\n00\r\n0019\r\nCLI> ' % exec_cmdline else: ret = None return ret @@ -1769,6 +1858,23 @@ class FJCLITestCase(test.TestCase): versioninfo = self.cli._show_enclosure_status() self.assertEqual(FAKE_VERSION_INFO, versioninfo) + def test_start_copy_snap_opc(self): + FAKE_SNAP_OPC_OPTION = self.create_fake_options( + mode='normal', + source_volume_number=31, + destination_volume_number=39, + source_lba=0, + destination=0, + size=1 + ) + + FAKE_OPC_ID = '0019' + FAKE_OPC_INFO = {**FAKE_CLI_OUTPUT, + 'message': [FAKE_OPC_ID]} + + opc_id = self.cli._start_copy_snap_opc(**FAKE_SNAP_OPC_OPTION) + self.assertEqual(FAKE_OPC_INFO, opc_id) + def test_stop_copy_session(self): FAKE_SESSION_ID = '0001' FAKE_STOP_OUTPUT = {**FAKE_CLI_OUTPUT, 'message': []} @@ -1806,6 +1912,7 @@ class FJCommonTestCase(test.TestCase): self.configuration.cinder_eternus_config_file = self.config_file.name self.configuration.safe_get = self.fake_safe_get self.configuration.max_over_subscription_ratio = '20.0' + self.configuration.fujitsu_use_cli_copy = False self.mock_object(dx_common.FJDXCommon, '_get_eternus_connection', self.fake_eternus_connection) diff --git a/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_cli.py b/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_cli.py index 840283aa87e..453ae50d178 100644 --- a/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_cli.py +++ b/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_cli.py @@ -53,6 +53,7 @@ class FJDXCLI(object): 'show_copy_sessions': self._show_copy_sessions, 'show_volume_qos': self._show_volume_qos, 'show_enclosure_status': self._show_enclosure_status, + 'start_copy_snap_opc': self._start_copy_snap_opc, 'stop_copy_session': self._stop_copy_session, 'delete_volume': self._delete_volume } @@ -482,6 +483,10 @@ class FJDXCLI(object): return output + def _start_copy_snap_opc(self, **option): + """Exec start copy-snap-opc.""" + return self._exec_cli("start copy-snap-opc", **option) + def _stop_copy_session(self, **option): """Exec stop copy-session.""" return self._exec_cli("stop copy-session", **option) diff --git a/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py b/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py index 2accd213e11..31d2a42d458 100644 --- a/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py +++ b/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py @@ -53,6 +53,9 @@ FJ_ETERNUS_DX_OPT_opts = [ cfg.StrOpt('cinder_eternus_config_file', default='/etc/cinder/cinder_fujitsu_eternus_dx.xml', help='Config file for cinder eternus_dx volume driver.'), + cfg.BoolOpt('fujitsu_use_cli_copy', + default=False, + help='If True use CLI command to create snapshot.'), ] CONF.register_opts(FJ_ETERNUS_DX_OPT_opts, group=conf.SHARED_CONF_GROUP) @@ -71,10 +74,11 @@ class FJDXCommon(object): 1.4.3 - Add fragment capacity information of RAID Group. 
1.4.4 - Add support for update migrated volume. 1.4.5 - Add metadata for snapshot. + 1.4.6 - Add parameter fujitsu_use_cli_copy. """ - VERSION = "1.4.5" + VERSION = "1.4.6" stats = { 'driver_version': VERSION, 'storage_protocol': None, @@ -92,6 +96,7 @@ class FJDXCommon(object): self.configuration.append_config_values(FJ_ETERNUS_DX_OPT_opts) self.conn = None + self.use_cli_copy = self.configuration.fujitsu_use_cli_copy self.fjdxcli = {} self.model_name = self._get_eternus_model() self._check_user() @@ -136,7 +141,7 @@ class FJDXCommon(object): {'vid': volume['id'], 'vsize': volume['size']}) self.conn = self._get_eternus_connection() - volumesize = int(volume['size']) * units.Gi + volumesize = volume['size'] * units.Gi volumename = self._get_volume_name(volume, use_id=True) LOG.debug('_create_volume, volumename: %(volumename)s, ' @@ -664,112 +669,226 @@ class FJDXCommon(object): @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) def _create_snapshot(self, snapshot): - LOG.debug('create_snapshot, ' + LOG.debug('_create_snapshot, ' 'snapshot id: %(sid)s, volume id: %(vid)s.', {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) - self.conn = self._get_eternus_connection() snapshotname = snapshot['name'] volume = snapshot['volume'] volumename = snapshot['volume_name'] d_volumename = self._get_volume_name(snapshot, use_id=True) - s_volumename = self._get_volume_name(volume) vol_instance = self._find_lun(volume) - smis_service = self._find_eternus_service(CONSTANTS.REPL) + service_name = (CONSTANTS.REPL + if self.model_name != CONSTANTS.DX_S2 + else CONSTANTS.STOR_CONF) - # Check the existence of volume. - if not vol_instance: - # Volume not found on ETERNUS. - msg = (_('create_snapshot, ' - 'volumename: %(s_volumename)s, ' - 'source volume not found on ETERNUS.') - % {'s_volumename': s_volumename}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) + volume_size = snapshot['volume']['size'] * 1024 + + smis_service = self._find_eternus_service(service_name) if not smis_service: - msg = (_('create_snapshot, ' + msg = (_('_create_snapshot, ' 'volumename: %(volumename)s, ' - 'Replication Service not found.') - % {'volumename': volumename}) + '%(servicename)s not found.') + % {'volumename': volumename, + 'servicename': service_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) - # Get poolname from driver configuration file. - eternus_pool = self._get_drvcfg('EternusSnapPool') - # Check the existence of pool - pool = self._find_pool(eternus_pool) - if not pool: - msg = (_('create_snapshot, ' - 'eternus_pool: %(eternus_pool)s, ' - 'pool not found.') - % {'eternus_pool': eternus_pool}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) + # Get all pools information on ETERNUS. + pools_instance_list = self._find_all_pools_instances(self.conn) + # Get the user specified pool name. + pool_name_list = self._get_drvcfg('EternusSnapPool', multiple=True) - LOG.debug('create_snapshot, ' - 'snapshotname: %(snapshotname)s, ' - 'source volume name: %(volumename)s, ' - 'vol_instance.path: %(vol_instance)s, ' - 'dest_volumename: %(d_volumename)s, ' - 'pool: %(pool)s, ' - 'Invoke CreateElementReplica.', - {'snapshotname': snapshotname, - 'volumename': volumename, - 'vol_instance': vol_instance.path, - 'd_volumename': d_volumename, - 'pool': pool}) + poollen = len(pool_name_list) + for i in range(poollen): + # Traverse the user specified pool one by one. 
+ pool_instances, notfound_poolnames = self._find_pools( + [pool_name_list[i]], self.conn, + poolinstances_list=pools_instance_list) - if self.model_name != CONSTANTS.DX_S2: - smis_method = 'CreateElementReplica' - params = { - 'ElementName': d_volumename, - 'TargetPool': pool, - 'SyncType': self._pywbem_uint(7, '16'), - 'SourceElement': vol_instance.path - } + if pool_instances['pools']: + useable = pool_instances['pools'][0]['useable_capacity_mb'] + poolname = pool_instances['pools'][0]['pool_name'] + istpp = pool_instances['pools'][0]['thin_provisioning_support'] + if useable < 24 + volume_size: + continue + if not istpp: + # If it is a RAID Group pool, we need to determine + # the number of volumes and fragmentation capacity. + # The number of RAID Group pool volumes cannot exceed 128. + # The minimum space required for snapshot is 24MB. + fragment = pool_instances['pools'][0][ + 'fragment_capacity_mb'] + volcnt = pool_instances['pools'][0]['total_volumes'] + if volcnt >= 128 or fragment < 24 + volume_size: + LOG.debug('_create_volume, The pool: %(poolname)s ' + 'can not create volume. ' + 'Volume Count: %(volcnt)s, ' + 'Maximum fragment capacity: %(frag)s.', + {'poolname': poolname, + 'volcnt': volcnt, 'frag': fragment}) + continue + + pool_instance = pool_instances['pools'][0] + eternus_pool = pool_instance['pool_name'] + pool = pool_instance['path'] + if 'RSP' in pool['InstanceID']: + pooltype = CONSTANTS.RAIDGROUP + else: + pooltype = CONSTANTS.TPPOOL + + if self.use_cli_copy is False: + LOG.debug('_create_snapshot, ' + 'snapshotname: %(snapshotname)s, ' + 'source volume name: %(volumename)s, ' + 'vol_instance.path: %(vol_instance)s, ' + 'dest_volumename: %(d_volumename)s, ' + 'pool: %(pool)s, ' + 'Invoke CreateElementReplica.', + {'snapshotname': snapshotname, + 'volumename': volumename, + 'vol_instance': vol_instance.path, + 'd_volumename': d_volumename, + 'pool': eternus_pool}) + + if self.model_name != CONSTANTS.DX_S2: + smis_method = 'CreateElementReplica' + params = { + 'ElementName': d_volumename, + 'TargetPool': pool, + 'SyncType': self._pywbem_uint(7, '16'), + 'SourceElement': vol_instance.path + } + else: + smis_method = 'CreateReplica' + params = { + 'ElementName': d_volumename, + 'TargetPool': pool, + 'CopyType': self._pywbem_uint(4, '16'), + 'SourceElement': vol_instance.path + } + # Invoke method for create snapshot. 
+ rc, errordesc, job = self._exec_eternus_service( + smis_method, smis_service, + **params) + + if rc != 0: + LOG.warning('_create_snapshot, ' + 'snapshotname: %(snapshotname)s, ' + 'source volume name: %(volumename)s, ' + 'vol_instance.path: %(vol_instance)s, ' + 'dest volume name: %(d_volumename)s, ' + 'pool: %(pool)s, Return code: %(rc)lu, ' + 'Error: %(errordesc)s.', + {'snapshotname': snapshotname, + 'volumename': volumename, + 'vol_instance': vol_instance.path, + 'd_volumename': d_volumename, + 'pool': eternus_pool, + 'rc': rc, + 'errordesc': errordesc}) + continue + else: + element = job['TargetElement'] + d_volume_no = self._get_volume_number(element) + break + + else: + if pooltype == CONSTANTS.RAIDGROUP: + LOG.warning('_create_snapshot, ' + 'Can not create SDV by SMI-S.') + continue + configservice = self._find_eternus_service( + CONSTANTS.STOR_CONF) + vol_size = snapshot['volume']['size'] * units.Gi + + LOG.debug('_create_snapshot, ' + 'CreateOrModifyElementFromStoragePool, ' + 'ConfigService: %(service)s, ' + 'ElementName: %(volumename)s, ' + 'InPool: %(eternus_pool)s, ' + 'ElementType: %(pooltype)u, ' + 'Size: %(volumesize)u.', + {'service': configservice, + 'volumename': d_volumename, + 'eternus_pool': pool, + 'pooltype': pooltype, + 'volumesize': vol_size}) + + # Invoke method for create volume. + rc, errordesc, job = self._exec_eternus_service( + 'CreateOrModifyElementFromStoragePool', + configservice, + ElementName=d_volumename, + InPool=pool, + ElementType=self._pywbem_uint(pooltype, '16'), + Size=self._pywbem_uint(vol_size, '64')) + + if rc == 32769: + LOG.warning('_create_snapshot, RAID Group pool: %s. ' + 'Maximum number of Logical Volume in a ' + 'RAID Group has been reached. ' + 'Try other pool.', + pool) + continue + elif rc != 0: + msg = (_('_create_volume, ' + 'volumename: %(volumename)s, ' + 'poolname: %(eternus_pool)s, ' + 'Return code: %(rc)lu, ' + 'Error: %(errordesc)s.') + % {'volumename': volumename, + 'eternus_pool': pool, + 'rc': rc, + 'errordesc': errordesc}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + element = job['TheElement'] + d_volume_no = self._get_volume_number(element) + volume_no = self._get_volume_number(vol_instance) + volume_lba = int(vol_size / 512) + param_dict = ( + {'mode': 'normal', + 'source-volume-number': int(volume_no, 16), + 'destination-volume-number': int(d_volume_no, 16), + 'source-lba': 0, + 'destination-lba': 0, + 'size': volume_lba}) + + rc, emsg, clidata = self._exec_eternus_cli( + 'start_copy_snap_opc', + **param_dict) + + if rc != 0: + msg = (_('_create_snapshot, ' + 'create_volume failed. 
' + 'Return code: %(rc)lu, ' + 'Error: %(errormsg)s, ' + 'Message: %(clidata)s.') + % {'rc': rc, + 'errormsg': emsg, + 'clidata': clidata}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + break + else: + if notfound_poolnames: + LOG.warning('_create_snapshot, ' + 'pool names: %(notfound_poolnames)s ' + 'are not found.', + {'notfound_poolnames': notfound_poolnames}) else: - smis_method = 'CreateReplica' - params = { - 'ElementName': d_volumename, - 'TargetPool': pool, - 'CopyType': self._pywbem_uint(4, '16'), - 'SourceElement': vol_instance.path - } - # Invoke method for create snapshot - rc, errordesc, job = self._exec_eternus_service( - smis_method, smis_service, - **params) - - if rc != 0: - msg = (_('create_snapshot, ' - 'snapshotname: %(snapshotname)s, ' - 'source volume name: %(volumename)s, ' - 'vol_instance.path: %(vol_instance)s, ' - 'dest volume name: %(d_volumename)s, ' - 'pool: %(pool)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s.') - % {'snapshotname': snapshotname, - 'volumename': volumename, - 'vol_instance': vol_instance.path, - 'd_volumename': d_volumename, - 'pool': pool, - 'rc': rc, - 'errordesc': errordesc}) + # It means that all RAID Group pools do not meet + # the volume limit (<128), and the creation request of + # this volume will be rejected. + # If there is a thin pool available, it will not enter this branch. + msg = (_('_create_snapshot, volume id: %(sid)s, ' + 'All pools cannot create this volume.') + % {'sid': snapshot['id']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) - else: - element = job['TargetElement'] - d_volume_no = self._get_volume_number(element) - - LOG.debug('create_snapshot, ' - 'volumename:%(volumename)s, ' - 'Return code:%(rc)lu, ' - 'Error:%(errordesc)s.', - {'volumename': volumename, - 'rc': rc, - 'errordesc': errordesc}) # Create return value. element_path = { @@ -781,9 +900,12 @@ class FJDXCommon(object): 'vol_name': d_volumename, } - metadata = {'FJ_SDV_Name': d_volumename, - 'FJ_SDV_No': d_volume_no, - 'FJ_Pool_Name': eternus_pool} + metadata = { + 'FJ_SDV_Name': d_volumename, + 'FJ_SDV_No': d_volume_no, + 'FJ_Pool_Name': eternus_pool, + 'FJ_Pool_Type': pooltype + } d_metadata = self.get_metadata(snapshot) d_metadata.update(metadata) @@ -1386,7 +1508,8 @@ class FJDXCommon(object): LOG.debug('_find_all_pools_instances, poollist: %s', len(poollist)) return poollist - def _find_pools(self, poolname_list, conn): + def _find_pools(self, poolname_list, conn, + poolinstances_list=None): """Find pool instances by using pool name on ETERNUS.""" LOG.debug('_find_pools, pool names: %s.', poolname_list) @@ -1394,7 +1517,12 @@ class FJDXCommon(object): pools = [] # Get pools info from CIM instance(include info about instance path). 
-        poollist = self._find_all_pools_instances(conn)
+        if not poolinstances_list:
+            poollist = self._find_all_pools_instances(conn)
+            is_create = False
+        else:
+            poollist = poolinstances_list
+            is_create = True
 
         for pool, ptype in poollist:
             poolname = pool['ElementName']
@@ -1536,6 +1664,10 @@ class FJDXCommon(object):
                 thick_provisioning_support=not thin_enabled,
                 max_over_subscription_ratio=max_ratio,
             ))
+
+            if is_create:
+                single_pool['useable_capacity_mb'] = \
+                    pool['useable_capacity_mb']
             single_pool['multiattach'] = True
             pools_stats['pools'].append(single_pool)
 
diff --git a/doc/source/configuration/block-storage/drivers/fujitsu-eternus-dx-driver.rst b/doc/source/configuration/block-storage/drivers/fujitsu-eternus-dx-driver.rst
index 957ceb66313..26085f760b7 100644
--- a/doc/source/configuration/block-storage/drivers/fujitsu-eternus-dx-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/fujitsu-eternus-dx-driver.rst
@@ -185,23 +185,21 @@ Configuration
 ``EternusPool`` (Multiple setting allowed)
     Name of the storage pool for the volumes from ``ETERNUS DX setup``.
 
-    Use the pool RAID Group name or TPP name in the ETERNUS device.
+    Use the RAID Group name or TPP name of the pool in the ETERNUS device.
 
-``EternusSnapPool``
+``EternusSnapPool`` (Multiple setting allowed)
     Name of the storage pool for the snapshots from ``ETERNUS DX setup``.
 
-    Use the pool RAID Group name in the ETERNUS device.
+    Use the RAID Group name or TPP name of the pool in the ETERNUS device.
 
-    If you did not create a different pool for snapshots, use the same value as ``ETternusPool``.
+    If you did not create a different pool for snapshots, use the same value as ``EternusPool``.
 
 ``EternusISCSIIP`` (Multiple setting allowed)
     iSCSI connection IP address of the ETERNUS DX.
 
 .. note::
 
-   * For ``EternusSnapPool``, you can specify only RAID Group name
-     and cannot specify TPP name.
-   * You can specify the same RAID Group name for ``EternusPool`` and ``EternusSnapPool``
+   * You can specify the same RAID Group name or TPP name for ``EternusPool`` and ``EternusSnapPool``
     if you create volumes and snapshots on a same storage pool.
   * For ``EternusPool``, when multiple pools are specified, cinder-scheduler
     will select one from multiple pools to create the volume.
@@ -454,3 +452,59 @@ The following procedure shows how to set the QoS.
    ETERNUS OpenStack VolumeDriver ends the process to prevent the created
    volumes from being left in the ETERNUS AF/DX. If volumes fail to be
    created, the process terminates with an error.
+
+Specification of the Snapshot Creation Destination Pool
+-------------------------------------------------------
+
+A RAID Group or a Thin Provisioning Pool (TPP) can be specified as the snapshot
+creation destination pool. On an ETERNUS AF/DX with firmware version V10L60 or
+earlier, Thin Provisioning Pools (TPPs) cannot be used as the snapshot
+creation destination pool.
+
+Multiple snapshot creation destination pools can be specified.
+
+The specified pools are searched in the order written in the driver
+configuration file, and snapshots are created in the first pool in which
+they can be created.
+
+**Cautions**
+
+#. If the creation destination pool is a RAID Group, more than 128 snapshots
+   cannot be created. Therefore, to create more than 128 snapshots in a RAID
+   Group, multiple RAID Groups must be specified as snapshot creation
+   destination pools.
+
+#. When creating a snapshot, Cinder Scheduler checks the capacity of the pool
+   where the source volume is located. 
This may lead to the failure of snapshot
+   creation if this pool has insufficient capacity, even if
+   the snapshot pool specified by ``EternusSnapPool`` has sufficient capacity.
+
+#. If multiple snapshot creation destination pools are specified, they must
+   differ from the volume creation destination pools (``EternusPool`` and
+   ``EternusSnapPool`` can each be specified multiple times, but the same
+   pool name cannot be used for both parameters).
+   If the same pool name is specified for both and requests to create multiple
+   volumes and multiple snapshots are issued at the same time, the number of
+   logical volumes in a RAID Group may reach 128 and the operation may fail.
+
+#. To address the issue that a volume with snapshots cannot be extended, the
+   parameter ``fujitsu_use_cli_copy`` has been introduced.
+
+   The default value of ``fujitsu_use_cli_copy`` is ``False``.
+
+   If ``fujitsu_use_cli_copy`` is set to ``True``, snapshots are created using
+   the CLI method instead of the SMI-S method, allowing volume extension of
+   the source volume.
+
+   .. code-block:: console
+
+     $ cat /etc/cinder/cinder.conf
+     (snip)
+     [Backend1]
+     volume_driver=cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_fc.FJDXFCDriver
+     cinder_eternus_config_file = /etc/cinder/cinder_fujitsu_eternus_dx.xml
+     volume_backend_name = volume_backend_name1
+     fujitsu_use_cli_copy = True
+
+   Note that ``fujitsu_use_cli_copy`` cannot be set to ``True`` when the
+   target pool is a RAID Group.
diff --git a/releasenotes/notes/fujitsu-add-cli-copy-1647fb54970a186d.yaml b/releasenotes/notes/fujitsu-add-cli-copy-1647fb54970a186d.yaml
new file mode 100644
index 00000000000..dd1c981dde9
--- /dev/null
+++ b/releasenotes/notes/fujitsu-add-cli-copy-1647fb54970a186d.yaml
@@ -0,0 +1,24 @@
+---
+features:
+  - |
+    Fujitsu Eternus DX driver: Added CLI operations for snapshot creation
+
+    The Fujitsu Eternus DX driver used to create snapshots using SMI-S,
+    which made it impossible to extend the source volume afterwards.
+
+    To make the source volume extendable after creating a snapshot, an
+    additional parameter ``fujitsu_use_cli_copy`` is introduced with a
+    default value of ``False``.
+
+    * If ``fujitsu_use_cli_copy`` is set to ``False``, snapshots are created
+      using the conventional SMI-S method.
+
+    * If ``fujitsu_use_cli_copy`` is set to ``True``, snapshots are created
+      using the CLI method, allowing volume extension of the source volume.
+
+    Note that ``fujitsu_use_cli_copy`` cannot be set to ``True`` when the
+    target pool is a RAID Group.
+
+    See the `Fujitsu ETERNUS DX driver documentation
+    `_
+    for details.
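
Reviewer's note (not part of the patch): the sketch below condenses the
parameter derivation performed by the new CLI path in ``_create_snapshot``.
Volume numbers arrive from SMI-S as hexadecimal strings, and the copy range
is expressed as a count of 512-byte LBAs covering the whole source volume;
per the commit message, "Copy Scope" is set to "Extent" so the source volume
can still be extended. The helper name is hypothetical; only the dictionary
keys and the arithmetic come from the patch.

.. code-block:: python

   GiB = 1024 ** 3  # stands in for cinder's units.Gi

   def build_copy_snap_opc_params(source_vol_no, dest_vol_no, size_gb):
       """Build the kwargs passed to the 'start_copy_snap_opc' CLI wrapper.

       source_vol_no / dest_vol_no are hex volume numbers as returned by
       SMI-S (e.g. '001F'); size_gb is the source volume size in GiB.
       """
       volume_lba = (size_gb * GiB) // 512  # whole volume in 512-byte LBAs
       return {
           'mode': 'normal',
           'source-volume-number': int(source_vol_no, 16),
           'destination-volume-number': int(dest_vol_no, 16),
           'source-lba': 0,       # copy from the start of the source volume
           'destination-lba': 0,  # into the start of the new SDV
           'size': volume_lba,
       }

   # A 1 GiB source volume 0x001F copied to SDV 0x0027 (the volume numbers
   # used by test_start_copy_snap_opc) covers 2097152 LBAs:
   print(build_copy_snap_opc_params('001F', '0027', 1))

This also shows why the CLI path skips RAID Group pools with the
"Can not create SDV by SMI-S." warning: the OPC copy needs a destination
SDV created in advance, which the patch only does for TPP pools.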