From a7c715b4d08d369ad1246e23b54c36cf89d44a78 Mon Sep 17 00:00:00 2001 From: "hallur, parashuram" Date: Tue, 17 May 2016 23:06:01 +0530 Subject: [PATCH] Implementation for CoprHD Cinder Drivers This patch set includes following CoprHD Cinder drivers 1-Cinder iSCSI Block Volume Driver for CoprHD 2-Cinder FC Block Volume Driver for CoprHD 3-Cinder ScaleIO Block Volume Driver for CoprHD CI trigger: run emc-coprhd DocImpact Implements: blueprint coprhd-cinder-drivers Change-Id: I5fe7ac8190edb2405981c4750dcecde00159a3ec --- cinder/opts.py | 6 + cinder/tests/unit/test_coprhd.py | 919 +++++++++++ cinder/volume/drivers/coprhd/__init__.py | 0 cinder/volume/drivers/coprhd/common.py | 1451 +++++++++++++++++ cinder/volume/drivers/coprhd/fc.py | 222 +++ .../volume/drivers/coprhd/helpers/__init__.py | 0 .../drivers/coprhd/helpers/authentication.py | 216 +++ .../drivers/coprhd/helpers/commoncoprhdapi.py | 517 ++++++ .../coprhd/helpers/consistencygroup.py | 220 +++ .../drivers/coprhd/helpers/exportgroup.py | 303 ++++ cinder/volume/drivers/coprhd/helpers/host.py | 104 ++ .../volume/drivers/coprhd/helpers/project.py | 88 + .../volume/drivers/coprhd/helpers/snapshot.py | 314 ++++ cinder/volume/drivers/coprhd/helpers/tag.py | 55 + .../volume/drivers/coprhd/helpers/tenant.py | 117 ++ .../drivers/coprhd/helpers/urihelper.py | 84 + .../drivers/coprhd/helpers/virtualarray.py | 79 + .../drivers/coprhd/helpers/virtualpool.py | 77 + .../volume/drivers/coprhd/helpers/volume.py | 523 ++++++ cinder/volume/drivers/coprhd/iscsi.py | 173 ++ cinder/volume/drivers/coprhd/scaleio.py | 324 ++++ ...cinder-coprhd-driver-11ebd149ea8610fd.yaml | 2 + 22 files changed, 5794 insertions(+) create mode 100644 cinder/tests/unit/test_coprhd.py create mode 100644 cinder/volume/drivers/coprhd/__init__.py create mode 100644 cinder/volume/drivers/coprhd/common.py create mode 100644 cinder/volume/drivers/coprhd/fc.py create mode 100644 cinder/volume/drivers/coprhd/helpers/__init__.py create mode 100644 cinder/volume/drivers/coprhd/helpers/authentication.py create mode 100644 cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py create mode 100644 cinder/volume/drivers/coprhd/helpers/consistencygroup.py create mode 100644 cinder/volume/drivers/coprhd/helpers/exportgroup.py create mode 100644 cinder/volume/drivers/coprhd/helpers/host.py create mode 100644 cinder/volume/drivers/coprhd/helpers/project.py create mode 100644 cinder/volume/drivers/coprhd/helpers/snapshot.py create mode 100644 cinder/volume/drivers/coprhd/helpers/tag.py create mode 100644 cinder/volume/drivers/coprhd/helpers/tenant.py create mode 100644 cinder/volume/drivers/coprhd/helpers/urihelper.py create mode 100644 cinder/volume/drivers/coprhd/helpers/virtualarray.py create mode 100644 cinder/volume/drivers/coprhd/helpers/virtualpool.py create mode 100644 cinder/volume/drivers/coprhd/helpers/volume.py create mode 100644 cinder/volume/drivers/coprhd/iscsi.py create mode 100644 cinder/volume/drivers/coprhd/scaleio.py create mode 100644 releasenotes/notes/cinder-coprhd-driver-11ebd149ea8610fd.yaml diff --git a/cinder/opts.py b/cinder/opts.py index 8fbd98a2d85..9ca027d18d0 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -67,6 +67,10 @@ from cinder.volume.drivers import blockbridge as \ from cinder.volume.drivers.cloudbyte import options as \ cinder_volume_drivers_cloudbyte_options from cinder.volume.drivers import coho as cinder_volume_drivers_coho +from cinder.volume.drivers.coprhd import common as \ + cinder_volume_drivers_coprhd_common +from cinder.volume.drivers.coprhd 
import scaleio as \ + cinder_volume_drivers_coprhd_scaleio from cinder.volume.drivers import datera as cinder_volume_drivers_datera from cinder.volume.drivers.dell import dell_storagecenter_common as \ cinder_volume_drivers_dell_dellstoragecentercommon @@ -217,6 +221,7 @@ def list_opts(): cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi. storwize_svc_iscsi_opts, cinder_backup_drivers_glusterfs.glusterfsbackup_service_opts, + cinder_volume_drivers_coprhd_scaleio.scaleio_opts, cinder_backup_drivers_tsm.tsm_opts, cinder_volume_drivers_fujitsu_eternusdxcommon. FJ_ETERNUS_DX_OPT_opts, @@ -234,6 +239,7 @@ def list_opts(): cinder_volume_drivers_sheepdog.sheepdog_opts, [cinder_api_middleware_sizelimit.max_request_body_size_opt], cinder_volume_drivers_solidfire.sf_opts, + cinder_volume_drivers_coprhd_common.volume_opts, cinder_backup_drivers_swift.swiftbackup_service_opts, cinder_volume_drivers_cloudbyte_options. cloudbyte_add_qosgroup_opts, diff --git a/cinder/tests/unit/test_coprhd.py b/cinder/tests/unit/test_coprhd.py new file mode 100644 index 00000000000..1ee3df22326 --- /dev/null +++ b/cinder/tests/unit/test_coprhd.py @@ -0,0 +1,919 @@ +# Copyright (c) 2012 - 2016 EMC Corporation, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
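+
+# Unit tests for the CoprHD iSCSI, FC and ScaleIO Cinder drivers. The
+# MockedEMCCoprHDDriverCommon class defined below replaces the CoprHD REST
+# helper objects with mock.Mock instances, so these tests run without a
+# live CoprHD instance; the dictionaries that follow are canned REST
+# responses consumed by those mocks.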
+ +from mock import Mock + +from cinder import context +from cinder.objects import fields +from cinder import test +from cinder.volume.drivers.coprhd import common as coprhd_common +from cinder.volume.drivers.coprhd import fc as coprhd_fc +from cinder.volume.drivers.coprhd import iscsi as coprhd_iscsi +from cinder.volume.drivers.coprhd import scaleio as coprhd_scaleio +from cinder.volume import volume_types + +""" +Test Data required for mocking +""" +export_group_details_data = { + "inactive": False, + "initiators": [{"creation_time": 1392194176020, + "host": {"id": "urn:storageos:Host:3e21edff-8662-4e60-ab5", + "link": {"href": "/compute/hosts/urn:storageos:H", + "rel": "self"}}, + "hostname": "lglw7134", + "id": "urn:storageos:Initiator:13945431-06b7-44a0-838c-50", + "inactive": False, + "initiator_node": "20:00:00:90:FA:13:81:8D", + "initiator_port": "iqn.1993-08.org.deb:01:222", + "link": {"href": "/compute/initiators/urn:storageos:Initi", + "rel": "self"}, + "protocol": "iSCSI", + "registration_status": "REGISTERED", + "tags": []}], + "name": "ccgroup", + "project": 'project', + "tags": [], + "tenant": 'tenant', + "type": "Host", + "varray": {"id": "urn:storageos:VirtualArray:5af376e9-ce2f-493d-9079-a872", + "link": {"href": "/vdc/varrays/urn:storageos:VirtualArray:5af3", + "rel": "self"} + }, + "volumes": [{"id": "urn:storageos:Volume:6dc64865-bb25-431c-b321-ac268f16" + "a7ae:vdc1", + "lun": 1 + }] +} + +varray_detail_data = {"name": "varray"} + +export_group_list = ["urn:storageos:ExportGroup:2dbce233-7da0-47cb-8ff3-68f48"] + +iscsi_itl_list = {"itl": [{"hlu": 3, + "initiator": {"id": "urn:storageos:Initiator:13945", + "link": {"rel": "self", + "href": "/comput"}, + "port": "iqn.1993-08.org.deb:01:222"}, + "export": {"id": "urn:storageos:ExportGroup:2dbce2", + "name": "ccgroup", + "link": {"rel": "self", + "href": "/block/expo"}}, + "device": {"id": "urn:storageos:Volume:aa1fc84a-af", + "link": {"rel": "self", + "href": "/block/volumes/urn:s"}, + "wwn": "600009700001957015735330303535"}, + "target": {"id": "urn:storageos:StoragePort:d7e42", + "link": {"rel": "self", + "href": "/vdc/stor:"}, + "port": "50:00:09:73:00:18:95:19", + 'ip_address': "10.10.10.10", + 'tcp_port': '22'}}, + {"hlu": 3, + "initiator": {"id": "urn:storageos:Initiator:13945", + "link": {"rel": "self", + "href": "/comput"}, + "port": "iqn.1993-08.org.deb:01:222"}, + "export": {"id": "urn:storageos:ExportGroup:2dbce2", + "name": "ccgroup", + "link": {"rel": "self", + "href": "/block/expo"}}, + "device": {"id": "urn:storageos:Volume:aa1fc84a-af", + "link": {"rel": "self", + "href": "/block/volumes/urn:s"}, + "wwn": "600009700001957015735330303535"}, + "target": {"id": "urn:storageos:StoragePort:d7e42", + "link": {"rel": "self", + "href": "/vdc/stor:"}, + "port": "50:00:09:73:00:18:95:19", + 'ip_address': "10.10.10.10", + 'tcp_port': '22'}}]} + +fcitl_itl_list = {"itl": [{"hlu": 3, + "initiator": {"id": "urn:storageos:Initiator:13945", + "link": {"rel": "self", + "href": "/comput"}, + "port": "12:34:56:78:90:12:34:56"}, + "export": {"id": "urn:storageos:ExportGroup:2dbce2", + "name": "ccgroup", + "link": {"rel": "self", + "href": "/block/expo"}}, + "device": {"id": "urn:storageos:Volume:aa1fc84a-af", + "link": {"rel": "self", + "href": "/block/volumes/urn:s"}, + "wwn": "600009700001957015735330303535"}, + "target": {"id": "urn:storageos:StoragePort:d7e42", + "link": {"rel": "self", + "href": "/vdc/stor:"}, + "port": "12:34:56:78:90:12:34:56", + 'ip_address': "10.10.10.10", + 'tcp_port': '22'}}, + {"hlu": 3, + 
"initiator": {"id": "urn:storageos:Initiator:13945", + "link": {"rel": "self", + "href": "/comput"}, + "port": "12:34:56:78:90:12:34:56"}, + "export": {"id": "urn:storageos:ExportGroup:2dbce2", + "name": "ccgroup", + "link": {"rel": "self", + "href": "/block/expo"}}, + "device": {"id": "urn:storageos:Volume:aa1fc84a-af", + "link": {"rel": "self", + "href": "/block/volumes/urn:s"}, + "wwn": "600009700001957015735330303535"}, + "target": {"id": "urn:storageos:StoragePort:d7e42", + "link": {"rel": "self", + "href": "/vdc/stor:"}, + "port": "12:34:56:78:90:12:34:56", + 'ip_address': "10.10.10.10", + 'tcp_port': '22'}}]} + +scaleio_itl_list = {"itl": [{"hlu": -1, + "initiator": {"id": + "urn:storageos:Initiator:920aee", + "link": {"rel": "self", + "href": + "/compute/initiators"}, + "port": "bfdf432500000004"}, + "export": {"id": + "urn:storageos:ExportGroup:5449235", + "name": "10.108.225.109", + "link": {"rel": "self", + "href": + "/block/exports/urn:stor"}}, + "device": {"id": + "urn:storageos:Volume:b3624a83-3eb", + "link": {"rel": "self", + "href": "/block/volume"}, + "wwn": + "4F48CC4C27A43248092128B400000004"}, + "target": {}}, + {"hlu": -1, + "initiator": {"id": + "urn:storageos:Initiator:920aee", + "link": {"rel": "self", + "href": + "/compute/initiators/"}, + "port": "bfdf432500000004"}, + "export": {"id": + "urn:storageos:ExportGroup:5449235", + "name": "10.108.225.109", + "link": {"rel": "self", + "href": + "/block/exports/urn:stor"}}, + "device": {"id": + "urn:storageos:Volume:c014e96a-557", + "link": {"rel": "self", + "href": + "/block/volumes/urn:stor"}, + "wwn": + "4F48CC4C27A43248092129320000000E"}, + "target": {}}]} + + +def get_test_volume_data(volume_type_id): + test_volume = {'name': 'test-vol1', + 'size': 1, + 'volume_name': 'test-vol1', + 'id': '1', + 'consistencygroup_id': None, + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'test-vol1', + 'display_description': 'test volume', + 'volume_type_id': volume_type_id} + return test_volume + + +def get_source_test_volume_data(volume_type_id): + test_volume = {'name': 'source_test-vol1', + 'size': 1, + 'volume_name': 'source_test-vol1', + 'id': '1234', + 'consistencygroup_id': None, + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'source_test-vol1', + 'display_description': 'test volume', + 'volume_type_id': volume_type_id} + return test_volume + + +def get_clone_volume_data(volume_type_id): + clone_test_volume = {'name': 'clone-test-vol1', + 'size': 1, + 'volume_name': 'clone-test-vol1', + 'id': '2', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'clone-test-vol1', + 'display_description': 'clone test volume', + 'volume_type_id': volume_type_id} + return clone_test_volume + + +def get_test_snapshot_data(src_volume): + test_snapshot = {'name': 'snapshot1', + 'display_name': 'snapshot1', + 'size': 1, + 'id': '1111', + 'volume_name': 'test-vol1', + 'volume_id': '1234', + 'volume': src_volume, + 'volume_size': 1, + 'project_id': 'project'} + return test_snapshot + + +def get_connector_data(): + connector = {'ip': '10.0.0.2', + 'initiator': 'iqn.1993-08.org.deb:01:222', + 'wwpns': ["1234567890123456", "1234567890543211"], + 'wwnns': ["223456789012345", "223456789054321"], + 'host': 'fakehost'} + return connector + + +def get_test_CG_data(volume_type_id): + test_CG = {'name': 'consistency_group_name', + 'id': '12345abcde', + 'volume_type_id': volume_type_id, + 'status': fields.ConsistencyGroupStatus.AVAILABLE + } + return test_CG + + +def 
get_test_CG_snap_data(volume_type_id): + test_CG_snapshot = {'name': 'cg_snap_name', + 'id': '12345abcde', + 'consistencygroup_id': '123456789', + 'status': fields.ConsistencyGroupStatus.AVAILABLE, + 'snapshots': [], + 'consistencygroup': get_test_CG_data(volume_type_id) + } + return test_CG_snapshot + + +class MockedEMCCoprHDDriverCommon(coprhd_common.EMCCoprHDDriverCommon): + + def __init__(self, protocol, default_backend_name, + configuration=None): + + super(MockedEMCCoprHDDriverCommon, self).__init__( + protocol, default_backend_name, configuration) + + def authenticate_user(self): + pass + + def get_exports_count_by_initiators(self, initiator_ports): + return 0 + + def _get_coprhd_volume_name(self, vol, verbose=False): + if verbose is True: + return {'volume_name': "coprhd_vol_name", + 'volume_uri': "coprhd_vol_uri"} + else: + return "coprhd_vol_name" + + def _get_coprhd_snapshot_name(self, snapshot, resUri): + return "coprhd_snapshot_name" + + def _get_coprhd_cgid(self, cgid): + return "cg_uri" + + def init_volume_api(self): + self.volume_api = Mock() + self.volume_api.get.return_value = { + 'name': 'source_test-vol1', + 'size': 1, + 'volume_name': 'source_test-vol1', + 'id': '1234', + 'consistencygroup_id': '12345', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'source_test-vol1', + 'display_description': 'test volume', + 'volume_type_id': "vol_type_id-for-snap"} + + def init_coprhd_api_components(self): + self.volume_obj = Mock() + self.volume_obj.create.return_value = "volume_created" + self.volume_obj.volume_query.return_value = "volume_uri" + self.volume_obj.get_storageAttributes.return_value = ( + 'block', 'volume_name') + self.volume_obj.storage_resource_query.return_value = "volume_uri" + self.volume_obj.is_volume_detachable.return_value = False + self.volume_obj.volume_clone_detach.return_value = 'detached' + self.volume_obj.getTags.return_value = ( + ["Openstack-vol", "Openstack-vol1"]) + self.volume_obj.tag.return_value = "tagged" + self.volume_obj.clone.return_value = "volume-cloned" + + if(self.protocol == "iSCSI"): + self.volume_obj.get_exports_by_uri.return_value = ( + iscsi_itl_list) + elif(self.protocol == "FC"): + self.volume_obj.get_exports_by_uri.return_value = ( + fcitl_itl_list) + else: + self.volume_obj.get_exports_by_uri.return_value = ( + scaleio_itl_list) + + self.volume_obj.list_volumes.return_value = [] + self.volume_obj.show.return_value = {"id": "vol_id"} + self.volume_obj.expand.return_value = "expanded" + + self.tag_obj = Mock() + self.tag_obj.list_tags.return_value = [ + "Openstack-vol", "Openstack-vol1"] + self.tag_obj.tag_resource.return_value = "Tagged" + + self.exportgroup_obj = Mock() + self.exportgroup_obj.exportgroup_list.return_value = ( + export_group_list) + self.exportgroup_obj.exportgroup_show.return_value = ( + export_group_details_data) + + self.exportgroup_obj.exportgroup_add_volumes.return_value = ( + "volume-added") + + self.host_obj = Mock() + self.host_obj.list_by_tenant.return_value = [] + self.host_obj.search_by_name.return_value = [] + self.host_obj.list_all.return_value = [{'id': "host1_id", + 'name': "host1"}] + self.host_obj.list_initiators.return_value = [ + {'name': "12:34:56:78:90:12:34:56"}, + {'name': "12:34:56:78:90:54:32:11"}, + {'name': "bfdf432500000004"}] + + self.hostinitiator_obj = Mock() + self.varray_obj = Mock() + self.varray_obj.varray_show.return_value = varray_detail_data + + self.snapshot_obj = Mock() + mocked_snap_obj = self.snapshot_obj.return_value + 
mocked_snap_obj.storageResource_query.return_value = ( + "resourceUri") + mocked_snap_obj.snapshot_create.return_value = ( + "snapshot_created") + mocked_snap_obj.snapshot_query.return_value = "snapshot_uri" + + self.consistencygroup_obj = Mock() + mocked_cg_object = self.consistencygroup_obj.return_value + mocked_cg_object.create.return_value = "CG-Created" + mocked_cg_object.consistencygroup_query.return_value = "CG-uri" + + +class EMCCoprHDISCSIDriverTest(test.TestCase): + + def setUp(self): + super(EMCCoprHDISCSIDriverTest, self).setUp() + self.create_coprhd_setup() + + def create_coprhd_setup(self): + + self.configuration = Mock() + self.configuration.coprhd_hostname = "10.10.10.10" + self.configuration.coprhd_port = "4443" + self.configuration.volume_backend_name = "EMCCoprHDISCSIDriver" + self.configuration.coprhd_username = "user-name" + self.configuration.coprhd_password = "password" + self.configuration.coprhd_tenant = "tenant" + self.configuration.coprhd_project = "project" + self.configuration.coprhd_varray = "varray" + self.configuration.coprhd_emulate_snapshot = False + + self.volume_type_id = self.create_coprhd_volume_type() + + self.stubs.Set(coprhd_iscsi.EMCCoprHDISCSIDriver, + '_get_common_driver', + self._get_mocked_common_driver) + self.driver = coprhd_iscsi.EMCCoprHDISCSIDriver( + configuration=self.configuration) + + def tearDown(self): + self._cleanUp() + super(EMCCoprHDISCSIDriverTest, self).tearDown() + + def _cleanUp(self): + self.delete_vipr_volume_type() + + def create_coprhd_volume_type(self): + ctx = context.get_admin_context() + vipr_volume_type = volume_types.create(ctx, + "coprhd-volume-type", + {'CoprHD:VPOOL': + 'vpool_coprhd'}) + volume_id = vipr_volume_type['id'] + return volume_id + + def _get_mocked_common_driver(self): + return MockedEMCCoprHDDriverCommon( + protocol="iSCSI", + default_backend_name="EMCViPRISCSIDriver", + configuration=self.configuration) + + def delete_vipr_volume_type(self): + ctx = context.get_admin_context() + volume_types.destroy(ctx, self.volume_type_id) + + def test_create_destroy(self): + volume = get_test_volume_data(self.volume_type_id) + + self.driver.create_volume(volume) + self.driver.delete_volume(volume) + + def test_get_volume_stats(self): + vol_stats = self.driver.get_volume_stats(True) + self.assertTrue(vol_stats['free_capacity_gb'], 'unknown') + + def test_create_volume_clone(self): + src_volume_data = get_test_volume_data(self.volume_type_id) + clone_volume_data = get_clone_volume_data(self.volume_type_id) + self.driver.create_volume(src_volume_data) + self.driver.create_cloned_volume(clone_volume_data, src_volume_data) + self.driver.delete_volume(src_volume_data) + self.driver.delete_volume(clone_volume_data) + + def test_create_destroy_snapshot(self): + volume_data = get_test_volume_data(self.volume_type_id) + snapshot_data = get_test_snapshot_data( + get_source_test_volume_data(self.volume_type_id)) + + self.driver.create_volume(volume_data) + self.driver.create_snapshot(snapshot_data) + self.driver.delete_snapshot(snapshot_data) + self.driver.delete_volume(volume_data) + + def test_create_volume_from_snapshot(self): + + src_vol_data = get_source_test_volume_data(self.volume_type_id) + self.driver.create_volume(src_vol_data) + + volume_data = get_test_volume_data(self.volume_type_id) + snapshot_data = get_test_snapshot_data(src_vol_data) + + self.driver.create_snapshot(snapshot_data) + self.driver.create_volume_from_snapshot(volume_data, snapshot_data) + + self.driver.delete_snapshot(snapshot_data) + 
self.driver.delete_volume(src_vol_data) + self.driver.delete_volume(volume_data) + + def test_extend_volume(self): + volume_data = get_test_volume_data(self.volume_type_id) + self.driver.create_volume(volume_data) + self.driver.extend_volume(volume_data, 2) + self.driver.delete_volume(volume_data) + + def test_initialize_and_terminate_connection(self): + connector_data = get_connector_data() + volume_data = get_test_volume_data(self.volume_type_id) + + self.driver.create_volume(volume_data) + res_initialize = self.driver.initialize_connection( + volume_data, connector_data) + expected_initialize = {'driver_volume_type': 'iscsi', + 'data': {'target_lun': 3, + 'target_portal': '10.10.10.10:22', + 'target_iqn': + '50:00:09:73:00:18:95:19', + 'target_discovered': False, + 'volume_id': '1'}} + self.assertEqual( + expected_initialize, res_initialize, 'Unexpected return data') + + self.driver.terminate_connection(volume_data, connector_data) + self.driver.delete_volume(volume_data) + + def test_create_delete_empty_CG(self): + cg_data = get_test_CG_data(self.volume_type_id) + ctx = context.get_admin_context() + self.driver.create_consistencygroup(ctx, cg_data) + model_update, volumes_model_update = \ + self.driver.delete_consistencygroup(ctx, cg_data, []) + self.assertEqual([], volumes_model_update, 'Unexpected return data') + + def test_create_update_delete_CG(self): + cg_data = get_test_CG_data(self.volume_type_id) + ctx = context.get_admin_context() + self.driver.create_consistencygroup(ctx, cg_data) + + volume = get_test_volume_data(self.volume_type_id) + self.driver.create_volume(volume) + + model_update, ret1, ret2 = \ + self.driver.update_consistencygroup(ctx, cg_data, [volume], []) + + self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, + model_update) + + model_update, volumes_model_update = \ + self.driver.delete_consistencygroup(ctx, cg_data, [volume]) + self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, + model_update) + self.assertEqual([{'status': 'deleted', 'id': '1'}], + volumes_model_update) + + def test_create_delete_CG_snap(self): + cg_snap_data = get_test_CG_snap_data(self.volume_type_id) + ctx = context.get_admin_context() + + model_update, snapshots_model_update = \ + self.driver.create_cgsnapshot(ctx, cg_snap_data, []) + self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, + model_update) + self.assertEqual([], snapshots_model_update, 'Unexpected return data') + + model_update, snapshots_model_update = \ + self.driver.delete_cgsnapshot(ctx, cg_snap_data, []) + self.assertEqual({}, model_update, 'Unexpected return data') + self.assertEqual([], snapshots_model_update, 'Unexpected return data') + + +class EMCCoprHDFCDriverTest(test.TestCase): + + def setUp(self): + super(EMCCoprHDFCDriverTest, self).setUp() + self.create_coprhd_setup() + + def create_coprhd_setup(self): + + self.configuration = Mock() + self.configuration.coprhd_hostname = "10.10.10.10" + self.configuration.coprhd_port = "4443" + self.configuration.volume_backend_name = "EMCCoprHDFCDriver" + self.configuration.coprhd_username = "user-name" + self.configuration.coprhd_password = "password" + self.configuration.coprhd_tenant = "tenant" + self.configuration.coprhd_project = "project" + self.configuration.coprhd_varray = "varray" + self.configuration.coprhd_emulate_snapshot = False + + self.volume_type_id = self.create_coprhd_volume_type() + + self.stubs.Set(coprhd_fc.EMCCoprHDFCDriver, + '_get_common_driver', + self._get_mocked_common_driver) + self.driver = 
coprhd_fc.EMCCoprHDFCDriver( + configuration=self.configuration) + + def tearDown(self): + self._cleanUp() + super(EMCCoprHDFCDriverTest, self).tearDown() + + def _cleanUp(self): + self.delete_vipr_volume_type() + + def create_coprhd_volume_type(self): + ctx = context.get_admin_context() + vipr_volume_type = volume_types.create(ctx, + "coprhd-volume-type", + {'CoprHD:VPOOL': 'vpool_vipr'}) + volume_id = vipr_volume_type['id'] + return volume_id + + def _get_mocked_common_driver(self): + return MockedEMCCoprHDDriverCommon( + protocol="FC", + default_backend_name="EMCViPRFCDriver", + configuration=self.configuration) + + def delete_vipr_volume_type(self): + ctx = context.get_admin_context() + volume_types.destroy(ctx, self.volume_type_id) + + def test_create_destroy(self): + volume = get_test_volume_data(self.volume_type_id) + + self.driver.create_volume(volume) + self.driver.delete_volume(volume) + + def test_get_volume_stats(self): + vol_stats = self.driver.get_volume_stats(True) + self.assertTrue(vol_stats['free_capacity_gb'], 'unknown') + + def test_create_volume_clone(self): + + src_volume_data = get_test_volume_data(self.volume_type_id) + clone_volume_data = get_clone_volume_data(self.volume_type_id) + self.driver.create_volume(src_volume_data) + self.driver.create_cloned_volume(clone_volume_data, src_volume_data) + self.driver.delete_volume(src_volume_data) + self.driver.delete_volume(clone_volume_data) + + def test_create_destroy_snapshot(self): + + volume_data = get_test_volume_data(self.volume_type_id) + snapshot_data = get_test_snapshot_data( + get_source_test_volume_data(self.volume_type_id)) + + self.driver.create_volume(volume_data) + self.driver.create_snapshot(snapshot_data) + self.driver.delete_snapshot(snapshot_data) + self.driver.delete_volume(volume_data) + + def test_create_volume_from_snapshot(self): + src_vol_data = get_source_test_volume_data(self.volume_type_id) + self.driver.create_volume(src_vol_data) + + volume_data = get_test_volume_data(self.volume_type_id) + snapshot_data = get_test_snapshot_data(src_vol_data) + + self.driver.create_snapshot(snapshot_data) + self.driver.create_volume_from_snapshot(volume_data, snapshot_data) + + self.driver.delete_snapshot(snapshot_data) + self.driver.delete_volume(src_vol_data) + self.driver.delete_volume(volume_data) + + def test_extend_volume(self): + volume_data = get_test_volume_data(self.volume_type_id) + self.driver.create_volume(volume_data) + self.driver.extend_volume(volume_data, 2) + self.driver.delete_volume(volume_data) + + def test_initialize_and_terminate_connection(self): + + connector_data = get_connector_data() + volume_data = get_test_volume_data(self.volume_type_id) + + self.driver.create_volume(volume_data) + res_initiatlize = self.driver.initialize_connection( + volume_data, connector_data) + expected_initialize = {'driver_volume_type': 'fibre_channel', + 'data': {'target_lun': 3, + 'initiator_target_map': + {'1234567890543211': + ['1234567890123456', + '1234567890123456'], + '1234567890123456': + ['1234567890123456', + '1234567890123456']}, + 'target_wwn': ['1234567890123456', + '1234567890123456'], + 'target_discovered': False, + 'volume_id': '1'}} + self.assertEqual( + expected_initialize, res_initiatlize, 'Unexpected return data') + + res_terminate = self.driver.terminate_connection( + volume_data, connector_data) + expected_terminate = {'driver_volume_type': 'fibre_channel', + 'data': {'initiator_target_map': + {'1234567890543211': + ['1234567890123456', + '1234567890123456'], + '1234567890123456': + 
['1234567890123456', + '1234567890123456']}, + 'target_wwn': ['1234567890123456', + '1234567890123456']}} + self.assertEqual( + expected_terminate, res_terminate, 'Unexpected return data') + + self.driver.delete_volume(volume_data) + + def test_create_delete_empty_CG(self): + cg_data = get_test_CG_data(self.volume_type_id) + ctx = context.get_admin_context() + self.driver.create_consistencygroup(ctx, cg_data) + model_update, volumes_model_update = \ + self.driver.delete_consistencygroup(ctx, cg_data, []) + self.assertEqual([], volumes_model_update, 'Unexpected return data') + + def test_create_update_delete_CG(self): + cg_data = get_test_CG_data(self.volume_type_id) + ctx = context.get_admin_context() + self.driver.create_consistencygroup(ctx, cg_data) + + volume = get_test_volume_data(self.volume_type_id) + self.driver.create_volume(volume) + + model_update, ret1, ret2 = \ + self.driver.update_consistencygroup(ctx, cg_data, [volume], []) + + self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, + model_update) + + model_update, volumes_model_update = \ + self.driver.delete_consistencygroup(ctx, cg_data, [volume]) + self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, + model_update) + self.assertEqual([{'status': 'deleted', 'id': '1'}], + volumes_model_update) + + def test_create_delete_CG_snap(self): + cg_snap_data = get_test_CG_snap_data(self.volume_type_id) + ctx = context.get_admin_context() + + model_update, snapshots_model_update = \ + self.driver.create_cgsnapshot(ctx, cg_snap_data, []) + self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, + model_update) + self.assertEqual([], snapshots_model_update, 'Unexpected return data') + + model_update, snapshots_model_update = \ + self.driver.delete_cgsnapshot(ctx, cg_snap_data, []) + self.assertEqual({}, model_update, 'Unexpected return data') + self.assertEqual([], snapshots_model_update, 'Unexpected return data') + + +class EMCCoprHDScaleIODriverTest(test.TestCase): + + def setUp(self): + super(EMCCoprHDScaleIODriverTest, self).setUp() + self.create_coprhd_setup() + + def create_coprhd_setup(self): + + self.configuration = Mock() + self.configuration.coprhd_hostname = "10.10.10.10" + self.configuration.coprhd_port = "4443" + self.configuration.volume_backend_name = "EMCCoprHDFCDriver" + self.configuration.coprhd_username = "user-name" + self.configuration.coprhd_password = "password" + self.configuration.coprhd_tenant = "tenant" + self.configuration.coprhd_project = "project" + self.configuration.coprhd_varray = "varray" + self.configuration.coprhd_scaleio_rest_gateway_ip = "10.10.10.11" + self.configuration.coprhd_scaleio_rest_gateway_port = 443 + self.configuration.coprhd_scaleio_rest_server_username = ( + "scaleio_username") + self.configuration.coprhd_scaleio_rest_server_password = ( + "scaleio_password") + self.configuration.scaleio_verify_server_certificate = False + self.configuration.scaleio_server_certificate_path = ( + "/etc/scaleio/certs") + + self.volume_type_id = self.create_coprhd_volume_type() + + self.stubs.Set(coprhd_scaleio.EMCCoprHDScaleIODriver, + '_get_common_driver', + self._get_mocked_common_driver) + self.stubs.Set(coprhd_scaleio.EMCCoprHDScaleIODriver, + '_get_client_id', + self._get_client_id) + self.driver = coprhd_scaleio.EMCCoprHDScaleIODriver( + configuration=self.configuration) + + def tearDown(self): + self._cleanUp() + super(EMCCoprHDScaleIODriverTest, self).tearDown() + + def _cleanUp(self): + self.delete_vipr_volume_type() + + def 
create_coprhd_volume_type(self): + ctx = context.get_admin_context() + vipr_volume_type = volume_types.create(ctx, + "coprhd-volume-type", + {'CoprHD:VPOOL': 'vpool_vipr'}) + volume_id = vipr_volume_type['id'] + return volume_id + + def _get_mocked_common_driver(self): + return MockedEMCCoprHDDriverCommon( + protocol="scaleio", + default_backend_name="EMCCoprHDScaleIODriver", + configuration=self.configuration) + + def _get_client_id(self, server_ip, server_port, server_username, + server_password, sdc_ip): + return "bfdf432500000004" + + def delete_vipr_volume_type(self): + ctx = context.get_admin_context() + volume_types.destroy(ctx, self.volume_type_id) + + def test_create_destroy(self): + volume = get_test_volume_data(self.volume_type_id) + + self.driver.create_volume(volume) + self.driver.delete_volume(volume) + + def test_get_volume_stats(self): + vol_stats = self.driver.get_volume_stats(True) + self.assertTrue(vol_stats['free_capacity_gb'], 'unknown') + + def test_create_volume_clone(self): + + src_volume_data = get_test_volume_data(self.volume_type_id) + clone_volume_data = get_clone_volume_data(self.volume_type_id) + self.driver.create_volume(src_volume_data) + self.driver.create_cloned_volume(clone_volume_data, src_volume_data) + self.driver.delete_volume(src_volume_data) + self.driver.delete_volume(clone_volume_data) + + def test_create_destroy_snapshot(self): + + volume_data = get_test_volume_data(self.volume_type_id) + snapshot_data = get_test_snapshot_data( + get_source_test_volume_data(self.volume_type_id)) + + self.driver.create_volume(volume_data) + self.driver.create_snapshot(snapshot_data) + self.driver.delete_snapshot(snapshot_data) + self.driver.delete_volume(volume_data) + + def test_create_volume_from_snapshot(self): + src_vol_data = get_source_test_volume_data(self.volume_type_id) + self.driver.create_volume(src_vol_data) + + volume_data = get_test_volume_data(self.volume_type_id) + snapshot_data = get_test_snapshot_data(src_vol_data) + + self.driver.create_snapshot(snapshot_data) + self.driver.create_volume_from_snapshot(volume_data, snapshot_data) + + self.driver.delete_snapshot(snapshot_data) + self.driver.delete_volume(src_vol_data) + self.driver.delete_volume(volume_data) + + def test_extend_volume(self): + volume_data = get_test_volume_data(self.volume_type_id) + self.driver.create_volume(volume_data) + self.driver.extend_volume(volume_data, 2) + self.driver.delete_volume(volume_data) + + def test_initialize_and_terminate_connection(self): + + connector_data = get_connector_data() + volume_data = get_test_volume_data(self.volume_type_id) + + self.driver.create_volume(volume_data) + res_initiatlize = self.driver.initialize_connection( + volume_data, connector_data) + expected_initialize = {'data': {'bandwidthLimit': None, + 'hostIP': '10.0.0.2', + 'iopsLimit': None, + 'scaleIO_volname': 'test-vol1', + 'serverIP': '10.10.10.11', + 'serverPassword': 'scaleio_password', + 'serverPort': 443, + 'serverToken': None, + 'serverUsername': 'scaleio_username'}, + 'driver_volume_type': 'scaleio'} + self.assertEqual( + expected_initialize, res_initiatlize, 'Unexpected return data') + + self.driver.terminate_connection( + volume_data, connector_data) + self.driver.delete_volume(volume_data) + + def test_create_delete_empty_CG(self): + cg_data = get_test_CG_data(self.volume_type_id) + ctx = context.get_admin_context() + self.driver.create_consistencygroup(ctx, cg_data) + model_update, volumes_model_update = \ + self.driver.delete_consistencygroup(ctx, cg_data, []) + 
self.assertEqual([], volumes_model_update, 'Unexpected return data') + + def test_create_update_delete_CG(self): + cg_data = get_test_CG_data(self.volume_type_id) + ctx = context.get_admin_context() + self.driver.create_consistencygroup(ctx, cg_data) + + volume = get_test_volume_data(self.volume_type_id) + self.driver.create_volume(volume) + + model_update, ret1, ret2 = \ + self.driver.update_consistencygroup(ctx, cg_data, [volume], []) + + self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, + model_update) + + model_update, volumes_model_update = \ + self.driver.delete_consistencygroup(ctx, cg_data, [volume]) + self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, + model_update) + self.assertEqual([{'status': 'deleted', 'id': '1'}], + volumes_model_update) + + def test_create_delete_CG_snap(self): + cg_snap_data = get_test_CG_snap_data(self.volume_type_id) + ctx = context.get_admin_context() + + model_update, snapshots_model_update = \ + self.driver.create_cgsnapshot(ctx, cg_snap_data, []) + self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, + model_update) + self.assertEqual([], snapshots_model_update, 'Unexpected return data') + + model_update, snapshots_model_update = \ + self.driver.delete_cgsnapshot(ctx, cg_snap_data, []) + self.assertEqual({}, model_update, 'Unexpected return data') + self.assertEqual([], snapshots_model_update, 'Unexpected return data') diff --git a/cinder/volume/drivers/coprhd/__init__.py b/cinder/volume/drivers/coprhd/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/volume/drivers/coprhd/common.py b/cinder/volume/drivers/coprhd/common.py new file mode 100644 index 00000000000..6b53a045def --- /dev/null +++ b/cinder/volume/drivers/coprhd/common.py @@ -0,0 +1,1451 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
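+
+# Illustrative cinder.conf backend stanza for these drivers. The backend
+# name and option values below are examples only; the coprhd_* option
+# names are the ones registered in volume_opts in this module, and the
+# driver path matches cinder/volume/drivers/coprhd/iscsi.py in this patch.
+#
+#   [coprhd-iscsi]
+#   volume_driver = cinder.volume.drivers.coprhd.iscsi.EMCCoprHDISCSIDriver
+#   volume_backend_name = coprhd-iscsi
+#   coprhd_hostname = coprhd.example.com
+#   coprhd_port = 4443
+#   coprhd_username = openstack
+#   coprhd_password = <password>
+#   coprhd_tenant = tenant
+#   coprhd_project = project
+#   coprhd_varray = varray
+#   coprhd_emulate_snapshot = False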
+ +import base64 +import binascii +import random +import string + +import eventlet +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import encodeutils +from oslo_utils import excutils +from oslo_utils import units +import six + +from cinder import context +from cinder import exception +from cinder.i18n import _ +from cinder.i18n import _LE +from cinder.i18n import _LI +from cinder.objects import fields +from cinder.volume.drivers.coprhd.helpers import ( + authentication as coprhd_auth) +from cinder.volume.drivers.coprhd.helpers import ( + commoncoprhdapi as coprhd_utils) +from cinder.volume.drivers.coprhd.helpers import ( + consistencygroup as coprhd_cg) +from cinder.volume.drivers.coprhd.helpers import exportgroup as coprhd_eg +from cinder.volume.drivers.coprhd.helpers import host as coprhd_host +from cinder.volume.drivers.coprhd.helpers import snapshot as coprhd_snap +from cinder.volume.drivers.coprhd.helpers import tag as coprhd_tag + +from cinder.volume.drivers.coprhd.helpers import ( + virtualarray as coprhd_varray) +from cinder.volume.drivers.coprhd.helpers import volume as coprhd_vol +from cinder.volume import volume_types + + +LOG = logging.getLogger(__name__) + +MAX_RETRIES = 10 +INTERVAL_10_SEC = 10 + +volume_opts = [ + cfg.StrOpt('coprhd_hostname', + default=None, + help='Hostname for the CoprHD Instance'), + cfg.PortOpt('coprhd_port', + default=4443, + help='Port for the CoprHD Instance'), + cfg.StrOpt('coprhd_username', + default=None, + help='Username for accessing the CoprHD Instance'), + cfg.StrOpt('coprhd_password', + default=None, + help='Password for accessing the CoprHD Instance', + secret=True), + cfg.StrOpt('coprhd_tenant', + default=None, + help='Tenant to utilize within the CoprHD Instance'), + cfg.StrOpt('coprhd_project', + default=None, + help='Project to utilize within the CoprHD Instance'), + cfg.StrOpt('coprhd_varray', + default=None, + help='Virtual Array to utilize within the CoprHD Instance'), + cfg.BoolOpt('coprhd_emulate_snapshot', + default=False, + help='True | False to indicate if the storage array ' + 'in CoprHD is VMAX or VPLEX') +] + +CONF = cfg.CONF +CONF.register_opts(volume_opts) + +URI_VPOOL_VARRAY_CAPACITY = '/block/vpools/{0}/varrays/{1}/capacity' +URI_BLOCK_EXPORTS_FOR_INITIATORS = '/block/exports?initiators={0}' +EXPORT_RETRY_COUNT = 5 + + +def retry_wrapper(func): + def try_and_retry(*args, **kwargs): + retry = False + try: + return func(*args, **kwargs) + except coprhd_utils.CoprHdError as e: + # if we got an http error and + # the string contains 401 or if the string contains the word cookie + if (e.err_code == coprhd_utils.CoprHdError.HTTP_ERR and + (e.msg.find('401') != -1 or + e.msg.lower().find('cookie') != -1)): + retry = True + args[0].AUTHENTICATED = False + else: + exception_message = (_("\nCoprHD Exception: %(msg)s\n") % + {'msg': e.msg}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + except Exception as exc: + exception_message = (_("\nGeneral Exception: %(exec_info)s\n") % + {'exec_info': + encodeutils.exception_to_unicode(exc)}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + if retry: + return func(*args, **kwargs) + + return try_and_retry + + +class EMCCoprHDDriverCommon(object): + + OPENSTACK_TAG = 'OpenStack' + + def __init__(self, protocol, default_backend_name, configuration=None): + self.AUTHENTICATED = False + self.protocol = protocol + self.configuration = 
configuration + self.configuration.append_config_values(volume_opts) + + self.init_coprhd_api_components() + + self.stats = {'driver_version': '3.0.0.0', + 'free_capacity_gb': 'unknown', + 'reserved_percentage': '0', + 'storage_protocol': protocol, + 'total_capacity_gb': 'unknown', + 'vendor_name': 'CoprHD', + 'volume_backend_name': + self.configuration.volume_backend_name or + default_backend_name} + + def init_coprhd_api_components(self): + + coprhd_utils.AUTH_TOKEN = None + + # instantiate a few coprhd api objects for later use + self.volume_obj = coprhd_vol.Volume( + self.configuration.coprhd_hostname, + self.configuration.coprhd_port) + + self.exportgroup_obj = coprhd_eg.ExportGroup( + self.configuration.coprhd_hostname, + self.configuration.coprhd_port) + + self.host_obj = coprhd_host.Host( + self.configuration.coprhd_hostname, + self.configuration.coprhd_port) + + self.varray_obj = coprhd_varray.VirtualArray( + self.configuration.coprhd_hostname, + self.configuration.coprhd_port) + + self.snapshot_obj = coprhd_snap.Snapshot( + self.configuration.coprhd_hostname, + self.configuration.coprhd_port) + + self.consistencygroup_obj = coprhd_cg.ConsistencyGroup( + self.configuration.coprhd_hostname, + self.configuration.coprhd_port) + + self.tag_obj = coprhd_tag.Tag( + self.configuration.coprhd_hostname, + self.configuration.coprhd_port) + + def check_for_setup_error(self): + # validate all of the coprhd_* configuration values + if self.configuration.coprhd_hostname is None: + message = _("coprhd_hostname is not set in cinder configuration") + raise exception.VolumeBackendAPIException(data=message) + + if self.configuration.coprhd_port is None: + message = _("coprhd_port is not set in cinder configuration") + raise exception.VolumeBackendAPIException(data=message) + + if self.configuration.coprhd_username is None: + message = _("coprhd_username is not set in cinder configuration") + raise exception.VolumeBackendAPIException(data=message) + + if self.configuration.coprhd_password is None: + message = _("coprhd_password is not set in cinder configuration") + raise exception.VolumeBackendAPIException(data=message) + + if self.configuration.coprhd_tenant is None: + message = _("coprhd_tenant is not set in cinder configuration") + raise exception.VolumeBackendAPIException(data=message) + + if self.configuration.coprhd_project is None: + message = _("coprhd_project is not set in cinder configuration") + raise exception.VolumeBackendAPIException(data=message) + + if self.configuration.coprhd_varray is None: + message = _("coprhd_varray is not set in cinder configuration") + raise exception.VolumeBackendAPIException(data=message) + + def authenticate_user(self): + # we should check to see if we are already authenticated before blindly + # doing it again + if self.AUTHENTICATED is False: + obj = coprhd_auth.Authentication( + self.configuration.coprhd_hostname, + self.configuration.coprhd_port) + + username = self.configuration.coprhd_username + password = self.configuration.coprhd_password + + coprhd_utils.AUTH_TOKEN = obj.authenticate_user(username, + password) + self.AUTHENTICATED = True + + def create_volume(self, vol, driver, truncate_name=False): + self.authenticate_user() + name = self._get_resource_name(vol, truncate_name) + size = int(vol['size']) * units.Gi + + vpool = self._get_vpool(vol) + self.vpool = vpool['CoprHD:VPOOL'] + + try: + cgid = None + coprhd_cgid = None + try: + cgid = vol['consistencygroup_id'] + if cgid: + coprhd_cgid = self._get_coprhd_cgid(cgid) + except KeyError: + 
coprhd_cgid = None + + full_project_name = ("%s/%s" % (self.configuration.coprhd_tenant, + self.configuration.coprhd_project)) + self.volume_obj.create(full_project_name, name, size, + self.configuration.coprhd_varray, + self.vpool, + # no longer specified in volume creation + sync=True, + # no longer specified in volume creation + consistencygroup=coprhd_cgid) + except coprhd_utils.CoprHdError as e: + coprhd_err_msg = (_("Volume %(name)s: create failed\n%(err)s") % + {'name': name, 'err': six.text_type(e.msg)}) + + log_err_msg = (_LE("Volume : %s creation failed") % name) + self._raise_or_log_exception( + e.err_code, coprhd_err_msg, log_err_msg) + + @retry_wrapper + def create_consistencygroup(self, context, group, truncate_name=False): + self.authenticate_user() + name = self._get_resource_name(group, truncate_name) + + try: + self.consistencygroup_obj.create( + name, + self.configuration.coprhd_project, + self.configuration.coprhd_tenant) + + cgUri = self.consistencygroup_obj.consistencygroup_query( + name, + self.configuration.coprhd_project, + self.configuration.coprhd_tenant) + + self.set_tags_for_resource( + coprhd_cg.ConsistencyGroup.URI_CONSISTENCY_GROUP_TAGS, + cgUri, group) + + except coprhd_utils.CoprHdError as e: + coprhd_err_msg = (_("Consistency Group %(name)s:" + " create failed\n%(err)s") % + {'name': name, 'err': six.text_type(e.msg)}) + + log_err_msg = (_LE("Consistency Group : %s creation failed") % + name) + self._raise_or_log_exception(e.err_code, coprhd_err_msg, + log_err_msg) + + @retry_wrapper + def update_consistencygroup(self, group, add_volumes, + remove_volumes): + self.authenticate_user() + model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} + cg_uri = self._get_coprhd_cgid(group['id']) + add_volnames = [] + remove_volnames = [] + + try: + if add_volumes: + for vol in add_volumes: + vol_name = self._get_coprhd_volume_name(vol) + add_volnames.append(vol_name) + + if remove_volumes: + for vol in remove_volumes: + vol_name = self._get_coprhd_volume_name(vol) + remove_volnames.append(vol_name) + + self.consistencygroup_obj.update( + cg_uri, + self.configuration.coprhd_project, + self.configuration.coprhd_tenant, + add_volnames, remove_volnames, True) + + return model_update, None, None + + except coprhd_utils.CoprHdError as e: + coprhd_err_msg = (_("Consistency Group %(cg_uri)s:" + " update failed\n%(err)s") % + {'cg_uri': cg_uri, 'err': six.text_type(e.msg)}) + + log_err_msg = (_LE("Consistency Group : %s update failed") % + cg_uri) + self._raise_or_log_exception(e.err_code, coprhd_err_msg, + log_err_msg) + + @retry_wrapper + def delete_consistencygroup(self, context, group, volumes, + truncate_name=False): + self.authenticate_user() + name = self._get_resource_name(group, truncate_name) + volumes_model_update = [] + + try: + for vol in volumes: + try: + vol_name = self._get_coprhd_volume_name(vol) + full_project_name = "%s/%s" % ( + self.configuration.coprhd_tenant, + self.configuration.coprhd_project) + + self.volume_obj.delete(full_project_name, vol_name, + sync=True, + force_delete=True) + + update_item = {'id': vol['id'], + 'status': + fields.ConsistencyGroupStatus.DELETED} + volumes_model_update.append(update_item) + + except exception.VolumeBackendAPIException: + update_item = {'id': vol['id'], + 'status': fields.ConsistencyGroupStatus. 
+ ERROR_DELETING} + + volumes_model_update.append(update_item) + + LOG.exception(_LE("Failed to delete the volume %s of CG."), + vol['name']) + + self.consistencygroup_obj.delete( + name, + self.configuration.coprhd_project, + self.configuration.coprhd_tenant) + + model_update = {} + model_update['status'] = group['status'] + + return model_update, volumes_model_update + + except coprhd_utils.CoprHdError as e: + coprhd_err_msg = (_("Consistency Group %(name)s:" + " delete failed\n%(err)s") % + {'name': name, 'err': six.text_type(e.msg)}) + + log_err_msg = (_LE("Consistency Group : %s deletion failed") % + name) + self._raise_or_log_exception(e.err_code, coprhd_err_msg, + log_err_msg) + + @retry_wrapper + def create_cgsnapshot(self, cgsnapshot, snapshots, truncate_name=False): + self.authenticate_user() + + snapshots_model_update = [] + cgsnapshot_name = self._get_resource_name(cgsnapshot, truncate_name) + cg_id = cgsnapshot['consistencygroup_id'] + cg_group = cgsnapshot.get('consistencygroup') + cg_name = None + coprhd_cgid = None + + if cg_id: + coprhd_cgid = self._get_coprhd_cgid(cg_id) + cg_name = self._get_consistencygroup_name(cg_group) + + LOG.info(_LI('Start to create cgsnapshot for consistency group' + ': %(group_name)s'), + {'group_name': cg_name}) + + try: + self.snapshot_obj.snapshot_create( + 'block', + 'consistency-groups', + coprhd_cgid, + cgsnapshot_name, + False, + True) + + for snapshot in snapshots: + vol_id_of_snap = snapshot['volume_id'] + + # Finding the volume in CoprHD for this volume id + tagname = "OpenStack:id:" + vol_id_of_snap + rslt = coprhd_utils.search_by_tag( + coprhd_vol.Volume.URI_SEARCH_VOLUMES_BY_TAG.format( + tagname), + self.configuration.coprhd_hostname, + self.configuration.coprhd_port) + + if not rslt: + continue + + volUri = rslt[0] + + snapshots_of_volume = self.snapshot_obj.snapshot_list_uri( + 'block', + 'volumes', + volUri) + + for snapUri in snapshots_of_volume: + snapshot_obj = self.snapshot_obj.snapshot_show_uri( + 'block', + volUri, + snapUri['id']) + + if not coprhd_utils.get_node_value(snapshot_obj, + 'inactive'): + + # Creating snapshot for a consistency group. + # When we create a consistency group snapshot on + # coprhd then each snapshot of volume in the + # consistencygroup will be given a subscript. Ex if + # the snapshot name is cgsnap1 and lets say there are + # three vols(a,b,c) in CG. Then the names of snapshots + # of the volumes in cg on coprhd end will be like + # cgsnap1-1 cgsnap1-2 cgsnap1-3. So, we list the + # snapshots of the volume under consideration and then + # split the name using - from the ending as prefix + # and postfix. We compare the prefix to the cgsnapshot + # name and filter our the snapshots that correspond to + # the cgsnapshot + + if '-' in snapshot_obj['name']: + (prefix, postfix) = snapshot_obj[ + 'name'].rsplit('-', 1) + + if cgsnapshot_name == prefix: + self.set_tags_for_resource( + coprhd_snap.Snapshot. 
+ URI_BLOCK_SNAPSHOTS_TAG, + snapUri['id'], + snapshot) + + elif cgsnapshot_name == snapshot_obj['name']: + self.set_tags_for_resource( + coprhd_snap.Snapshot.URI_BLOCK_SNAPSHOTS_TAG, + snapUri['id'], + snapshot) + + snapshot['status'] = fields.SnapshotStatus.AVAILABLE + snapshots_model_update.append( + {'id': snapshot['id'], 'status': + fields.SnapshotStatus.AVAILABLE}) + + model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} + + return model_update, snapshots_model_update + + except coprhd_utils.CoprHdError as e: + coprhd_err_msg = (_("Snapshot for Consistency Group %(cg_name)s:" + " create failed\n%(err)s") % + {'cg_name': cg_name, + 'err': six.text_type(e.msg)}) + + log_err_msg = (_LE("Snapshot %(name)s for Consistency" + " Group: %(cg_name)s creation failed") % + {'cg_name': cg_name, + 'name': cgsnapshot_name}) + self._raise_or_log_exception(e.err_code, coprhd_err_msg, + log_err_msg) + + @retry_wrapper + def delete_cgsnapshot(self, cgsnapshot, snapshots, truncate_name=False): + self.authenticate_user() + cgsnapshot_id = cgsnapshot['id'] + cgsnapshot_name = self._get_resource_name(cgsnapshot, truncate_name) + + snapshots_model_update = [] + cg_id = cgsnapshot['consistencygroup_id'] + cg_group = cgsnapshot.get('consistencygroup') + + coprhd_cgid = self._get_coprhd_cgid(cg_id) + cg_name = self._get_consistencygroup_name(cg_group) + + model_update = {} + LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: ' + '%(group_name)s'), {'snap_name': cgsnapshot['name'], + 'group_name': cg_name}) + + try: + uri = None + try: + uri = self.snapshot_obj.snapshot_query('block', + 'consistency-groups', + coprhd_cgid, + cgsnapshot_name + '-1') + except coprhd_utils.CoprHdError as e: + if e.err_code == coprhd_utils.CoprHdError.NOT_FOUND_ERR: + uri = self.snapshot_obj.snapshot_query( + 'block', + 'consistency-groups', + coprhd_cgid, + cgsnapshot_name) + self.snapshot_obj.snapshot_delete_uri( + 'block', + coprhd_cgid, + uri, + True, + 0) + + for snapshot in snapshots: + snapshots_model_update.append( + {'id': snapshot['id'], + 'status': fields.SnapshotStatus.DELETED}) + + return model_update, snapshots_model_update + + except coprhd_utils.CoprHdError as e: + coprhd_err_msg = (_("Snapshot %(cgsnapshot_id)s: for" + " Consistency Group %(cg_name)s: delete" + " failed\n%(err)s") % + {'cgsnapshot_id': cgsnapshot_id, + 'cg_name': cg_name, + 'err': six.text_type(e.msg)}) + + log_err_msg = (_LE("Snapshot %(name)s for Consistency" + " Group: %(cg_name)s deletion failed") % + {'cg_name': cg_name, + 'name': cgsnapshot_name}) + self._raise_or_log_exception(e.err_code, coprhd_err_msg, + log_err_msg) + + @retry_wrapper + def set_volume_tags(self, vol, exemptTags=None, truncate_name=False): + if exemptTags is None: + exemptTags = [] + + self.authenticate_user() + name = self._get_resource_name(vol, truncate_name) + full_project_name = ("%s/%s" % ( + self.configuration.coprhd_tenant, + self.configuration.coprhd_project)) + + vol_uri = self.volume_obj.volume_query(full_project_name, + name) + + self.set_tags_for_resource( + coprhd_vol.Volume.URI_TAG_VOLUME, vol_uri, vol, exemptTags) + + @retry_wrapper + def set_tags_for_resource(self, uri, resourceId, resource, + exemptTags=None): + if exemptTags is None: + exemptTags = [] + + self.authenticate_user() + + # first, get the current tags that start with the OPENSTACK_TAG + # eyecatcher + formattedUri = uri.format(resourceId) + remove_tags = [] + currentTags = self.tag_obj.list_tags(formattedUri) + for cTag in currentTags: + if 
cTag.startswith(self.OPENSTACK_TAG): + remove_tags.append(cTag) + + try: + if remove_tags: + self.tag_obj.tag_resource(uri, + resourceId, + None, + remove_tags) + except coprhd_utils.CoprHdError as e: + if e.err_code == coprhd_utils.CoprHdError.SOS_FAILURE_ERR: + LOG.debug("CoprHdError adding the tag:\n %s", e.msg) + + # now add the tags for the resource + add_tags = [] + # put all the openstack resource properties into the CoprHD resource + + try: + for prop, value in vars(resource).items(): + try: + if prop in exemptTags: + continue + + if prop.startswith("_"): + prop = prop.replace("_", '', 1) + + # don't put the status in, it's always the status before + # the current transaction + if ((not prop.startswith("status") and not + prop.startswith("obj_status") and + prop != "obj_volume") and value): + add_tags.append( + "%s:%s:%s" % (self.OPENSTACK_TAG, prop, + six.text_type(value))) + except TypeError: + LOG.error( + _LE("Error tagging the resource property %s"), prop) + except TypeError: + LOG.error(_LE("Error tagging the resource properties")) + + try: + self.tag_obj.tag_resource( + uri, + resourceId, + add_tags, + None) + except coprhd_utils.CoprHdError as e: + if e.err_code == coprhd_utils.CoprHdError.SOS_FAILURE_ERR: + LOG.debug( + "Adding the tag failed. CoprHdError: %s", e.msg) + + return self.tag_obj.list_tags(formattedUri) + + @retry_wrapper + def create_cloned_volume(self, vol, src_vref, truncate_name=False): + """Creates a clone of the specified volume.""" + self.authenticate_user() + name = self._get_resource_name(vol, truncate_name) + srcname = self._get_coprhd_volume_name(src_vref) + + try: + if src_vref['consistencygroup_id']: + raise coprhd_utils.CoprHdError( + coprhd_utils.CoprHdError.SOS_FAILURE_ERR, + _("Clone can't be taken individually on a volume" + " that is part of a Consistency Group")) + except KeyError as e: + pass + try: + (storageresType, + storageresTypename) = self.volume_obj.get_storageAttributes( + srcname, None, None) + + resource_id = self.volume_obj.storage_resource_query( + storageresType, + srcname, + None, + None, + self.configuration.coprhd_project, + self.configuration.coprhd_tenant) + + self.volume_obj.clone( + name, + resource_id, + sync=True) + + full_project_name = "%s/%s" % ( + self.configuration.coprhd_tenant, + self.configuration.coprhd_project) + + detachable = self.volume_obj.is_volume_detachable( + full_project_name, name) + LOG.debug("Is volume detachable : %s", detachable) + + # detach it from the source volume immediately after creation + if detachable: + self.volume_obj.volume_clone_detach( + "", full_project_name, name, True) + + except IndexError as e: + LOG.exception(_LE("Volume clone detach returned empty task list")) + + except coprhd_utils.CoprHdError as e: + coprhd_err_msg = (_("Volume %(name)s: clone failed\n%(err)s") % + {'name': name, 'err': six.text_type(e.msg)}) + + log_err_msg = (_LE("Volume : {%s} clone failed") % name) + self._raise_or_log_exception(e.err_code, coprhd_err_msg, + log_err_msg) + + try: + src_vol_size = src_vref['size'] + except KeyError: + src_vol_size = src_vref['volume_size'] + + if vol['size'] > src_vol_size: + size_in_bytes = coprhd_utils.to_bytes("%sG" % vol['size']) + try: + self.volume_obj.expand( + ("%s/%s" % (self.configuration.coprhd_tenant, + self.configuration.coprhd_project)), name, + size_in_bytes, + True) + except coprhd_utils.CoprHdError as e: + coprhd_err_msg = (_("Volume %(volume_name)s: expand failed" + "\n%(err)s") % + {'volume_name': name, + 'err': six.text_type(e.msg)}) + + 
log_err_msg = (_LE("Volume : %s expand failed") % name) + self._raise_or_log_exception(e.err_code, coprhd_err_msg, + log_err_msg) + + @retry_wrapper + def expand_volume(self, vol, new_size): + """expands the volume to new_size specified.""" + self.authenticate_user() + volume_name = self._get_coprhd_volume_name(vol) + size_in_bytes = coprhd_utils.to_bytes("%sG" % new_size) + + try: + self.volume_obj.expand( + ("%s/%s" % (self.configuration.coprhd_tenant, + self.configuration.coprhd_project)), volume_name, + size_in_bytes, + True) + except coprhd_utils.CoprHdError as e: + coprhd_err_msg = (_("Volume %(volume_name)s:" + " expand failed\n%(err)s") % + {'volume_name': volume_name, + 'err': six.text_type(e.msg)}) + + log_err_msg = (_LE("Volume : %s expand failed") % + volume_name) + self._raise_or_log_exception(e.err_code, coprhd_err_msg, + log_err_msg) + + @retry_wrapper + def create_volume_from_snapshot(self, snapshot, volume, + truncate_name=False): + """Creates volume from given snapshot ( snapshot clone to volume ).""" + self.authenticate_user() + + if self.configuration.coprhd_emulate_snapshot: + self.create_cloned_volume(volume, snapshot, truncate_name) + return + + src_snapshot_name = None + src_vol_ref = snapshot['volume'] + new_volume_name = self._get_resource_name(volume, truncate_name) + + try: + coprhd_vol_info = self._get_coprhd_volume_name( + src_vol_ref, True) + src_snapshot_name = self._get_coprhd_snapshot_name( + snapshot, coprhd_vol_info['volume_uri']) + + (storageresType, + storageresTypename) = self.volume_obj.get_storageAttributes( + coprhd_vol_info['volume_name'], None, src_snapshot_name) + + resource_id = self.volume_obj.storage_resource_query( + storageresType, + coprhd_vol_info['volume_name'], + None, + src_snapshot_name, + self.configuration.coprhd_project, + self.configuration.coprhd_tenant) + + self.volume_obj.clone( + new_volume_name, + resource_id, + sync=True) + + except coprhd_utils.CoprHdError as e: + coprhd_err_msg = (_("Snapshot %(src_snapshot_name)s:" + " clone failed\n%(err)s") % + {'src_snapshot_name': src_snapshot_name, + 'err': six.text_type(e.msg)}) + + log_err_msg = (_LE("Snapshot : %s clone failed") % + src_snapshot_name) + self._raise_or_log_exception(e.err_code, coprhd_err_msg, + log_err_msg) + + if volume['size'] > snapshot['volume_size']: + size_in_bytes = coprhd_utils.to_bytes("%sG" % volume['size']) + + try: + self.volume_obj.expand( + ("%s/%s" % (self.configuration.coprhd_tenant, + self.configuration.coprhd_project)), + new_volume_name, size_in_bytes, True) + + except coprhd_utils.CoprHdError as e: + coprhd_err_msg = (_("Volume %(volume_name)s: expand failed" + "\n%(err)s") % + {'volume_name': new_volume_name, + 'err': six.text_type(e.msg)}) + + log_err_msg = (_LE("Volume : %s expand failed") % + new_volume_name) + self._raise_or_log_exception(e.err_code, coprhd_err_msg, + log_err_msg) + + @retry_wrapper + def delete_volume(self, vol): + self.authenticate_user() + name = self._get_coprhd_volume_name(vol) + try: + full_project_name = ("%s/%s" % ( + self.configuration.coprhd_tenant, + self.configuration.coprhd_project)) + self.volume_obj.delete(full_project_name, name, sync=True) + except coprhd_utils.CoprHdError as e: + if e.err_code == coprhd_utils.CoprHdError.NOT_FOUND_ERR: + LOG.info(_LI( + "Volume %s" + " no longer exists; volume deletion is" + " considered successful."), name) + else: + coprhd_err_msg = (_("Volume %(name)s: delete failed" + "\n%(err)s") % + {'name': name, 'err': six.text_type(e.msg)}) + + log_err_msg = (_LE("Volume : %s 
delete failed") % name) + self._raise_or_log_exception(e.err_code, coprhd_err_msg, + log_err_msg) + + @retry_wrapper + def create_snapshot(self, snapshot, truncate_name=False): + self.authenticate_user() + + volume = snapshot['volume'] + + try: + if volume['consistencygroup_id']: + raise coprhd_utils.CoprHdError( + coprhd_utils.CoprHdError.SOS_FAILURE_ERR, + _("Snapshot can't be taken individually on a volume" + " that is part of a Consistency Group")) + except KeyError: + LOG.info(_LI("No Consistency Group associated with the volume")) + + if self.configuration.coprhd_emulate_snapshot: + self.create_cloned_volume(snapshot, volume, truncate_name) + self.set_volume_tags( + snapshot, ['_volume', '_obj_volume_type'], truncate_name) + return + + try: + snapshotname = self._get_resource_name(snapshot, truncate_name) + vol = snapshot['volume'] + + volumename = self._get_coprhd_volume_name(vol) + projectname = self.configuration.coprhd_project + tenantname = self.configuration.coprhd_tenant + storageresType = 'block' + storageresTypename = 'volumes' + resourceUri = self.snapshot_obj.storage_resource_query( + storageresType, + volume_name=volumename, + cg_name=None, + project=projectname, + tenant=tenantname) + inactive = False + sync = True + self.snapshot_obj.snapshot_create( + storageresType, + storageresTypename, + resourceUri, + snapshotname, + inactive, + sync) + + snapshotUri = self.snapshot_obj.snapshot_query( + storageresType, + storageresTypename, + resourceUri, + snapshotname) + + self.set_tags_for_resource( + coprhd_snap.Snapshot.URI_BLOCK_SNAPSHOTS_TAG, + snapshotUri, snapshot, ['_volume']) + + except coprhd_utils.CoprHdError as e: + coprhd_err_msg = (_("Snapshot: %(snapshotname)s, create failed" + "\n%(err)s") % {'snapshotname': snapshotname, + 'err': six.text_type(e.msg)}) + + log_err_msg = (_LE("Snapshot : %s create failed") % snapshotname) + self._raise_or_log_exception(e.err_code, coprhd_err_msg, + log_err_msg) + + @retry_wrapper + def delete_snapshot(self, snapshot): + self.authenticate_user() + + vol = snapshot['volume'] + + try: + if vol['consistencygroup_id']: + raise coprhd_utils.CoprHdError( + coprhd_utils.CoprHdError.SOS_FAILURE_ERR, + _("Snapshot delete can't be done individually on a volume" + " that is part of a Consistency Group")) + except KeyError: + LOG.info(_LI("No Consistency Group associated with the volume")) + + if self.configuration.coprhd_emulate_snapshot: + self.delete_volume(snapshot) + return + + snapshotname = None + try: + volumename = self._get_coprhd_volume_name(vol) + projectname = self.configuration.coprhd_project + tenantname = self.configuration.coprhd_tenant + storageresType = 'block' + storageresTypename = 'volumes' + resourceUri = self.snapshot_obj.storage_resource_query( + storageresType, + volume_name=volumename, + cg_name=None, + project=projectname, + tenant=tenantname) + if resourceUri is None: + LOG.info(_LI( + "Snapshot %s" + " is not found; snapshot deletion" + " is considered successful."), snapshotname) + else: + snapshotname = self._get_coprhd_snapshot_name( + snapshot, resourceUri) + + self.snapshot_obj.snapshot_delete( + storageresType, + storageresTypename, + resourceUri, + snapshotname, + sync=True) + except coprhd_utils.CoprHdError as e: + coprhd_err_msg = (_("Snapshot %s : Delete Failed\n") % + snapshotname) + + log_err_msg = (_LE("Snapshot : %s delete failed") % snapshotname) + self._raise_or_log_exception(e.err_code, coprhd_err_msg, + log_err_msg) + + @retry_wrapper + def initialize_connection(self, volume, protocol, 
initiatorPorts, + hostname): + + try: + self.authenticate_user() + volumename = self._get_coprhd_volume_name(volume) + foundgroupname = self._find_exportgroup(initiatorPorts) + foundhostname = None + if foundgroupname is None: + for i in range(len(initiatorPorts)): + # check if this initiator is contained in any CoprHD Host + # object + LOG.debug( + "checking for initiator port: %s", initiatorPorts[i]) + foundhostname = self._find_host(initiatorPorts[i]) + + if foundhostname: + LOG.info(_LI("Found host %s"), foundhostname) + break + + if not foundhostname: + LOG.error(_LE("Auto host creation not supported")) + # create an export group for this host + foundgroupname = foundhostname + 'SG' + # create a unique name + foundgroupname = foundgroupname + '-' + ''.join( + random.choice(string.ascii_uppercase + + string.digits) + for x in range(6)) + self.exportgroup_obj.exportgroup_create( + foundgroupname, + self.configuration.coprhd_project, + self.configuration.coprhd_tenant, + self.configuration.coprhd_varray, + 'Host', + foundhostname) + + LOG.debug( + "adding the volume to the exportgroup : %s", volumename) + + self.exportgroup_obj.exportgroup_add_volumes( + True, + foundgroupname, + self.configuration.coprhd_tenant, + None, + None, + None, + self.configuration.coprhd_project, + [volumename], + None, + None) + + return self._find_device_info(volume, initiatorPorts) + + except coprhd_utils.CoprHdError as e: + raise coprhd_utils.CoprHdError( + coprhd_utils.CoprHdError.SOS_FAILURE_ERR, + (_("Attach volume (%(name)s) to host" + " (%(hostname)s) initiator (%(initiatorport)s)" + " failed:\n%(err)s") % + {'name': self._get_coprhd_volume_name( + volume), + 'hostname': hostname, + 'initiatorport': initiatorPorts[0], + 'err': six.text_type(e.msg)}) + ) + + @retry_wrapper + def terminate_connection(self, volume, protocol, initiatorPorts, + hostname): + try: + self.authenticate_user() + volumename = self._get_coprhd_volume_name(volume) + full_project_name = ("%s/%s" % (self.configuration.coprhd_tenant, + self.configuration.coprhd_project)) + voldetails = self.volume_obj.show(full_project_name, volumename) + volid = voldetails['id'] + + # find the exportgroups + exports = self.volume_obj.get_exports_by_uri(volid) + exportgroups = set() + itls = exports['itl'] + for itl in itls: + itl_port = itl['initiator']['port'] + if itl_port in initiatorPorts: + exportgroups.add(itl['export']['id']) + + for exportgroup in exportgroups: + self.exportgroup_obj.exportgroup_remove_volumes_by_uri( + exportgroup, + volid, + True, + None, + None, + None, + None) + else: + LOG.info(_LI( + "No export group found for the host: %s" + "; this is considered already detached."), hostname) + + return itls + + except coprhd_utils.CoprHdError as e: + raise coprhd_utils.CoprHdError( + coprhd_utils.CoprHdError.SOS_FAILURE_ERR, + (_("Detaching volume %(volumename)s from host" + " %(hostname)s failed: %(err)s") % + {'volumename': volumename, + 'hostname': hostname, + 'err': six.text_type(e.msg)}) + ) + + @retry_wrapper + def _find_device_info(self, volume, initiator_ports): + """Returns device_info in list of itls having the matched initiator. + + (there could be multiple targets, hence a list): + [ + { + "hlu":9, + "initiator":{...,"port":"20:00:00:25:B5:49:00:22"}, + "export":{...}, + "device":{...,"wwn":"600601602B802D00B62236585D0BE311"}, + "target":{...,"port":"50:06:01:6A:46:E0:72:EF"}, + "san_zone_name":"..." 
+ }, + { + "hlu":9, + "initiator":{...,"port":"20:00:00:25:B5:49:00:22"}, + "export":{...}, + "device":{...,"wwn":"600601602B802D00B62236585D0BE311"}, + "target":{...,"port":"50:06:01:62:46:E0:72:EF"}, + "san_zone_name":"..." + } + ] + """ + volumename = self._get_coprhd_volume_name(volume) + full_project_name = ("%s/%s" % (self.configuration.coprhd_tenant, + self.configuration.coprhd_project)) + vol_uri = self.volume_obj.volume_query(full_project_name, volumename) + + # The itl info shall be available at the first try since now export is + # a synchronous call. We are trying a few more times to accommodate + # any delay on filling in the itl info after the export task is + # completed. + + itls = [] + for x in range(MAX_RETRIES): + exports = self.volume_obj.get_exports_by_uri(vol_uri) + LOG.debug("Volume exports: ") + LOG.info(vol_uri) + LOG.debug(exports) + for itl in exports['itl']: + itl_port = itl['initiator']['port'] + if itl_port in initiator_ports: + found_device_number = itl['hlu'] + if (found_device_number is not None and + found_device_number != '-1'): + # 0 is a valid number for found_device_number. + # Only loop if it is None or -1 + LOG.debug("Found Device Number: %s", + found_device_number) + itls.append(itl) + + if itls: + break + else: + LOG.debug("Device Number not found yet." + " Retrying after 10 seconds...") + eventlet.sleep(INTERVAL_10_SEC) + + if itls is None: + # No device number found after 10 tries; return an empty itl + LOG.info(_LI( + "No device number has been found after 10 tries; " + "this likely indicates an unsuccessful attach of " + "volume volumename=%(volumename)s to" + " initiator initiator_ports=%(initiator_ports)s"), + {'volumename': volumename, + 'initiator_ports': initiator_ports}) + + return itls + + def _get_coprhd_cgid(self, cgid): + tagname = self.OPENSTACK_TAG + ":id:" + cgid + rslt = coprhd_utils.search_by_tag( + coprhd_cg.ConsistencyGroup.URI_SEARCH_CONSISTENCY_GROUPS_BY_TAG. + format(tagname), + self.configuration.coprhd_hostname, + self.configuration.coprhd_port) + + # if the result is empty, then search with the tagname as + # "OpenStack:obj_id" the openstack attribute for id can be obj_id + # instead of id. this depends on the version + if rslt is None or len(rslt) == 0: + tagname = self.OPENSTACK_TAG + ":obj_id:" + cgid + rslt = coprhd_utils.search_by_tag( + coprhd_cg.ConsistencyGroup + .URI_SEARCH_CONSISTENCY_GROUPS_BY_TAG. + format(tagname), + self.configuration.coprhd_hostname, + self.configuration.coprhd_port) + + if len(rslt) > 0: + rsltCg = self.consistencygroup_obj.show( + rslt[0], + self.configuration.coprhd_project, + self.configuration.coprhd_tenant) + return rsltCg['id'] + else: + raise coprhd_utils.CoprHdError( + coprhd_utils.CoprHdError.NOT_FOUND_ERR, + (_("Consistency Group %s not found") % cgid)) + + def _get_consistencygroup_name(self, consisgrp): + return consisgrp['name'] + + def _get_coprhd_snapshot_name(self, snapshot, resUri): + tagname = self.OPENSTACK_TAG + ":id:" + snapshot['id'] + rslt = coprhd_utils.search_by_tag( + coprhd_snap.Snapshot.URI_SEARCH_SNAPSHOT_BY_TAG.format(tagname), + self.configuration.coprhd_hostname, + self.configuration.coprhd_port) + + # if the result is empty, then search with the tagname + # as "OpenStack:obj_id" + # as snapshots will be having the obj_id instead of just id. 
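+        # Illustrative note (not part of the original patch): which tag is
+        # present depends on the OpenStack release, so a second search on
+        # "OpenStack:obj_id:<uuid>" is attempted below; if neither search
+        # returns a result, the code falls back to snapshot['name'].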
+        if not rslt:
+            tagname = self.OPENSTACK_TAG + ":obj_id:" + snapshot['id']
+            rslt = coprhd_utils.search_by_tag(
+                coprhd_snap.Snapshot.URI_SEARCH_SNAPSHOT_BY_TAG.format(
+                    tagname),
+                self.configuration.coprhd_hostname,
+                self.configuration.coprhd_port)
+
+        if rslt is None or len(rslt) == 0:
+            return snapshot['name']
+        else:
+            rsltSnap = self.snapshot_obj.snapshot_show_uri(
+                'block',
+                resUri,
+                rslt[0])
+            return rsltSnap['name']
+
+    def _get_coprhd_volume_name(self, vol, verbose=False):
+        tagname = self.OPENSTACK_TAG + ":id:" + vol['id']
+        rslt = coprhd_utils.search_by_tag(
+            coprhd_vol.Volume.URI_SEARCH_VOLUMES_BY_TAG.format(tagname),
+            self.configuration.coprhd_hostname,
+            self.configuration.coprhd_port)
+
+        # If the result is empty, search again with the tag name
+        # "OpenStack:obj_id"; depending on the OpenStack version the volume
+        # may carry obj_id instead of id.
+        if len(rslt) == 0:
+            tagname = self.OPENSTACK_TAG + ":obj_id:" + vol['id']
+            rslt = coprhd_utils.search_by_tag(
+                coprhd_vol.Volume.URI_SEARCH_VOLUMES_BY_TAG.format(tagname),
+                self.configuration.coprhd_hostname,
+                self.configuration.coprhd_port)
+
+        if len(rslt) > 0:
+            rsltVol = self.volume_obj.show_by_uri(rslt[0])
+
+            if verbose is True:
+                return {'volume_name': rsltVol['name'], 'volume_uri': rslt[0]}
+            else:
+                return rsltVol['name']
+        else:
+            raise coprhd_utils.CoprHdError(
+                coprhd_utils.CoprHdError.NOT_FOUND_ERR,
+                (_("Volume %s not found") % vol['display_name']))
+
+    def _get_resource_name(self, resource, truncate_name=False):
+        name = resource.get('display_name', None)
+
+        if not name:
+            name = resource['name']
+
+        if truncate_name and len(name) > 31:
+            name = self._id_to_base64(resource.id)
+
+        return name
+
+    def _get_vpool(self, volume):
+        vpool = {}
+        ctxt = context.get_admin_context()
+        type_id = volume['volume_type_id']
+        if type_id is not None:
+            volume_type = volume_types.get_volume_type(ctxt, type_id)
+            specs = volume_type.get('extra_specs')
+            for key, value in specs.items():
+                vpool[key] = value
+
+        return vpool
+
+    def _id_to_base64(self, id):
+        # Base64 encode the id to get a volume name of less than 32
+        # characters, due to a ScaleIO limitation.
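+        # Illustrative sketch (not part of the original patch): for a typical
+        # UUID such as "ea895a29-...", the dashes are stripped, the remaining
+        # 32 hex characters are decoded to 16 raw bytes and base64-encoded,
+        # yielding a 24-character name that fits within the 31-character
+        # limit; ids that are not valid hex fall through the except clause
+        # below and are base64-encoded as plain UTF-8 text instead.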
+ name = six.text_type(id).replace("-", "") + try: + name = base64.b16decode(name.upper()) + except (TypeError, binascii.Error): + pass + encoded_name = name + if isinstance(encoded_name, six.text_type): + encoded_name = encoded_name.encode('utf-8') + encoded_name = base64.b64encode(encoded_name) + if six.PY3: + encoded_name = encoded_name.decode('ascii') + LOG.debug("Converted id %(id)s to scaleio name %(name)s.", + {'id': id, 'name': encoded_name}) + return encoded_name + + def _raise_or_log_exception(self, err_code, coprhd_err_msg, log_err_msg): + + if err_code == coprhd_utils.CoprHdError.SOS_FAILURE_ERR: + raise coprhd_utils.CoprHdError( + coprhd_utils.CoprHdError.SOS_FAILURE_ERR, + coprhd_err_msg) + else: + with excutils.save_and_reraise_exception(): + LOG.exception(log_err_msg) + + @retry_wrapper + def _find_exportgroup(self, initiator_ports): + """Find export group with initiator ports same as given initiators.""" + foundgroupname = None + grouplist = self.exportgroup_obj.exportgroup_list( + self.configuration.coprhd_project, + self.configuration.coprhd_tenant) + for groupid in grouplist: + groupdetails = self.exportgroup_obj.exportgroup_show( + groupid, + self.configuration.coprhd_project, + self.configuration.coprhd_tenant) + if groupdetails is not None: + if groupdetails['inactive']: + continue + initiators = groupdetails['initiators'] + if initiators is not None: + inits_eg = set() + for initiator in initiators: + inits_eg.add(initiator['initiator_port']) + + if inits_eg <= set(initiator_ports): + foundgroupname = groupdetails['name'] + if foundgroupname is not None: + # Check the associated varray + if groupdetails['varray']: + varray_uri = groupdetails['varray']['id'] + varray_details = self.varray_obj.varray_show( + varray_uri) + if varray_details['name'] == ( + self.configuration.coprhd_varray): + LOG.debug( + "Found exportgroup %s", + foundgroupname) + break + + # Not the right varray + foundgroupname = None + + return foundgroupname + + @retry_wrapper + def _find_host(self, initiator_port): + """Find the host, if exists, to which the given initiator belong.""" + foundhostname = None + hosts = self.host_obj.list_all(self.configuration.coprhd_tenant) + for host in hosts: + initiators = self.host_obj.list_initiators(host['id']) + for initiator in initiators: + if initiator_port == initiator['name']: + foundhostname = host['name'] + break + + if foundhostname is not None: + break + + return foundhostname + + @retry_wrapper + def _host_exists(self, host_name): + """Check if Host object with given hostname already exists in CoprHD. 
+ + """ + hosts = self.host_obj.search_by_name(host_name) + + if len(hosts) > 0: + for host in hosts: + hostname = host['match'] + if host_name == hostname: + return hostname + return hostname + LOG.debug("no host found for: %s", host_name) + return None + + @retry_wrapper + def get_exports_count_by_initiators(self, initiator_ports): + """Fetches ITL map for a given list of initiator ports.""" + comma_delimited_initiator_list = ",".join(initiator_ports) + (s, h) = coprhd_utils.service_json_request( + self.configuration.coprhd_hostname, + self.configuration.coprhd_port, "GET", + URI_BLOCK_EXPORTS_FOR_INITIATORS.format( + comma_delimited_initiator_list), + None) + + export_itl_maps = coprhd_utils.json_decode(s) + + if export_itl_maps is None: + return 0 + + itls = export_itl_maps['itl'] + return itls.__len__() + + @retry_wrapper + def update_volume_stats(self): + """Retrieve stats info.""" + LOG.debug("Updating volume stats") + self.authenticate_user() + + try: + self.stats['consistencygroup_support'] = 'True' + vols = self.volume_obj.list_volumes( + self.configuration.coprhd_tenant + + "/" + + self.configuration.coprhd_project) + + vpairs = set() + if len(vols) > 0: + for vol in vols: + if vol: + vpair = (vol["vpool"]["id"], vol["varray"]["id"]) + if vpair not in vpairs: + vpairs.add(vpair) + + if len(vpairs) > 0: + free_gb = 0.0 + used_gb = 0.0 + for vpair in vpairs: + if vpair: + (s, h) = coprhd_utils.service_json_request( + self.configuration.coprhd_hostname, + self.configuration.coprhd_port, + "GET", + URI_VPOOL_VARRAY_CAPACITY.format(vpair[0], + vpair[1]), + body=None) + capacity = coprhd_utils.json_decode(s) + + free_gb += float(capacity["free_gb"]) + used_gb += float(capacity["used_gb"]) + + self.stats['free_capacity_gb'] = free_gb + self.stats['total_capacity_gb'] = free_gb + used_gb + self.stats['reserved_percentage'] = ( + self.configuration.reserved_percentage) + + return self.stats + + except coprhd_utils.CoprHdError: + with excutils.save_and_reraise_exception(): + LOG.exception(_LE("Update volume stats failed")) + + @retry_wrapper + def retype(self, ctxt, volume, new_type, diff, host): + """changes the vpool type.""" + self.authenticate_user() + volume_name = self._get_coprhd_volume_name(volume) + vpool_name = new_type['extra_specs']['CoprHD:VPOOL'] + + try: + full_project_name = "%s/%s" % ( + self.configuration.coprhd_tenant, + self.configuration.coprhd_project) + + task = self.volume_obj.update( + full_project_name, + volume_name, + vpool_name) + + self.volume_obj.check_for_sync(task['task'][0], True) + return True + except coprhd_utils.CoprHdError as e: + coprhd_err_msg = (_("Volume %(volume_name)s: update failed" + "\n%(err)s") % {'volume_name': volume_name, + 'err': six.text_type(e.msg)}) + + log_err_msg = (_LE("Volume : %s type update failed") % + volume_name) + self._raise_or_log_exception(e.err_code, coprhd_err_msg, + log_err_msg) diff --git a/cinder/volume/drivers/coprhd/fc.py b/cinder/volume/drivers/coprhd/fc.py new file mode 100644 index 00000000000..f334245e8f4 --- /dev/null +++ b/cinder/volume/drivers/coprhd/fc.py @@ -0,0 +1,222 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""Driver for EMC CoprHD FC volumes.""" + +import re + +from oslo_log import log as logging + +from cinder import interface +from cinder.volume import driver +from cinder.volume.drivers.coprhd import common as coprhd_common +from cinder.zonemanager import utils + +LOG = logging.getLogger(__name__) + + +@interface.volumedriver +class EMCCoprHDFCDriver(driver.FibreChannelDriver): + """CoprHD FC Driver.""" + + def __init__(self, *args, **kwargs): + super(EMCCoprHDFCDriver, self).__init__(*args, **kwargs) + self.common = self._get_common_driver() + + def _get_common_driver(self): + return coprhd_common.EMCCoprHDDriverCommon( + protocol='FC', + default_backend_name=self.__class__.__name__, + configuration=self.configuration) + + def check_for_setup_error(self): + self.common.check_for_setup_error() + + def create_volume(self, volume): + """Creates a Volume.""" + self.common.create_volume(volume, self) + self.common.set_volume_tags(volume, ['_obj_volume_type']) + + def create_cloned_volume(self, volume, src_vref): + """Creates a cloned Volume.""" + self.common.create_cloned_volume(volume, src_vref) + self.common.set_volume_tags(volume, ['_obj_volume_type']) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + self.common.create_volume_from_snapshot(snapshot, volume) + self.common.set_volume_tags(volume, ['_obj_volume_type']) + + def extend_volume(self, volume, new_size): + """expands the size of the volume.""" + self.common.expand_volume(volume, new_size) + + def delete_volume(self, volume): + """Deletes a volume.""" + self.common.delete_volume(volume) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + self.common.create_snapshot(snapshot) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + self.common.delete_snapshot(snapshot) + + def ensure_export(self, context, volume): + """Driver entry point to get the export info for an existing volume.""" + pass + + def create_export(self, context, volume, connector=None): + """Driver entry point to get the export info for a new volume.""" + pass + + def remove_export(self, context, volume): + """Driver exntry point to remove an export for a volume.""" + pass + + def create_consistencygroup(self, context, group): + """Creates a consistencygroup.""" + return self.common.create_consistencygroup(context, group) + + def update_consistencygroup(self, context, group, add_volumes, + remove_volumes): + """Updates volumes in consistency group.""" + return self.common.update_consistencygroup(group, add_volumes, + remove_volumes) + + def delete_consistencygroup(self, context, group, volumes): + """Deletes a consistency group.""" + return self.common.delete_consistencygroup(context, group, volumes) + + def create_cgsnapshot(self, context, cgsnapshot, snapshots): + """Creates a cgsnapshot.""" + return self.common.create_cgsnapshot(cgsnapshot, snapshots) + + def delete_cgsnapshot(self, context, cgsnapshot, snapshots): + """Deletes a cgsnapshot.""" + return self.common.delete_cgsnapshot(cgsnapshot, snapshots) + + def check_for_export(self, context, volume_id): + 
"""Make sure volume is exported.""" + pass + + @utils.AddFCZone + def initialize_connection(self, volume, connector): + """Initializes the connection and returns connection info.""" + + properties = {} + properties['volume_id'] = volume['id'] + properties['target_discovered'] = False + properties['target_wwn'] = [] + + init_ports = self._build_initport_list(connector) + itls = self.common.initialize_connection(volume, + 'FC', + init_ports, + connector['host']) + + target_wwns = None + initiator_target_map = None + + if itls: + properties['target_lun'] = itls[0]['hlu'] + target_wwns, initiator_target_map = ( + self._build_initiator_target_map(itls, connector)) + + properties['target_wwn'] = target_wwns + properties['initiator_target_map'] = initiator_target_map + + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + LOG.debug('FC properties: %s', properties) + return { + 'driver_volume_type': 'fibre_channel', + 'data': properties + } + + @utils.RemoveFCZone + def terminate_connection(self, volume, connector, **kwargs): + """Driver entry point to detach a volume from an instance.""" + + init_ports = self._build_initport_list(connector) + itls = self.common.terminate_connection(volume, + 'FC', + init_ports, + connector['host']) + + volumes_count = self.common.get_exports_count_by_initiators(init_ports) + if volumes_count > 0: + # return empty data + data = {'driver_volume_type': 'fibre_channel', 'data': {}} + else: + target_wwns, initiator_target_map = ( + self._build_initiator_target_map(itls, connector)) + data = { + 'driver_volume_type': 'fibre_channel', + 'data': { + 'target_wwn': target_wwns, + 'initiator_target_map': initiator_target_map}} + + LOG.debug('Return FC data: %s', data) + return data + + def _build_initiator_target_map(self, itls, connector): + + target_wwns = [] + for itl in itls: + target_wwns.append(itl['target']['port'].replace(':', '').lower()) + + initiator_wwns = connector['wwpns'] + initiator_target_map = {} + for initiator in initiator_wwns: + initiator_target_map[initiator] = target_wwns + + return target_wwns, initiator_target_map + + def _build_initport_list(self, connector): + init_ports = [] + for i in range(len(connector['wwpns'])): + initiator_port = ':'.join(re.findall( + '..', + connector['wwpns'][i])).upper() # Add ":" every two digits + init_ports.append(initiator_port) + + return init_ports + + def get_volume_stats(self, refresh=False): + """Get volume status. + + If 'refresh' is True, run update the stats first. 
+ """ + if refresh: + self.update_volume_stats() + + return self._stats + + def update_volume_stats(self): + """Retrieve stats info from virtual pool/virtual array.""" + LOG.debug("Updating volume stats") + self._stats = self.common.update_volume_stats() + + def retype(self, ctxt, volume, new_type, diff, host): + """Change the volume type.""" + return self.common.retype(ctxt, volume, new_type, diff, host) diff --git a/cinder/volume/drivers/coprhd/helpers/__init__.py b/cinder/volume/drivers/coprhd/helpers/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/volume/drivers/coprhd/helpers/authentication.py b/cinder/volume/drivers/coprhd/helpers/authentication.py new file mode 100644 index 00000000000..4383b1b3e76 --- /dev/null +++ b/cinder/volume/drivers/coprhd/helpers/authentication.py @@ -0,0 +1,216 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +try: + import cookielib as cookie_lib +except ImportError: + import http.cookiejar as cookie_lib +import socket + +import requests +from requests import exceptions +import six + +from cinder.i18n import _ +from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common + + +class Authentication(common.CoprHDResource): + + # Commonly used URIs for the 'Authentication' module + URI_SERVICES_BASE = '' + URI_AUTHENTICATION = '/login' + + HEADERS = {'Content-Type': 'application/json', + 'ACCEPT': 'application/json', 'X-EMC-REST-CLIENT': 'TRUE'} + + def authenticate_user(self, username, password): + """Makes REST API call to generate the authentication token. 
+ + Authentication token is generated for the specified user after + validation + + :param username: Name of the user + :param password: Password for the user + :returns: The authtoken + """ + + SEC_REDIRECT = 302 + SEC_AUTHTOKEN_HEADER = 'X-SDS-AUTH-TOKEN' + LB_API_PORT = 4443 + # Port on which load-balancer/reverse-proxy listens to all incoming + # requests for CoprHD REST APIs + APISVC_PORT = 8443 # Port on which apisvc listens to incoming requests + + cookiejar = cookie_lib.LWPCookieJar() + + url = ('https://%(ip)s:%(port)d%(uri)s' % + {'ip': self.ipaddr, 'port': self.port, + 'uri': self.URI_AUTHENTICATION}) + + try: + if self.port == APISVC_PORT: + login_response = requests.get( + url, headers=self.HEADERS, verify=False, + auth=(username, password), cookies=cookiejar, + allow_redirects=False, timeout=common.TIMEOUT_SEC) + if login_response.status_code == SEC_REDIRECT: + location = login_response.headers['Location'] + if not location: + raise common.CoprHdError( + common.CoprHdError.HTTP_ERR, (_("The redirect" + " location of the" + " authentication" + " service is not" + " provided"))) + # Make the second request + login_response = requests.get( + location, headers=self.HEADERS, verify=False, + cookies=cookiejar, allow_redirects=False, + timeout=common.TIMEOUT_SEC) + if (login_response.status_code != + requests.codes['unauthorized']): + raise common.CoprHdError( + common.CoprHdError.HTTP_ERR, (_("The" + " authentication" + " service failed" + " to reply with" + " 401"))) + + # Now provide the credentials + login_response = requests.get( + location, headers=self.HEADERS, + auth=(username, password), verify=False, + cookies=cookiejar, allow_redirects=False, + timeout=common.TIMEOUT_SEC) + if login_response.status_code != SEC_REDIRECT: + raise common.CoprHdError( + common.CoprHdError.HTTP_ERR, + (_("Access forbidden: Authentication required"))) + location = login_response.headers['Location'] + if not location: + raise common.CoprHdError( + common.CoprHdError.HTTP_ERR, + (_("The" + " authentication service failed to provide the" + " location of the service URI when redirecting" + " back"))) + authtoken = login_response.headers[SEC_AUTHTOKEN_HEADER] + if not authtoken: + details_str = self.extract_error_detail(login_response) + raise common.CoprHdError(common.CoprHdError.HTTP_ERR, + (_("The token is not" + " generated by" + " authentication service." 
+ "%s") % + details_str)) + # Make the final call to get the page with the token + new_headers = self.HEADERS + new_headers[SEC_AUTHTOKEN_HEADER] = authtoken + login_response = requests.get( + location, headers=new_headers, verify=False, + cookies=cookiejar, allow_redirects=False, + timeout=common.TIMEOUT_SEC) + if login_response.status_code != requests.codes['ok']: + raise common.CoprHdError( + common.CoprHdError.HTTP_ERR, (_( + "Login failure code: " + "%(statuscode)s Error: %(responsetext)s") % + {'statuscode': six.text_type( + login_response.status_code), + 'responsetext': login_response.text})) + elif self.port == LB_API_PORT: + login_response = requests.get( + url, headers=self.HEADERS, verify=False, + cookies=cookiejar, allow_redirects=False) + + if(login_response.status_code == + requests.codes['unauthorized']): + # Now provide the credentials + login_response = requests.get( + url, headers=self.HEADERS, auth=(username, password), + verify=False, cookies=cookiejar, allow_redirects=False) + authtoken = None + if SEC_AUTHTOKEN_HEADER in login_response.headers: + authtoken = login_response.headers[SEC_AUTHTOKEN_HEADER] + else: + raise common.CoprHdError( + common.CoprHdError.HTTP_ERR, + (_("Incorrect port number. Load balanced port is: " + "%(lb_api_port)s, api service port is: " + "%(apisvc_port)s") % + {'lb_api_port': LB_API_PORT, + 'apisvc_port': APISVC_PORT})) + + if not authtoken: + details_str = self.extract_error_detail(login_response) + raise common.CoprHdError( + common.CoprHdError.HTTP_ERR, + (_("The token is not generated by authentication service." + " %s") % details_str)) + + if login_response.status_code != requests.codes['ok']: + error_msg = None + if login_response.status_code == 401: + error_msg = _("Access forbidden: Authentication required") + elif login_response.status_code == 403: + error_msg = _("Access forbidden: You don't have" + " sufficient privileges to perform" + " this operation") + elif login_response.status_code == 500: + error_msg = _("Bourne internal server error") + elif login_response.status_code == 404: + error_msg = _( + "Requested resource is currently unavailable") + elif login_response.status_code == 405: + error_msg = (_("GET method is not supported by resource:" + " %s"), + url) + elif login_response.status_code == 503: + error_msg = _("Service temporarily unavailable:" + " The server is temporarily unable" + " to service your request") + else: + error_msg = login_response.text + raise common.CoprHdError(common.CoprHdError.HTTP_ERR, + (_("HTTP code: %(status_code)s" + ", response: %(reason)s" + " [%(error_msg)s]") % { + 'status_code': six.text_type( + login_response.status_code), + 'reason': six.text_type( + login_response.reason), + 'error_msg': six.text_type( + error_msg) + })) + except (exceptions.SSLError, socket.error, exceptions.ConnectionError, + exceptions.Timeout) as e: + raise common.CoprHdError( + common.CoprHdError.HTTP_ERR, six.text_type(e)) + + return authtoken + + def extract_error_detail(self, login_response): + details_str = "" + try: + if login_response.content: + json_object = common.json_decode(login_response.content) + if 'details' in json_object: + details_str = json_object['details'] + + return details_str + except common.CoprHdError: + return details_str diff --git a/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py b/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py new file mode 100644 index 00000000000..fb6c39eac90 --- /dev/null +++ b/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py @@ -0,0 +1,517 
@@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Contains some commonly used utility methods.""" +try: + import cookielib as cookie_lib +except ImportError: + import http.cookiejar as cookie_lib +import json +import re +import socket +import threading + +import oslo_serialization +from oslo_utils import units +import requests +from requests import exceptions +import six + +from cinder import exception +from cinder.i18n import _ +from cinder.volume.drivers.coprhd.helpers.urihelper import ( + singletonURIHelperInstance) + + +PROD_NAME = 'storageos' + +TIMEOUT_SEC = 20 # 20 SECONDS +IS_TASK_TIMEOUT = False + +global AUTH_TOKEN +AUTH_TOKEN = None + +TASK_TIMEOUT = 300 + + +def _decode_list(data): + rv = [] + for item in data: + if isinstance(item, unicode): + item = item.encode('utf-8') + elif isinstance(item, list): + item = _decode_list(item) + elif isinstance(item, dict): + item = _decode_dict(item) + rv.append(item) + return rv + + +def _decode_dict(data): + rv = {} + for key, value in data.items(): + if isinstance(key, unicode): + key = key.encode('utf-8') + if isinstance(value, unicode): + value = value.encode('utf-8') + elif isinstance(value, list): + value = _decode_list(value) + elif isinstance(value, dict): + value = _decode_dict(value) + rv[key] = value + return rv + + +def json_decode(rsp): + """Used to decode the JSON encoded response.""" + + o = "" + try: + o = json.loads(rsp, object_hook=_decode_dict) + except ValueError: + raise CoprHdError(CoprHdError.VALUE_ERR, + (_("Failed to recognize JSON payload:\n[%s]") % rsp)) + return o + + +def service_json_request(ip_addr, port, http_method, uri, body, + contenttype='application/json', customheaders=None): + """Used to make an HTTP request and get the response. 
+ + The message body is encoded in JSON format + + :param ip_addr: IP address or host name of the server + :param port: port number of the server on which it + is listening to HTTP requests + :param http_method: one of GET, POST, PUT, DELETE + :param uri: the request URI + :param body: the request payload + :returns: a tuple of two elements: (response body, response headers) + :raises: CoprHdError in case of HTTP errors with err_code 3 + """ + + SEC_AUTHTOKEN_HEADER = 'X-SDS-AUTH-TOKEN' + + headers = {'Content-Type': contenttype, + 'ACCEPT': 'application/json, application/octet-stream', + 'X-EMC-REST-CLIENT': 'TRUE'} + + if customheaders: + headers.update(customheaders) + + try: + protocol = "https://" + if port == 8080: + protocol = "http://" + url = protocol + ip_addr + ":" + six.text_type(port) + uri + + cookiejar = cookie_lib.LWPCookieJar() + headers[SEC_AUTHTOKEN_HEADER] = AUTH_TOKEN + + if http_method == 'GET': + response = requests.get(url, headers=headers, verify=False, + cookies=cookiejar) + elif http_method == 'POST': + response = requests.post(url, data=body, headers=headers, + verify=False, cookies=cookiejar) + elif http_method == 'PUT': + response = requests.put(url, data=body, headers=headers, + verify=False, cookies=cookiejar) + elif http_method == 'DELETE': + + response = requests.delete(url, headers=headers, verify=False, + cookies=cookiejar) + else: + raise CoprHdError(CoprHdError.HTTP_ERR, + (_("Unknown/Unsupported HTTP method: %s") % + http_method)) + + if (response.status_code == requests.codes['ok'] or + response.status_code == 202): + return (response.text, response.headers) + + error_msg = None + if response.status_code == 500: + responseText = json_decode(response.text) + errorDetails = "" + if 'details' in responseText: + errorDetails = responseText['details'] + error_msg = (_("CoprHD internal server error. 
Error details: %s"), + errorDetails) + elif response.status_code == 401: + error_msg = _("Access forbidden: Authentication required") + elif response.status_code == 403: + error_msg = "" + errorDetails = "" + errorDescription = "" + + responseText = json_decode(response.text) + + if 'details' in responseText: + errorDetails = responseText['details'] + error_msg = (_("%(error_msg)s Error details:" + " %(errorDetails)s"), + {'error_msg': error_msg, + 'errorDetails': errorDetails + }) + elif 'description' in responseText: + errorDescription = responseText['description'] + error_msg = (_("%(error_msg)s Error description:" + " %(errorDescription)s"), + {'error_msg': error_msg, + 'errorDescription': errorDescription + }) + else: + error_msg = _("Access forbidden: You don't have" + " sufficient privileges to perform this" + " operation") + + elif response.status_code == 404: + error_msg = "Requested resource not found" + elif response.status_code == 405: + error_msg = six.text_type(response.text) + elif response.status_code == 503: + error_msg = "" + errorDetails = "" + errorDescription = "" + + responseText = json_decode(response.text) + + if 'code' in responseText: + errorCode = responseText['code'] + error_msg = error_msg + "Error " + six.text_type(errorCode) + + if 'details' in responseText: + errorDetails = responseText['details'] + error_msg = error_msg + ": " + errorDetails + elif 'description' in responseText: + errorDescription = responseText['description'] + error_msg = error_msg + ": " + errorDescription + else: + error_msg = _("Service temporarily unavailable:" + " The server is temporarily unable to" + " service your request") + else: + error_msg = response.text + if isinstance(error_msg, unicode): + error_msg = error_msg.encode('utf-8') + raise CoprHdError(CoprHdError.HTTP_ERR, + (_("HTTP code: %(status_code)s" + ", %(reason)s" + " [%(error_msg)s]") % { + 'status_code': six.text_type( + response.status_code), + 'reason': six.text_type( + response.reason), + 'error_msg': six.text_type( + error_msg) + })) + except (CoprHdError, socket.error, exceptions.SSLError, + exceptions.ConnectionError, exceptions.TooManyRedirects, + exceptions.Timeout) as e: + raise CoprHdError(CoprHdError.HTTP_ERR, six.text_type(e)) + # TODO(Ravi) : Either following exception should have proper message or + # IOError should just be combined with the above statement + except IOError as e: + raise CoprHdError(CoprHdError.HTTP_ERR, six.text_type(e)) + + +def is_uri(name): + """Checks whether the name is a URI or not. + + :param name: Name of the resource + :returns: True if name is URI, False otherwise + """ + try: + (urn, prod, trailer) = name.split(':', 2) + return (urn == 'urn' and prod == PROD_NAME) + except Exception: + return False + + +def format_json_object(obj): + """Formats JSON object to make it readable by proper indentation. + + :param obj: JSON object + :returns: a string of formatted JSON object + """ + return oslo_serialization.jsonutils.dumps(obj, sort_keys=True, indent=3) + + +def get_parent_child_from_xpath(name): + """Returns the parent and child elements from XPath.""" + if '/' in name: + (pname, label) = name.rsplit('/', 1) + else: + pname = None + label = name + return (pname, label) + + +def to_bytes(in_str): + """Converts a size to bytes. 
+ + :param in_str: a number suffixed with a unit: {number}{unit} + units supported: + K, KB, k or kb - kilobytes + M, MB, m or mb - megabytes + G, GB, g or gb - gigabytes + T, TB, t or tb - terabytes + :returns: number of bytes + None; if input is incorrect + """ + match = re.search('^([0-9]+)([a-zA-Z]{0,2})$', in_str) + + if not match: + return None + + unit = match.group(2).upper() + value = match.group(1) + + size_count = int(value) + if unit in ['K', 'KB']: + multiplier = int(units.Ki) + elif unit in ['M', 'MB']: + multiplier = int(units.Mi) + elif unit in ['G', 'GB']: + multiplier = int(units.Gi) + elif unit in ['T', 'TB']: + multiplier = int(units.Ti) + elif unit == "": + return size_count + else: + return None + + size_in_bytes = int(size_count * multiplier) + return size_in_bytes + + +def get_list(json_object, parent_node_name, child_node_name=None): + """Returns a list of values from child_node_name. + + If child_node is not given, then it will retrieve list from parent node + """ + if not json_object: + return [] + + return_list = [] + if isinstance(json_object[parent_node_name], list): + for detail in json_object[parent_node_name]: + if child_node_name: + return_list.append(detail[child_node_name]) + else: + return_list.append(detail) + else: + if child_node_name: + return_list.append(json_object[parent_node_name][child_node_name]) + else: + return_list.append(json_object[parent_node_name]) + + return return_list + + +def get_node_value(json_object, parent_node_name, child_node_name=None): + """Returns value of given child_node. + + If child_node is not given, then value of parent node is returned + returns None: If json_object or parent_node is not given, + If child_node is not found under parent_node + """ + if not json_object: + return None + + if not parent_node_name: + return None + + detail = json_object[parent_node_name] + if not child_node_name: + return detail + + return_value = None + + if child_node_name in detail: + return_value = detail[child_node_name] + else: + return_value = None + + return return_value + + +def format_err_msg_and_raise(operation_type, component, + error_message, error_code): + """Method to format error message. + + :param operation_type: create, update, add, etc + :param component: storagesystem, vpool, etc + :param error_code: Error code from the API call + :param error_message: Detailed error message + """ + + formated_err_msg = (_("Error: Failed to %(operation_type)s" + " %(component)s") % + {'operation_type': operation_type, + 'component': component + }) + if error_message.startswith("\"\'") and error_message.endswith("\'\""): + # stripping the first 2 and last 2 characters, which are quotes. + error_message = error_message[2:len(error_message) - 2] + + formated_err_msg = formated_err_msg + "\nReason:" + error_message + raise CoprHdError(error_code, formated_err_msg) + + +def search_by_tag(resource_search_uri, ipaddr, port): + """Fetches the list of resources with a given tag. 
+ + :param resource_search_uri: The tag based search uri + Example: '/block/volumes/search?tag=tagexample1' + :param ipaddr: IP address of CoprHD host + :param port: Port number + """ + # check if the URI passed has both project and name parameters + strUri = six.text_type(resource_search_uri) + if strUri.__contains__("search") and strUri.__contains__("?tag="): + # Get the project URI + + (s, h) = service_json_request( + ipaddr, port, "GET", + resource_search_uri, None) + + o = json_decode(s) + if not o: + return None + + resources = get_node_value(o, "resource") + + resource_uris = [] + for resource in resources: + resource_uris.append(resource["id"]) + return resource_uris + else: + raise CoprHdError(CoprHdError.VALUE_ERR, (_("Search URI %s" + " is not in the expected" + " format, it should end" + " with ?tag={0}") + % strUri)) + +# Timeout handler for synchronous operations + + +def timeout_handler(): + global IS_TASK_TIMEOUT + IS_TASK_TIMEOUT = True + + +# Blocks the operation until the task is complete/error out/timeout +def block_until_complete(component_type, + resource_uri, + task_id, + ipAddr, + port, + synctimeout=0): + global IS_TASK_TIMEOUT + IS_TASK_TIMEOUT = False + if synctimeout: + t = threading.Timer(synctimeout, timeout_handler) + else: + synctimeout = TASK_TIMEOUT + t = threading.Timer(synctimeout, timeout_handler) + t.start() + while True: + out = get_task_by_resourceuri_and_taskId( + component_type, resource_uri, task_id, ipAddr, port) + + if out: + if out["state"] == "ready": + + # cancel the timer and return + t.cancel() + break + + # if the status of the task is 'error' then cancel the timer + # and raise exception + if out["state"] == "error": + # cancel the timer + t.cancel() + if ("service_error" in out and + "details" in out["service_error"]): + error_message = out["service_error"]["details"] + raise CoprHdError(CoprHdError.VALUE_ERR, + (_("Task: %(task_id)s" + " is failed with" + " error: %(error_message)s") % + {'task_id': task_id, + 'error_message': error_message + })) + + if IS_TASK_TIMEOUT: + IS_TASK_TIMEOUT = False + raise CoprHdError(CoprHdError.TIME_OUT, + (_("Task did not complete in %d secs." + " Operation timed out. Task in CoprHD" + " will continue") % synctimeout)) + + return + + +def get_task_by_resourceuri_and_taskId(component_type, resource_uri, + task_id, ipAddr, port): + """Returns the single task details.""" + + task_uri_constant = singletonURIHelperInstance.getUri( + component_type, "task") + (s, h) = service_json_request( + ipAddr, port, "GET", + task_uri_constant.format(resource_uri, task_id), None) + if not s: + return None + o = json_decode(s) + return o + + +class CoprHdError(exception.VolumeBackendAPIException): + + """Custom exception class used to report logical errors. + + Attributes: + err_code - String error code + msg - String text + """ + SOS_FAILURE_ERR = 1 + CMD_LINE_ERR = 2 + HTTP_ERR = 3 + VALUE_ERR = 4 + NOT_FOUND_ERR = 1 + ENTRY_ALREADY_EXISTS_ERR = 5 + MAX_COUNT_REACHED = 6 + TIME_OUT = 7 + + def __init__(self, err_code, msg): + self.err_code = err_code + self.msg = msg + + def __str__(self): + return repr(self.msg) + + +class CoprHDResource(object): + + def __init__(self, ipaddr, port): + """Constructor: takes IP address and port of the CoprHD instance. 
+ + These are needed to make http requests for REST API + """ + self.ipaddr = ipaddr + self.port = port diff --git a/cinder/volume/drivers/coprhd/helpers/consistencygroup.py b/cinder/volume/drivers/coprhd/helpers/consistencygroup.py new file mode 100644 index 00000000000..0723e070d29 --- /dev/null +++ b/cinder/volume/drivers/coprhd/helpers/consistencygroup.py @@ -0,0 +1,220 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import oslo_serialization + +from cinder.i18n import _ +from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common +from cinder.volume.drivers.coprhd.helpers import project + + +class ConsistencyGroup(common.CoprHDResource): + + URI_CONSISTENCY_GROUP = "/block/consistency-groups" + URI_CONSISTENCY_GROUPS_INSTANCE = URI_CONSISTENCY_GROUP + "/{0}" + URI_CONSISTENCY_GROUPS_DEACTIVATE = (URI_CONSISTENCY_GROUPS_INSTANCE + + "/deactivate") + URI_CONSISTENCY_GROUPS_SEARCH = ( + '/block/consistency-groups/search?project={0}') + URI_SEARCH_CONSISTENCY_GROUPS_BY_TAG = ( + '/block/consistency-groups/search?tag={0}') + URI_CONSISTENCY_GROUP_TAGS = ( + '/block/consistency-groups/{0}/tags') + + def list(self, project_name, tenant): + """This function gives list of comma separated consistency group uris. + + :param project_name: Name of the project path + :param tenant: Name of the tenant + :returns: list of consistency group ids separated by comma + """ + if tenant is None: + tenant = "" + projobj = project.Project(self.ipaddr, self.port) + fullproj = tenant + "/" + project_name + projuri = projobj.project_query(fullproj) + + (s, h) = common.service_json_request( + self.ipaddr, self.port, "GET", + self.URI_CONSISTENCY_GROUPS_SEARCH.format(projuri), None) + o = common.json_decode(s) + if not o: + return [] + + congroups = [] + resources = common.get_node_value(o, "resource") + for resource in resources: + congroups.append(resource["id"]) + + return congroups + + def show(self, name, project, tenant): + """This function will display the consistency group with details. + + :param name : Name of the consistency group + :param project: Name of the project + :param tenant: Name of the tenant + :returns: details of consistency group + """ + uri = self.consistencygroup_query(name, project, tenant) + (s, h) = common.service_json_request( + self.ipaddr, self.port, "GET", + self.URI_CONSISTENCY_GROUPS_INSTANCE.format(uri), None) + o = common.json_decode(s) + if o['inactive']: + return None + return o + + def consistencygroup_query(self, name, project, tenant): + """This function will return consistency group id. 
+ + :param name : Name/id of the consistency group + :param project: Name of the project + :param tenant: Name of the tenant + :returns: id of the consistency group + """ + if common.is_uri(name): + return name + + uris = self.list(project, tenant) + for uri in uris: + congroup = self.show(uri, project, tenant) + if congroup and congroup['name'] == name: + return congroup['id'] + raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, + (_("Consistency Group %s: not found") % name)) + + # Blocks the operation until the task is complete/error out/timeout + def check_for_sync(self, result, sync, synctimeout=0): + if len(result["resource"]) > 0: + resource = result["resource"] + return ( + common.block_until_complete("consistencygroup", resource["id"], + result["id"], self.ipaddr, + self.port, synctimeout) + ) + else: + raise common.CoprHdError( + common.CoprHdError.SOS_FAILURE_ERR, + _("error: task list is empty, no task response found")) + + def create(self, name, project_name, tenant): + """This function will create consistency group with the given name. + + :param name : Name of the consistency group + :param project_name: Name of the project path + :param tenant: Container tenant name + :returns: status of creation + """ + # check for existence of consistency group. + try: + status = self.show(name, project_name, tenant) + except common.CoprHdError as e: + if e.err_code == common.CoprHdError.NOT_FOUND_ERR: + if tenant is None: + tenant = "" + fullproj = tenant + "/" + project_name + projobj = project.Project(self.ipaddr, self.port) + projuri = projobj.project_query(fullproj) + + parms = {'name': name, 'project': projuri, } + body = oslo_serialization.jsonutils.dumps(parms) + + (s, h) = common.service_json_request( + self.ipaddr, self.port, "POST", + self.URI_CONSISTENCY_GROUP, body) + + o = common.json_decode(s) + return o + else: + raise + if status: + common.format_err_msg_and_raise( + "create", "consistency group", + (_("consistency group with name: %s already exists") % name), + common.CoprHdError.ENTRY_ALREADY_EXISTS_ERR) + + def delete(self, name, project, tenant, coprhdonly=False): + """This function marks a particular consistency group as delete. + + :param name: Name of the consistency group + :param project: Name of the project + :param tenant: Name of the tenant + :returns: status of the delete operation + false, incase it fails to do delete + """ + params = '' + if coprhdonly is True: + params += "?type=" + 'CoprHD_ONLY' + uri = self.consistencygroup_query(name, project, tenant) + (s, h) = common.service_json_request( + self.ipaddr, self.port, + "POST", + self.URI_CONSISTENCY_GROUPS_DEACTIVATE.format(uri) + params, + None) + return + + def update(self, uri, project, tenant, add_volumes, remove_volumes, + sync, synctimeout=0): + """Function used to add or remove volumes from consistency group. + + It will update the consistency group with given volumes + + :param uri : URI of the consistency group + :param project : Name of the project path + :param tenant : Container tenant name + :param add_volumes : volumes to be added to the consistency group + :param remove_volumes: volumes to be removed from CG + :param sync : synchronous request + :param synctimeout : Query for task status for "synctimeout" secs. 
+ If the task doesn't complete in synctimeout + secs, an exception is thrown + :returns: status of creation + """ + if tenant is None: + tenant = "" + + parms = [] + add_voluris = [] + remove_voluris = [] + from cinder.volume.drivers.coprhd.helpers.volume import Volume + volobj = Volume(self.ipaddr, self.port) + if add_volumes: + for volname in add_volumes: + full_project_name = tenant + "/" + project + add_voluris.append( + volobj.volume_query(full_project_name, volname)) + volumes = {'volume': add_voluris} + parms = {'add_volumes': volumes} + + if remove_volumes: + for volname in remove_volumes: + full_project_name = tenant + "/" + project + remove_voluris.append( + volobj.volume_query(full_project_name, volname)) + volumes = {'volume': remove_voluris} + parms = {'remove_volumes': volumes} + + body = oslo_serialization.jsonutils.dumps(parms) + (s, h) = common.service_json_request( + self.ipaddr, self.port, "PUT", + self.URI_CONSISTENCY_GROUPS_INSTANCE.format(uri), + body) + + o = common.json_decode(s) + if sync: + return self.check_for_sync(o, sync, synctimeout) + else: + return o diff --git a/cinder/volume/drivers/coprhd/helpers/exportgroup.py b/cinder/volume/drivers/coprhd/helpers/exportgroup.py new file mode 100644 index 00000000000..fc36f919858 --- /dev/null +++ b/cinder/volume/drivers/coprhd/helpers/exportgroup.py @@ -0,0 +1,303 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
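+# Illustrative note (not part of the original patch): the export group update
+# helpers below (exportgroup_add_volumes and
+# exportgroup_remove_volumes_by_uri) both PUT a "volume_changes" body against
+# URI_EXPORT_GROUP_UPDATE, roughly of the form
+#     {"volume_changes": {"add": [{"id": "<volume URI>", "lun": "<lun>"}]}}
+# or
+#     {"volume_changes": {"remove": ["<volume URI>"]}}
+# and then poll the returned task via check_for_sync() when sync is True.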
+ +import oslo_serialization + +from cinder.i18n import _ +from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common +from cinder.volume.drivers.coprhd.helpers import host +from cinder.volume.drivers.coprhd.helpers import project +from cinder.volume.drivers.coprhd.helpers import virtualarray +from cinder.volume.drivers.coprhd.helpers import volume + + +class ExportGroup(common.CoprHDResource): + + URI_EXPORT_GROUP = "/block/exports" + URI_EXPORT_GROUPS_SHOW = URI_EXPORT_GROUP + "/{0}" + URI_EXPORT_GROUP_SEARCH = '/block/exports/search' + URI_EXPORT_GROUP_UPDATE = '/block/exports/{0}' + + def exportgroup_remove_volumes_by_uri(self, exportgroup_uri, + volume_id_list, sync=False, + tenantname=None, projectname=None, + cg=None, synctimeout=0): + """Remove volumes from the exportgroup, given the uris of volume.""" + + volume_list = volume_id_list + parms = {} + + parms['volume_changes'] = self._remove_list(volume_list) + o = self.send_json_request(exportgroup_uri, parms) + return self.check_for_sync(o, sync, synctimeout) + + def _remove_list(self, uris): + resChanges = {} + if not isinstance(uris, list): + resChanges['remove'] = [uris] + else: + resChanges['remove'] = uris + return resChanges + + def send_json_request(self, exportgroup_uri, param): + body = oslo_serialization.jsonutils.dumps(param) + (s, h) = common.service_json_request( + self.ipaddr, self.port, "PUT", + self.URI_EXPORT_GROUP_UPDATE.format(exportgroup_uri), body) + return common.json_decode(s) + + def check_for_sync(self, result, sync, synctimeout=0): + if sync: + if len(result["resource"]) > 0: + resource = result["resource"] + return ( + common.block_until_complete("export", resource["id"], + result["id"], self.ipaddr, + self.port, synctimeout) + ) + else: + raise common.CoprHdError( + common.CoprHdError.SOS_FAILURE_ERR, _( + "error: task list is empty, no task response found")) + else: + return result + + def exportgroup_list(self, project_name, tenant): + """This function gives list of export group uris separated by comma. + + :param project_name: Name of the project path + :param tenant: Name of the tenant + :returns: list of export group ids separated by comma + """ + if tenant is None: + tenant = "" + projobj = project.Project(self.ipaddr, self.port) + fullproj = tenant + "/" + project_name + projuri = projobj.project_query(fullproj) + + uri = self.URI_EXPORT_GROUP_SEARCH + + if '?' in uri: + uri += '&project=' + projuri + else: + uri += '?project=' + projuri + + (s, h) = common.service_json_request(self.ipaddr, self.port, "GET", + uri, None) + o = common.json_decode(s) + if not o: + return [] + + exportgroups = [] + resources = common.get_node_value(o, "resource") + for resource in resources: + exportgroups.append(resource["id"]) + + return exportgroups + + def exportgroup_show(self, name, project, tenant, varray=None): + """This function displays the Export group with details. 
+ + :param name: Name of the export group + :param project: Name of the project + :param tenant: Name of the tenant + :returns: Details of export group + """ + varrayuri = None + if varray: + varrayObject = virtualarray.VirtualArray( + self.ipaddr, self.port) + varrayuri = varrayObject.varray_query(varray) + uri = self.exportgroup_query(name, project, tenant, varrayuri) + (s, h) = common.service_json_request( + self.ipaddr, + self.port, + "GET", + self.URI_EXPORT_GROUPS_SHOW.format(uri), None) + o = common.json_decode(s) + if o['inactive']: + return None + + return o + + def exportgroup_create(self, name, project_name, tenant, varray, + exportgrouptype, export_destination=None): + """This function creates the Export group with given name. + + :param name: Name of the export group + :param project_name: Name of the project path + :param tenant: Container tenant name + :param varray: Name of the virtual array + :param exportgrouptype: Type of the export group. Ex:Host etc + :returns: status of creation + """ + # check for existence of export group. + try: + status = self.exportgroup_show(name, project_name, tenant) + except common.CoprHdError as e: + if e.err_code == common.CoprHdError.NOT_FOUND_ERR: + if tenant is None: + tenant = "" + + fullproj = tenant + "/" + project_name + projObject = project.Project(self.ipaddr, self.port) + projuri = projObject.project_query(fullproj) + + varrayObject = virtualarray.VirtualArray( + self.ipaddr, self.port) + nhuri = varrayObject.varray_query(varray) + + parms = { + 'name': name, + 'project': projuri, + 'varray': nhuri, + 'type': exportgrouptype + } + + if exportgrouptype and export_destination: + host_obj = host.Host(self.ipaddr, self.port) + host_uri = host_obj.query_by_name(export_destination) + parms['hosts'] = [host_uri] + + body = oslo_serialization.jsonutils.dumps(parms) + (s, h) = common.service_json_request(self.ipaddr, + self.port, "POST", + self.URI_EXPORT_GROUP, + body) + + o = common.json_decode(s) + return o + else: + raise + + if status: + raise common.CoprHdError( + common.CoprHdError.ENTRY_ALREADY_EXISTS_ERR, (_( + "Export group with name %s" + " already exists") % name)) + + def exportgroup_query(self, name, project, tenant, varrayuri=None): + """Makes REST API call to query the exportgroup by name. + + :param name: Name/id of the export group + :param project: Name of the project + :param tenant: Name of the tenant + :param varrayuri: URI of the virtual array + :returns: id of the export group + """ + if common.is_uri(name): + return name + + uris = self.exportgroup_list(project, tenant) + for uri in uris: + exportgroup = self.exportgroup_show(uri, project, tenant) + if exportgroup and exportgroup['name'] == name: + if varrayuri: + varrayobj = exportgroup['varray'] + if varrayobj['id'] == varrayuri: + return exportgroup['id'] + else: + continue + else: + return exportgroup['id'] + raise common.CoprHdError( + common.CoprHdError.NOT_FOUND_ERR, + (_("Export Group %s: not found") % name)) + + def exportgroup_add_volumes(self, sync, exportgroupname, tenantname, + maxpaths, minpaths, pathsperinitiator, + projectname, volumenames, + cg=None, synctimeout=0, varray=None): + """Add volume to export group. 
+ + :param sync : synchronous request + :param exportgroupname : Name/id of the export group + :param tenantname : tenant name + :param maxpaths : Maximum number of paths + :param minpaths : Minimum number of paths + :param pathsperinitiator : Paths per initiator + :param projectname : name of project + :param volumenames : names of volumes that needs + to be added to exportgroup + :param cg : consistency group + :param synctimeout : Query for task status for "synctimeout" secs + If the task doesn't complete in synctimeout + secs, an exception is thrown + :param varray : Name of varray + :returns: action result + """ + varrayuri = None + if varray: + varrayObject = virtualarray.VirtualArray( + self.ipaddr, self.port) + varrayuri = varrayObject.varray_query(varray) + + exportgroup_uri = self.exportgroup_query(exportgroupname, + projectname, + tenantname, + varrayuri) + + # get volume uri + if tenantname is None: + tenantname = "" + # List of volumes + volume_list = [] + + if volumenames: + volume_list = self._get_resource_lun_tuple( + volumenames, "volumes", None, tenantname, + projectname, None) + + parms = {} + # construct the body + + volChanges = {} + volChanges['add'] = volume_list + parms['volume_changes'] = volChanges + + o = self.send_json_request(exportgroup_uri, parms) + return self.check_for_sync(o, sync, synctimeout) + + def _get_resource_lun_tuple(self, resources, resType, baseResUri, + tenantname, projectname, blockTypeName): + """Function to validate input volumes and return list of ids and luns. + + """ + copyEntries = [] + volumeObject = volume.Volume(self.ipaddr, self.port) + for copy in resources: + copyParam = [] + try: + copyParam = copy.split(":") + except Exception: + raise common.CoprHdError( + common.CoprHdError.CMD_LINE_ERR, + (_("Please provide valid format volume:" + " lun for parameter %s") % + resType)) + copy = dict() + if not len(copyParam): + raise common.CoprHdError( + common.CoprHdError.CMD_LINE_ERR, + (_("Please provide at least one volume for parameter %s") % + resType)) + if resType == "volumes": + full_project_name = tenantname + "/" + projectname + copy['id'] = volumeObject.volume_query( + full_project_name, copyParam[0]) + if len(copyParam) > 1: + copy['lun'] = copyParam[1] + copyEntries.append(copy) + return copyEntries diff --git a/cinder/volume/drivers/coprhd/helpers/host.py b/cinder/volume/drivers/coprhd/helpers/host.py new file mode 100644 index 00000000000..e728b3a6504 --- /dev/null +++ b/cinder/volume/drivers/coprhd/helpers/host.py @@ -0,0 +1,104 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
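_get_resource_lun_tuple above accepts entries in "volume" or "volume:lun" form. A standalone sketch of just the string handling follows; the real helper additionally resolves each volume name to its CoprHD URI via volume_query() and stores it under 'id', and the function name and sample specs here are hypothetical.

    def split_volume_lun(spec):
        """Split 'volume' or 'volume:lun' into its parts."""
        parts = spec.split(":")
        entry = {"name": parts[0]}
        if len(parts) > 1:
            entry["lun"] = parts[1]
        return entry

    print([split_volume_lun(s) for s in ["vol-1", "vol-2:5"]])
    # [{'name': 'vol-1'}, {'name': 'vol-2', 'lun': '5'}]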
+ +from cinder.i18n import _ +from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common +from cinder.volume.drivers.coprhd.helpers import tenant + + +class Host(common.CoprHDResource): + + # All URIs for the Host operations + URI_HOST_DETAILS = "/compute/hosts/{0}" + URI_HOST_LIST_INITIATORS = "/compute/hosts/{0}/initiators" + URI_COMPUTE_HOST = "/compute/hosts" + URI_HOSTS_SEARCH_BY_NAME = "/compute/hosts/search?name={0}" + + def query_by_name(self, host_name, tenant_name=None): + """Search host matching host_name and tenant if tenant_name provided. + + tenant_name is optional + """ + hostList = self.list_all(tenant_name) + for host in hostList: + hostUri = host['id'] + hostDetails = self.show_by_uri(hostUri) + if hostDetails: + if hostDetails['name'] == host_name: + return hostUri + + raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, (_( + "Host with name: %s not found") % host_name)) + + def list_initiators(self, host_name): + """Lists all initiators for the given host. + + :param host_name: The name of the host + """ + if not common.is_uri(host_name): + hostUri = self.query_by_name(host_name, None) + else: + hostUri = host_name + + (s, h) = common.service_json_request( + self.ipaddr, self.port, "GET", + Host.URI_HOST_LIST_INITIATORS.format(hostUri), + None) + o = common.json_decode(s) + + if not o or "initiator" not in o: + return [] + + return common.get_node_value(o, 'initiator') + + def list_all(self, tenant_name): + """Gets the ids and self links for all compute elements.""" + restapi = self.URI_COMPUTE_HOST + tenant_obj = tenant.Tenant(self.ipaddr, self.port) + if tenant_name is None: + tenant_uri = tenant_obj.tenant_getid() + else: + tenant_uri = tenant_obj.tenant_query(tenant_name) + restapi = restapi + "?tenant=" + tenant_uri + + (s, h) = common.service_json_request( + self.ipaddr, self.port, + "GET", + restapi, + None) + o = common.json_decode(s) + return o['host'] + + def show_by_uri(self, uri): + """Makes REST API call to retrieve Host details based on its UUID.""" + (s, h) = common.service_json_request(self.ipaddr, self.port, "GET", + Host.URI_HOST_DETAILS.format(uri), + None) + o = common.json_decode(s) + inactive = common.get_node_value(o, 'inactive') + + if inactive: + return None + return o + + def search_by_name(self, host_name): + """Search host by its name.""" + (s, h) = common.service_json_request( + self.ipaddr, self.port, "GET", + self.URI_HOSTS_SEARCH_BY_NAME.format(host_name), None) + o = common.json_decode(s) + if not o: + return [] + return common.get_node_value(o, "resource") diff --git a/cinder/volume/drivers/coprhd/helpers/project.py b/cinder/volume/drivers/coprhd/helpers/project.py new file mode 100644 index 00000000000..45f662f059b --- /dev/null +++ b/cinder/volume/drivers/coprhd/helpers/project.py @@ -0,0 +1,88 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
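A hypothetical usage sketch of the Host helper above; the CoprHD address, port and host name are placeholders, and it assumes an already authenticated CoprHD session (see the authentication helper).

    from cinder.volume.drivers.coprhd.helpers import host

    # Placeholder endpoint and host name.
    host_obj = host.Host("coprhd.example.com", 4443)

    # Resolve a host name to its CoprHD URI, then list its initiators.
    host_uri = host_obj.query_by_name("compute-1.example.com")
    initiators = host_obj.list_initiators(host_uri)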
+
+
+from cinder.i18n import _
+from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common
+from cinder.volume.drivers.coprhd.helpers import tenant
+
+
+class Project(common.CoprHDResource):
+
+    # Commonly used URIs for the 'Project' module
+    URI_PROJECT_LIST = '/tenants/{0}/projects'
+    URI_PROJECT = '/projects/{0}'
+
+    def project_query(self, name):
+        """Retrieves UUID of project based on its name.
+
+        :param name: name of project
+        :returns: UUID of project
+        :raises: CoprHdError - when project name is not found
+        """
+        if common.is_uri(name):
+            return name
+        (tenant_name, project_name) = common.get_parent_child_from_xpath(name)
+
+        tenant_obj = tenant.Tenant(self.ipaddr, self.port)
+
+        tenant_uri = tenant_obj.tenant_query(tenant_name)
+        projects = self.project_list(tenant_uri)
+        if projects:
+            for project in projects:
+                if project:
+                    project_detail = self.project_show_by_uri(
+                        project['id'])
+                    if(project_detail and
+                       project_detail['name'] == project_name):
+                        return project_detail['id']
+        raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, (_(
+            "Project: %s not found") % project_name))
+
+    def project_list(self, tenant_name):
+        """Makes REST API call and retrieves projects based on tenant UUID.
+
+        :param tenant_name: Name of the tenant
+        :returns: List of project UUIDs in JSON response payload
+        """
+        tenant_obj = tenant.Tenant(self.ipaddr, self.port)
+        tenant_uri = tenant_obj.tenant_query(tenant_name)
+        (s, h) = common.service_json_request(self.ipaddr, self.port, "GET",
+                                             Project.URI_PROJECT_LIST.format(
+                                                 tenant_uri),
+                                             None)
+        o = common.json_decode(s)
+
+        if "project" in o:
+            return common.get_list(o, 'project')
+        return []
+
+    def project_show_by_uri(self, uri):
+        """Makes REST API call and retrieves project details based on UUID.
+
+        :param uri: UUID of project
+        :returns: Project details in JSON response payload
+        """
+
+        (s, h) = common.service_json_request(self.ipaddr, self.port,
+                                             "GET",
+                                             Project.URI_PROJECT.format(uri),
+                                             None)
+        o = common.json_decode(s)
+        inactive = common.get_node_value(o, 'inactive')
+        if inactive:
+            return None
+
+        return o
diff --git a/cinder/volume/drivers/coprhd/helpers/snapshot.py b/cinder/volume/drivers/coprhd/helpers/snapshot.py
new file mode 100644
index 00000000000..1a31c2eafe6
--- /dev/null
+++ b/cinder/volume/drivers/coprhd/helpers/snapshot.py
@@ -0,0 +1,314 @@
+# Copyright (c) 2016 EMC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
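A hypothetical usage sketch of the Project helper defined above; the address, port and the "tenant/project" path are placeholders and an authenticated CoprHD session is assumed.

    from cinder.volume.drivers.coprhd.helpers import project

    proj_obj = project.Project("coprhd.example.com", 4443)

    # project_query() accepts either a project URI or a "tenant/project" path.
    project_uri = proj_obj.project_query("Provider Tenant/cinder-project")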
+ +import threading + +import oslo_serialization + +from cinder.i18n import _ +from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common +from cinder.volume.drivers.coprhd.helpers import consistencygroup +from cinder.volume.drivers.coprhd.helpers import volume + + +class Snapshot(common.CoprHDResource): + + # Commonly used URIs for the 'Snapshot' module + URI_SNAPSHOTS = '/{0}/snapshots/{1}' + URI_BLOCK_SNAPSHOTS = '/block/snapshots/{0}' + URI_SEARCH_SNAPSHOT_BY_TAG = '/block/snapshots/search?tag={0}' + URI_SNAPSHOT_LIST = '/{0}/{1}/{2}/protection/snapshots' + URI_SNAPSHOT_TASKS_BY_OPID = '/vdc/tasks/{0}' + URI_RESOURCE_DEACTIVATE = '{0}/deactivate' + URI_CONSISTENCY_GROUP = "/block/consistency-groups" + URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE = ( + URI_CONSISTENCY_GROUP + "/{0}/protection/snapshots/{1}") + URI_CONSISTENCY_GROUPS_SNAPSHOT_DEACTIVATE = ( + URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE + "/deactivate") + URI_BLOCK_SNAPSHOTS_TAG = URI_BLOCK_SNAPSHOTS + '/tags' + + VOLUMES = 'volumes' + CG = 'consistency-groups' + BLOCK = 'block' + + is_timeout = False + timeout = 300 + + def snapshot_list_uri(self, otype, otypename, ouri): + """Makes REST API call to list snapshots under a volume. + + :param otype : block + :param otypename : either volume or consistency-group should be + provided + :param ouri : uri of volume or consistency-group + :returns: list of snapshots + """ + (s, h) = common.service_json_request( + self.ipaddr, self.port, + "GET", + Snapshot.URI_SNAPSHOT_LIST.format(otype, otypename, ouri), None) + o = common.json_decode(s) + return o['snapshot'] + + def snapshot_show_uri(self, otype, resource_uri, suri): + """Retrieves snapshot details based on snapshot Name or Label. + + :param otype : block + :param suri : uri of the Snapshot. 
+ :param resource_uri: uri of the source resource + :returns: Snapshot details in JSON response payload + """ + if(resource_uri is not None and + resource_uri.find('BlockConsistencyGroup') > 0): + (s, h) = common.service_json_request( + self.ipaddr, self.port, + "GET", + Snapshot.URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE.format( + resource_uri, + suri), + None) + else: + (s, h) = common.service_json_request( + self.ipaddr, self.port, + "GET", + Snapshot.URI_SNAPSHOTS.format(otype, suri), None) + + return common.json_decode(s) + + def snapshot_query(self, storageres_type, + storageres_typename, resuri, snapshot_name): + if resuri is not None: + uris = self.snapshot_list_uri( + storageres_type, + storageres_typename, + resuri) + for uri in uris: + snapshot = self.snapshot_show_uri( + storageres_type, + resuri, + uri['id']) + if (False == common.get_node_value(snapshot, 'inactive') and + snapshot['name'] == snapshot_name): + return snapshot['id'] + + raise common.CoprHdError( + common.CoprHdError.SOS_FAILURE_ERR, + (_("snapshot with the name: " + "%s Not Found") % snapshot_name)) + + def snapshot_show_task_opid(self, otype, snap, taskid): + (s, h) = common.service_json_request( + self.ipaddr, self.port, + "GET", + Snapshot.URI_SNAPSHOT_TASKS_BY_OPID.format(taskid), + None) + if (not s): + return None + o = common.json_decode(s) + return o + + # Blocks the operation until the task is complete/error out/timeout + def block_until_complete(self, storageres_type, resuri, + task_id, synctimeout=0): + if synctimeout: + t = threading.Timer(synctimeout, common.timeout_handler) + else: + synctimeout = self.timeout + t = threading.Timer(synctimeout, common.timeout_handler) + t.start() + while True: + out = self.snapshot_show_task_opid( + storageres_type, resuri, task_id) + + if out: + if out["state"] == "ready": + # cancel the timer and return + t.cancel() + break + # if the status of the task is 'error' then cancel the timer + # and raise exception + if out["state"] == "error": + # cancel the timer + t.cancel() + error_message = "Please see logs for more details" + if("service_error" in out and + "details" in out["service_error"]): + error_message = out["service_error"]["details"] + raise common.CoprHdError( + common.CoprHdError.VALUE_ERR, + (_("Task: %(task_id)s is failed with error: " + "%(error_message)s") % + {'task_id': task_id, + 'error_message': error_message})) + + if self.is_timeout: + self.is_timeout = False + raise common.CoprHdError(common.CoprHdError.TIME_OUT, + (_("Task did not complete in %d secs." + " Operation timed out. Task in" + " CoprHD will continue") % + synctimeout)) + return + + def storage_resource_query(self, + storageres_type, + volume_name, + cg_name, + project, + tenant): + resourcepath = "/" + project + if tenant is not None: + resourcepath = tenant + resourcepath + + resUri = None + resourceObj = None + if Snapshot.BLOCK == storageres_type and volume_name is not None: + resourceObj = volume.Volume(self.ipaddr, self.port) + resUri = resourceObj.volume_query(resourcepath, volume_name) + elif Snapshot.BLOCK == storageres_type and cg_name is not None: + resourceObj = consistencygroup.ConsistencyGroup( + self.ipaddr, + self.port) + resUri = resourceObj.consistencygroup_query( + cg_name, + project, + tenant) + else: + resourceObj = None + + return resUri + + def snapshot_create(self, otype, typename, ouri, + snaplabel, inactive, sync, + readonly=False, synctimeout=0): + """New snapshot is created, for a given volume. 
+ + :param otype : block type should be provided + :param typename : either volume or consistency-groups should + be provided + :param ouri : uri of volume + :param snaplabel : name of the snapshot + :param inactive : if true, the snapshot will not activate the + synchronization between source and target volumes + :param sync : synchronous request + :param synctimeout : Query for task status for "synctimeout" secs. + If the task doesn't complete in synctimeout + secs, an exception is thrown + """ + + # check snapshot is already exist + is_snapshot_exist = True + try: + self.snapshot_query(otype, typename, ouri, snaplabel) + except common.CoprHdError as e: + if e.err_code == common.CoprHdError.NOT_FOUND_ERR: + is_snapshot_exist = False + else: + raise + + if is_snapshot_exist: + raise common.CoprHdError( + common.CoprHdError.ENTRY_ALREADY_EXISTS_ERR, + (_("Snapshot with name %(snaplabel)s" + " already exists under %(typename)s") % + {'snaplabel': snaplabel, + 'typename': typename + })) + + parms = { + 'name': snaplabel, + # if true, the snapshot will not activate the synchronization + # between source and target volumes + 'create_inactive': inactive + } + if readonly is True: + parms['read_only'] = readonly + body = oslo_serialization.jsonutils.dumps(parms) + + # REST api call + (s, h) = common.service_json_request( + self.ipaddr, self.port, + "POST", + Snapshot.URI_SNAPSHOT_LIST.format(otype, typename, ouri), body) + o = common.json_decode(s) + + task = o["task"][0] + + if sync: + return ( + self.block_until_complete( + otype, + task['resource']['id'], + task["id"], synctimeout) + ) + else: + return o + + def snapshot_delete_uri(self, otype, resource_uri, + suri, sync, synctimeout=0): + """Delete a snapshot by uri. + + :param otype : block + :param resource_uri: uri of the source resource + :param suri : Uri of the Snapshot + :param sync : To perform operation synchronously + :param synctimeout : Query for task status for "synctimeout" secs. If + the task doesn't complete in synctimeout secs, an + exception is thrown + """ + s = None + if resource_uri.find("Volume") > 0: + + (s, h) = common.service_json_request( + self.ipaddr, self.port, + "POST", + Snapshot.URI_RESOURCE_DEACTIVATE.format( + Snapshot.URI_BLOCK_SNAPSHOTS.format(suri)), + None) + elif resource_uri.find("BlockConsistencyGroup") > 0: + + (s, h) = common.service_json_request( + self.ipaddr, self.port, + "POST", + Snapshot.URI_CONSISTENCY_GROUPS_SNAPSHOT_DEACTIVATE.format( + resource_uri, + suri), + None) + o = common.json_decode(s) + task = o["task"][0] + + if sync: + return ( + self.block_until_complete( + otype, + task['resource']['id'], + task["id"], synctimeout) + ) + else: + return o + + def snapshot_delete(self, storageres_type, + storageres_typename, resource_uri, + name, sync, synctimeout=0): + snapshotUri = self.snapshot_query( + storageres_type, + storageres_typename, + resource_uri, + name) + self.snapshot_delete_uri( + storageres_type, + resource_uri, + snapshotUri, + sync, synctimeout) diff --git a/cinder/volume/drivers/coprhd/helpers/tag.py b/cinder/volume/drivers/coprhd/helpers/tag.py new file mode 100644 index 00000000000..818c70d922e --- /dev/null +++ b/cinder/volume/drivers/coprhd/helpers/tag.py @@ -0,0 +1,55 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""Contains tagging related methods.""" + +import oslo_serialization + +from cinder.i18n import _ +from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common + + +class Tag(common.CoprHDResource): + + def tag_resource(self, uri, resource_id, add, remove): + params = { + 'add': add, + 'remove': remove + } + + body = oslo_serialization.jsonutils.dumps(params) + + (s, h) = common.service_json_request(self.ipaddr, self.port, "PUT", + uri.format(resource_id), body) + o = common.json_decode(s) + return o + + def list_tags(self, resource_uri): + if resource_uri.__contains__("tag") is False: + raise common.CoprHdError( + common.CoprHdError.VALUE_ERR, _("URI should end with /tag")) + + (s, h) = common.service_json_request(self.ipaddr, + self.port, + "GET", + resource_uri, + None) + + allTags = [] + o = common.json_decode(s) + allTags = o['tag'] + + return allTags diff --git a/cinder/volume/drivers/coprhd/helpers/tenant.py b/cinder/volume/drivers/coprhd/helpers/tenant.py new file mode 100644 index 00000000000..9fb0f022096 --- /dev/null +++ b/cinder/volume/drivers/coprhd/helpers/tenant.py @@ -0,0 +1,117 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.i18n import _ +from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common + + +class Tenant(common.CoprHDResource): + + URI_SERVICES_BASE = '' + URI_TENANT = URI_SERVICES_BASE + '/tenant' + URI_TENANTS = URI_SERVICES_BASE + '/tenants/{0}' + URI_TENANTS_SUBTENANT = URI_TENANTS + '/subtenants' + + def tenant_query(self, label): + """Returns the UID of the tenant specified by the hierarchical name. 
+ + (ex tenant1/tenant2/tenant3) + """ + + if common.is_uri(label): + return label + + tenant_id = self.tenant_getid() + + if not label: + return tenant_id + + subtenants = self.tenant_list(tenant_id) + subtenants.append(self.tenant_show(None)) + + for tenant in subtenants: + if tenant['name'] == label: + rslt = self.tenant_show_by_uri(tenant['id']) + if rslt: + return tenant['id'] + + raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, + (_("Tenant %s: not found") % label)) + + def tenant_show(self, label): + """Returns the details of the tenant based on its name.""" + if label: + tenant_id = self.tenant_query(label) + else: + tenant_id = self.tenant_getid() + + return self.tenant_show_by_uri(tenant_id) + + def tenant_getid(self): + (s, h) = common.service_json_request(self.ipaddr, self.port, + "GET", Tenant.URI_TENANT, None) + + o = common.json_decode(s) + return o['id'] + + def tenant_list(self, uri=None): + """Returns all the tenants under a parent tenant. + + :param uri: The parent tenant name + :returns: JSON payload of tenant list + """ + + if not uri: + uri = self.tenant_getid() + + tenantdtls = self.tenant_show_by_uri(uri) + + if(tenantdtls and not ('parent_tenant' in tenantdtls and + ("id" in tenantdtls['parent_tenant']))): + (s, h) = common.service_json_request( + self.ipaddr, self.port, + "GET", self.URI_TENANTS_SUBTENANT.format(uri), None) + + o = common.json_decode(s) + return o['subtenant'] + + else: + return [] + + def tenant_show_by_uri(self, uri): + """Makes REST API call to retrieve tenant details based on UUID.""" + (s, h) = common.service_json_request(self.ipaddr, self.port, "GET", + Tenant.URI_TENANTS.format(uri), + None) + + o = common.json_decode(s) + if 'inactive' in o and o['inactive']: + return None + + return o + + def get_tenant_by_name(self, tenant): + uri = None + if not tenant: + uri = self.tenant_getid() + else: + if not common.is_uri(tenant): + uri = self.tenant_query(tenant) + else: + uri = tenant + if not uri: + raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, + (_("Tenant %s: not found") % tenant)) + return uri diff --git a/cinder/volume/drivers/coprhd/helpers/urihelper.py b/cinder/volume/drivers/coprhd/helpers/urihelper.py new file mode 100644 index 00000000000..79a91c3c913 --- /dev/null +++ b/cinder/volume/drivers/coprhd/helpers/urihelper.py @@ -0,0 +1,84 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +class URIHelper(object): + + """This map will be a map of maps. 
+ + e.g for project component type, it will hold a map + of its operations vs their uris + """ + COMPONENT_TYPE_VS_URIS_MAP = dict() + """Volume URIs.""" + VOLUME_URIS_MAP = dict() + URI_VOLUMES = '/block/volumes' + URI_VOLUME = URI_VOLUMES + '/{0}' + URI_VOLUME_TASK_LIST = URI_VOLUME + '/tasks' + URI_VOLUME_TASK = URI_VOLUME_TASK_LIST + '/{1}' + + """Consistencygroup URIs.""" + CG_URIS_MAP = dict() + URI_CGS = '/block/consistency-groups' + URI_CG = URI_CGS + '/{0}' + URI_CG_TASK_LIST = URI_CG + '/tasks' + URI_CG_TASK = URI_CG_TASK_LIST + '/{1}' + + """Export Group URIs.""" + # Map to hold all export group uris + EXPORT_GROUP_URIS_MAP = dict() + URI_EXPORT_GROUP_TASKS_LIST = '/block/exports/{0}/tasks' + URI_EXPORT_GROUP_TASK = URI_EXPORT_GROUP_TASKS_LIST + '/{1}' + + def __init__(self): + """During initialization of the class, lets fill all the maps.""" + self.__fillExportGroupMap() + self.__fillVolumeMap() + self.__fillConsistencyGroupMap() + self.__initializeComponentVsUriMap() + + def __call__(self): + return self + + def __initializeComponentVsUriMap(self): + self.COMPONENT_TYPE_VS_URIS_MAP["export"] = self.EXPORT_GROUP_URIS_MAP + self.COMPONENT_TYPE_VS_URIS_MAP[ + "volume"] = self.VOLUME_URIS_MAP + self.COMPONENT_TYPE_VS_URIS_MAP[ + "consistencygroup"] = self.CG_URIS_MAP + + def __fillExportGroupMap(self): + self.EXPORT_GROUP_URIS_MAP["task"] = self.URI_EXPORT_GROUP_TASK + + def __fillVolumeMap(self): + self.VOLUME_URIS_MAP["task"] = self.URI_VOLUME_TASK + + def __fillConsistencyGroupMap(self): + self.CG_URIS_MAP["task"] = self.URI_CG_TASK + + def getUri(self, componentType, operationType): + return ( + self.COMPONENT_TYPE_VS_URIS_MAP.get( + componentType).get( + operationType) + ) + + return None + +"""Defining the singleton instance. + +Use this instance any time the access is required for this module/class +""" +singletonURIHelperInstance = URIHelper() diff --git a/cinder/volume/drivers/coprhd/helpers/virtualarray.py b/cinder/volume/drivers/coprhd/helpers/virtualarray.py new file mode 100644 index 00000000000..de0ec96b1f2 --- /dev/null +++ b/cinder/volume/drivers/coprhd/helpers/virtualarray.py @@ -0,0 +1,79 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
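A small, self-contained illustration of how the singleton above is used to build a task polling URI; the volume and task identifiers are placeholders.

    from cinder.volume.drivers.coprhd.helpers.urihelper import (
        singletonURIHelperInstance)

    template = singletonURIHelperInstance.getUri("volume", "task")
    # template == '/block/volumes/{0}/tasks/{1}'
    task_uri = template.format("<volume-uri>", "<task-id>")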
+ +from cinder.i18n import _ +from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common + + +class VirtualArray(common.CoprHDResource): + + # Commonly used URIs for the 'varrays' module + URI_VIRTUALARRAY = '/vdc/varrays' + URI_VIRTUALARRAY_BY_VDC_ID = '/vdc/varrays?vdc-id={0}' + URI_VIRTUALARRAY_URI = '/vdc/varrays/{0}' + + def varray_query(self, name): + """Returns the UID of the varray specified by the name.""" + if common.is_uri(name): + return name + + uris = self.varray_list() + + for uri in uris: + varray = self.varray_show(uri) + if varray and varray['name'] == name: + return varray['id'] + + raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, + (_("varray %s: not found") % name)) + + def varray_list(self, vdcname=None): + """Returns all the varrays in a vdc. + + :param vdcname: Name of the Virtual Data Center + :returns: JSON payload of varray list + """ + vdcrestapi = None + if vdcname is not None: + vdcrestapi = VirtualArray.URI_VIRTUALARRAY_BY_VDC_ID.format( + vdcname) + else: + vdcrestapi = VirtualArray.URI_VIRTUALARRAY + (s, h) = common.service_json_request( + self.ipaddr, self.port, "GET", + vdcrestapi, None) + + o = common.json_decode(s) + + returnlst = [] + for item in o['varray']: + returnlst.append(item['id']) + + return returnlst + + def varray_show(self, label): + """Makes REST API call to retrieve varray details based on name.""" + uri = self.varray_query(label) + + (s, h) = common.service_json_request( + self.ipaddr, self.port, "GET", + VirtualArray.URI_VIRTUALARRAY_URI.format(uri), + None) + + o = common.json_decode(s) + if 'inactive' in o and o['inactive'] is True: + return None + else: + return o diff --git a/cinder/volume/drivers/coprhd/helpers/virtualpool.py b/cinder/volume/drivers/coprhd/helpers/virtualpool.py new file mode 100644 index 00000000000..ae64b52ba06 --- /dev/null +++ b/cinder/volume/drivers/coprhd/helpers/virtualpool.py @@ -0,0 +1,77 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.i18n import _ +from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common + + +class VirtualPool(common.CoprHDResource): + + URI_VPOOL = "/{0}/vpools" + URI_VPOOL_SHOW = URI_VPOOL + "/{1}" + URI_VPOOL_SEARCH = URI_VPOOL + "/search?name={1}" + + def vpool_show_uri(self, vpooltype, uri): + """Makes REST API call and retrieves vpool details based on UUID. + + This function will take uri as input and returns with + all parameters of VPOOL like label, urn and type. + + :param vpooltype : Type of virtual pool {'block'} + :param uri : unique resource identifier of the vpool + :returns: object containing all the details of vpool + """ + + (s, h) = common.service_json_request( + self.ipaddr, self.port, + "GET", + self.URI_VPOOL_SHOW.format(vpooltype, uri), None) + + o = common.json_decode(s) + if o['inactive']: + return None + + return o + + def vpool_query(self, name, vpooltype): + """Makes REST API call to query the vpool by name and type. 
+ + This function will take the VPOOL name and type of VPOOL + as input and get uri of the first occurance of given VPOOL. + + :param name: Name of the VPOOL + :param vpooltype: Type of the VPOOL {'block'} + :returns: uri of the given vpool + """ + if common.is_uri(name): + return name + + (s, h) = common.service_json_request( + self.ipaddr, self.port, "GET", + self.URI_VPOOL_SEARCH.format(vpooltype, name), None) + + o = common.json_decode(s) + if len(o['resource']) > 0: + # Get the Active vpool ID. + for vpool in o['resource']: + if self.vpool_show_uri(vpooltype, vpool['id']) is not None: + return vpool['id'] + # Raise not found exception. as we did not find any active vpool. + raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, + (_("VPool %(name)s ( %(vpooltype)s ) :" + " not found") % + {'name': name, + 'vpooltype': vpooltype + })) diff --git a/cinder/volume/drivers/coprhd/helpers/volume.py b/cinder/volume/drivers/coprhd/helpers/volume.py new file mode 100644 index 00000000000..6f93ca7f4bf --- /dev/null +++ b/cinder/volume/drivers/coprhd/helpers/volume.py @@ -0,0 +1,523 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import oslo_serialization +from oslo_utils import units +import six + +from cinder.i18n import _ +from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common +from cinder.volume.drivers.coprhd.helpers import consistencygroup +from cinder.volume.drivers.coprhd.helpers import project +from cinder.volume.drivers.coprhd.helpers import virtualarray +from cinder.volume.drivers.coprhd.helpers import virtualpool + + +class Volume(common.CoprHDResource): + + # Commonly used URIs for the 'Volume' module + URI_SEARCH_VOLUMES = '/block/volumes/search?project={0}' + URI_SEARCH_VOLUMES_BY_TAG = '/block/volumes/search?tag={0}' + URI_VOLUMES = '/block/volumes' + URI_VOLUME = URI_VOLUMES + '/{0}' + URI_VOLUME_EXPORTS = URI_VOLUME + '/exports' + URI_BULK_DELETE = URI_VOLUMES + '/deactivate' + URI_DEACTIVATE = URI_VOLUME + '/deactivate' + URI_EXPAND = URI_VOLUME + '/expand' + URI_TAG_VOLUME = URI_VOLUME + "/tags" + URI_VOLUME_CHANGE_VPOOL = URI_VOLUMES + "/vpool-change" + + # Protection REST APIs - clone + URI_VOLUME_PROTECTION_FULLCOPIES = ( + '/block/volumes/{0}/protection/full-copies') + URI_SNAPSHOT_PROTECTION_FULLCOPIES = ( + '/block/snapshots/{0}/protection/full-copies') + + URI_VOLUME_CLONE_DETACH = "/block/full-copies/{0}/detach" + + # New CG URIs + URI_CG_CLONE = "/block/consistency-groups/{0}/protection/full-copies" + URI_CG_CLONE_DETACH = ( + "/block/consistency-groups/{0}/protection/full-copies/{1}/detach") + + VOLUMES = 'volumes' + CG = 'consistency-groups' + BLOCK = 'block' + SNAPSHOTS = 'snapshots' + + # Lists volumes in a project + def list_volumes(self, project): + """Makes REST API call to list volumes under a project. 
+ + :param project: name of project + :returns: List of volumes uuids in JSON response payload + """ + + volume_uris = self.search_volumes(project) + volumes = [] + for uri in volume_uris: + volume = self.show_by_uri(uri) + if volume: + volumes.append(volume) + return volumes + + def search_volumes(self, project_name): + + proj = project.Project(self.ipaddr, self.port) + project_uri = proj.project_query(project_name) + + (s, h) = common.service_json_request(self.ipaddr, self.port, + "GET", + Volume.URI_SEARCH_VOLUMES.format( + project_uri), + None) + o = common.json_decode(s) + if not o: + return [] + + volume_uris = [] + resources = common.get_node_value(o, "resource") + for resource in resources: + volume_uris.append(resource["id"]) + return volume_uris + + # Shows volume information given its uri + def show_by_uri(self, uri): + """Makes REST API call and retrieves volume details based on UUID. + + :param uri: UUID of volume + :returns: Volume details in JSON response payload + """ + + (s, h) = common.service_json_request(self.ipaddr, self.port, + "GET", + Volume.URI_VOLUME.format(uri), + None) + o = common.json_decode(s) + inactive = common.get_node_value(o, 'inactive') + if inactive: + return None + return o + + # Creates a volume given label, project, vpool and size + def create(self, project_name, label, size, varray, vpool, + sync, consistencygroup, synctimeout=0): + """Makes REST API call to create volume under a project. + + :param project_name : name of the project under which the volume + will be created + :param label : name of volume + :param size : size of volume + :param varray : name of varray + :param vpool : name of vpool + :param sync : synchronous request + :param consistencygroup : To create volume under a consistencygroup + :param synctimeout : Query for task status for "synctimeout" secs. 
+ If the task doesn't complete in synctimeout + secs, an exception is thrown + :returns: Created task details in JSON response payload + """ + + proj_obj = project.Project(self.ipaddr, self.port) + project_uri = proj_obj.project_query(project_name) + + vpool_obj = virtualpool.VirtualPool(self.ipaddr, self.port) + vpool_uri = vpool_obj.vpool_query(vpool, "block") + + varray_obj = virtualarray.VirtualArray(self.ipaddr, self.port) + varray_uri = varray_obj.varray_query(varray) + + request = { + 'name': label, + 'size': size, + 'varray': varray_uri, + 'project': project_uri, + 'vpool': vpool_uri, + 'count': 1 + } + if consistencygroup: + request['consistency_group'] = consistencygroup + + body = oslo_serialization.jsonutils.dumps(request) + (s, h) = common.service_json_request(self.ipaddr, self.port, + "POST", + Volume.URI_VOLUMES, + body) + o = common.json_decode(s) + + if sync: + # check task empty + if len(o["task"]) > 0: + task = o["task"][0] + return self.check_for_sync(task, sync, synctimeout) + else: + raise common.CoprHdError( + common.CoprHdError.SOS_FAILURE_ERR, + _("error: task list is empty, no task response found")) + else: + return o + + # Blocks the operation until the task is complete/error out/timeout + def check_for_sync(self, result, sync, synctimeout=0): + if sync: + if len(result["resource"]) > 0: + resource = result["resource"] + return ( + common.block_until_complete("volume", resource["id"], + result["id"], self.ipaddr, + self.port, synctimeout) + ) + else: + raise common.CoprHdError( + common.CoprHdError.SOS_FAILURE_ERR, + _("error: task list is empty, no task response found")) + else: + return result + + # Queries a volume given its name + def volume_query(self, full_project_name, volume_name): + """Makes REST API call to query the volume by name. 
+ + :param volume_name: name of volume + :param full_project_name: Full project path + :returns: Volume details in JSON response payload + """ + if common.is_uri(volume_name): + return volume_name + + if not full_project_name: + raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, + _("Project name not specified")) + uris = self.search_volumes(full_project_name) + for uri in uris: + volume = self.show_by_uri(uri) + if volume and 'name' in volume and volume['name'] == volume_name: + return volume['id'] + raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, + (_("Volume" + "%s: not found") % volume_name)) + + def get_storageAttributes(self, volume_name, cg_name, snapshot_name=None): + storageres_type = None + storageres_typename = None + + if snapshot_name is not None: + storageres_type = Volume.BLOCK + storageres_typename = Volume.SNAPSHOTS + elif volume_name is not None: + storageres_type = Volume.BLOCK + storageres_typename = Volume.VOLUMES + elif cg_name is not None: + storageres_type = Volume.BLOCK + storageres_typename = Volume.CG + else: + storageres_type = None + storageres_typename = None + return (storageres_type, storageres_typename) + + def storage_resource_query(self, + storageres_type, + volume_name, + cg_name, + snapshot_name, + project, + tenant): + resourcepath = "/" + project + if tenant is not None: + resourcepath = tenant + resourcepath + + resUri = None + resourceObj = None + + if Volume.BLOCK == storageres_type and volume_name is not None: + resUri = self.volume_query(resourcepath, volume_name) + if snapshot_name is not None: + from cinder.volume.drivers.coprhd.helpers import snapshot + snapobj = snapshot.Snapshot(self.ipaddr, self.port) + resUri = snapobj.snapshot_query(storageres_type, + Volume.VOLUMES, resUri, + snapshot_name) + + elif Volume.BLOCK == storageres_type and cg_name is not None: + resourceObj = consistencygroup.ConsistencyGroup( + self.ipaddr, self.port) + resUri = resourceObj.consistencygroup_query( + cg_name, + project, + tenant) + else: + resourceObj = None + + return resUri + + # Creates volume(s) from given source volume + def clone(self, new_vol_name, resource_uri, + sync, synctimeout=0): + """Makes REST API call to clone volume. + + :param new_vol_name: name of volume + :param resource_uri: uri of source volume + :param sync : synchronous request + :param synctimeout : Query for task status for "synctimeout" secs. 
+ If the task doesn't complete in synctimeout + secs, an exception is thrown + :returns: Created task details in JSON response payload + """ + from cinder.volume.drivers.coprhd.helpers import snapshot + snap_obj = snapshot.Snapshot(self.ipaddr, self.port) + is_snapshot_clone = False + clone_full_uri = None + + # consistency group + if resource_uri.find("BlockConsistencyGroup") > 0: + clone_full_uri = Volume.URI_CG_CLONE.format(resource_uri) + elif resource_uri.find("BlockSnapshot") > 0: + is_snapshot_clone = True + clone_full_uri = ( + Volume.URI_SNAPSHOT_PROTECTION_FULLCOPIES.format(resource_uri)) + else: + clone_full_uri = ( + Volume.URI_VOLUME_PROTECTION_FULLCOPIES.format(resource_uri)) + + request = { + 'name': new_vol_name, + 'type': None, + 'count': 1 + } + + request["count"] = 1 + + body = oslo_serialization.jsonutils.dumps(request) + (s, h) = common.service_json_request(self.ipaddr, self.port, + "POST", + clone_full_uri, + body) + o = common.json_decode(s) + + if sync: + task = o["task"][0] + + if is_snapshot_clone: + return ( + snap_obj.block_until_complete( + "block", + task["resource"]["id"], + task["id"]) + ) + else: + return self.check_for_sync(task, sync, synctimeout) + else: + return o + + # To check whether a cloned volume is in detachable state or not + def is_volume_detachable(self, full_project_name, name): + + volume_uri = self.volume_query(full_project_name, name) + vol = self.show_by_uri(volume_uri) + # Filtering based on "replicaState" attribute value of Cloned volume. + # If "replicaState" value is "SYNCHRONIZED" then only Cloned volume + # would be in detachable state. + if(vol and 'protection' in vol and + 'full_copies' in vol['protection'] and + 'replicaState' in vol['protection']['full_copies']): + if(vol['protection']['full_copies']['replicaState'] == + 'SYNCHRONIZED'): + return True + return False + return False + + def volume_clone_detach(self, resource_uri, full_project_name, + name, sync, synctimeout=0): + + volume_uri = self.volume_query(full_project_name, name) + + # consistency group + if resource_uri.find("BlockConsistencyGroup") > 0: + (s, h) = common.service_json_request( + self.ipaddr, self.port, + "POST", + Volume.URI_CG_CLONE_DETACH.format( + resource_uri, + volume_uri), None) + else: + (s, h) = common.service_json_request( + self.ipaddr, self.port, + "POST", + Volume.URI_VOLUME_CLONE_DETACH.format(volume_uri), None) + + o = common.json_decode(s) + if sync: + task = o["task"][0] + return self.check_for_sync(task, sync, synctimeout) + else: + return o + + # Shows volume information given its name + def show(self, full_project_name, name): + """Retrieves volume details based on volume name. + + :param full_project_name : project path of the volume + :param name: name of the volume. If the volume is under a project, + then full XPath needs to be specified. 
+ Example: If VOL1 is a volume under project PROJ1, then the name + of volume is PROJ1/VOL1 + :returns: Volume details in JSON response payload + """ + if common.is_uri(name): + return name + if full_project_name is None: + raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, + (_("Volume %s : not found") % + six.text_type(name))) + + uris = self.search_volumes(full_project_name) + + for uri in uris: + volume = self.show_by_uri(uri) + if volume and 'name' in volume and volume['name'] == name: + return volume + raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, + (_("Volume" + " %s : not found") % six.text_type(name))) + + def expand(self, full_project_name, volume_name, new_size, + sync=False, synctimeout=0): + + volume_detail = self.show(full_project_name, volume_name) + from decimal import Decimal + new_size_in_gb = Decimal(Decimal(new_size) / (units.Gi)) + current_size = Decimal(volume_detail["provisioned_capacity_gb"]) + if new_size_in_gb <= current_size: + raise common.CoprHdError( + common.CoprHdError.VALUE_ERR, + (_("error: Incorrect value of new size: %(new_size_in_gb)s" + " GB\nNew size must be greater than current size: " + "%(current_size)s GB") % {'new_size_in_gb': new_size_in_gb, + 'current_size': current_size})) + + body = oslo_serialization.jsonutils.dumps({ + "new_size": new_size + }) + + (s, h) = common.service_json_request(self.ipaddr, self.port, + "POST", + Volume.URI_EXPAND.format( + volume_detail["id"]), + body) + if not s: + return None + o = common.json_decode(s) + + if sync: + return self.check_for_sync(o, sync, synctimeout) + return o + + # Deletes a volume given a volume name + def delete(self, full_project_name, name, sync=False, + force_delete=False, coprhdonly=False, synctimeout=0): + """Deletes a volume based on volume name. + + :param full_project_name: project name + :param name : name of volume to be deleted + :param sync : synchronous request + :param force_delete: if true, it will force the delete of internal + volumes that have the SUPPORTS_FORCE flag + :param coprhdonly : to delete volumes from coprHD only + :param synctimeout: Query for task status for "synctimeout" secs. If + the task doesn't complete in synctimeout secs, an + exception is thrown + + """ + volume_uri = self.volume_query(full_project_name, name) + return self.delete_by_uri(volume_uri, sync, force_delete, + coprhdonly, synctimeout) + + # Deletes a volume given a volume uri + def delete_by_uri(self, uri, sync=False, + force_delete=False, coprhdonly=False, synctimeout=0): + """Deletes a volume based on volume uri.""" + params = '' + if force_delete: + params += '&' if ('?' in params) else '?' + params += "force=" + "true" + if coprhdonly is True: + params += '&' if ('?' in params) else '?' + params += "type=" + 'CoprHD_ONLY' + + (s, h) = common.service_json_request(self.ipaddr, self.port, + "POST", + Volume.URI_DEACTIVATE.format( + uri) + params, + None) + if not s: + return None + o = common.json_decode(s) + if sync: + return self.check_for_sync(o, sync, synctimeout) + return o + + # Gets the exports info given a volume uri + def get_exports_by_uri(self, uri): + """Makes REST API call to get exports info of a volume. 
+ + :param uri: URI of the volume + :returns: Exports details in JSON response payload + """ + (s, h) = common.service_json_request(self.ipaddr, self.port, + "GET", + Volume.URI_VOLUME_EXPORTS.format( + uri), + None) + return common.json_decode(s) + + # Update a volume information + # Changed the volume vpool + def update(self, prefix_path, name, vpool): + """Makes REST API call to update a volume information. + + :param name: name of the volume to be updated + :param vpool: name of vpool + :returns: Created task details in JSON response payload + """ + namelist = [] + + if type(name) is list: + namelist = name + else: + namelist.append(name) + + volumeurilist = [] + + for item in namelist: + volume_uri = self.volume_query(prefix_path, item) + volumeurilist.append(volume_uri) + + vpool_obj = virtualpool.VirtualPool(self.ipaddr, self.port) + vpool_uri = vpool_obj.vpool_query(vpool, "block") + + params = { + 'vpool': vpool_uri, + 'volumes': volumeurilist + } + + body = oslo_serialization.jsonutils.dumps(params) + + (s, h) = common.service_json_request( + self.ipaddr, self.port, "POST", + Volume.URI_VOLUME_CHANGE_VPOOL, + body) + + o = common.json_decode(s) + return o diff --git a/cinder/volume/drivers/coprhd/iscsi.py b/cinder/volume/drivers/coprhd/iscsi.py new file mode 100644 index 00000000000..31d64b6fa90 --- /dev/null +++ b/cinder/volume/drivers/coprhd/iscsi.py @@ -0,0 +1,173 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
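A hypothetical call showing the virtual-pool change helper above; the CoprHD address, project path, volume name and target vpool are placeholders and an authenticated session is assumed.

    from cinder.volume.drivers.coprhd.helpers import volume

    vol_obj = volume.Volume("coprhd.example.com", 4443)

    # Move a volume (or a list of volumes) to a different virtual pool.
    vol_obj.update("Provider Tenant/cinder-project", "volume-1", "vpool-gold")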
+ + +"""Driver for EMC CoprHD iSCSI volumes.""" + +from oslo_log import log as logging + +from cinder import interface +from cinder.volume import driver +from cinder.volume.drivers.coprhd import common as coprhd_common + + +LOG = logging.getLogger(__name__) + + +@interface.volumedriver +class EMCCoprHDISCSIDriver(driver.ISCSIDriver): + """CoprHD iSCSI Driver.""" + + def __init__(self, *args, **kwargs): + super(EMCCoprHDISCSIDriver, self).__init__(*args, **kwargs) + self.common = self._get_common_driver() + + def _get_common_driver(self): + return coprhd_common.EMCCoprHDDriverCommon( + protocol='iSCSI', + default_backend_name=self.__class__.__name__, + configuration=self.configuration) + + def check_for_setup_error(self): + self.common.check_for_setup_error() + + def create_volume(self, volume): + """Creates a Volume.""" + self.common.create_volume(volume, self) + self.common.set_volume_tags(volume, ['_obj_volume_type']) + + def create_cloned_volume(self, volume, src_vref): + """Creates a cloned Volume.""" + self.common.create_cloned_volume(volume, src_vref) + self.common.set_volume_tags(volume, ['_obj_volume_type']) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + self.common.create_volume_from_snapshot(snapshot, volume) + self.common.set_volume_tags(volume, ['_obj_volume_type']) + + def extend_volume(self, volume, new_size): + """expands the size of the volume.""" + self.common.expand_volume(volume, new_size) + + def delete_volume(self, volume): + """Deletes an volume.""" + self.common.delete_volume(volume) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + self.common.create_snapshot(snapshot) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + self.common.delete_snapshot(snapshot) + + def ensure_export(self, context, volume): + """Driver entry point to get the export info for an existing volume.""" + pass + + def create_export(self, context, volume, connector=None): + """Driver entry point to get the export info for a new volume.""" + pass + + def remove_export(self, context, volume): + """Driver entry point to remove an export for a volume.""" + pass + + def create_consistencygroup(self, context, group): + """Creates a consistencygroup.""" + return self.common.create_consistencygroup(context, group) + + def delete_consistencygroup(self, context, group, volumes): + """Deletes a consistency group.""" + return self.common.delete_consistencygroup(context, group, volumes) + + def update_consistencygroup(self, context, group, + add_volumes, remove_volumes): + """Updates volumes in consistency group.""" + return self.common.update_consistencygroup(group, add_volumes, + remove_volumes) + + def create_cgsnapshot(self, context, cgsnapshot, snapshots): + """Creates a cgsnapshot.""" + return self.common.create_cgsnapshot(cgsnapshot, snapshots) + + def delete_cgsnapshot(self, context, cgsnapshot, snapshots): + """Deletes a cgsnapshot.""" + return self.common.delete_cgsnapshot(cgsnapshot, snapshots) + + def check_for_export(self, context, volume_id): + """Make sure volume is exported.""" + pass + + def initialize_connection(self, volume, connector): + """Initializes the connection and returns connection info.""" + + initiator_ports = [] + initiator_ports.append(connector['initiator']) + itls = self.common.initialize_connection(volume, + 'iSCSI', + initiator_ports, + connector['host']) + properties = {} + properties['target_discovered'] = False + properties['volume_id'] = volume['id'] + if itls: + 
properties['target_iqn'] = itls[0]['target']['port'] + properties['target_portal'] = '%s:%s' % ( + itls[0]['target']['ip_address'], + itls[0]['target']['tcp_port']) + properties['target_lun'] = itls[0]['hlu'] + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + LOG.debug("ISCSI properties: %s", properties) + return { + 'driver_volume_type': 'iscsi', + 'data': properties, + } + + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + + init_ports = [] + init_ports.append(connector['initiator']) + self.common.terminate_connection(volume, + 'iSCSI', + init_ports, + connector['host']) + + def get_volume_stats(self, refresh=False): + """Get volume status. + + If 'refresh' is True, run update the stats first. + """ + if refresh: + self.update_volume_stats() + + return self._stats + + def update_volume_stats(self): + """Retrieve stats info from virtual pool/virtual array.""" + LOG.debug("Updating volume stats") + self._stats = self.common.update_volume_stats() + + def retype(self, ctxt, volume, new_type, diff, host): + """Change the volume type.""" + return self.common.retype(ctxt, volume, new_type, diff, host) diff --git a/cinder/volume/drivers/coprhd/scaleio.py b/cinder/volume/drivers/coprhd/scaleio.py new file mode 100644 index 00000000000..841ad0020d0 --- /dev/null +++ b/cinder/volume/drivers/coprhd/scaleio.py @@ -0,0 +1,324 @@ +# Copyright (c) 2016 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
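For reference, the connection information returned by initialize_connection above has the following shape; the key set mirrors the properties dict assembled in the code, and every value in this sketch is a made-up placeholder.

    connection_info = {
        'driver_volume_type': 'iscsi',
        'data': {
            'target_discovered': False,
            'volume_id': '00000000-0000-0000-0000-000000000000',
            'target_iqn': 'iqn.1992-04.com.example:target0',
            'target_portal': '192.0.2.10:3260',
            'target_lun': 0,
            # Present only when the volume carries CHAP credentials:
            # 'auth_method': 'CHAP',
            # 'auth_username': '<user>',
            # 'auth_password': '<secret>',
        },
    }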
+ + +"""Driver for EMC CoprHD ScaleIO volumes.""" + +from oslo_config import cfg +from oslo_log import log as logging +import requests +import six +from six.moves import urllib + +from cinder import exception +from cinder.i18n import _ +from cinder.i18n import _LI +from cinder import interface +from cinder.volume import driver +from cinder.volume.drivers.coprhd import common as coprhd_common + + +LOG = logging.getLogger(__name__) + +scaleio_opts = [ + cfg.StrOpt('coprhd_scaleio_rest_gateway_ip', + default='None', + help='Rest Gateway for Scaleio'), + cfg.PortOpt('coprhd_scaleio_rest_gateway_port', + default=4984, + help='Rest Gateway Port for Scaleio'), + cfg.StrOpt('coprhd_scaleio_rest_server_username', + default=None, + help='Username for Rest Gateway'), + cfg.StrOpt('coprhd_scaleio_rest_server_password', + default=None, + help='Rest Gateway Password', + secret=True), + cfg.BoolOpt('scaleio_verify_server_certificate', + default=False, + help='verify server certificate'), + cfg.StrOpt('scaleio_server_certificate_path', + default=None, + help='Server certificate path') +] + +CONF = cfg.CONF +CONF.register_opts(scaleio_opts) + + +@interface.volumedriver +class EMCCoprHDScaleIODriver(driver.VolumeDriver): + """CoprHD ScaleIO Driver.""" + server_token = None + + def __init__(self, *args, **kwargs): + super(EMCCoprHDScaleIODriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(scaleio_opts) + self.common = self._get_common_driver() + + def _get_common_driver(self): + return coprhd_common.EMCCoprHDDriverCommon( + protocol='scaleio', + default_backend_name=self.__class__.__name__, + configuration=self.configuration) + + def check_for_setup_error(self): + self.common.check_for_setup_error() + if (self.configuration.scaleio_verify_server_certificate is True and + self.configuration.scaleio_server_certificate_path is None): + message = _("scaleio_verify_server_certificate is True but" + " scaleio_server_certificate_path is not provided" + " in cinder configuration") + raise exception.VolumeBackendAPIException(data=message) + + def create_volume(self, volume): + """Creates a Volume.""" + self.common.create_volume(volume, self, True) + self.common.set_volume_tags(volume, ['_obj_volume_type'], True) + vol_size = self._update_volume_size(int(volume['size'])) + return {'size': vol_size} + + def _update_volume_size(self, vol_size): + """update the openstack volume size.""" + default_size = 8 + if (vol_size % default_size) != 0: + return (vol_size / default_size) * default_size + default_size + else: + return vol_size + + def create_cloned_volume(self, volume, src_vref): + """Creates a cloned Volume.""" + self.common.create_cloned_volume(volume, src_vref, True) + self.common.set_volume_tags(volume, ['_obj_volume_type'], True) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + self.common.create_volume_from_snapshot(snapshot, volume, True) + self.common.set_volume_tags(volume, ['_obj_volume_type'], True) + + def extend_volume(self, volume, new_size): + """expands the size of the volume.""" + self.common.expand_volume(volume, new_size) + + def delete_volume(self, volume): + """Deletes an volume.""" + self.common.delete_volume(volume) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + self.common.create_snapshot(snapshot, True) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + self.common.delete_snapshot(snapshot) + + def ensure_export(self, context, volume): + """Driver entry point to 
+
+    def ensure_export(self, context, volume):
+        """Driver entry point to get the export info for an existing
+        volume.
+        """
+        pass
+
+    def create_export(self, context, volume, connector=None):
+        """Driver entry point to get the export info for a new volume."""
+        pass
+
+    def remove_export(self, context, volume):
+        """Driver entry point to remove an export for a volume."""
+        pass
+
+    def create_consistencygroup(self, context, group):
+        """Creates a consistencygroup."""
+        return self.common.create_consistencygroup(context, group, True)
+
+    def update_consistencygroup(self, context, group,
+                                add_volumes, remove_volumes):
+        """Updates volumes in a consistency group."""
+        return self.common.update_consistencygroup(group, add_volumes,
+                                                   remove_volumes)
+
+    def delete_consistencygroup(self, context, group, volumes):
+        """Deletes a consistency group."""
+        return self.common.delete_consistencygroup(context, group,
+                                                   volumes, True)
+
+    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
+        """Creates a cgsnapshot."""
+        return self.common.create_cgsnapshot(cgsnapshot, snapshots, True)
+
+    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
+        """Deletes a cgsnapshot."""
+        return self.common.delete_cgsnapshot(cgsnapshot, snapshots, True)
+
+    def check_for_export(self, context, volume_id):
+        """Make sure the volume is exported."""
+        pass
+
+    def initialize_connection(self, volume, connector):
+        """Initializes the connection and returns connection info."""
+
+        volname = self.common._get_resource_name(volume, True)
+
+        properties = {}
+        properties['scaleIO_volname'] = volname
+        properties['hostIP'] = connector['ip']
+        properties['serverIP'] = (
+            self.configuration.coprhd_scaleio_rest_gateway_ip)
+        properties['serverPort'] = (
+            self.configuration.coprhd_scaleio_rest_gateway_port)
+        properties['serverUsername'] = (
+            self.configuration.coprhd_scaleio_rest_server_username)
+        properties['serverPassword'] = (
+            self.configuration.coprhd_scaleio_rest_server_password)
+        properties['iopsLimit'] = None
+        properties['bandwidthLimit'] = None
+        properties['serverToken'] = self.server_token
+
+        initiator_port = self._get_client_id(properties['serverIP'],
+                                             properties['serverPort'],
+                                             properties['serverUsername'],
+                                             properties['serverPassword'],
+                                             properties['hostIP'])
+        initiator_ports = [initiator_port]
+
+        # _get_client_id() may have re-authenticated and refreshed the
+        # session token, so propagate the current value.
+        properties['serverToken'] = self.server_token
+        self.common.initialize_connection(volume,
+                                          'scaleio',
+                                          initiator_ports,
+                                          connector['host'])
+
+        return {
+            'driver_volume_type': 'scaleio',
+            'data': properties
+        }
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Disallow connection from connector."""
+
+        volname = volume['display_name']
+        properties = {}
+        properties['scaleIO_volname'] = volname
+        properties['hostIP'] = connector['ip']
+        properties['serverIP'] = (
+            self.configuration.coprhd_scaleio_rest_gateway_ip)
+        properties['serverPort'] = (
+            self.configuration.coprhd_scaleio_rest_gateway_port)
+        properties['serverUsername'] = (
+            self.configuration.coprhd_scaleio_rest_server_username)
+        properties['serverPassword'] = (
+            self.configuration.coprhd_scaleio_rest_server_password)
+        properties['serverToken'] = self.server_token
+
+        initiator_port = self._get_client_id(properties['serverIP'],
+                                             properties['serverPort'],
+                                             properties['serverUsername'],
+                                             properties['serverPassword'],
+                                             properties['hostIP'])
+        init_ports = [initiator_port]
+        self.common.terminate_connection(volume,
+                                         'scaleio',
+                                         init_ports,
+                                         connector['host'])
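Again for illustration only: unlike the iSCSI case, the ScaleIO attach path does not return a portal to log in to; it returns the gateway coordinates, credentials and session token that the ScaleIO connector on the compute host uses to map the volume. With invented values, the structure assembled above looks like:

    # Hypothetical shape of the dict returned by initialize_connection()
    # in scaleio.py above; gateway address, credentials, token and volume
    # name are all made up.
    scaleio_connection_info = {
        'driver_volume_type': 'scaleio',
        'data': {
            'scaleIO_volname': 'volume-2f1a5521',
            'hostIP': '192.0.2.20',
            'serverIP': 'coprhd-scaleio-gw.example.com',
            'serverPort': 4984,
            'serverUsername': 'scaleio-admin',
            'serverPassword': '********',
            'iopsLimit': None,
            'bandwidthLimit': None,
            'serverToken': 'hypothetical-session-token',
        },
    }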
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume stats.
+
+        If 'refresh' is True, update the stats first.
+        """
+        if refresh:
+            self.update_volume_stats()
+
+        return self._stats
+
+    def update_volume_stats(self):
+        """Retrieve stats info from virtual pool/virtual array."""
+        LOG.debug("Updating volume stats")
+        self._stats = self.common.update_volume_stats()
+
+    def _get_client_id(self, server_ip, server_port, server_username,
+                       server_password, sdc_ip):
+        ip_encoded = urllib.parse.quote(sdc_ip, '')
+        ip_double_encoded = urllib.parse.quote(ip_encoded, '')
+
+        request = ("https://%s:%s/api/types/Sdc/instances/getByIp::%s/" %
+                   (server_ip, six.text_type(server_port), ip_double_encoded))
+
+        LOG.info(_LI("ScaleIO get client id by ip request: %s"), request)
+
+        if self.configuration.scaleio_verify_server_certificate:
+            verify_cert = self.configuration.scaleio_server_certificate_path
+        else:
+            verify_cert = False
+
+        r = requests.get(
+            request, auth=(server_username, self.server_token),
+            verify=verify_cert)
+        r = self._check_response(
+            r, request, server_ip, server_port,
+            server_username, server_password)
+
+        sdc_id = r.json()
+        if not sdc_id:
+            msg = (_("Client with ip %s was not found") % sdc_ip)
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+        if r.status_code != 200 and "errorCode" in sdc_id:
+            msg = (_("Error getting sdc id from ip %(sdc_ip)s: "
+                     "%(sdc_id_message)s") %
+                   {'sdc_ip': sdc_ip,
+                    'sdc_id_message': sdc_id['message']})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+        LOG.info(_LI("ScaleIO sdc id is %s"), sdc_id)
+        return sdc_id
+
+    def _check_response(self, response, request,
+                        server_ip, server_port,
+                        server_username, server_password):
+        if response.status_code == 401 or response.status_code == 403:
+            LOG.info(
+                _LI("Token is invalid, going to re-login and get a new one"))
+
+            login_request = ("https://%s:%s/api/login" %
+                             (server_ip, six.text_type(server_port)))
+            if self.configuration.scaleio_verify_server_certificate:
+                verify_cert = (
+                    self.configuration.scaleio_server_certificate_path)
+            else:
+                verify_cert = False
+
+            r = requests.get(
+                login_request, auth=(server_username, server_password),
+                verify=verify_cert)
+
+            token = r.json()
+            self.server_token = token
+            # Repeat the original request with the fresh token.
+            LOG.info(_LI("Retrying request %s with the new token"), request)
+            res = requests.get(
+                request, auth=(server_username, self.server_token),
+                verify=verify_cert)
+            return res
+        return response
+
+    def retype(self, ctxt, volume, new_type, diff, host):
+        """Change the volume type."""
+        return self.common.retype(ctxt, volume, new_type, diff, host)
diff --git a/releasenotes/notes/cinder-coprhd-driver-11ebd149ea8610fd.yaml b/releasenotes/notes/cinder-coprhd-driver-11ebd149ea8610fd.yaml
new file mode 100644
index 00000000000..cc63218f23c
--- /dev/null
+++ b/releasenotes/notes/cinder-coprhd-driver-11ebd149ea8610fd.yaml
@@ -0,0 +1,2 @@
+features:
+  - Added volume backend drivers for CoprHD FC, iSCSI and ScaleIO.
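One detail of _get_client_id() above that is easy to miss (illustration only, not part of the patch): the SDC IP is URL-quoted twice before being embedded in the getByIp:: path, presumably because the gateway decodes the path segment once before matching it against registered SDC addresses. A small sketch with an invented gateway host and IPv6 SDC address:

    # Illustrative only: the double URL-encoding applied to the SDC IP.
    # The gateway host and the IPv6 address below are made up.
    from six.moves import urllib

    sdc_ip = 'fe80::20c:29ff:fe4b:1'
    once = urllib.parse.quote(sdc_ip, '')   # 'fe80%3A%3A20c%3A29ff%3Afe4b%3A1'
    twice = urllib.parse.quote(once, '')    # each '%' becomes '%25'
    request = ("https://%s:%s/api/types/Sdc/instances/getByIp::%s/" %
               ('coprhd-scaleio-gw.example.com', 4984, twice))
    print(request)
    # https://coprhd-scaleio-gw.example.com:4984/api/types/Sdc/instances/
    # getByIp::fe80%253A%253A20c%253A29ff%253Afe4b%253A1/

For a plain IPv4 address the quoting is a no-op, since dots and digits are already safe characters; the double encoding only becomes visible for addresses containing colons or other reserved characters.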