[NetApp] Enabling total_volumes capability support
Currently, the NetApp driver has no way to filter out backends at the scheduler level once the maximum number of volumes per pool is reached. This change adds support for the total_volumes capability and updates the default filter function of the iSCSI and NVMe drivers to filter out backends once a pool reaches the driver limit of 1024 volumes.

Closes-Bug: #2117263
Change-Id: I40263682af4735406d341e77ca90ee37cb361994
Signed-off-by: Saikumar Pulluri <saikumar1016@gmail.com>
Committed by: Fernando Ferraz
Parent: 92c645f1f1
Commit: 9c63da02a4
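
The commit message above hinges on the scheduler evaluating a filter function against the capabilities each pool reports. As a rough, hypothetical sketch (not the actual Cinder DriverFilter, which uses its own expression evaluator), the snippet below shows how an expression like the new default prunes a pool once its reported total_volumes reaches the limit; the capability values used here are made up.

    # Rough sketch only: illustrates how a DriverFilter-style filter function
    # such as the new NetApp default,
    #   'capabilities.utilization < 70 and capabilities.total_volumes < 1024',
    # weeds out a pool once the reported total_volumes hits the limit.
    def passes_filter(filter_function, capabilities):
        """Naively evaluate a 'capabilities.<name>' boolean expression."""
        expression = filter_function
        for name, value in capabilities.items():
            # Substitute each referenced capability with its reported value.
            expression = expression.replace('capabilities.%s' % name, repr(value))
        # The real DriverFilter uses Cinder's own expression evaluator, not eval().
        return bool(eval(expression))

    default_filter = ('capabilities.utilization < 70 and '
                      'capabilities.total_volumes < 1024')

    print(passes_filter(default_filter, {'utilization': 30, 'total_volumes': 2}))     # True
    print(passes_filter(default_filter, {'utilization': 30, 'total_volumes': 1024}))  # False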
@@ -3480,6 +3480,7 @@ class NetAppCmodeClientTestCase(test.TestCase):
             'query': {
                 'lun-info': {
                     'volume': fake.NETAPP_VOLUME,
+                    'vserver': fake_client.VSERVER_NAME
                 }
             },
             'desired-attributes': {
@@ -801,6 +801,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
         volume_name = fake_client.VOLUME_NAME
         query = {
             'location.volume.name': volume_name,
+            'svm.name': fake_client.VSERVER_NAME,
             'fields': 'space.size,name'
         }
         response = fake_client.LUN_GET_ITER_REST
@@ -824,8 +825,10 @@

     def test_get_lun_sizes_by_volume_no_records(self):
         volume_name = fake_client.VOLUME_NAME
+        vserver = fake_client.VSERVER_NAME
         query = {
             'location.volume.name': volume_name,
+            'svm.name': vserver,
             'fields': 'space.size,name'
         }
         response = fake_client.NO_RECORDS_RESPONSE_REST
@@ -4023,6 +4026,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):

         fake_query = {
             'location.volume.name': 'fake_volume',
+            'svm.name': fake_client.VSERVER_NAME,
             'fields': 'space.size,name'
         }

@@ -4050,6 +4054,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase):

         fake_query = {
             'location.volume.name': 'fake_volume',
+            'svm.name': fake_client.VSERVER_NAME,
             'fields': 'space.size,name'
         }

@@ -550,6 +550,7 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
             'replication_enabled': False,
             'online_extend_support': True,
             'netapp_is_flexgroup': 'false',
+            'total_volumes': 2,
         }]
         if report_provisioned_capacity:
             expected[0].update({'provisioned_capacity_gb': 5.0})
@@ -670,6 +670,7 @@ class NetAppNVMeStorageLibraryTestCase(test.TestCase):
             'netapp_disk_type': 'SSD',
             'online_extend_support': False,
             'netapp_is_flexgroup': 'false',
+            'total_volumes': 2,
         }]
         if report_provisioned_capacity:
             expected[0].update({'provisioned_capacity_gb': 5.0})
@@ -110,6 +110,37 @@ SSC_QOS_MIN_INFO = {
     },
 }

+SSC_VOLUME_COUNT_INFO = {
+    'volume1': {
+        'total_volumes': 3,
+    },
+    'volume2': {
+        'total_volumes': 2,
+    },
+}
+
+SSC_LUNS_BY_SIZES = [
+    {
+        'path': '/vol/volume-ae947c9b-2392-4956-b373-aaac4521f37e',
+        'size': 5368709120.0
+    },
+    {
+        'path': '/vol/snapshot-527eedad-a431-483d-b0ca-18995dd65b66',
+        'size': 1073741824.0
+    }
+]
+
+SSC_NAMESPACES_BY_SIZES = [
+    {
+        'path': '/vol/namespace-ae947c9b-2392-4956-b373-aaac4521f37e',
+        'size': 5379821234.0
+    },
+    {
+        'path': '/vol/namespace-527eedad-a431-483d-b0ca-18995dd65b66',
+        'size': 4673741874.0
+    }
+]
+
 SSC_MIRROR_INFO = {
     'volume1': {
         'netapp_mirrored': 'false',
@@ -38,6 +38,8 @@ class CapabilitiesLibraryTestCase(test.TestCase):
         self.ssc_library = capabilities.CapabilitiesLibrary(
             'iSCSI', fake.SSC_VSERVER, self.zapi_client, self.configuration)
         self.ssc_library.ssc = fake.SSC
+        self.ssc_library_nvme = capabilities.CapabilitiesLibrary(
+            'NVMe', fake.SSC_VSERVER, self.zapi_client, self.configuration)

     def get_config_cmode(self):
         config = na_fakes.create_configuration_cmode()
@@ -88,7 +90,8 @@ class CapabilitiesLibraryTestCase(test.TestCase):

         self.assertFalse(result)

-    def test_update_ssc(self):
+    @ddt.data('nfs', 'iscsi')
+    def test_update_ssc(self, protocol):

         mock_get_ssc_flexvol_info = self.mock_object(
             self.ssc_library, '_get_ssc_flexvol_info',
@@ -114,6 +117,15 @@ class CapabilitiesLibraryTestCase(test.TestCase):
             self.ssc_library, '_get_ssc_qos_min_info',
             side_effect=[fake.SSC_QOS_MIN_INFO['volume1'],
                          fake.SSC_QOS_MIN_INFO['volume2']])
+        if protocol != 'nfs':
+            mock_get_ssc_volume_count_info = self.mock_object(
+                self.ssc_library, '_get_ssc_volume_count_info',
+                side_effect=[fake.SSC_QOS_MIN_INFO['volume1'],
+                             fake.SSC_QOS_MIN_INFO['volume2']])
+        else:
+            mock_get_ssc_volume_count_info = self.mock_object(
+                self.ssc_library, '_get_ssc_volume_count_info',
+                side_effect=None)

         ordered_ssc = collections.OrderedDict()
         ordered_ssc['volume1'] = fake.SSC_VOLUME_MAP['volume1']
@@ -121,6 +133,13 @@ class CapabilitiesLibraryTestCase(test.TestCase):

         result = self.ssc_library.update_ssc(ordered_ssc)

+        if protocol != 'nfs':
+            mock_get_ssc_volume_count_info.assert_has_calls([
+                mock.call('volume1'), mock.call('volume2')])
+        else:
+            self.ssc_library._get_ssc_volume_count_info(fake.SSC_VOLUMES[0]).\
+                assert_not_called()
+
         self.assertIsNone(result)
         self.assertEqual(fake.SSC, self.ssc_library.ssc)
         mock_get_ssc_flexvol_info.assert_has_calls([
@@ -543,6 +562,35 @@ class CapabilitiesLibraryTestCase(test.TestCase):
         self.zapi_client.is_qos_min_supported.assert_called_once_with(False,
                                                                       'node')

+    @ddt.data('iscsi', 'fc', 'nvme')
+    def test_get_ssc_volume_count_info(self, protocol):
+
+        self.ssc_library = self.ssc_library_nvme if protocol == 'nvme' else \
+            self.ssc_library
+
+        self.mock_object(self.ssc_library.zapi_client,
+                         'get_namespace_sizes_by_volume',
+                         return_value=fake.SSC_NAMESPACES_BY_SIZES)
+
+        self.mock_object(self.ssc_library.zapi_client,
+                         'get_lun_sizes_by_volume',
+                         return_value=fake.SSC_LUNS_BY_SIZES)
+
+        result = self.ssc_library._get_ssc_volume_count_info(
+            fake_client.VOLUME_NAMES[0])
+
+        expected = {'total_volumes': 2}
+        self.assertEqual(expected, result)
+
+        if protocol != 'nvme':
+            self.zapi_client.get_lun_sizes_by_volume.\
+                assert_called_once_with(fake_client.VOLUME_NAMES[0])
+            self.zapi_client.get_namespace_sizes_by_volume.assert_not_called()
+        else:
+            self.zapi_client.get_namespace_sizes_by_volume.\
+                assert_called_once_with(fake_client.VOLUME_NAMES[0])
+            self.zapi_client.get_lun_sizes_by_volume.assert_not_called()
+
     @ddt.data(True, False)
     def test_is_flexgroup(self, is_fg):
         pool_name = 'fake_pool'
@@ -87,7 +87,8 @@ class NetAppBlockStorageLibrary(
                                  'xen', 'hyper_v']
     DEFAULT_LUN_OS = 'linux'
     DEFAULT_HOST_TYPE = 'linux'
-    DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70'
+    DEFAULT_FILTER_FUNCTION = ('capabilities.utilization < 70 and '
+                               'capabilities.total_volumes < 1024')
     DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization'

     def __init__(self, driver_name, driver_protocol, **kwargs):
@@ -412,9 +412,10 @@ class NetAppBlockStorageCmodeLibrary(
         size_available_gb = capacity['size-available'] / units.Gi
         pool['free_capacity_gb'] = na_utils.round_down(size_available_gb)

+        luns = self.zapi_client.get_lun_sizes_by_volume(
+            ssc_vol_name)
+        pool['total_volumes'] = len(luns)
         if self.configuration.netapp_driver_reports_provisioned_capacity:
-            luns = self.zapi_client.get_lun_sizes_by_volume(
-                ssc_vol_name)
             provisioned_cap = 0
             for lun in luns:
                 lun_name = lun['path'].split('/')[-1]
@@ -441,7 +441,8 @@ class Client(client_base.Client, metaclass=volume_utils.TraceWrapperMetaclass):
         api_args = {
             'query': {
                 'lun-info': {
-                    'volume': volume_name
+                    'volume': volume_name,
+                    'vserver': self.vserver
                 }
             },
             'desired-attributes': {
@@ -774,6 +774,7 @@ class RestClient(object, metaclass=volume_utils.TraceWrapperMetaclass):

         query = {
             'location.volume.name': volume_name,
+            'svm.name': self.vserver,
             'fields': 'space.size,name'
         }

@@ -2776,6 +2777,7 @@ class RestClient(object, metaclass=volume_utils.TraceWrapperMetaclass):

         query = {
             'location.volume.name': volume_name,
+            'svm.name': self.vserver,
             'fields': 'space.size,name'
         }
         response = self.send_request('/storage/namespaces', 'get', query=query)
@@ -73,7 +73,8 @@ class NetAppNVMeStorageLibrary(
     ALLOWED_SUBSYSTEM_HOST_TYPES = ['aix', 'linux', 'vmware', 'windows']
     DEFAULT_NAMESPACE_OS = 'linux'
     DEFAULT_HOST_TYPE = 'linux'
-    DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70'
+    DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70 and ' \
+                              'capabilities.total_volumes < 1024'
     DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization'
     REQUIRED_CMODE_FLAGS = ['netapp_vserver']
     NVME_PORT = 4420
@@ -564,9 +565,10 @@ class NetAppNVMeStorageLibrary(
         size_available_gb = capacity['size-available'] / units.Gi
         pool['free_capacity_gb'] = na_utils.round_down(size_available_gb)

+        namespaces = self.client.get_namespace_sizes_by_volume(
+            ssc_vol_name)
+        pool['total_volumes'] = len(namespaces)
         if self.configuration.netapp_driver_reports_provisioned_capacity:
-            namespaces = self.client.get_namespace_sizes_by_volume(
-                ssc_vol_name)
             provisioned_cap = 0
             for namespace in namespaces:
                 namespace_name = namespace['path'].split('/')[-1]
@@ -115,6 +115,9 @@ class CapabilitiesLibrary(object):

             ssc_volume.update(self._get_ssc_qos_min_info(node_name))

+            if self.protocol.casefold() != 'nfs':
+                ssc_volume.update(
+                    self._get_ssc_volume_count_info(flexvol_name))
             ssc[flexvol_name] = ssc_volume

         self.ssc = ssc
@@ -300,6 +303,21 @@ class CapabilitiesLibrary(object):
             'netapp_node_name': node_name,
         }

+    def _get_ssc_volume_count_info(self, flexvol_name):
+        """Gather volume count info and recast into SSC-style volume stats."""
+
+        if self.protocol.casefold() == 'nvme':
+            namespaces = self.zapi_client.get_namespace_sizes_by_volume(
+                flexvol_name)
+            volume_count = len(namespaces)
+        else:
+            luns = self.zapi_client.get_lun_sizes_by_volume(flexvol_name)
+            volume_count = len(luns)
+
+        return {
+            'total_volumes': volume_count,
+        }
+
     def get_matching_flexvols_for_extra_specs(self, extra_specs):
         """Return a list of flexvol names that match a set of extra specs."""

@@ -0,0 +1,24 @@
+---
+fixes:
+  - |
+    NetApp driver `bug #2117263
+    <https://bugs.launchpad.net/cinder/+bug/2117263>`_: Fixed the issue
+    where the driver did not account for storage limits when provisioning
+    volumes.
+features:
+  - |
+    The NetApp driver now reports the "total_volumes" capability, and the
+    default filter function is updated to filter out backends once a pool
+    reaches the maximum number of volumes (1024), a limit imposed by ONTAP
+    FlexVol volumes.
+
+    The "total_volumes" capability can also be used in the NetApp backend
+    stanza to restrict the number of volumes per pool. For example, the
+    following filter function limits each pool to 10 volumes:
+    filter_function="capabilities.total_volumes < 10"
+
+    Note: The admin needs to add the DriverFilter to
+    scheduler_default_filters under the [DEFAULT] stanza of cinder.conf;
+    please refer to [1] for the default filter list.
+
+    [1] https://docs.openstack.org/cinder/latest/configuration/block-storage/samples/cinder.conf.html
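
Based on the release note above, a minimal cinder.conf sketch follows. The backend name "netapp-iscsi" is a placeholder, and the stanza omits the required connection options (hostname, credentials, vserver, and so on). It enables the DriverFilter alongside the default scheduler filters and restricts each pool to 10 volumes, as the note describes.

    [DEFAULT]
    # DriverFilter must be enabled for filter_function to be evaluated.
    scheduler_default_filters = AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter,DriverFilter
    enabled_backends = netapp-iscsi

    [netapp-iscsi]
    volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
    netapp_storage_family = ontap_cluster
    netapp_storage_protocol = iscsi
    # Restrict each pool to at most 10 volumes (overrides the driver default of 1024).
    filter_function = "capabilities.total_volumes < 10"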