diff --git a/delfin/drivers/hitachi/hnas/__init__.py b/delfin/drivers/hitachi/hnas/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/drivers/hitachi/hnas/constants.py b/delfin/drivers/hitachi/hnas/constants.py new file mode 100644 index 000000000..a31307f6c --- /dev/null +++ b/delfin/drivers/hitachi/hnas/constants.py @@ -0,0 +1,89 @@ +# Copyright 2021 The SODA Authors. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import re + +from delfin.common import constants + +PATTERN = re.compile('[-]{3,}') +STORAGE_VENDOR = 'HITACHI' +TIME_TYPE = '%Y-%m-%d %H:%M:%S' + +OID_TRAP_DATA = '1.3.6.1.4.1.11096.6.1.1' + +STORAGE_INFO_COMMAND = "cluster-show" +STORAGE_MODEL_COMMAND = "ver" +LOCATION_COMMAND = 'system-information-get' + +DISK_INFO_COMMAND = "sd-list --scsi" + +POOL_INFO_COMMAND = "span-list" +POOL_SIZE_COMMAND = "span-space-distribution" + +CONTROLLER_INFO_COMMAND = "cluster-show -y" + +ALERT_INFO_COMMAND = "event-log-show -w -s" + +FC_PORT_COMMAND = "fc-hports" +FC_SPEED_COMMAND = "fc-link-speed" +ETH_PORT_COMMAND = "ifconfig" + +FS_INFO_COMMAND = 'df -k' +FS_STATUS_COMMAND = 'filesystem-list' + +CHECK_EVS = 'evs-select %s' +QUOTA_INFO_COMMAND = "quota list %s" + +TREE_INFO_COMMAND = 'virtual-volume list --verbose %s' + +CIFS_SHARE_COMMAND = 'cifs-share list' + +NFS_SHARE_COMMAND = "nfs-export list" + +CLUSTER_STATUS = { + 'Robust': constants.StorageStatus.NORMAL, + 'Degraded': constants.StorageStatus.ABNORMAL, + 'Critical': constants.StorageStatus.ABNORMAL, + 'OK': constants.StorageStatus.NORMAL, + 'Failure(s)': constants.StorageStatus.ABNORMAL +} + +SEVERITY_MAP = { + 'Severe': constants.Severity.FATAL, + 'Warning': constants.Severity.WARNING, + 'Information': constants.Severity.INFORMATIONAL +} + +FS_STATUS_MAP = { + 'Fail!': constants.FilesystemStatus.FAULTY, + 'OK': constants.FilesystemStatus.NORMAL, + 'NoEVS': constants.FilesystemStatus.NORMAL, + 'EVS-D': constants.FilesystemStatus.NORMAL, + 'Hiddn': constants.FilesystemStatus.NORMAL, + 'Clust': constants.FilesystemStatus.FAULTY, + 'Unavl': constants.FilesystemStatus.NORMAL, + 'Check': constants.FilesystemStatus.NORMAL, + 'Fixng': constants.FilesystemStatus.NORMAL, + 'Mount': constants.FilesystemStatus.NORMAL, + 'MntRO': constants.FilesystemStatus.NORMAL, + 'SysLk': constants.FilesystemStatus.NORMAL, + 'SysRO': constants.FilesystemStatus.NORMAL, + 'RepTg': constants.FilesystemStatus.NORMAL, + 'Rcvry': constants.FilesystemStatus.NORMAL, + 'UnMnt': constants.FilesystemStatus.FAULTY, + 'Mntg': constants.FilesystemStatus.NORMAL, + 'Formt': constants.FilesystemStatus.NORMAL, + 'Failg': constants.FilesystemStatus.FAULTY, + None: constants.FilesystemStatus.NORMAL, +} diff --git a/delfin/drivers/hitachi/hnas/hds_nas.py b/delfin/drivers/hitachi/hnas/hds_nas.py new file mode 100644 index 000000000..60eea4738 --- /dev/null +++ b/delfin/drivers/hitachi/hnas/hds_nas.py @@ -0,0 +1,85 @@ +# Copyright 2021 The SODA Authors. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from delfin.drivers import driver
+from delfin.drivers.hitachi.hnas import nas_handler
+
+
+class HitachiHNasDriver(driver.StorageDriver):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.nas_handler = nas_handler.NasHandler(**kwargs)
+        self.nas_handler.login()
+
+    def reset_connection(self, context, **kwargs):
+        self.nas_handler.login()
+
+    def get_storage(self, context):
+        return self.nas_handler.get_storage()
+
+    def list_storage_pools(self, context):
+        return self.nas_handler.get_pool(self.storage_id)
+
+    def list_volumes(self, context):
+        pass
+
+    def list_controllers(self, context):
+        return self.nas_handler.list_controllers(self.storage_id)
+
+    def list_ports(self, context):
+        return self.nas_handler.list_ports(self.storage_id)
+
+    def list_disks(self, context):
+        return self.nas_handler.get_disk(self.storage_id)
+
+    def list_alerts(self, context, query_para=None):
+        return self.nas_handler.list_alerts(query_para)
+
+    def list_qtrees(self, context):
+        return self.nas_handler.list_qtrees(self.storage_id)
+
+    def list_quotas(self, context):
+        return self.nas_handler.list_quotas(self.storage_id)
+
+    def list_filesystems(self, context):
+        return self.nas_handler.list_filesystems(self.storage_id)
+
+    def list_shares(self, context):
+        return self.nas_handler.list_shares(self.storage_id)
+
+    def add_trap_config(self, context, trap_config):
+        pass
+
+    def remove_trap_config(self, context, trap_config):
+        pass
+
+    @staticmethod
+    def parse_alert(context, alert):
+        return nas_handler.NasHandler.parse_alert(alert)
+
+    def clear_alert(self, context, alert):
+        pass
+
+    @staticmethod
+    def get_access_url():
+        return 'https://{ip}'
+
+    def collect_perf_metrics(self, context, storage_id,
+                             resource_metrics, start_time, end_time):
+        pass
+
+    @staticmethod
+    def get_capabilities(context):
+        pass
diff --git a/delfin/drivers/hitachi/hnas/nas_handler.py b/delfin/drivers/hitachi/hnas/nas_handler.py
new file mode 100644
index 000000000..2c6bd7952
--- /dev/null
+++ b/delfin/drivers/hitachi/hnas/nas_handler.py
@@ -0,0 +1,816 @@
+# Copyright 2021 The SODA Authors.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
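The driver above is a thin façade; all real work happens in NasHandler, whose implementation follows. For orientation, here is a minimal usage sketch, not part of the patch: the host and credentials are placeholders mirroring the ACCESS_INFO fixture used by the unit tests further below.

```python
# Minimal sketch (assumes a reachable HNAS node; values are placeholders).
from delfin.drivers.hitachi.hnas.hds_nas import HitachiHNasDriver

access_info = {
    "storage_id": "12345",
    "vendor": "hitachi",
    "model": "hnas",
    "ssh": {
        "host": "192.168.3.211",
        "port": 22,
        "username": "manager",
        "password": "manager",
    },
}

driver = HitachiHNasDriver(**access_info)   # NasHandler.login() runs here
storage = driver.get_storage(context=None)  # context is accepted but unused
print(storage["name"], storage["serial_number"], storage["status"])
```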
+import hashlib
+import time
+
+import six
+
+from oslo_log import log as logging
+
+from delfin import exception, utils
+from delfin.common import constants
+from delfin.drivers.utils.ssh_client import SSHPool
+from delfin.drivers.hitachi.hnas import constants as constant
+from delfin.drivers.utils.tools import Tools
+
+LOG = logging.getLogger(__name__)
+
+
+class NasHandler(object):
+
+    def __init__(self, **kwargs):
+        self.ssh_pool = SSHPool(**kwargs)
+        self.evs_list = []
+
+    @staticmethod
+    def get_size(limit, is_calculate=False):
+        if limit == '0B':
+            return 0
+        if limit == '-':
+            return 0 if is_calculate else '-'
+        return int(Tools.get_capacity_size(limit))
+
+    def ssh_do_exec(self, command_list):
+        # Retry while the node reports a transient SSC connection failure.
+        res = self.ssh_pool.do_exec_command(command_list)
+        while 'Failed to establish SSC connection' in res:
+            res = self.ssh_pool.do_exec_command(command_list)
+        return res
+
+    def login(self):
+        try:
+            result = self.ssh_do_exec(['cluster-show -y'])
+            if 'is not a recognized command' in result \
+                    or 'Unknown command' in result \
+                    or 'EVS' not in result:
+                raise exception.InvalidIpOrPort()
+        except Exception as e:
+            LOG.error("Failed to login hitachi nas %s" %
+                      (six.text_type(e)))
+            raise e
+
+    @staticmethod
+    def split_value_map_list(
+            value_info,
+            map_list,
+            value_key,
+            line='\r\n',
+            split=":",
+            split_key=None):
+        detail_array = value_info.split(line)
+        value_map = {}
+        for detail in detail_array:
+            if detail:
+                string_info = detail.split(split)
+                key = string_info[0].replace(' ', '')
+                value = ''
+                if len(string_info) > 1:
+                    for string in string_info[1:]:
+                        value += string.\
+                            replace('""', '').\
+                            replace('\'', '').\
+                            replace(' ', '')
+                if value_map.get(key):
+                    value_map[key + '1'] = value
+                else:
+                    value_map[key] = value
+            else:
+                if value_map != {} and value_key in value_map:
+                    map_list.append(value_map)
+                value_map = {}
+            if split_key and split_key in detail:
+                if value_map != {} and value_key in value_map:
+                    map_list.append(value_map)
+                value_map = {}
+        if value_map != {} and value_key in value_map:
+            map_list.append(value_map)
+
+    @staticmethod
+    def get_table_data(values):
+        header_index = 0
+        table = values.split("\r\n")
+        for i in range(0, len(table)):
+            if constant.PATTERN.search(table[i]):
+                header_index = i
+        return table[(header_index + 1):]
+
+    def get_storage(self):
+        try:
+            storage_info = self.ssh_do_exec([constant.STORAGE_INFO_COMMAND])
+            model_info = self.ssh_do_exec([constant.STORAGE_MODEL_COMMAND])
+            location_info = self.ssh_do_exec([constant.LOCATION_COMMAND])
+            storage_map_list = []
+            model_map_list = []
+            version_map_list = []
+            location_map_list = []
+            serial_map_list = []
+            self.split_value_map_list(
+                model_info, model_map_list, 'Model', split=":")
+            self.split_value_map_list(
+                storage_info, storage_map_list, 'ClusterName', split="=")
+            self.split_value_map_list(
+                model_info, version_map_list, 'Software', split=":")
+            self.split_value_map_list(
+                location_info, location_map_list, 'Location', split=':')
+            self.split_value_map_list(
+                model_info, serial_map_list, 'Hardware', split=':')
+            if len(storage_map_list) > 0:
+                model_map = {}
+                version_map = {}
+                location_map = {}
+                serial_map = {}
+                if len(model_map_list) > 0:
+                    model_map = model_map_list[-1]
+                if len(version_map_list) > 0:
+                    version_map = version_map_list[-1]
+                if len(location_map_list) > 0:
+                    location_map = location_map_list[-1]
+                if len(serial_map_list) > 0:
+                    serial_map = serial_map_list[-1]
+                version = version_map.get("Software").split('(')
+                serial_number = \
serial_map.get("Hardware").split('(')[-1] + storage_map = storage_map_list[-1] + disk_list = self.get_disk(None) + total_capacity = \ + raw_capacity = \ + used_capacity = \ + free_capacity = 0 + for disk in disk_list: + raw_capacity += disk['capacity'] + status = \ + constant.CLUSTER_STATUS.get(storage_map['ClusterHealth']) + pool_list = self.get_pool(None) + for pool in pool_list: + total_capacity += pool['total_capacity'] + used_capacity += pool['used_capacity'] + free_capacity += pool['free_capacity'] + storage_model = { + "name": storage_map['ClusterName'], + "vendor": constant.STORAGE_VENDOR, + "model": model_map.get('Model'), + "status": status, + "serial_number": serial_number.replace(')', ''), + "firmware_version": version[0], + "location": location_map['Location'], + "total_capacity": total_capacity, + "raw_capacity": raw_capacity, + "used_capacity": used_capacity, + "free_capacity": free_capacity + } + return storage_model + except exception.DelfinException as e: + err_msg = "Failed to get storage from " \ + "hitachi nas: %s" % (six.text_type(e.msg)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get storage from " \ + "hitachi nas: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def get_disk(self, storage_id): + try: + disk_info = self.ssh_do_exec([constant.DISK_INFO_COMMAND]) + disk_map_list = [] + self.split_value_map_list( + disk_info, disk_map_list, 'Capacity', split=":") + disks_list = [] + for disk_map in disk_map_list: + if 'Status' in disk_map: + size = disk_map['Capacity'].split("GiB")[0] + "GB" + status = constants.DiskStatus.NORMAL \ + if disk_map['Status'] == 'OK' \ + else constants.DiskStatus.ABNORMAL + type = disk_map['Type'] + type_array = type.split(";") + model = vendor = version = None + if len(type_array) > 2: + model = type_array[1].replace('Model', '') + vendor = type_array[0].replace('Make', '') + version = type_array[2].replace('Revision', '') + pool_id = disk_map.get('Usedinspan', None) + serial_number = disk_map['Luid'].split(']')[-1] + if pool_id: + pool_id = pool_id.split('(')[0] + disk_model = { + 'name': disk_map['HDSdevname'], + 'storage_id': storage_id, + 'native_disk_id': disk_map['DeviceID'], + 'serial_number': serial_number, + 'manufacturer': vendor, + 'model': model, + 'firmware': version, + 'speed': None, + 'capacity': int(self.get_size(size)), + 'status': status, + 'physical_type': None, + 'logical_type': None, + 'native_disk_group_id': pool_id, + 'location': disk_map['Serialnumber'], + } + disks_list.append(disk_model) + return disks_list + except exception.DelfinException as e: + err_msg = "Failed to get disk from " \ + "hitachi nas: %s" % (six.text_type(e.msg)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get disk from " \ + "hitachi nas: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def get_pool_size(self): + size_info = self.ssh_do_exec([constant.POOL_SIZE_COMMAND]) + size_array = size_info.split("\r\n") + size_map = {} + pool_name = None + count = 0 + for size in size_array: + if 'Span ' in size: + pool_name = size.split()[-1].replace(':', '') + size_map[pool_name] = 0 + count = 0 + if '[Free space]' in size: + free_array = size.split() + if len(free_array) > 2: + count += 1 + free_size = free_array[0].replace('GiB', 'GB') + size_map[pool_name] += self.get_size(free_size) + return size_map + + def get_pool(self, storage_id): + try: + pool_info = 
self.ssh_do_exec([constant.POOL_INFO_COMMAND]) + pool_list = [] + pool_array = self.get_table_data(pool_info) + size_map = self.get_pool_size() + for pool in pool_array: + value_array = pool.split() + if len(value_array) == 6: + total_capacity = \ + self.get_size(value_array[3] + "GB") + free_capacity = \ + size_map.get(value_array[0], total_capacity) + status = constants.StoragePoolStatus.NORMAL \ + if value_array[1] == 'Yes' \ + else constants.StoragePoolStatus.ABNORMAL + pool_model = { + 'name': value_array[0], + 'storage_id': storage_id, + 'native_storage_pool_id': value_array[0], + 'description': None, + 'status': status, + 'storage_type': constants.StorageType.FILE, + 'total_capacity': total_capacity, + 'used_capacity': total_capacity - free_capacity, + 'free_capacity': free_capacity, + } + pool_list.append(pool_model) + return pool_list + except exception.DelfinException as e: + err_msg = "Failed to get pool from " \ + "hitachi nas: %s" % (six.text_type(e.msg)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get pool from " \ + "hitachi nas: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def list_controllers(self, storage_id): + try: + controller_list = [] + node_info = self.ssh_do_exec([constant.CONTROLLER_INFO_COMMAND]) + nodes_array = self.get_table_data(node_info) + for nodes in nodes_array: + node = nodes.split() + if len(node) > 2: + status = constants.ControllerStatus.NORMAL \ + if node[2] == 'ONLINE' \ + else constants.ControllerStatus.OFFLINE + controller_model = { + 'name': node[1], + 'storage_id': storage_id, + 'native_controller_id': node[0], + 'status': status, + 'location': None, + 'soft_version': None, + 'cpu_info': None, + 'memory_size': None, + 'mgmt_ip': None + } + controller_list.append(controller_model) + return controller_list + except exception.DelfinException as e: + err_msg = "Failed to get controllers from " \ + "hitachi nas: %s" % (six.text_type(e.msg)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get controllers from " \ + "hitachi nas: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def list_alerts(self, query_para): + try: + alert_info = self.ssh_do_exec([constant.ALERT_INFO_COMMAND]) + alert_array = self.get_table_data(alert_info) + alert_list = [] + for alert in alert_array: + value_array = alert.split() + if len(value_array) > 4 \ + and '******' not in value_array[0] \ + and value_array[1] in constant.SEVERITY_MAP: + occur_time = \ + value_array[2] + ' ' + \ + value_array[3].split("+")[0] + occur_time = \ + int(time.mktime(time.strptime( + occur_time, constant.TIME_TYPE))) * 1000 + if not query_para or \ + (int(query_para['begin_time']) + <= occur_time + <= int(query_para['end_time'])): + description = '' + for i in range(4, len(value_array)): + description += value_array[i] + ' ' + severity = constant.SEVERITY_MAP.get(value_array[1]) + alert_model = { + 'alert_id': value_array[0], + 'alert_name': value_array[0], + 'severity': severity, + 'category': constants.Category.FAULT, + 'type': constants.EventType.EQUIPMENT_ALARM, + 'occur_time': occur_time, + 'description': description, + 'match_key': hashlib.md5( + (value_array[0] + severity + + description).encode()).hexdigest(), + 'resource_type': constants.DEFAULT_RESOURCE_TYPE, + 'location': '' + } + alert_list.append(alert_model) + return alert_list + except exception.DelfinException as e: + err_msg = "Failed to get alerts from " \ + "hitachi 
nas: %s" % (six.text_type(e.msg)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get alerts from " \ + "hitachi nas: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + @staticmethod + def parse_alert(alert): + try: + alert_info = alert.get(constant.OID_TRAP_DATA) + alert_array = alert_info.split(":") + if len(alert_array) > 1: + description = alert_array[1] + alert = alert_array[0].split() + if len(alert) > 1: + alert_id = alert[0] + severity = constant.SEVERITY_MAP.get(alert[1]) + alert_model = { + 'alert_id': alert_id, + 'alert_name': alert_id, + 'severity': severity, + 'category': constants.Category.FAULT, + 'type': constants.EventType.EQUIPMENT_ALARM, + 'occur_time': utils.utcnow_ms(), + 'description': description, + 'match_key': hashlib.md5( + (alert_id + severity + + description).encode()).hexdigest(), + 'resource_type': constants.DEFAULT_RESOURCE_TYPE, + 'location': '' + } + return alert_model + except exception.DelfinException as e: + err_msg = "Failed to parse alert from " \ + "hitachi nas: %s" % (six.text_type(e.msg)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to parse alert from " \ + "hitachi nas: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def list_ports(self, storage_id): + try: + ports_list = [] + ports_list.extend(self.get_fc_port(storage_id)) + ports_list.extend(self.get_eth_port(storage_id)) + return ports_list + except exception.DelfinException as e: + err_msg = "Failed to get ports from " \ + "hitachi nas: %s" % (six.text_type(e.msg)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get ports from " \ + "hitachi nas: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def get_fc_port(self, storage_id): + try: + fc_info = self.ssh_do_exec([constant.FC_PORT_COMMAND]) + fc_map_list = [] + self.split_value_map_list(fc_info, fc_map_list, 'Portname') + fc_list = [] + speed_info = self.ssh_do_exec([constant.FC_SPEED_COMMAND]) + speed_map_list = [] + self.split_value_map_list(speed_info, speed_map_list, 'FC1') + speed_map = speed_map_list[-1] + for value_map in fc_map_list: + if 'Portname' in value_map: + status = value_map.get('Status', None) + health = constants.PortHealthStatus.ABNORMAL + if status and status == 'Good': + health = constants.PortHealthStatus.NORMAL + connection_status = \ + constants.PortConnectionStatus.DISCONNECTED + if 'FCLinkisup' in value_map: + connection_status = \ + constants.PortConnectionStatus.CONNECTED + port_id = '' + for key in value_map.keys(): + if 'HostPort' in key: + port_id = key.replace('HostPort', '') + break + speed = \ + int(speed_map.get('FC' + port_id).replace('Gbps', '')) + fc_model = { + 'name': 'FC' + port_id, + 'storage_id': storage_id, + 'native_port_id': port_id, + 'location': None, + 'connection_status': connection_status, + 'health_status': health, + 'type': constants.PortType.FC, + 'logical_type': None, + 'speed': speed * (1000 ** 3), + 'max_speed': 8 * (1000 ** 3), + 'native_parent_id': None, + 'wwn': value_map.get('Portname'), + 'mac_address': None, + 'ipv4': None, + 'ipv4_mask': None, + 'ipv6': None, + 'ipv6_mask': None, + } + fc_list.append(fc_model) + return fc_list + except exception.DelfinException as e: + err_msg = "Failed to get fc ports from " \ + "hitachi nas: %s" % (six.text_type(e.msg)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get fc ports 
from " \ + "hitachi nas: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def get_eth_port(self, storage_id): + try: + eth_info = self.ssh_do_exec([constant.ETH_PORT_COMMAND]) + eth_list = [] + value_array = eth_info.split('\r\n') + eth_model = {} + for value in value_array: + if value: + if 'Link encap' in value: + value_info = value.split() + if len(value_info) > 1: + eth_model['name'] = value_info[0] + if 'MTU' in value: + value_info = value.split() + if len(value_info) > 2: + eth_model['connection_status'] = \ + constants.PortConnectionStatus.DISCONNECTED + eth_model['health_status'] = \ + constants.PortHealthStatus.UNKNOWN + if value_info[0].split(":")[1] == 'UP': + eth_model['connection_status'] = \ + constants.PortConnectionStatus.CONNECTED + eth_model['health_status'] = \ + constants.PortHealthStatus.NORMAL + eth_model['type'] = constants.PortType.ETH + eth_model['storage_id'] = storage_id + eth_model['native_port_id'] =\ + 'ETH' + '-' + eth_model['name'] + if 'inet addr' in value: + value_info = value.split() + if len(value_info) > 2: + eth_model['ipv4'] = value_info[1].split(":")[1] + eth_model['ipv4_mask'] = \ + value_info[3].split(":")[1] + else: + if 'name' in eth_model: + eth_list.append(eth_model) + eth_model = {} + return eth_list + except exception.DelfinException as e: + err_msg = "Failed to get eth ports from " \ + "hitachi nas: %s" % (six.text_type(e.msg)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get eth ports from " \ + "hitachi nas: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def list_filesystems(self, storage_id): + try: + fs_list = [] + fs_info = self.ssh_do_exec([constant.FS_INFO_COMMAND]) + fs_array = self.get_table_data(fs_info) + status_info = self.ssh_do_exec([constant.FS_STATUS_COMMAND]) + status_array = self.get_table_data(status_info) + status_map = {} + for status in status_array: + status_info = status.split() + if len(status_info) > 6: + status_map[status_info[1]] = \ + [status_info[2], status_info[3]] + for fs in fs_array: + fs_info = list(filter(None, fs.split(" "))) + if len(fs_info) > 8: + total_capacity = fs_info[3].replace(" ", '') + used_capacity = fs_info[4].replace(" ", '').split("(")[0] + free_capacity = fs_info[7].replace(" ", '').split("(")[0] + total_capacity = self.get_size(total_capacity) + used_capacity = self.get_size(used_capacity) + free_capacity = self.get_size(free_capacity) + type = constants.VolumeType.THICK \ + if fs_info[8] == 'No' \ + else constants.VolumeType.THIN + pool_id = None \ + if len(status_map.get(fs_info[0])) < 1 \ + else status_map.get(fs_info[0])[0] + status = None \ + if len(status_map.get(fs_info[0])) < 1 \ + else status_map.get(fs_info[0])[1] + fs_model = { + 'name': fs_info[1], + 'storage_id': storage_id, + 'native_filesystem_id': fs_info[1], + 'native_pool_id': pool_id, + 'compressed': None, + 'deduplicated': None, + 'worm': None, + 'status': constant.FS_STATUS_MAP[status], + 'security_mode': None, + 'type': type, + 'total_capacity': total_capacity, + 'used_capacity': used_capacity, + 'free_capacity': free_capacity + } + fs_list.append(fs_model) + return fs_list + except exception.DelfinException as e: + err_msg = "Failed to get filesystem from " \ + "hitachi nas: %s" % (six.text_type(e.msg)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get filesystem from " \ + "hitachi nas: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise 
exception.InvalidResults(err_msg) + + def get_fs_evs(self): + fs_info = self.ssh_do_exec([constant.FS_STATUS_COMMAND]) + fs_array = self.get_table_data(fs_info) + evs_list = [] + for fs in fs_array: + fs_info_array = fs.split() + if len(fs_info_array) > 6: + evs_list.append([fs_info_array[0], fs_info_array[4]]) + return evs_list + + def list_quotas(self, storage_id): + try: + evs_list = self.get_fs_evs() + quota_list = [] + for evs in evs_list: + quota_info = self.ssh_do_exec([ + constant.CHECK_EVS % evs[1], + constant.QUOTA_INFO_COMMAND % evs[0]]) + quota_map_list = [] + self.split_value_map_list(quota_info, quota_map_list, 'Usage') + for quota_map in quota_map_list: + type = None + user_group_name = None + qtree_id = None + if 'Group' in quota_map.get('Target'): + type = constants.QuotaType.GROUP + user_group_name = \ + quota_map.get('Target').replace('Group', '') + elif 'User' in quota_map.get('Target'): + type = constants.QuotaType.USER + user_group_name = \ + quota_map.get('Target').replace('User', '') + elif 'ViVol' in quota_map.get('Target'): + type = constants.QuotaType.TREE + user_group_name = \ + quota_map.get('Target').replace('ViVol', '') + qtree_id = evs[0] + '-' + user_group_name + quota_id = evs[0] + '-' + type + '-' + user_group_name + capacity_soft_limit = \ + quota_map.get('Limit').replace('(Soft)', '') + file_soft_limit = \ + quota_map.get('Limit1').replace('(Soft)', '') + quota = { + 'native_quota_id': quota_id, + 'type': type, + 'storage_id': storage_id, + 'native_filesystem_id': evs[0], + 'native_qtree_id': qtree_id, + 'capacity_hard_limit': None, + 'capacity_soft_limit': + self.get_size(capacity_soft_limit), + 'file_hard_limit': None, + 'file_soft_limit': file_soft_limit, + 'file_count': quota_map.get('FileCount'), + 'used_capacity': + self.get_size(quota_map.get('Usage')), + 'user_group_name': user_group_name + } + quota_list.append(quota) + return quota_list + except exception.DelfinException as e: + err_msg = "Failed to get storage quota from " \ + "hitachi nas: %s" % (six.text_type(e)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get storage quota from " \ + "hitachi nas: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def list_qtrees(self, storage_id): + try: + evs_list = self.get_fs_evs() + return self.get_qtree(evs_list, storage_id) + except exception.DelfinException as e: + err_msg = "Failed to get storage qtree from " \ + "hitachi nas: %s" % (six.text_type(e)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get storage qtree from " \ + "hitachi nas: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def get_qtree(self, evs_list, storage_id): + qtree_list = [] + for evs in evs_list: + tree_info = self.ssh_do_exec([ + constant.CHECK_EVS % evs[1], + constant.TREE_INFO_COMMAND % evs[0]]) + tree_map_list = [] + self.split_value_map_list(tree_info, + tree_map_list, + 'root', + split_key='last modified') + for qt_map in tree_map_list: + qt_name = '' + for key in qt_map: + if qt_map[key] == '' and key != 'email': + qt_name = key + qt_id = evs[0] + '-' + qt_name + qt_model = { + 'name': qt_name, + 'storage_id': storage_id, + 'native_qtree_id': qt_id, + 'path': qt_map.get('root'), + 'native_filesystem_id': evs[0], + 'security_mode': None, + } + qtree_list.append(qt_model) + return qtree_list + + def get_cifs_share(self, evs_list, storage_id): + share_list = [] + evs_array = [] + for evs in evs_list: + if evs[1] 
not in evs_array:
+                evs_array.append(evs[1])
+        for evs in evs_array:
+            cifs_share = self.ssh_do_exec([
+                constant.CHECK_EVS % evs,
+                constant.CIFS_SHARE_COMMAND])
+            cifs_map_list = []
+            self.split_value_map_list(cifs_share, cifs_map_list, 'Sharename')
+            for cifs in cifs_map_list:
+                qtree_id = None
+                if 'VirtualVolume' in cifs.get('Sharecomment'):
+                    qtree = cifs.get('Sharecomment').split('Volume')
+                    if cifs.get('Filesystemlabel'):
+                        qtree_id = \
+                            cifs.get('Filesystemlabel') + '-' + qtree[1]
+                if cifs.get('Filesystemlabel'):
+                    native_share_id = \
+                        '%s-%s-%s' % (cifs.get('Filesystemlabel'),
+                                      cifs.get('Sharename'),
+                                      constants.ShareProtocol.CIFS)
+                else:
+                    native_share_id = \
+                        cifs.get('Sharename') + '-' + \
+                        constants.ShareProtocol.CIFS
+                share = {
+                    'name': cifs.get('Sharename'),
+                    'storage_id': storage_id,
+                    'native_share_id': native_share_id,
+                    'native_qtree_id': qtree_id,
+                    'native_filesystem_id': cifs.get('Filesystemlabel'),
+                    'path': cifs.get('Sharepath'),
+                    'protocol': constants.ShareProtocol.CIFS
+                }
+                share_list.append(share)
+        return share_list
+
+    def get_nfs_share(self, evs_list, storage_id):
+        share_list = []
+        evs_array = []
+        for evs in evs_list:
+            if evs[1] not in evs_array:
+                evs_array.append(evs[1])
+        for evs in evs_array:
+            nfs_share = self.ssh_do_exec([
+                constant.CHECK_EVS % evs,
+                constant.NFS_SHARE_COMMAND])
+            nfs_map_list = []
+            self.split_value_map_list(nfs_share, nfs_map_list, 'Exportname')
+            qtree_list = self.get_qtree(evs_list, None)
+            for nfs in nfs_map_list:
+                qtree_id = None
+                for qtree in qtree_list:
+                    if nfs.get('Exportpath') == qtree['path'] \
+                            and qtree['native_filesystem_id'] \
+                            == nfs.get('Filesystemlabel'):
+                        qtree_id = qtree['native_qtree_id']
+                if nfs.get('Filesystemlabel'):
+                    native_share_id = \
+                        nfs.get('Filesystemlabel') \
+                        + '-' + nfs.get('Exportname') \
+                        + '-' + constants.ShareProtocol.NFS
+                else:
+                    native_share_id = \
+                        nfs.get('Exportname') + '-' + \
+                        constants.ShareProtocol.NFS
+                share = {
+                    'name': nfs.get('Exportname'),
+                    'storage_id': storage_id,
+                    'native_share_id': native_share_id,
+                    'native_qtree_id': qtree_id,
+                    'native_filesystem_id': nfs.get('Filesystemlabel'),
+                    'path': nfs.get('Exportpath'),
+                    'protocol': constants.ShareProtocol.NFS
+                }
+                share_list.append(share)
+        return share_list
+
+    def list_shares(self, storage_id):
+        try:
+            evs_list = self.get_fs_evs()
+            share_list = []
+            share_list.extend(self.get_cifs_share(evs_list, storage_id))
+            share_list.extend(self.get_nfs_share(evs_list, storage_id))
+            return share_list
+        except exception.DelfinException as e:
+            err_msg = "Failed to get storage share from " \
+                      "hitachi nas: %s" % (six.text_type(e))
+            LOG.error(err_msg)
+            raise e
+        except Exception as err:
+            err_msg = "Failed to get storage share from " \
+                      "hitachi nas: %s" % (six.text_type(err))
+            LOG.error(err_msg)
+            raise exception.InvalidResults(err_msg)
diff --git a/delfin/drivers/utils/ssh_client.py b/delfin/drivers/utils/ssh_client.py
index 0fc16921f..e118ff1e0 100644
--- a/delfin/drivers/utils/ssh_client.py
+++ b/delfin/drivers/utils/ssh_client.py
@@ -13,6 +13,8 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
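One pattern worth calling out from the collectors above: quota, qtree and share queries are EVS-scoped, so each is issued as a two-command shell session, first selecting the virtual server and then running the query, both travelling through a single exec call. A small sketch, with illustrative EVS id and filesystem label:

```python
# Sketch of the EVS-scoped two-command session used by list_quotas,
# get_qtree, get_cifs_share and get_nfs_share. Values are illustrative.
from delfin.drivers.hitachi.hnas import constants as constant

evs_id, fs_label = '1', 'fs1'                  # as returned by get_fs_evs()
command_list = [
    constant.CHECK_EVS % evs_id,               # 'evs-select 1'
    constant.QUOTA_INFO_COMMAND % fs_label,    # 'quota list fs1'
]
# handler.ssh_do_exec(command_list) returns the combined transcript of
# both commands, which split_value_map_list then parses.
print(command_list)
```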
+import time + import paramiko import six from eventlet import pools @@ -278,3 +280,44 @@ def do_exec(self, command_str): 'is not a recognized command' in result: raise exception.StorageBackendException(result) return result + + def do_exec_command(self, command_list): + result = '' + try: + with self.item() as ssh: + if command_list is not None and len(command_list) > 0 \ + and ssh is not None: + channel = ssh.invoke_shell() + for command in command_list: + utils.check_ssh_injection(command) + channel.send(command + '\n') + time.sleep(0.5) + channel.send("exit" + "\n") + channel.close() + while True: + resp = channel.recv(9999).decode('utf8') + if not resp: + break + result += resp + except paramiko.AuthenticationException as ae: + LOG.error('doexec Authentication error:{}'.format(ae)) + raise exception.InvalidUsernameOrPassword() + except Exception as e: + err = six.text_type(e) + LOG.error(err) + if 'timed out' in err \ + or 'SSH connect timeout' in err: + raise exception.SSHConnectTimeout() + elif 'No authentication methods available' in err \ + or 'Authentication failed' in err \ + or 'Invalid username or password' in err: + raise exception.InvalidUsernameOrPassword() + elif 'not a valid RSA private key file' in err \ + or 'not a valid RSA private key' in err: + raise exception.InvalidPrivateKey() + elif 'Unable to connect to port' in err \ + or 'Invalid ip or port' in err: + raise exception.InvalidIpOrPort() + else: + raise exception.SSHException(err) + return result diff --git a/delfin/tests/unit/drivers/hitachi/hnas/__init__.py b/delfin/tests/unit/drivers/hitachi/hnas/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/tests/unit/drivers/hitachi/hnas/test_constans.py b/delfin/tests/unit/drivers/hitachi/hnas/test_constans.py new file mode 100644 index 000000000..00939ffba --- /dev/null +++ b/delfin/tests/unit/drivers/hitachi/hnas/test_constans.py @@ -0,0 +1,832 @@ +# Copyright 2021 The SODA Authors. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
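The fixtures that follow are verbatim HNAS console transcripts, prompts and echoed commands included. The handler strips that chrome by locating the dashed rule under the column headers; a short demonstration against the POOL_INFO fixture defined below:

```python
# Demonstration (assumes this test_constans module is importable):
# get_table_data returns only the lines after the last dashed header rule.
from delfin.drivers.hitachi.hnas.nas_handler import NasHandler
from delfin.tests.unit.drivers.hitachi.hnas import test_constans

rows = NasHandler.get_table_data(test_constans.POOL_INFO)
# First data row: 'span1  Yes  100%  200  0,1,2,3  90%'
print(rows[0].split())
```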
+ +ACCESS_INFO = { + "storage_id": "12345", + "vendor": "hitachi", + "model": "hnas", + "ssh": { + "host": "192.168.3.211", + "port": 22, + "username": "manager", + "password": "manager", + } +} + +STORAGE_INFO = """\r +cluster-show\r + +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ cluster-show\r +Overall Status = Online\r +Cluster Health = Robust\r +Cluster Mode = Not clustered\r +Cluster Name = pba-hnas-1\r +Cluster UUID = a39f815a-e582-11d6-9000-b76f3098a657\r +Cluster Size = 1\r + Node Name = pba-hnas-1-1\r + Node ID = 1\r +Cluster GenId = 1\r +Cluster Master = No\r +\r +pba-hnas-1-1:$ """ + +VERSION_INFO = """\r +ver\r + +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ ver\r +\r +Model: HNAS 4060\r +\r +Software: 12.7.4221.12 (built 2016-10-28 21:51:37+01:00)\r +\r +Hardware: NAS Platform (M4SJKW1423160)\r +\r +board MMB1\r +mmb 12.7.4221.12 release (2016-10-28 21:51:37+01:00)\r +\r +board MFB2\r +mfb2hw MB v0132 WL v0132 TD v0132 FD v0132 TC v00C6 RY v00C6 \r +TY v00C6 IC v00C6 WF v007C FS v007C OS v007C WD v007C D0 v0077 \r +Serial no B1423125 (Tue Jun 17 13:38:33 2014)\r +\r +board MCP\r +Serial no B1423160 (Wed Jun 18 20:39:53 2014)\r +\r +pba-hnas-1-1:$ """ + +LOCATION_INFO = """\r +system-information-get\r +\r +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ system-information-get\r +\r + Name: pba-hnas-1\r + Location: chengdu\r + Contact: \r +\r +pba-hnas-1-1:$ """ + +DISK_INFO = """\r +sd-list --scsi\r + +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ sd-list --scsi\r +Device ID: 0\r +Comment: \r +Capacity: 50GiB (53687746560 bytes)\r +Status: OK\r +Role: Primary\r +Access: Allowed\r +Used in span: 'span1' (capacity 200GiB)\r +Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r +Submodel: HM70\r +Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:00\r +Blocksize: 512\r +Superflush: Default\r +Lun: 0\r +Serial number: 212902\r +Site ID: 0\r +Tier: 1\r +HDS ctrlr port: 0000\r +HDS dev name: 1000\r +HDP pool no: 0\r +GAD: No\r +Queue depth: min 16, default 32, max 512, configured [default], + effective 32\r +\r +Device ID: 1\r +Comment: \r +Capacity: 50GiB (53687746560 bytes)\r +Status: OK\r +Role: Primary\r +Access: Allowed\r +Used in span: 'span1' (capacity 200GiB)\r +Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r +Submodel: HM70\r +Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:01\r +Blocksize: 512\r +Superflush: Default\r +Lun: 1\r +Serial number: 212902\r +Site ID: 0\r +Tier: 1\r +HDS ctrlr port: 0400\r +HDS dev name: 1001\r +HDP pool no: 0\r +GAD: No\r +Queue depth: min 16, default 32, max 512, configured [default], + effective 32\r +\r +Device ID: 2\r +Comment: \r +Capacity: 50GiB (53687746560 bytes)\r +Status: OK\r +Role: Primary\r +Access: Allowed\r +Used in span: 'span1' (capacity 200GiB)\r +Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r +Submodel: HM70\r +Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:02\r +Blocksize: 512\r +Superflush: Default\r +Lun: 2\r +Serial number: 212902\r +Site ID: 0\r +Tier: 1\r +HDS ctrlr port: 0000\r +HDS dev name: 1002\r +HDP pool no: 0\r +GAD: No\r +Queue depth: min 16, default 32, max 512, configured [default], + effective 32\r +\r +Device ID: 3\r +Comment: \r +Capacity: 50GiB (53687746560 bytes)\r +Status: OK\r +Role: Primary\r +Access: Allowed\r +Used in span: 'span1' (capacity 200GiB)\r +Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r +Submodel: HM70\r +Luid: 
[03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:03\r +Blocksize: 512\r +Superflush: Default\r +Lun: 3\r +Serial number: 212902\r +Site ID: 0\r +Tier: 1\r +HDS ctrlr port: 0400\r +HDS dev name: 1003\r +HDP pool no: 0\r +GAD: No\r +Queue depth: min 16, default 32, max 512, configured [default], + effective 32\r +\r +Device ID: 4\r +Comment: \r +Capacity: 50GiB (53687746560 bytes)\r +Status: OK\r +Role: Primary\r +Access: Allowed\r +Used in span: 'span2' (capacity 400GiB)\r +Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r +Submodel: HM70\r +Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:04\r +Blocksize: 512\r +Superflush: Default\r +Lun: 4\r +Serial number: 212902\r +Site ID: 0\r +Tier: None\r +HDS ctrlr port: 0000\r +HDS dev name: 1004\r +HDP pool no: 0\r +GAD: No\r +Queue depth: min 16, default 32, max 512, configured [default], + effective 32\r +\r +Device ID: 5\r +Comment: \r +Capacity: 50GiB (53687746560 bytes)\r +Status: OK\r +Role: Primary\r +Access: Allowed\r +Used in span: 'span2' (capacity 400GiB)\r +Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r +Submodel: HM70\r +Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:05\r +Blocksize: 512\r +Superflush: Default\r +Lun: 5\r +Serial number: 212902\r +Site ID: 0\r +Tier: None\r +HDS ctrlr port: 0400\r +HDS dev name: 1005\r +HDP pool no: 0\r +GAD: No\r +Queue depth: min 16, default 32, max 512, configured [default], + effective 32\r +\r +Device ID: 6\r +Comment: \r +Capacity: 50GiB (53687746560 bytes)\r +Status: OK\r +Role: Primary\r +Access: Allowed\r +Used in span: 'span2' (capacity 400GiB)\r +Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r +Submodel: HM70\r +Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:06\r +Blocksize: 512\r +Superflush: Default\r +Lun: 6\r +Serial number: 212902\r +Site ID: 0\r +Tier: None\r +HDS ctrlr port: 0000\r +HDS dev name: 1006\r +HDP pool no: 0\r +GAD: No\r +Queue depth: min 16, default 32, max 512, configured [default], + effective 32\r +\r +Device ID: 7\r +Comment: \r +Capacity: 50GiB (53687746560 bytes)\r +Status: OK\r +Role: Primary\r +Access: Allowed\r +Used in span: 'span2' (capacity 400GiB)\r +Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r +Submodel: HM70\r +Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:07\r +Blocksize: 512\r +Superflush: Default\r +Lun: 7\r +Serial number: 212902\r +Site ID: 0\r +Tier: None\r +HDS ctrlr port: 0400\r +HDS dev name: 1007\r +HDP pool no: 0\r +GAD: No\r +Queue depth: min 16, default 32, max 512, configured [default], + effective 32\r +\r +Device ID: 8\r +Comment: \r +Capacity: 50GiB (53687746560 bytes)\r +Status: OK\r +Role: Primary\r +Access: Allowed\r +Used in span: 'span2' (capacity 400GiB)\r +Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r +Submodel: HM70\r +Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:08\r +Blocksize: 512\r +Superflush: Default\r +Lun: 8\r +Serial number: 212902\r +Site ID: 0\r +Tier: None\r +HDS ctrlr port: 0400\r +HDS dev name: 1008\r +HDP pool no: 0\r +GAD: No\r +Queue depth: min 16, default 32, max 512, configured [default], + effective 32\r +\r +Device ID: 9\r +Comment: \r +Capacity: 50GiB (53687746560 bytes)\r +Status: OK\r +Role: Primary\r +Access: Allowed\r +Used in span: 'span2' (capacity 400GiB)\r +Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r +Submodel: HM70\r +Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:09\r +Blocksize: 512\r +Superflush: Default\r +Lun: 9\r +Serial number: 212902\r +Site ID: 0\r +Tier: None\r 
+HDS ctrlr port: 0000\r +HDS dev name: 1009\r +HDP pool no: 0\r +GAD: No\r +Queue depth: min 16, default 32, max 512, configured [default], + effective 32\r +\r +Device ID: 10\r +Comment: \r +Capacity: 50GiB (53687746560 bytes)\r +Status: OK\r +Role: Primary\r +Access: Allowed\r +Used in span: 'span2' (capacity 400GiB)\r +Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r +Submodel: HM70\r +Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:0A\r +Blocksize: 512\r +Superflush: Default\r +Lun: 10\r +Serial number: 212902\r +Site ID: 0\r +Tier: None\r +HDS ctrlr port: 0400\r +HDS dev name: 100A\r +HDP pool no: 0\r +GAD: No\r +Queue depth: min 16, default 32, max 512, configured [default], + effective 32\r +\r +Device ID: 11\r +Comment: \r +Capacity: 50GiB (53687746560 bytes)\r +Status: OK\r +Role: Primary\r +Access: Allowed\r +Used in span: 'span2' (capacity 400GiB)\r +Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r +Submodel: HM70\r +Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:0B\r +Blocksize: 512\r +Superflush: Default\r +Lun: 11\r +Serial number: 212902\r +Site ID: 0\r +Tier: None\r +HDS ctrlr port: 0000\r +HDS dev name: 100B\r +HDP pool no: 0\r +GAD: No\r +Queue depth: min 16, default 32, max 512, configured [default], + effective 32\r +\r +pba-hnas-1-1:$ """ + +POOL_INFO = """\r +span-list\r +\r +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ span-list\r +Span instance name OK? Free Cap/GiB System drives Con\r +--------------------- --- ---- ------- ------------------------- ---\r +span1 Yes 100% 200 0,1,2,3 90%\r + Tier 0: empty: file systems can't be created or mounted\r + Tier 1: capacity 200GiB; free: 200GiB (100%); HDP pool free 996GiB\r +span2 Yes 86% 400 4,5,6,7;8,9,10,11 90%\r +pba-hnas-1-1:$ """ + +POOL_DETAIL_INFO = """\r +\r +\r +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ span-space-distribution\r +Span span2:\r +\r + How each stripeset is used:\r + Stripeset 0:\r + 18GiB 9.09% fs1\r + 18GiB 9.09% fs2\r + 18GiB 9.09% fs3\r + 145GiB 72.74% [Free space]\r + Stripeset 1:\r + 200GiB 100.00% [Free space]\r +\r + Where each filesystem resides:\r + Filesystem fs1:\r + Stripeset 0 18GiB 100.00%\r + Filesystem fs2:\r + Stripeset 0 18GiB 100.00%\r + Filesystem fs3:\r + Stripeset 0 18GiB 100.00%\r +\r +Span span1:\r +\r + How each stripeset is used:\r + Stripeset 0:\r + 200GiB 100.00% [Free space]\r +\r + Where each filesystem resides:\r +\r +pba-hnas-1-1:$""" + +ALERT_INFO = """\r +\r +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ event-log-show -w -s\r +****** Current time : 2021-10-25 11:12:35+08:00 ******\r +8462 Warning 2021-10-25 08:00:10+08:00 [ pba-hnas-1 ] The +SMU does not have an email alert profile relating to a managed server.\r +8462 Warning 2021-10-24 08:00:10+08:00 [ pba-hnas-1 ] The +SMU does not have an email alert profile relating to a managed server.\r +8462 Warning 2021-10-23 08:00:10+08:00 [ pba-hnas-1 ] The +SMU does not have an email alert profile relating to a managed server.\r +8462 Warning 2021-10-22 14:29:02+08:00 [ pba-hnas-1 ] The +SMU does not have an email alert profile relating to a managed server.\r +8462 Warning 2021-10-22 14:00:10+08:00 [ pba-hnas-1 ] The +SMU does not have an email alert profile relating to a managed server.\r +8462 Warning 2021-10-21 14:00:10+08:00 [ pba-hnas-1 ] The +SMU does not have an email alert profile relating to a managed server.\r +9997 Severe 2021-10-21 12:33:53+08:00 testSNMP.\r +9996 Warning 2021-10-21 
12:32:20+08:00 123123123123.\r +9997 Severe 2021-10-21 12:26:04+08:00 Test Message5456.\r +9997 Severe 2021-10-21 11:53:54+08:00 Test Message.\r +9996 Warning 2021-10-21 11:36:37+08:00 Test Message.\r +9997 Severe 2021-10-21 11:33:05+08:00 Test Message.\r +9997 Severe 2021-10-21 11:23:08+08:00 Test Message.\r +9997 Severe 2021-10-21 11:13:30+08:00 Test Message.\r +9997 Severe 2021-10-21 11:12:30+08:00 12312312312.\r +9996 Warning 2021-10-21 10:43:44+08:00 Test Message123.\r +9997 Severe 2021-10-21 10:40:35+08:00 snmp.\r +9997 Severe 2021-10-21 10:35:47+08:00 Test Message.\r +9997 Severe 2021-10-21 10:33:01+08:00 Test SNMP.\r +9997 Severe 2021-10-21 10:28:43+08:00 Test Message.\r +9997 Severe 2021-10-21 10:14:48+08:00 Test Message.\r +9996 Warning 2021-10-21 10:10:42+08:00 Test Message.\r +8462 Warning 2021-10-20 14:00:10+08:00 [ pba-hnas-1 ] The\r +SMU does not have an email alert profile relating to a managed server.\r +9996 Warning 2021-10-19 17:45:02+08:00 Test Message.\r +9996 Warning 2021-10-19 17:36:19+08:00 Test Message123.\r +9997 Severe 2021-10-19 17:34:55+08:00 Test Message.\r +9997 Severe 2021-10-19 16:40:05+08:00 Test Messagesdfsfdsfdsf.\r +9997 Severe 2021-10-19 16:39:58+08:00 Test Messagesdfsdfsdfsfsdfsdfsfs.\r +8462 Warning 2021-10-19 14:00:10+08:00 [ pba-hnas-1 ] The SMU\r +does not have an email alert profile relating to a managed server.\r +9996 Warning 2021-10-19 11:20:40+08:00 Test Message zzp 告警信息 .\r +9997 Severe 2021-10-19 11:20:32+08:00 Test Message zzp 严重信息 .\r +9997 Severe 2021-10-19 11:11:30+08:00 Test Message zzp 严重信息 .\r +9996 Warning 2021-10-19 11:11:10+08:00 Test Message zzp 告警信息 .\r +****** Current time : 2021-10-25 11:12:35+08:00 ******\r +pba-hnas-1-1:$ """ + +TRAP_INFO = { + '1.3.6.1.4.1.11096.6.1.1': + "8462 Warning: [ pba-hnas-1 ] The SMU does not have an email alert " + "profile relating to a managed server." 
+} + +NODE_INFO = """Linux pba-hnas-1 2.6.32-5-amd64 #1 SMP Sun Dec 21 18: +01:12 UTC 2014 x86_64\r +\r +\r +\r +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ cluster-show -y\r + Ethernet Mgmnt\r +ID Node Name Status FS Access Aggs Netwrk FC EVS IDs\r +-- --------------- -------- ---------- ---------- ------ --- -------\r +1 pba-hnas-1-1 ONLINE OK Degraded OK OK [0,1,2]\r +pba-hnas-1-1:$ """ + +FC_PORT_INFO = """\r +fc-hports\r +\r +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ fc-hports\r +\r +Host Port 1\r +Addrs: 0x1\r +Port name: 50:03:01:70:00:06:8B:01\r +Node name: 50:03:01:70:00:06:8B:00 \r +FC Link is up\r +Status : Good \r +\r +Host Port 2\r +Addrs: not assigned\r +Port name: 50:03:01:70:00:06:8B:02\r +Node name: 50:03:01:70:00:06:8B:00 \r +FC Link is down\r +\r +Host Port 3\r +Addrs: 0x1\r +Port name: 50:03:01:70:00:06:8B:03\r +Node name: 50:03:01:70:00:06:8B:00 \r +FC Link is up\r +Status : Good \r +\r +Host Port 4\r +Addrs: not assigned\r +Port name: 50:03:01:70:00:06:8B:04\r +Node name: 50:03:01:70:00:06:8B:00 \r +FC Link is down\r +\r +pba-hnas-1-1:$ """ + +FC_PORT_STATUS = """\r +\r +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ fc-link-speed\r +FC 1: 8 Gbps\r +FC 2: 4 Gbps\r +FC 3: 8 Gbps\r +FC 4: 8 Gbps\r +pba-hnas-1-1:$ """ + +ETH_PORT_INFO = """\r +\r +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ ifconfig\r +ag1 Link encap:1 HWaddr 00-30-17-09-fc-08\r + inet addr:192.168.0.1 Bcast:192.168.0.255 mask:255.255.255.0\r + inet addr:192.168.0.2 Bcast:192.168.0.255 mask:255.255.255.0\r + Link:DOWN Admin:UP MTU:1500 Metric:1 txqueuelen:64\r +\r +ag2 Link encap:1 HWaddr 00-30-17-09-fc-09\r + Link:DOWN Admin:DOWN MTU:1500 Metric:1 txqueuelen:64\r +\r +c1 Link encap:1 HWaddr 00-30-17-09-fc-10\r + inet addr:240.152.166.87 Bcast:240.255.255.255 mask:255.0.0.0\r + Link:DOWN Admin:UP MTU:1488 Metric:2 txqueuelen:64\r +\r +c2 Link encap:1 HWaddr 00-30-17-09-fc-11\r + Link:DOWN Admin:DOWN MTU:1488 Metric:2 txqueuelen:64\r +\r +eth0 Link encap:1 HWaddr 0c-c4-7a-05-9e-a0\r + inet addr:192.168.3.211 Bcast:192.168.3.255 mask:255.255.255.0\r + inet6 addr: fe80::ec4:7aff:fe05:9ea0/64 Scope:Link\r + Link:UP Admin:UP MTU:1500 Metric:4 txqueuelen:64\r +\r +eth1 Link encap:1 HWaddr 0c-c4-7a-05-9e-a1\r + inet addr:192.0.2.2 Bcast:192.0.255.255 mask:255.255.0.0\r + inet addr:192.0.2.200 Bcast:192.0.255.255 mask:255.255.0.0\r + Link:DOWN Admin:UP MTU:1500 Metric:4 txqueuelen:64\r +\r +lo Link encap:1 \r + inet addr:127.0.0.1 Bcast:127.255.255.255 mask:255.0.0.0\r + inet6 addr: ::1/128 Scope:Global\r + inet6 addr: fe80::200:ff:fe00:0/64 Scope:Link\r + Link:UP Admin:UP MTU:1500 Metric:4 txqueuelen:64\r +\r +pba-hnas-1-1:$ """ + +FS_INFO = """\r +filesystem-list\r +\r +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ filesystem-list\r +Instance name Dev On span State EVS Cap/GiB Confined Flag\r +----------------- ---- ----------- ----- --- ------- -------- ----\r +fs1 1024 span2 Mount 1 18 20 \r +pba-hnas-1-1:$ """ + +QTREE_INFO = """\r +evs-select 1\r +\r +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ evs-select 1\r +pba-hnas-1-1[EVS1]:$ virtual-volume list --verbose fs1\r +tree1\r + email : \r + root : /12323\r + tag : 2\r + usage bytes : 0 B files: 1\r + last modified: 2021-09-23 07:18:14.714807865+00:00\r +vol2\r + email : \r + root : /123\r + tag : 1\r + usage bytes : 0 B files: 1\r + last modified: 2021-09-15 07:17:02.790323869+00:00\r 
+pba-hnas-1-1[EVS1]:$ """ + +CIFS_SHARE_INFO = """\r +\r +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ evs-select 1\r +pba-hnas-1-1[EVS1]:$ cifs-share list\r +\r + Share name: tree1\r + Share path: \12323\r + Share users: 0\r + Share online: Yes\r + Share comment: Share associated with Virtual Volume tree1\r + Cache options: Manual local caching for documents\r + ABE enabled: No\r +Continuous Availability: No\r + Access snapshots: Yes\r + Display snapshots: Yes\r + ShadowCopy enabled: Yes\r + Lower case on create: No\r + Follow symlinks: Yes\r + Follow global symlinks: No\r + Scan for viruses: Yes\r + File system label: fs1\r + File system size: 18 GB\r +File system free space: 15.6 GB\r + File system state: \r + formatted = Yes\r + mounted = Yes\r + failed = No\r + thin provisioned = No\r +Disaster recovery setting:\r +Recovered = No\r +Transfer setting = Use file system default\r + Home directories: Off\r + Mount point options:\r +\r + Share name: C$\r + Share path: \\r + Share users: 0\r + Share online: Yes\r + Share comment: Default share\r + Cache options: Manual local caching for documents\r + ABE enabled: No\r +Continuous Availability: No\r + Access snapshots: Yes\r + Display snapshots: No\r + ShadowCopy enabled: Yes\r + Lower case on create: No\r + Follow symlinks: Yes\r + Follow global symlinks: No\r + Scan for viruses: Yes\r + File system info: *** not available ***\r +Disaster recovery setting:\r +Recovered = No\r +Transfer setting = Use file system default\r +Home directories: Off\r + Mount point options:\r +\r +\r + Share name: vol6\r + Share path: \666\r + Share users: 0\r + Share online: No\r + Share comment: Share associated with Virtual Volume vol6\r + Cache options: Manual local caching for documents\r + ABE enabled: No\r +Continuous Availability: No\r + Access snapshots: Yes\r + Display snapshots: Yes\r + ShadowCopy enabled: Yes\r + Lower case on create: No\r + Follow symlinks: Yes\r + Follow global symlinks: No\r + Scan for viruses: Yes\r + File system info: *** not available ***\r +Disaster recovery setting:\r +Recovered = No\r +Transfer setting = Use file system default\r +Home directories: Off\r + Mount point options:\r + \r +pba-hnas-1-1[EVS1]:$ """ + +NFS_SHARE_INFO = """\r +\r +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ evs-select 1\r +pba-hnas-1-1[EVS1]:$ nfs-export list\r +\r + Export name: /nfs1\r + Export path: /\r + File system label: fs1\r + File system size: 18 GB\r + File system free space: 15.6 GB\r + File system state: \r + formatted = Yes\r + mounted = Yes\r + failed = No\r + thin provisioned = No\r + Access snapshots: Yes\r + Display snapshots: Yes\r + Read Caching: Disabled\r +Disaster recovery setting:\r +Recovered = No\r +Transfer setting = Use file system default\r +\r +Export configuration:\r +192.168.3.163\r +\r +\r + Export name: /vol6\r + Export path: /666\r + File system info: *** not available *** \r + Access snapshots: Yes\r + Display snapshots: Yes\r + Read Caching: Disabled\r +Disaster recovery setting:\r +Recovered = No\r +Transfer setting = Use file system default\r +\r +Export configuration:\r +\r +\r +\r + Export name: /vol2\r + Export path: /123\r + File system label: fs1\r + File system size: 18 GB\r + File system free space: 15.6 GB\r + File system state: \r + formatted = Yes\r + mounted = Yes\r + failed = No\r + thin provisioned = No\r + Access snapshots: Yes\r + Display snapshots: Yes\r + Read Caching: Disabled\r +Disaster recovery setting:\r +Recovered = No\r +Transfer setting 
= Use file system default\r +\r +Export configuration:\r +\r +\r +pba-hnas-1-1[EVS1]:$ """ + +FS_DETAIL_INFO = """\r +\r +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ df -k\r +\r + ID Label EVS Size Used Snapshots """\ + + """ Deduped Avail Thin FS Type \r +---- ----- --- ----------- ---------------- --------- """\ + + """ ------- ----------------- ---- ----- \r +1024 fs1 1 18874368 KB 2520544 KB (13%) 0 KB (0%) """\ + + """ NA 16353824 KB (87%) No 32 KB,WFS-2,128 DSBs \r +\r +pba-hnas-1-1:$ """ + +QUOTA_INFO = """\r +\r +HDS NAS OS Console\r +MAC ID : B7-6F-30-98-A6-57\r +\r +pba-hnas-1-1:$ evs-select 1\r +pba-hnas-1-1[EVS1]:$ quota list fs1\r +Type : Explicit\r +Target : Group: root\r +Usage : 10 GB\r + Limit : 1 GB (Soft)\r + Warning : 75% (768 MB)\r + Critical : 85% (870.4 MB)\r + Reset : 5% (51.2 MB)\r +File Count : 7\r + Limit : 213 (Soft)\r + Warning : 75% (159)\r + Critical : 85% (181)\r + Reset : 5% (10)\r +Generate Events : Disabled\r +\r +Type : Explicit\r +Target : User: root\r +Usage : 10 GB\r + Limit : 1 GB (Soft)\r + Warning : 75% (768 MB)\r + Critical : 85% (870.4 MB)\r + Reset : 5% (51.2 MB)\r +File Count : 7\r + Limit : 213 (Soft)\r + Warning : 75% (159)\r + Critical : 85% (181)\r + Reset : 5% (10)\r +Generate Events : Disabled\r +\r +Type : Explicit\r +Target : ViVol: vol2\r +Usage : 0 B\r + Limit : 1 GB (Soft)\r + Warning : 75% (768 MB)\r + Critical : 85% (870.4 MB)\r + Reset : 5% (51.2 MB)\r +File Count : 1\r + Limit : 213 (Soft)\r + Warning : 75% (159)\r + Critical : 85% (181)\r + Reset : 5% (10)\r +Generate Events : Disabled\r +\r +pba-hnas-1-1[EVS1]:$""" diff --git a/delfin/tests/unit/drivers/hitachi/hnas/test_hnas.py b/delfin/tests/unit/drivers/hitachi/hnas/test_hnas.py new file mode 100644 index 000000000..a2414fb88 --- /dev/null +++ b/delfin/tests/unit/drivers/hitachi/hnas/test_hnas.py @@ -0,0 +1,125 @@ +# Copyright 2021 The SODA Authors. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
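Of the fixtures above, QUOTA_INFO exercises the most involved parsing path: split_value_map_list turns each blank-line-separated block into a dict, strips spaces from keys and values (so "Target : Group: root" becomes {'Target': 'Grouproot'}), and suffixes repeated keys with '1' (the second 'Limit' in a block becomes 'Limit1'). A hedged sketch of that step in isolation:

```python
# Sketch: parsing the QUOTA_INFO fixture the way list_quotas does.
from delfin.drivers.hitachi.hnas.nas_handler import NasHandler
from delfin.tests.unit.drivers.hitachi.hnas import test_constans

quota_maps = []
NasHandler.split_value_map_list(test_constans.QUOTA_INFO, quota_maps, 'Usage')
for quota in quota_maps:
    # e.g. Target='Grouproot', Usage='10GB',
    #      Limit='1GB(Soft)', Limit1='213(Soft)'
    print(quota['Target'], quota['Usage'], quota['Limit'], quota['Limit1'])
```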
+ +from unittest import TestCase, mock + +import paramiko + +from delfin.tests.unit.drivers.hitachi.hnas import test_constans +from delfin import context +from delfin.drivers.hitachi.hnas.nas_handler import NasHandler +from delfin.drivers.hitachi.hnas.hds_nas import HitachiHNasDriver +from delfin.drivers.utils.ssh_client import SSHPool + + +class Request: + def __init__(self): + self.environ = {'delfin.context': context.RequestContext()} + pass + + +class TestHitachiHNasDriver(TestCase): + SSHPool.get = mock.Mock( + return_value={paramiko.SSHClient()}) + + NasHandler.login = mock.Mock() + hnas_client = HitachiHNasDriver(**test_constans.ACCESS_INFO) + + def test_reset_connection(self): + kwargs = test_constans.ACCESS_INFO + NasHandler.login = mock.Mock() + hnas_client = HitachiHNasDriver(**kwargs) + hnas_client.reset_connection(context, **kwargs) + self.assertEqual(hnas_client.nas_handler.ssh_pool.ssh_host, + "192.168.3.211") + self.assertEqual(hnas_client.nas_handler.ssh_pool.ssh_port, 22) + + def test_get_storage(self): + SSHPool.do_exec_command = mock.Mock( + side_effect=[test_constans.STORAGE_INFO, + test_constans.VERSION_INFO, + test_constans.LOCATION_INFO, + test_constans.DISK_INFO, + test_constans.POOL_INFO, + test_constans.POOL_DETAIL_INFO]) + data = self.hnas_client.get_storage(context) + self.assertEqual(data['vendor'], 'HITACHI') + + def test_list_storage_pools(self): + SSHPool.do_exec_command = mock.Mock( + side_effect=[test_constans.POOL_INFO, + test_constans.POOL_DETAIL_INFO]) + data = self.hnas_client.list_storage_pools(context) + self.assertEqual(data[0]['name'], 'span1') + + def test_list_alerts(self): + SSHPool.do_exec_command = mock.Mock( + side_effect=[test_constans.ALERT_INFO]) + data = self.hnas_client.list_alerts(context) + self.assertEqual(data[0]['alert_name'], + '8462') + + def test_parse_alert(self): + data = self.hnas_client.parse_alert(context, test_constans.TRAP_INFO) + self.assertEqual(data['alert_name'], '8462') + + def test_list_controllers(self): + SSHPool.do_exec_command = mock.Mock( + side_effect=[test_constans.NODE_INFO]) + data = self.hnas_client.list_controllers(context) + self.assertEqual(data[0]['name'], 'pba-hnas-1-1') + + def test_list_ports(self): + SSHPool.do_exec_command = mock.Mock( + side_effect=[test_constans.FC_PORT_INFO, + test_constans.FC_PORT_STATUS, + test_constans.ETH_PORT_INFO]) + data = self.hnas_client.list_ports(context) + self.assertEqual(data[0]['name'], 'FC1') + + def test_list_disks(self): + SSHPool.do_exec_command = mock.Mock( + side_effect=[test_constans.DISK_INFO]) + data = self.hnas_client.list_disks(context) + self.assertEqual(data[0]['name'], '1000') + + def test_list_qtrees(self): + SSHPool.do_exec_command = mock.Mock(side_effect=[ + test_constans.FS_INFO, test_constans.QTREE_INFO]) + data = self.hnas_client.list_qtrees(context) + self.assertEqual(data[0]['name'], 'tree1') + + def test_list_shares(self): + SSHPool.do_exec_command = mock.Mock( + side_effect=[test_constans.FS_INFO, + test_constans.CIFS_SHARE_INFO, + test_constans.NFS_SHARE_INFO, + test_constans.QTREE_INFO]) + data = self.hnas_client.list_shares(context) + self.assertEqual(data[0]['name'], 'tree1') + + def test_list_filesystems(self): + SSHPool.do_exec_command = mock.Mock( + side_effect=[test_constans.FS_DETAIL_INFO, + test_constans.FS_INFO]) + data = self.hnas_client.list_filesystems(context) + self.assertEqual(data[0]['name'], 'fs1') + + def test_list_quotas(self): + SSHPool.do_exec_command = mock.Mock( + side_effect=[test_constans.FS_INFO, + 
test_constans.QUOTA_INFO]) + data = self.hnas_client.list_quotas(context) + self.assertEqual(data[0]['file_soft_limit'], '213')
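A closing note on the test strategy: the SSH layer is stubbed at SSHPool.do_exec_command with a side_effect list, so every ssh_do_exec call in the handler consumes the next canned transcript in order, and the assertions run the real parsing code end to end. The same pattern drives the handler in isolation; a standalone sketch:

```python
# Standalone sketch of the stubbing pattern used above: NasHandler is
# driven directly, with canned transcripts consumed in call order.
from unittest import mock

from delfin.drivers.hitachi.hnas.nas_handler import NasHandler
from delfin.drivers.utils.ssh_client import SSHPool
from delfin.tests.unit.drivers.hitachi.hnas import test_constans

SSHPool.do_exec_command = mock.Mock(
    side_effect=[test_constans.POOL_INFO,          # consumed by get_pool()
                 test_constans.POOL_DETAIL_INFO])  # consumed by get_pool_size()
handler = NasHandler(**test_constans.ACCESS_INFO)  # login() is not called here
pools = handler.get_pool(storage_id='12345')
assert pools[0]['name'] == 'span1'
```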