rtslib-2.2/
rtslib-2.2/rtslib/
rtslib-2.2/rtslib/__init__.py

'''
This file is part of RTSLib.
Copyright (c) 2011-2013 by Datera, Inc

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
'''

import utils

from root import RTSRoot
from utils import RTSLibError, RTSLibBrokenLink, RTSLibNotInCFS
from target import LUN, MappedLUN
from target import NodeACL, NetworkPortal, TPG, Target, FabricModule
from tcm import FileIOBackstore, IBlockBackstore
from tcm import FileIOStorageObject, IBlockStorageObject
from tcm import PSCSIBackstore, RDDRBackstore, RDMCPBackstore
from tcm import PSCSIStorageObject, RDDRStorageObject, RDMCPStorageObject

__version__ = 'GIT_VERSION'
__author__ = "Jerome Martin "
__url__ = "http://www.risingtidesystems.com"
__description__ = "API for RisingTide Systems generic SCSI target."
__license__ = __doc__

rtslib-2.2/rtslib/node.py

'''
Implements the base CFSNode class and a few inherited variants.

This file is part of RTSLib.
Copyright (c) 2011-2013 by Datera, Inc

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
'''

import os
import stat

from utils import fread, fwrite, RTSLibError, RTSLibNotInCFS


class CFSNode(object):

    # Where do we store the fabric modules spec files ?
    spec_dir = "/var/target/fabric"

    # Where is the configfs base LIO directory ?
    configfs_dir = '/sys/kernel/config/target'

    # TODO: Make the ALUA path generic, not iscsi-centric
    # What is the ALUA directory ?
    alua_metadata_dir = "/var/target/alua/iSCSI"

    # CFSNode private stuff

    def __init__(self):
        self._path = self.configfs_dir

    def __nonzero__(self):
        if os.path.isdir(self.path):
            return True
        else:
            return False

    def __str__(self):
        return self.path

    def _get_path(self):
        return self._path

    def _create_in_cfs_ine(self, mode):
        '''
        Creates the configFS node if it does not already exist, depending on
        the mode.

        any -> makes sure it exists, also works if the node already exists
        lookup -> makes sure it does NOT exist
        create -> creates the node, which must not exist beforehand

        Upon success (no exception raised), self._fresh is True if a node was
        created, else self._fresh is False.
        '''
        if mode not in ['any', 'lookup', 'create']:
            raise RTSLibError("Invalid mode: %s" % mode)
        if self and mode == 'create':
            raise RTSLibError("This %s already exists in configFS."
                              % self.__class__.__name__)
        elif not self and mode == 'lookup':
            raise RTSLibNotInCFS("No such %s in configfs: %s."
                                 % (self.__class__.__name__, self.path))
        if not self:
            os.mkdir(self.path)
            self._fresh = True
        else:
            self._fresh = False

    def _exists(self):
        return bool(self)

    def _check_self(self):
        if not self:
            raise RTSLibNotInCFS("This %s does not exist in configFS."
                                 % self.__class__.__name__)

    def _is_fresh(self):
        return self._fresh

    def _list_files(self, path, writable=None):
        '''
        List files under a path depending on their owner's write permissions.

        @param path: The path under which the files are expected to be. If the
        path itself is not a directory, an empty list will be returned.
        @type path: str
        @param writable: If None (default), returns all parameters, if True,
        returns read-write parameters, if False, returns just the read-only
        parameters.
        @type writable: bool or None
        @return: List of file names filtered according to their write perms.
        '''
        if not os.path.isdir(path):
            return []

        if writable is None:
            names = os.listdir(path)
        elif writable:
            names = [name for name in os.listdir(path)
                     if (os.stat("%s/%s" % (path, name))[stat.ST_MODE]
                         & stat.S_IWUSR)]
        else:
            names = [os.path.basename(name) for name in os.listdir(path)
                     if not (os.stat("%s/%s" % (path, name))[stat.ST_MODE]
                             & stat.S_IWUSR)]
        names.sort()
        return names

    # CFSNode public stuff

    def list_parameters(self, writable=None):
        '''
        @param writable: If None (default), returns all parameters, if True,
        returns read-write parameters, if False, returns just the read-only
        parameters.
        @type writable: bool or None
        @return: The list of existing RFC-3720 parameter names.
        '''
        self._check_self()
        path = "%s/param" % self.path
        return self._list_files(path, writable)

    def list_attributes(self, writable=None):
        '''
        @param writable: If None (default), returns all attributes, if True,
        returns read-write attributes, if False, returns just the read-only
        attributes.
        @type writable: bool or None
        @return: A list of existing attribute names as strings.
        '''
        self._check_self()
        path = "%s/attrib" % self.path
        return self._list_files(path, writable)

    def list_auth_attrs(self, writable=None):
        '''
        @param writable: If None (default), returns all auth attrs, if True,
        returns read-write auth attrs, if False, returns just the read-only
        auth attrs.
        @type writable: bool or None
        @return: A list of existing attribute names as strings.
        '''
        self._check_self()
        path = "%s/auth" % self.path
        return self._list_files(path, writable)

    def set_attribute(self, attribute, value):
        '''
        Sets the value of a named attribute.
        The attribute must exist in configFS.

        @param attribute: The attribute's name. It is case-sensitive.
        @type attribute: string
        @param value: The attribute's value.
        @type value: string
        '''
        self._check_self()
        path = "%s/attrib/%s" % (self.path, str(attribute))
        if not os.path.isfile(path):
            raise RTSLibError("Cannot find attribute: %s." % str(attribute))
        else:
            try:
                fwrite(path, "%s\n" % str(value))
            except IOError, msg:
                msg = msg[1]
                raise RTSLibError("Cannot set attribute %s: %s"
                                  % (str(attribute), str(msg)))

    def get_attribute(self, attribute):
        '''
        @param attribute: The attribute's name. It is case-sensitive.
        @return: The named attribute's value, as a string.
        '''
        self._check_self()
        path = "%s/attrib/%s" % (self.path, str(attribute))
        if not os.path.isfile(path):
            raise RTSLibError("Cannot find attribute: %s." % str(attribute))
        else:
            return fread(path).strip()

    def set_parameter(self, parameter, value):
        '''
        Sets the value of a named RFC-3720 parameter.
        The parameter must exist in configFS.
        @param parameter: The RFC-3720 parameter's name. It is case-sensitive.
        @type parameter: string
        @param value: The parameter's value.
        @type value: string
        '''
        self._check_self()
        path = "%s/param/%s" % (self.path, str(parameter))
        if not os.path.isfile(path):
            raise RTSLibError("Cannot find parameter: %s." % str(parameter))
        else:
            try:
                fwrite(path, "%s\n" % str(value))
            except IOError, msg:
                msg = msg[1]
                raise RTSLibError("Cannot set parameter %s: %s"
                                  % (str(parameter), str(msg)))

    def get_parameter(self, parameter):
        '''
        @param parameter: The RFC-3720 parameter's name. It is case-sensitive.
        @type parameter: string
        @return: The named parameter value as a string.
        '''
        self._check_self()
        path = "%s/param/%s" % (self.path, str(parameter))
        if not os.path.isfile(path):
            raise RTSLibError("Cannot find RFC-3720 parameter: %s."
                              % str(parameter))
        else:
            return fread(path).rstrip()

    def set_auth_attr(self, auth_attr, value):
        '''
        Sets the value of a named auth_attr.
        The auth_attr must exist in configFS.

        @param auth_attr: The auth_attr's name. It is case-sensitive.
        @type auth_attr: string
        @param value: The auth_attr's value.
        @type value: string
        '''
        self._check_self()
        path = "%s/auth/%s" % (self.path, str(auth_attr))
        if not os.path.isfile(path):
            raise RTSLibError("Cannot find auth attribute: %s."
                              % str(auth_attr))
        else:
            try:
                fwrite(path, "%s" % str(value))
            except IOError, msg:
                msg = msg[1]
                raise RTSLibError("Cannot set auth attribute %s: %s"
                                  % (str(auth_attr), str(msg)))

    def get_auth_attr(self, auth_attr):
        '''
        @param auth_attr: The auth_attr's name. It is case-sensitive.
        @return: The named auth_attr's value, as a string.
        '''
        self._check_self()
        path = "%s/auth/%s" % (self.path, str(auth_attr))
        if not os.path.isfile(path):
            raise RTSLibError("Cannot find auth attribute: %s."
                              % str(auth_attr))
        else:
            return fread(path).strip()

    def delete(self):
        '''
        If the underlying configFS object does not exist, this method does
        nothing. If the underlying configFS object exists, this method
        attempts to delete it.
        '''
        if self:
            os.rmdir(self.path)

    path = property(_get_path,
            doc="Get the configFS object path.")

    exists = property(_exists,
            doc="Is True as long as the underlying configFS object exists. "
                + "If the underlying configFS object gets deleted "
                + "either by calling the delete() method, or by any "
                + "other means, it will be False.")

    is_fresh = property(_is_fresh,
            doc="Is True if the underlying configFS object has been created "
                + "when instantiating this particular object. Is "
                + "False if this object instantiation just looked "
                + "up the underlying configFS object.")


def _test():
    import doctest
    doctest.testmod()

if __name__ == "__main__":
    _test()

rtslib-2.2/rtslib/loop.py

'''
Implements the RTS SAS loopback classes.

This file is part of RTSLib.
Copyright (c) 2011-2013 by Datera, Inc

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
''' import re import os import glob import uuid import shutil # rtslib modules from root import RTSRoot from node import CFSNode from utils import RTSLibError, RTSLibBrokenLink from utils import generate_wwn, fwrite, fread class LUN(CFSNode): ''' This is an interface to RTS Target LUNs in configFS. A LUN is identified by its parent Nexus and LUN index. ''' # LUN private stuff def __init__(self, parent_nexus, lun, storage_object=None, alias=None): ''' A LUN object can be instantiated in two ways: - B{Creation mode}: If I{storage_object} is specified, the underlying configFS object will be created with that parameter. No LUN with the same I{lun} index can pre-exist in the parent Nexus in that mode, or instantiation will fail. - B{Lookup mode}: If I{storage_object} is not set, then the LUN will be bound to the existing configFS LUN object of the parent Nexus having the specified I{lun} index. The underlying configFS object must already exist in that mode. @param parent_nexus: The parent Nexus object. @type parent_nexus: Nexus @param lun: The LUN index. @type lun: 0-255 @param storage_object: The storage object to be exported as a LUN. @type storage_object: StorageObject subclass @param alias: An optional parameter to manually specify the LUN alias. You probably do not need this. @type alias: string @return: A LUN object. ''' super(LUN, self).__init__() if isinstance(parent_nexus, Nexus): self._parent_nexus = parent_nexus else: raise RTSLibError("Invalid parent Nexus.") try: lun = int(lun) except ValueError: raise RTSLibError("Invalid LUN index: %s" % str(lun)) else: if lun > 255 or lun < 0: raise RTSLibError("Invalid LUN index, it must be " + "between 0 and 255: %d" % lun) self._lun = lun self._path = "%s/lun/lun_%d" % (self.parent_nexus.path, self.lun) if storage_object is None and alias is not None: raise RTSLibError("The alias parameter has no meaning " + "without the storage_object parameter.") if storage_object is not None: self._create_in_cfs_ine('create') try: self._configure(storage_object, alias) except: self.delete() raise else: self._create_in_cfs_ine('lookup') def __str__(self): try: storage_object = self.storage_object except RTSLibBrokenLink: desc = "[BROKEN STORAGE LINK]" else: backstore = storage_object.backstore soname = storage_object.name if backstore.plugin.startswith("rd"): path = "ramdisk" else: path = storage_object.udev_path desc = "-> %s%d '%s' (%s)" \ % (backstore.plugin, backstore.index, soname, path) return "LUN %d %s" % (self.lun, desc) def _create_in_cfs_ine(self, mode): super(LUN, self)._create_in_cfs_ine(mode) def _configure(self, storage_object, alias): self._check_self() if alias is None: alias = str(uuid.uuid4())[-10:] else: alias = str(alias).strip() if '/' in alias: raise RTSLibError("Invalid alias: %s", alias) destination = "%s/%s" % (self.path, alias) from tcm import StorageObject if isinstance(storage_object, StorageObject): if storage_object.exists: source = storage_object.path else: raise RTSLibError("The storage_object does not exist " + "in configFS.") else: raise RTSLibError("Invalid storage object.") os.symlink(source, destination) def _get_alias(self): self._check_self() alias = None for path in os.listdir(self.path): if os.path.islink("%s/%s" % (self.path, path)): alias = os.path.basename(path) break if alias is None: raise RTSLibBrokenLink("Broken LUN in configFS, no " \ + "storage object attached.") else: return alias def _get_storage_object(self): self._check_self() alias_path = None for path in os.listdir(self.path): if 
os.path.islink("%s/%s" % (self.path, path)): alias_path = os.path.realpath("%s/%s" % (self.path, path)) break if alias_path is None: raise RTSLibBrokenLink("Broken LUN in configFS, no " \ + "storage object attached.") rtsroot = RTSRoot() for storage_object in rtsroot.storage_objects: if storage_object.path == alias_path: return storage_object raise RTSLibBrokenLink("Broken storage object link in LUN.") def _get_parent_nexus(self): return self._parent_nexus def _get_lun(self): return self._lun def _get_alua_metadata_path(self): return "%s/lun_%d" % (self.parent_nexus.alua_metadata_path, self.lun) # LUN public stuff def delete(self): ''' If the underlying configFS object does not exists, this method does nothing. If the underlying configFS object exists, this method attempts to delete it. ''' self._check_self() try: link = self.alias except RTSLibBrokenLink: pass else: if os.path.islink("%s/%s" % (self.path, link)): os.unlink("%s/%s" % (self.path, link)) super(LUN, self).delete() if os.path.isdir(self.alua_metadata_path): shutil.rmtree(self.alua_metadata_path) alua_metadata_path = property(_get_alua_metadata_path, doc="Get the ALUA metadata directory path for the LUN.") parent_nexus = property(_get_parent_nexus, doc="Get the parent Nexus object.") lun = property(_get_lun, doc="Get the LUN index as an int.") storage_object = property(_get_storage_object, doc="Get the storage object attached to the LUN.") alias = property(_get_alias, doc="Get the LUN alias.") class Nexus(CFSNode): ''' This is a an interface to Target Portal Groups in configFS. A Nexus is identified by its parent Target object and its nexus Tag. To a Nexus object is attached a list of NetworkPortals. ''' # Nexus private stuff def __init__(self, parent_target, tag, mode='any'): ''' @param parent_target: The parent Target object of the Nexus. @type parent_target: Target @param tag: The Nexus Tag (TPGT). @type tag: int > 0 @param mode:An optionnal string containing the object creation mode: - I{'any'} means the configFS object will be either looked up or created. - I{'lookup'} means the object MUST already exist configFS. - I{'create'} means the object must NOT already exist in configFS. @type mode:string @return: A Nexus object. ''' super(Nexus, self).__init__() try: self._tag = int(tag) except ValueError: raise RTSLibError("Invalid Tag.") if tag < 1: raise RTSLibError("Invalig Tag, it must be >0.") if isinstance(parent_target, Target): self._parent_target = parent_target else: raise RTSLibError("Invalid parent Target.") self._path = "%s/tpgt_%d" % (self.parent_target.path, self.tag) self._create_in_cfs_ine(mode) def __str__(self): try: initiator = self.initiator except RTSLibError: initiator = "[BROKEN]" return "Nexus %d / initiator %s" % (self.tag, initiator) def _get_initiator(self): nexus_path = self._path + "/nexus" if os.path.isfile(nexus_path): try: initiator = fread(nexus_path) except IOError, msg: raise RTSLibError("Cannot read Nexus initiator address " + "(>=4.0 style, %s): %s." % (nexus_path, msg)) else: try: initiator = os.listdir(nexus_path)[0] except IOError, msg: raise RTSLibError("Cannot read Nexus initiator address " + "(<4.0 style, %s): %s." 
% (nexus_path, msg)) return initiator.strip() def _get_tag(self): return self._tag def _get_parent_target(self): return self._parent_target def _create_in_cfs_ine(self, mode): super(Nexus, self)._create_in_cfs_ine(mode) if not os.path.isdir(self.alua_metadata_path): os.makedirs(self.alua_metadata_path) if self._fresh: initiator = generate_wwn('naa') nexus_path = self._path + "/nexus" if os.path.isfile(nexus_path): try: fwrite(nexus_path, initiator) except IOError, msg: raise RTSLibError("Cannot create Nexus initiator " + "(>=4.0 style, %s): %s." % (nexus_path, msg)) else: try: os.makedirs(nexus_path + "/" + initiator) except IOError, msg: raise RTSLibError("Cannot create Nexus initiator." + "(<4.0 style, %s): %s." % (nexus_path, msg)) def _list_luns(self): self._check_self() luns = [] lun_dirs = [os.path.basename(path) for path in os.listdir("%s/lun" % self.path)] for lun_dir in lun_dirs: lun = lun_dir.split('_')[1] lun = int(lun) luns.append(LUN(self, lun)) return luns def _control(self, command): self._check_self() path = "%s/control" % self.path fwrite(path, "%s\n" % str(command)) def _get_alua_metadata_path(self): return "%s/%s+%d" \ % (self.alua_metadata_dir, self.parent_target.naa, self.tag) # Nexus public stuff def delete(self): ''' Recursively deletes a Nexus object. This will delete all attached LUN, and then the Nexus itself. ''' self._check_self() for lun in self.luns: lun.delete() # TODO: check that ALUA MD removal works while removing Nexus if os.path.isdir(self.alua_metadata_path): shutil.rmtree(self.alua_metadata_path) nexus_path = self._path + "/nexus" if os.path.isfile(nexus_path): try: fwrite(nexus_path, "NULL") except IOError, msg: raise RTSLibError("Cannot delete Nexus initiator " + "(>=4.0 style, %s): %s." % (nexus_path, msg)) else: try: os.rmdir(nexus_path + "/" + self.initiator) except IOError, msg: raise RTSLibError("Cannot delete Nexus initiator." + "(<4.0 style, %s): %s." % (nexus_path, msg)) super(Nexus, self).delete() def lun(self, lun, storage_object=None, alias=None): ''' Same as LUN() but without specifying the parent_nexus. ''' self._check_self() return LUN(self, lun=lun, storage_object=storage_object, alias=alias) alua_metadata_path = property(_get_alua_metadata_path, doc="Get the ALUA metadata directory path " \ + "for the Nexus.") tag = property(_get_tag, doc="Get the Nexus Tag as an int.") initiator = property(_get_initiator, doc="Get the Nexus initiator address as a string.") parent_target = property(_get_parent_target, doc="Get the parent Target object to which the " \ + "Nexus is attached.") luns = property(_list_luns, doc="Get the list of LUN objects currently attached " \ + "to the Nexus.") class Target(CFSNode): ''' This is an interface to loopback SAS Targets in configFS. A Target is identified by its naa SAS address. To a Target is attached a list of Nexus objects. ''' # Target private stuff def __init__(self, naa=None, mode='any'): ''' @param naa: The optionnal Target's address. If no address or an empty address is specified, one will be generated for you. @type naa: string @param mode:An optionnal string containing the object creation mode: - I{'any'} means the configFS object will be either looked up or created. - I{'lookup'} means the object MUST already exist configFS. - I{'create'} means the object must NOT already exist in configFS. @type mode:string @return: A Target object. 
''' super(Target, self).__init__() if naa is None: naa = generate_wwn('naa') else: naa = str(naa).lower().strip() self._naa = naa self._path = "%s/loopback/%s" % (self.configfs_dir, self._naa) if not self: if not re.match( "naa\.[0-9]+", naa) \ or re.search(' ', naa) \ or re.search('_', naa): raise RTSLibError("Invalid naa: %s" % naa) self._create_in_cfs_ine(mode) def __str__(self): return "SAS loopback %s" % self.naa def _list_nexuses(self): self._check_self() nexuses = [] nexus_dirs = glob.glob("%s/tpgt*" % self.path) for nexus_dir in nexus_dirs: tag = os.path.basename(nexus_dir).split('_')[1] tag = int(tag) nexuses.append(Nexus(self, tag, 'lookup')) return nexuses def _get_naa(self): return self._naa # Target public stuff def delete(self): ''' Recursively deletes a Target object. This will delete all attached Nexus objects and then the Target itself. ''' self._check_self() for nexus in self.nexuses: nexus.delete() super(Target, self).delete() def nexus(self, tag, mode='any'): ''' Same as Nexus() but without the parent_target parameter. ''' self._check_self() return Nexus(self, tag=tag, mode=mode) naa = property(_get_naa, doc="Get the naa of the Target object as a string.") nexuses = property(_list_nexuses, doc="Get the list of Nexus objects currently " + "attached to the Target.") def _test(): import doctest doctest.testmod() if __name__ == "__main__": _test() rtslib-2.2/rtslib/root.py0000644000175000017500000001336312176066620013722 0ustar rrsrrs''' Implements the RTSRoot class. This file is part of RTSLib. Copyright (c) 2011-2013 by Datera, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import re import os import glob from node import CFSNode from target import Target, FabricModule from tcm import FileIOBackstore, IBlockBackstore from tcm import PSCSIBackstore, RDDRBackstore, RDMCPBackstore from utils import RTSLibError, RTSLibBrokenLink, flatten_nested_list, modprobe class RTSRoot(CFSNode): ''' This is an interface to the root of the configFS object tree. Is allows one to start browsing Target and Backstore objects, as well as helper methods to return arbitrary objects from the configFS tree. >>> import rtslib.root as root >>> rtsroot = root.RTSRoot() >>> rtsroot.path '/sys/kernel/config/target' >>> rtsroot.exists True >>> rtsroot.targets # doctest: +ELLIPSIS [...] >>> rtsroot.backstores # doctest: +ELLIPSIS [...] >>> rtsroot.tpgs # doctest: +ELLIPSIS [...] >>> rtsroot.storage_objects # doctest: +ELLIPSIS [...] >>> rtsroot.network_portals # doctest: +ELLIPSIS [...] ''' # The core target/tcm kernel module target_core_mod = 'target_core_mod' # RTSRoot private stuff def __init__(self): ''' Instantiate an RTSRoot object. 
Basically checks for configfs setup and base kernel modules (tcm ) ''' super(RTSRoot, self).__init__() modprobe(self.target_core_mod) self._create_in_cfs_ine('any') def _list_targets(self): self._check_self() targets = set([]) for fabric_module in self.fabric_modules: targets.update(fabric_module.targets) return targets def _list_backstores(self): self._check_self() backstores = set([]) if os.path.isdir("%s/core" % self.path): backstore_dirs = glob.glob("%s/core/*_*" % self.path) for backstore_dir in [os.path.basename(path) for path in backstore_dirs]: regex = re.search("([a-z]+[_]*[a-z]+)(_)([0-9]+)", backstore_dir) if regex: if regex.group(1) == "fileio": backstores.add( FileIOBackstore(int(regex.group(3)), 'lookup')) elif regex.group(1) == "pscsi": backstores.add( PSCSIBackstore(int(regex.group(3)), 'lookup')) elif regex.group(1) == "iblock": backstores.add( IBlockBackstore(int(regex.group(3)), 'lookup')) elif regex.group(1) == "rd_dr": backstores.add( RDDRBackstore(int(regex.group(3)), 'lookup')) elif regex.group(1) == "rd_mcp": backstores.add( RDMCPBackstore(int(regex.group(3)), 'lookup')) return backstores def _list_storage_objects(self): self._check_self() return set(flatten_nested_list([backstore.storage_objects for backstore in self.backstores])) def _list_tpgs(self): self._check_self() return set(flatten_nested_list([t.tpgs for t in self.targets])) def _list_node_acls(self): self._check_self() return set(flatten_nested_list([t.node_acls for t in self.tpgs])) def _list_network_portals(self): self._check_self() return set(flatten_nested_list([t.network_portals for t in self.tpgs])) def _list_luns(self): self._check_self() return set(flatten_nested_list([t.luns for t in self.tpgs])) def _list_fabric_modules(self): self._check_self() mod_names = [mod_name[:-5] for mod_name in os.listdir(self.spec_dir) if mod_name.endswith('.spec')] modules = [FabricModule(mod_name) for mod_name in mod_names] return modules def _list_loaded_fabric_modules(self): return [fm for fm in self._list_fabric_modules() if fm.exists] def __str__(self): return "rtsadmin" # RTSRoot public stuff backstores = property(_list_backstores, doc="Get the list of Backstore objects.") targets = property(_list_targets, doc="Get the list of Target objects.") tpgs = property(_list_tpgs, doc="Get the list of all the existing TPG objects.") node_acls = property(_list_node_acls, doc="Get the list of all the existing NodeACL objects.") network_portals = property(_list_network_portals, doc="Get the list of all the existing Network Portal objects.") storage_objects = property(_list_storage_objects, doc="Get the list of all the existing Storage objects.") luns = property(_list_luns, doc="Get the list of all existing LUN objects.") fabric_modules = property(_list_fabric_modules, doc="Get the list of all FabricModule objects.") loaded_fabric_modules = property(_list_loaded_fabric_modules, doc="Get the list of all loaded FabricModule objects.") def _test(): '''Run the doctests.''' import doctest doctest.testmod() if __name__ == "__main__": _test() rtslib-2.2/rtslib/target.py0000644000175000017500000013120112176066620014215 0ustar rrsrrs''' Implements the RTS generic Target fabric classes. This file is part of RTSLib. Copyright (c) 2011-2013 by Datera, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import re import os import glob import uuid import shutil from node import CFSNode from os.path import isdir from doctest import testmod from configobj import ConfigObj from utils import RTSLibError, RTSLibBrokenLink, modprobe from utils import is_ipv6_address, is_ipv4_address from utils import fread, fwrite, generate_wwn, is_valid_wwn, exec_argv class FabricModule(CFSNode): ''' This is an interface to RTS Target Fabric Modules. It can load/unload modules, provide information about them and handle the configfs housekeeping. It uses module configuration files in /var/target/fabric/*.spec. After instantiation, whether or not the fabric module is loaded and ''' version_attributes = set(["lio_version", "version"]) discovery_auth_attributes = set(["discovery_auth"]) target_names_excludes = version_attributes | discovery_auth_attributes # FabricModule private stuff def __init__(self, name): ''' Instantiate a FabricModule object, according to the provided name. @param name: the name of the FabricModule object. It must match an existing target fabric module specfile (name.spec). @type name: str ''' super(FabricModule, self).__init__() self.name = name self.spec = self._parse_spec() self._path = "%s/%s" % (self.configfs_dir, self.spec['configfs_group']) # FabricModule public stuff def has_feature(self, feature): ''' Whether or not this FabricModule has a certain feature. ''' if feature in self.spec['features']: return True else: return False def load(self, yield_steps=False): ''' Attempt to load the target fabric kernel module as defined in the specfile. @param yield_steps: Whether or not to yield an (action, taken, desc) tuple at each step: action is either 'load_module' or 'create_cfs_group', 'taken' is a bool indicating whether the action was taken (if needed) or not, and desc is a text description of the step suitable for logging. @type yield_steps: bool @raises RTSLibError: For failure to load kernel module and/or create configfs group. ''' module = self.spec['kernel_module'] load_module = modprobe(module) if yield_steps: yield ('load_module', load_module, "Loaded %s kernel module." % module) # TODO: Also load saved targets and config if needed. For that, support # XXX: from the configfs side would be nice: have a config ID present # XXX: both on the on-disk saved config and a configfs attibute. # Create the configfs group self._create_in_cfs_ine('any') if yield_steps: yield ('create_cfs_group', self._fresh, "Created '%s'." % self.path) def _parse_spec(self): ''' Parses the fabric module spec file. 
''' # Recognized options and their default values defaults = dict(features=['discovery_auth', 'acls', 'acls_auth', 'nps', 'tpgts'], kernel_module="%s_target_mod" % self.name, configfs_group=self.name, wwn_from_files=[], wwn_from_files_filter='', wwn_from_cmds=[], wwn_from_cmds_filter='', wwn_type='free') spec_file = "%s/%s.spec" % (self.spec_dir, self.name) spec = ConfigObj(spec_file).dict() if spec: self.spec_file = spec_file else: self.spec_file = '' # Do not allow unknown options unknown_options = set(spec.keys()) - set(defaults.keys()) if unknown_options: raise RTSLibError("Unknown option(s) in %s: %s" % (spec_file, list(unknown_options))) # Use defaults for missing options missing_options = set(defaults.keys()) - set(spec.keys()) for option in missing_options: spec[option] = defaults[option] # Type conversion and checking for option in spec: spec_type = type(spec[option]).__name__ defaults_type = type(defaults[option]).__name__ if spec_type != defaults_type: # Type mismatch, go through acceptable conversions if spec_type == 'str' and defaults_type == 'list': spec[option] = [spec[option]] else: raise RTSLibError("Wrong type for option '%s' in %s. " % (option, spec_file) + "Expected type '%s' and got '%s'." % (defaults_type, spec_type)) # Generate the list of fixed WWNs if not empty wwn_list = None wwn_type = spec['wwn_type'] if spec['wwn_from_files']: for wwn_pattern in spec['wwn_from_files']: for wwn_file in glob.iglob(wwn_pattern): wwns_in_file = [wwn for wwn in re.split('\t|\0|\n| ', fread(wwn_file)) if wwn.strip()] if spec['wwn_from_files_filter']: wwns_filtered = [] for wwn in wwns_in_file: filter = "echo %s|%s" \ % (wwn, spec['wwn_from_files_filter']) wwns_filtered.append(exec_argv(filter, shell=True)) else: wwns_filtered = wwns_in_file if wwn_list is None: wwn_list = set([]) wwn_list.update(set([wwn for wwn in wwns_filtered if is_valid_wwn(wwn_type, wwn) if wwn] )) if spec['wwn_from_cmds']: for wwn_cmd in spec['wwn_from_cmds']: cmd_result = exec_argv(wwn_cmd, shell=True) wwns_from_cmd = [wwn for wwn in re.split('\t|\0|\n| ', cmd_result) if wwn.strip()] if spec['wwn_from_cmds_filter']: wwns_filtered = [] for wwn in wwns_from_cmd: filter = "echo %s|%s" \ % (wwn, spec['wwn_from_cmds_filter']) wwns_filtered.append(exec_argv(filter, shell=True)) else: wwns_filtered = wwns_from_cmd if wwn_list is None: wwn_list = set([]) wwn_list.update(set([wwn for wwn in wwns_filtered if is_valid_wwn(wwn_type, wwn) if wwn] )) spec['wwn_list'] = wwn_list return spec def _list_targets(self): if self.exists: return set( [Target(self, wwn, 'lookup') for wwn in os.listdir(self.path) if os.path.isdir("%s/%s" % (self.path, wwn)) if wwn not in self.target_names_excludes]) else: return set([]) def _get_version(self): if self.exists: for attr in self.version_attributes: path = "%s/%s" % (self.path, attr) if os.path.isfile(path): return fread(path) else: raise RTSLibError("Can't find version for fabric module %s." % self.name) else: return None # FabricModule public stuff def is_valid_wwn(self, wwn): ''' Checks whether or not the provided WWN is valid for this fabric module according to the spec file. ''' return is_valid_wwn(self.spec['wwn_type'], wwn, self.spec['wwn_list']) def _assert_feature(self, feature): if not self.has_feature(feature): raise RTSLibError("This fabric module does not implement " + "the %s feature." 
% feature) def _get_discovery_mutual_password(self): self._check_self() self._assert_feature('discovery_auth') path = "%s/discovery_auth/password_mutual" % self.path value = fread(path).strip() if value == "NULL": return '' else: return value def _set_discovery_mutual_password(self, password): self._check_self() self._assert_feature('discovery_auth') path = "%s/discovery_auth/password_mutual" % self.path if password.strip() == '': password = "NULL" fwrite(path, "%s" % password) def _get_discovery_mutual_userid(self): self._check_self() self._assert_feature('discovery_auth') path = "%s/discovery_auth/userid_mutual" % self.path value = fread(path).strip() if value == "NULL": return '' else: return value def _set_discovery_mutual_userid(self, userid): self._check_self() self._assert_feature('discovery_auth') path = "%s/discovery_auth/userid_mutual" % self.path if userid.strip() == '': userid = "NULL" fwrite(path, "%s" % userid) def _get_discovery_password(self): self._check_self() self._assert_feature('discovery_auth') path = "%s/discovery_auth/password" % self.path value = fread(path).strip() if value == "NULL": return '' else: return value def _set_discovery_password(self, password): self._check_self() self._assert_feature('discovery_auth') path = "%s/discovery_auth/password" % self.path if password.strip() == '': password = "NULL" fwrite(path, "%s" % password) def _get_discovery_userid(self): self._check_self() self._assert_feature('discovery_auth') path = "%s/discovery_auth/userid" % self.path value = fread(path).strip() if value == "NULL": return '' else: return value def _set_discovery_userid(self, userid): self._check_self() self._assert_feature('discovery_auth') path = "%s/discovery_auth/userid" % self.path if userid.strip() == '': userid = "NULL" fwrite(path, "%s" % userid) def _get_discovery_enable_auth(self): self._check_self() self._assert_feature('discovery_auth') path = "%s/discovery_auth/enforce_discovery_auth" % self.path value = fread(path).strip() return value def _set_discovery_enable_auth(self, enable): self._check_self() self._assert_feature('discovery_auth') path = "%s/discovery_auth/enforce_discovery_auth" % self.path if enable: enable = 1 else: enable = 0 fwrite(path, "%s" % enable) discovery_userid = \ property(_get_discovery_userid, _set_discovery_userid, doc="Set or get the initiator discovery userid.") discovery_password = \ property(_get_discovery_password, _set_discovery_password, doc="Set or get the initiator discovery password.") discovery_mutual_userid = \ property(_get_discovery_mutual_userid, _set_discovery_mutual_userid, doc="Set or get the mutual discovery userid.") discovery_mutual_password = \ property(_get_discovery_mutual_password, _set_discovery_mutual_password, doc="Set or get the mutual discovery password.") discovery_enable_auth = \ property(_get_discovery_enable_auth, _set_discovery_enable_auth, doc="Set or get the discovery enable_auth flag.") targets = property(_list_targets, doc="Get the list of target objects.") version = property(_get_version, doc="Get the fabric module version string.") class LUN(CFSNode): ''' This is an interface to RTS Target LUNs in configFS. A LUN is identified by its parent TPG and LUN index. ''' # LUN private stuff def __init__(self, parent_tpg, lun, storage_object=None, alias=None): ''' A LUN object can be instantiated in two ways: - B{Creation mode}: If I{storage_object} is specified, the underlying configFS object will be created with that parameter. 
No LUN with the same I{lun} index can pre-exist in the parent TPG in that mode, or instantiation will fail. - B{Lookup mode}: If I{storage_object} is not set, then the LUN will be bound to the existing configFS LUN object of the parent TPG having the specified I{lun} index. The underlying configFS object must already exist in that mode. @param parent_tpg: The parent TPG object. @type parent_tpg: TPG @param lun: The LUN index. @type lun: 0-255 @param storage_object: The storage object to be exported as a LUN. @type storage_object: StorageObject subclass @param alias: An optional parameter to manually specify the LUN alias. You probably do not need this. @type alias: string @return: A LUN object. ''' super(LUN, self).__init__() if isinstance(parent_tpg, TPG): self._parent_tpg = parent_tpg else: raise RTSLibError("Invalid parent TPG.") try: lun = int(lun) except ValueError: raise RTSLibError("Invalid LUN index: %s" % str(lun)) else: if lun > 255 or lun < 0: raise RTSLibError("Invalid LUN index, it must be " \ + "between 0 and 255: %d" % lun) self._lun = lun self._path = "%s/lun/lun_%d" % (self.parent_tpg.path, self.lun) if storage_object is None and alias is not None: raise RTSLibError("The alias parameter has no meaning " \ + "without the storage_object parameter.") if storage_object is not None: self._create_in_cfs_ine('create') try: self._configure(storage_object, alias) except: self.delete() raise else: self._create_in_cfs_ine('lookup') def _create_in_cfs_ine(self, mode): super(LUN, self)._create_in_cfs_ine(mode) def _configure(self, storage_object, alias): self._check_self() if alias is None: alias = str(uuid.uuid4())[-10:] else: alias = str(alias).strip() if '/' in alias: raise RTSLibError("Invalid alias: %s", alias) destination = "%s/%s" % (self.path, alias) from tcm import StorageObject if isinstance(storage_object, StorageObject): if storage_object.exists: source = storage_object.path else: raise RTSLibError("The storage_object does not exist " \ + "in configFS.") else: raise RTSLibError("Invalid storage object.") os.symlink(source, destination) def _get_alias(self): self._check_self() alias = None for path in os.listdir(self.path): if os.path.islink("%s/%s" % (self.path, path)): alias = os.path.basename(path) break if alias is None: raise RTSLibBrokenLink("Broken LUN in configFS, no " \ + "storage object attached.") else: return alias def _get_storage_object(self): self._check_self() alias_path = None for path in os.listdir(self.path): if os.path.islink("%s/%s" % (self.path, path)): alias_path = os.path.realpath("%s/%s" % (self.path, path)) break if alias_path is None: raise RTSLibBrokenLink("Broken LUN in configFS, no " + "storage object attached.") from root import RTSRoot rtsroot = RTSRoot() for storage_object in rtsroot.storage_objects: if storage_object.path == alias_path: return storage_object raise RTSLibBrokenLink("Broken storage object link in LUN.") def _get_parent_tpg(self): return self._parent_tpg def _get_lun(self): return self._lun def _get_alua_metadata_path(self): return "%s/lun_%d" % (self.parent_tpg.alua_metadata_path, self.lun) def _list_mapped_luns(self): self._check_self() listdir = os.listdir realpath = os.path.realpath path = self.path tpg = self.parent_tpg if not tpg.has_feature('acls'): return [] else: base = "%s/acls/" % tpg.path xmlun = ["param", "info", "cmdsn_depth", "auth", "attrib", "node_name", "port_name"] return [MappedLUN(NodeACL(tpg, nodeacl), mapped_lun.split('_')[1]) for nodeacl in listdir(base) for mapped_lun in listdir("%s/%s" % (base, 
nodeacl)) if mapped_lun not in xmlun if isdir("%s/%s/%s" % (base, nodeacl, mapped_lun)) for link in listdir("%s/%s/%s" \ % (base, nodeacl, mapped_lun)) if realpath("%s/%s/%s/%s" \ % (base, nodeacl, mapped_lun, link)) == path] # LUN public stuff def delete(self): ''' If the underlying configFS object does not exists, this method does nothing. If the underlying configFS object exists, this method attempts to delete it along with all MappedLUN objects referencing that LUN. ''' self._check_self() [mlun.delete() for mlun in self._list_mapped_luns()] try: link = self.alias except RTSLibBrokenLink: pass else: if os.path.islink("%s/%s" % (self.path, link)): os.unlink("%s/%s" % (self.path, link)) super(LUN, self).delete() if os.path.isdir(self.alua_metadata_path): shutil.rmtree(self.alua_metadata_path) alua_metadata_path = property(_get_alua_metadata_path, doc="Get the ALUA metadata directory path for the LUN.") parent_tpg = property(_get_parent_tpg, doc="Get the parent TPG object.") lun = property(_get_lun, doc="Get the LUN index as an int.") storage_object = property(_get_storage_object, doc="Get the storage object attached to the LUN.") alias = property(_get_alias, doc="Get the LUN alias.") mapped_luns = property(_list_mapped_luns, doc="List all MappedLUN objects referencing this LUN.") class MappedLUN(CFSNode): ''' This is an interface to RTS Target Mapped LUNs. A MappedLUN is a mapping of a TPG LUN to a specific initiator node, and is part of a NodeACL. It allows the initiator to actually access the TPG LUN if ACLs are enabled for the TPG. The initial TPG LUN will then be seen by the initiator node as the MappedLUN. ''' # MappedLUN private stuff def __init__(self, parent_nodeacl, mapped_lun, tpg_lun=None, write_protect=None): ''' A MappedLUN object can be instantiated in two ways: - B{Creation mode}: If I{tpg_lun} is specified, the underlying configFS object will be created with that parameter. No MappedLUN with the same I{mapped_lun} index can pre-exist in the parent NodeACL in that mode, or instantiation will fail. - B{Lookup mode}: If I{tpg_lun} is not set, then the MappedLUN will be bound to the existing configFS MappedLUN object of the parent NodeACL having the specified I{mapped_lun} index. The underlying configFS object must already exist in that mode. @param mapped_lun: The mapped LUN index. @type mapped_lun: int @param tpg_lun: The TPG LUN index to map, or directly a LUN object that belong to the same TPG as the parent NodeACL. @type tpg_lun: int or LUN @param write_protect: The write-protect flag value, defaults to False (write-protection disabled). 
@type write_protect: bool ''' super(MappedLUN, self).__init__() if not isinstance(parent_nodeacl, NodeACL): raise RTSLibError("The parent_nodeacl parameter must be " \ + "a NodeACL object.") else: self._parent_nodeacl = parent_nodeacl if not parent_nodeacl.exists: raise RTSLibError("The parent_nodeacl does not exist.") try: self._mapped_lun = int(mapped_lun) except ValueError: raise RTSLibError("The mapped_lun parameter must be an " \ + "integer value.") self._path = "%s/lun_%d" % (self.parent_nodeacl.path, self.mapped_lun) if tpg_lun is None and write_protect is not None: raise RTSLibError("The write_protect parameter has no " \ + "meaning without the tpg_lun parameter.") if tpg_lun is not None: self._create_in_cfs_ine('create') try: self._configure(tpg_lun, write_protect) except: self.delete() raise else: self._create_in_cfs_ine('lookup') def _configure(self, tpg_lun, write_protect): self._check_self() if isinstance(tpg_lun, LUN): tpg_lun = tpg_lun.lun else: try: tpg_lun = int(tpg_lun) except ValueError: raise RTSLibError("The tpg_lun must be either an " + "integer or a LUN object.") # Check that the tpg_lun exists in the TPG for lun in self.parent_nodeacl.parent_tpg.luns: if lun.lun == tpg_lun: tpg_lun = lun break if not (isinstance(tpg_lun, LUN) and tpg_lun): raise RTSLibError("LUN %s does not exist in this TPG." % str(tpg_lun)) os.symlink(tpg_lun.path, "%s/%s" % (self.path, str(uuid.uuid4())[-10:])) try: self.write_protect = int(write_protect) > 0 except: self.write_protect = False def _get_alias(self): self._check_self() alias = None for path in os.listdir(self.path): if os.path.islink("%s/%s" % (self.path, path)): alias = os.path.basename(path) break if alias is None: raise RTSLibBrokenLink("Broken LUN in configFS, no " \ + "storage object attached.") else: return alias def _get_mapped_lun(self): return self._mapped_lun def _get_parent_nodeacl(self): return self._parent_nodeacl def _set_write_protect(self, write_protect): self._check_self() path = "%s/write_protect" % self.path if write_protect: fwrite(path, "1") else: fwrite(path, "0") def _get_write_protect(self): self._check_self() path = "%s/write_protect" % self.path write_protect = fread(path).strip() if write_protect == "1": return True else: return False def _get_tpg_lun(self): self._check_self() path = os.path.realpath("%s/%s" % (self.path, self._get_alias())) for lun in self.parent_nodeacl.parent_tpg.luns: if lun.path == path: return lun raise RTSLibBrokenLink("Broken MappedLUN, no TPG LUN found !") def _get_node_wwn(self): self._check_self() return self.parent_nodeacl.node_wwn # MappedLUN public stuff def delete(self): ''' Delete the MappedLUN. ''' self._check_self() try: lun_link = "%s/%s" % (self.path, self._get_alias()) except RTSLibBrokenLink: pass else: if os.path.islink(lun_link): os.unlink(lun_link) super(MappedLUN, self).delete() mapped_lun = property(_get_mapped_lun, doc="Get the integer MappedLUN mapped_lun index.") parent_nodeacl = property(_get_parent_nodeacl, doc="Get the parent NodeACL object.") write_protect = property(_get_write_protect, _set_write_protect, doc="Get or set the boolean write protection.") tpg_lun = property(_get_tpg_lun, doc="Get the TPG LUN object the MappedLUN is pointing at.") node_wwn = property(_get_node_wwn, doc="Get the wwn of the node for which the TPG LUN is mapped.") class NodeACL(CFSNode): ''' This is an interface to node ACLs in configFS. A NodeACL is identified by the initiator node wwn and parent TPG. 
''' # NodeACL private stuff def __init__(self, parent_tpg, node_wwn, mode='any'): ''' @param parent_tpg: The parent TPG object. @type parent_tpg: TPG @param node_wwn: The wwn of the initiator node for which the ACL is created. @type node_wwn: string @param mode:An optionnal string containing the object creation mode: - I{'any'} means the configFS object will be either looked up or created. - I{'lookup'} means the object MUST already exist configFS. - I{'create'} means the object must NOT already exist in configFS. @type mode:string @return: A NodeACL object. ''' super(NodeACL, self).__init__() if isinstance(parent_tpg, TPG): self._parent_tpg = parent_tpg else: raise RTSLibError("Invalid parent TPG.") self._node_wwn = str(node_wwn).lower() self._path = "%s/acls/%s" % (self.parent_tpg.path, self.node_wwn) self._create_in_cfs_ine(mode) def _get_node_wwn(self): return self._node_wwn def _get_parent_tpg(self): return self._parent_tpg def _get_tcq_depth(self): self._check_self() path = "%s/cmdsn_depth" % self.path return fread(path).strip() def _set_tcq_depth(self, depth): self._check_self() path = "%s/cmdsn_depth" % self.path try: fwrite(path, "%s" % depth) except IOError, msg: msg = msg[1] raise RTSLibError("Cannot set tcq_depth: %s" % str(msg)) def _list_mapped_luns(self): self._check_self() mapped_luns = [] mapped_lun_dirs = glob.glob("%s/lun_*" % self.path) for mapped_lun_dir in mapped_lun_dirs: mapped_lun = int(os.path.basename(mapped_lun_dir).split("_")[1]) mapped_luns.append(MappedLUN(self, mapped_lun)) return mapped_luns # NodeACL public stuff def has_feature(self, feature): ''' Whether or not this NodeACL has a certain feature. ''' return self.parent_tpg.has_feature(feature) def delete(self): ''' Delete the NodeACL, including all MappedLUN objects. If the underlying configFS object does not exist, this method does nothing. ''' self._check_self() for mapped_lun in self.mapped_luns: mapped_lun.delete() super(NodeACL, self).delete() def mapped_lun(self, mapped_lun, tpg_lun=None, write_protect=None): ''' Same as MappedLUN() but without the parent_nodeacl parameter. ''' self._check_self() return MappedLUN(self, mapped_lun=mapped_lun, tpg_lun=tpg_lun, write_protect=write_protect) tcq_depth = property(_get_tcq_depth, _set_tcq_depth, doc="Set or get the TCQ depth for the initiator " \ + "sessions matching this NodeACL.") parent_tpg = property(_get_parent_tpg, doc="Get the parent TPG object.") node_wwn = property(_get_node_wwn, doc="Get the node wwn.") mapped_luns = property(_list_mapped_luns, doc="Get the list of all MappedLUN objects in this NodeACL.") class NetworkPortal(CFSNode): ''' This is an interface to NetworkPortals in configFS. A NetworkPortal is identified by its IP and port, but here we also require the parent TPG, so instance objects represent both the NetworkPortal and its association to a TPG. This is necessary to get path information in order to create the portal in the proper configFS hierarchy. ''' # NetworkPortal private stuff def __init__(self, parent_tpg, ip_address, port=3260, mode='any'): ''' @param parent_tpg: The parent TPG object. @type parent_tpg: TPG @param ip_address: The ipv4 IP address of the NetworkPortal. @type ip_address: string @param port: The optional (defaults to 3260) NetworkPortal TCP/IP port. @type port: int @param mode: An optionnal string containing the object creation mode: - I{'any'} means the configFS object will be either looked up or created. - I{'lookup'} means the object MUST already exist configFS. 
- I{'create'} means the object must NOT already exist in configFS. @type mode:string @return: A NetworkPortal object. ''' super(NetworkPortal, self).__init__() if not (is_ipv4_address(ip_address) or is_ipv6_address(ip_address)): raise RTSLibError("Invalid IP address: %s" % ip_address) else: self._ip_address = str(ip_address) try: self._port = int(port) except ValueError: raise RTSLibError("Invalid port.") if isinstance(parent_tpg, TPG): self._parent_tpg = parent_tpg else: raise RTSLibError("Invalid parent TPG.") if is_ipv4_address(ip_address): self._path = "%s/np/%s:%d" \ % (self.parent_tpg.path, self.ip_address, self.port) else: self._path = "%s/np/[%s]:%d" \ % (self.parent_tpg.path, self.ip_address, self.port) try: self._create_in_cfs_ine(mode) except OSError, msg: raise RTSLibError(msg[1]) def _get_ip_address(self): return self._ip_address def _get_port(self): return self._port def _get_parent_tpg(self): return self._parent_tpg def _set_iser_attr(self, iser_attr): path = "%s/iser" % self.path if os.path.isfile(path): if iser_attr: fwrite(path, "1") else: fwrite(path, "0") else: raise RTSLibError("iser network portal attribute does not exist.") def _get_iser_attr(self): path = "%s/iser" % self.path if os.path.isfile(path): iser_attr = fread(path).strip() if iser_attr == "1": return True else: return False else: return False # NetworkPortal public stuff def delete(self): ''' Delete the NetworkPortal. ''' path = "%s/iser" % self.path if os.path.isfile(path): iser_attr = fread(path).strip() if iser_attr == "1": fwrite(path, "0") super(NetworkPortal, self).delete() parent_tpg = property(_get_parent_tpg, doc="Get the parent TPG object.") port = property(_get_port, doc="Get the NetworkPortal's TCP port as an int.") ip_address = property(_get_ip_address, doc="Get the NetworkPortal's IP address as a string.") class TPG(CFSNode): ''' This is a an interface to Target Portal Groups in configFS. A TPG is identified by its parent Target object and its TPG Tag. To a TPG object is attached a list of NetworkPortals. Targets without the 'tpgts' feature cannot have more than a single TPG, so attempts to create more will raise an exception. ''' # TPG private stuff def __init__(self, parent_target, tag, mode='any'): ''' @param parent_target: The parent Target object of the TPG. @type parent_target: Target @param tag: The TPG Tag (TPGT). @type tag: int > 0 @param mode:An optionnal string containing the object creation mode: - I{'any'} means the configFS object will be either looked up or created. - I{'lookup'} means the object MUST already exist configFS. - I{'create'} means the object must NOT already exist in configFS. @type mode:string @return: A TPG object. 
''' super(TPG, self).__init__() try: self._tag = int(tag) except ValueError: raise RTSLibError("Invalid Tag.") if tag < 1: raise RTSLibError("Invalig Tag, it must be >0.") if isinstance(parent_target, Target): self._parent_target = parent_target else: raise RTSLibError("Invalid parent Target.") self._path = "%s/tpgt_%d" % (self.parent_target.path, self.tag) target_path = self.parent_target.path if not self.has_feature('tpgts') and not os.path.isdir(self._path): for filename in os.listdir(target_path): if filename.startswith("tpgt_") \ and os.path.isdir("%s/%s" % (target_path, filename)) \ and filename != "tpgt_%d" % self.tag: raise RTSLibError("Target cannot have multiple TPGs.") self._create_in_cfs_ine(mode) if self.has_feature('nexus') and not self._get_nexus(): self._set_nexus() def _get_tag(self): return self._tag def _get_parent_target(self): return self._parent_target def _list_network_portals(self): self._check_self() if not self.has_feature('nps'): return [] network_portals = [] network_portal_dirs = os.listdir("%s/np" % self.path) for network_portal_dir in network_portal_dirs: if network_portal_dir.startswith('['): # IPv6 portals are [IPv6]:PORT (ip_address, port) = \ os.path.basename(network_portal_dir)[1:].split("]") port = port[1:] else: # IPv4 portals are IPv4:PORT (ip_address, port) = \ os.path.basename(network_portal_dir).split(":") port = int(port) network_portals.append( NetworkPortal(self, ip_address, port, 'lookup')) return network_portals def _get_enable(self): self._check_self() path = "%s/enable" % self.path # If the TPG does not have the enable attribute, then it is always # enabled. if os.path.isfile(path): return int(fread(path)) else: return 1 def _set_enable(self, boolean): ''' Enables or disables the TPG. Raises an error if trying to disable a TPG without en enable attribute (but enabling works in that case). ''' self._check_self() path = "%s/enable" % self.path if os.path.isfile(path): if boolean and not self._get_enable(): fwrite(path, "1") elif not boolean and self._get_enable(): fwrite(path, "0") elif not boolean: raise RTSLibError("TPG cannot be disabled.") def _get_nexus(self): ''' Gets the nexus initiator WWN, or None if the TPG does not have one. ''' self._check_self() if self.has_feature('nexus'): try: nexus_wwn = fread("%s/nexus" % self.path).strip() except IOError: nexus_wwn = '' return nexus_wwn else: return None def _set_nexus(self, nexus_wwn=None): ''' Sets the nexus initiator WWN. Raises an exception if the nexus is already set or if the TPG does not use a nexus. ''' self._check_self() if not self.has_feature('nexus'): raise RTSLibError("The TPG does not use a nexus.") elif self._get_nexus(): raise RTSLibError("The TPG's nexus initiator WWN is already set.") else: if nexus_wwn is None: nexus_wwn = generate_wwn(self.parent_target.wwn_type) elif not is_valid_wwn(self.parent_target.wwn_type, nexus_wwn): raise RTSLibError("WWN '%s' is not of type '%s'." 
% (nexus_wwn, self.parent_target.wwn_type)) fwrite("%s/nexus" % self.path, nexus_wwn) def _create_in_cfs_ine(self, mode): super(TPG, self)._create_in_cfs_ine(mode) if not os.path.isdir(self.alua_metadata_path): os.makedirs(self.alua_metadata_path) def _list_node_acls(self): self._check_self() if not self.has_feature('acls'): return [] node_acls = [] node_acl_dirs = [os.path.basename(path) for path in os.listdir("%s/acls" % self.path)] for node_acl_dir in node_acl_dirs: node_acls.append(NodeACL(self, node_acl_dir, 'lookup')) return node_acls def _list_luns(self): self._check_self() luns = [] lun_dirs = [os.path.basename(path) for path in os.listdir("%s/lun" % self.path)] for lun_dir in lun_dirs: lun = lun_dir.split('_')[1] lun = int(lun) luns.append(LUN(self, lun)) return luns def _control(self, command): self._check_self() path = "%s/control" % self.path fwrite(path, "%s\n" % str(command)) def _get_alua_metadata_path(self): return "%s/%s+%d" \ % (self.alua_metadata_dir, self.parent_target.wwn, self.tag) # TPG public stuff def has_feature(self, feature): ''' Whether or not this TPG has a certain feature. ''' return self.parent_target.has_feature(feature) def delete(self): ''' Recursively deletes a TPG object. This will delete all attached LUN, NetworkPortal and Node ACL objects and then the TPG itself. Before starting the actual deletion process, all sessions will be disconnected. ''' self._check_self() path = "%s/enable" % self.path if os.path.isfile(path): self.enable = False for acl in self.node_acls: acl.delete() for lun in self.luns: lun.delete() for portal in self.network_portals: portal.delete() super(TPG, self).delete() # TODO: check that ALUA MD removal works while removing TPG if os.path.isdir(self.alua_metadata_path): shutil.rmtree(self.alua_metadata_path) def node_acl(self, node_wwn, mode='any'): ''' Same as NodeACL() but without specifying the parent_tpg. ''' self._check_self() return NodeACL(self, node_wwn=node_wwn, mode=mode) def network_portal(self, ip_address, port, mode='any'): ''' Same as NetworkPortal() but without specifying the parent_tpg. ''' self._check_self() return NetworkPortal(self, ip_address=ip_address, port=port, mode=mode) def lun(self, lun, storage_object=None, alias=None): ''' Same as LUN() but without specifying the parent_tpg. ''' self._check_self() return LUN(self, lun=lun, storage_object=storage_object, alias=alias) alua_metadata_path = property(_get_alua_metadata_path, doc="Get the ALUA metadata directory path " \ + "for the TPG.") tag = property(_get_tag, doc="Get the TPG Tag as an int.") parent_target = property(_get_parent_target, doc="Get the parent Target object to which the " \ + "TPG is attached.") enable = property(_get_enable, _set_enable, doc="Get or set a boolean value representing the " \ + "enable status of the TPG. " \ + "True means the TPG is enabled, False means it is " \ + "disabled.") network_portals = property(_list_network_portals, doc="Get the list of NetworkPortal objects currently attached " \ + "to the TPG.") node_acls = property(_list_node_acls, doc="Get the list of NodeACL objects currently " \ + "attached to the TPG.") luns = property(_list_luns, doc="Get the list of LUN objects currently attached " \ + "to the TPG.") nexus = property(_get_nexus, _set_nexus, doc="Get or set (once) the TPG's Nexus is used.") class Target(CFSNode): ''' This is an interface to Targets in configFS. A Target is identified by its wwn. To a Target is attached a list of TPG objects. 
''' # Target private stuff def __init__(self, fabric_module, wwn=None, mode='any'): ''' @param fabric_module: The target's fabric module. @type fabric_module: FabricModule @param wwn: The optional Target wwn. If no wwn or an empty wwn is specified, one will be generated for you. @type wwn: string @param mode: An optional string containing the object creation mode: - I{'any'} means the configFS object will be either looked up or created. - I{'lookup'} means the object MUST already exist in configFS. - I{'create'} means the object must NOT already exist in configFS. @type mode: string @return: A Target object. ''' super(Target, self).__init__() self.fabric_module = fabric_module self.wwn_type = fabric_module.spec['wwn_type'] if wwn is not None: wwn = str(wwn).strip() elif fabric_module.spec['wwn_list']: existing_wwns = set([child.wwn for child in fabric_module.targets]) free_wwns = fabric_module.spec['wwn_list'] - existing_wwns if free_wwns: wwn = free_wwns.pop() else: raise RTSLibError("All WWNs are in use, can't create target.") else: wwn = generate_wwn(self.wwn_type) self.wwn = wwn self._path = "%s/%s" % (self.fabric_module.path, self.wwn) if not self: if not self.fabric_module.is_valid_wwn(self.wwn): raise RTSLibError("Invalid %s wwn: %s" % (self.wwn_type, self.wwn)) self._create_in_cfs_ine(mode) def _list_tpgs(self): self._check_self() tpgs = [] tpg_dirs = glob.glob("%s/tpgt*" % self.path) for tpg_dir in tpg_dirs: tag = os.path.basename(tpg_dir).split('_')[1] tag = int(tag) tpgs.append(TPG(self, tag, 'lookup')) return tpgs # Target public stuff def has_feature(self, feature): ''' Whether or not this Target has a certain feature. ''' return self.fabric_module.has_feature(feature) def delete(self): ''' Recursively deletes a Target object. This will delete all attached TPG objects and then the Target itself. ''' self._check_self() for tpg in self.tpgs: tpg.delete() super(Target, self).delete() tpgs = property(_list_tpgs, doc="Get the list of TPGs for the Target.") def _test(): testmod() if __name__ == "__main__": _test() rtslib-2.2/rtslib/tcm.py0000644000175000017500000012412312176066620013517 0ustar rrsrrs''' Implements the RTS Target backstore and storage object classes. This file is part of RTSLib. Copyright (c) 2011-2013 by Datera, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
''' import os import re from target import LUN, TPG, Target, FabricModule from node import CFSNode from utils import fread, fwrite, RTSLibError, list_scsi_hbas, generate_wwn from utils import convert_scsi_path_to_hctl, convert_scsi_hctl_to_path from utils import convert_human_to_bytes, is_dev_in_use, get_block_type from utils import is_disk_partition, get_disk_size class Backstore(CFSNode): # Backstore private stuff def __init__(self, plugin, storage_class, index, mode): super(Backstore, self).__init__() if issubclass(storage_class, StorageObject): self._storage_object_class = storage_class self._plugin = plugin else: raise RTSLibError("StorageClass must derive from StorageObject.") try: self._index = int(index) except ValueError: raise RTSLibError("Invalid backstore index: %s" % index) self._path = "%s/core/%s_%d" % (self.configfs_dir, self._plugin, self._index) self._create_in_cfs_ine(mode) def _get_plugin(self): return self._plugin def _get_index(self): return self._index def _list_storage_objects(self): self._check_self() storage_objects = [] storage_object_names = [os.path.basename(s) for s in os.listdir(self.path) if s not in set(["hba_info", "hba_mode"])] for storage_object_name in storage_object_names: storage_objects.append(self._storage_object_class( self, storage_object_name)) return storage_objects def _create_in_cfs_ine(self, mode): try: super(Backstore, self)._create_in_cfs_ine(mode) except OSError, msg: raise RTSLibError("Cannot create backstore: %s" % msg) def _parse_info(self, key): self._check_self() info = fread("%s/hba_info" % self.path) return re.search(".*%s: ([^: ]+).*" \ % key, ' '.join(info.split())).group(1).lower() def _get_version(self): self._check_self() return self._parse_info("version") def _get_plugin(self): self._check_self() return self._parse_info("plugin") def _get_name(self): self._check_self() return "%s%d" % (self.plugin, self.index) # Backstore public stuff def delete(self): ''' Recursively deletes a Backstore object. This will delete all attached StorageObject objects, and then the Backstore itself. The underlying file and block storages will not be touched, but all ramdisk data will be lost. ''' self._check_self() for storage in self.storage_objects: storage.delete() super(Backstore, self).delete() plugin = property(_get_plugin, doc="Get the backstore plugin name.") index = property(_get_index, doc="Get the backstore index as an int.") storage_objects = property(_list_storage_objects, doc="Get the list of StorageObjects attached to the backstore.") version = property(_get_version, doc="Get the Backstore plugin version string.") plugin = property(_get_plugin, doc="Get the Backstore plugin name.") name = property(_get_name, doc="Get the backstore name.") class PSCSIBackstore(Backstore): ''' This is an interface to pscsi backstore plugin objects in configFS. A PSCSIBackstore object is identified by its backstore index. ''' # PSCSIBackstore private stuff def __init__(self, index, mode='any', legacy=False): ''' @param index: The backstore index matching a physical SCSI HBA. @type index: int @param mode: An optional string containing the object creation mode: - I{'any'} the configFS object will be either looked up or created. - I{'lookup'} the object MUST already exist in configFS. - I{'create'} the object must NOT already exist in configFS. @type mode: string @param legacy: Enable legacy physical HBA mode. If True, you must specify it also in lookup mode for StorageObjects to be notified. You've been warned! @return: A PSCSIBackstore object.
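Example (illustrative sketch, not part of the original docstring; the backstore index and the storage object name and dev values are placeholders that must match a real SCSI device on the system):

>>> from rtslib import PSCSIBackstore
>>> backstore = PSCSIBackstore(1, mode='create')
>>> so = backstore.storage_object("disk0", dev="1:0:0:0")  # H:C:T:L form
>>> backstore.delete()  # also deletes the attached storage objects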
''' self._legacy = legacy super(PSCSIBackstore, self).__init__("pscsi", PSCSIStorageObject, index, mode) def _create_in_cfs_ine(self, mode): if self.legacy_mode and self._index not in list_scsi_hbas(): raise RTSLibError("Cannot create backstore, hba " + "scsi%d does not exist." % self._index) else: Backstore._create_in_cfs_ine(self, mode) def _get_legacy(self): return self._legacy # PSCSIBackstore public stuff def storage_object(self, name, dev=None): ''' Same as PSCSIStorageObject() without specifying the backstore ''' self._check_self() return PSCSIStorageObject(self, name=name, dev=dev) legacy_mode = property(_get_legacy, doc="Get the legacy mode flag. If True, the backstore " + "index must match the StorageObjects' real HBA indexes.") class RDDRBackstore(Backstore): ''' This is an interface to rd_dr backstore plugin objects in configFS. A RDDRBackstore object is identified by its backstore index. ''' # RDDRBackstore private stuff def __init__(self, index, mode='any'): ''' @param index: The backstore index. @type index: int @param mode: An optional string containing the object creation mode: - I{'any'} the configFS object will be either looked up or created. - I{'lookup'} the object MUST already exist in configFS. - I{'create'} the object must NOT already exist in configFS. @type mode: string @return: A RDDRBackstore object. ''' super(RDDRBackstore, self).__init__("rd_dr", RDDRStorageObject, index, mode) # RDDRBackstore public stuff def storage_object(self, name, size=None, gen_wwn=True): ''' Same as RDDRStorageObject() without specifying the backstore ''' self._check_self() return RDDRStorageObject(self, name=name, size=size, gen_wwn=gen_wwn) class RDMCPBackstore(Backstore): ''' This is an interface to rd_mcp backstore plugin objects in configFS. A RDMCPBackstore object is identified by its backstore index. ''' # RDMCPBackstore private stuff def __init__(self, index, mode='any'): ''' @param index: The backstore index. @type index: int @param mode: An optional string containing the object creation mode: - I{'any'} the configFS object will be either looked up or created. - I{'lookup'} the object MUST already exist in configFS. - I{'create'} the object must NOT already exist in configFS. @type mode: string @return: A RDMCPBackstore object. ''' super(RDMCPBackstore, self).__init__("rd_mcp", RDMCPStorageObject, index, mode) # RDMCPBackstore public stuff def storage_object(self, name, size=None, gen_wwn=True): ''' Same as RDMCPStorageObject() without specifying the backstore ''' self._check_self() return RDMCPStorageObject(self, name=name, size=size, gen_wwn=gen_wwn) class FileIOBackstore(Backstore): ''' This is an interface to fileio backstore plugin objects in configFS. A FileIOBackstore object is identified by its backstore index. ''' # FileIOBackstore private stuff def __init__(self, index, mode='any'): ''' @param index: The backstore index. @type index: int @param mode: An optional string containing the object creation mode: - I{'any'} the configFS object will be either looked up or created. - I{'lookup'} the object MUST already exist in configFS. - I{'create'} the object must NOT already exist in configFS. @type mode: string @return: A FileIOBackstore object.
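Example (illustrative sketch, not part of the original docstring; the index, backing file path and size are placeholders):

>>> from rtslib import FileIOBackstore
>>> backstore = FileIOBackstore(2, mode='create')
>>> so = backstore.storage_object("file0", dev="/tmp/disk0.img", size="100M")
>>> backstore.delete()  # also deletes the attached storage objects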
''' super(FileIOBackstore, self).__init__("fileio", FileIOStorageObject, index, mode) # FileIOBackstore public stuff def storage_object(self, name, dev=None, size=None, gen_wwn=True, buffered_mode=False): ''' Same as FileIOStorageObject() without specifying the backstore ''' self._check_self() return FileIOStorageObject(self, name=name, dev=dev, size=size, gen_wwn=gen_wwn, buffered_mode=buffered_mode) class IBlockBackstore(Backstore): ''' This is an interface to iblock backstore plugin objects in configFS. An IBlockBackstore object is identified by its backstore index. ''' # IBlockBackstore private stuff def __init__(self, index, mode='any'): ''' @param index: The backstore index. @type index: int @param mode: An optionnal string containing the object creation mode: - I{'any'} the configFS object will be either lookupd or created. - I{'lookup'} the object MUST already exist configFS. - I{'create'} the object must NOT already exist in configFS. @type mode:string @return: An IBlockBackstore object. ''' super(IBlockBackstore, self).__init__("iblock", IBlockStorageObject, index, mode) # IBlockBackstore public stuff def storage_object(self, name, dev=None, gen_wwn=True): ''' Same as IBlockStorageObject() without specifying the backstore ''' self._check_self() return IBlockStorageObject(self, name=name, dev=dev, gen_wwn=gen_wwn) class StorageObject(CFSNode): ''' This is an interface to storage objects in configFS. A StorageObject is identified by its backstore and its name. ''' # StorageObject private stuff def __init__(self, backstore, backstore_class, name, mode): if not isinstance(backstore, backstore_class): raise RTSLibError("The parent backstore must be of " + "type %s" % backstore_class.__name__) super(StorageObject, self).__init__() self._backstore = backstore if "/" in name or " " in name or "\t" in name or "\n" in name: raise RTSLibError("A storage object's name cannot contain " " /, newline or spaces/tabs.") else: self._name = name self._path = "%s/%s" % (self.backstore.path, self.name) self._create_in_cfs_ine(mode) def _get_wwn(self): self._check_self() if self.is_configured(): path = "%s/wwn/vpd_unit_serial" % self.path return fread(path).partition(":")[2].strip() else: return "" def _set_wwn(self, wwn): self._check_self() if self.is_configured(): path = "%s/wwn/vpd_unit_serial" % self.path fwrite(path, "%s\n" % wwn) else: raise RTSLibError("Cannot write a T10 WWN Unit Serial to " + "an unconfigured StorageObject.") def _set_udev_path(self, udev_path): self._check_self() path = "%s/udev_path" % self.path fwrite(path, "%s" % udev_path) def _get_udev_path(self): self._check_self() path = "%s/udev_path" % self.path udev_path = fread(path).strip() if not udev_path and self.backstore.plugin == "fileio": udev_path = self._parse_info('File').strip() return udev_path def _get_name(self): return self._name def _get_backstore(self): return self._backstore def _enable(self): self._check_self() path = "%s/enable" % self.path fwrite(path, "1\n") def _control(self, command): self._check_self() path = "%s/control" % self.path fwrite(path, "%s" % str(command).strip()) def _write_fd(self, contents): self._check_self() path = "%s/fd" % self.path fwrite(path, "%s" % str(contents).strip()) def _parse_info(self, key): self._check_self() info = fread("%s/info" % self.path) return re.search(".*%s: ([^: ]+).*" \ % key, ' '.join(info.split())).group(1).lower() def _get_status(self): self._check_self() return self._parse_info('Status') def _gen_attached_luns(self): ''' Fast scan of luns attached to a storage 
object. This is an order of magnitude faster than using root.luns and matching path on them. ''' isdir = os.path.isdir islink = os.path.islink listdir = os.listdir realpath = os.path.realpath path = self.path from root import RTSRoot rtsroot = RTSRoot() target_names_excludes = FabricModule.target_names_excludes for fabric_module in rtsroot.loaded_fabric_modules: base = fabric_module.path for tgt_dir in listdir(base): if tgt_dir not in target_names_excludes: tpgts_base = "%s/%s" % (base, tgt_dir) for tpgt_dir in listdir(tpgts_base): luns_base = "%s/%s/lun" % (tpgts_base, tpgt_dir) if isdir(luns_base): for lun_dir in listdir(luns_base): links_base = "%s/%s" % (luns_base, lun_dir) for lun_file in listdir(links_base): link = "%s/%s" % (links_base, lun_file) if islink(link) and realpath(link) == path: val = (tpgt_dir + "_" + lun_dir) val = val.split('_') target = Target(fabric_module, tgt_dir) yield LUN(TPG(target, val[1]), val[3]) def _list_attached_luns(self): ''' Just returns a set of all luns attached to a storage object. ''' self._check_self() luns = set([]) for lun in self._gen_attached_luns(): luns.add(lun) return luns # StorageObject public stuff def delete(self): ''' Recursively deletes a StorageObject object. This will delete all attached LUNs currently using the StorageObject object, and then the StorageObject itself. The underlying file and block storages will not be touched, but all ramdisk data will be lost. ''' self._check_self() # If we are called after a configure error, we can skip this if self.is_configured(): for lun in self._gen_attached_luns(): if self.status != 'activated': break else: lun.delete() super(StorageObject, self).delete() def is_configured(self): ''' @return: True if the StorageObject is configured, else returns False ''' self._check_self() path = "%s/info" % self.path try: fread(path) except IOError: return False else: return True backstore = property(_get_backstore, doc="Get the backstore object.") name = property(_get_name, doc="Get the StorageObject name as a string.") udev_path = property(_get_udev_path, doc="Get the StorageObject udev_path as a string.") wwn = property(_get_wwn, _set_wwn, doc="Get or set the StorageObject T10 WWN Serial as a string.") status = property(_get_status, doc="Get the storage object status, depending on wether or not it"\ + "is used by any LUN") attached_luns = property(_list_attached_luns, doc="Get the list of all LUN objects attached.") class PSCSIStorageObject(StorageObject): ''' An interface to configFS storage objects for pscsi backstore. ''' # PSCSIStorageObject private stuff def __init__(self, backstore, name, dev=None): ''' A PSCSIStorageObject can be instantiated in two ways: - B{Creation mode}: If I{dev} is specified, the underlying configFS object will be created with that parameter. No PSCSIStorageObject with the same I{name} can pre-exist in the parent PSCSIBackstore in that mode, or instantiation will fail. - B{Lookup mode}: If I{dev} is not set, then the PSCSIStorageObject will be bound to the existing configFS object in the parent PSCSIBackstore having the specified I{name}. The underlying configFS object must already exist in that mode, or instantiation will fail. @param backstore: The parent backstore of the PSCSIStorageObject. @type backstore: PSCSIBackstore @param name: The name of the PSCSIStorageObject. @type name: string @param dev: You have two choices: - Use the SCSI id of the device: I{dev="H:C:T:L"}. 
If the parent backstore is in legacy mode, you must use I{dev="C:T:L"} instead, as the backstore index of the SCSI dev device would then be constrained by the parent backstore index. - Use the path to the SCSI device: I{dev="/path/to/dev"}. Note that if the parent Backstore is in legacy mode, the device must have the same backstore index as the parent backstore. @type dev: string @return: A PSCSIStorageObject object. ''' if dev is not None: super(PSCSIStorageObject, self).__init__(backstore, PSCSIBackstore, name, 'create') try: self._configure(dev) except: self.delete() raise else: super(PSCSIStorageObject, self).__init__(backstore, PSCSIBackstore, name, 'lookup') def _configure(self, dev): self._check_self() parent_hostid = self.backstore.index legacy = self.backstore.legacy_mode if legacy: try: (hostid, channelid, targetid, lunid) = \ convert_scsi_path_to_hctl(dev) except TypeError: try: (channelid, targetid, lunid) = dev.split(':') channelid = int(channelid) targetid = int(targetid) lunid = int(lunid) except ValueError: raise RTSLibError("Cannot find SCSI device by " + "path, and dev parameter not " + "in C:T:L format: %s." % dev) else: udev_path = convert_scsi_hctl_to_path(parent_hostid, channelid, targetid, lunid) if not udev_path: raise RTSLibError("SCSI device does not exist.") else: if hostid != parent_hostid: raise RTSLibError("The specified SCSI device does " + "not belong to the backstore.") else: udev_path = dev.strip() else: # The Backstore is not in legacy mode. # Use H:C:T:L format or preserve the path given by the user. try: (hostid, channelid, targetid, lunid) = \ convert_scsi_path_to_hctl(dev) except TypeError: try: (hostid, channelid, targetid, lunid) = dev.split(':') hostid = int(hostid) channelid = int(channelid) targetid = int(targetid) lunid = int(lunid) except ValueError: raise RTSLibError("Cannot find SCSI device by " + "path, and dev " + "parameter not in H:C:T:L " + "format: %s." % dev) else: udev_path = convert_scsi_hctl_to_path(hostid, channelid, targetid, lunid) if not udev_path: raise RTSLibError("SCSI device does not exist.") else: udev_path = dev.strip() if is_dev_in_use(udev_path): raise RTSLibError("Cannot configure StorageObject because " + "device %s (SCSI %d:%d:%d:%d) " % (udev_path, hostid, channelid, targetid, lunid) + "is already in use.") if legacy: self._control("scsi_channel_id=%d," % channelid \ + "scsi_target_id=%d," % targetid \ + "scsi_lun_id=%d" % lunid) else: self._control("scsi_host_id=%d," % hostid \ + "scsi_channel_id=%d," % channelid \ + "scsi_target_id=%d," % targetid \ + "scsi_lun_id=%d" % lunid) self._set_udev_path(udev_path) self._enable() def _get_model(self): self._check_self() info = fread("%s/info" % self.path) return str(re.search(".*Model:(.*)Rev:", ' '.join(info.split())).group(1)).strip() def _get_vendor(self): self._check_self() info = fread("%s/info" % self.path) return str(re.search(".*Vendor:(.*)Model:", ' '.join(info.split())).group(1)).strip() def _get_revision(self): self._check_self() return self._parse_info('Rev') def _get_channel_id(self): self._check_self() return int(self._parse_info('Channel ID')) def _get_target_id(self): self._check_self() return int(self._parse_info('Target ID')) def _get_lun(self): self._check_self() return int(self._parse_info('LUN')) def _get_host_id(self): self._check_self() return int(self._parse_info('Host ID')) # PSCSIStorageObject public stuff wwn = property(StorageObject._get_wwn, doc="Get the StorageObject T10 WWN Unit Serial as a string." 
+ " You cannot set it for pscsi-backed StorageObjects.") model = property(_get_model, doc="Get the SCSI device model string") vendor = property(_get_vendor, doc="Get the SCSI device vendor string") revision = property(_get_revision, doc="Get the SCSI device revision string") host_id = property(_get_host_id, doc="Get the SCSI device host id") channel_id = property(_get_channel_id, doc="Get the SCSI device channel id") target_id = property(_get_target_id, doc="Get the SCSI device target id") lun = property(_get_lun, doc="Get the SCSI device LUN") class RDDRStorageObject(StorageObject): ''' An interface to configFS storage objects for rd_dr backstore. ''' # RDDRStorageObject private stuff def __init__(self, backstore, name, size=None, gen_wwn=True): ''' A RDDRStorageObject can be instantiated in two ways: - B{Creation mode}: If I{size} is specified, the underlying configFS object will be created with that parameter. No RDDRStorageObject with the same I{name} can pre-exist in the parent RDDRBackstore in that mode, or instantiation will fail. - B{Lookup mode}: If I{size} is not set, then the RDDRStorageObject will be bound to the existing configFS object in the parent RDDRBackstore having the specified I{name}. The underlying configFS object must already exist in that mode, or instantiation will fail. @param backstore: The parent backstore of the RDDRStorageObject. @type backstore: RDDRBackstore @param name: The name of the RDDRStorageObject. @type name: string @param size: The size of the ramdrive to create: - If size is an int, it represents a number of bytes - If size is a string, the following units can be used : - I{B} or no unit present for bytes - I{k}, I{K}, I{kB}, I{KB} for kB (kilobytes) - I{m}, I{M}, I{mB}, I{MB} for MB (megabytes) - I{g}, I{G}, I{gB}, I{GB} for GB (gigabytes) - I{t}, I{T}, I{tB}, I{TB} for TB (terabytes) Example: size="1MB" for a one megabytes storage object. - Note that the size will be rounded to the closest 4096 Bytes RAM pages count. For instance, a size of 100000 Bytes will be rounded to 24 pages, really 98304 Bytes. - The base value for kilo is 1024, aka 1kB = 1024B. Strictly speaking, we use kiB, MiB, etc. @type size: string or int @param gen_wwn: Should we generate a T10 WWN Unit Serial ? @type gen_wwn: bool @return: A RDDRStorageObject object. ''' if size is not None: super(RDDRStorageObject, self).__init__(backstore, RDDRBackstore, name, 'create') try: self._configure(size, gen_wwn) except: self.delete() raise else: super(RDDRStorageObject, self).__init__(backstore, RDDRBackstore, name, 'lookup') def _configure(self, size, wwn): self._check_self() size = convert_human_to_bytes(size) # convert to 4k pages size = round(float(size)/4096) if size == 0: size = 1 self._control("rd_pages=%d" % size) self._enable() if wwn: self.wwn = generate_wwn('unit_serial') def _get_page_size(self): self._check_self() return int(self._parse_info("PAGES/PAGE_SIZE").split('*')[1]) def _get_pages(self): self._check_self() return int(self._parse_info("PAGES/PAGE_SIZE").split('*')[0]) def _get_size(self): self._check_self() size = self._get_page_size() * self._get_pages() return size # RDDRStorageObject public stuff page_size = property(_get_page_size, doc="Get the ramdisk page size.") pages = property(_get_pages, doc="Get the ramdisk number of pages.") size = property(_get_size, doc="Get the ramdisk size in bytes.") class RDMCPStorageObject(StorageObject): ''' An interface to configFS storage objects for rd_mcp backstore. 
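Example (illustrative sketch, not part of the original docstring; the backstore index, name and size are arbitrary placeholders):

>>> from rtslib import RDMCPBackstore
>>> backstore = RDMCPBackstore(0, mode='create')
>>> so = backstore.storage_object("ramdisk0", size="16MB")  # rounded to 4k pages
>>> size_bytes = so.size  # pages * page_size, in bytes
>>> backstore.delete()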
''' # RDMCPStorageObject private stuff def __init__(self, backstore, name, size=None, gen_wwn=True): ''' A RDMCPStorageObject can be instantiated in two ways: - B{Creation mode}: If I{size} is specified, the underlying configFS object will be created with that parameter. No RDMCPStorageObject with the same I{name} can pre-exist in the parent RDMCPBackstore in that mode, or instantiation will fail. - B{Lookup mode}: If I{size} is not set, then the RDMCPStorageObject will be bound to the existing configFS object in the parent RDMCPBackstore having the specified I{name}. The underlying configFS object must already exist in that mode, or instantiation will fail. @param backstore: The parent backstore of the RDMCPStorageObject. @type backstore: RDMCPBackstore @param name: The name of the RDMCPStorageObject. @type name: string @param size: The size of the ramdrive to create: - If size is an int, it represents a number of bytes - If size is a string, the following units can be used : - B{B} or no unit present for bytes - B{k}, B{K}, B{kB}, B{KB} for kB (kilobytes) - B{m}, B{M}, B{mB}, B{MB} for MB (megabytes) - B{g}, B{G}, B{gB}, B{GB} for GB (gigabytes) - B{t}, B{T}, B{tB}, B{TB} for TB (terabytes) Example: size="1MB" for a one megabytes storage object. - Note that the size will be rounded to the closest 4096 Bytes RAM pages count. For instance, a size of 100000 Bytes will be rounded to 24 pages, really 98304 Bytes. - The base value for kilo is 1024, aka 1kB = 1024B. Strictly speaking, we use kiB, MiB, etc. @type size: string or int @param gen_wwn: Should we generate a T10 WWN Unit Serial ? @type gen_wwn: bool @return: A RDMCPStorageObject object. ''' if size is not None: super(RDMCPStorageObject, self).__init__(backstore, RDMCPBackstore, name, 'create') try: self._configure(size, gen_wwn) except: self.delete() raise else: super(RDMCPStorageObject, self).__init__(backstore, RDMCPBackstore, name, 'lookup') def _configure(self, size, wwn): self._check_self() size = convert_human_to_bytes(size) # convert to 4k pages size = round(float(size)/4096) if size == 0: size = 1 self._control("rd_pages=%d" % size) self._enable() if wwn: self.wwn = generate_wwn('unit_serial') def _get_page_size(self): self._check_self() return int(self._parse_info("PAGES/PAGE_SIZE").split('*')[1]) def _get_pages(self): self._check_self() return int(self._parse_info("PAGES/PAGE_SIZE").split('*')[0]) def _get_size(self): self._check_self() size = self._get_page_size() * self._get_pages() return size # RDMCPStorageObject public stuff page_size = property(_get_page_size, doc="Get the ramdisk page size.") pages = property(_get_pages, doc="Get the ramdisk number of pages.") size = property(_get_size, doc="Get the ramdisk size in bytes.") class FileIOStorageObject(StorageObject): ''' An interface to configFS storage objects for fileio backstore. ''' # FileIOStorageObject private stuff def __init__(self, backstore, name, dev=None, size=None, gen_wwn=True, buffered_mode=False): ''' A FileIOStorageObject can be instantiated in two ways: - B{Creation mode}: If I{dev} and I{size} are specified, the underlying configFS object will be created with those parameters. No FileIOStorageObject with the same I{name} can pre-exist in the parent FileIOBackstore in that mode, or instantiation will fail. - B{Lookup mode}: If I{dev} and I{size} are not set, then the FileIOStorageObject will be bound to the existing configFS object in the parent FileIOBackstore having the specified I{name}. 
The underlying configFS object must already exist in that mode, or instantiation will fail. @param backstore: The parent backstore of the FileIOStorageObject. @type backstore: FileIOBackstore @param name: The name of the FileIOStorageObject. @type name: string @param dev: The path to the backend file or block device to be used. - Examples: I{dev="/dev/sda"}, I{dev="/tmp/myfile"} - The only block device type that is accepted is I{TYPE_DISK}, or partitions of a I{TYPE_DISK} device. For other device types, use pscsi. @type dev: string @param size: The maximum size to allocate for the file. Not used for block devices. - If size is an int, it represents a number of bytes - If size is a string, the following units can be used : - B{B} or no unit present for bytes - B{k}, B{K}, B{kB}, B{KB} for kB (kilobytes) - B{m}, B{M}, B{mB}, B{MB} for MB (megabytes) - B{g}, B{G}, B{gB}, B{GB} for GB (gigabytes) - B{t}, B{T}, B{tB}, B{TB} for TB (terabytes) Example: size="1MB" for a one-megabyte storage object. - The base value for kilo is 1024, aka 1kB = 1024B. Strictly speaking, we use kiB, MiB, etc. @type size: string or int @param gen_wwn: Should we generate a T10 WWN Unit Serial ? @type gen_wwn: bool @param buffered_mode: Should we create the StorageObject in buffered mode or not ? By default, we create it in synchronous mode (non-buffered). This cannot be changed later. @type buffered_mode: bool @return: A FileIOStorageObject object. ''' if dev is not None: super(FileIOStorageObject, self).__init__(backstore, FileIOBackstore, name, 'create') try: self._configure(dev, size, gen_wwn, buffered_mode) except: self.delete() raise else: super(FileIOStorageObject, self).__init__(backstore, FileIOBackstore, name, 'lookup') def _configure(self, dev, size, wwn, buffered_mode): self._check_self() rdev = os.path.realpath(dev) if not os.path.isdir(os.path.dirname(rdev)): raise RTSLibError("The dev parameter must be a path to a " + "file inside an existing directory, " + "not %s." % str(os.path.dirname(dev))) if os.path.isdir(rdev): raise RTSLibError("The dev parameter must be a path to a " + "file or block device, not a directory: " + "%s." % dev) block_type = get_block_type(rdev) if block_type is None and not is_disk_partition(rdev): if os.path.exists(rdev) and not os.path.isfile(dev): raise RTSLibError("Device %s is neither a file, " % dev + "a disk partition, nor a block device.") # It is a file if size is None: raise RTSLibError("The size parameter is mandatory " + "when using a file.") size = convert_human_to_bytes(size) self._control("fd_dev_name=%s,fd_dev_size=%d" % (dev, size)) else: # it is a block device or a disk partition if size is not None: raise RTSLibError("You cannot specify a size for a " + "block device.") if block_type != 0 and block_type is not None: raise RTSLibError("Device %s is a block device, " % dev + "but not of TYPE_DISK.") if is_dev_in_use(rdev): raise RTSLibError("Cannot configure StorageObject " + "because device " + "%s is already in use."
% dev) if is_disk_partition(rdev): size = get_disk_size(rdev) self._control("fd_dev_name=%s,fd_dev_size=%d" % (dev, size)) else: self._control("fd_dev_name=%s" % dev) self._set_udev_path(dev) if buffered_mode: self._set_buffered_mode() self._enable() if wwn: self.wwn = generate_wwn('unit_serial') def _get_mode(self): self._check_self() return self._parse_info('Mode') def _get_size(self): self._check_self() return int(self._parse_info('Size')) def _set_buffered_mode(self): ''' FileIOStorage objects have synchronous mode enabled by default. This allows moving them to buffered mode. Warning, setting the object back to synchronous mode is not implemented yet, so there is no turning back unless you delete and recreate the FileIOStorageObject. ''' self._check_self() self._control("fd_buffered_io=1") # FileIOStorageObject public stuff mode = property(_get_mode, doc="Get the current FileIOStorage mode, buffered or synchronous") size = property(_get_size, doc="Get the current FileIOStorage size in bytes") class IBlockStorageObject(StorageObject): ''' An interface to configFS storage objects for iblock backstore. ''' # IBlockStorageObject private stuff def __init__(self, backstore, name, dev=None, gen_wwn=True): ''' An IBlockStorageObject can be instantiated in two ways: - B{Creation mode}: If I{dev} is specified, the underlying configFS object will be created with that parameter. No IBlockStorageObject with the same I{name} can pre-exist in the parent IBlockBackstore in that mode. - B{Lookup mode}: If I{dev} is not set, then the IBlockStorageObject will be bound to the existing configFS object in the parent IBlockBackstore having the specified I{name}. The underlying configFS object must already exist in that mode, or instantiation will fail. @param backstore: The parent backstore of the IBlockStorageObject. @type backstore: IBlockBackstore @param name: The name of the IBlockStorageObject. @type name: string @param dev: The path to the backend block device to be used. - Example: I{dev="/dev/sda"}. - The only device type that is accepted is I{TYPE_DISK}. For other device types, use pscsi. @type dev: string @param gen_wwn: Should we generate a T10 WWN Unit Serial when creating the object ? @type gen_wwn: bool @return: An IBlockStorageObject object. ''' if dev is not None: super(IBlockStorageObject, self).__init__(backstore, IBlockBackstore, name, 'create') try: self._configure(dev, gen_wwn) except: self.delete() raise else: super(IBlockStorageObject, self).__init__(backstore, IBlockBackstore, name, 'lookup') def _configure(self, dev, wwn): self._check_self() if get_block_type(dev) != 0: raise RTSLibError("Device is not a TYPE_DISK block device.") if is_dev_in_use(dev): raise RTSLibError("Cannot configure StorageObject because " + "device %s is already in use."
% dev) self._set_udev_path(dev) if self._backstore.version.startswith("v3."): # For 3.x, use the fd method file_fd = os.open(dev, os.O_RDWR) try: self._write_fd(file_fd) finally: os.close(file_fd) else: # For 4.x and above, use the generic udev_path method self._control("udev_path=%s" % dev) self._enable() if wwn: self.wwn = generate_wwn('unit_serial') def _get_major(self): self._check_self() return int(self._parse_info('Major')) def _get_minor(self): self._check_self() return int(self._parse_info('Minor')) # IblockStorageObject public stuff major = property(_get_major, doc="Get the block device major number") minor = property(_get_minor, doc="Get the block device minor number") def _test(): import doctest doctest.testmod() if __name__ == "__main__": _test() rtslib-2.2/rtslib/utils.py0000644000175000017500000006725412176066620014107 0ustar rrsrrs''' Provides various utility functions. This file is part of RTSLib. Copyright (c) 2011-2013 by Datera, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import re import os import stat import uuid import glob import socket import ipaddr import netifaces import subprocess from array import array from fcntl import ioctl from threading import Thread from Queue import Queue, Empty from struct import pack, unpack class RTSLibError(Exception): ''' Generic rtslib error. ''' pass class RTSLibBrokenLink(RTSLibError): ''' Broken link in configfs, i.e. missing LUN storage object. ''' pass class RTSLibNotInCFS(RTSLibError): ''' The underlying configfs object does not exist. Happens when calling methods of an object that is instantiated but have been deleted from congifs, or when trying to lookup an object that does not exist. ''' pass def flatten_nested_list(nested_list): ''' Function to flatten a nested list. >>> import rtslib.utils as utils >>> utils.flatten_nested_list([[1,2,3,[4,5,6]],[7,8],[[[9,10]],[11,]]]) [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] @param nested_list: A nested list (list of lists of lists etc.) @type nested_list: list @return: A list with only non-list elements ''' return list(gen_list_item(nested_list)) def gen_list_item(nested_list): ''' The generator for flatten_nested_list(). It returns one by one items that are not a list, and recurses when he finds an item that is a list. ''' for item in nested_list: if type(item) is list: for nested_item in gen_list_item(item): yield nested_item else: yield item def fwrite(path, string): ''' This function writes a string to a file, and takes care of opening it and closing it. If the file does not exists, it will be created. >>> from rtslib.utils import * >>> fwrite("/tmp/test", "hello") >>> fread("/tmp/test") 'hello' @param path: The file to write to. @type path: string @param string: The string to write to the file. @type string: string ''' path = os.path.realpath(str(path)) file_fd = open(path, 'w') try: file_fd.write("%s" % string) finally: file_fd.close() def fread(path): ''' This function reads the contents of a file. It takes care of opening and closing it. 
>>> from rtslib.utils import * >>> fwrite("/tmp/test", "hello") >>> fread("/tmp/test") 'hello' >>> fread("/tmp/notexistingfile") # doctest: +ELLIPSIS Traceback (most recent call last): ... IOError: [Errno 2] No such file or directory: '/tmp/notexistingfile' @param path: The path to the file to read from. @type path: string @return: A string containing the file's contents. ''' path = os.path.realpath(str(path)) string = "" file_fd = open(path, 'r') try: string = file_fd.read() finally: file_fd.close() return string def is_dev_in_use(path): ''' This function will check if the device or file referenced by path is already mounted or used as a storage object backend. It works by trying to open the path with the O_EXCL flag, which will fail if someone else already did. Note that the file is closed before the function returns, so this does not guarantee the device will still be available after the check. @param path: path to the file or device to check @type path: string @return: A boolean, True if we cannot get an exclusive descriptor on the path, False if we can. ''' path = os.path.realpath(str(path)) try: file_fd = os.open(path, os.O_EXCL|os.O_NDELAY) except OSError: return True else: os.close(file_fd) return False def is_disk_partition(path): ''' Try to find out if path is a partition of a TYPE_DISK device. Handles both /dev/sdaX and /dev/disk/by-*/*-part? schemes. ''' regex = re.match(r'([a-z/]+)([1-9]+)$', path) if not regex: regex = re.match(r'(/dev/disk/.+)(-part[1-9]+)$', path) if not regex: return False else: if get_block_type(regex.group(1)) == 0: return True def get_disk_size(path): ''' This function returns the size in bytes of a disk-type block device, or None if path does not point to a disk- type device. ''' (major, minor) = get_block_numbers(path) if major is None: return None # list of [major, minor, #blocks (1K), name] partitions = [ x.split()[0:4] for x in fread("/proc/partitions").split("\n")[2:] if x] size = None for partition in partitions: if partition[0:2] == [str(major), str(minor)]: size = int(partition[2]) * 1024 break return size def get_block_numbers(path): ''' This function returns a (major,minor) tuple for the block device found at path, or (None, None) if path is not a block device. ''' dev = os.path.realpath(path) try: mode = os.stat(dev) except OSError: return (None, None) if not stat.S_ISBLK(mode[stat.ST_MODE]): return (None, None) major = os.major(mode.st_rdev) minor = os.minor(mode.st_rdev) return (major, minor) def get_block_type(path): ''' This function returns a block device's type. Example: 0 is TYPE_DISK If no match is found, None is returned. >>> from rtslib.utils import * >>> get_block_type("/dev/sda") 0 >>> get_block_type("/dev/sr0") 5 >>> get_block_type("/dev/scd0") 5 >>> get_block_type("/dev/nodevicehere") is None True @param path: path to the block device @type path: string @return: An int for the block device type, or None if not a block device. ''' dev = os.path.realpath(path) # TODO: Make adding new majors on-the-fly possible, using some config file # for instance, maybe an additional list argument, or even a match all # mode for overrides ? # Make sure we are dealing with a block device (major, minor) = get_block_numbers(dev) if major is None: return None # Treat disk partitions as TYPE_DISK if is_disk_partition(path): return 0 # These devices are disk type block devices, but might not report this # correctly in /sys/block/xxx/device/type, so use their major number.
type_disk_known_majors = [1, # RAM disk 8, # SCSI disk devices 9, # Metadisk RAID devices 13, # 8-bit MFM/RLL/IDE controller 19, # "Double" compressed disk 21, # Acorn MFM hard drive interface 30, # FIXME: Normally 'Philips LMS CM-205 # CD-ROM' in the Linux devices list but # used by Cirtas devices. 35, # Slow memory ramdisk 36, # MCA ESDI hard disk 37, # Zorro II ramdisk 43, # Network block devices 44, # Flash Translation Layer (FTL) filesystems 45, # Parallel port IDE disk devices 47, # Parallel port ATAPI disk devices 48, # Mylex DAC960 PCI RAID controller 48, # Mylex DAC960 PCI RAID controller 49, # Mylex DAC960 PCI RAID controller 50, # Mylex DAC960 PCI RAID controller 51, # Mylex DAC960 PCI RAID controller 52, # Mylex DAC960 PCI RAID controller 53, # Mylex DAC960 PCI RAID controller 54, # Mylex DAC960 PCI RAID controller 55, # Mylex DAC960 PCI RAID controller 58, # Reserved for logical volume manager 59, # Generic PDA filesystem device 60, # LOCAL/EXPERIMENTAL USE 61, # LOCAL/EXPERIMENTAL USE 62, # LOCAL/EXPERIMENTAL USE 63, # LOCAL/EXPERIMENTAL USE 64, # Scramdisk/DriveCrypt encrypted devices 65, # SCSI disk devices (16-31) 66, # SCSI disk devices (32-47) 67, # SCSI disk devices (48-63) 68, # SCSI disk devices (64-79) 69, # SCSI disk devices (80-95) 70, # SCSI disk devices (96-111) 71, # SCSI disk devices (112-127) 72, # Compaq Intelligent Drive Array 73, # Compaq Intelligent Drive Array 74, # Compaq Intelligent Drive Array 75, # Compaq Intelligent Drive Array 76, # Compaq Intelligent Drive Array 77, # Compaq Intelligent Drive Array 78, # Compaq Intelligent Drive Array 79, # Compaq Intelligent Drive Array 80, # I2O hard disk 80, # I2O hard disk 81, # I2O hard disk 82, # I2O hard disk 83, # I2O hard disk 84, # I2O hard disk 85, # I2O hard disk 86, # I2O hard disk 87, # I2O hard disk 93, # NAND Flash Translation Layer filesystem 94, # IBM S/390 DASD block storage 96, # Inverse NAND Flash Translation Layer 98, # User-mode virtual block device 99, # JavaStation flash disk 101, # AMI HyperDisk RAID controller 102, # Compressed block device 104, # Compaq Next Generation Drive Array 105, # Compaq Next Generation Drive Array 106, # Compaq Next Generation Drive Array 107, # Compaq Next Generation Drive Array 108, # Compaq Next Generation Drive Array 109, # Compaq Next Generation Drive Array 110, # Compaq Next Generation Drive Array 111, # Compaq Next Generation Drive Array 112, # IBM iSeries virtual disk 114, # IDE BIOS powered software RAID interfaces 115, # NetWare (NWFS) Devices (0-255) 117, # Enterprise Volume Management System 120, # LOCAL/EXPERIMENTAL USE 121, # LOCAL/EXPERIMENTAL USE 122, # LOCAL/EXPERIMENTAL USE 123, # LOCAL/EXPERIMENTAL USE 124, # LOCAL/EXPERIMENTAL USE 125, # LOCAL/EXPERIMENTAL USE 126, # LOCAL/EXPERIMENTAL USE 127, # LOCAL/EXPERIMENTAL USE 128, # SCSI disk devices (128-143) 129, # SCSI disk devices (144-159) 130, # SCSI disk devices (160-175) 131, # SCSI disk devices (176-191) 132, # SCSI disk devices (192-207) 133, # SCSI disk devices (208-223) 134, # SCSI disk devices (224-239) 135, # SCSI disk devices (240-255) 136, # Mylex DAC960 PCI RAID controller 137, # Mylex DAC960 PCI RAID controller 138, # Mylex DAC960 PCI RAID controller 139, # Mylex DAC960 PCI RAID controller 140, # Mylex DAC960 PCI RAID controller 141, # Mylex DAC960 PCI RAID controller 142, # Mylex DAC960 PCI RAID controller 143, # Mylex DAC960 PCI RAID controller 144, # Non-device (e.g. NFS) mounts 145, # Non-device (e.g. NFS) mounts 146, # Non-device (e.g. 
NFS) mounts 147, # DRBD device 152, # EtherDrive Block Devices 153, # Enhanced Metadisk RAID storage units 160, # Carmel 8-port SATA Disks 161, # Carmel 8-port SATA Disks 199, # Veritas volume manager (VxVM) volumes 201, # Veritas VxVM dynamic multipathing driver 230, # ZFS ZVols 240, # LOCAL/EXPERIMENTAL USE 241, # LOCAL/EXPERIMENTAL USE 242, # LOCAL/EXPERIMENTAL USE 243, # LOCAL/EXPERIMENTAL USE 244, # LOCAL/EXPERIMENTAL USE 245, # LOCAL/EXPERIMENTAL USE 246, # LOCAL/EXPERIMENTAL USE 247, # LOCAL/EXPERIMENTAL USE 248, # LOCAL/EXPERIMENTAL USE 249, # LOCAL/EXPERIMENTAL USE 250, # LOCAL/EXPERIMENTAL USE 251, # LOCAL/EXPERIMENTAL USE 252, # LOCAL/EXPERIMENTAL USE 253, # LOCAL/EXPERIMENTAL USE 254 # LOCAL/EXPERIMENTAL USE ] if major in type_disk_known_majors: return 0 # Same for LVM LVs, but as we cannot use major here # (it varies accross distros), use the realpath to check if os.path.dirname(dev) == "/dev/mapper": return 0 # list of (major, minor, type) tuples blocks = [(fread("%s/dev" % fdev).strip().split(':')[0], fread("%s/dev" % fdev).strip().split(':')[1], fread("%s/device/type" % fdev).strip()) for fdev in glob.glob("/sys/block/*") if os.path.isfile("%s/device/type" % fdev)] for block in blocks: if int(block[0]) == major and int(block[1]) == minor: return int(block[2]) return None def list_scsi_hbas(): ''' This function returns the list of HBA indexes for existing SCSI HBAs. ''' return list(set([int(device.partition(":")[0]) for device in os.listdir("/sys/bus/scsi/devices") if re.match("[0-9:]+", device)])) def convert_scsi_path_to_hctl(path): ''' This function returns the SCSI ID in H:C:T:L form for the block device being mapped to the udev path specified. If no match is found, None is returned. >>> import rtslib.utils as utils >>> utils.convert_scsi_path_to_hctl('/dev/scd0') (2, 0, 0, 0) >>> utils.convert_scsi_path_to_hctl('/dev/sr0') (2, 0, 0, 0) >>> utils.convert_scsi_path_to_hctl('/dev/sda') (3, 0, 0, 0) >>> utils.convert_scsi_path_to_hctl('/dev/sda1') >>> utils.convert_scsi_path_to_hctl('/dev/sdb') (3, 0, 1, 0) >>> utils.convert_scsi_path_to_hctl('/dev/sdc') (3, 0, 2, 0) @param path: The udev path to the SCSI block device. @type path: string @return: An (host, controller, target, lun) tuple of integer values representing the SCSI ID of the device, or None if no match is found. ''' devname = os.path.basename(os.path.realpath(path)) try: hctl = os.listdir("/sys/block/%s/device/scsi_device" % devname)[0].split(':') except: return None return [int(data) for data in hctl] def convert_scsi_hctl_to_path(host, controller, target, lun): ''' This function returns a udev path pointing to the block device being mapped to the SCSI device that has the provided H:C:T:L. >>> import rtslib.utils as utils >>> utils.convert_scsi_hctl_to_path(0,0,0,0) '' >>> utils.convert_scsi_hctl_to_path(2,0,0,0) # doctest: +ELLIPSIS '/dev/s...0' >>> utils.convert_scsi_hctl_to_path(3,0,2,0) '/dev/sdc' @param host: The SCSI host id. @type host: int @param controller: The SCSI controller id. @type controller: int @param target: The SCSI target id. @type target: int @param lun: The SCSI Logical Unit Number. @type lun: int @return: A string for the canonical path to the device, or empty string. 
''' try: host = int(host) controller = int(controller) target = int(target) lun = int(lun) except ValueError: raise RTSLibError( "The host, controller, target and lun parameter must be integers.") for devname in os.listdir("/sys/block"): path = "/dev/%s" % devname hctl = [host, controller, target, lun] if convert_scsi_path_to_hctl(path) == hctl: return os.path.realpath(path) return '' def convert_human_to_bytes(hsize, kilo=1024): ''' This function converts human-readable amounts of bytes to bytes. It understands the following units : - I{B} or no unit present for Bytes - I{k}, I{K}, I{kB}, I{KB} for kB (kilobytes) - I{m}, I{M}, I{mB}, I{MB} for MB (megabytes) - I{g}, I{G}, I{gB}, I{GB} for GB (gigabytes) - I{t}, I{T}, I{tB}, I{TB} for TB (terabytes) Note: The definition of I{kilo} defaults to 1kB = 1024Bytes. Strictly speaking, those should not be called I{kB} but I{kiB}. You can override that with the optional kilo parameter. Example: >>> import rtslib.utils as utils >>> utils.convert_human_to_bytes("1k") 1024 >>> utils.convert_human_to_bytes("1k", 1000) 1000 >>> utils.convert_human_to_bytes("1MB") 1048576 >>> utils.convert_human_to_bytes("12kB") 12288 @param hsize: The human-readable version of the Bytes amount to convert @type hsize: string or int @param kilo: Optionnal base for the kilo prefix @type kilo: int @return: An int representing the human-readable string converted to bytes ''' size = str(hsize).replace("g","G").replace("K","k") size = size.replace("m","M").replace("t","T") if not re.match("^[0-9]+[T|G|M|k]?[B]?$", size): raise RTSLibError("Cannot interpret size, wrong format: %s" % hsize) size = size.rstrip('B') units = ['k', 'M', 'G', 'T'] try: power = units.index(size[-1]) + 1 except ValueError: power = 0 size = int(size) else: size = int(size[:-1]) size = size * int(kilo) ** power return size def generate_wwn(wwn_type): ''' Generates a random WWN of the specified type: - unit_serial: T10 WWN Unit Serial. - iqn: iSCSI IQN - naa: SAS NAA address @param wwn_type: The WWN address type. @type wwn_type: str @returns: A string containing the WWN. ''' wwn_type = wwn_type.lower() if wwn_type == 'free': return str(uuid.uuid4()) if wwn_type == 'unit_serial': return str(uuid.uuid4()) elif wwn_type == 'iqn': localname = socket.gethostname().split(".")[0] localarch = os.uname()[4].replace("_","") prefix = "iqn.2003-01.org.linux-iscsi.%s.%s" % (localname, localarch) prefix = prefix.strip().lower() serial = "sn.%s" % str(uuid.uuid4())[24:] return "%s:%s" % (prefix, serial) elif wwn_type == 'naa': sas_address = "naa.6001405%s" % str(uuid.uuid4())[:10] return sas_address.replace('-', '') else: raise ValueError("Unknown WWN type: %s." % wwn_type) def is_valid_wwn(wwn_type, wwn, wwn_list=None): ''' Returns True if the wwn is a valid wwn of type wwn_type. @param wwn_type: The WWN address type. @type wwn_type: str @param wwn: The WWN address to check. @type wwn: str @param wwn_list: An optional list of wwns to check the wwn parameter from. @type wwn_list: list of str @returns: bool. 
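Example (illustrative; the sample values are made up, and the expected results follow from the matching rules implemented below):

>>> from rtslib.utils import is_valid_wwn
>>> is_valid_wwn('iqn', 'iqn.2003-01.org.linux-iscsi.host.x8664:sn.123456789012')
True
>>> is_valid_wwn('naa', 'not-a-naa-address')
False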
''' wwn_type = wwn_type.lower() if wwn_list is not None and wwn not in wwn_list: return False elif wwn_type == 'free': return True elif wwn_type == 'iqn' \ and re.match("iqn\.[0-9]{4}-[0-1][0-9]\..*\..*", wwn) \ and not re.search(' ', wwn) \ and not re.search('_', wwn): return True elif wwn_type == 'naa' \ and re.match("naa\.[0-9A-Fa-f]{16}$", wwn): return True elif wwn_type == 'unit_serial' \ and re.match( "[0-9A-Fa-f]{8}(-[0-9A-Fa-f]{4}){3}-[0-9A-Fa-f]{12}$", wwn): return True else: return False def list_available_kernel_modules(): ''' List all loadable kernel modules as registered by depmod ''' kver = os.uname()[2] depfile = "/lib/modules/%s/modules.dep" % kver handle = open(depfile) try: lines = handle.readlines() finally: handle.close() return [os.path.basename(line.partition(":")[0]).partition(".")[0] for line in lines] def list_loaded_kernel_modules(): ''' List all currently loaded kernel modules ''' return [line.split(" ")[0] for line in fread("/proc/modules").split('\n') if line] def modprobe(module): ''' Load the specified kernel module if needed. @param module: The name of the kernel module to be loaded. @type module: str @return: Whether or not we had to load the module. ''' if module not in list_loaded_kernel_modules(): if module in list_available_kernel_modules(): try: exec_argv(["modprobe", module]) except Exception, msg: raise RTSLibError("Kernel module %s exists " % module + "but fails to load: %s" % msg) else: return True else: raise RTSLibError("Kernel module %s does not exist on disk " % module + "and is not loaded.") else: return False def exec_argv(argv, strip=True, shell=False): ''' Executes a command line given as an argv table and either: - raises an exception if the return code is not 0 - returns the output If strip is True, then output lines will be stripped. If shell is True, the argv must be a string that will be evaluated by the shell, instead of the argv list. ''' process = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell) (stdoutdata, stderrdata) = process.communicate() # Remove indents, trailing space and empty lines in output. if strip: stdoutdata = "\n".join([line.strip() for line in stdoutdata.split("\n") if line.strip()]) stderrdata = "\n".join([line.strip() for line in stderrdata.split("\n") if line.strip()]) if process.returncode != 0: raise RTSLibError(stderrdata) else: return stdoutdata def list_eth_names(max_eth=1024): ''' List the names of the first max_eth local ethernet interfaces from the SIOCGIFCONF struct. ''' SIOCGIFCONF = 0x8912 if os.uname()[4].endswith("_64"): offset = 40 else: offset = 32 bytes = 32 * max_eth sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) ifaces = array('B', '\0' * bytes) packed = pack('iL', bytes, ifaces.buffer_info()[0]) outbytes = unpack('iL', ioctl(sock.fileno(), SIOCGIFCONF, packed))[0] names = ifaces.tostring() return [names[i:i+offset].split('\0', 1)[0] for i in range(0, outbytes, offset)] def list_eth_ips(ifnames=None): ''' List the IPv4 and IPv6 non-loopback, non link-local addresses (in the RFC3330 sense, not addresses attached to lo) of a list of ethernet interfaces from the SIOCGIFADDR struct. If ifnames is omitted, list all IPs of all ifaces except for lo.
''' if ifnames is None: ifnames = [iface for iface in list_eth_names() if iface != 'lo'] addrs = [] for iface in ifnames: ifaddresses = netifaces.ifaddresses(iface) if netifaces.AF_INET in ifaddresses: addrs.extend(addr['addr'] for addr in ifaddresses[netifaces.AF_INET] if not addr['addr'].startswith('127.')) if netifaces.AF_INET6 in ifaddresses: addrs.extend(addr['addr'] for addr in ifaddresses[netifaces.AF_INET6] if not '%' in addr['addr'] if not addr['addr'].startswith('::')) return sorted(set(addrs)) def is_ipv4_address(addr): try: ipaddr.IPv4Address(addr) except: return False else: return True def is_ipv6_address(addr): try: ipaddr.IPv6Address(addr) except: return False else: return True def get_main_ip(): ''' Try to guess the local machine non-loopback IP. If available, local hostname resolution is used (if non-loopback), else try to find an other non-loopback IP on configured NICs. If no usable IP address is found, returns None. ''' # socket.gethostbyname does no have a timeout parameter # Let's use a thread to implement that in the background def start_thread(func): thread = Thread(target = func) thread.setDaemon(True) thread.start() def gethostbyname_timeout(hostname, timeout = 1): queue = Queue(1) def try_gethostbyname(hostname): try: hostname = socket.gethostbyname(hostname) except socket.gaierror: hostname = None return hostname def queue_try_gethostbyname(): queue.put(try_gethostbyname(hostname)) start_thread(queue_try_gethostbyname) try: result = queue.get(block = True, timeout = timeout) except Empty: result = None return result local_ips = list_eth_ips() # try to get a resolution in less than 1 second host_ip = gethostbyname_timeout(socket.gethostname()) # Put the host IP in first position of the IP list if it exists if host_ip in local_ips: local_ips.remove(host_ip) local_ips.insert(0, host_ip) for ip_addr in local_ips: if not ip_addr.startswith("127.") and ip_addr.strip(): return ip_addr return None def _test(): '''Run the doctests''' import doctest doctest.testmod() if __name__ == "__main__": _test() rtslib-2.2/COPYING0000644000175000017500000002363712176066620012126 0ustar rrsrrs Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. rtslib-2.2/setup.py0000755000175000017500000000216312176066620012577 0ustar rrsrrs#! /usr/bin/env python ''' This file is part of RTSLib. Copyright (c) 2011-2013 by Datera, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import re from distutils.core import setup import rtslib PKG = rtslib VERSION = str(PKG.__version__) (AUTHOR, EMAIL) = re.match('^(.*?)\s*<(.*)>$', PKG.__author__).groups() URL = PKG.__url__ LICENSE = PKG.__license__ SCRIPTS = [] DESCRIPTION = PKG.__description__ setup(name=PKG.__name__, description=DESCRIPTION, version=VERSION, author=AUTHOR, author_email=EMAIL, license=LICENSE, url=URL, scripts=SCRIPTS, packages=[PKG.__name__], package_data = {'':[]}) rtslib-2.2/rpm/0000755000175000017500000000000012176066620011656 5ustar rrsrrsrtslib-2.2/rpm/python-rtslib.spec.tmpl0000644000175000017500000000241112176066620016321 0ustar rrsrrs%define oname rtslib Name: python-rtslib License: Apache License 2.0 Group: System Environment/Libraries Summary: API for RisingTide Systems generic SCSI target. Version: VERSION Release: 1%{?dist} URL: http://www.risingtidesystems.com/git/ Source: %{oname}-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-rpmroot BuildArch: noarch BuildRequires: python-devel, epydoc Requires: python-configobj, python-netifaces, python-ipaddr Vendor: Datera, Inc. %description API for RisingTide Systems generic SCSI target.
%prep %setup -q -n %{oname}-%{version} %build %{__python} setup.py build mkdir -p doc epydoc --no-sourcecode --html -n %{oname} --exclude configobj %{oname}/*.py mv html doc/ %install rm -rf %{buildroot} %{__python} setup.py install --skip-build --root %{buildroot} --prefix usr mkdir -p %{buildroot}/var/target/fabric cp specs/* %{buildroot}/var/target/fabric mkdir -p %{buildroot}/usr/share/doc/python-rtslib-doc-%{version} cp -r doc/* %{buildroot}/usr/share/doc/python-rtslib-doc-%{version}/ %clean rm -rf %{buildroot} %files %defattr(-,root,root,-) %{python_sitelib} /var/target /usr/share/doc/python-rtslib-doc-%{version} %doc COPYING README %changelog rtslib-2.2/Makefile0000644000175000017500000001143612176066620012525 0ustar rrsrrs# This file is part of RTSLib. # Copyright (c) 2011-2013 by Datera, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NAME = rtslib GIT_BRANCH = $$(git branch | grep \* | tr -d \*) VERSION = $$(basename $$(git describe --tags | tr - .)) all: @echo "Usage:" @echo @echo " make deb - Builds debian packages." @echo " make rpm - Builds rpm packages." @echo " make release - Generates the release tarball." @echo @echo " make clean - Cleanup the local repository build files." @echo " make cleanall - Also remove dist/*" clean: @rm -fv ${NAME}/*.pyc ${NAME}/*.html @rm -frv doc @rm -frv ${NAME}.egg-info MANIFEST build @rm -frv debian/tmp @rm -fv build-stamp @rm -fv dpkg-buildpackage.log dpkg-buildpackage.version @rm -frv *.rpm @rm -fv debian/files debian/*.log debian/*.substvars @rm -frv debian/${NAME}-doc/ debian/python2.5-${NAME}/ @rm -frv debian/python2.6-${NAME}/ debian/python-${NAME}/ @rm -frv results @rm -fv rpm/*.spec *.spec rpm/sed* sed* @rm -frv ${NAME}-* @echo "Finished cleanup." cleanall: clean @rm -frv dist release: build/release-stamp build/release-stamp: @mkdir -p build @echo "Exporting the repository files..." @git archive ${GIT_BRANCH} --prefix ${NAME}-${VERSION}/ \ | (cd build; tar xfp -) @echo "Cleaning up the target tree..." @rm -f build/${NAME}-${VERSION}/Makefile @rm -f build/${NAME}-${VERSION}/.gitignore @echo "Fixing version string..." @sed -i "s/__version__ = .*/__version__ = '${VERSION}'/g" \ build/${NAME}-${VERSION}/${NAME}/__init__.py @echo "Generating rpm specfile from template..." @cd build/${NAME}-${VERSION}; \ for spectmpl in rpm/*.spec.tmpl; do \ sed -i "s/Version:\( *\).*/Version:\1${VERSION}/g" $${spectmpl}; \ mv $${spectmpl} $$(basename $${spectmpl} .tmpl); \ done; \ rm -r rpm @echo "Generating rpm changelog..." @( \ version=$$(basename $$(git describe HEAD --tags | tr - .)); \ author=$$(git show HEAD --format="format:%an <%ae>" -s); \ date=$$(git show HEAD --format="format:%ad" -s \ | awk '{print $$1,$$2,$$3,$$5}'); \ hash=$$(git show HEAD --format="format:%H" -s); \ echo '* '"$${date} $${author} $${version}-1"; \ echo " - Generated from git commit $${hash}."; \ ) >> $$(ls build/${NAME}-${VERSION}/*.spec) @echo "Generating debian changelog..." 
@( \ version=$$(basename $$(git describe HEAD --tags | tr - .)); \ author=$$(git show HEAD --format="format:%an <%ae>" -s); \ date=$$(git show HEAD --format="format:%aD" -s); \ day=$$(git show HEAD --format='format:%ai' -s \ | awk '{print $$1}' \ | awk -F '-' '{print $$3}' | sed 's/^0/ /g'); \ date=$$(echo $${date} \ | awk '{print $$1, "'"$${day}"'", $$3, $$4, $$5, $$6}'); \ hash=$$(git show HEAD --format="format:%H" -s); \ echo "${NAME} ($${version}) unstable; urgency=low"; \ echo; \ echo " * Generated from git commit $${hash}."; \ echo; \ echo " -- $${author} $${date}"; \ echo; \ ) > build/${NAME}-${VERSION}/debian/changelog @find build/${NAME}-${VERSION}/ -exec \ touch -t $$(date -d @$$(git show -s --format="format:%at") \ +"%Y%m%d%H%M.%S") {} \; @mkdir -p dist @cd build; tar -c --owner=0 --group=0 --numeric-owner \ --format=gnu -b20 --quoting-style=escape \ -f ../dist/${NAME}-${VERSION}.tar \ $$(find ${NAME}-${VERSION} -type f | sort) @gzip -6 -n dist/${NAME}-${VERSION}.tar @echo "Generated release tarball:" @echo " $$(ls dist/${NAME}-${VERSION}.tar.gz)" @touch build/release-stamp deb: release build/deb-stamp build/deb-stamp: @echo "Building debian packages..." @cd build/${NAME}-${VERSION}; \ dpkg-buildpackage -rfakeroot -us -uc @mv build/*_${VERSION}_*.deb dist/ @echo "Generated debian packages:" @for pkg in $$(ls dist/*_${VERSION}_*.deb); do echo " $${pkg}"; done @touch build/deb-stamp rpm: release build/rpm-stamp build/rpm-stamp: @echo "Building rpm packages..." @mkdir -p build/rpm @build=$$(pwd)/build/rpm; dist=$$(pwd)/dist/; rpmbuild \ --define "_topdir $${build}" --define "_sourcedir $${dist}" \ --define "_rpmdir $${build}" --define "_builddir $${build}" \ --define "_srcrpmdir $${build}" -ba build/${NAME}-${VERSION}/*.spec @mv build/rpm/*-${VERSION}*.src.rpm dist/ @mv build/rpm/*/*-${VERSION}*.rpm dist/ @echo "Generated rpm packages:" @for pkg in $$(ls dist/*-${VERSION}*.rpm); do echo " $${pkg}"; done @touch build/rpm-stamp rtslib-2.2/README0000644000175000017500000000121512176066620011737 0ustar rrsrrsRTSLib Community Edition is a Python library that provides an object API to RisingTide Systems generic SCSI Target as well as third-party target fabric modules written for it and backend storage objects. The latest version of this program can be obtained at: http://www.risingtidesystems.com/git/ The git repository can be directly accessed from: git://risingtidesystems.com/rtslib.git git://linux-iscsi.org/rtslib.git It is useful for developing 3rd-party applications, as well as serving as a foundation for RisingTide Systems userspace tools. For more information, see the rtslib API reference, available in html format as a separate package. rtslib-2.2/specs/0000755000175000017500000000000012176066620012175 5ustar rrsrrsrtslib-2.2/specs/loopback.spec0000644000175000017500000000035112176066620014642 0ustar rrsrrs# The tcm_loop fabric module specfile. # # The fabric module feature set features = nexus # Use naa WWNs. wwn_type = naa # Non-standard module naming scheme kernel_module = tcm_loop # The configfs group configfs_group = loopback rtslib-2.2/specs/ib_srpt.spec0000644000175000017500000000072612176066620014520 0ustar rrsrrs# The ib_srpt fabric module specfile.
# # The fabric module feature set features = acls # Non-standard module naming scheme kernel_module = ib_srpt # The module uses hardware addresses from there wwn_from_files = /sys/class/infiniband/*/ports/*/gids/0 # Transform 'fe80:0000:0000:0000:0002:1903:000e:8acd' WWN notation to # '0xfe8000000000000000021903000e8acd' wwn_from_files_filter = "sed -e s/fe80/0xfe80/ -e 's/\://g'" # The configfs group configfs_group = srpt rtslib-2.2/specs/iscsi.spec0000644000175000017500000000050512176066620014163 0ustar rrsrrs# The iscsi fabric module specfile. # # The iscsi fabric module feature set. features = discovery_auth, acls, acls_auth, nps, tpgts # Obviously, this module uses IQN strings as WWNs. wwn_type = iqn # This is default too # kernel_module = iscsi_target_mod # The configfs group name, default too # configfs_group = iscsi rtslib-2.2/specs/vhost.spec0000644000175000017500000000030412176066620014211 0ustar rrsrrs# The fabric module feature set features = nexus, tpgts # Use naa WWNs. wwn_type = naa # Non-standard module naming scheme kernel_module = tcm_vhost # The configfs group configfs_group = vhost rtslib-2.2/specs/README0000644000175000017500000001021612176066620013055 0ustar rrsrrsThis directory (normally /var/target/fabric) contains the spec files for RisingTide Systems' LIO SCSI target subsystem fabric modules. Each spec file should be named MODULE.spec, where MODULE is the name by which the fabric module is to be referred. It contains a series of KEY = VALUE pairs, one per line. KEY is an alphanumeric (no spaces) string. VALUE can be anything. Quotes can be used for strings, but are not mandatory. Lists of VALUEs are comma-separated. Syntax ------ * Strings String values can either be enclosed in double quotes or not. These examples are equivalent: kernel_module = "my_module" kernel_module = my_module * Lists Lists are comma-separated lists of values. If you want to use a comma in a string, use double quotes. Example: my_string = value1, value2, "value3, with comma", value4 * Comments All lines beginning with a pound sign (#) will be ignored. Empty lines will be ignored too. Available keys -------------- * features Lists the target fabric's available features. Default value: discovery_auth, acls, acls_auth, nps Example: features = discovery_auth, acls, acls_auth Detail of features: * tpgts The target fabric module is using iSCSI-style target portal group tags. * discovery_auth The target fabric module supports a fabric-wide authentication for discovery. * acls The target's TPGTs do support explicit initiator ACLs. * acls_auth The target's TPGT's ACLs do support per-ACL initiator authentication. * nps The TPGTs do support iSCSI-like IPv4/IPv6 network portals, using IP:PORT group names. * nexus The TPGTs do have a 'nexus' attribute that contains the local initiator serial unit. This attribute must be set before being able to create any LUNs. * wwn_type Sets the type of WWN expected by the target fabric. Defaults to 'free'. Example: wwn_type = iqn Current valid types are: * free Freeform WWN. * iqn The fabric module targets are using iSCSI-type IQNs. * naa NAA SAS address type WWN. * unit_serial Disk-type unit serial. * wwn_from_files In some cases, and independently from the wwn type, the target WWNs must be picked from a list of existing ones, the most obvious case being hardware-set WWNs. Only the WWNs both matching the wwn_type (after filtering if set, see below) and fetched from the specified files will be allowed for targets.
The value of this key is a list (one or more, comma-separated) of UNIX style pathname patterns: * and ? wildcards can be used, and character ranges expressed with [] will be correctly expanded. Each file is assumed to contain one or more WWNs, and line ends, spaces, tabs and null (\0) will be considered as separator chars. Example: wwn_from_files = /sys/class/fc_host/host[0-9]/port_name * wwn_from_files_filter Empty by default, this one allows specifying a shell command to which each WWN from files will be fed, and the output of the filter will be used as the final WWN to use. Examples: wwn_from_files_filter = "sed -e s/0x// -e 's/../&:/g' -e s/:$//" wwn_from_files_filter = "sed -e s/0x// -e 's/../&:/g' -e s/:$// | tr [a-z] [A-Z]" The first example transforms strings like '0x21000024ff314c48' into '21:00:00:24:ff:31:4c:48', the second one also shifts lower case into upper case, demonstrating that you can pipe as many commands as you want into one another. * wwn_from_cmds Same as wwn_from_files, but instead of taking a list of file patterns, takes a list of shell commands. Each command's output will be considered as a list of WWNs to be used, separated by line ends, spaces, tabs and null (\0) characters. * wwn_from_cmds_filter Same as wwn_from_files_filter, but filters/transforms the WWNs obtained from the results of the wwn_from_cmds shell commands. * kernel_module Sets the name of the kernel module implementing the fabric module. If not specified, it will be assumed to be MODULE_target_mod, where MODULE is the name of the fabric module, as used to name the spec file. Note that you must not specify any .ko or such extension here. Example: kernel_module = my_module * configfs_group Sets the name of the configfs group used by the fabric module. Defaults to the name of the module as used to name the spec file. Example: configfs_group = iscsi rtslib-2.2/specs/tcm_fc.spec0000644000175000017500000000065712176066620014314 0ustar rrsrrs# The tcm_fc fabric module specfile. # # The fabric module feature set features = acls # Non-standard module naming scheme kernel_module = tcm_fc # The module uses hardware addresses from there wwn_from_files = /sys/class/fc_host/host*/port_name # Transform '0x1234567812345678' WWN notation to '12:34:56:78:12:34:56:78' wwn_from_files_filter = "sed -e s/0x// -e 's/../&:/g' -e s/:$//" # The configfs group configfs_group = fc rtslib-2.2/specs/qla2xxx.spec0000644000175000017500000000062412176066620014462 0ustar rrsrrs# The qla2xxx fabric module specfile. # # The qla2xxx fabric module feature set features = acls # Non-standard module naming scheme kernel_module = tcm_qla2xxx # The module uses hardware addresses from there wwn_from_files = /sys/class/fc_host/host*/port_name # Transform '0x1234567812345678' WWN notation to '12:34:56:78:12:34:56:78' wwn_from_files_filter = "sed -e s/0x// -e 's/../&:/g' -e s/:$//" rtslib-2.2/specs/example.spec.txt0000644000175000017500000000062712176066620015327 0ustar rrsrrs# Example LIO target fabric module. # # The example fabric module uses the default feature set. # features = discovery_auth, acls, acls_auth, nps # This module uses anything as WWNs. wwn_type = free # Convoluted kernel module name. Default would be example_target_mod kernel_module = my_complex_kernel_module_name # The configfs group name.
Default would be "example" configfs_group = "example_group" rtslib-2.2/.gitignore0000644000175000017500000000062412176066620013052 0ustar rrsrrsdebian/changelog dpkg-buildpackage.log dpkg-buildpackage.version *.swp *.swo build-stamp build/* debian/files debian/python-rtslib.debhelper.log debian/python-rtslib.substvars debian/python-rtslib/ debian/rtslib-doc.debhelper.log debian/rtslib-doc.substvars debian/rtslib-doc/ debian/tmp/ dist/* doc/* *.pyc debian/python-rtslib.substvars debian/rtslib-doc.debhelper.log debian/tmp/ *.spec rtslib-*
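Note on the spec file format: specs/README above describes a plain-text format of KEY = VALUE pairs with optional double quotes, comma-separated lists and '#' comments. The sketch below is only an illustration of how a file in that format could be read from Python; it is not the parser shipped with rtslib (the rpm spec's python-configobj requirement suggests ConfigObj handles this in the library), it does not handle commas inside quoted strings, and the parse_spec name and the path in the usage example are illustrative only (the path matches where the rpm spec installs the shipped spec files).

def parse_spec(path):
    '''
    Minimal, illustrative reader for the "KEY = VALUE" spec format described
    in specs/README. Returns a dict mapping keys to a string, or to a list of
    strings when the value is comma-separated. Lines starting with '#' and
    empty lines are ignored. Double quotes around items are stripped, but
    commas inside quoted strings are NOT handled by this sketch.
    '''
    spec = {}
    with open(path) as spec_file:
        for line in spec_file:
            line = line.strip()
            # Skip comments and empty lines, as allowed by the format.
            if not line or line.startswith('#'):
                continue
            key, _, value = line.partition('=')
            # Comma-separated values make a list; quotes are optional.
            items = [item.strip().strip('"') for item in value.split(',')]
            spec[key.strip()] = items if len(items) > 1 else items[0]
    return spec

if __name__ == '__main__':
    # Hypothetical usage: dump the iscsi fabric spec installed by the package.
    import pprint
    pprint.pprint(parse_spec('/var/target/fabric/iscsi.spec'))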