diff --git a/Makefile.am b/Makefile.am index e427760cd17fb0da912a0e01b91d0fc2f06ab181..01df96a3ef836b7e66877a0d08a2153fa0aa32c9 100644 --- a/Makefile.am +++ b/Makefile.am @@ -344,7 +344,7 @@ apidoc: CDIR=`pwd` && \ cd $$TMPDIR && \ mv lib ganeti && \ - epydoc --conf $$CDIR/epydoc.conf -o $$CDIR/doc/api \ + epydoc -v --conf $$CDIR/epydoc.conf -o $$CDIR/doc/api \ ) ; \ rm -rf $$TMPDIR ; \ } diff --git a/daemons/ganeti-masterd b/daemons/ganeti-masterd index c8f220b3fcc27ba61a63212eb9ba49b014c10870..050a4841b2fbd984a7973548eb7e62e7c6773a71 100755 --- a/daemons/ganeti-masterd +++ b/daemons/ganeti-masterd @@ -89,9 +89,8 @@ class IOServer(SocketServer.UnixStreamServer): def __init__(self, address, rqhandler): """IOServer constructor - Args: - address: the address to bind this IOServer to - rqhandler: RequestHandler type object + @param address: the address to bind this IOServer to + @param rqhandler: RequestHandler type object """ SocketServer.UnixStreamServer.__init__(self, address, rqhandler) @@ -348,8 +347,7 @@ class GanetiContext(object): def ParseOptions(): """Parse the command line options. - Returns: - (options, args) as from OptionParser.parse_args() + @return: (options, args) as from OptionParser.parse_args() """ parser = OptionParser(description="Ganeti master daemon", diff --git a/daemons/ganeti-noded b/daemons/ganeti-noded index 10705333fdee236399252c9293433d4e34c4eff1..187f77eabcb157569e97e30c733e5d5df10abf92 100755 --- a/daemons/ganeti-noded +++ b/daemons/ganeti-noded @@ -647,8 +647,7 @@ class NodeHttpServer(http.server.HttpServer): def ParseOptions(): """Parse the command line options. 
- Returns: - (options, args) as from OptionParser.parse_args() + @return: (options, args) as from OptionParser.parse_args() """ parser = OptionParser(description="Ganeti node daemon", diff --git a/daemons/ganeti-rapi b/daemons/ganeti-rapi index 1522871057e3214c91f04544bd662bd2f4d2c63b..bfee078c3e165bcf4b7f03c213fe90cb994d48a1 100755 --- a/daemons/ganeti-rapi +++ b/daemons/ganeti-rapi @@ -75,8 +75,7 @@ class RemoteApiHttpServer(http.server.HttpServer): def ParseOptions(): """Parse the command line options. - Returns: - (options, args) as from OptionParser.parse_args() + @return: (options, args) as from OptionParser.parse_args() """ parser = optparse.OptionParser(description="Ganeti Remote API", diff --git a/daemons/ganeti-watcher b/daemons/ganeti-watcher index c3f4ac990af476a79f0ae377ebee9e29efce86b2..61bd384fc414e2cd108bbf8ddae838b421d11214 100755 --- a/daemons/ganeti-watcher +++ b/daemons/ganeti-watcher @@ -62,9 +62,8 @@ class NotMasterError(errors.GenericError): def Indent(s, prefix='| '): """Indent a piece of text with a given prefix before each line. - Args: - s: The string to indent - prefix: The string to prepend each line. + @param s: the string to indent + @param prefix: the string to prepend each line """ return "%s%s\n" % (prefix, ('\n' + prefix).join(s.splitlines())) @@ -158,8 +157,8 @@ class WatcherState(object): def NumberOfRestartAttempts(self, instance): """Returns number of previous restart attempts. - Args: - instance - the instance to look up. + @type instance: L{Instance} + @param instance: the instance to look up """ idata = self._data["instance"] @@ -172,8 +171,8 @@ class WatcherState(object): def RecordRestartAttempt(self, instance): """Record a restart attempt. 
- Args: - instance - the instance being restarted + @type instance: L{Instance} + @param instance: the instance being restarted """ idata = self._data["instance"] @@ -187,12 +186,13 @@ class WatcherState(object): inst[KEY_RESTART_COUNT] = inst.get(KEY_RESTART_COUNT, 0) + 1 def RemoveInstance(self, instance): - """Update state to reflect that a machine is running, i.e. remove record. + """Update state to reflect that a machine is running. - Args: - instance - the instance to remove from books + This method removes the record for a named instance (as we only + track down instances). - This method removes the record for a named instance. + @type instance: L{Instance} + @param instance: the instance to remove from books """ idata = self._data["instance"] @@ -204,9 +204,6 @@ class WatcherState(object): class Instance(object): """Abstraction for a Virtual Machine instance. - Methods: - Restart(): issue a command to restart the represented machine. - """ def __init__(self, name, state, autostart): self.name = name @@ -399,8 +396,7 @@ class Watcher(object): def ParseOptions(): """Parse the command line options. - Returns: - (options, args) as from OptionParser.parse_args() + @return: (options, args) as from OptionParser.parse_args() """ parser = OptionParser(description="Ganeti cluster watcher", diff --git a/lib/backend.py b/lib/backend.py index 7a9e0d8419cf88b1d7ee606ad6a5d00af1815d35..dd4788a78a2b5fada115d4b8202aaded70d09dd6 100644 --- a/lib/backend.py +++ b/lib/backend.py @@ -279,7 +279,7 @@ def LeaveCluster(): from the cluster. If processing is successful, then it raises an - L{errors.GanetiQuitException} which is used as a special case to + L{errors.QuitGanetiException} which is used as a special case to shutdown the node daemon. """ @@ -970,7 +970,7 @@ def RemoveBlockDevice(disk): @note: This is intended to be called recursively. 
- @type disk: L{objects.disk} + @type disk: L{objects.Disk} @param disk: the disk object we should remove @rtype: boolean @return: the success of the operation @@ -1069,8 +1069,9 @@ def AssembleBlockDevice(disk, owner, as_primary): def ShutdownBlockDevice(disk): """Shut down a block device. - First, if the device is assembled (can L{Attach()}), then the device - is shutdown. Then the children of the device are shutdown. + First, if the device is assembled (Attach() is successfull), then + the device is shutdown. Then the children of the device are + shutdown. This function is called recursively. Note that we don't cache the children or such, as oppossed to assemble, shutdown of different @@ -1161,7 +1162,7 @@ def GetMirrorStatus(disks): @rtype: disk @return: a list of (mirror_done, estimated_time) tuples, which - are the result of L{bdev.BlockDevice.CombinedSyncStatus} + are the result of L{bdev.BlockDev.CombinedSyncStatus} @raise errors.BlockDeviceError: if any of the disks cannot be found @@ -1981,7 +1982,7 @@ def JobQueueUpdate(file_name, content): def JobQueueRename(old, new): """Renames a job queue file. - This is just a wrapper over L{os.rename} with proper checking. + This is just a wrapper over os.rename with proper checking. @type old: str @param old: the old (actual) file name @@ -2305,7 +2306,7 @@ class DevCacheManager(object): node nor not @type iv_name: str @param iv_name: the instance-visible name of the - device, as in L{objects.Disk.iv_name} + device, as in objects.Disk.iv_name @rtype: None diff --git a/lib/bdev.py b/lib/bdev.py index e6291e137303f5435c5a49f218b03e82fb21eb11..f29da8fe1dab95d46559826651c4e717c9a407f0 100644 --- a/lib/bdev.py +++ b/lib/bdev.py @@ -202,9 +202,6 @@ class BlockDev(object): If this device is a mirroring device, this function returns the status of the mirror. - Returns: - (sync_percent, estimated_time, is_degraded, ldisk) - If sync_percent is None, it means the device is not syncing. 
If estimated_time is None, it means we can't estimate @@ -218,6 +215,9 @@ class BlockDev(object): data. This is only valid for some devices, the rest will always return False (not degraded). + @rtype: tuple + @return: (sync_percent, estimated_time, is_degraded, ldisk) + """ return None, None, False, False @@ -259,10 +259,7 @@ class BlockDev(object): def Grow(self, amount): """Grow the block device. - Arguments: - amount: the amount (in mebibytes) to grow with - - Returns: None + @param amount: the amount (in mebibytes) to grow with """ raise NotImplementedError @@ -326,11 +323,10 @@ class LogicalVolume(BlockDev): def GetPVInfo(vg_name): """Get the free space info for PVs in a volume group. - Args: - vg_name: the volume group name + @param vg_name: the volume group name - Returns: - list of (free_space, name) with free_space in mebibytes + @rtype: list + @return: list of tuples (free_space, name) with free_space in mebibytes """ command = ["pvs", "--noheadings", "--nosuffix", "--units=m", @@ -456,9 +452,6 @@ class LogicalVolume(BlockDev): If this device is a mirroring device, this function returns the status of the mirror. - Returns: - (sync_percent, estimated_time, is_degraded, ldisk) - For logical volumes, sync_percent and estimated_time are always None (no recovery in progress, as we don't handle the mirrored LV case). The is_degraded parameter is the inverse of the ldisk @@ -472,6 +465,9 @@ class LogicalVolume(BlockDev): The status was already read in Attach, so we just return it. + @rtype: tuple + @return: (sync_percent, estimated_time, is_degraded, ldisk) + """ return None, None, self._degraded, self._degraded @@ -642,8 +638,8 @@ class BaseDRBD(BlockDev): def _MassageProcData(data): """Transform the output of _GetProdData into a nicer form. 
- Returns: - a dictionary of minor: joined lines from /proc/drbd for that minor + @return: a dictionary of minor: joined lines from /proc/drbd + for that minor """ lmatch = re.compile("^ *([0-9]+):.*$") @@ -669,12 +665,12 @@ class BaseDRBD(BlockDev): """Return the DRBD version. This will return a dict with keys: - k_major, - k_minor, - k_point, - api, - proto, - proto2 (only on drbd > 8.2.X) + - k_major + - k_minor + - k_point + - api + - proto + - proto2 (only on drbd > 8.2.X) """ proc_data = cls._GetProcData() @@ -1176,8 +1172,6 @@ class DRBD8(BaseDRBD): def GetSyncStatus(self): """Returns the sync status of the device. - Returns: - (sync_percent, estimated_time, is_degraded) If sync_percent is None, it means all is ok If estimated_time is None, it means we can't esimate @@ -1190,6 +1184,9 @@ class DRBD8(BaseDRBD): We compute the ldisk parameter based on wheter we have a local disk or not. + @rtype: tuple + @return: (sync_percent, estimated_time, is_degraded, ldisk) + """ if self.minor is None and not self.Attach(): raise errors.BlockDeviceError("Can't attach to device in GetSyncStatus") @@ -1504,8 +1501,8 @@ class FileStorage(BlockDev): def Remove(self): """Remove the file backing the block device. - Returns: - boolean indicating wheter removal of file was successful or not. + @rtype: boolean + @return: True if the removal was successful """ if not os.path.exists(self.dev_path): @@ -1522,8 +1519,8 @@ class FileStorage(BlockDev): Check if this file already exists. - Returns: - boolean indicating if file exists or not. + @rtype: boolean + @return: True if file exists """ self.attached = os.path.exists(self.dev_path) @@ -1533,12 +1530,10 @@ class FileStorage(BlockDev): def Create(cls, unique_id, children, size): """Create a new file. - Args: - children: - size: integer size of file in MiB + @param size: the size of file in MiB - Returns: - A ganeti.bdev.FileStorage object. 
@rtype: L{FileStorage}
arguments + """Parser for the command line arguments. - Arguments: - argv: the command line + This function parses the arguements and returns the function which + must be executed together with its (modified) arguments. - commands: dictionary with special contents, see the design doc for - cmdline handling - aliases: dictionary with command aliases {'alias': 'target, ...} + @param argv: the command line + @param commands: dictionary with special contents, see the design + doc for cmdline handling + @param aliases: dictionary with command aliases {'alias': 'target, ...} """ if len(argv) == 0: @@ -439,17 +439,16 @@ def UsesRPC(fn): def AskUser(text, choices=None): """Ask the user a question. - Args: - text - the question to ask. + @param text: the question to ask - choices - list with elements tuples (input_char, return_value, - description); if not given, it will default to: [('y', True, - 'Perform the operation'), ('n', False, 'Do no do the operation')]; - note that the '?' char is reserved for help + @param choices: list with elements tuples (input_char, return_value, + description); if not given, it will default to: [('y', True, + 'Perform the operation'), ('n', False, 'Do no do the operation')]; + note that the '?' char is reserved for help - Returns: one of the return values from the choices list; if input is - not possible (i.e. not running with a tty, we return the last entry - from the list + @return: one of the return values from the choices list; if input is + not possible (i.e. not running with a tty, we return the last + entry from the list """ if choices is None: diff --git a/lib/config.py b/lib/config.py index b74d68d64c127cfa8a1a5a07d7119a3eedbebd71..b1661fd87aa533bc437f7bd68d4239cd7dbccd09 100644 --- a/lib/config.py +++ b/lib/config.py @@ -49,6 +49,14 @@ _config_lock = locking.SharedLock() def _ValidateConfig(data): + """Verifies that a configuration objects looks valid. + + This only verifies the version of the configuration. 
+ + @raise errors.ConfigurationError: if the version differs from what + we expect + + """ if data.version != constants.CONFIG_VERSION: raise errors.ConfigurationError("Cluster configuration version" " mismatch, got %s instead of %s" % @@ -155,13 +163,13 @@ class ConfigWriter: This checks the current node, instances and disk names for duplicates. - Args: - - exceptions: a list with some other names which should be checked - for uniqueness (used for example when you want to get - more than one id at one time without adding each one in - turn to the config file + @param exceptions: a list with some other names which should be checked + for uniqueness (used for example when you want to get + more than one id at one time without adding each one in + turn to the config file) - Returns: the unique id as a string + @rtype: string + @return: the unique id """ existing = set() @@ -185,6 +193,9 @@ class ConfigWriter: def _AllMACs(self): """Return all MACs present in the config. + @rtype: list + @return: the list of all MACs + """ result = [] for instance in self._config_data.instances.values(): @@ -196,6 +207,9 @@ class ConfigWriter: def _AllDRBDSecrets(self): """Return all DRBD secrets present in the config. + @rtype: list + @return: the list of all DRBD secrets + """ def helper(disk, result): """Recursively gather secrets from this disk.""" @@ -377,9 +391,9 @@ class ConfigWriter: def _ComputeDRBDMap(self, instance): """Compute the used DRBD minor/nodes. - Return: dictionary of node_name: dict of minor: instance_name. The - returned dict will have all the nodes in it (even if with an empty - list). + @return: dictionary of node_name: dict of minor: instance_name; + the returned dict will have all the nodes in it (even if with + an empty list). """ def _AppendUsedPorts(instance_name, disk, used): @@ -521,9 +535,9 @@ class ConfigWriter: def GetHostKey(self): """Return the rsa hostkey from the config. 
- Args: None + @rtype: string + @return: the rsa hostkey - Returns: rsa hostkey """ return self._config_data.cluster.rsahostkeypub @@ -533,8 +547,9 @@ class ConfigWriter: This should be used after creating a new instance. - Args: - instance: the instance object + @type instance: L{objects.Instance} + @param instance: the instance object + """ if not isinstance(instance, objects.Instance): raise errors.ProgrammerError("Invalid type passed to AddInstance") @@ -628,9 +643,8 @@ class ConfigWriter: def GetInstanceList(self): """Get the list of instances. - Returns: - array of instances, ex. ['instance2.example.com','instance1.example.com'] - these contains all the instances, also the ones in Admin_down state + @return: array of instances, ex. ['instance2.example.com', + 'instance1.example.com'] """ return self._UnlockedGetInstanceList() @@ -661,11 +675,11 @@ class ConfigWriter: It takes the information from the configuration file. Other informations of an instance are taken from the live systems. - Args: - instance: name of the instance, ex instance1.example.com + @param instance_name: name of the instance, e.g. + I{instance1.example.com} - Returns: - the instance object + @rtype: L{objects.Instance} + @return: the instance object """ return self._UnlockedGetInstanceInfo(instance_name) @@ -687,8 +701,8 @@ class ConfigWriter: def AddNode(self, node): """Add a node to the configuration. - Args: - node: an object.Node instance + @type node: L{objects.Node} + @param node: a Node instance """ logging.info("Adding node %s to configuration" % node.name) @@ -723,11 +737,13 @@ class ConfigWriter: def _UnlockedGetNodeInfo(self, node_name): """Get the configuration of a node, as stored in the config. - This function is for internal use, when the config lock is already held. + This function is for internal use, when the config lock is already + held. - Args: node: nodename (tuple) of the node + @param node_name: the node name, e.g. 
I{node1.example.com} - Returns: the node object + @rtype: L{objects.Node} + @return: the node object """ if node_name not in self._config_data.nodes: @@ -740,9 +756,12 @@ class ConfigWriter: def GetNodeInfo(self, node_name): """Get the configuration of a node, as stored in the config. - Args: node: nodename (tuple) of the node + This is just a locked wrapper over L{_UnlockedGetNodeInfo}. - Returns: the node object + @param node_name: the node name, e.g. I{node1.example.com} + + @rtype: L{objects.Node} + @return: the node object """ return self._UnlockedGetNodeInfo(node_name) @@ -750,7 +769,10 @@ class ConfigWriter: def _UnlockedGetNodeList(self): """Return the list of nodes which are in the configuration. - This function is for internal use, when the config lock is already held. + This function is for internal use, when the config lock is already + held. + + @rtype: list """ return self._config_data.nodes.keys() @@ -846,10 +868,6 @@ class ConfigWriter: def _OpenConfig(self): """Read the config data from disk. - In case we already have configuration data and the config file has - the same mtime as when we read it, we skip the parsing of the - file, since de-serialisation could be slow. - """ f = open(self._cfg_file, 'r') try: @@ -1026,8 +1044,8 @@ class ConfigWriter: def GetClusterInfo(self): """Returns informations about the cluster - Returns: - the cluster object + @rtype: L{objects.Cluster} + @return: the cluster object """ return self._config_data.cluster @@ -1042,6 +1060,10 @@ class ConfigWriter: that all modified objects will be saved, but the target argument is the one the caller wants to ensure that it's saved. 
+ @param target: an instance of either L{objects.Cluster}, + L{objects.Node} or L{objects.Instance} which is existing in + the cluster + """ if self._config_data is None: raise errors.ProgrammerError("Configuration file not read," diff --git a/lib/errors.py b/lib/errors.py index 6efd9aa052836c38dbcaa3d8da169cec2d7dd2ca..4f3d86d5ae396448b7478d521827812aa971def6 100644 --- a/lib/errors.py +++ b/lib/errors.py @@ -228,7 +228,8 @@ class QuitGanetiException(Exception): error should returned to the caller, and the second one will be the returned result (either as an error or as a normal result). - Examples: + Examples:: + # Return a result of "True" to the caller, but quit ganeti afterwards raise QuitGanetiException(False, True) # Send an error to the caller, and quit ganeti diff --git a/lib/http/server.py b/lib/http/server.py index cee77bb87c50f88dc3f89b27f4c215dceaa98bda..f94baaefade546760b682b0ad170efe18367952e 100644 --- a/lib/http/server.py +++ b/lib/http/server.py @@ -393,7 +393,7 @@ class HttpServer(http.HttpSocketBase): @type mainloop: ganeti.daemon.Mainloop @param mainloop: Mainloop used to poll for I/O events - @type local_addess: string + @type local_address: string @param local_address: Local IP address to bind to @type port: int @param port: TCP port to listen on diff --git a/lib/hypervisor/hv_base.py b/lib/hypervisor/hv_base.py index ad760f6ae5fff844fec7678144f21f8f45635756..0962004f50a9fef3e1157abdfe1a814b7eed4afc 100644 --- a/lib/hypervisor/hv_base.py +++ b/lib/hypervisor/hv_base.py @@ -57,11 +57,9 @@ class BaseHypervisor(object): def GetInstanceInfo(self, instance_name): """Get instance properties. - Args: - instance_name: the instance name + @param instance_name: the instance name - Returns: - (name, id, memory, vcpus, state, times) + @return: tuple (name, id, memory, vcpus, state, times) """ raise NotImplementedError @@ -69,19 +67,18 @@ class BaseHypervisor(object): def GetAllInstancesInfo(self): """Get properties of all instances. 
- Returns: - [(name, id, memory, vcpus, stat, times),...] + @return: list of tuples (name, id, memory, vcpus, stat, times) + """ raise NotImplementedError def GetNodeInfo(self): """Return information about the node. - The return value is a dict, which has to have the following items: - (all values in MiB) - - memory_total: the total memory size on the node - - memory_free: the available memory on the node for instances - - memory_dom0: the memory used by the node itself, if available + @return: a dict with the following keys (values in MiB): + - memory_total: the total memory size on the node + - memory_free: the available memory on the node for instances + - memory_dom0: the memory used by the node itself, if available """ raise NotImplementedError diff --git a/lib/hypervisor/hv_fake.py b/lib/hypervisor/hv_fake.py index e688099b8eb0334b30007a67ab087ce5bece26be..53dc6264471cf408f8600e4e9afb57062c075185 100644 --- a/lib/hypervisor/hv_fake.py +++ b/lib/hypervisor/hv_fake.py @@ -56,11 +56,10 @@ class FakeHypervisor(hv_base.BaseHypervisor): def GetInstanceInfo(self, instance_name): """Get instance properties. - Args: - instance_name: the instance name + @param instance_name: the instance name + + @return: tuple of (name, id, memory, vcpus, stat, times) - Returns: - (name, id, memory, vcpus, stat, times) """ file_name = "%s/%s" % (self._ROOT_DIR, instance_name) if not os.path.exists(file_name): @@ -83,8 +82,8 @@ class FakeHypervisor(hv_base.BaseHypervisor): def GetAllInstancesInfo(self): """Get properties of all instances. - Returns: - [(name, id, memory, vcpus, stat, times),...] + @return: list of tuples (name, id, memory, vcpus, stat, times) + """ data = [] for file_name in os.listdir(self._ROOT_DIR): @@ -155,11 +154,10 @@ class FakeHypervisor(hv_base.BaseHypervisor): def GetNodeInfo(self): """Return information about the node. 
- The return value is a dict, which has to have the following items: - (all values in MiB) - - memory_total: the total memory size on the node - - memory_free: the available memory on the node for instances - - memory_dom0: the memory used by the node itself, if available + @return: a dict with the following keys (values in MiB): + - memory_total: the total memory size on the node + - memory_free: the available memory on the node for instances + - memory_dom0: the memory used by the node itself, if available """ # global ram usage from the xm info command diff --git a/lib/hypervisor/hv_kvm.py b/lib/hypervisor/hv_kvm.py index eea3d0ef305c6ad567760903fec5943e072670ec..a78bd56055b9268fffbf724a580cdb7f00b21e70 100644 --- a/lib/hypervisor/hv_kvm.py +++ b/lib/hypervisor/hv_kvm.py @@ -105,8 +105,8 @@ class KVMHypervisor(hv_base.BaseHypervisor): def ListInstances(self): """Get the list of running instances. - We can do this by listing our live instances directory and checking whether - the associated kvm process is still alive. + We can do this by listing our live instances directory and + checking whether the associated kvm process is still alive. """ result = [] @@ -119,11 +119,10 @@ class KVMHypervisor(hv_base.BaseHypervisor): def GetInstanceInfo(self, instance_name): """Get instance properties. - Args: - instance_name: the instance name + @param instance_name: the instance name + + @return: tuple (name, id, memory, vcpus, stat, times) - Returns: - (name, id, memory, vcpus, stat, times) """ pidfile = "%s/%s" % (self._PIDS_DIR, instance_name) pid = utils.ReadPidFile(pidfile) @@ -159,8 +158,8 @@ class KVMHypervisor(hv_base.BaseHypervisor): def GetAllInstancesInfo(self): """Get properties of all instances. - Returns: - [(name, id, memory, vcpus, stat, times),...] 
+ @return: list of tuples (name, id, memory, vcpus, stat, times) + """ data = [] for name in os.listdir(self._PIDS_DIR): @@ -288,11 +287,10 @@ class KVMHypervisor(hv_base.BaseHypervisor): def GetNodeInfo(self): """Return information about the node. - The return value is a dict, which has to have the following items: - (all values in MiB) - - memory_total: the total memory size on the node - - memory_free: the available memory on the node for instances - - memory_dom0: the memory used by the node itself, if available + @return: a dict with the following keys (values in MiB): + - memory_total: the total memory size on the node + - memory_free: the available memory on the node for instances + - memory_dom0: the memory used by the node itself, if available """ # global ram usage from the xm info command diff --git a/lib/hypervisor/hv_xen.py b/lib/hypervisor/hv_xen.py index 8dc48ddc5dc4139ac4b5e78af118d1da9d98b5f5..96a800fed87774971a539275dc48fb06b3820158 100644 --- a/lib/hypervisor/hv_xen.py +++ b/lib/hypervisor/hv_xen.py @@ -61,10 +61,10 @@ class XenHypervisor(hv_base.BaseHypervisor): def _GetXMList(include_node): """Return the list of running instances. - If the `include_node` argument is True, then we return information + If the include_node argument is True, then we return information for dom0 also, otherwise we filter that from the return value. - The return value is a list of (name, id, memory, vcpus, state, time spent) + @return: list of (name, id, memory, vcpus, state, time spent) """ for dummy in range(5): @@ -117,11 +117,10 @@ class XenHypervisor(hv_base.BaseHypervisor): def GetInstanceInfo(self, instance_name): """Get instance properties. 
- Args: - instance_name: the instance name + @param instance_name: the instance name + + @return: tuple (name, id, memory, vcpus, stat, times) - Returns: - (name, id, memory, vcpus, stat, times) """ xm_list = self._GetXMList(instance_name=="Domain-0") result = None @@ -134,14 +133,16 @@ class XenHypervisor(hv_base.BaseHypervisor): def GetAllInstancesInfo(self): """Get properties of all instances. - Returns: - [(name, id, memory, vcpus, stat, times),...] + @return: list of tuples (name, id, memory, vcpus, stat, times) + """ xm_list = self._GetXMList(False) return xm_list def StartInstance(self, instance, block_devices, extra_args): - """Start an instance.""" + """Start an instance. + + """ self._WriteConfigFile(instance, block_devices, extra_args) result = utils.RunCmd(["xm", "create", instance.name]) @@ -151,7 +152,9 @@ class XenHypervisor(hv_base.BaseHypervisor): result.output)) def StopInstance(self, instance, force=False): - """Stop an instance.""" + """Stop an instance. + + """ self._RemoveConfigFile(instance) if force: command = ["xm", "destroy", instance.name] @@ -164,7 +167,9 @@ class XenHypervisor(hv_base.BaseHypervisor): (instance.name, result.fail_reason)) def RebootInstance(self, instance): - """Reboot an instance.""" + """Reboot an instance. + + """ result = utils.RunCmd(["xm", "reboot", instance.name]) if result.failed: @@ -174,11 +179,10 @@ class XenHypervisor(hv_base.BaseHypervisor): def GetNodeInfo(self): """Return information about the node. 
- The return value is a dict, which has to have the following items: - (all values in MiB) - - memory_total: the total memory size on the node - - memory_free: the available memory on the node for instances - - memory_dom0: the memory used by the node itself, if available + @return: a dict with the following keys (values in MiB): + - memory_total: the total memory size on the node + - memory_free: the available memory on the node for instances + - memory_dom0: the memory used by the node itself, if available """ # note: in xen 3, memory has changed to total_memory @@ -233,15 +237,12 @@ class XenHypervisor(hv_base.BaseHypervisor): This method builds the xen config disk directive according to the given disk_template and block_devices. - Args: - disk_template: String containing instance disk template - block_devices: List[tuple1,tuple2,...] - tuple: (cfdev, rldev) - cfdev: dict containing ganeti config disk part - rldev: ganeti.bdev.BlockDev object + @param disk_template: string containing instance disk template + @param block_devices: list of tuples (cfdev, rldev): + - cfdev: dict containing ganeti config disk part + - rldev: ganeti.bdev.BlockDev object - Returns: - String containing disk directive for xen instance config file + @return: string containing disk directive for xen instance config file """ FILE_DRIVER_MAP = { diff --git a/lib/jqueue.py b/lib/jqueue.py index b3c7dbb316c8e5c7ee28ebf90f41b3ea884ccef3..2c2a5fab25576468e10724ecee0c2afceccc4fd5 100644 --- a/lib/jqueue.py +++ b/lib/jqueue.py @@ -1101,7 +1101,7 @@ class JobQueue(object): """Archives a job. @type job_id: string - @param job_id: Job ID of job to be archived. 
+ @param job_id: the ID of job to be archived """ logging.info("Archiving job %s", job_id) diff --git a/lib/locking.py b/lib/locking.py index fd84fe663d3087272fad8eb0cb18fbc29650c583..1e4f1d180d1e8a7f3f376c0d67c75331eeadffc6 100644 --- a/lib/locking.py +++ b/lib/locking.py @@ -104,11 +104,10 @@ class SharedLock: def _is_owned(self, shared=-1): """Is the current thread somehow owning the lock at this time? - Args: - shared: - < 0: check for any type of ownership (default) - 0: check for exclusive ownership - > 0: check for shared ownership + @param shared: + - < 0: check for any type of ownership (default) + - 0: check for exclusive ownership + - > 0: check for shared ownership """ self.__lock.acquire() @@ -123,8 +122,7 @@ class SharedLock: """Wait on the given condition, and raise an exception if the current lock is declared deleted in the meantime. - Args: - c: condition to wait on + @param c: the condition to wait on """ c.wait() @@ -158,11 +156,10 @@ class SharedLock: def acquire(self, blocking=1, shared=0): """Acquire a shared lock. - Args: - shared: whether to acquire in shared mode. By default an exclusive lock - will be acquired. - blocking: whether to block while trying to acquire or to operate in - try-lock mode. this locking mode is not supported yet. + @param shared: whether to acquire in shared mode; by default an + exclusive lock will be acquired + @param blocking: whether to block while trying to acquire or to + operate in try-lock mode (this locking mode is not supported yet) """ if not blocking: @@ -268,10 +265,9 @@ class SharedLock: acquired in exclusive mode if you don't already own it, then the lock will be put in a state where any future and pending acquire() fail. - Args: - blocking: whether to block while trying to acquire or to operate in - try-lock mode. this locking mode is not supported yet unless - you are already holding exclusively the lock. + @param blocking: whether to block while trying to acquire or to + operate in try-lock mode. 
this locking mode is not supported + yet unless you are already holding exclusively the lock. """ self.__lock.acquire() @@ -317,8 +313,7 @@ class LockSet: def __init__(self, members=None): """Constructs a new LockSet. - Args: - members: initial members of the set + @param members: initial members of the set """ # Used internally to guarantee coherency. @@ -406,21 +401,18 @@ class LockSet: def acquire(self, names, blocking=1, shared=0): """Acquire a set of resource locks. - Args: - names: the names of the locks which shall be acquired. - (special lock names, or instance/node names) - shared: whether to acquire in shared mode. By default an exclusive lock - will be acquired. - blocking: whether to block while trying to acquire or to operate in - try-lock mode. this locking mode is not supported yet. + @param names: the names of the locks which shall be acquired + (special lock names, or instance/node names) + @param shared: whether to acquire in shared mode; by default an + exclusive lock will be acquired + @param blocking: whether to block while trying to acquire or to + operate in try-lock mode (this locking mode is not supported yet) - Returns: - True: when all the locks are successfully acquired + @return: True when all the locks are successfully acquired - Raises: - errors.LockError: when any lock we try to acquire has been deleted - before we succeed. In this case none of the locks requested will be - acquired. + @raise errors.LockError: when any lock we try to acquire has + been deleted before we succeed. In this case none of the + locks requested will be acquired. """ if not blocking: @@ -520,9 +512,8 @@ class LockSet: You must have acquired the locks, either in shared or in exclusive mode, before releasing them. - Args: - names: the names of the locks which shall be released. - (defaults to all the locks acquired at that level). + @param names: the names of the locks which shall be released + (defaults to all the locks acquired at that level). 
""" assert self._is_owned(), "release() on lock set while not owner" @@ -554,10 +545,9 @@ class LockSet: def add(self, names, acquired=0, shared=0): """Add a new set of elements to the set - Args: - names: names of the new elements to add - acquired: pre-acquire the new resource? - shared: is the pre-acquisition shared? + @param names: names of the new elements to add + @param acquired: pre-acquire the new resource? + @param shared: is the pre-acquisition shared? """ # Check we don't already own locks at this level @@ -616,15 +606,14 @@ class LockSet: You can either not hold anything in the lockset or already hold a superset of the elements you want to delete, exclusively. - Args: - names: names of the resource to remove. - blocking: whether to block while trying to acquire or to operate in - try-lock mode. this locking mode is not supported yet unless - you are already holding exclusively the locks. + @param names: names of the resource to remove. + @param blocking: whether to block while trying to acquire or to + operate in try-lock mode (this locking mode is not supported + yet unless you are already holding exclusively the locks) - Returns: - A list of lock which we removed. The list is always equal to the names - list if we were holding all the locks exclusively. + @return:: a list of locks which we removed; the list is always + equal to the names list if we were holding all the locks + exclusively """ if not blocking and not self._is_owned(): @@ -712,12 +701,13 @@ class GanetiLockManager: There should be only a GanetiLockManager object at any time, so this function raises an error if this is not the case. 
- Args:
- nodes: list of node names
- instances: list of instance names
+ @param nodes: list of node names
+ @param instances: list of instance names

 """

- assert self.__class__._instance is None, "double GanetiLockManager instance"
+ assert self.__class__._instance is None, \
+ "double GanetiLockManager instance"
+
 self.__class__._instance = self

 # The keyring contains all the locks, at their level and in the correct
@@ -730,10 +720,10 @@ class GanetiLockManager:
 def _names(self, level):
 """List the lock names at the given level.

- Used for debugging/testing purposes.
- Args:
- level: the level whose list of locks to get
+ This can be used for debugging/testing purposes.
+
+ @param level: the level whose list of locks to get

 """
 assert level in LEVELS, "Invalid locking level %s" % level

@@ -770,8 +760,10 @@ class GanetiLockManager:
 return BGL in self.__keyring[LEVEL_CLUSTER]._list_owned()

 def _contains_BGL(self, level, names):
- """Check if acting on the given level and set of names will change the
- status of the Big Ganeti Lock.
+ """Check if the level contains the BGL.
+
+ Check if acting on the given level and set of names will change
+ the status of the Big Ganeti Lock.

 """
 return level == LEVEL_CLUSTER and (names is None or BGL in names)

@@ -779,15 +771,14 @@ class GanetiLockManager:
 def acquire(self, level, names, blocking=1, shared=0):
 """Acquire a set of resource locks, at the same level.

- Args:
- level: the level at which the locks shall be acquired.
- It must be a memmber of LEVELS.
- names: the names of the locks which shall be acquired.
- (special lock names, or instance/node names)
- shared: whether to acquire in shared mode. By default an exclusive lock
- will be acquired.
- blocking: whether to block while trying to acquire or to operate in
- try-lock mode. this locking mode is not supported yet.
+ @param level: the level at which the locks shall be acquired;
+ it must be a member of LEVELS.
+ @param names: the names of the locks which shall be acquired
+ (special lock names, or instance/node names)
+ @param shared: whether to acquire in shared mode; by default
+ an exclusive lock will be acquired
+ @param blocking: whether to block while trying to acquire or to
+ operate in try-lock mode (this locking mode is not supported yet)

 """
 assert level in LEVELS, "Invalid locking level %s" % level

@@ -812,14 +803,13 @@ class GanetiLockManager:
 def release(self, level, names=None):
 """Release a set of resource locks, at the same level.

- You must have acquired the locks, either in shared or in exclusive mode,
- before releasing them.
+ You must have acquired the locks, either in shared or in exclusive
+ mode, before releasing them.

- Args:
- level: the level at which the locks shall be released.
- It must be a memmber of LEVELS.
- names: the names of the locks which shall be released.
- (defaults to all the locks acquired at that level).
+ @param level: the level at which the locks shall be released;
+ it must be a member of LEVELS
+ @param names: the names of the locks which shall be released
+ (defaults to all the locks acquired at that level)

 """
 assert level in LEVELS, "Invalid locking level %s" % level

@@ -834,12 +824,12 @@ class GanetiLockManager:
 def add(self, level, names, acquired=0, shared=0):
 """Add locks at the specified level.

- Args:
- level: the level at which the locks shall be added.
- It must be a memmber of LEVELS_MOD.
- names: names of the locks to acquire
- acquired: whether to acquire the newly added locks
- shared: whether the acquisition will be shared
+ @param level: the level at which the locks shall be added;
+ it must be a member of LEVELS_MOD.
+ @param names: names of the locks to acquire + @param acquired: whether to acquire the newly added locks + @param shared: whether the acquisition will be shared + """ assert level in LEVELS_MOD, "Invalid or immutable level %s" % level assert self._BGL_owned(), ("You must own the BGL before performing other" @@ -851,16 +841,15 @@ class GanetiLockManager: def remove(self, level, names, blocking=1): """Remove locks from the specified level. - You must either already own the locks you are trying to remove exclusively - or not own any lock at an upper level. + You must either already own the locks you are trying to remove + exclusively or not own any lock at an upper level. - Args: - level: the level at which the locks shall be removed. - It must be a memmber of LEVELS_MOD. - names: the names of the locks which shall be removed. - (special lock names, or instance/node names) - blocking: whether to block while trying to operate in try-lock mode. - this locking mode is not supported yet. + @param level: the level at which the locks shall be removed; + it must be a member of LEVELS_MOD + @param names: the names of the locks which shall be removed + (special lock names, or instance/node names) + @param blocking: whether to block while trying to operate in + try-lock mode (this locking mode is not supported yet) """ assert level in LEVELS_MOD, "Invalid or immutable level %s" % level diff --git a/lib/objects.py b/lib/objects.py index 63d5bec9e29041177c92f62882ad791f0e01d762..6836526422202d11d37319f11496c10746d396b7 100644 --- a/lib/objects.py +++ b/lib/objects.py @@ -549,15 +549,14 @@ class Instance(TaggableObject): def MapLVsByNode(self, lvmap=None, devs=None, node=None): """Provide a mapping of nodes to LVs this instance owns. - This function figures out what logical volumes should belong on which - nodes, recursing through a device tree. + This function figures out what logical volumes should belong on + which nodes, recursing through a device tree. 
- Args: - lvmap: (optional) a dictionary to receive the 'node' : ['lv', ...] data. + @param lvmap: optional dictionary to receive the + 'node' : ['lv', ...] data. - Returns: - None if lvmap arg is given. - Otherwise, { 'nodename' : ['volume1', 'volume2', ...], ... } + @return: None if lvmap arg is given, otherwise, a dictionary + of the form { 'nodename' : ['volume1', 'volume2', ...], ... } """ if node == None: diff --git a/lib/rapi/baserlib.py b/lib/rapi/baserlib.py index e70bce4273728bee3b213696aa88e531bec7faee..58d68a165d94df5a78d3531260d7bcc2ffccec8d 100644 --- a/lib/rapi/baserlib.py +++ b/lib/rapi/baserlib.py @@ -32,10 +32,9 @@ from ganeti import luxi def BuildUriList(ids, uri_format, uri_fields=("name", "uri")): """Builds a URI list as used by index resources. - Args: - - ids: List of ids as strings - - uri_format: Format to be applied for URI - - uri_fields: Optional parameter for field ids + @param ids: list of ids as strings + @param uri_format: format to be applied for URI + @param uri_fields: optional parameter for field IDs """ (field_id, field_uri) = uri_fields @@ -53,9 +52,8 @@ def BuildUriList(ids, uri_format, uri_fields=("name", "uri")): def ExtractField(sequence, index): """Creates a list containing one column out of a list of lists. - Args: - - sequence: Sequence of lists - - index: Index of field + @param sequence: sequence of lists + @param index: index of field """ return map(lambda item: item[index], sequence) @@ -64,13 +62,12 @@ def ExtractField(sequence, index): def MapFields(names, data): """Maps two lists into one dictionary. 
- Args: - - names: Field names (list of strings) - - data: Field data (list) + Example:: + >>> MapFields(["a", "b"], ["foo", 123]) + {'a': 'foo', 'b': 123} - Example: - >>> MapFields(["a", "b"], ["foo", 123]) - {'a': 'foo', 'b': 123} + @param names: field names (list of strings) + @param data: field data (list) """ if len(names) != len(data): @@ -108,12 +105,11 @@ def _Tags_DELETE(kind, tags, name=""): def MapBulkFields(itemslist, fields): """Map value to field name in to one dictionary. - Args: - - itemslist: A list of items values - - instance: A list of items names + @param itemslist: a list of items values + @param fields: a list of items names + + @return: a list of mapped dictionaries - Returns: - A list of mapped dictionaries """ items_details = [] for item in itemslist: @@ -123,7 +119,7 @@ def MapBulkFields(itemslist, fields): def MakeParamsDict(opts, params): - """ Makes params dictionary out of a option set. + """Makes params dictionary out of a option set. This function returns a dictionary needed for hv or be parameters. But only those fields which provided in the option set. Takes parameters frozensets @@ -156,9 +152,8 @@ class R_Generic(object): def __init__(self, items, queryargs, req): """Generic resource constructor. - Args: - items: a list with variables encoded in the URL - queryargs: a dictionary with additional options from URL + @param items: a list with variables encoded in the URL + @param queryargs: a dictionary with additional options from URL """ self.items = items diff --git a/lib/rapi/connector.py b/lib/rapi/connector.py index 929159221d78b48e1684654308d74e45de9e5d1b..2358287105a52143d2899b5ad3820774a415e2a9 100644 --- a/lib/rapi/connector.py +++ b/lib/rapi/connector.py @@ -43,8 +43,7 @@ class Mapper: def __init__(self, connector=CONNECTOR): """Resource mapper constructor. 
- Args:
- con: a dictionary, mapping method name with URL path regexp
+ @param connector: a dictionary, mapping method name with URL path regexp

 """
 self._connector = connector

@@ -52,14 +51,13 @@ class Mapper:
 def getController(self, uri):
 """Find method for a given URI.

- Args:
- uri: string with URI
+ @param uri: string with URI

- Returns:
- None if no method is found or a tuple containing the following fields:
- methd: name of method mapped to URI
- items: a list of variable intems in the path
- args: a dictionary with additional parameters from URL
+ @return: None if no method is found or a tuple containing
+ the following fields:
+ - method: name of method mapped to URI
+ - items: a list of variable items in the path
+ - args: a dictionary with additional parameters from URL

 """
 if '?' in uri:

@@ -100,8 +98,7 @@ class R_root(baserlib.R_Generic):
 def GET(self):
 """Show the list of mapped resources.

- Returns:
- A dictionary with 'name' and 'uri' keys for each of them.
+ @return: a dictionary with 'name' and 'uri' keys for each of them.

 """
 root_pattern = re.compile('^R_([a-zA-Z0-9]+)$')

diff --git a/lib/rapi/rlib1.py b/lib/rapi/rlib1.py
index 06c9b5ecc64929d02c680ca46fd82910c6928cab..f4ef9a9c5fcbe462f2395edbeeb673d2e6208b8d 100644
--- a/lib/rapi/rlib1.py
+++ b/lib/rapi/rlib1.py
@@ -45,8 +45,8 @@ N_FIELDS = ["name", "dtotal", "dfree",

 class R_version(baserlib.R_Generic):
 """/version resource.

- This resource should be used to determine the remote API version and to adapt
- clients accordingly.
+ This resource should be used to determine the remote API version and
+ to adapt clients accordingly.

 """
 DOC_URI = "/version"

@@ -84,20 +84,22 @@ class R_info(baserlib.R_Generic):
 def GET(self):
 """Returns cluster information.
- Example: { - "config_version": 3, - "name": "cluster1.example.com", - "software_version": "1.2.4", - "os_api_version": 5, - "export_version": 0, - "master": "node1.example.com", - "architecture": [ - "64bit", - "x86_64" - ], - "hypervisor_type": "xen-pvm", - "protocol_version": 12 - } + Example:: + + { + "config_version": 3, + "name": "cluster1.example.com", + "software_version": "1.2.4", + "os_api_version": 5, + "export_version": 0, + "master": "node1.example.com", + "architecture": [ + "64bit", + "x86_64" + ], + "hypervisor_type": "xen-pvm", + "protocol_version": 12 + } """ op = ganeti.opcodes.OpQueryClusterInfo() diff --git a/lib/rapi/rlib2.py b/lib/rapi/rlib2.py index a1298b193b82ba759b5eff7fe10c64578341841c..fb607ed8412f9d4f0650e49d5f1c3a7c8e380015 100644 --- a/lib/rapi/rlib2.py +++ b/lib/rapi/rlib2.py @@ -41,8 +41,7 @@ class R_2_jobs(baserlib.R_Generic): def GET(self): """Returns a dictionary of jobs. - Returns: - A dictionary with jobs id and uri. + @return: a dictionary with jobs id and uri. """ fields = ["id"] @@ -60,16 +59,14 @@ class R_2_jobs_id(baserlib.R_Generic): def GET(self): """Returns a job status. - Returns: - A dictionary with job parameters. - - The result includes: - id - job ID as a number - status - current job status as a string - ops - involved OpCodes as a list of dictionaries for each opcodes in - the job - opstatus - OpCodes status as a list - opresult - OpCodes results as a list of lists + @return: a dictionary with job parameters. + The result includes: + - id: job ID as a number + - status: current job status as a string + - ops: involved OpCodes as a list of dictionaries for each + opcodes in the job + - opstatus: OpCodes status as a list + - opresult: OpCodes results as a list of lists """ fields = ["id", "ops", "status", "opstatus", "opresult"] @@ -95,10 +92,9 @@ class R_2_nodes(baserlib.R_Generic): def GET(self): """Returns a list of all nodes. - Returns: - A dictionary with 'name' and 'uri' keys for each of them. 
+ Example:: - Example: [ + [ { "id": "node1.example.com", "uri": "\/instances\/node1.example.com" @@ -106,13 +102,16 @@ class R_2_nodes(baserlib.R_Generic): { "id": "node2.example.com", "uri": "\/instances\/node2.example.com" - }] + } + ] If the optional 'bulk' argument is provided and set to 'true' value (i.e '?bulk=1'), the output contains detailed information about nodes as a list. - Example: [ + Example:: + + [ { "pinst_cnt": 1, "mfree": 31280, @@ -125,7 +124,9 @@ class R_2_nodes(baserlib.R_Generic): "dfree": 5171712 }, ... - ] + ] + + @return: a dictionary with 'name' and 'uri' keys for each of them """ client = luxi.Client() @@ -157,10 +158,10 @@ class R_2_instances(baserlib.R_Generic): def GET(self): """Returns a list of all available instances. - Returns: - A dictionary with 'name' and 'uri' keys for each of them. - Example: [ + Example:: + + [ { "name": "web.example.com", "uri": "\/instances\/web.example.com" @@ -168,13 +169,16 @@ class R_2_instances(baserlib.R_Generic): { "name": "mail.example.com", "uri": "\/instances\/mail.example.com" - }] + } + ] If the optional 'bulk' argument is provided and set to 'true' value (i.e '?bulk=1'), the output contains detailed information about instances as a list. - Example: [ + Example:: + + [ { "status": "running", "bridge": "xen-br0", @@ -194,7 +198,9 @@ class R_2_instances(baserlib.R_Generic): "oper_state": true }, ... - ] + ] + + @returns: a dictionary with 'name' and 'uri' keys for each of them. """ client = luxi.Client() @@ -213,8 +219,7 @@ class R_2_instances(baserlib.R_Generic): def POST(self): """Create an instance. - Returns: - A job id. + @returns: a job id """ opts = self.req.request_post_data @@ -300,8 +305,8 @@ class R_2_instances_name_startup(baserlib.R_Generic): def PUT(self): """Startup an instance. - The URI takes force=[False|True] parameter to start the instance if even if - secondary disks are failing. 
+ The URI takes force=[False|True] parameter to start the instance + if even if secondary disks are failing. """ instance_name = self.items[0] @@ -354,8 +359,8 @@ class R_2_instances_name_tags(baserlib.R_Generic): def PUT(self): """Add a set of tags to the instance. - The request as a list of strings should be PUT to this URI. And you'll have - back a job id. + The request as a list of strings should be PUT to this URI. And + you'll have back a job id. """ return baserlib._Tags_PUT(constants.TAG_INSTANCE, @@ -364,8 +369,9 @@ class R_2_instances_name_tags(baserlib.R_Generic): def DELETE(self): """Delete a tag. - In order to delete a set of tags from a instance, DELETE request should be - addressed to URI like: /2/instances/[instance_name]/tags?tag=[tag]&tag=[tag] + In order to delete a set of tags from a instance, the DELETE + request should be addressed to URI like: + /2/instances/[instance_name]/tags?tag=[tag]&tag=[tag] """ if 'tag' not in self.queryargs: diff --git a/lib/rpc.py b/lib/rpc.py index 534675648fc77f098e3a8f01fc1f33451a5ac046..d120f15548595202c28f33548f81f043cb77cda4 100644 --- a/lib/rpc.py +++ b/lib/rpc.py @@ -524,8 +524,8 @@ class RpcRunner(object): @type node_list: list @param node_list: the list of nodes to query - @type vgname: C{string} - @param vgname: the name of the volume group to ask for disk space + @type vg_name: C{string} + @param vg_name: the name of the volume group to ask for disk space information @type hypervisor_type: C{str} @param hypervisor_type: the name of the hypervisor to ask for diff --git a/lib/serializer.py b/lib/serializer.py index 05bc334988a21e7a75f3a9cebe30456885fbe8ae..fcde99270d79c6f30176c2e22116ca6a247dafe5 100644 --- a/lib/serializer.py +++ b/lib/serializer.py @@ -42,8 +42,10 @@ _RE_EOLSP = re.compile('[ \t]+$', re.MULTILINE) def DumpJson(data, indent=True): """Serialize a given object. 
- Args: - - indent: Whether to indent output (depends on simplejson version) + @param data: the data to serialize + @param indent: whether to indent output (depends on simplejson version) + + @return: the string representation of data """ if not indent or _JSON_INDENT is None: @@ -60,6 +62,10 @@ def DumpJson(data, indent=True): def LoadJson(txt): """Unserialize data from a string. + @param txt: the json-encoded form + + @return: the original data + """ return simplejson.loads(txt) diff --git a/lib/ssh.py b/lib/ssh.py index b2d0cfa8d26c633b9b61f4f8fb9aedf22d91514e..aabb4bb0ebef19b870c9e0dc2b853e8ae3a44030 100644 --- a/lib/ssh.py +++ b/lib/ssh.py @@ -111,18 +111,18 @@ class SshRunner: tty=False, use_cluster_key=True, strict_host_check=True): """Build an ssh command to execute a command on a remote node. - Args: - hostname: the target host, string - user: user to auth as - command: the command - batch: if true, ssh will run in batch mode with no prompting - ask_key: if true, ssh will run with StrictHostKeyChecking=ask, so that - we can connect to an unknown host (not valid in batch mode) - use_cluster_key: Whether to expect and use the cluster-global SSH key - strict_host_check: Whether to check the host's SSH key at all - - Returns: - The ssh call to run 'command' on the remote host. + @param hostname: the target host, string + @param user: user to auth as + @param command: the command + @param batch: if true, ssh will run in batch mode with no prompting + @param ask_key: if true, ssh will run with + StrictHostKeyChecking=ask, so that we can connect to an + unknown host (not valid in batch mode) + @param use_cluster_key: whether to expect and use the + cluster-global SSH key + @param strict_host_check: whether to check the host's SSH key at all + + @return: the ssh call to run 'command' on the remote host. """ argv = [constants.SSH, "-q"] @@ -139,11 +139,10 @@ class SshRunner: This method has the same return value as `utils.RunCmd()`, which it uses to launch ssh. 
- Args: - See SshRunner.BuildCmd. + Args: see SshRunner.BuildCmd. - Returns: - `utils.RunResult` like `utils.RunCmd()` + @rtype: L{utils.RunResult} + @return: the result as from L{utils.RunCmd()} """ return utils.RunCmd(self.BuildCmd(*args, **kwargs)) @@ -151,12 +150,11 @@ class SshRunner: def CopyFileToNode(self, node, filename): """Copy a file to another node with scp. - Args: - node: node in the cluster - filename: absolute pathname of a local file + @param node: node in the cluster + @param filename: absolute pathname of a local file - Returns: - success: True/False + @rtype: boolean + @return: the success of the operation """ if not os.path.isabs(filename): @@ -192,14 +190,12 @@ class SshRunner: (conflicting known hosts) and incosistencies between dns/hosts entries and local machine names - Args: - node: nodename of a host to check. can be short or full qualified hostname + @param node: nodename of a host to check; can be short or + full qualified hostname - Returns: - (success, detail) - where - success: True/False - detail: String with details + @return: (success, detail), where: + - success: True/False + - detail: string with details """ retval = self.Run(node, 'root', 'hostname') diff --git a/scripts/gnt-job b/scripts/gnt-job index f39009f78ee04a35862d2601e42573fbe988d50e..edd4afa406f00b3a66d8be7d781983702735cacf 100755 --- a/scripts/gnt-job +++ b/scripts/gnt-job @@ -141,8 +141,8 @@ def AutoArchiveJobs(opts, args): @param opts: the command line options selected by the user @type args: list @param args: should contain only one element, the age as a time spec - that can be parsed by L{cli.ParseTimespec} or the keyword I{all}, - which will cause all jobs to be archived + that can be parsed by L{ganeti.cli.ParseTimespec} or the + keyword I{all}, which will cause all jobs to be archived @rtype: int @return: the desired exit code