diff --git a/lib/asyncnotifier.py b/lib/asyncnotifier.py
index e90fb7cb7656ee49f7c7a73f9a5d9bffc3f1d66d..1498ad0e614e15a009c39d9fad3666ced47a1152 100644
--- a/lib/asyncnotifier.py
+++ b/lib/asyncnotifier.py
@@ -31,17 +31,18 @@ class AsyncNotifier(asyncore.file_dispatcher):
 
   """
 
-  def __init__(self, watch_manager,
-               default_proc_fun=None,
-               map=None):
-    """
-    Constructor for AsyncNotifier, a special asyncore file_dispatcher that
-    actually wraps a pyinotify Notifier, making it asyncronous.
+  def __init__(self, watch_manager, default_proc_fun=None, map=None):
+    """Initializes this class.
+
+    This is a special asyncore file_dispatcher that actually wraps a
+    pyinotify Notifier, making it asynchronous.
 
     """
     if default_proc_fun is None:
-      default_proc_fun=pyinotify.ProcessEvent()
+      default_proc_fun = pyinotify.ProcessEvent()
+
     self.notifier = pyinotify.Notifier(watch_manager, default_proc_fun)
+
     # here we need to steal the file descriptor from the notifier, so we can
     # use it in the global asyncore select, and avoid calling the
     # check_events() function of the notifier (which doesn't allow us to select
@@ -52,4 +53,3 @@ class AsyncNotifier(asyncore.file_dispatcher):
   def handle_read(self):
     self.notifier.read_events()
     self.notifier.process_events()
-
diff --git a/lib/backend.py b/lib/backend.py
index ec87aae307a4097d5c5a120525df253f547be01a..1248aa67259f45cf423d169b1bbdf6a810ee6b10 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -1768,7 +1768,7 @@ def OSFromDisk(name, base_dir=None):
   @raise RPCFail: if we don't find a valid OS
 
   """
-  name_only = name.split('+',1)[0]
+  name_only = name.split("+", 1)[0]
   status, payload = _TryOSFromDisk(name_only, base_dir)
 
   if not status:
diff --git a/lib/cli.py b/lib/cli.py
index b864527906bad17faffe8bf83e05e2e0b26e4daa..292cab08f26fd9cf66b94149f527cbb1863839a8 100644
--- a/lib/cli.py
+++ b/lib/cli.py
@@ -25,7 +25,6 @@
 import sys
 import textwrap
 import os.path
-import copy
 import time
 import logging
 from cStringIO import StringIO
@@ -813,7 +812,7 @@ IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                     default=False, action="store_true",
                                     help="Ignore errors from secondaries")
 
-NOSHUTDOWN_OPT = cli_option("","--noshutdown", dest="shutdown",
+NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                             action="store_false", default=True,
                             help="Don't shutdown the instance (unsafe)")
 
diff --git a/lib/confd/client.py b/lib/confd/client.py
index e9444318e21e9289e687cd114ee56eee98508ff1..db9a71c815664e0cb8e54893e81c61cab1469101 100644
--- a/lib/confd/client.py
+++ b/lib/confd/client.py
@@ -29,7 +29,8 @@ This way the client library doesn't ever need to "wait" on a particular answer,
 and can proceed even if some udp packets are lost. It's up to the user to
 reschedule queries if they haven't received responses and they need them.
 
-Example usage:
+Example usage::
+
   client = ConfdClient(...) # includes callback specification
   req = confd_client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
   client.SendRequest(req)
@@ -43,6 +44,7 @@ You can use the provided ConfdFilterCallback to act as a filter, only passing
 confirming what you already got.
 
 """
+
 import socket
 import time
 import random
@@ -97,7 +99,7 @@ class ConfdClient:
     @param callback: function to call when getting answers
     @type port: integer
     @keyword port: confd port (default: use GetDaemonPort)
-    @type logger: L{logging.Logger}
+    @type logger: logging.Logger
     @keyword logger: optional logger for internal conditions
 
     """
@@ -318,7 +320,7 @@ class ConfdFilterCallback:
 
     @type callback: f(L{ConfdUpcallPayload})
     @param callback: function to call when getting answers
-    @type logger: L{logging.Logger}
+    @type logger: logging.Logger
     @keyword logger: optional logger for internal conditions
 
     """
diff --git a/lib/daemon.py b/lib/daemon.py
index d357349a300215cbb6362c63e6d11ac10eb7870e..809e538d3bd5ea7f642d88f78ad5b31490d90bf1 100644
--- a/lib/daemon.py
+++ b/lib/daemon.py
@@ -24,7 +24,6 @@
 
 import asyncore
 import os
-import select
 import signal
 import errno
 import logging
@@ -158,25 +157,22 @@ class AsyncUDPSocket(asyncore.dispatcher):
 class Mainloop(object):
   """Generic mainloop for daemons
 
+  @ivar scheduler: A sched.scheduler object, which can be used to register
+    timed events
+
   """
   def __init__(self):
     """Constructs a new Mainloop instance.
 
-    @ivar scheduler: A L{sched.scheduler} object, which can be used to register
-    timed events
-
     """
     self._signal_wait = []
     self.scheduler = AsyncoreScheduler(time.time)
 
   @utils.SignalHandled([signal.SIGCHLD])
   @utils.SignalHandled([signal.SIGTERM])
-  def Run(self, stop_on_empty=False, signal_handlers=None):
+  def Run(self, signal_handlers=None):
     """Runs the mainloop.
 
-    @type stop_on_empty: bool
-    @param stop_on_empty: Whether to stop mainloop once all I/O waiters
-                          unregistered
     @type signal_handlers: dict
     @param signal_handlers: signal->L{utils.SignalHandler} passed by decorator
 
@@ -187,10 +183,6 @@ class Mainloop(object):
     running = True
     # Start actual main loop
     while running:
-      # Stop if nothing is listening anymore
-      if stop_on_empty and not (self._io_wait):
-        break
-
       if not self.scheduler.empty():
         try:
           self.scheduler.run()
@@ -234,11 +226,9 @@ def GenericMain(daemon_name, optionparser, dirs, check_fn, exec_fn):
 
   @type daemon_name: string
   @param daemon_name: daemon name
-  @type optionparser: L{optparse.OptionParser}
+  @type optionparser: optparse.OptionParser
   @param optionparser: initialized optionparser with daemon-specific options
                        (common -f -d options will be handled by this module)
-  @type options: object @param options: OptionParser result, should contain at
-                 least the fork and the debug options
   @type dirs: list of strings
   @param dirs: list of directories that must exist for this daemon to work
   @type check_fn: function which accepts (options, args)
diff --git a/lib/hypervisor/hv_kvm.py b/lib/hypervisor/hv_kvm.py
index 6c90c278326240729f969947febac36bdde93068..8b3af52520b2815e4218d010eab40572b6aef622 100644
--- a/lib/hypervisor/hv_kvm.py
+++ b/lib/hypervisor/hv_kvm.py
@@ -370,8 +370,8 @@ class KVMHypervisor(hv_base.BaseHypervisor):
     else:
       kvm_cmd.extend(['-nographic'])
 
-    monitor_dev = 'unix:%s,server,nowait' % \
-      self._InstanceMonitor(instance.name)
+    monitor_dev = ("unix:%s,server,nowait" %
+                   self._InstanceMonitor(instance.name))
     kvm_cmd.extend(['-monitor', monitor_dev])
     if hvp[constants.HV_SERIAL_CONSOLE]:
       serial_dev = ('unix:%s,server,nowait' %
diff --git a/lib/hypervisor/hv_xen.py b/lib/hypervisor/hv_xen.py
index 28101023abfda7ce31766db061fab9da5399e417..22a0945b9e960cc520313e486492bcabb37a6bcd 100644
--- a/lib/hypervisor/hv_xen.py
+++ b/lib/hypervisor/hv_xen.py
@@ -508,8 +508,9 @@ class XenPvmHypervisor(XenHypervisor):
 class XenHvmHypervisor(XenHypervisor):
   """Xen HVM hypervisor interface"""
 
-  ANCILLARY_FILES = XenHypervisor.ANCILLARY_FILES + \
-    [constants.VNC_PASSWORD_FILE]
+  ANCILLARY_FILES = XenHypervisor.ANCILLARY_FILES + [
+    constants.VNC_PASSWORD_FILE,
+    ]
 
   PARAMETERS = {
     constants.HV_ACPI: hv_base.NO_CHECK,
diff --git a/lib/jqueue.py b/lib/jqueue.py
index 5dd55786c6049dbed8a02114917a2690d65af5e0..91a721a01606201005042a9bc03eace6e421bdec 100644
--- a/lib/jqueue.py
+++ b/lib/jqueue.py
@@ -1027,7 +1027,7 @@ class JobQueue(object):
     queue, in order for it to be picked up by the queue processors.
 
     @type job_id: job ID
-    @param jod_id: the job ID for the new job
+    @param job_id: the job ID for the new job
     @type ops: list
     @param ops: The list of OpCodes that will become the new job.
     @rtype: job ID
diff --git a/lib/locking.py b/lib/locking.py
index 7980e50ca9bb9b71c5d9f7b74d2668678c2b98dc..382a45c91c0fceb8c54b00d46a8ec02c494c04f2 100644
--- a/lib/locking.py
+++ b/lib/locking.py
@@ -18,6 +18,9 @@
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 # 02110-1301, USA.
 
+# Disable "Invalid name ..." message
+# pylint: disable-msg=C0103
+
 """Module implementing the Ganeti locking code."""
 
 import os
@@ -113,7 +116,7 @@ class _BaseCondition(object):
   def __init__(self, lock):
     """Constructor for _BaseCondition.
 
-    @type lock: L{threading.Lock}
+    @type lock: threading.Lock
     @param lock: condition base lock
 
     """
@@ -180,6 +183,9 @@ class SingleNotifyPipeCondition(_BaseCondition):
     self._poller = None
 
   def _check_unnotified(self):
+    """Throws an exception if already notified.
+
+    """
     if self._notified:
       raise RuntimeError("cannot use already notified condition")
 
diff --git a/lib/mcpu.py b/lib/mcpu.py
index 5bcfc273fa62fd6781a157e2e65c710f86889e61..1ad93ca323654db69e55454f5f7c5b5403f662a1 100644
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -38,7 +38,6 @@ from ganeti import errors
 from ganeti import rpc
 from ganeti import cmdlib
 from ganeti import locking
-from ganeti import utils
 
 
 class _LockAcquireTimeout(Exception):
diff --git a/lib/rapi/connector.py b/lib/rapi/connector.py
index 4f932b0daf0a139b980bf0d8901c3b5f6626486b..8c206400180e1eb30d5f384bb787a018b29c420e 100644
--- a/lib/rapi/connector.py
+++ b/lib/rapi/connector.py
@@ -114,7 +114,7 @@ class R_root(baserlib.R_Generic):
     return baserlib.BuildUriList(rootlist, "/%s")
 
 
-def _getResources(id):
+def _getResources(id_):
   """Return a list of resources underneath given id.
 
   This is to generalize querying of version resources lists.
@@ -122,7 +122,7 @@ def _getResources(id):
   @return: a list of resources names.
 
   """
-  r_pattern = re.compile('^R_%s_([a-zA-Z0-9]+)$' % id)
+  r_pattern = re.compile('^R_%s_([a-zA-Z0-9]+)$' % id_)
 
   rlist = []
   for handler in CONNECTOR.values():
diff --git a/lib/rapi/rlib2.py b/lib/rapi/rlib2.py
index e9fea3187b898773e6dd6ddcdb96b708ab95d5f1..66b59176182eedb9a3a6d01dd5ed530fc7601524 100644
--- a/lib/rapi/rlib2.py
+++ b/lib/rapi/rlib2.py
@@ -439,7 +439,7 @@ class R_2_instances(baserlib.R_Generic):
     for idx, d in enumerate(disk_data):
       if not isinstance(d, int):
         raise http.HttpBadRequest("Disk %d specification wrong: should"
-                                  " be an integer")
+                                  " be an integer" % idx)
       disks.append({"size": d})
     # nic processing (one nic only)
     nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
@@ -450,7 +450,7 @@ class R_2_instances(baserlib.R_Generic):
     if fn("link", None) is not None:
       nics[0]["link"] = fn("link")
     if fn("bridge", None) is not None:
-       nics[0]["bridge"] = fn("bridge")
+      nics[0]["bridge"] = fn("bridge")
 
     op = opcodes.OpCreateInstance(
       mode=constants.INSTANCE_CREATE,
diff --git a/lib/ssconf.py b/lib/ssconf.py
index 582564a73256c57186e0126b8e319553bd8466d1..eb17fcd350c8054e7c3f59ea17f99707104216e3 100644
--- a/lib/ssconf.py
+++ b/lib/ssconf.py
@@ -57,6 +57,14 @@ class SimpleConfigReader(object):
     self._last_inode = None
     self._last_mtime = None
     self._last_size = None
+
+    self._config_data = None
+    self._instances_ips = None
+    self._inst_ips_by_link = None
+    self._ip_to_instance = None
+    self._mc_primary_ips = None
+    self._nodes_primary_ips = None
+
     # we need a forced reload at class init time, to initialize _last_*
     self._Load(force=True)
 
@@ -79,16 +87,14 @@ class SimpleConfigReader(object):
     mtime = cfg_stat.st_mtime
     size = cfg_stat.st_size
 
-    reload = False
-    if force or inode != self._last_inode or \
-       mtime > self._last_mtime or \
-       size != self._last_size:
+    if (force or inode != self._last_inode or
+        mtime > self._last_mtime or
+        size != self._last_size):
       self._last_inode = inode
       self._last_mtime = mtime
       self._last_size = size
-      reload = True
-
-    if not reload:
+    else:
+      # Don't reload
       return False
 
     try: