Commit 319856a9 authored by Michael Hanselmann's avatar Michael Hanselmann

Change configuration storage format from Pickle to JSON.

- Add NEWS file with major changes between versions.
- Bump RPC version number
- No longer serialize in RPC, but just convert to dict

Old Pickle-based configuration files can be converted using the cfgupgrade
utility; a sketch of the new dict/JSON flow follows the commit metadata below.

Reviewed-by: iustinp, ultrotter
parent 694e2444
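
To illustrate the shape of the change, here is a stand-alone sketch, not Ganeti code: MiniDisk and its fields are invented, and the simplejson round trip below only stands in for whatever the RPC transport actually does. The pattern the commit moves to is that callers pass obj.ToDict() instead of a pickled string, and the node daemon rebuilds the object with the matching class's FromDict().

import simplejson

class MiniDisk(object):
    """Toy stand-in for objects.Disk with the ToDict/FromDict contract."""
    def __init__(self, dev_type=None, size=None):
        self.dev_type = dev_type
        self.size = size

    def ToDict(self):
        # Only plain Python types cross the RPC boundary or reach the disk.
        return {"dev_type": self.dev_type, "size": self.size}

    @classmethod
    def FromDict(cls, val):
        return cls(**dict((str(k), v) for k, v in val.items()))

# Caller side (rpc.py style): build the parameter list from dicts.
params = [MiniDisk("lvm", 1024).ToDict(), 1024, True, "some info"]
wire = simplejson.dumps(params)

# Node daemon side (ganeti-noded style): decode and rebuild the object.
bdev_dict, size, on_primary, info = simplejson.loads(wire)
bdev = MiniDisk.FromDict(bdev_dict)
assert bdev.size == 1024 and bdev.dev_type == "lvm"
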
......@@ -36,6 +36,8 @@ Before installing, please verify that you have the following programs:
http://twistedmatrix.com/
- Python OpenSSL bindings
http://pyopenssl.sourceforge.net/
- simplejson Python module
http://www.undefined.org/python/#simplejson
For testing, you also need the YAML module for Python (http://pyyaml.org/).
......
......@@ -97,7 +97,7 @@ class ServerObject(pb.Avatar):
"""
bdev_s, size, on_primary, info = params
bdev = objects.ConfigObject.Loads(bdev_s)
bdev = objects.Disk.FromDict(bdev_s)
if bdev is None:
raise ValueError("can't unserialize data!")
return backend.CreateBlockDevice(bdev, size, on_primary, info)
......@@ -108,7 +108,7 @@ class ServerObject(pb.Avatar):
"""
bdev_s = params[0]
bdev = objects.ConfigObject.Loads(bdev_s)
bdev = objects.Disk.FromDict(bdev_s)
return backend.RemoveBlockDevice(bdev)
@staticmethod
......@@ -117,7 +117,7 @@ class ServerObject(pb.Avatar):
"""
bdev_s, on_primary = params
bdev = objects.ConfigObject.Loads(bdev_s)
bdev = objects.Disk.FromDict(bdev_s)
if bdev is None:
raise ValueError("can't unserialize data!")
return backend.AssembleBlockDevice(bdev, on_primary)
......@@ -128,7 +128,7 @@ class ServerObject(pb.Avatar):
"""
bdev_s = params[0]
bdev = objects.ConfigObject.Loads(bdev_s)
bdev = objects.Disk.FromDict(bdev_s)
if bdev is None:
raise ValueError("can't unserialize data!")
return backend.ShutdownBlockDevice(bdev)
......@@ -142,8 +142,8 @@ class ServerObject(pb.Avatar):
"""
bdev_s, ndev_s = params
bdev = objects.ConfigObject.Loads(bdev_s)
ndev = objects.ConfigObject.Loads(ndev_s)
bdev = objects.Disk.FromDict(bdev_s)
ndev = objects.Disk.FromDict(ndev_s)
if bdev is None or ndev is None:
raise ValueError("can't unserialize data!")
return backend.MirrorAddChild(bdev, ndev)
......@@ -157,8 +157,8 @@ class ServerObject(pb.Avatar):
"""
bdev_s, ndev_s = params
bdev = objects.ConfigObject.Loads(bdev_s)
ndev = objects.ConfigObject.Loads(ndev_s)
bdev = objects.Disk.FromDict(bdev_s)
ndev = objects.Disk.FromDict(ndev_s)
if bdev is None or ndev is None:
raise ValueError("can't unserialize data!")
return backend.MirrorRemoveChild(bdev, ndev)
......@@ -168,7 +168,7 @@ class ServerObject(pb.Avatar):
"""Return the mirror status for a list of disks.
"""
disks = [objects.ConfigObject.Loads(dsk_s)
disks = [objects.Disk.FromDict(dsk_s)
for dsk_s in params]
return backend.GetMirrorStatus(disks)
......@@ -179,7 +179,7 @@ class ServerObject(pb.Avatar):
This will try to find but not activate a disk.
"""
disk = objects.ConfigObject.Loads(params[0])
disk = objects.Disk.FromDict(params[0])
return backend.FindBlockDevice(disk)
@staticmethod
......@@ -191,7 +191,7 @@ class ServerObject(pb.Avatar):
remove by calling the generic block device remove call.
"""
cfbd = objects.ConfigObject.Loads(params[0])
cfbd = objects.Disk.FromDict(params[0])
return backend.SnapshotBlockDevice(cfbd)
# export/import --------------------------
......@@ -201,9 +201,9 @@ class ServerObject(pb.Avatar):
"""Export a given snapshot.
"""
disk = objects.ConfigObject.Loads(params[0])
disk = objects.Disk.FromDict(params[0])
dest_node = params[1]
instance = objects.ConfigObject.Loads(params[2])
instance = objects.Instance.FromDict(params[2])
return backend.ExportSnapshot(disk, dest_node, instance)
@staticmethod
......@@ -211,8 +211,8 @@ class ServerObject(pb.Avatar):
"""Expose the finalize export functionality.
"""
instance = objects.ConfigObject.Loads(params[0])
snap_disks = [objects.ConfigObject.Loads(str_data)
instance = objects.Instance.FromDict(params[0])
snap_disks = [objects.Disk.FromDict(str_data)
for str_data in params[1]]
return backend.FinalizeExport(instance, snap_disks)
......@@ -284,7 +284,7 @@ class ServerObject(pb.Avatar):
"""
inst_s, os_disk, swap_disk = params
inst = objects.ConfigObject.Loads(inst_s)
inst = objects.Instance.FromDict(inst_s)
return backend.AddOSToInstance(inst, os_disk, swap_disk)
@staticmethod
......@@ -293,7 +293,7 @@ class ServerObject(pb.Avatar):
"""
inst_s, old_name, os_disk, swap_disk = params
inst = objects.ConfigObject.Loads(inst_s)
inst = objects.Instance.FromDict(inst_s)
return backend.RunRenameInstance(inst, old_name, os_disk, swap_disk)
@staticmethod
......@@ -302,7 +302,7 @@ class ServerObject(pb.Avatar):
"""
inst_s, os_disk, swap_disk, src_node, src_image = params
inst = objects.ConfigObject.Loads(inst_s)
inst = objects.Instance.FromDict(inst_s)
return backend.ImportOSIntoInstance(inst, os_disk, swap_disk,
src_node, src_image)
......@@ -311,7 +311,7 @@ class ServerObject(pb.Avatar):
"""Shutdown an instance.
"""
instance = objects.ConfigObject.Loads(params[0])
instance = objects.Instance.FromDict(params[0])
return backend.ShutdownInstance(instance)
@staticmethod
......@@ -319,7 +319,7 @@ class ServerObject(pb.Avatar):
"""Start an instance.
"""
instance = objects.ConfigObject.Loads(params[0])
instance = objects.Instance.FromDict(params[0])
extra_args = params[1]
return backend.StartInstance(instance, extra_args)
......@@ -432,7 +432,7 @@ class ServerObject(pb.Avatar):
result = []
for data in os_list:
if isinstance(data, objects.OS):
result.append(data.Dumps())
result.append(data.ToDict())
elif isinstance(data, errors.InvalidOS):
result.append(data.args)
else:
......@@ -449,7 +449,7 @@ class ServerObject(pb.Avatar):
"""
name = params[0]
try:
os_obj = backend.OSFromDisk(name).Dumps()
os_obj = backend.OSFromDisk(name).ToDict()
except errors.InvalidOS, err:
os_obj = err.args
return os_obj
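
The OS diagnose/get calls now return either a dict (a valid OS, produced by ToDict()) or the argument tuple of an InvalidOS error, and the master side tells the two apart by type. A simplified sketch of that convention follows; the classes below are invented stand-ins for objects.OS and errors.InvalidOS.

class FakeOS(object):
    """Stand-in for objects.OS."""
    def __init__(self, name, path):
        self.name = name
        self.path = path

    def ToDict(self):
        return {"name": self.name, "path": self.path}

    @classmethod
    def FromDict(cls, val):
        return cls(**dict((str(k), v) for k, v in val.items()))

class FakeInvalidOS(Exception):
    """Stand-in for errors.InvalidOS."""

# Node side: a valid OS becomes a dict, a broken one becomes its error args.
results = [FakeOS("debian-etch", "/srv/os").ToDict(),
           FakeInvalidOS("broken-os", "missing create script").args]

# Master side: dispatch on the type of each decoded entry.
decoded = []
for data in results:
    if isinstance(data, dict):
        decoded.append(FakeOS.FromDict(data))
    elif isinstance(data, tuple) and len(data) == 2:
        decoded.append(FakeInvalidOS(data[0], data[1]))
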
......
......@@ -407,6 +407,11 @@ skip resource "r1" {
url="http://pyopenssl.sourceforge.net/">Python OpenSSL
bindings</ulink></simpara>
</listitem>
<listitem>
<simpara><ulink
url="http://www.undefined.org/python/#simplejson">simplejson Python
module</ulink></simpara>
</listitem>
</itemizedlist>
<para>
......
......@@ -21,16 +21,13 @@
"""Configuration management for Ganeti
This module provides the interface to the ganeti cluster configuration.
This module provides the interface to the Ganeti cluster configuration.
The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.
The configuration data is stored on every node but is updated on the
master only. After each update, the master distributes the data to the
other nodes.
Currently the data storage format is pickle as yaml was initially not
available, then we used it but it was a memory-eating slow beast, so
we reverted to pickle using custom Unpicklers.
Currently, the data storage format is JSON. YAML was slow and consumed too
much memory.
"""
......@@ -45,6 +42,7 @@ from ganeti import constants
from ganeti import rpc
from ganeti import objects
def _my_uuidgen():
"""Poor-man's uuidgen using the uuidgen binary.
......@@ -497,8 +495,9 @@ class ConfigWriter:
f = open(self._cfg_file, 'r')
try:
try:
data = objects.ConfigObject.Load(f)
data = objects.ConfigData.Load(f)
except Exception, err:
raise
raise errors.ConfigurationError(err)
finally:
f.close()
......
......@@ -27,7 +27,7 @@ pass to and from external parties.
"""
import cPickle
import simplejson
from cStringIO import StringIO
import ConfigParser
import re
......@@ -56,8 +56,8 @@ class ConfigObject(object):
__slots__ = []
def __init__(self, **kwargs):
for i in kwargs:
setattr(self, i, kwargs[i])
for k, v in kwargs.iteritems():
setattr(self, k, v)
def __getattr__(self, name):
if name not in self.__slots__:
......@@ -82,70 +82,29 @@ class ConfigObject(object):
if name in self.__slots__:
setattr(self, name, state[name])
@staticmethod
def FindGlobal(module, name):
"""Function filtering the allowed classes to be un-pickled.
Currently, we only allow the classes from this module which are
derived from ConfigObject.
"""
# Also support the old module name (ganeti.config)
cls = None
if module == "ganeti.config" or module == "ganeti.objects":
if name == "ConfigData":
cls = ConfigData
elif name == "NIC":
cls = NIC
elif name == "Disk" or name == "BlockDev":
cls = Disk
elif name == "Instance":
cls = Instance
elif name == "OS":
cls = OS
elif name == "Node":
cls = Node
elif name == "Cluster":
cls = Cluster
elif module == "__builtin__":
if name == "set":
cls = set
if cls is None:
raise cPickle.UnpicklingError("Class %s.%s not allowed due to"
" security concerns" % (module, name))
return cls
def Dump(self, fobj):
"""Dump this instance to a file object.
Note that we use the HIGHEST_PROTOCOL, as it brings benefits for
the new classes.
"""Dump to a file object.
"""
dumper = cPickle.Pickler(fobj, cPickle.HIGHEST_PROTOCOL)
dumper.dump(self)
simplejson.dump(self.ToDict(), fobj)
@staticmethod
def Load(fobj):
"""Unpickle data from the given stream.
This uses the `FindGlobal` function to filter the allowed classes.
@classmethod
def Load(cls, fobj):
"""Load data from the given stream.
"""
loader = cPickle.Unpickler(fobj)
loader.find_global = ConfigObject.FindGlobal
return loader.load()
return cls.FromDict(simplejson.load(fobj))
def Dumps(self):
"""Dump this instance and return the string representation."""
"""Dump and return the string representation."""
buf = StringIO()
self.Dump(buf)
return buf.getvalue()
@staticmethod
def Loads(data):
@classmethod
def Loads(cls, data):
"""Load data from a string."""
return ConfigObject.Load(StringIO(data))
return cls.Load(StringIO(data))
def ToDict(self):
"""Convert to a dict holding only standard python types.
......@@ -175,7 +134,8 @@ class ConfigObject(object):
if not isinstance(val, dict):
raise errors.ConfigurationError("Invalid object passed to FromDict:"
" expected dict, got %s" % type(val))
obj = cls(**val)
val_str = dict([(str(k), v) for k, v in val.iteritems()])
obj = cls(**val_str)
return obj
@staticmethod
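
The str(k) conversion in FromDict above matters because simplejson decodes JSON object keys as unicode strings, and on the Python 2 releases Ganeti targeted at the time, unicode keyword names passed via **kwargs raise a TypeError. A tiny illustration of the safe path; the build() function is invented for the example.

import simplejson

def build(**kwargs):
    return kwargs

decoded = simplejson.loads('{"size": 1024, "dev_type": "lvm"}')
# decoded has unicode keys; build(**decoded) could fail on old Python 2
# with "keywords must be strings", so normalise the keys first.
safe = dict((str(k), v) for k, v in decoded.items())
obj = build(**safe)
assert obj["size"] == 1024
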
......@@ -239,7 +199,8 @@ class TaggableObject(ConfigObject):
if not isinstance(tag, basestring):
raise errors.TagError("Invalid tag type (not a string)")
if len(tag) > constants.MAX_TAG_LEN:
raise errors.TagError("Tag too long (>%d)" % constants.MAX_TAG_LEN)
raise errors.TagError("Tag too long (>%d characters)" %
constants.MAX_TAG_LEN)
if not tag:
raise errors.TagError("Tags cannot be empty")
if not re.match("^[ \w.+*/:-]+$", tag):
......@@ -607,6 +568,24 @@ class Cluster(TaggableObject):
"default_bridge",
]
def ToDict(self):
"""Custom function for cluster.
"""
mydict = super(TaggableObject, self).ToDict()
mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
return mydict
@classmethod
def FromDict(cls, val):
"""Custom function for cluster.
"""
obj = super(TaggableObject, cls).FromDict(val)
if not isinstance(obj.tcpudp_port_pool, set):
obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
return obj
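
These overrides are needed because JSON has no set type: tcpudp_port_pool is kept as a set in memory but has to be written out as a list and rebuilt on load. A stand-alone sketch of the same round trip; only the field name is taken from the diff, the values are invented.

import simplejson

pool = set([11000, 11001, 11002])

# Dump: a set is not JSON-serializable, so convert it to a list first.
text = simplejson.dumps({"tcpudp_port_pool": list(pool)})

# Load: JSON gives the list back; turn it into a set again.
loaded = simplejson.loads(text)
restored = set(loaded["tcpudp_port_pool"])
assert restored == pool
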
class SerializableConfigParser(ConfigParser.SafeConfigParser):
"""Simple wrapper over ConfigParse that allows serialization.
......
......@@ -293,7 +293,7 @@ def call_instance_start(node, instance, extra_args):
This is a single-node call.
"""
c = Client("instance_start", [instance.Dumps(), extra_args])
c = Client("instance_start", [instance.ToDict(), extra_args])
c.connect(node)
c.run()
return c.getresult().get(node, False)
......@@ -305,7 +305,7 @@ def call_instance_shutdown(node, instance):
This is a single-node call.
"""
c = Client("instance_shutdown", [instance.Dumps()])
c = Client("instance_shutdown", [instance.ToDict()])
c.connect(node)
c.run()
return c.getresult().get(node, False)
......@@ -317,7 +317,7 @@ def call_instance_os_add(node, inst, osdev, swapdev):
This is a single-node call.
"""
params = [inst.Dumps(), osdev, swapdev]
params = [inst.ToDict(), osdev, swapdev]
c = Client("instance_os_add", params)
c.connect(node)
c.run()
......@@ -330,7 +330,7 @@ def call_instance_run_rename(node, inst, old_name, osdev, swapdev):
This is a single-node call.
"""
params = [inst.Dumps(), old_name, osdev, swapdev]
params = [inst.ToDict(), old_name, osdev, swapdev]
c = Client("instance_run_rename", params)
c.connect(node)
c.run()
......@@ -471,7 +471,7 @@ def call_blockdev_create(node, bdev, size, on_primary, info):
This is a single-node call.
"""
params = [bdev.Dumps(), size, on_primary, info]
params = [bdev.ToDict(), size, on_primary, info]
c = Client("blockdev_create", params)
c.connect(node)
c.run()
......@@ -484,7 +484,7 @@ def call_blockdev_remove(node, bdev):
This is a single-node call.
"""
c = Client("blockdev_remove", [bdev.Dumps()])
c = Client("blockdev_remove", [bdev.ToDict()])
c.connect(node)
c.run()
return c.getresult().get(node, False)
......@@ -496,7 +496,7 @@ def call_blockdev_assemble(node, disk, on_primary):
This is a single-node call.
"""
params = [disk.Dumps(), on_primary]
params = [disk.ToDict(), on_primary]
c = Client("blockdev_assemble", params)
c.connect(node)
c.run()
......@@ -509,7 +509,7 @@ def call_blockdev_shutdown(node, disk):
This is a single-node call.
"""
c = Client("blockdev_shutdown", [disk.Dumps()])
c = Client("blockdev_shutdown", [disk.ToDict()])
c.connect(node)
c.run()
return c.getresult().get(node, False)
......@@ -521,7 +521,7 @@ def call_blockdev_addchild(node, bdev, ndev):
This is a single-node call.
"""
params = [bdev.Dumps(), ndev.Dumps()]
params = [bdev.ToDict(), ndev.ToDict()]
c = Client("blockdev_addchild", params)
c.connect(node)
c.run()
......@@ -534,7 +534,7 @@ def call_blockdev_removechild(node, bdev, ndev):
This is a single-node call.
"""
params = [bdev.Dumps(), ndev.Dumps()]
params = [bdev.ToDict(), ndev.ToDict()]
c = Client("blockdev_removechild", params)
c.connect(node)
c.run()
......@@ -547,7 +547,7 @@ def call_blockdev_getmirrorstatus(node, disks):
This is a single-node call.
"""
params = [dsk.Dumps() for dsk in disks]
params = [dsk.ToDict() for dsk in disks]
c = Client("blockdev_getmirrorstatus", params)
c.connect(node)
c.run()
......@@ -560,7 +560,7 @@ def call_blockdev_find(node, disk):
This is a single-node call.
"""
c = Client("blockdev_find", [disk.Dumps()])
c = Client("blockdev_find", [disk.ToDict()])
c.connect(node)
c.run()
return c.getresult().get(node, False)
......@@ -605,8 +605,8 @@ def call_os_diagnose(node_list):
if result[node_name]:
for data in result[node_name]:
if data:
if isinstance(data, basestring):
nr.append(objects.ConfigObject.Loads(data))
if isinstance(data, dict):
nr.append(objects.OS.FromDict(data))
elif isinstance(data, tuple) and len(data) == 2:
nr.append(errors.InvalidOS(data[0], data[1]))
else:
......@@ -629,8 +629,8 @@ def call_os_get(node_list, name):
new_result = {}
for node_name in result:
data = result[node_name]
if isinstance(data, basestring):
new_result[node_name] = objects.ConfigObject.Loads(data)
if isinstance(data, dict):
new_result[node_name] = objects.OS.FromDict(data)
elif isinstance(data, tuple) and len(data) == 2:
new_result[node_name] = errors.InvalidOS(data[0], data[1])
else:
......@@ -662,7 +662,7 @@ def call_blockdev_snapshot(node, cf_bdev):
This is a single-node call.
"""
c = Client("blockdev_snapshot", [cf_bdev.Dumps()])
c = Client("blockdev_snapshot", [cf_bdev.ToDict()])
c.connect(node)
c.run()
return c.getresult().get(node, False)
......@@ -674,7 +674,7 @@ def call_snapshot_export(node, snap_bdev, dest_node, instance):
This is a single-node call.
"""
params = [snap_bdev.Dumps(), dest_node, instance.Dumps()]
params = [snap_bdev.ToDict(), dest_node, instance.ToDict()]
c = Client("snapshot_export", params)
c.connect(node)
c.run()
......@@ -691,8 +691,8 @@ def call_finalize_export(node, instance, snap_disks):
"""
flat_disks = []
for disk in snap_disks:
flat_disks.append(disk.Dumps())
params = [instance.Dumps(), flat_disks]
flat_disks.append(disk.ToDict())
params = [instance.ToDict(), flat_disks]
c = Client("finalize_export", params)
c.connect(node)
c.run()
......@@ -720,7 +720,7 @@ def call_instance_os_import(node, inst, osdev, swapdev, src_node, src_image):
This is a single-node call.
"""
params = [inst.Dumps(), osdev, swapdev, src_node, src_image]
params = [inst.ToDict(), osdev, swapdev, src_node, src_image]
c = Client("instance_os_import", params)
c.connect(node)
c.run()
......
......@@ -21,13 +21,9 @@
"""Tool to upgrade the configuration file.
The upgrade is done by unpickling the configuration file into custom classes
derivating from dict. We then update the configuration by modifying these
dicts. To save the configuration, it's pickled into a buffer and unpickled
again using the Ganeti objects before being finally pickled into a file.
Not using the custom classes wouldn't allow us to rename or remove attributes
between versions without loosing their values.
This code handles only the types supported by simplejson. For example, a "set"
is stored as a "list". Old Pickle-based configuration files are converted to
JSON during the process.
"""
......@@ -35,26 +31,25 @@ between versions without loosing their values.
import os
import os.path
import sys
import re
import optparse
import cPickle
import tempfile
from cStringIO import StringIO
import simplejson
from ganeti import objects
from ganeti import utils
from ganeti.cli import AskUser, FORCE_OPT
class Error(Exception):
"""Generic exception"""
pass
options = None
args = None
def _BaseFindGlobal(module, name):
"""Helper function for the other FindGlobal functions.
"""
return getattr(sys.modules[module], name)
class Error(Exception):
"""Generic exception"""
pass
# Internal config representation
# {{{ Support for old Pickle files
class UpgradeDict(dict):
"""Base class for internal config classes.
......@@ -66,96 +61,78 @@ class UpgradeDict(dict):
return self.copy()
class UpgradeConfigData(UpgradeDict): pass
class UpgradeCluster(UpgradeDict): pass
class UpgradeNode(UpgradeDict): pass
class UpgradeInstance(UpgradeDict): pass
class UpgradeDisk(UpgradeDict): pass
class UpgradeNIC(UpgradeDict): pass
class UpgradeOS(UpgradeDict): pass
def FindGlobal(module, name):
"""Wraps Ganeti config classes to internal ones.
This function may only return types supported by simplejson.
_ClassMap = {
objects.ConfigData: UpgradeConfigData,
objects.Cluster: UpgradeCluster,
objects.Node: UpgradeNode,
objects.Instance: UpgradeInstance,
objects.Disk: UpgradeDisk,
objects.NIC: UpgradeNIC,
objects.OS: UpgradeOS,
}
"""
if module == "ganeti.objects":
return UpgradeDict
elif module == "__builtin__" and name == "set":
return list
# Build mapping dicts
WriteMapping = dict()
ReadMapping = dict()
for key, value in _ClassMap.iteritems():
WriteMapping[value.__name__] = key
ReadMapping[key.__name__] = value
return getattr(sys.modules[module], name)
# Read config
def _ReadFindGlobal(module, name):
"""Wraps Ganeti config classes to internal ones.
def ReadPickleFile(f):
"""Reads an old Pickle configuration.
"""
if module == "ganeti.objects" and name in ReadMapping:
return ReadMapping[name]
import cPickle