config.py 61.8 KB
Newer Older
Iustin Pop's avatar
Iustin Pop committed
1
#
Iustin Pop's avatar
Iustin Pop committed
2
3
#

4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
Iustin Pop's avatar
Iustin Pop committed
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Configuration management for Ganeti

24
This module provides the interface to the Ganeti cluster configuration.
Iustin Pop's avatar
Iustin Pop committed
25

26
27
The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.
Iustin Pop's avatar
Iustin Pop committed
28

29
30
Currently, the data storage format is JSON. YAML was slow and consuming too
much memory.
Iustin Pop's avatar
Iustin Pop committed
31
32
33

"""

34
35
36
# pylint: disable-msg=R0904
# R0904: Too many public methods

Iustin Pop's avatar
Iustin Pop committed
37
38
import os
import random
39
import logging
40
import time
Iustin Pop's avatar
Iustin Pop committed
41
42

from ganeti import errors
43
from ganeti import locking
Iustin Pop's avatar
Iustin Pop committed
44
45
46
47
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
48
from ganeti import serializer
Balazs Lecz's avatar
Balazs Lecz committed
49
from ganeti import uidpool
50
from ganeti import netutils
51
from ganeti import runtime
52
53


54
# Single module-level lock protecting all configuration accesses; shared
# (read) acquisition is used by the getter methods, exclusive (write)
# acquisition by the mutating methods of ConfigWriter.
_config_lock = locking.SharedLock("ConfigWriter")

# job id used for resource management at config upgrade time
_UPGRADE_CONFIG_JID = "jid-cfg-upgrade"
58

59

Michael Hanselmann's avatar
Michael Hanselmann committed
60
def _ValidateConfig(data):
  """Sanity-check a configuration object.

  Currently only the configuration version is checked.

  @raise errors.ConfigVersionMismatch: if the version of the given
      configuration differs from the one this code expects

  """
  if data.version == constants.CONFIG_VERSION:
    return
  raise errors.ConfigVersionMismatch(constants.CONFIG_VERSION, data.version)
Iustin Pop's avatar
Iustin Pop committed
71

72

Guido Trotter's avatar
Guido Trotter committed
73
74
75
76
77
78
79
80
81
82
83
class TemporaryReservationManager:
  """A temporary resource reservation manager.

  This is used to reserve resources in a job, before using them, making sure
  other jobs cannot get them in the meantime.

  """
  def __init__(self):
    # maps execution context (job) id -> set of resources it holds
    self._ec_reserved = {}

  def Reserved(self, resource):
    """Check whether the given resource is reserved by any holder.

    """
    for holder_reserved in self._ec_reserved.values():
      if resource in holder_reserved:
        return True
    return False

  def Reserve(self, ec_id, resource):
    """Reserve a resource on behalf of the given execution context.

    @raise errors.ReservationError: if the resource is already reserved

    """
    if self.Reserved(resource):
      raise errors.ReservationError("Duplicate reservation for resource '%s'"
                                    % str(resource))
    if ec_id not in self._ec_reserved:
      self._ec_reserved[ec_id] = set([resource])
    else:
      self._ec_reserved[ec_id].add(resource)

  def DropECReservations(self, ec_id):
    """Drop all reservations held by the given execution context.

    """
    if ec_id in self._ec_reserved:
      del self._ec_reserved[ec_id]

  def GetReserved(self):
    """Return the set of all currently reserved resources.

    """
    all_reserved = set()
    for holder_reserved in self._ec_reserved.values():
      all_reserved.update(holder_reserved)
    return all_reserved

  def Generate(self, existing, generate_one_fn, ec_id):
    """Generate a new resource of this type

    @param existing: collection of resources already in use, to avoid
    @param generate_one_fn: callable returning one candidate resource
        (or None if it could not produce one)
    @param ec_id: execution context id under which to reserve the result
    @raise errors.ConfigurationError: if no free resource could be
        generated within the retry budget

    """
    assert callable(generate_one_fn)

    all_elems = self.GetReserved()
    all_elems.update(existing)
    # FIX: the previous "while retries > 0" loop never decremented the
    # counter and could spin forever when generate_one_fn kept returning
    # used/None values; a bounded for/else loop retries at most 64 times.
    new_resource = None
    for _ in range(64):
      new_resource = generate_one_fn()
      if new_resource is not None and new_resource not in all_elems:
        break
    else:
      raise errors.ConfigurationError("Not able generate new resource"
                                      " (last tried: %s)" % new_resource)
    self.Reserve(ec_id, new_resource)
    return new_resource


Iustin Pop's avatar
Iustin Pop committed
128
class ConfigWriter:
129
  """The interface to the cluster configuration.
Iustin Pop's avatar
Iustin Pop committed
130

131
132
133
  @ivar _temporary_lvs: reservation manager for temporary LVs
  @ivar _all_rms: a list of all temporary reservation managers

134
  """
135
136
  def __init__(self, cfg_file=None, offline=False, _getents=runtime.GetEnts,
               accept_foreign=False):
    self.write_count = 0
    self._lock = _config_lock
    self._config_data = None
    self._offline = offline
    # fall back to the cluster-wide default path unless told otherwise
    self._cfg_file = constants.CLUSTER_CONF_FILE if cfg_file is None \
                     else cfg_file
    self._getents = _getents
    # per-type managers for resources handed out but not yet committed
    self._temporary_ids = TemporaryReservationManager()
    self._temporary_drbds = {}
    self._temporary_macs = TemporaryReservationManager()
    self._temporary_secrets = TemporaryReservationManager()
    self._temporary_lvs = TemporaryReservationManager()
    self._all_rms = [self._temporary_ids, self._temporary_macs,
                     self._temporary_secrets, self._temporary_lvs]
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = netutils.Hostname.GetSysName()
    self._last_cluster_serial = -1
    self._cfg_id = None
    self._OpenConfig(accept_foreign)
Iustin Pop's avatar
Iustin Pop committed
161
162
163
164
165
166
167
168
169

  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Tell whether this node carries a cluster configuration file.

    """
    cfg_path = constants.CLUSTER_CONF_FILE
    return os.path.exists(cfg_path)

170
171
172
173
174
175
176
177
178
179
180
  def _GenerateOneMAC(self):
    """Build one random MAC address using the cluster's MAC prefix.

    """
    prefix = self._config_data.cluster.mac_prefix
    # three random octets, drawn in a fixed order
    octets = [random.randrange(0, 256) for _ in range(3)]
    return "%s:%02x:%02x:%02x" % (prefix, octets[0], octets[1], octets[2])

181
182
183
184
185
186
187
188
189
190
191
192
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNdParams(self, node):
    """Get the node params populated with cluster defaults.

    @type node: L{objects.Node}
    @param node: The node we want to know the params for
    @return: A dict with the filled in node params

    """
    group = self._UnlockedGetNodeGroup(node.group)
    return self._config_data.cluster.FillND(node, group)

193
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self, ec_id):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    @type ec_id: string
    @param ec_id: unique id for the job to reserve the MAC for
    @rtype: string
    @return: a free MAC address

    """
    existing = self._AllMACs()
    # FIX: register the reservation with the MAC manager instead of the
    # generic ids manager, so that generated MACs and MACs reserved via
    # ReserveMAC live in the same namespace and are dropped together by
    # DropECReservations.
    return self._temporary_macs.Generate(existing, self._GenerateOneMAC, ec_id)
Iustin Pop's avatar
Iustin Pop committed
202

203
  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveMAC(self, mac, ec_id):
    """Reserve a MAC for an instance.

    This only checks instances managed by this cluster, it does not
    check for potential collisions elsewhere.

    @type mac: string
    @param mac: the MAC address to reserve
    @type ec_id: string
    @param ec_id: unique id for the job to reserve the MAC for
    @raise errors.ReservationError: if the MAC is already in use

    """
    all_macs = self._AllMACs()
    if mac in all_macs:
      raise errors.ReservationError("mac already in use")
    else:
      # FIX: TemporaryReservationManager.Reserve takes (ec_id, resource);
      # the arguments were previously swapped, which registered the MAC as
      # the holder and the job id as the resource, so DropECReservations
      # could never release the MAC.
      self._temporary_macs.Reserve(ec_id, mac)
216

217
218
219
220
221
222
223
224
225
226
227
228
229
230
  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveLV(self, lv_name, ec_id):
    """Reserve an VG/LV pair for an instance.

    @type lv_name: string
    @param lv_name: the logical volume name to reserve
    @type ec_id: string
    @param ec_id: unique id for the job to reserve the LV for
    @raise errors.ReservationError: if the LV is already in use

    """
    all_lvs = self._AllLVs()
    if lv_name in all_lvs:
      raise errors.ReservationError("LV already in use")
    else:
      # FIX: TemporaryReservationManager.Reserve takes (ec_id, resource);
      # the arguments were previously swapped, so the reservation was
      # recorded under the wrong holder.
      self._temporary_lvs.Reserve(ec_id, lv_name)

231
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateDRBDSecret(self, ec_id):
    """Generate a DRBD secret.

    This checks the current disks for duplicates.

    """
    in_use = self._AllDRBDSecrets()
    return self._temporary_secrets.Generate(in_use, utils.GenerateSecret,
                                            ec_id)
Michael Hanselmann's avatar
Michael Hanselmann committed
241

242
  def _AllLVs(self):
    """Compute the list of all LVs.

    @rtype: set
    @return: the set of logical volume names used by all instances

    """
    result = set()
    for inst in self._config_data.instances.values():
      # MapLVsByNode gives node -> list of LVs; flatten all node lists
      for lv_list in inst.MapLVsByNode().values():
        result.update(lv_list)
    return result

253
254
255
256
257
258
259
260
261
262
263
  def _AllIDs(self, include_temporary):
    """Compute the list of all UUIDs and names we have.

    @type include_temporary: boolean
    @param include_temporary: whether to include the _temporary_ids set
    @rtype: set
    @return: a set of IDs

    """
    ids = set(self._AllLVs())
    ids.update(self._config_data.instances.keys())
    ids.update(self._config_data.nodes.keys())
    ids.update(i.uuid for i in self._AllUUIDObjects() if i.uuid)
    if include_temporary:
      ids.update(self._temporary_ids.GetReserved())
    return ids

271
  def _GenerateUniqueID(self, ec_id):
    """Generate an unique UUID.

    This checks the current node, instances and disk names for
    duplicates.

    @rtype: string
    @return: the unique id

    """
    # temporary ids are excluded here; the reservation manager already
    # avoids colliding with its own outstanding reservations
    in_use = self._AllIDs(include_temporary=False)
    return self._temporary_ids.Generate(in_use, utils.NewUUID, ec_id)
283

284
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, ec_id):
    """Generate an unique ID.

    This is just a wrapper over the unlocked version.

    @type ec_id: string
    @param ec_id: unique id for the job to reserve the id to

    """
    return self._GenerateUniqueID(ec_id)
295

Iustin Pop's avatar
Iustin Pop committed
296
297
298
  def _AllMACs(self):
    """Return all MACs present in the config.

    @rtype: list
    @return: the list of all MACs

    """
    return [nic.mac
            for instance in self._config_data.instances.values()
            for nic in instance.nics]

310
311
312
  def _AllDRBDSecrets(self):
    """Return all DRBD secrets present in the config.

    @rtype: list
    @return: the list of all DRBD secrets

    """
    secrets = []
    for instance in self._config_data.instances.values():
      # iterative pre-order walk over each instance's disk trees; the
      # reversed() calls keep the original left-to-right visit order
      stack = list(reversed(instance.disks))
      while stack:
        disk = stack.pop()
        if disk.dev_type == constants.DT_DRBD8:
          secrets.append(disk.logical_id[5])
        if disk.children:
          stack.extend(reversed(disk.children))
    return secrets

332
333
334
335
336
337
338
339
340
341
342
343
344
345
  def _CheckDiskIDs(self, disk, l_ids, p_ids):
    """Compute duplicate disk IDs

    @type disk: L{objects.Disk}
    @param disk: the disk at which to start searching
    @type l_ids: list
    @param l_ids: list of current logical ids
    @type p_ids: list
    @param p_ids: list of current physical ids
    @rtype: list
    @return: a list of error messages

    """
    result = []

    # logical id: record it the first time, complain on repeats
    if disk.logical_id is not None:
      if disk.logical_id not in l_ids:
        l_ids.append(disk.logical_id)
      else:
        result.append("duplicate logical id %s" % str(disk.logical_id))

    # physical id: same bookkeeping as above
    if disk.physical_id is not None:
      if disk.physical_id not in p_ids:
        p_ids.append(disk.physical_id)
      else:
        result.append("duplicate physical id %s" % str(disk.physical_id))

    for child in disk.children or []:
      result.extend(self._CheckDiskIDs(child, l_ids, p_ids))

    return result

362
  def _UnlockedVerifyConfig(self):
    """Verify function.

    Walks the whole in-memory configuration (cluster, instances, nodes,
    node groups, DRBD minors and IP addresses) and collects human-readable
    descriptions of every inconsistency found.  Read-only: it never
    modifies the configuration.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    # pylint: disable-msg=R0914
    result = []
    seen_macs = []
    # ports maps port number -> list of (owner, usage description);
    # more than one entry per port is reported as a duplicate below
    ports = {}
    data = self._config_data
    cluster = data.cluster
    seen_lids = []
    seen_pids = []

    # global cluster checks
    if not cluster.enabled_hypervisors:
      result.append("enabled hypervisors list doesn't have any entries")
    invalid_hvs = set(cluster.enabled_hypervisors) - constants.HYPER_TYPES
    if invalid_hvs:
      result.append("enabled hypervisors contains invalid entries: %s" %
                    invalid_hvs)
    missing_hvp = (set(cluster.enabled_hypervisors) -
                   set(cluster.hvparams.keys()))
    if missing_hvp:
      result.append("hypervisor parameters missing for the enabled"
                    " hypervisor(s) %s" % utils.CommaJoin(missing_hvp))

    if cluster.master_node not in data.nodes:
      result.append("cluster has invalid primary node '%s'" %
                    cluster.master_node)

    # local closures appending to `result` instead of raising
    def _helper(owner, attr, value, template):
      try:
        utils.ForceDictType(value, template)
      except errors.GenericError, err:
        result.append("%s has invalid %s: %s" % (owner, attr, err))

    def _helper_nic(owner, params):
      try:
        objects.NIC.CheckParameterSyntax(params)
      except errors.ConfigurationError, err:
        result.append("%s has invalid nicparams: %s" % (owner, err))

    # check cluster parameters
    _helper("cluster", "beparams", cluster.SimpleFillBE({}),
            constants.BES_PARAMETER_TYPES)
    _helper("cluster", "nicparams", cluster.SimpleFillNIC({}),
            constants.NICS_PARAMETER_TYPES)
    _helper_nic("cluster", cluster.SimpleFillNIC({}))
    _helper("cluster", "ndparams", cluster.SimpleFillND({}),
            constants.NDS_PARAMETER_TYPES)

    # per-instance checks
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.name != instance_name:
        result.append("instance '%s' is indexed by wrong name '%s'" %
                      (instance.name, instance_name))
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)
        if nic.nicparams:
          filled = cluster.SimpleFillNIC(nic.nicparams)
          owner = "instance %s nic %d" % (instance.name, idx)
          _helper(owner, "nicparams",
                  filled, constants.NICS_PARAMETER_TYPES)
          _helper_nic(owner, filled)

      # parameter checks
      if instance.beparams:
        _helper("instance %s" % instance.name, "beparams",
                cluster.FillBE(instance), constants.BES_PARAMETER_TYPES)

      # gather the drbd ports for duplicate checks
      for dsk in instance.disks:
        if dsk.dev_type in constants.LDS_DRBD:
          tcp_port = dsk.logical_id[2]
          if tcp_port not in ports:
            ports[tcp_port] = []
          ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
      # gather network port reservation
      net_port = getattr(instance, "network_port", None)
      if net_port is not None:
        if net_port not in ports:
          ports[net_port] = []
        ports[net_port].append((instance.name, "network port"))

      # instance disk verify
      for idx, disk in enumerate(instance.disks):
        result.extend(["instance '%s' disk %d error: %s" %
                       (instance.name, idx, msg) for msg in disk.Verify()])
        result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))

    # cluster-wide pool of free ports
    for free_port in cluster.tcpudp_port_pool:
      if free_port not in ports:
        ports[free_port] = []
      ports[free_port].append(("cluster", "port marked as free"))

    # compute tcp/udp duplicate ports
    keys = ports.keys()
    keys.sort()
    for pnum in keys:
      pdata = ports[pnum]
      if len(pdata) > 1:
        txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
        result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

    # highest used tcp port check
    if keys:
      if keys[-1] > cluster.highest_used_port:
        result.append("Highest used port mismatch, saved %s, computed %s" %
                      (cluster.highest_used_port, keys[-1]))

    if not data.nodes[cluster.master_node].master_candidate:
      result.append("Master node is not a master candidate")

    # master candidate checks
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
    if mc_now < mc_max:
      result.append("Not enough master candidates: actual %d, target %d" %
                    (mc_now, mc_max))

    # node checks
    for node_name, node in data.nodes.items():
      if node.name != node_name:
        result.append("Node '%s' is indexed by wrong name '%s'" %
                      (node.name, node_name))
      # master_candidate/drained/offline are mutually exclusive states
      if [node.master_candidate, node.drained, node.offline].count(True) > 1:
        result.append("Node %s state is invalid: master_candidate=%s,"
                      " drain=%s, offline=%s" %
                      (node.name, node.master_candidate, node.drained,
                       node.offline))
      if node.group not in data.nodegroups:
        result.append("Node '%s' has invalid group '%s'" %
                      (node.name, node.group))
      else:
        _helper("node %s" % node.name, "ndparams",
                cluster.FillND(node, data.nodegroups[node.group]),
                constants.NDS_PARAMETER_TYPES)

    # nodegroups checks
    nodegroups_names = set()
    for nodegroup_uuid in data.nodegroups:
      nodegroup = data.nodegroups[nodegroup_uuid]
      if nodegroup.uuid != nodegroup_uuid:
        result.append("node group '%s' (uuid: '%s') indexed by wrong uuid '%s'"
                      % (nodegroup.name, nodegroup.uuid, nodegroup_uuid))
      # uuid-like names would be ambiguous with lookups by uuid
      if utils.UUID_RE.match(nodegroup.name.lower()):
        result.append("node group '%s' (uuid: '%s') has uuid-like name" %
                      (nodegroup.name, nodegroup.uuid))
      if nodegroup.name in nodegroups_names:
        result.append("duplicate node group name '%s'" % nodegroup.name)
      else:
        nodegroups_names.add(nodegroup.name)
      if nodegroup.ndparams:
        _helper("group %s" % nodegroup.name, "ndparams",
                cluster.SimpleFillND(nodegroup.ndparams),
                constants.NDS_PARAMETER_TYPES)

    # drbd minors check
    _, duplicates = self._UnlockedComputeDRBDMap()
    for node, minor, instance_a, instance_b in duplicates:
      result.append("DRBD minor %d on node %s is assigned twice to instances"
                    " %s and %s" % (minor, node, instance_a, instance_b))

    # IP checks
    default_nicparams = cluster.nicparams[constants.PP_DEFAULT]
    # ips maps "link/ip" (or plain ip for node addresses) -> owner names
    ips = {}

    def _AddIpAddress(ip, name):
      ips.setdefault(ip, []).append(name)

    _AddIpAddress(cluster.master_ip, "cluster_ip")

    for node in data.nodes.values():
      _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name)
      if node.secondary_ip != node.primary_ip:
        _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name)

    for instance in data.instances.values():
      for idx, nic in enumerate(instance.nics):
        if nic.ip is None:
          continue

        nicparams = objects.FillDict(default_nicparams, nic.nicparams)
        nic_mode = nicparams[constants.NIC_MODE]
        nic_link = nicparams[constants.NIC_LINK]

        # IPs are only considered duplicates within the same link, hence
        # the "bridge:"/"route:" prefix on the key
        if nic_mode == constants.NIC_MODE_BRIDGED:
          link = "bridge:%s" % nic_link
        elif nic_mode == constants.NIC_MODE_ROUTED:
          link = "route:%s" % nic_link
        else:
          raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode)

        _AddIpAddress("%s/%s" % (link, nic.ip),
                      "instance:%s/nic:%d" % (instance.name, idx))

    for ip, owners in ips.items():
      if len(owners) > 1:
        result.append("IP address %s is used by multiple owners: %s" %
                      (ip, utils.CommaJoin(owners)))

    return result

582
583
584
585
586
587
588
589
590
591
592
593
594
  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Verify function.

    This is just a wrapper over L{_UnlockedVerifyConfig}.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    return self._UnlockedVerifyConfig()

595
  def _UnlockedSetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    This function is for internal use, when the config lock is already held.

    @type disk: L{objects.Disk}
    @param disk: the disk whose physical_id is (re)computed in place
    @type node_name: string
    @param node_name: the node from whose point of view the physical id
        is computed (for DRBD, local endpoint data comes first)

    """
    # update children first, so the whole tree is consistent before the
    # top-level device is touched
    if disk.children:
      for child in disk.children:
        self._UnlockedSetDiskID(child, node_name)

    # already converted and no logical id to convert from: nothing to do
    if disk.logical_id is None and disk.physical_id is not None:
      return
    if disk.dev_type == constants.LD_DRBD8:
      pnode, snode, port, pminor, sminor, secret = disk.logical_id
      if node_name not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        node_name)
      pnode_info = self._UnlockedGetNodeInfo(pnode)
      snode_info = self._UnlockedGetNodeInfo(snode)
      if pnode_info is None or snode_info is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(disk))
      # DRBD replication runs over the nodes' secondary IPs
      p_data = (pnode_info.secondary_ip, port)
      s_data = (snode_info.secondary_ip, port)
      # local endpoint data and minor first, then the peer's
      if pnode == node_name:
        disk.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        disk.physical_id = s_data + p_data + (sminor, secret)
    else:
      # non-DRBD devices use the logical id unchanged
      disk.physical_id = disk.logical_id
    return

633
634
635
636
637
638
639
640
641
642
643
644
645
646
  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    This is the locked wrapper over L{_UnlockedSetDiskID}.

    """
    return self._UnlockedSetDiskID(disk, node_name)

  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    @type port: int
    @param port: the port to return to the pool of free ports

    """
    if isinstance(port, int):
      self._config_data.cluster.tcpudp_port_pool.add(port)
      self._WriteConfig()
    else:
      raise errors.ProgrammerError("Invalid type passed for port")

657
  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    """
    pool = self._config_data.cluster.tcpudp_port_pool
    return pool.copy()
663

664
  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    cluster = self._config_data.cluster
    # Ports previously returned to the pool are recycled first.
    if cluster.tcpudp_port_pool:
      port = cluster.tcpudp_port_pool.pop()
    else:
      port = cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      cluster.highest_used_port = port

    self._WriteConfig()
    return port

687
  def _UnlockedComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    Scans all instance disks (including nested children) plus the
    temporary reservations and builds the per-node minor usage map.

    @rtype: (dict, list)
    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list), and a list of duplicates; if the duplicates
        list is not empty, the configuration is corrupted and its caller
        should raise an exception

    """
    def _AppendUsedPorts(instance_name, disk, used):
      # recursively record this disk's minors in `used`, returning any
      # conflicts found as (node, minor, new_owner, old_owner) tuples
      duplicates = []
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
        node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
        for node, port in ((node_a, minor_a), (node_b, minor_b)):
          assert node in used, ("Node '%s' of instance '%s' not found"
                                " in node list" % (node, instance_name))
          if port in used[node]:
            duplicates.append((node, port, instance_name, used[node][port]))
          else:
            used[node][port] = instance_name
      if disk.children:
        for child in disk.children:
          duplicates.extend(_AppendUsedPorts(instance_name, child, used))
      return duplicates

    duplicates = []
    # start with an (empty) entry for every node so callers can index freely
    my_dict = dict((node, {}) for node in self._config_data.nodes)
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        duplicates.extend(_AppendUsedPorts(instance.name, disk, my_dict))
    # temporary (not yet committed) reservations also occupy minors; a
    # reservation matching the committed owner is not a conflict
    for (node, minor), instance in self._temporary_drbds.iteritems():
      if minor in my_dict[node] and my_dict[node][minor] != instance:
        duplicates.append((node, minor, instance, my_dict[node][minor]))
      else:
        my_dict[node][minor] = instance
    return my_dict, duplicates
725

726
727
728
729
730
731
732
733
734
735
736
  @locking.ssynchronized(_config_lock)
  def ComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    This is just a wrapper over L{_UnlockedComputeDRBDMap}.

    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list).

    """
    minors_map, dup_list = self._UnlockedComputeDRBDMap()
    if dup_list:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(dup_list))
    return minors_map
742

743
744
745
746
747
748
749
750
751
  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    @type nodes: list of strings
    @param nodes: the nodes on which to allocate minors, possibly with
        repetitions
    @type instance: string
    @param instance: the instance for which we allocate minors
    @rtype: list of ints
    @return: the allocated minors, one per entry in C{nodes}

    """
    assert isinstance(instance, basestring), \
           "Invalid argument '%s' passed to AllocateDRBDMinor" % instance

    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    result = []
    for nname in nodes:
      ndata = d_map[nname]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = instance
        self._temporary_drbds[(nname, 0)] = instance
        continue
      # pick the first gap in the sorted minor list, if any
      keys = ndata.keys()
      keys.sort()
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      # double-check minor against current instances
      assert minor not in d_map[nname], \
             ("Attempt to reuse allocated DRBD minor %d on node %s,"
              " already allocated to instance %s" %
              (minor, nname, d_map[nname][minor]))
      # record in the map so repeated nodes in this call get fresh minors
      ndata[minor] = instance
      # double-check minor against reservation
      r_key = (nname, minor)
      assert r_key not in self._temporary_drbds, \
             ("Attempt to reuse reserved DRBD minor %d on node %s,"
              " reserved for instance %s" %
              (minor, nname, self._temporary_drbds[r_key]))
      self._temporary_drbds[r_key] = instance
      result.append(minor)
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result

799
  def _UnlockedReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    assert isinstance(instance, basestring), \
           "Invalid argument passed to ReleaseDRBDMinors"
    # collect the keys first, then delete, so we never resize the
    # dictionary while walking over it
    stale = [key for key, owner in self._temporary_drbds.items()
             if owner == instance]
    for key in stale:
      del self._temporary_drbds[key]

813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    Locked wrapper around L{_UnlockedReleaseDRBDMinors}; meant for the
    error paths, since on the success paths the ConfigWriter add and
    update functions call it automatically.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    self._UnlockedReleaseDRBDMinors(instance)

830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
  @locking.ssynchronized(_config_lock, shared=1)
  def GetConfigVersion(self):
    """Get the version of the stored configuration.

    @return: Config version

    """
    return self._config_data.version

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterName(self):
    """Get the name of the cluster.

    @return: Cluster name

    """
    return self._config_data.cluster.cluster_name

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNode(self):
    """Get the hostname of the cluster's master node.

    @return: Master hostname

    """
    return self._config_data.cluster.master_node

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterIP(self):
    """Get the IP address of the cluster's master node.

    @return: Master IP

    """
    return self._config_data.cluster.master_ip

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetdev(self):
    """Get the network device carrying the master IP for this cluster.

    """
    return self._config_data.cluster.master_netdev

  @locking.ssynchronized(_config_lock, shared=1)
  def GetFileStorageDir(self):
    """Get the directory used for file-based instance storage.

    """
    return self._config_data.cluster.file_storage_dir

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHypervisorType(self):
    """Get the hypervisor type for this cluster.

    The first enabled hypervisor is considered the cluster default.

    """
    return self._config_data.cluster.enabled_hypervisors[0]
886

887
  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the cluster's public RSA host key from the config.

    @rtype: string
    @return: the rsa hostkey

    """
    return self._config_data.cluster.rsahostkeypub

897
898
899
900
901
902
903
  @locking.ssynchronized(_config_lock, shared=1)
  def GetDefaultIAllocator(self):
    """Get the name of the default instance allocator of this cluster.

    """
    return self._config_data.cluster.default_iallocator

904
905
906
907
908
909
910
911
912
  @locking.ssynchronized(_config_lock, shared=1)
  def GetPrimaryIPFamily(self):
    """Get the primary IP family of the cluster.

    @return: primary ip family

    """
    return self._config_data.cluster.primary_ip_family

913
914
915
916
  @locking.ssynchronized(_config_lock)
  def AddNodeGroup(self, group, ec_id, check_uuid=True):
    """Add a node group to the configuration and write it out.

    group.UpgradeConfig() is invoked so that any attribute missing from
    the object gets filled in with its default value.

    @type group: L{objects.NodeGroup}
    @param group: the NodeGroup object to add
    @type ec_id: string
    @param ec_id: unique id for the job to use when creating a missing UUID
    @type check_uuid: bool
    @param check_uuid: add an UUID to the group if it doesn't have one or, if
                       it does, ensure that it does not exist in the
                       configuration already

    """
    self._UnlockedAddNodeGroup(group, ec_id, check_uuid)
    self._WriteConfig()

  def _UnlockedAddNodeGroup(self, group, ec_id, check_uuid):
    """Add a node group to the configuration (lock already held).

    """
    logging.info("Adding node group %s to configuration", group.name)

    # Some callers add groups with a UUID pre-generated via
    # ConfigWriter.GenerateUniqueID(); they may skip the
    # "does this UUID already exist" check.
    if check_uuid:
      self._EnsureUUID(group, ec_id)

    group.serial_no = 1
    group.ctime = group.mtime = time.time()
    group.UpgradeConfig()

    self._config_data.nodegroups[group.uuid] = group
    self._config_data.cluster.serial_no += 1

  @locking.ssynchronized(_config_lock)
  def RemoveNodeGroup(self, group_uuid):
    """Remove a node group from the configuration.

    @type group_uuid: string
    @param group_uuid: the UUID of the node group to remove

    """
    logging.info("Removing node group %s from configuration", group_uuid)

    if group_uuid not in self._config_data.nodegroups:
      raise errors.ConfigurationError("Unknown node group '%s'" % group_uuid)

    # the last remaining group must never be removed
    assert len(self._config_data.nodegroups) != 1, \
            "Group '%s' is the only group, cannot be removed" % group_uuid

    del self._config_data.nodegroups[group_uuid]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

972
  def _UnlockedLookupNodeGroup(self, target):
    """Lookup a node group's UUID.

    @type target: string or None
    @param target: group name or UUID or None to look for the default
    @rtype: string
    @return: nodegroup UUID
    @raises errors.OpPrereqError: when the target group cannot be found

    """
    if target is None:
      # without an explicit target we can only pick the "default" group
      # when there is exactly one
      if len(self._config_data.nodegroups) != 1:
        # FIX: pass an error code, consistently with the not-found raise
        # below, and fix the "explicitely" typo in the message
        raise errors.OpPrereqError("More than one node group exists. Target"
                                   " group must be specified explicitly.",
                                   errors.ECODE_INVAL)
      else:
        return self._config_data.nodegroups.keys()[0]
    if target in self._config_data.nodegroups:
      return target
    # fall back to matching by group name
    for nodegroup in self._config_data.nodegroups.values():
      if nodegroup.name == target:
        return nodegroup.uuid
    raise errors.OpPrereqError("Node group '%s' not found" % target,
                               errors.ECODE_NOENT)
Guido Trotter's avatar
Guido Trotter committed
995

996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
  @locking.ssynchronized(_config_lock, shared=1)
  def LookupNodeGroup(self, target):
    """Lookup a node group's UUID.

    Locked wrapper over L{_UnlockedLookupNodeGroup}.

    @type target: string or None
    @param target: group name or UUID or None to look for the default
    @rtype: string
    @return: nodegroup UUID

    """
    return self._UnlockedLookupNodeGroup(target)

1010
  def _UnlockedGetNodeGroup(self, uuid):
Guido Trotter's avatar
Guido Trotter committed
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
    """Lookup a node group.

    @type uuid: string
    @param uuid: group UUID
    @rtype: L{objects.NodeGroup} or None
    @return: nodegroup object, or None if not found

    """
    if uuid not in self._config_data.nodegroups:
      return None

    return self._config_data.nodegroups[uuid]

1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroup(self, uuid):
    """Lookup a node group.

    Locked wrapper over L{_UnlockedGetNodeGroup}.

    @type uuid: string
    @param uuid: group UUID
    @rtype: L{objects.NodeGroup} or None
    @return: nodegroup object, or None if not found

    """
    return self._UnlockedGetNodeGroup(uuid)

1036
1037
1038
1039
1040
1041
1042
  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodeGroupsInfo(self):
    """Get the configuration of all node groups.

    A shallow copy of the uuid-to-group mapping is returned, so callers
    cannot accidentally modify our internal state.

    """
    return dict(self._config_data.nodegroups)

Guido Trotter's avatar
Guido Trotter committed
1043
1044
1045
1046
1047
1048
1049
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroupList(self):
    """Get the list of node group UUIDs.

    """
    return self._config_data.nodegroups.keys()

1050
  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance, ec_id):
    """Add an instance to the config.

    This should be used after creating a new instance.

    @type instance: L{objects.Instance}
    @param instance: the instance object

    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      lvs_by_node = instance.MapLVsByNode()
      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, lvs_by_node)

    # refuse to add an instance whose NICs would clash with existing MACs
    used_macs = self._AllMACs()
    for nic in instance.nics:
      if nic.mac in used_macs:
        raise errors.ConfigurationError("Cannot add instance %s:"
                                        " MAC address '%s' already in use." %
                                        (instance.name, nic.mac))

    self._EnsureUUID(instance, ec_id)

    instance.serial_no = 1
    instance.ctime = instance.mtime = time.time()
    self._config_data.instances[instance.name] = instance
    self._config_data.cluster.serial_no += 1
    # the DRBD minors reserved during allocation are now committed
    self._UnlockedReleaseDRBDMinors(instance.name)
    self._WriteConfig()

1083
  def _EnsureUUID(self, item, ec_id):
    """Ensures a given object has a valid UUID.

    A missing UUID is generated and assigned; an existing one is checked
    for collisions against all known (and temporarily reserved) IDs.

    @param item: the instance or node to be checked
    @param ec_id: the execution context id for the uuid reservation

    """
    if not item.uuid:
      item.uuid = self._GenerateUniqueID(ec_id)
    elif item.uuid in self._AllIDs(include_temporary=True):
      raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
                                      " in use" % (item.name, item.uuid))
1095

1096
1097
  def _SetInstanceStatus(self, instance_name, status):
    """Set the instance's status to a given value.

    """
    assert isinstance(status, bool), \
           "Invalid status '%s' passed to SetInstanceStatus" % (status,)

    try:
      instance = self._config_data.instances[instance_name]
    except KeyError:
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
    # only touch the serial number and write out if there's a real change
    if instance.admin_up != status:
      instance.admin_up = status
      instance.serial_no += 1
      instance.mtime = time.time()
      self._WriteConfig()
Iustin Pop's avatar
Iustin Pop committed
1112

1113
  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Mark the instance status as up in the config.

    """
    self._SetInstanceStatus(instance_name, True)
1119

1120
  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, instance_name):
    """Remove the instance from the configuration.

    """
    try:
      del self._config_data.instances[instance_name]
    except KeyError:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

1131
  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
    inst = self._config_data.instances.pop(old_name)
    inst.name = new_name

    for disk in inst.disks:
      if disk.dev_type == constants.LD_FILE:
        # file-based disks embed the instance name in their paths, so the
        # logical and physical ids have to be rewritten as well
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        disk_fname = "disk%s" % disk.iv_name.split("/")[1]
        disk.physical_id = disk.logical_id = (disk.logical_id[0],
                                              utils.PathJoin(file_storage_dir,
                                                             inst.name,
                                                             disk_fname))

    # Force update of ssconf files
    self._config_data.cluster.serial_no += 1

    self._config_data.instances[inst.name] = inst
    self._WriteConfig()

1162
  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Mark the status of an instance as down in the configuration.

    """
    self._SetInstanceStatus(instance_name, False)
Iustin Pop's avatar
Iustin Pop committed
1168

1169
1170
1171
1172
1173
1174
1175
1176
  def _UnlockedGetInstanceList(self):
    """Get the list of instances.

    This function is for internal use, when the config lock is already held.

    """
    return self._config_data.instances.keys()

1177
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances.

    Locked wrapper over L{_UnlockedGetInstanceList}.

    @return: array of instances, ex. ['instance2.example.com',
        'instance1.example.com']

    """
    return self._UnlockedGetInstanceList()
Iustin Pop's avatar
Iustin Pop committed
1186

1187
  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    # case-insensitive match against the names of all known instances
    return utils.MatchNameComponent(short_name,
                                    self._config_data.instances.keys(),
                                    case_sensitive=False)
Iustin Pop's avatar
Iustin Pop committed
1195

1196
  def _UnlockedGetInstanceInfo(self, instance_name):
Michael Hanselmann's avatar
Michael Hanselmann committed
1197
    """Returns information about an instance.
1198
1199
1200
1201
1202
1203
1204
1205
1206

    This function is for internal use, when the config lock is already held.