config.py 65.6 KB
Newer Older
Iustin Pop's avatar
Iustin Pop committed
1
#
Iustin Pop's avatar
Iustin Pop committed
2
3
#

4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
Iustin Pop's avatar
Iustin Pop committed
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Configuration management for Ganeti

24
This module provides the interface to the Ganeti cluster configuration.
Iustin Pop's avatar
Iustin Pop committed
25

26
27
The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.
Iustin Pop's avatar
Iustin Pop committed
28

29
30
Currently, the data storage format is JSON. YAML was slow and consuming too
much memory.
Iustin Pop's avatar
Iustin Pop committed
31
32
33

"""

34
35
36
# pylint: disable-msg=R0904
# R0904: Too many public methods

Iustin Pop's avatar
Iustin Pop committed
37
38
import os
import random
39
import logging
40
import time
Iustin Pop's avatar
Iustin Pop committed
41
42

from ganeti import errors
43
from ganeti import locking
Iustin Pop's avatar
Iustin Pop committed
44
45
46
47
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
48
from ganeti import serializer
Balazs Lecz's avatar
Balazs Lecz committed
49
from ganeti import uidpool
50
from ganeti import netutils
51
from ganeti import runtime
52
53


54
# Lock serializing all access to the configuration; ConfigWriter methods
# acquire it through the ssynchronized decorator (shared mode for read-only
# queries, exclusive mode for modifications).
_config_lock = locking.SharedLock("ConfigWriter")

# job id used for resource management at config upgrade time
_UPGRADE_CONFIG_JID = "jid-cfg-upgrade"
58

59

Michael Hanselmann's avatar
Michael Hanselmann committed
60
def _ValidateConfig(data):
  """Check that a configuration object looks valid.

  Only the configuration version is verified here.

  @raise errors.ConfigVersionMismatch: if the version differs from what
      we expect

  """
  expected = constants.CONFIG_VERSION
  if data.version != expected:
    raise errors.ConfigVersionMismatch(expected, data.version)
Iustin Pop's avatar
Iustin Pop committed
71

72

Guido Trotter's avatar
Guido Trotter committed
73
74
75
76
77
78
79
80
81
82
83
class TemporaryReservationManager:
  """A temporary resource reservation manager.

  This is used to reserve resources in a job, before using them, making sure
  other jobs cannot get them in the meantime.

  Reservations are grouped by execution context id (ec_id, typically a job
  id), so that all reservations of one job can be dropped at once.

  """
  def __init__(self):
    # maps ec_id -> set of reserved resources
    self._ec_reserved = {}

  def Reserved(self, resource):
    """Check whether a resource is reserved by any execution context.

    """
    for holder_reserved in self._ec_reserved.values():
      if resource in holder_reserved:
        return True
    return False

  def Reserve(self, ec_id, resource):
    """Reserve a resource for the given execution context.

    @raise errors.ReservationError: if the resource is already reserved

    """
    if self.Reserved(resource):
      raise errors.ReservationError("Duplicate reservation for resource '%s'"
                                    % str(resource))
    if ec_id not in self._ec_reserved:
      self._ec_reserved[ec_id] = set([resource])
    else:
      self._ec_reserved[ec_id].add(resource)

  def DropECReservations(self, ec_id):
    """Drop all reservations held by the given execution context.

    """
    if ec_id in self._ec_reserved:
      del self._ec_reserved[ec_id]

  def GetReserved(self):
    """Return the set of all currently reserved resources.

    """
    all_reserved = set()
    for holder_reserved in self._ec_reserved.values():
      all_reserved.update(holder_reserved)
    return all_reserved

  def Generate(self, existing, generate_one_fn, ec_id):
    """Generate a new resource of this type

    @param existing: iterable of resources already in use
    @param generate_one_fn: callable returning one candidate resource
        (or None when it could not produce one)
    @param ec_id: execution context id to register the reservation under
    @return: the newly generated and reserved resource
    @raise errors.ConfigurationError: if no free resource could be
        generated within the retry limit

    """
    assert callable(generate_one_fn)

    all_elems = self.GetReserved()
    all_elems.update(existing)
    retries = 64
    while retries > 0:
      new_resource = generate_one_fn()
      # decrement the retry counter so the loop is bounded; without this
      # a generator that keeps colliding would spin forever
      retries -= 1
      if new_resource is not None and new_resource not in all_elems:
        break
    else:
      # while/else: only reached when the retry budget is exhausted
      raise errors.ConfigurationError("Not able generate new resource"
                                      " (last tried: %s)" % new_resource)
    self.Reserve(ec_id, new_resource)
    return new_resource


128
def _MatchNameComponentIgnoreCase(short_name, names):
  """Case-insensitive wrapper around L{utils.text.MatchNameComponent}.

  """
  match = utils.MatchNameComponent(short_name, names, case_sensitive=False)
  return match


Iustin Pop's avatar
Iustin Pop committed
135
class ConfigWriter:
  """The interface to the cluster configuration.

  All access is serialized through the module-level ``_config_lock``; the
  ``_temporary_*`` attributes track resources that jobs have reserved but
  not yet committed to the configuration.

  @ivar _temporary_lvs: reservation manager for temporary LVs
  @ivar _all_rms: a list of all temporary reservation managers

  """
142
143
  def __init__(self, cfg_file=None, offline=False, _getents=runtime.GetEnts,
               accept_foreign=False):
    """Initialize the writer and load the configuration file.

    """
    self.write_count = 0
    self._lock = _config_lock
    self._config_data = None
    self._offline = offline
    if cfg_file is None:
      cfg_file = constants.CLUSTER_CONF_FILE
    self._cfg_file = cfg_file
    self._getents = _getents
    self._temporary_ids = TemporaryReservationManager()
    self._temporary_drbds = {}
    self._temporary_macs = TemporaryReservationManager()
    self._temporary_secrets = TemporaryReservationManager()
    self._temporary_lvs = TemporaryReservationManager()
    # all reservation managers, so per-job reservations can be dropped
    # in one pass
    self._all_rms = [self._temporary_ids, self._temporary_macs,
                     self._temporary_secrets, self._temporary_lvs]
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = netutils.Hostname.GetSysName()
    self._last_cluster_serial = -1
    self._cfg_id = None
    self._OpenConfig(accept_foreign)
Iustin Pop's avatar
Iustin Pop committed
168
169
170
171
172
173
174
175
176

  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Tell whether a cluster configuration file is present.

    """
    cfg_path = constants.CLUSTER_CONF_FILE
    return os.path.exists(cfg_path)

177
178
179
180
181
182
183
184
185
186
187
  def _GenerateOneMAC(self):
    """Generate one mac address

    """
    prefix = self._config_data.cluster.mac_prefix
    # three random bytes, rendered as colon-separated hex pairs
    suffix = ":".join("%02x" % random.randrange(0, 256) for _ in range(3))
    return "%s:%s" % (prefix, suffix)

188
189
190
191
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNdParams(self, node):
    """Get the node params populated with cluster defaults.

    @type node: L{objects.Node}
    @param node: The node we want to know the params for
    @return: A dict with the filled in node params

    """
    cluster = self._config_data.cluster
    return cluster.FillND(node, self._UnlockedGetNodeGroup(node.group))

200
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self, ec_id):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    @type ec_id: string
    @param ec_id: unique id for the job to reserve the MAC for
    @rtype: string
    @return: the generated MAC address

    """
    existing = self._AllMACs()
    # Reserve through the MAC-specific manager, consistent with
    # ReserveMAC; using _temporary_ids here would leak MAC reservations
    # into the generic ID namespace used by _AllIDs.
    return self._temporary_macs.Generate(existing, self._GenerateOneMAC, ec_id)
Iustin Pop's avatar
Iustin Pop committed
209

210
  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveMAC(self, mac, ec_id):
    """Reserve a MAC for an instance.

    This only checks instances managed by this cluster, it does not
    check for potential collisions elsewhere.

    """
    # guard clause instead of if/else: fail fast on a known MAC
    if mac in self._AllMACs():
      raise errors.ReservationError("mac already in use")
    self._temporary_macs.Reserve(ec_id, mac)
223

224
225
226
227
228
229
230
231
232
233
234
235
  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveLV(self, lv_name, ec_id):
    """Reserve a VG/LV pair for an instance.

    @type lv_name: string
    @param lv_name: the logical volume name to reserve

    """
    # fail fast when the LV is already part of the configuration
    if lv_name in self._AllLVs():
      raise errors.ReservationError("LV already in use")
    self._temporary_lvs.Reserve(ec_id, lv_name)
237

238
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateDRBDSecret(self, ec_id):
    """Generate a DRBD secret.

    This checks the current disks for duplicates.

    """
    existing = self._AllDRBDSecrets()
    return self._temporary_secrets.Generate(existing, utils.GenerateSecret,
                                            ec_id)
Michael Hanselmann's avatar
Michael Hanselmann committed
248

249
  def _AllLVs(self):
    """Compute the set of all LV names used by instances.

    """
    lvnames = set()
    for instance in self._config_data.instances.values():
      for lv_list in instance.MapLVsByNode().values():
        lvnames.update(lv_list)
    return lvnames

260
261
262
263
264
265
266
267
268
269
270
  def _AllIDs(self, include_temporary):
    """Compute the set of all UUIDs and names currently in use.

    @type include_temporary: boolean
    @param include_temporary: whether to include the _temporary_ids set
    @rtype: set
    @return: a set of IDs

    """
    existing = set()
    if include_temporary:
      existing.update(self._temporary_ids.GetReserved())
    existing.update(self._AllLVs())
    existing.update(self._config_data.instances.keys())
    existing.update(self._config_data.nodes.keys())
    existing.update(obj.uuid for obj in self._AllUUIDObjects() if obj.uuid)
    return existing

278
  def _GenerateUniqueID(self, ec_id):
    """Generate an unique UUID.

    This checks the current node, instances and disk names for
    duplicates.

    @rtype: string
    @return: the unique id

    """
    taken = self._AllIDs(include_temporary=False)
    return self._temporary_ids.Generate(taken, utils.NewUUID, ec_id)
290

291
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, ec_id):
    """Generate an unique ID, holding the config lock.

    This is just a wrapper over the unlocked version.

    @type ec_id: string
    @param ec_id: unique id for the job to reserve the id to

    """
    return self._GenerateUniqueID(ec_id)
302

Iustin Pop's avatar
Iustin Pop committed
303
304
305
  def _AllMACs(self):
    """Return all MACs present in the config.

    @rtype: list
    @return: the list of all MACs

    """
    return [nic.mac
            for instance in self._config_data.instances.values()
            for nic in instance.nics]

317
318
319
  def _AllDRBDSecrets(self):
    """Return all DRBD secrets present in the config.

    @rtype: list
    @return: the list of all DRBD secrets

    """
    def _DiskSecrets(disk):
      """Return the secrets of this disk and, recursively, its children."""
      secrets = []
      if disk.dev_type == constants.DT_DRBD8:
        # the DRBD shared secret is the sixth element of the logical id
        secrets.append(disk.logical_id[5])
      for child in disk.children or []:
        secrets.extend(_DiskSecrets(child))
      return secrets

    result = []
    for instance in self._config_data.instances.values():
      for disk in instance.disks:
        result.extend(_DiskSecrets(disk))
    return result

339
340
341
342
343
344
345
346
347
348
349
350
351
352
  def _CheckDiskIDs(self, disk, l_ids, p_ids):
    """Compute duplicate disk IDs

    @type disk: L{objects.Disk}
    @param disk: the disk at which to start searching
    @type l_ids: list
    @param l_ids: list of current logical ids
    @type p_ids: list
    @param p_ids: list of current physical ids
    @rtype: list
    @return: a list of error messages

    """
    found = []
    if disk.logical_id is not None:
      if disk.logical_id not in l_ids:
        l_ids.append(disk.logical_id)
      else:
        found.append("duplicate logical id %s" % str(disk.logical_id))
    if disk.physical_id is not None:
      if disk.physical_id not in p_ids:
        p_ids.append(disk.physical_id)
      else:
        found.append("duplicate physical id %s" % str(disk.physical_id))

    for child in disk.children or []:
      found.extend(self._CheckDiskIDs(child, l_ids, p_ids))
    return found

369
  def _UnlockedVerifyConfig(self):
    """Verify function.

    Runs a series of consistency checks over the whole in-memory
    configuration (cluster, instances, nodes, node groups, ports, DRBD
    minors and IP addresses) and collects human-readable error messages.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    # pylint: disable-msg=R0914
    result = []
    seen_macs = []
    ports = {}
    data = self._config_data
    cluster = data.cluster
    seen_lids = []
    seen_pids = []

    # global cluster checks
    if not cluster.enabled_hypervisors:
      result.append("enabled hypervisors list doesn't have any entries")
    invalid_hvs = set(cluster.enabled_hypervisors) - constants.HYPER_TYPES
    if invalid_hvs:
      result.append("enabled hypervisors contains invalid entries: %s" %
                    invalid_hvs)
    missing_hvp = (set(cluster.enabled_hypervisors) -
                   set(cluster.hvparams.keys()))
    if missing_hvp:
      result.append("hypervisor parameters missing for the enabled"
                    " hypervisor(s) %s" % utils.CommaJoin(missing_hvp))

    if cluster.master_node not in data.nodes:
      result.append("cluster has invalid primary node '%s'" %
                    cluster.master_node)

    # local helpers; both append their findings to the enclosing ``result``
    def _helper(owner, attr, value, template):
      try:
        utils.ForceDictType(value, template)
      except errors.GenericError, err:
        result.append("%s has invalid %s: %s" % (owner, attr, err))

    def _helper_nic(owner, params):
      try:
        objects.NIC.CheckParameterSyntax(params)
      except errors.ConfigurationError, err:
        result.append("%s has invalid nicparams: %s" % (owner, err))

    # check cluster parameters
    _helper("cluster", "beparams", cluster.SimpleFillBE({}),
            constants.BES_PARAMETER_TYPES)
    _helper("cluster", "nicparams", cluster.SimpleFillNIC({}),
            constants.NICS_PARAMETER_TYPES)
    _helper_nic("cluster", cluster.SimpleFillNIC({}))
    _helper("cluster", "ndparams", cluster.SimpleFillND({}),
            constants.NDS_PARAMETER_TYPES)

    # per-instance checks
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.name != instance_name:
        result.append("instance '%s' is indexed by wrong name '%s'" %
                      (instance.name, instance_name))
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)
        if nic.nicparams:
          filled = cluster.SimpleFillNIC(nic.nicparams)
          owner = "instance %s nic %d" % (instance.name, idx)
          _helper(owner, "nicparams",
                  filled, constants.NICS_PARAMETER_TYPES)
          _helper_nic(owner, filled)

      # parameter checks
      if instance.beparams:
        _helper("instance %s" % instance.name, "beparams",
                cluster.FillBE(instance), constants.BES_PARAMETER_TYPES)

      # gather the drbd ports for duplicate checks
      for dsk in instance.disks:
        if dsk.dev_type in constants.LDS_DRBD:
          tcp_port = dsk.logical_id[2]
          if tcp_port not in ports:
            ports[tcp_port] = []
          ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
      # gather network port reservation
      net_port = getattr(instance, "network_port", None)
      if net_port is not None:
        if net_port not in ports:
          ports[net_port] = []
        ports[net_port].append((instance.name, "network port"))

      # instance disk verify
      for idx, disk in enumerate(instance.disks):
        result.extend(["instance '%s' disk %d error: %s" %
                       (instance.name, idx, msg) for msg in disk.Verify()])
        result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))

    # cluster-wide pool of free ports
    for free_port in cluster.tcpudp_port_pool:
      if free_port not in ports:
        ports[free_port] = []
      ports[free_port].append(("cluster", "port marked as free"))

    # compute tcp/udp duplicate ports
    keys = ports.keys()
    keys.sort()
    for pnum in keys:
      pdata = ports[pnum]
      if len(pdata) > 1:
        txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
        result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

    # highest used tcp port check
    if keys:
      if keys[-1] > cluster.highest_used_port:
        result.append("Highest used port mismatch, saved %s, computed %s" %
                      (cluster.highest_used_port, keys[-1]))

    if not data.nodes[cluster.master_node].master_candidate:
      result.append("Master node is not a master candidate")

    # master candidate checks
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
    if mc_now < mc_max:
      result.append("Not enough master candidates: actual %d, target %d" %
                    (mc_now, mc_max))

    # node checks
    for node_name, node in data.nodes.items():
      if node.name != node_name:
        result.append("Node '%s' is indexed by wrong name '%s'" %
                      (node.name, node_name))
      # a node may be at most one of master_candidate/drained/offline
      if [node.master_candidate, node.drained, node.offline].count(True) > 1:
        result.append("Node %s state is invalid: master_candidate=%s,"
                      " drain=%s, offline=%s" %
                      (node.name, node.master_candidate, node.drained,
                       node.offline))
      if node.group not in data.nodegroups:
        result.append("Node '%s' has invalid group '%s'" %
                      (node.name, node.group))
      else:
        _helper("node %s" % node.name, "ndparams",
                cluster.FillND(node, data.nodegroups[node.group]),
                constants.NDS_PARAMETER_TYPES)

    # nodegroups checks
    nodegroups_names = set()
    for nodegroup_uuid in data.nodegroups:
      nodegroup = data.nodegroups[nodegroup_uuid]
      if nodegroup.uuid != nodegroup_uuid:
        result.append("node group '%s' (uuid: '%s') indexed by wrong uuid '%s'"
                      % (nodegroup.name, nodegroup.uuid, nodegroup_uuid))
      if utils.UUID_RE.match(nodegroup.name.lower()):
        result.append("node group '%s' (uuid: '%s') has uuid-like name" %
                      (nodegroup.name, nodegroup.uuid))
      if nodegroup.name in nodegroups_names:
        result.append("duplicate node group name '%s'" % nodegroup.name)
      else:
        nodegroups_names.add(nodegroup.name)
      if nodegroup.ndparams:
        _helper("group %s" % nodegroup.name, "ndparams",
                cluster.SimpleFillND(nodegroup.ndparams),
                constants.NDS_PARAMETER_TYPES)

    # drbd minors check
    _, duplicates = self._UnlockedComputeDRBDMap()
    for node, minor, instance_a, instance_b in duplicates:
      result.append("DRBD minor %d on node %s is assigned twice to instances"
                    " %s and %s" % (minor, node, instance_a, instance_b))

    # IP checks
    default_nicparams = cluster.nicparams[constants.PP_DEFAULT]
    ips = {}

    def _AddIpAddress(ip, name):
      ips.setdefault(ip, []).append(name)

    _AddIpAddress(cluster.master_ip, "cluster_ip")

    for node in data.nodes.values():
      _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name)
      if node.secondary_ip != node.primary_ip:
        _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name)

    for instance in data.instances.values():
      for idx, nic in enumerate(instance.nics):
        if nic.ip is None:
          continue

        nicparams = objects.FillDict(default_nicparams, nic.nicparams)
        nic_mode = nicparams[constants.NIC_MODE]
        nic_link = nicparams[constants.NIC_LINK]

        # IPs are only compared within the same link/mode namespace
        if nic_mode == constants.NIC_MODE_BRIDGED:
          link = "bridge:%s" % nic_link
        elif nic_mode == constants.NIC_MODE_ROUTED:
          link = "route:%s" % nic_link
        else:
          raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode)

        _AddIpAddress("%s/%s" % (link, nic.ip),
                      "instance:%s/nic:%d" % (instance.name, idx))

    for ip, owners in ips.items():
      if len(owners) > 1:
        result.append("IP address %s is used by multiple owners: %s" %
                      (ip, utils.CommaJoin(owners)))

    return result

588
589
590
591
592
593
594
595
596
597
598
599
600
  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Verify the configuration, holding the config lock.

    This is just a wrapper over L{_UnlockedVerifyConfig}.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    return self._UnlockedVerifyConfig()

601
  def _UnlockedSetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote node.

    This function is for internal use, when the config lock is already held.

    """
    # update children first, so passing only the top device suffices
    for child in disk.children or []:
      self._UnlockedSetDiskID(child, node_name)

    if disk.logical_id is None and disk.physical_id is not None:
      # nothing to convert
      return
    if disk.dev_type != constants.LD_DRBD8:
      # non-DRBD devices use the logical id verbatim
      disk.physical_id = disk.logical_id
      return

    pnode, snode, port, pminor, sminor, secret = disk.logical_id
    if node_name not in (pnode, snode):
      raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                      node_name)
    pnode_info = self._UnlockedGetNodeInfo(pnode)
    snode_info = self._UnlockedGetNodeInfo(snode)
    if pnode_info is None or snode_info is None:
      raise errors.ConfigurationError("Can't find primary or secondary node"
                                      " for %s" % str(disk))
    p_data = (pnode_info.secondary_ip, port)
    s_data = (snode_info.secondary_ip, port)
    if pnode == node_name:
      disk.physical_id = p_data + s_data + (pminor, secret)
    else:
      # it must be the secondary, we tested above
      disk.physical_id = s_data + p_data + (sminor, secret)

639
640
641
642
643
644
645
646
647
648
649
650
651
652
  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    Locked wrapper over L{_UnlockedSetDiskID}; used only for drbd, which
    needs ip/port configuration. The routine also descends into the
    disk's children.

    """
    return self._UnlockedSetDiskID(disk, node_name)

  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    """
    # reject anything that is not a plain integer
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")
    self._config_data.cluster.tcpudp_port_pool.add(port)
    self._WriteConfig()

663
  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    """
    pool = self._config_data.cluster.tcpudp_port_pool
    return pool.copy()
669

670
  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    cluster = self._config_data.cluster
    # If there are TCP/IP ports configured, we use them first.
    if cluster.tcpudp_port_pool:
      port = cluster.tcpudp_port_pool.pop()
    else:
      port = cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      cluster.highest_used_port = port

    self._WriteConfig()
    return port

693
  def _UnlockedComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    @rtype: (dict, list)
    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list), and a list of duplicates; if the duplicates
        list is not empty, the configuration is corrupted and its caller
        should raise an exception

    """
    def _AppendUsedPorts(instance_name, disk, used):
      # Records this disk's (and recursively its children's) DRBD minors
      # in ``used`` and returns any conflicts found.
      duplicates = []
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
        node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
        for node, port in ((node_a, minor_a), (node_b, minor_b)):
          assert node in used, ("Node '%s' of instance '%s' not found"
                                " in node list" % (node, instance_name))
          if port in used[node]:
            duplicates.append((node, port, instance_name, used[node][port]))
          else:
            used[node][port] = instance_name
      if disk.children:
        for child in disk.children:
          duplicates.extend(_AppendUsedPorts(instance_name, child, used))
      return duplicates

    duplicates = []
    # start with an empty minor map for every known node
    my_dict = dict((node, {}) for node in self._config_data.nodes)
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        duplicates.extend(_AppendUsedPorts(instance.name, disk, my_dict))
    # merge in not-yet-committed reservations
    for (node, minor), instance in self._temporary_drbds.iteritems():
      if minor in my_dict[node] and my_dict[node][minor] != instance:
        duplicates.append((node, minor, instance, my_dict[node][minor]))
      else:
        my_dict[node][minor] = instance
    return my_dict, duplicates
731

732
733
734
735
736
737
738
739
740
741
742
  @locking.ssynchronized(_config_lock)
  def ComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    This is just a wrapper over L{_UnlockedComputeDRBDMap}.

    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list).

    """
    used_map, dup_list = self._UnlockedComputeDRBDMap()
    if dup_list:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(dup_list))
    return used_map
748

749
750
751
752
753
754
755
756
757
  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    @type instance: string
    @param instance: the instance for which we allocate minors

    """
    assert isinstance(instance, basestring), \
           "Invalid argument '%s' passed to AllocateDRBDMinor" % instance

    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    result = []
    for nname in nodes:
      ndata = d_map[nname]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = instance
        self._temporary_drbds[(nname, 0)] = instance
        continue
      # pick the first gap in the sorted minor list, if any
      keys = ndata.keys()
      keys.sort()
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      # double-check minor against current instances
      assert minor not in d_map[nname], \
             ("Attempt to reuse allocated DRBD minor %d on node %s,"
              " already allocated to instance %s" %
              (minor, nname, d_map[nname][minor]))
      ndata[minor] = instance
      # double-check minor against reservation
      r_key = (nname, minor)
      assert r_key not in self._temporary_drbds, \
             ("Attempt to reuse reserved DRBD minor %d on node %s,"
              " reserved for instance %s" %
              (minor, nname, self._temporary_drbds[r_key]))
      self._temporary_drbds[r_key] = instance
      result.append(minor)
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result

805
  def _UnlockedReleaseDRBDMinors(self, instance):
    """Drop all temporary DRBD minor reservations held by one instance.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    assert isinstance(instance, basestring), \
           "Invalid argument passed to ReleaseDRBDMinors"
    # collect the matching reservation keys first, then delete, so we
    # never mutate the dict while scanning it
    stale_keys = [key for (key, owner) in self._temporary_drbds.items()
                  if owner == instance]
    for key in stale_keys:
      del self._temporary_drbds[key]

819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    This should be called on the error paths, on the success paths
    it's automatically called by the ConfigWriter add and update
    functions.

    Locked wrapper over L{_UnlockedReleaseDRBDMinors}.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    self._UnlockedReleaseDRBDMinors(instance)

836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
  @locking.ssynchronized(_config_lock, shared=1)
  def GetConfigVersion(self):
    """Return the version of the currently loaded configuration.

    @return: Config version

    """
    data = self._config_data
    return data.version

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterName(self):
    """Return the name of this cluster.

    @return: Cluster name

    """
    cluster = self._config_data.cluster
    return cluster.cluster_name

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNode(self):
    """Return the hostname of the current master node.

    @return: Master hostname

    """
    cluster = self._config_data.cluster
    return cluster.master_node

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterIP(self):
    """Return the IP address of the cluster master.

    @return: Master IP

    """
    cluster = self._config_data.cluster
    return cluster.master_ip

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetdev(self):
    """Return the network device used by the cluster master.

    """
    cluster = self._config_data.cluster
    return cluster.master_netdev

  @locking.ssynchronized(_config_lock, shared=1)
  def GetFileStorageDir(self):
    """Return the cluster-wide file storage directory.

    """
    cluster = self._config_data.cluster
    return cluster.file_storage_dir

886
887
888
889
890
891
892
  @locking.ssynchronized(_config_lock, shared=1)
  def GetSharedFileStorageDir(self):
    """Return the cluster-wide shared file storage directory.

    """
    cluster = self._config_data.cluster
    return cluster.shared_file_storage_dir

893
894
895
896
897
  @locking.ssynchronized(_config_lock, shared=1)
  def GetHypervisorType(self):
    """Return the default hypervisor for this cluster.

    This is the first entry of the enabled hypervisors list.

    """
    enabled = self._config_data.cluster.enabled_hypervisors
    return enabled[0]
899

900
  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the public RSA host key stored in the config.

    @rtype: string
    @return: the rsa hostkey

    """
    cluster = self._config_data.cluster
    return cluster.rsahostkeypub

910
911
912
913
914
915
916
  @locking.ssynchronized(_config_lock, shared=1)
  def GetDefaultIAllocator(self):
    """Return the cluster's default instance allocator.

    """
    cluster = self._config_data.cluster
    return cluster.default_iallocator

917
918
919
920
921
922
923
924
925
  @locking.ssynchronized(_config_lock, shared=1)
  def GetPrimaryIPFamily(self):
    """Return the cluster's primary IP family.

    @return: primary ip family

    """
    cluster = self._config_data.cluster
    return cluster.primary_ip_family

926
927
928
929
  @locking.ssynchronized(_config_lock)
  def AddNodeGroup(self, group, ec_id, check_uuid=True):
    """Add a node group to the configuration.

    Locked wrapper over L{_UnlockedAddNodeGroup} which also persists the
    updated configuration. The helper calls group.UpgradeConfig() to fill
    any missing attributes according to their default values.

    @type group: L{objects.NodeGroup}
    @param group: the NodeGroup object to add
    @type ec_id: string
    @param ec_id: unique id for the job to use when creating a missing UUID
    @type check_uuid: bool
    @param check_uuid: add an UUID to the group if it doesn't have one or, if
                       it does, ensure that it does not exist in the
                       configuration already

    """
    self._UnlockedAddNodeGroup(group, ec_id, check_uuid)
    self._WriteConfig()

  def _UnlockedAddNodeGroup(self, group, ec_id, check_uuid):
    """Add a node group to the in-memory configuration (no locking, no write).

    @type group: L{objects.NodeGroup}
    @param group: the NodeGroup object to add
    @type ec_id: string
    @param ec_id: execution context id, used when reserving a fresh UUID
    @type check_uuid: bool
    @param check_uuid: whether to generate/validate the group's UUID

    """
    logging.info("Adding node group %s to configuration", group.name)

    # Some code might need to add a node group with a pre-populated UUID
    # generated with ConfigWriter.GenerateUniqueID(). We allow them to bypass
    # the "does this UUID" exist already check.
    if check_uuid:
      self._EnsureUUID(group, ec_id)

    # the lookup raising OpPrereqError means the name is still free
    try:
      clashing_uuid = self._UnlockedLookupNodeGroup(group.name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
                                 " node group (UUID: %s)" %
                                 (group.name, clashing_uuid),
                                 errors.ECODE_EXISTS)

    group.serial_no = 1
    group.ctime = group.mtime = time.time()
    group.UpgradeConfig()

    self._config_data.nodegroups[group.uuid] = group
    self._config_data.cluster.serial_no += 1

  @locking.ssynchronized(_config_lock)
  def RemoveNodeGroup(self, group_uuid):
    """Delete a node group from the configuration.

    @type group_uuid: string
    @param group_uuid: the UUID of the node group to remove

    """
    logging.info("Removing node group %s from configuration", group_uuid)

    if group_uuid not in self._config_data.nodegroups:
      raise errors.ConfigurationError("Unknown node group '%s'" % group_uuid)

    # the cluster must always keep at least one group
    assert len(self._config_data.nodegroups) != 1, \
            "Group '%s' is the only group, cannot be removed" % group_uuid

    del self._config_data.nodegroups[group_uuid]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

995
  def _UnlockedLookupNodeGroup(self, target):
    """Lookup a node group's UUID.

    The target may be the group's UUID, its name, or None to select the
    default group (only valid when exactly one group exists).

    @type target: string or None
    @param target: group name or UUID or None to look for the default
    @rtype: string
    @return: nodegroup UUID
    @raises errors.OpPrereqError: when the target group cannot be found

    """
    if target is None:
      if len(self._config_data.nodegroups) != 1:
        # FIX: pass the error classification code, as every other
        # OpPrereqError in this module does, and fix the "explicitely" typo
        raise errors.OpPrereqError("More than one node group exists. Target"
                                   " group must be specified explicitly.",
                                   errors.ECODE_INVAL)
      else:
        return self._config_data.nodegroups.keys()[0]
    # fast path: the target is already a known UUID
    if target in self._config_data.nodegroups:
      return target
    # slow path: linear scan by group name
    for nodegroup in self._config_data.nodegroups.values():
      if nodegroup.name == target:
        return nodegroup.uuid
    raise errors.OpPrereqError("Node group '%s' not found" % target,
                               errors.ECODE_NOENT)
Guido Trotter's avatar
Guido Trotter committed
1018

1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
  @locking.ssynchronized(_config_lock, shared=1)
  def LookupNodeGroup(self, target):
    """Lookup a node group's UUID.

    Locked wrapper over L{_UnlockedLookupNodeGroup}.

    @type target: string or None
    @param target: group name or UUID or None to look for the default
    @rtype: string
    @return: nodegroup UUID

    """
    return self._UnlockedLookupNodeGroup(target)

1033
  def _UnlockedGetNodeGroup(self, uuid):
    """Lookup a node group by UUID.

    @type uuid: string
    @param uuid: group UUID
    @rtype: L{objects.NodeGroup} or None
    @return: nodegroup object, or None if not found

    """
    # dict.get already returns None for unknown keys
    return self._config_data.nodegroups.get(uuid)

1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroup(self, uuid):
    """Lookup a node group by UUID.

    Locked wrapper over L{_UnlockedGetNodeGroup}.

    @type uuid: string
    @param uuid: group UUID
    @rtype: L{objects.NodeGroup} or None
    @return: nodegroup object, or None if not found

    """
    return self._UnlockedGetNodeGroup(uuid)

1059
1060
1061
1062
1063
1064
1065
  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodeGroupsInfo(self):
    """Return a (shallow) copy of the node group mapping.

    """
    return self._config_data.nodegroups.copy()

Guido Trotter's avatar
Guido Trotter committed
1066
1067
1068
1069
1070
1071
1072
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroupList(self):
    """Return the list of node group UUIDs.

    """
    return list(self._config_data.nodegroups)

1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroupMembersByNodes(self, nodes):
    """Get nodes which are member in the same nodegroups as the given nodes.

    """
    members = set()
    for node_name in nodes:
      group_uuid = self._UnlockedGetNodeInfo(node_name).group
      members.update(self._UnlockedGetNodeGroup(group_uuid).members)
    return frozenset(members)

1084
  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance, ec_id):
    """Add an instance to the config.

    This should be used after creating a new instance.

    @type instance: L{objects.Instance}
    @param instance: the instance object
    @type ec_id: string
    @param ec_id: execution context id, used when reserving a UUID for
                  the instance

    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      # log the disk layout for debugging/auditing purposes
      all_lvs = instance.MapLVsByNode()
      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)

    # refuse MAC addresses already used by any configured NIC
    all_macs = self._AllMACs()
    for nic in instance.nics:
      if nic.mac in all_macs:
        raise errors.ConfigurationError("Cannot add instance %s:"
                                        " MAC address '%s' already in use." %
                                        (instance.name, nic.mac))

    self._EnsureUUID(instance, ec_id)

    instance.serial_no = 1
    instance.ctime = instance.mtime = time.time()
    self._config_data.instances[instance.name] = instance
    self._config_data.cluster.serial_no += 1
    # the instance is committed now, so its temporary DRBD minor
    # reservations (from AllocateDRBDMinor) can be dropped
    self._UnlockedReleaseDRBDMinors(instance.name)
    self._WriteConfig()

1117
  def _EnsureUUID(self, item, ec_id):
    """Make sure a config object carries a valid, unused UUID.

    A missing UUID is generated in place; an existing one must not clash
    with any UUID already known (including temporary reservations).

    @param item: the instance or node to be checked
    @param ec_id: the execution context id for the uuid reservation

    """
    if not item.uuid:
      item.uuid = self._GenerateUniqueID(ec_id)
      return
    if item.uuid in self._AllIDs(include_temporary=True):
      raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
                                      " in use" % (item.name, item.uuid))
1129

1130
1131
  def _SetInstanceStatus(self, instance_name, status):
    """Update an instance's admin status, writing the config if it changed.

    @type instance_name: string
    @param instance_name: name of the instance to update
    @type status: bool
    @param status: the new value for C{admin_up}

    """
    assert isinstance(status, bool), \
           "Invalid status '%s' passed to SetInstanceStatus" % (status,)

    try:
      instance = self._config_data.instances[instance_name]
    except KeyError:
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
    # only touch serial number/mtime and write out when something changed
    if instance.admin_up == status:
      return
    instance.admin_up = status
    instance.serial_no += 1
    instance.mtime = time.time()
    self._WriteConfig()
Iustin Pop's avatar
Iustin Pop committed
1146

1147
  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Record in the configuration that an instance is running.

    """
    self._SetInstanceStatus(instance_name, True)
1153

1154
  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, instance_name):
    """Delete an instance from the configuration.

    """
    try:
      del self._config_data.instances[instance_name]
    except KeyError:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

1165
  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    @type old_name: string
    @param old_name: current name of the instance
    @type new_name: string
    @param new_name: new name for the instance

    """
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
    inst = self._config_data.instances[old_name]
    del self._config_data.instances[old_name]
    inst.name = new_name

    for disk in inst.disks:
      if disk.dev_type == constants.LD_FILE:
        # rename the file paths in logical and physical id
        # logical_id[1] is .../<storage_dir>/<instance>/<disk file>, so two
        # dirname calls strip the per-instance directory as well
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        disk_fname = "disk%s" % disk.iv_name.split("/")[1]
        disk.physical_id = disk.logical_id = (disk.logical_id[0],
                                              utils.PathJoin(file_storage_dir,
                                                             inst.name,
                                                             disk_fname))

    # Force update of ssconf files
    self._config_data.cluster.serial_no += 1

    # re-insert under the new name and persist everything in one write
    self._config_data.instances[inst.name] = inst
    self._WriteConfig()

1196
  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Record in the configuration that an instance is stopped.

    """
    self._SetInstanceStatus(instance_name, False)
Iustin Pop's avatar
Iustin Pop committed
1202