#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Configuration management for Ganeti

This module provides the interface to the Ganeti cluster configuration.

The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.

Currently, the data storage format is JSON. YAML was slow and consuming too
much memory.

"""

# pylint: disable-msg=R0904
# R0904: Too many public methods

import os
import random
import logging
import time

from ganeti import errors
from ganeti import locking
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
from ganeti import serializer
from ganeti import uidpool
from ganeti import netutils
from ganeti import runtime


_config_lock = locking.SharedLock("ConfigWriter")

# job id used for resource management at config upgrade time
_UPGRADE_CONFIG_JID = "jid-cfg-upgrade"


def _ValidateConfig(data):
  """Verifies that a configuration object looks valid.

  This only verifies the version of the configuration.

  @raise errors.ConfigurationError: if the version differs from what
      we expect

  """
  if data.version != constants.CONFIG_VERSION:
    raise errors.ConfigurationError("Cluster configuration version"
                                    " mismatch, got %s instead of %s" %
                                    (data.version,
                                     constants.CONFIG_VERSION))


class TemporaryReservationManager:
  """A temporary resource reservation manager.

  This is used to reserve resources in a job, before using them, making sure
  other jobs cannot get them in the meantime.

  """
  def __init__(self):
    self._ec_reserved = {}

  def Reserved(self, resource):
    for holder_reserved in self._ec_reserved.values():
      if resource in holder_reserved:
        return True
    return False

  def Reserve(self, ec_id, resource):
    if self.Reserved(resource):
      raise errors.ReservationError("Duplicate reservation for resource: %s." %
                                    (resource))
    if ec_id not in self._ec_reserved:
      self._ec_reserved[ec_id] = set([resource])
    else:
      self._ec_reserved[ec_id].add(resource)

  def DropECReservations(self, ec_id):
    if ec_id in self._ec_reserved:
      del self._ec_reserved[ec_id]

  def GetReserved(self):
    all_reserved = set()
    for holder_reserved in self._ec_reserved.values():
      all_reserved.update(holder_reserved)
    return all_reserved

  def Generate(self, existing, generate_one_fn, ec_id):
    """Generate a new resource of this type

    """
    assert callable(generate_one_fn)

    all_elems = self.GetReserved()
    all_elems.update(existing)
    retries = 64
    while retries > 0:
      new_resource = generate_one_fn()
      if new_resource is not None and new_resource not in all_elems:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Not able to generate new resource"
                                      " (last tried: %s)" % new_resource)
    self.Reserve(ec_id, new_resource)
    return new_resource
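  # Illustrative usage sketch for TemporaryReservationManager (not part of the
  # module logic; the job id below is made up):
  #
  #   trm = TemporaryReservationManager()
  #   trm.Reserve("job-42", "aa:00:00:11:22:33")
  #   assert trm.Reserved("aa:00:00:11:22:33")
  #   trm.DropECReservations("job-42")   # frees everything held by "job-42"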


class ConfigWriter:
  """The interface to the cluster configuration.

  @ivar _temporary_lvs: reservation manager for temporary LVs
  @ivar _all_rms: a list of all temporary reservation managers

  """
  def __init__(self, cfg_file=None, offline=False, _getents=runtime.GetEnts):
    self.write_count = 0
    self._lock = _config_lock
    self._config_data = None
    self._offline = offline
    if cfg_file is None:
      self._cfg_file = constants.CLUSTER_CONF_FILE
    else:
      self._cfg_file = cfg_file
    self._getents = _getents
    self._temporary_ids = TemporaryReservationManager()
    self._temporary_drbds = {}
    self._temporary_macs = TemporaryReservationManager()
    self._temporary_secrets = TemporaryReservationManager()
    self._temporary_lvs = TemporaryReservationManager()
    self._all_rms = [self._temporary_ids, self._temporary_macs,
                     self._temporary_secrets, self._temporary_lvs]
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = netutils.Hostname.GetSysName()
    self._last_cluster_serial = -1
    self._OpenConfig()

  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Check if the cluster is configured.

    """
    return os.path.exists(constants.CLUSTER_CONF_FILE)

  def _GenerateOneMAC(self):
    """Generate one mac address

    """
    prefix = self._config_data.cluster.mac_prefix
    byte1 = random.randrange(0, 256)
    byte2 = random.randrange(0, 256)
    byte3 = random.randrange(0, 256)
    mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
    return mac
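  # Example (illustrative values only): with a cluster mac_prefix such as
  # "aa:00:00", _GenerateOneMAC may return "aa:00:00:3f:91:0c"; GenerateMAC
  # below keeps generating until it finds a value that is neither in the
  # configuration nor temporarily reserved.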

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self, ec_id):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    """
    existing = self._AllMACs()
    return self._temporary_macs.Generate(existing, self._GenerateOneMAC, ec_id)

  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveMAC(self, mac, ec_id):
    """Reserve a MAC for an instance.

    This only checks instances managed by this cluster, it does not
    check for potential collisions elsewhere.

    """
    all_macs = self._AllMACs()
    if mac in all_macs:
      raise errors.ReservationError("mac already in use")
    else:
      self._temporary_macs.Reserve(ec_id, mac)

  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveLV(self, lv_name, ec_id):
    """Reserve an VG/LV pair for an instance.

    @type lv_name: string
    @param lv_name: the logical volume name to reserve

    """
    all_lvs = self._AllLVs()
    if lv_name in all_lvs:
      raise errors.ReservationError("LV already in use")
    else:
      self._temporary_lvs.Reserve(ec_id, lv_name)

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateDRBDSecret(self, ec_id):
    """Generate a DRBD secret.

    This checks the current disks for duplicates.

    """
    return self._temporary_secrets.Generate(self._AllDRBDSecrets(),
                                            utils.GenerateSecret,
                                            ec_id)

  def _AllLVs(self):
    """Compute the list of all LVs.

    """
    lvnames = set()
    for instance in self._config_data.instances.values():
      node_data = instance.MapLVsByNode()
      for lv_list in node_data.values():
        lvnames.update(lv_list)
    return lvnames

  def _AllIDs(self, include_temporary):
    """Compute the list of all UUIDs and names we have.

    @type include_temporary: boolean
    @param include_temporary: whether to include the _temporary_ids set
    @rtype: set
    @return: a set of IDs

    """
    existing = set()
    if include_temporary:
      existing.update(self._temporary_ids.GetReserved())
    existing.update(self._AllLVs())
    existing.update(self._config_data.instances.keys())
    existing.update(self._config_data.nodes.keys())
    existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid])
    return existing

  def _GenerateUniqueID(self, ec_id):
    """Generate a unique UUID.

    This checks the current node, instances and disk names for
    duplicates.

    @rtype: string
    @return: the unique id

    """
    existing = self._AllIDs(include_temporary=False)
    return self._temporary_ids.Generate(existing, utils.NewUUID, ec_id)

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, ec_id):
    """Generate a unique ID.

    This is just a wrapper over the unlocked version.

    @type ec_id: string
    @param ec_id: unique id for the job to reserve the id to

    """
    return self._GenerateUniqueID(ec_id)

  def _AllMACs(self):
    """Return all MACs present in the config.

    @rtype: list
    @return: the list of all MACs

    """
    result = []
    for instance in self._config_data.instances.values():
      for nic in instance.nics:
        result.append(nic.mac)

    return result

  def _AllDRBDSecrets(self):
    """Return all DRBD secrets present in the config.

    @rtype: list
    @return: the list of all DRBD secrets

    """
    def helper(disk, result):
      """Recursively gather secrets from this disk."""
      if disk.dev_type == constants.DT_DRBD8:
        result.append(disk.logical_id[5])
      if disk.children:
        for child in disk.children:
          helper(child, result)

    result = []
    for instance in self._config_data.instances.values():
      for disk in instance.disks:
        helper(disk, result)

    return result

  def _CheckDiskIDs(self, disk, l_ids, p_ids):
    """Compute duplicate disk IDs

    @type disk: L{objects.Disk}
    @param disk: the disk at which to start searching
    @type l_ids: list
    @param l_ids: list of current logical ids
    @type p_ids: list
    @param p_ids: list of current physical ids
    @rtype: list
    @return: a list of error messages

    """
    result = []
    if disk.logical_id is not None:
      if disk.logical_id in l_ids:
        result.append("duplicate logical id %s" % str(disk.logical_id))
      else:
        l_ids.append(disk.logical_id)
    if disk.physical_id is not None:
      if disk.physical_id in p_ids:
        result.append("duplicate physical id %s" % str(disk.physical_id))
      else:
        p_ids.append(disk.physical_id)

    if disk.children:
      for child in disk.children:
        result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
    return result

  def _UnlockedVerifyConfig(self):
    """Verify function.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    result = []
    seen_macs = []
    ports = {}
    data = self._config_data
    seen_lids = []
    seen_pids = []

    # global cluster checks
    if not data.cluster.enabled_hypervisors:
      result.append("enabled hypervisors list doesn't have any entries")
    invalid_hvs = set(data.cluster.enabled_hypervisors) - constants.HYPER_TYPES
    if invalid_hvs:
      result.append("enabled hypervisors contains invalid entries: %s" %
                    invalid_hvs)
    missing_hvp = (set(data.cluster.enabled_hypervisors) -
                   set(data.cluster.hvparams.keys()))
    if missing_hvp:
      result.append("hypervisor parameters missing for the enabled"
                    " hypervisor(s) %s" % utils.CommaJoin(missing_hvp))

    if data.cluster.master_node not in data.nodes:
      result.append("cluster has invalid primary node '%s'" %
                    data.cluster.master_node)

    # per-instance checks
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.name != instance_name:
        result.append("instance '%s' is indexed by wrong name '%s'" %
                      (instance.name, instance_name))
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)

      # gather the drbd ports for duplicate checks
      for dsk in instance.disks:
        if dsk.dev_type in constants.LDS_DRBD:
          tcp_port = dsk.logical_id[2]
          if tcp_port not in ports:
            ports[tcp_port] = []
          ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
      # gather network port reservation
      net_port = getattr(instance, "network_port", None)
      if net_port is not None:
        if net_port not in ports:
          ports[net_port] = []
        ports[net_port].append((instance.name, "network port"))

      # instance disk verify
      for idx, disk in enumerate(instance.disks):
        result.extend(["instance '%s' disk %d error: %s" %
                       (instance.name, idx, msg) for msg in disk.Verify()])
        result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))

    # cluster-wide pool of free ports
    for free_port in data.cluster.tcpudp_port_pool:
      if free_port not in ports:
        ports[free_port] = []
      ports[free_port].append(("cluster", "port marked as free"))

    # compute tcp/udp duplicate ports
    keys = ports.keys()
    keys.sort()
    for pnum in keys:
      pdata = ports[pnum]
      if len(pdata) > 1:
        txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
        result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

    # highest used tcp port check
    if keys:
      if keys[-1] > data.cluster.highest_used_port:
        result.append("Highest used port mismatch, saved %s, computed %s" %
                      (data.cluster.highest_used_port, keys[-1]))

    if not data.nodes[data.cluster.master_node].master_candidate:
      result.append("Master node is not a master candidate")

    # master candidate checks
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
    if mc_now < mc_max:
      result.append("Not enough master candidates: actual %d, target %d" %
                    (mc_now, mc_max))

    # node checks
    for node_name, node in data.nodes.items():
      if node.name != node_name:
        result.append("Node '%s' is indexed by wrong name '%s'" %
                      (node.name, node_name))
      if [node.master_candidate, node.drained, node.offline].count(True) > 1:
        result.append("Node %s state is invalid: master_candidate=%s,"
                      " drain=%s, offline=%s" %
                      (node.name, node.master_candidate, node.drained,
                       node.offline))

    # nodegroups checks
    nodegroups_names = set()
    for nodegroup_uuid in data.nodegroups:
      nodegroup = data.nodegroups[nodegroup_uuid]
      if nodegroup.uuid != nodegroup_uuid:
        result.append("nodegroup '%s' (uuid: '%s') indexed by wrong uuid '%s'"
                      % (nodegroup.name, nodegroup.uuid, nodegroup_uuid))
      if nodegroup.name in nodegroups_names:
        result.append("duplicate nodegroup name '%s'" % nodegroup.name)
      else:
        nodegroups_names.add(nodegroup.name)

    # drbd minors check
    _, duplicates = self._UnlockedComputeDRBDMap()
    for node, minor, instance_a, instance_b in duplicates:
      result.append("DRBD minor %d on node %s is assigned twice to instances"
                    " %s and %s" % (minor, node, instance_a, instance_b))

    # IP checks
    default_nicparams = data.cluster.nicparams[constants.PP_DEFAULT]
    ips = {}

    def _AddIpAddress(ip, name):
      ips.setdefault(ip, []).append(name)

    _AddIpAddress(data.cluster.master_ip, "cluster_ip")

    for node in data.nodes.values():
      _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name)
      if node.secondary_ip != node.primary_ip:
        _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name)

    for instance in data.instances.values():
      for idx, nic in enumerate(instance.nics):
        if nic.ip is None:
          continue

        nicparams = objects.FillDict(default_nicparams, nic.nicparams)
        nic_mode = nicparams[constants.NIC_MODE]
        nic_link = nicparams[constants.NIC_LINK]

        if nic_mode == constants.NIC_MODE_BRIDGED:
          link = "bridge:%s" % nic_link
        elif nic_mode == constants.NIC_MODE_ROUTED:
          link = "route:%s" % nic_link
        else:
          raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode)

        _AddIpAddress("%s/%s" % (link, nic.ip),
                      "instance:%s/nic:%d" % (instance.name, idx))

    for ip, owners in ips.items():
      if len(owners) > 1:
        result.append("IP address %s is used by multiple owners: %s" %
                      (ip, utils.CommaJoin(owners)))

    return result

  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Verify function.

    This is just a wrapper over L{_UnlockedVerifyConfig}.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    return self._UnlockedVerifyConfig()
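  # Sketch of the VerifyConfig contract (cfg stands for an already opened
  # ConfigWriter instance): an empty list means the configuration is
  # consistent, otherwise every entry is a human-readable message, e.g.
  #
  #   errs = cfg.VerifyConfig()
  #   if errs:
  #     logging.error("Configuration errors: %s", utils.CommaJoin(errs))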

  def _UnlockedSetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    This function is for internal use, when the config lock is already held.

    """
    if disk.children:
      for child in disk.children:
        self._UnlockedSetDiskID(child, node_name)

    if disk.logical_id is None and disk.physical_id is not None:
      return
    if disk.dev_type == constants.LD_DRBD8:
      pnode, snode, port, pminor, sminor, secret = disk.logical_id
      if node_name not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        node_name)
      pnode_info = self._UnlockedGetNodeInfo(pnode)
      snode_info = self._UnlockedGetNodeInfo(snode)
      if pnode_info is None or snode_info is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(disk))
      p_data = (pnode_info.secondary_ip, port)
      s_data = (snode_info.secondary_ip, port)
      if pnode == node_name:
        disk.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        disk.physical_id = s_data + p_data + (sminor, secret)
    else:
      disk.physical_id = disk.logical_id
    return
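  # Note on the DRBD physical_id assembled above (illustrative): seen from
  # node_name it always reads
  #   (own_secondary_ip, port, peer_secondary_ip, port, own_minor, secret)
  # i.e. the local endpoint comes first, followed by the peer endpoint.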

  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    """
    return self._UnlockedSetDiskID(disk, node_name)

  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")

    self._config_data.cluster.tcpudp_port_pool.add(port)
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    """
    return self._config_data.cluster.tcpudp_port_pool.copy()

  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    # If there are TCP/IP ports configured, we use them first.
    if self._config_data.cluster.tcpudp_port_pool:
      port = self._config_data.cluster.tcpudp_port_pool.pop()
    else:
      port = self._config_data.cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      self._config_data.cluster.highest_used_port = port

    self._WriteConfig()
    return port
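  # Illustrative example: with an empty tcpudp_port_pool and
  # highest_used_port == 11005, AllocatePort() returns 11006 and bumps
  # highest_used_port; if the pool contained e.g. 11017, that port would be
  # popped from the pool and returned instead.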

  def _UnlockedComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    @rtype: (dict, list)
    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list), and a list of duplicates; if the duplicates
        list is not empty, the configuration is corrupted and its caller
        should raise an exception

    """
    def _AppendUsedPorts(instance_name, disk, used):
      duplicates = []
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
        node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
        for node, port in ((node_a, minor_a), (node_b, minor_b)):
          assert node in used, ("Node '%s' of instance '%s' not found"
                                " in node list" % (node, instance_name))
          if port in used[node]:
            duplicates.append((node, port, instance_name, used[node][port]))
          else:
            used[node][port] = instance_name
      if disk.children:
        for child in disk.children:
          duplicates.extend(_AppendUsedPorts(instance_name, child, used))
      return duplicates

    duplicates = []
    my_dict = dict((node, {}) for node in self._config_data.nodes)
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        duplicates.extend(_AppendUsedPorts(instance.name, disk, my_dict))
    for (node, minor), instance in self._temporary_drbds.iteritems():
      if minor in my_dict[node] and my_dict[node][minor] != instance:
        duplicates.append((node, minor, instance, my_dict[node][minor]))
      else:
        my_dict[node][minor] = instance
    return my_dict, duplicates
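  # Sketch of the returned structure (hypothetical names/values):
  #   my_dict    = {"node1.example.com": {0: "inst1", 1: "inst2"},
  #                 "node2.example.com": {0: "inst1"}}
  #   duplicates = []   # non-empty only if the configuration is corrupted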

  @locking.ssynchronized(_config_lock)
  def ComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    This is just a wrapper over L{_UnlockedComputeDRBDMap}.

    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list).

    """
    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    return d_map

  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    @type instance: string
    @param instance: the instance for which we allocate minors

    """
    assert isinstance(instance, basestring), \
           "Invalid argument '%s' passed to AllocateDRBDMinor" % instance

    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    result = []
    for nname in nodes:
      ndata = d_map[nname]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = instance
        self._temporary_drbds[(nname, 0)] = instance
        continue
      keys = ndata.keys()
      keys.sort()
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      # double-check minor against current instances
      assert minor not in d_map[nname], \
             ("Attempt to reuse allocated DRBD minor %d on node %s,"
              " already allocated to instance %s" %
              (minor, nname, d_map[nname][minor]))
      ndata[minor] = instance
      # double-check minor against reservation
      r_key = (nname, minor)
      assert r_key not in self._temporary_drbds, \
             ("Attempt to reuse reserved DRBD minor %d on node %s,"
              " reserved for instance %s" %
              (minor, nname, self._temporary_drbds[r_key]))
      self._temporary_drbds[r_key] = instance
      result.append(minor)
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result

  def _UnlockedReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    assert isinstance(instance, basestring), \
           "Invalid argument passed to ReleaseDRBDMinors"
    for key, name in self._temporary_drbds.items():
      if name == instance:
        del self._temporary_drbds[key]

  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    This should be called on the error paths, on the success paths
    it's automatically called by the ConfigWriter add and update
    functions.

    This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    self._UnlockedReleaseDRBDMinors(instance)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetConfigVersion(self):
    """Get the configuration version.

    @return: Config version

    """
    return self._config_data.version

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterName(self):
    """Get cluster name.

    @return: Cluster name

    """
    return self._config_data.cluster.cluster_name

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNode(self):
    """Get the hostname of the master node for this cluster.

    @return: Master hostname

    """
    return self._config_data.cluster.master_node

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterIP(self):
    """Get the IP of the master node for this cluster.

    @return: Master IP

    """
    return self._config_data.cluster.master_ip

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetdev(self):
    """Get the master network device for this cluster.

    """
    return self._config_data.cluster.master_netdev

  @locking.ssynchronized(_config_lock, shared=1)
  def GetFileStorageDir(self):
    """Get the file storage dir for this cluster.

    """
    return self._config_data.cluster.file_storage_dir

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHypervisorType(self):
    """Get the hypervisor type for this cluster.

    """
    return self._config_data.cluster.enabled_hypervisors[0]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the rsa hostkey from the config.

    @rtype: string
    @return: the rsa hostkey

    """
    return self._config_data.cluster.rsahostkeypub

  @locking.ssynchronized(_config_lock, shared=1)
  def GetDefaultIAllocator(self):
    """Get the default instance allocator for this cluster.

    """
    return self._config_data.cluster.default_iallocator

  @locking.ssynchronized(_config_lock, shared=1)
  def GetPrimaryIPFamily(self):
    """Get cluster primary ip family.

    @return: primary ip family

    """
    return self._config_data.cluster.primary_ip_family

  @locking.ssynchronized(_config_lock, shared=1)
  def LookupNodeGroup(self, target):
    """Lookup a node group.

    @type target: string or None
    @param  target: group name or uuid or None to look for the default
    @rtype: string
    @return: nodegroup uuid
    @raises errors.OpPrereqError: when the target group cannot be found

    """
    if target is None:
      if len(self._config_data.nodegroups) != 1:
        raise errors.OpPrereqError("More than one nodegroup exists. Target"
                                   " group must be specified explicitely.")
      else:
        return self._config_data.nodegroups.keys()[0]
    if target in self._config_data.nodegroups:
      return target
    for nodegroup in self._config_data.nodegroups.values():
      if nodegroup.name == target:
        return nodegroup.uuid
    raise errors.OpPrereqError("Nodegroup '%s' not found" % target)
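  # Resolution rules of LookupNodeGroup above, illustrated (cfg being a
  # ConfigWriter instance, names made up):
  #
  #   cfg.LookupNodeGroup(None)       # uuid of the only group, else an error
  #   cfg.LookupNodeGroup("mygroup")  # uuid of the group named "mygroup"
  #   cfg.LookupNodeGroup(some_uuid)  # returned unchanged if it is known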

  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance, ec_id):
    """Add an instance to the config.

    This should be used after creating a new instance.

    @type instance: L{objects.Instance}
    @param instance: the instance object

    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      all_lvs = instance.MapLVsByNode()
      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)

    all_macs = self._AllMACs()
    for nic in instance.nics:
      if nic.mac in all_macs:
        raise errors.ConfigurationError("Cannot add instance %s:"
                                        " MAC address '%s' already in use." %
                                        (instance.name, nic.mac))

    self._EnsureUUID(instance, ec_id)

    instance.serial_no = 1
    instance.ctime = instance.mtime = time.time()
    self._config_data.instances[instance.name] = instance
    self._config_data.cluster.serial_no += 1
    self._UnlockedReleaseDRBDMinors(instance.name)
    self._WriteConfig()

  def _EnsureUUID(self, item, ec_id):
    """Ensures a given object has a valid UUID.

    @param item: the instance or node to be checked
    @param ec_id: the execution context id for the uuid reservation

    """
    if not item.uuid:
      item.uuid = self._GenerateUniqueID(ec_id)
    elif item.uuid in self._AllIDs(include_temporary=True):
      raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
                                      " in use" % (item.name, item.uuid))

  def _SetInstanceStatus(self, instance_name, status):
    """Set the instance's status to a given value.

    """
    assert isinstance(status, bool), \
           "Invalid status '%s' passed to SetInstanceStatus" % (status,)

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
    instance = self._config_data.instances[instance_name]
    if instance.admin_up != status:
      instance.admin_up = status
      instance.serial_no += 1
      instance.mtime = time.time()
      self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Mark the instance status to up in the config.

    """
    self._SetInstanceStatus(instance_name, True)

  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, instance_name):
    """Remove the instance from the configuration.

    """
    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    del self._config_data.instances[instance_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
    inst = self._config_data.instances[old_name]
    del self._config_data.instances[old_name]
    inst.name = new_name

    for disk in inst.disks:
      if disk.dev_type == constants.LD_FILE:
        # rename the file paths in logical and physical id
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        disk.physical_id = disk.logical_id = (disk.logical_id[0],
                                              utils.PathJoin(file_storage_dir,
                                                             inst.name,
                                                             disk.iv_name))

    self._config_data.instances[inst.name] = inst
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Mark the status of an instance to down in the configuration.

    """
    self._SetInstanceStatus(instance_name, False)

  def _UnlockedGetInstanceList(self):
    """Get the list of instances.

    This function is for internal use, when the config lock is already held.

    """
    return self._config_data.instances.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances.

    @return: array of instances, ex. ['instance2.example.com',
        'instance1.example.com']

    """
    return self._UnlockedGetInstanceList()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    return utils.MatchNameComponent(short_name,
                                    self._config_data.instances.keys(),
                                    case_sensitive=False)

  def _UnlockedGetInstanceInfo(self, instance_name):
    """Returns information about an instance.

    This function is for internal use, when the config lock is already held.

    """
    if instance_name not in self._config_data.instances:
      return None

    return self._config_data.instances[instance_name]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfo(self, instance_name):
    """Returns information about an instance.

    It takes the information from the configuration file. Other information of
    an instance is taken from the live systems.

    @param instance_name: name of the instance, e.g.
        I{instance1.example.com}

    @rtype: L{objects.Instance}
    @return: the instance object

    """
    return self._UnlockedGetInstanceInfo(instance_name)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllInstancesInfo(self):
    """Get the configuration of all instances.

    @rtype: dict
    @return: dict of (instance, instance_info), where instance_info is what
              GetInstanceInfo would return for the instance

    """
    my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
                    for instance in self._UnlockedGetInstanceList()])
    return my_dict

  @locking.ssynchronized(_config_lock)
  def AddNode(self, node, ec_id):
    """Add a node to the configuration.

    @type node: L{objects.Node}
    @param node: a Node instance

    """
    logging.info("Adding node %s to configuration", node.name)

    self._EnsureUUID(node, ec_id)

    node.serial_no = 1
    node.ctime = node.mtime = time.time()
    self._UnlockedAddNodeToGroup(node.name, node.nodegroup)
    self._config_data.nodes[node.name] = node
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RemoveNode(self, node_name):
    """Remove a node from the configuration.

    """
    logging.info("Removing node %s from configuration", node_name)

    if node_name not in self._config_data.nodes:
      raise errors.ConfigurationError("Unknown node '%s'" % node_name)

    self._UnlockedRemoveNodeFromGroup(self._config_data.nodes[node_name])
    del self._config_data.nodes[node_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    return utils.MatchNameComponent(short_name,
                                    self._config_data.nodes.keys(),
                                    case_sensitive=False)

  def _UnlockedGetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This function is for internal use, when the config lock is already
    held.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: L{objects.Node}
    @return: the node object

    """
    if node_name not in self._config_data.nodes:
      return None

    return self._config_data.nodes[node_name]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This is just a locked wrapper over L{_UnlockedGetNodeInfo}.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: L{objects.Node}
    @return: the node object

    """
    return self._UnlockedGetNodeInfo(node_name)

  def _UnlockedGetNodeList(self):
    """Return the list of nodes which are in the configuration.

    This function is for internal use, when the config lock is already
    held.

    @rtype: list

    """
    return self._config_data.nodes.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeList(self):
    """Return the list of nodes which are in the configuration.

    """
    return self._UnlockedGetNodeList()

  def _UnlockedGetOnlineNodeList(self):
    """Return the list of nodes which are online.

    """
    all_nodes = [self._UnlockedGetNodeInfo(node)
                 for node in self._UnlockedGetNodeList()]
    return [node.name for node in all_nodes if not node.offline]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetOnlineNodeList(self):
    """Return the list of nodes which are online.

    """
    return self._UnlockedGetOnlineNodeList()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodesInfo(self):
    """Get the configuration of all nodes.

    @rtype: dict
    @return: dict of (node, node_info), where node_info is what
              GetNodeInfo would return for the node

    """
    my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
                    for node in self._UnlockedGetNodeList()])
    return my_dict