#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Configuration management for Ganeti

This module provides the interface to the Ganeti cluster configuration.

The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.

Currently, the data storage format is JSON. YAML was slow and consuming too
much memory.

"""

# pylint: disable-msg=R0904
# R0904: Too many public methods

38
import os
import random
39
import logging
40
import time
Iustin Pop's avatar
Iustin Pop committed
41
42

from ganeti import errors
43
from ganeti import locking
Iustin Pop's avatar
Iustin Pop committed
44
45
46
47
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
48
from ganeti import serializer
Balazs Lecz's avatar
Balazs Lecz committed
49
from ganeti import uidpool
50
from ganeti import netutils
51
from ganeti import runtime
52
53


54
_config_lock = locking.SharedLock("ConfigWriter")
55

56
# job id used for resource management at config upgrade time
Michael Hanselmann's avatar
Michael Hanselmann committed
57
_UPGRADE_CONFIG_JID = "jid-cfg-upgrade"
58

59

Michael Hanselmann's avatar
Michael Hanselmann committed
60
def _ValidateConfig(data):
  """Check that a configuration object looks valid.

  Only the version field of the configuration is verified here.

  @raise errors.ConfigVersionMismatch: if the version differs from the
      one this code expects

  """
  expected = constants.CONFIG_VERSION
  if data.version == expected:
    return
  raise errors.ConfigVersionMismatch(expected, data.version)
Iustin Pop's avatar
Iustin Pop committed
71

72

Guido Trotter's avatar
Guido Trotter committed
73
74
75
76
77
78
79
80
81
82
83
class TemporaryReservationManager:
  """A temporary resource reservation manager.

  This is used to reserve resources in a job, before using them, making sure
  other jobs cannot get them in the meantime.

  """
  def __init__(self):
    # maps execution context (ec) id -> set of resources reserved by it
    self._ec_reserved = {}

  def Reserved(self, resource):
    """Tell whether the given resource is reserved by any execution context.

    """
    for holder_reserved in self._ec_reserved.values():
      if resource in holder_reserved:
        return True
    return False

  def Reserve(self, ec_id, resource):
    """Reserve a resource on behalf of the given execution context.

    @raise errors.ReservationError: if the resource is already reserved

    """
    if self.Reserved(resource):
      raise errors.ReservationError("Duplicate reservation for resource '%s'"
                                    % str(resource))
    if ec_id not in self._ec_reserved:
      self._ec_reserved[ec_id] = set([resource])
    else:
      self._ec_reserved[ec_id].add(resource)

  def DropECReservations(self, ec_id):
    """Drop all reservations made by the given execution context.

    """
    if ec_id in self._ec_reserved:
      del self._ec_reserved[ec_id]

  def GetReserved(self):
    """Return the set of all currently reserved resources.

    """
    all_reserved = set()
    for holder_reserved in self._ec_reserved.values():
      all_reserved.update(holder_reserved)
    return all_reserved

  def Generate(self, existing, generate_one_fn, ec_id):
    """Generate a new resource of this type

    @param existing: iterable of resources already in use, to be avoided
    @param generate_one_fn: zero-argument callable returning one candidate
        resource (or None for "no candidate this time")
    @param ec_id: the execution context that will hold the reservation
    @return: the newly generated and reserved resource

    """
    assert callable(generate_one_fn)

    all_elems = self.GetReserved()
    all_elems.update(existing)
    retries = 64
    while retries > 0:
      new_resource = generate_one_fn()
      if new_resource is not None and new_resource not in all_elems:
        break
      # FIX: the retry counter was never decremented, so a generator that
      # kept producing None or duplicates would loop forever and the
      # "too many retries" error below was unreachable
      retries -= 1
    else:
      raise errors.ConfigurationError("Not able generate new resource"
                                      " (last tried: %s)" % new_resource)
    self.Reserve(ec_id, new_resource)
    return new_resource


Iustin Pop's avatar
Iustin Pop committed
128
class ConfigWriter:
129
  """The interface to the cluster configuration.
Iustin Pop's avatar
Iustin Pop committed
130

131
132
133
  @ivar _temporary_lvs: reservation manager for temporary LVs
  @ivar _all_rms: a list of all temporary reservation managers

134
  """
135
136
  def __init__(self, cfg_file=None, offline=False, _getents=runtime.GetEnts,
               accept_foreign=False):
    """Initialize the config writer and load the configuration file.

    @param cfg_file: path to the configuration file; if None, the
        default C{constants.CLUSTER_CONF_FILE} is used
    @param offline: flag stored on the instance; presumably suppresses
        distribution of config changes to other nodes -- TODO confirm
        against _WriteConfig, which is not visible here
    @param _getents: callable providing runtime user/group entities;
        parameter exists so tests can override it
    @param accept_foreign: passed through to _OpenConfig; presumably
        allows opening a config whose master is another node -- verify

    """
    self.write_count = 0
    self._lock = _config_lock
    self._config_data = None
    self._offline = offline
    if cfg_file is None:
      self._cfg_file = constants.CLUSTER_CONF_FILE
    else:
      self._cfg_file = cfg_file
    self._getents = _getents
    # reservation managers for the various temporary resources
    self._temporary_ids = TemporaryReservationManager()
    self._temporary_drbds = {}
    self._temporary_macs = TemporaryReservationManager()
    self._temporary_secrets = TemporaryReservationManager()
    self._temporary_lvs = TemporaryReservationManager()
    # _temporary_drbds is a plain dict, not a reservation manager, so it
    # is intentionally absent from this list
    self._all_rms = [self._temporary_ids, self._temporary_macs,
                     self._temporary_secrets, self._temporary_lvs]
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = netutils.Hostname.GetSysName()
    self._last_cluster_serial = -1
    self._cfg_id = None
    # reads and validates the configuration file (side effect: I/O)
    self._OpenConfig(accept_foreign)
Iustin Pop's avatar
Iustin Pop committed
161
162
163
164
165
166
167
168
169

  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Tell whether a cluster configuration file exists on this node.

    """
    return os.path.exists(constants.CLUSTER_CONF_FILE)

170
171
172
173
174
175
176
177
178
179
180
  def _GenerateOneMAC(self):
    """Return one random MAC address using the cluster's MAC prefix.

    """
    prefix = self._config_data.cluster.mac_prefix
    octets = [random.randrange(0, 256) for _ in range(3)]
    return "%s:%02x:%02x:%02x" % (prefix, octets[0], octets[1], octets[2])

181
182
183
184
185
186
187
188
189
190
191
192
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNdParams(self, node):
    """Get the node params populated with cluster defaults.

    @type node: L{object.Node}
    @param node: The node we want to know the params for
    @return: A dict with the filled in node params

    """
    group = self._UnlockedGetNodeGroup(node.group)
    return self._config_data.cluster.FillND(node, group)

193
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self, ec_id):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    @type ec_id: string
    @param ec_id: unique id for the job to reserve the MAC for
    @rtype: string
    @return: the generated MAC address

    """
    existing = self._AllMACs()
    # FIX: the generated MAC must be tracked by the MAC reservation
    # manager (as ReserveMAC does), not by the unique-ID manager
    # (_temporary_ids); otherwise a MAC generated here would not
    # conflict-check against MACs reserved via ReserveMAC and vice versa
    return self._temporary_macs.Generate(existing, self._GenerateOneMAC, ec_id)
Iustin Pop's avatar
Iustin Pop committed
202

203
  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveMAC(self, mac, ec_id):
    """Reserve a MAC for an instance.

    This only checks instances managed by this cluster, it does not
    check for potential collisions elsewhere.

    @type mac: string
    @param mac: the MAC address to reserve
    @type ec_id: string
    @param ec_id: unique id for the job to reserve the MAC for
    @raise errors.ReservationError: if the MAC is already used by an
        instance of this cluster

    """
    all_macs = self._AllMACs()
    if mac in all_macs:
      raise errors.ReservationError("mac already in use")
    else:
      # FIX: TemporaryReservationManager.Reserve takes (ec_id, resource);
      # the arguments were passed swapped, registering the MAC as an
      # execution context id and the ec_id as the reserved resource
      self._temporary_macs.Reserve(ec_id, mac)
216

217
218
219
220
221
222
223
224
225
226
227
228
229
230
  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveLV(self, lv_name, ec_id):
    """Reserve an VG/LV pair for an instance.

    @type lv_name: string
    @param lv_name: the logical volume name to reserve
    @type ec_id: string
    @param ec_id: unique id for the job to reserve the LV for
    @raise errors.ReservationError: if the LV is already used by an
        instance of this cluster

    """
    all_lvs = self._AllLVs()
    if lv_name in all_lvs:
      raise errors.ReservationError("LV already in use")
    else:
      # FIX: TemporaryReservationManager.Reserve takes (ec_id, resource);
      # the arguments were passed swapped, registering the LV name as an
      # execution context id and the ec_id as the reserved resource
      self._temporary_lvs.Reserve(ec_id, lv_name)

231
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateDRBDSecret(self, ec_id):
    """Generate a DRBD secret.

    This checks the current disks for duplicates.

    @type ec_id: string
    @param ec_id: unique id for the job to reserve the secret for

    """
    in_use = self._AllDRBDSecrets()
    return self._temporary_secrets.Generate(in_use,
                                            utils.GenerateSecret,
                                            ec_id)
Michael Hanselmann's avatar
Michael Hanselmann committed
241

242
  def _AllLVs(self):
    """Compute the set of all LV names used by instance disks.

    """
    found = set()
    for inst in self._config_data.instances.values():
      for lv_list in inst.MapLVsByNode().values():
        found.update(lv_list)
    return found

253
254
255
256
257
258
259
260
261
262
263
  def _AllIDs(self, include_temporary):
    """Compute the list of all UUIDs and names we have.

    @type include_temporary: boolean
    @param include_temporary: whether to include the _temporary_ids set
    @rtype: set
    @return: a set of IDs

    """
    existing = set()
    if include_temporary:
      existing.update(self._temporary_ids.GetReserved())
    existing.update(self._AllLVs())
    cfg = self._config_data
    existing.update(cfg.instances.keys())
    existing.update(cfg.nodes.keys())
    existing.update(obj.uuid for obj in self._AllUUIDObjects() if obj.uuid)
    return existing

271
  def _GenerateUniqueID(self, ec_id):
    """Generate an unique UUID.

    This checks the current node, instances and disk names for
    duplicates.

    @rtype: string
    @return: the unique id

    """
    in_use = self._AllIDs(include_temporary=False)
    return self._temporary_ids.Generate(in_use, utils.NewUUID, ec_id)
283

284
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, ec_id):
    """Generate an unique ID, under the shared config lock.

    This is just a wrapper over the unlocked version.

    @type ec_id: string
    @param ec_id: unique id for the job to reserve the id to

    """
    return self._GenerateUniqueID(ec_id)
295

Iustin Pop's avatar
Iustin Pop committed
296
297
298
  def _AllMACs(self):
    """Return all MACs present in the config.

    @rtype: list
    @return: the list of all MACs

    """
    return [nic.mac
            for instance in self._config_data.instances.values()
            for nic in instance.nics]

310
311
312
  def _AllDRBDSecrets(self):
    """Return all DRBD secrets present in the config.

    @rtype: list
    @return: the list of all DRBD secrets

    """
    def _RecurseSecrets(disk, accumulator):
      """Recursively gather secrets from this disk and its children."""
      if disk.dev_type == constants.DT_DRBD8:
        accumulator.append(disk.logical_id[5])
      for child in disk.children or []:
        _RecurseSecrets(child, accumulator)

    secrets = []
    for instance in self._config_data.instances.values():
      for disk in instance.disks:
        _RecurseSecrets(disk, secrets)

    return secrets

332
333
334
335
336
337
338
339
340
341
342
343
344
345
  def _CheckDiskIDs(self, disk, l_ids, p_ids):
    """Compute duplicate disk IDs

    @type disk: L{objects.Disk}
    @param disk: the disk at which to start searching
    @type l_ids: list
    @param l_ids: list of current logical ids
    @type p_ids: list
    @param p_ids: list of current physical ids
    @rtype: list
    @return: a list of error messages

    """
    result = []
    lid = disk.logical_id
    if lid is not None:
      if lid in l_ids:
        result.append("duplicate logical id %s" % str(lid))
      else:
        l_ids.append(lid)
    pid = disk.physical_id
    if pid is not None:
      if pid in p_ids:
        result.append("duplicate physical id %s" % str(pid))
      else:
        p_ids.append(pid)

    for child in disk.children or []:
      result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
    return result

362
  def _UnlockedVerifyConfig(self):
    """Verify function.

    Checks the whole in-memory configuration for inconsistencies and
    accumulates human-readable error messages; it never raises for a
    broken config (only for a programming error in NIC modes).

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    result = []
    seen_macs = []
    # maps port number -> list of (owner, description) for duplicate checks
    ports = {}
    data = self._config_data
    seen_lids = []
    seen_pids = []

    # global cluster checks
    if not data.cluster.enabled_hypervisors:
      result.append("enabled hypervisors list doesn't have any entries")
    invalid_hvs = set(data.cluster.enabled_hypervisors) - constants.HYPER_TYPES
    if invalid_hvs:
      result.append("enabled hypervisors contains invalid entries: %s" %
                    invalid_hvs)
    missing_hvp = (set(data.cluster.enabled_hypervisors) -
                   set(data.cluster.hvparams.keys()))
    if missing_hvp:
      result.append("hypervisor parameters missing for the enabled"
                    " hypervisor(s) %s" % utils.CommaJoin(missing_hvp))

    if data.cluster.master_node not in data.nodes:
      result.append("cluster has invalid primary node '%s'" %
                    data.cluster.master_node)

    # per-instance checks
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.name != instance_name:
        result.append("instance '%s' is indexed by wrong name '%s'" %
                      (instance.name, instance_name))
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)

      # gather the drbd ports for duplicate checks
      for dsk in instance.disks:
        if dsk.dev_type in constants.LDS_DRBD:
          # third element of a DRBD logical_id is the TCP port
          tcp_port = dsk.logical_id[2]
          if tcp_port not in ports:
            ports[tcp_port] = []
          ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
      # gather network port reservation
      net_port = getattr(instance, "network_port", None)
      if net_port is not None:
        if net_port not in ports:
          ports[net_port] = []
        ports[net_port].append((instance.name, "network port"))

      # instance disk verify
      for idx, disk in enumerate(instance.disks):
        result.extend(["instance '%s' disk %d error: %s" %
                       (instance.name, idx, msg) for msg in disk.Verify()])
        result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))

    # cluster-wide pool of free ports
    for free_port in data.cluster.tcpudp_port_pool:
      if free_port not in ports:
        ports[free_port] = []
      ports[free_port].append(("cluster", "port marked as free"))

    # compute tcp/udp duplicate ports
    keys = ports.keys()
    keys.sort()
    for pnum in keys:
      pdata = ports[pnum]
      if len(pdata) > 1:
        txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
        result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

    # highest used tcp port check
    if keys:
      if keys[-1] > data.cluster.highest_used_port:
        result.append("Highest used port mismatch, saved %s, computed %s" %
                      (data.cluster.highest_used_port, keys[-1]))

    if not data.nodes[data.cluster.master_node].master_candidate:
      result.append("Master node is not a master candidate")

    # master candidate checks
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
    if mc_now < mc_max:
      result.append("Not enough master candidates: actual %d, target %d" %
                    (mc_now, mc_max))

    # node checks
    for node_name, node in data.nodes.items():
      if node.name != node_name:
        result.append("Node '%s' is indexed by wrong name '%s'" %
                      (node.name, node_name))
      # a node may be at most one of master candidate / drained / offline
      if [node.master_candidate, node.drained, node.offline].count(True) > 1:
        result.append("Node %s state is invalid: master_candidate=%s,"
                      " drain=%s, offline=%s" %
                      (node.name, node.master_candidate, node.drained,
                       node.offline))

    # nodegroups checks
    nodegroups_names = set()
    for nodegroup_uuid in data.nodegroups:
      nodegroup = data.nodegroups[nodegroup_uuid]
      if nodegroup.uuid != nodegroup_uuid:
        result.append("node group '%s' (uuid: '%s') indexed by wrong uuid '%s'"
                      % (nodegroup.name, nodegroup.uuid, nodegroup_uuid))
      if utils.UUID_RE.match(nodegroup.name.lower()):
        result.append("node group '%s' (uuid: '%s') has uuid-like name" %
                      (nodegroup.name, nodegroup.uuid))
      if nodegroup.name in nodegroups_names:
        result.append("duplicate node group name '%s'" % nodegroup.name)
      else:
        nodegroups_names.add(nodegroup.name)

    # drbd minors check
    _, duplicates = self._UnlockedComputeDRBDMap()
    for node, minor, instance_a, instance_b in duplicates:
      result.append("DRBD minor %d on node %s is assigned twice to instances"
                    " %s and %s" % (minor, node, instance_a, instance_b))

    # IP checks
    default_nicparams = data.cluster.nicparams[constants.PP_DEFAULT]
    # maps "link-qualified" IP -> list of owner descriptions
    ips = {}

    def _AddIpAddress(ip, name):
      ips.setdefault(ip, []).append(name)

    _AddIpAddress(data.cluster.master_ip, "cluster_ip")

    for node in data.nodes.values():
      _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name)
      if node.secondary_ip != node.primary_ip:
        _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name)

    for instance in data.instances.values():
      for idx, nic in enumerate(instance.nics):
        if nic.ip is None:
          continue

        nicparams = objects.FillDict(default_nicparams, nic.nicparams)
        nic_mode = nicparams[constants.NIC_MODE]
        nic_link = nicparams[constants.NIC_LINK]

        # NIC IPs only clash within the same bridge/route, hence the
        # link-qualified key
        if nic_mode == constants.NIC_MODE_BRIDGED:
          link = "bridge:%s" % nic_link
        elif nic_mode == constants.NIC_MODE_ROUTED:
          link = "route:%s" % nic_link
        else:
          raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode)

        _AddIpAddress("%s/%s" % (link, nic.ip),
                      "instance:%s/nic:%d" % (instance.name, idx))

    for ip, owners in ips.items():
      if len(owners) > 1:
        result.append("IP address %s is used by multiple owners: %s" %
                      (ip, utils.CommaJoin(owners)))

    return result

536
537
538
539
540
541
542
543
544
545
546
547
548
  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Verify the configuration, under the shared config lock.

    This is just a wrapper over L{_UnlockedVerifyConfig}.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    return self._UnlockedVerifyConfig()

549
  def _UnlockedSetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    This function is for internal use, when the config lock is already held.

    """
    # update children first, so the whole tree is consistent
    if disk.children:
      for child in disk.children:
        self._UnlockedSetDiskID(child, node_name)

    # already converted and nothing to convert from: nothing to do
    if disk.logical_id is None and disk.physical_id is not None:
      return
    if disk.dev_type == constants.LD_DRBD8:
      # DRBD logical_id layout: (primary node, secondary node, port,
      # primary minor, secondary minor, secret)
      pnode, snode, port, pminor, sminor, secret = disk.logical_id
      if node_name not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        node_name)
      pnode_info = self._UnlockedGetNodeInfo(pnode)
      snode_info = self._UnlockedGetNodeInfo(snode)
      if pnode_info is None or snode_info is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(disk))
      # DRBD replication runs over the nodes' secondary IPs
      p_data = (pnode_info.secondary_ip, port)
      s_data = (snode_info.secondary_ip, port)
      # physical_id is ordered local-end first, so it depends on which
      # side node_name is
      if pnode == node_name:
        disk.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        disk.physical_id = s_data + p_data + (sminor, secret)
    else:
      # non-DRBD devices use the logical id unchanged
      disk.physical_id = disk.logical_id
    return

587
588
589
590
591
592
593
594
595
596
597
598
599
600
  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    Locked wrapper over L{_UnlockedSetDiskID}; the children of the disk
    are updated as well.

    """
    return self._UnlockedSetDiskID(disk, node_name)

  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    @type port: int
    @param port: the port number to return to the pool
    @raise errors.ProgrammerError: if the argument is not an integer

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")

    pool = self._config_data.cluster.tcpudp_port_pool
    pool.add(port)
    self._WriteConfig()

611
  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    """
    pool = self._config_data.cluster.tcpudp_port_pool
    return pool.copy()
617

618
  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    cluster = self._config_data.cluster
    # ports previously returned to the pool are recycled first
    if cluster.tcpudp_port_pool:
      port = cluster.tcpudp_port_pool.pop()
    else:
      port = cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      cluster.highest_used_port = port

    self._WriteConfig()
    return port

641
  def _UnlockedComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    @rtype: (dict, list)
    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list), and a list of duplicates; if the duplicates
        list is not empty, the configuration is corrupted and its caller
        should raise an exception

    """
    def _AppendUsedPorts(instance_name, disk, used):
      """Record the DRBD minors of one disk tree in C{used}.

      Returns the list of (node, minor, new owner, existing owner)
      conflicts found while doing so.

      """
      duplicates = []
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
        # logical_id: (node_a, node_b, port, minor_a, minor_b, ...)
        node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
        for node, port in ((node_a, minor_a), (node_b, minor_b)):
          assert node in used, ("Node '%s' of instance '%s' not found"
                                " in node list" % (node, instance_name))
          if port in used[node]:
            duplicates.append((node, port, instance_name, used[node][port]))
          else:
            used[node][port] = instance_name
      if disk.children:
        for child in disk.children:
          duplicates.extend(_AppendUsedPorts(instance_name, child, used))
      return duplicates

    duplicates = []
    # start with an empty minor map for every known node
    my_dict = dict((node, {}) for node in self._config_data.nodes)
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        duplicates.extend(_AppendUsedPorts(instance.name, disk, my_dict))
    # merge in not-yet-committed reservations; a reservation for the same
    # instance is not a conflict
    for (node, minor), instance in self._temporary_drbds.iteritems():
      if minor in my_dict[node] and my_dict[node][minor] != instance:
        duplicates.append((node, minor, instance, my_dict[node][minor]))
      else:
        my_dict[node][minor] = instance
    return my_dict, duplicates
679

680
681
682
683
684
685
686
687
688
689
690
  @locking.ssynchronized(_config_lock)
  def ComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    This is just a wrapper over L{_UnlockedComputeDRBDMap}.

    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list).

    """
    minors, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    return minors
696

697
698
699
700
701
702
703
704
705
  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    @type nodes: list of strings
    @param nodes: the nodes on which to allocate minors
    @type instance: string
    @param instance: the instance for which we allocate minors
    @rtype: list of ints
    @return: the allocated minors, one per entry in C{nodes}

    """
    assert isinstance(instance, basestring), \
           "Invalid argument '%s' passed to AllocateDRBDMinor" % instance

    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    result = []
    for nname in nodes:
      ndata = d_map[nname]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = instance
        self._temporary_drbds[(nname, 0)] = instance
        continue
      keys = ndata.keys()
      keys.sort()
      # reuse the lowest hole in the minor sequence, if any
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      # double-check minor against current instances
      assert minor not in d_map[nname], \
             ("Attempt to reuse allocated DRBD minor %d on node %s,"
              " already allocated to instance %s" %
              (minor, nname, d_map[nname][minor]))
      # record in the local map so a later entry for the same node in
      # this call sees the allocation
      ndata[minor] = instance
      # double-check minor against reservation
      r_key = (nname, minor)
      assert r_key not in self._temporary_drbds, \
             ("Attempt to reuse reserved DRBD minor %d on node %s,"
              " reserved for instance %s" %
              (minor, nname, self._temporary_drbds[r_key]))
      self._temporary_drbds[r_key] = instance
      result.append(minor)
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result

753
  def _UnlockedReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    assert isinstance(instance, basestring), \
           "Invalid argument passed to ReleaseDRBDMinors"
    # collect first, then delete, so we never mutate while iterating
    stale_keys = [key for key, name in self._temporary_drbds.items()
                  if name == instance]
    for key in stale_keys:
      del self._temporary_drbds[key]

767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    This should be called on the error paths, on the success paths
    it's automatically called by the ConfigWriter add and update
    functions.

    This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    self._UnlockedReleaseDRBDMinors(instance)

784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
  @locking.ssynchronized(_config_lock, shared=1)
  def GetConfigVersion(self):
    """Return the version of the cluster configuration.

    @return: Config version

    """
    return self._config_data.version

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterName(self):
    """Return the name of this cluster.

    @return: Cluster name

    """
    return self._config_data.cluster.cluster_name

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNode(self):
    """Return the hostname of the cluster's master node.

    @return: Master hostname

    """
    return self._config_data.cluster.master_node

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterIP(self):
    """Return the IP address of the cluster's master node.

    @return: Master IP

    """
    return self._config_data.cluster.master_ip

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetdev(self):
    """Return the network device used by the cluster master.

    """
    return self._config_data.cluster.master_netdev

  @locking.ssynchronized(_config_lock, shared=1)
  def GetFileStorageDir(self):
    """Return the cluster-wide file storage directory.

    """
    return self._config_data.cluster.file_storage_dir

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHypervisorType(self):
    """Return the default hypervisor for this cluster.

    This is the first entry of the enabled-hypervisors list.

    """
    return self._config_data.cluster.enabled_hypervisors[0]
840

841
  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the public RSA host key stored in the config.

    @rtype: string
    @return: the rsa hostkey

    """
    cluster = self._config_data.cluster
    return cluster.rsahostkeypub

851
852
853
854
855
856
857
  @locking.ssynchronized(_config_lock, shared=1)
  def GetDefaultIAllocator(self):
    """Return the cluster-wide default instance allocator.

    """
    return self._config_data.cluster.default_iallocator

858
859
860
861
862
863
864
865
866
  @locking.ssynchronized(_config_lock, shared=1)
  def GetPrimaryIPFamily(self):
    """Return the primary IP address family of the cluster.

    @return: primary ip family

    """
    return self._config_data.cluster.primary_ip_family

867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
  @locking.ssynchronized(_config_lock)
  def AddNodeGroup(self, group, ec_id, check_uuid=True):
    """Add a node group to the configuration and write it out.

    Locked wrapper over L{_UnlockedAddNodeGroup}.

    @type group: L{objects.NodeGroup}
    @param group: the NodeGroup object to add
    @type ec_id: string
    @param ec_id: unique id for the job to use when creating a missing UUID
    @type check_uuid: bool
    @param check_uuid: add an UUID to the group if it doesn't have one or, if
                       it does, ensure that it does not exist in the
                       configuration already

    """
    self._UnlockedAddNodeGroup(group, ec_id, check_uuid)
    self._WriteConfig()

  def _UnlockedAddNodeGroup(self, group, ec_id, check_uuid):
    """Add a node group to the configuration.

    """
    logging.info("Adding node group %s to configuration", group.name)

    # Some code might need to add a node group with a pre-populated UUID
    # generated with ConfigWriter.GenerateUniqueID(). We allow them to bypass
    # the "does this UUID" exist already check.
    if check_uuid:
      self._EnsureUUID(group, ec_id)

    group.serial_no = 1
    group.ctime = group.mtime = time.time()

    self._config_data.nodegroups[group.uuid] = group
    self._config_data.cluster.serial_no += 1

  @locking.ssynchronized(_config_lock)
  def RemoveNodeGroup(self, group_uuid):
    """Drop a node group from the configuration.

    @type group_uuid: string
    @param group_uuid: the UUID of the node group to remove

    """
    logging.info("Removing node group %s from configuration", group_uuid)

    try:
      del self._config_data.nodegroups[group_uuid]
    except KeyError:
      raise errors.ConfigurationError("Unknown node group '%s'" % group_uuid)

    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

Guido Trotter's avatar
Guido Trotter committed
919
920
  @locking.ssynchronized(_config_lock, shared=1)
  def LookupNodeGroup(self, target):
    """Lookup a node group's UUID.

    @type target: string or None
    @param target: group name or UUID or None to look for the default
    @rtype: string
    @return: nodegroup UUID
    @raises errors.OpPrereqError: when the target group cannot be found

    """
    if target is None:
      # Without an explicit target we can only resolve a default group
      # when there is exactly one group in the cluster.
      if len(self._config_data.nodegroups) != 1:
        # FIX: pass an errcode like the "not found" raise below does, and
        # fix the "explicitely" typo in the user-visible message
        raise errors.OpPrereqError("More than one node group exists. Target"
                                   " group must be specified explicitly.",
                                   errors.ECODE_INVAL)
      else:
        return self._config_data.nodegroups.keys()[0]
    # target may already be a UUID...
    if target in self._config_data.nodegroups:
      return target
    # ...or a group name, which we resolve by linear search
    for nodegroup in self._config_data.nodegroups.values():
      if nodegroup.name == target:
        return nodegroup.uuid
    raise errors.OpPrereqError("Node group '%s' not found" % target,
                               errors.ECODE_NOENT)
Guido Trotter's avatar
Guido Trotter committed
943

944
  def _UnlockedGetNodeGroup(self, uuid):
Guido Trotter's avatar
Guido Trotter committed
945
946
947
948
949
950
951
952
953
954
955
956
957
    """Lookup a node group.

    @type uuid: string
    @param uuid: group UUID
    @rtype: L{objects.NodeGroup} or None
    @return: nodegroup object, or None if not found

    """
    if uuid not in self._config_data.nodegroups:
      return None

    return self._config_data.nodegroups[uuid]

958
959
960
961
962
963
964
965
966
967
968
969
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroup(self, uuid):
    """Retrieve a node group, taking the config lock shared.

    @type uuid: string
    @param uuid: group UUID
    @rtype: L{objects.NodeGroup} or None
    @return: nodegroup object, or None if not found

    """
    return self._UnlockedGetNodeGroup(uuid)

970
971
972
973
974
975
976
  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodeGroupsInfo(self):
    """Return a copy of the UUID-to-NodeGroup mapping for all groups.

    """
    return self._config_data.nodegroups.copy()

Guido Trotter's avatar
Guido Trotter committed
977
978
979
980
981
982
983
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroupList(self):
    """Return the list of node group UUIDs.

    """
    nodegroups = self._config_data.nodegroups
    return nodegroups.keys()

984
  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance, ec_id):
    """Add an instance to the config.

    This should be used after creating a new instance.

    @type instance: L{objects.Instance}
    @param instance: the instance object
    @type ec_id: string
    @param ec_id: execution context id, used when a UUID must be generated

    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      lvs_by_node = instance.MapLVsByNode()
      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, lvs_by_node)

    # reject MAC addresses already used by other instances
    known_macs = self._AllMACs()
    for nic in instance.nics:
      if nic.mac in known_macs:
        raise errors.ConfigurationError("Cannot add instance %s:"
                                        " MAC address '%s' already in use." %
                                        (instance.name, nic.mac))

    self._EnsureUUID(instance, ec_id)

    now = time.time()
    instance.serial_no = 1
    instance.ctime = instance.mtime = now
    self._config_data.instances[instance.name] = instance
    self._config_data.cluster.serial_no += 1
    # the minors reserved while building the disks are committed now
    self._UnlockedReleaseDRBDMinors(instance.name)
    self._WriteConfig()

1017
  def _EnsureUUID(self, item, ec_id):
1018
1019
1020
    """Ensures a given object has a valid UUID.

    @param item: the instance or node to be checked
1021
    @param ec_id: the execution context id for the uuid reservation
1022
1023
1024

    """
    if not item.uuid:
1025
      item.uuid = self._GenerateUniqueID(ec_id)
1026
1027
1028
    elif item.uuid in self._AllIDs(include_temporary=True):
      raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
                                      " in use" % (item.name, item.uuid))
1029

1030
1031
  def _SetInstanceStatus(self, instance_name, status):
    """Set the instance's status to a given value.
Iustin Pop's avatar
Iustin Pop committed
1032
1033

    """
1034
1035
    assert isinstance(status, bool), \
           "Invalid status '%s' passed to SetInstanceStatus" % (status,)
Iustin Pop's avatar
Iustin Pop committed
1036
1037

    if instance_name not in self._config_data.instances:
1038
1039
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
Iustin Pop's avatar
Iustin Pop committed
1040
    instance = self._config_data.instances[instance_name]
1041
1042
    if instance.admin_up != status:
      instance.admin_up = status
1043
      instance.serial_no += 1
1044
      instance.mtime = time.time()
1045
      self._WriteConfig()
Iustin Pop's avatar
Iustin Pop committed
1046

1047
  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Record in the configuration that the instance is running.

    """
    self._SetInstanceStatus(instance_name, True)
1053

1054
  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, instance_name):
    """Remove the instance from the configuration.

    """
    try:
      del self._config_data.instances[instance_name]
    except KeyError:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

1065
  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    @type old_name: string
    @param old_name: current name of the instance
    @type new_name: string
    @param new_name: name the instance is renamed to

    """
    instances = self._config_data.instances
    if old_name not in instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
    inst = instances.pop(old_name)
    inst.name = new_name

    for disk in inst.disks:
      if disk.dev_type == constants.LD_FILE:
        # rename the file paths in logical and physical id
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        disk_fname = "disk%s" % disk.iv_name.split("/")[1]
        disk.physical_id = disk.logical_id = (disk.logical_id[0],
                                              utils.PathJoin(file_storage_dir,
                                                             inst.name,
                                                             disk_fname))

    # Force update of ssconf files
    self._config_data.cluster.serial_no += 1

    instances[inst.name] = inst
    self._WriteConfig()

1096
  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Record in the configuration that the instance is stopped.

    """
    self._SetInstanceStatus(instance_name, False)
Iustin Pop's avatar
Iustin Pop committed
1102

1103
1104
1105
1106
1107
1108
1109
1110
  def _UnlockedGetInstanceList(self):
    """Get the list of instances.

    This function is for internal use, when the config lock is already held.

    """
    return self._config_data.instances.keys()

1111
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances, taking the config lock shared.

    @return: array of instances, ex. ['instance2.example.com',
        'instance1.example.com']

    """
    return self._UnlockedGetInstanceList()
Iustin Pop's avatar
Iustin Pop committed
1120

1121
  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    known_names = self._config_data.instances.keys()
    return utils.MatchNameComponent(short_name, known_names,
                                    case_sensitive=False)
Iustin Pop's avatar
Iustin Pop committed
1129

1130
  def _UnlockedGetInstanceInfo(self, instance_name):
Michael Hanselmann's avatar
Michael Hanselmann committed
1131
    """Returns information about an instance.
1132
1133
1134
1135
1136
1137
1138
1139
1140

    This function is for internal use, when the config lock is already held.

    """
    if instance_name not in self._config_data.instances:
      return None

    return self._config_data.instances[instance_name]

1141
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfo(self, instance_name):
    """Return the configuration data of an instance.

    Only the data stored in the configuration file is returned; runtime
    information about an instance has to be queried from the live systems.

    @param instance_name: name of the instance, e.g.
        I{instance1.example.com}
    @rtype: L{objects.Instance}
    @return: the instance object, or None if not found

    """
    return self._UnlockedGetInstanceInfo(instance_name)
Iustin Pop's avatar
Iustin Pop committed
1156

1157
1158
1159
1160
1161
  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllInstancesInfo(self):
    """Get the configuration of all instances.

    @rtype: dict
    @return: dict mapping each instance name to what L{GetInstanceInfo}
        would return for that instance

    """
    return dict((name, self._UnlockedGetInstanceInfo(name))
                for name in self._UnlockedGetInstanceList())

1170
  @locking.ssynchronized(_config_lock)
  def AddNode(self, node, ec_id):
    """Add a node to the configuration.

    @type node: L{objects.Node}
    @param node: a Node instance
    @type ec_id: string
    @param ec_id: execution context id, used when a UUID must be generated

    """
    logging.info("Adding node %s to configuration", node.name)

    self._EnsureUUID(node, ec_id)

    now = time.time()
    node.serial_no = 1
    node.ctime = node.mtime = now
    self._UnlockedAddNodeToGroup(node.name, node.group)
    self._config_data.nodes[node.name] = node
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

1189
  @locking.ssynchronized(_config_lock)
Iustin Pop's avatar
Iustin Pop committed