config.py 40.8 KB
Newer Older
Iustin Pop's avatar
Iustin Pop committed
1
#
Iustin Pop's avatar
Iustin Pop committed
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Configuration management for Ganeti

24
This module provides the interface to the Ganeti cluster configuration.
Iustin Pop's avatar
Iustin Pop committed
25

26
27
The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.
Iustin Pop's avatar
Iustin Pop committed
28

29
30
Currently, the data storage format is JSON. YAML was slow and consuming too
much memory.
Iustin Pop's avatar
Iustin Pop committed
31
32
33
34
35
36

"""

import os
import tempfile
import random
37
import logging
Iustin Pop's avatar
Iustin Pop committed
38
39

from ganeti import errors
40
from ganeti import locking
Iustin Pop's avatar
Iustin Pop committed
41
42
43
44
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
45
from ganeti import serializer
46
47


48
49
50
# Module-level lock serializing all access to the configuration data;
# accessor methods take it shared (shared=1), mutators take it exclusively.
_config_lock = locking.SharedLock()


Michael Hanselmann's avatar
Michael Hanselmann committed
51
def _ValidateConfig(data):
  """Verifies that a configuration objects looks valid.

  This only verifies the version of the configuration.

  @raise errors.ConfigurationError: if the version differs from what
      we expect

  """
  if data.version != constants.CONFIG_VERSION:
    msg = ("Cluster configuration version"
           " mismatch, got %s instead of %s" %
           (data.version, constants.CONFIG_VERSION))
    raise errors.ConfigurationError(msg)
Iustin Pop's avatar
Iustin Pop committed
65

66

Iustin Pop's avatar
Iustin Pop committed
67
class ConfigWriter:
68
  """The interface to the cluster configuration.
Iustin Pop's avatar
Iustin Pop committed
69

70
  """
Iustin Pop's avatar
Iustin Pop committed
71
  def __init__(self, cfg_file=None, offline=False):
    """Set up the writer state and load the configuration.

    @type cfg_file: string or None
    @param cfg_file: path to the configuration file; defaults to
        C{constants.CLUSTER_CONF_FILE} when None
    @type offline: bool
    @param offline: offline-mode flag, stored for later use by the
        write/distribution code

    """
    self.write_count = 0
    self._lock = _config_lock
    self._config_data = None
    self._offline = offline
    if cfg_file is None:
      cfg_file = constants.CLUSTER_CONF_FILE
    self._cfg_file = cfg_file
    # reservations not yet committed to the configuration file
    self._temporary_ids = set()
    self._temporary_drbds = {}
    self._temporary_macs = set()
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = utils.HostInfo().name
    self._last_cluster_serial = -1
    self._OpenConfig()
Iustin Pop's avatar
Iustin Pop committed
90
91
92
93
94
95
96
97
98

  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Check if the cluster is configured.

    """
    return os.path.exists(constants.CLUSTER_CONF_FILE)

99
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self):
    """Generate a MAC for an instance.

    Candidates are built from the cluster MAC prefix plus three random
    bytes and checked against both the configured instances and the
    not-yet-committed reservations.

    @rtype: string
    @return: a MAC address not currently in use in this cluster

    """
    prefix = self._config_data.cluster.mac_prefix
    used = self._AllMACs()
    for _ in range(64):
      tail = [random.randrange(0, 256) for _ in range(3)]
      mac = "%s:%02x:%02x:%02x" % (prefix, tail[0], tail[1], tail[2])
      if mac not in used and mac not in self._temporary_macs:
        break
    else:
      raise errors.ConfigurationError("Can't generate unique MAC")
    # reserve the address until the owning instance is added
    self._temporary_macs.add(mac)
    return mac

122
  @locking.ssynchronized(_config_lock, shared=1)
  def IsMacInUse(self, mac):
    """Predicate: check if the specified MAC is in use in the Ganeti cluster.

    This only checks instances managed by this cluster, it does not
    check for potential collisions elsewhere.

    """
    # both committed addresses and pending reservations count as "in use"
    return mac in self._AllMACs() or mac in self._temporary_macs
132

133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateDRBDSecret(self):
    """Generate a DRBD secret.

    This checks the current disks for duplicates.

    @rtype: string
    @return: a secret unique among the configured DRBD disks

    """
    in_use = self._AllDRBDSecrets()
    for _ in range(64):
      candidate = utils.GenerateSecret()
      if candidate not in in_use:
        return candidate
    raise errors.ConfigurationError("Can't generate unique DRBD secret")

151
152
153
154
155
156
157
158
159
160
161
  def _ComputeAllLVs(self):
    """Compute the list of all LVs.

    @rtype: set
    @return: the names of every logical volume used by instance disks,
        across all nodes

    """
    lvnames = set()
    for instance in self._config_data.instances.values():
      for lv_list in instance.MapLVsByNode().values():
        lvnames.update(lv_list)
    return lvnames

162
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, exceptions=None):
    """Generate an unique disk name.

    This checks the current node, instances and disk names for
    duplicates.

    @param exceptions: a list with some other names which should be checked
        for uniqueness (used for example when you want to get
        more than one id at one time without adding each one in
        turn to the config file)

    @rtype: string
    @return: the unique id

    @raise errors.ConfigurationError: if no unique id could be generated
        after 64 attempts

    """
    # gather everything already taken: pending reservations, LVs,
    # instance names and node names
    existing = set()
    existing.update(self._temporary_ids)
    existing.update(self._ComputeAllLVs())
    existing.update(self._config_data.instances.keys())
    existing.update(self._config_data.nodes.keys())
    if exceptions is not None:
      existing.update(exceptions)
    retries = 64
    while retries > 0:
      unique_id = utils.NewUUID()
      if unique_id not in existing and unique_id is not None:
        break
      # FIX: the counter was never decremented before, which made the
      # failure branch unreachable and risked an endless loop
      retries -= 1
    else:
      # FIX: error message was missing the closing parenthesis
      raise errors.ConfigurationError("Not able to generate an unique ID"
                                      " (last tried ID: %s)" % unique_id)
    # reserve the id until it is committed to the configuration
    self._temporary_ids.add(unique_id)
    return unique_id

Iustin Pop's avatar
Iustin Pop committed
196
197
198
  def _AllMACs(self):
    """Return all MACs present in the config.

    @rtype: list
    @return: the list of all MACs

    """
    return [nic.mac
            for instance in self._config_data.instances.values()
            for nic in instance.nics]

210
211
212
  def _AllDRBDSecrets(self):
    """Return all DRBD secrets present in the config.

    @rtype: list
    @return: the list of all DRBD secrets

    """
    def _Collect(disk, acc):
      """Depth-first walk over one disk and its children."""
      if disk.dev_type == constants.DT_DRBD8:
        # the secret is the sixth element of a DRBD8 logical_id
        acc.append(disk.logical_id[5])
      for child in (disk.children or []):
        _Collect(child, acc)

    secrets = []
    for instance in self._config_data.instances.values():
      for disk in instance.disks:
        _Collect(disk, secrets)
    return secrets

232
233
234
235
236
237
238
239
240
241
242
243
244
245
  def _CheckDiskIDs(self, disk, l_ids, p_ids):
    """Compute duplicate disk IDs

    @type disk: L{objects.Disk}
    @param disk: the disk at which to start searching
    @type l_ids: list
    @param l_ids: list of current logical ids
    @type p_ids: list
    @param p_ids: list of current physical ids
    @rtype: list
    @return: a list of error messages

    """
    result = []
    # logical and physical ids get identical treatment, so handle them
    # in one loop; the seen-lists are mutated in place for the caller
    for kind, own_id, seen in (("logical", disk.logical_id, l_ids),
                               ("physical", disk.physical_id, p_ids)):
      if own_id is None:
        continue
      if own_id in seen:
        result.append("duplicate %s id %s" % (kind, str(own_id)))
      else:
        seen.append(own_id)

    if disk.children:
      for child in disk.children:
        result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
    return result

262
  def _UnlockedVerifyConfig(self):
    """Verify function.

    Performs cross-checks over the whole configuration: cluster
    settings, instance/node references, duplicate MACs, duplicate
    tcp/udp ports, node state flags and DRBD minor assignments.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    result = []
    seen_macs = []
    ports = {}
    data = self._config_data
    seen_lids = []
    seen_pids = []

    # global cluster checks
    if not data.cluster.enabled_hypervisors:
      result.append("enabled hypervisors list doesn't have any entries")
    invalid_hvs = set(data.cluster.enabled_hypervisors) - constants.HYPER_TYPES
    if invalid_hvs:
      result.append("enabled hypervisors contains invalid entries: %s" %
                    invalid_hvs)

    if data.cluster.master_node not in data.nodes:
      result.append("cluster has invalid primary node '%s'" %
                    data.cluster.master_node)

    # per-instance checks
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)

      # gather the drbd ports for duplicate checks
      for dsk in instance.disks:
        if dsk.dev_type in constants.LDS_DRBD:
          tcp_port = dsk.logical_id[2]
          if tcp_port not in ports:
            ports[tcp_port] = []
          ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
      # gather network port reservation
      net_port = getattr(instance, "network_port", None)
      if net_port is not None:
        if net_port not in ports:
          ports[net_port] = []
        ports[net_port].append((instance.name, "network port"))

      # instance disk verify
      for idx, disk in enumerate(instance.disks):
        result.extend(["instance '%s' disk %d error: %s" %
                       (instance.name, idx, msg) for msg in disk.Verify()])
        result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))

    # cluster-wide pool of free ports
    for free_port in data.cluster.tcpudp_port_pool:
      if free_port not in ports:
        ports[free_port] = []
      ports[free_port].append(("cluster", "port marked as free"))

    # compute tcp/udp duplicate ports
    keys = ports.keys()
    keys.sort()
    for pnum in keys:
      pdata = ports[pnum]
      if len(pdata) > 1:
        txt = ", ".join(["%s/%s" % val for val in pdata])
        result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

    # highest used tcp port check
    if keys:
      if keys[-1] > data.cluster.highest_used_port:
        result.append("Highest used port mismatch, saved %s, computed %s" %
                      (data.cluster.highest_used_port, keys[-1]))

    if not data.nodes[data.cluster.master_node].master_candidate:
      result.append("Master node is not a master candidate")

    # master candidate checks
    mc_now, mc_max = self._UnlockedGetMasterCandidateStats()
    if mc_now < mc_max:
      result.append("Not enough master candidates: actual %d, target %d" %
                    (mc_now, mc_max))

    # node checks: at most one of the three state flags may be set
    for node in data.nodes.values():
      if [node.master_candidate, node.drained, node.offline].count(True) > 1:
        # FIX: the message previously formatted node.drain, which is not
        # an attribute (the condition above reads node.drained), so this
        # error path itself raised an AttributeError
        result.append("Node %s state is invalid: master_candidate=%s,"
                      " drain=%s, offline=%s" %
                      (node.name, node.master_candidate, node.drained,
                       node.offline))

    # drbd minors check
    d_map, duplicates = self._UnlockedComputeDRBDMap()
    for node, minor, instance_a, instance_b in duplicates:
      result.append("DRBD minor %d on node %s is assigned twice to instances"
                    " %s and %s" % (minor, node, instance_a, instance_b))

    return result

372
373
374
375
376
377
378
379
380
381
382
383
384
  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Verify function.

    This is just a wrapper over L{_UnlockedVerifyConfig}.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    # verification only reads the data, hence the shared lock
    return self._UnlockedVerifyConfig()

385
  def _UnlockedSetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    This function is for internal use, when the config lock is already held.

    """
    # fix up the children first, so a top-device-only call still
    # updates the whole tree
    for child in (disk.children or []):
      self._UnlockedSetDiskID(child, node_name)

    if disk.logical_id is None and disk.physical_id is not None:
      # nothing to convert for this device
      return
    if disk.dev_type != constants.LD_DRBD8:
      # for all non-drbd devices the physical id equals the logical one
      disk.physical_id = disk.logical_id
      return

    pnode, snode, port, pminor, sminor, secret = disk.logical_id
    if node_name not in (pnode, snode):
      raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                      node_name)
    pnode_info = self._UnlockedGetNodeInfo(pnode)
    snode_info = self._UnlockedGetNodeInfo(snode)
    if pnode_info is None or snode_info is None:
      raise errors.ConfigurationError("Can't find primary or secondary node"
                                      " for %s" % str(disk))
    p_data = (pnode_info.secondary_ip, port)
    s_data = (snode_info.secondary_ip, port)
    if pnode == node_name:
      disk.physical_id = p_data + s_data + (pminor, secret)
    else:
      # it must be the secondary, we tested above
      disk.physical_id = s_data + p_data + (sminor, secret)

423
424
425
426
427
428
429
430
431
432
433
434
435
436
  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    @note: the disk object (and its children) are modified in place

    """
    return self._UnlockedSetDiskID(disk, node_name)

  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    @type port: int
    @param port: the port to return to the cluster's free-port pool

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")

    self._config_data.cluster.tcpudp_port_pool.add(port)
    # write immediately so the freed port is persisted
    self._WriteConfig()

447
  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    @return: a copy of the cluster's pool of free tcp/udp ports

    """
    # a copy is returned so the caller cannot mutate the configuration
    return self._config_data.cluster.tcpudp_port_pool.copy()
453

454
  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    pool = self._config_data.cluster.tcpudp_port_pool
    if pool:
      # prefer recycling a previously-freed port
      port = pool.pop()
    else:
      port = self._config_data.cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      self._config_data.cluster.highest_used_port = port

    self._WriteConfig()
    return port

477
  def _UnlockedComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    @rtype: (dict, list)
    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list), and a list of duplicates; if the duplicates
        list is not empty, the configuration is corrupted and its caller
        should raise an exception

    """
    def _RecordMinors(instance_name, disk, used):
      """Recursively record the minors of one disk tree into C{used}."""
      dups = []
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
        node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
        for node, minor in ((node_a, minor_a), (node_b, minor_b)):
          assert node in used, ("Node '%s' of instance '%s' not found"
                                " in node list" % (node, instance_name))
          if minor in used[node]:
            dups.append((node, minor, instance_name, used[node][minor]))
          else:
            used[node][minor] = instance_name
      if disk.children:
        for child in disk.children:
          dups.extend(_RecordMinors(instance_name, child, used))
      return dups

    duplicates = []
    minor_map = dict((node, {}) for node in self._config_data.nodes)
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        duplicates.extend(_RecordMinors(instance.name, disk, minor_map))
    # not-yet-committed reservations count as used minors too
    for (node, minor), instance in self._temporary_drbds.iteritems():
      if minor in minor_map[node] and minor_map[node][minor] != instance:
        duplicates.append((node, minor, instance, minor_map[node][minor]))
      else:
        minor_map[node][minor] = instance
    return minor_map, duplicates
515

516
517
518
519
520
521
522
523
524
525
526
  @locking.ssynchronized(_config_lock)
  def ComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    This is just a wrapper over L{_UnlockedComputeDRBDMap}.

    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list).

    @raise errors.ConfigurationError: if any duplicate minor is found

    """
    minors, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    return minors
532

533
534
535
536
537
538
539
540
541
  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    @type nodes: list
    @param nodes: the node names on which to allocate (one entry per
        requested minor)
    @type instance: string
    @param instance: the instance for which we allocate minors

    @rtype: list
    @return: one free minor per entry of C{nodes}

    """
    assert isinstance(instance, basestring), \
           "Invalid argument '%s' passed to AllocateDRBDMinor" % instance

    # refuse to allocate on top of an already-corrupted minor map
    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    result = []
    for nname in nodes:
      ndata = d_map[nname]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = instance
        self._temporary_drbds[(nname, 0)] = instance
        continue
      keys = ndata.keys()
      keys.sort()
      # look for the first gap in the sorted minor list
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      # double-check minor against current instances
      assert minor not in d_map[nname], \
             ("Attempt to reuse allocated DRBD minor %d on node %s,"
              " already allocated to instance %s" %
              (minor, nname, d_map[nname][minor]))
      # record in the map so a later entry for the same node sees it
      ndata[minor] = instance
      # double-check minor against reservation
      r_key = (nname, minor)
      assert r_key not in self._temporary_drbds, \
             ("Attempt to reuse reserved DRBD minor %d on node %s,"
              " reserved for instance %s" %
              (minor, nname, self._temporary_drbds[r_key]))
      self._temporary_drbds[r_key] = instance
      result.append(minor)
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result

589
  def _UnlockedReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    assert isinstance(instance, basestring), \
           "Invalid argument passed to ReleaseDRBDMinors"
    # collect first, then delete, so the dict is not mutated while scanned
    stale = [key for (key, owner) in self._temporary_drbds.items()
             if owner == instance]
    for key in stale:
      del self._temporary_drbds[key]

603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    This should be called on the error paths, on the success paths
    it's automatically called by the ConfigWriter add and update
    functions.

    This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    self._UnlockedReleaseDRBDMinors(instance)

620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
  @locking.ssynchronized(_config_lock, shared=1)
  def GetConfigVersion(self):
    """Get the configuration version.

    @return: Config version

    """
    # read-only accessor, hence the shared lock
    return self._config_data.version

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterName(self):
    """Get cluster name.

    @return: Cluster name

    """
    # read-only accessor, hence the shared lock
    return self._config_data.cluster.cluster_name

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNode(self):
    """Get the hostname of the master node for this cluster.

    @return: Master hostname

    """
    # read-only accessor, hence the shared lock
    return self._config_data.cluster.master_node

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterIP(self):
    """Get the IP of the master node for this cluster.

    @return: Master IP

    """
    # read-only accessor, hence the shared lock
    return self._config_data.cluster.master_ip

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetdev(self):
    """Get the master network device for this cluster.

    @return: the name of the network device used by the master

    """
    return self._config_data.cluster.master_netdev

  @locking.ssynchronized(_config_lock, shared=1)
  def GetFileStorageDir(self):
    """Get the file storage dir for this cluster.

    @return: the path of the file storage directory

    """
    return self._config_data.cluster.file_storage_dir

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHypervisorType(self):
    """Get the hypervisor type for this cluster.

    @return: the first entry of the enabled-hypervisors list

    """
    # only the first enabled hypervisor is reported here
    return self._config_data.cluster.enabled_hypervisors[0]
676

677
  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the rsa hostkey from the config.

    @rtype: string
    @return: the rsa hostkey (the cluster's C{rsahostkeypub} field)

    """
    return self._config_data.cluster.rsahostkeypub

687
  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance):
    """Add an instance to the config.

    This should be used after creating a new instance.

    @type instance: L{objects.Instance}
    @param instance: the instance object

    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name,
                   instance.MapLVsByNode())

    # refuse MAC addresses already owned by another instance
    existing_macs = self._AllMACs()
    clashing = [nic.mac for nic in instance.nics if nic.mac in existing_macs]
    if clashing:
      raise errors.ConfigurationError("Cannot add instance %s:"
        " MAC address '%s' already in use." % (instance.name, clashing[0]))

    instance.serial_no = 1
    self._config_data.instances[instance.name] = instance
    self._config_data.cluster.serial_no += 1
    # the instance is committed now, so drop its temporary reservations
    self._UnlockedReleaseDRBDMinors(instance.name)
    for nic in instance.nics:
      self._temporary_macs.discard(nic.mac)
    self._WriteConfig()

718
719
  def _SetInstanceStatus(self, instance_name, status):
    """Set the instance's status to a given value.

    @type instance_name: string
    @param instance_name: the name of the instance to update
    @type status: bool
    @param status: the new value for the C{admin_up} flag

    """
    assert isinstance(status, bool), \
           "Invalid status '%s' passed to SetInstanceStatus" % (status,)

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
    instance = self._config_data.instances[instance_name]
    if instance.admin_up == status:
      # no change, avoid touching the configuration file
      return
    instance.admin_up = status
    instance.serial_no += 1
    self._WriteConfig()
Iustin Pop's avatar
Iustin Pop committed
733

734
  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Mark the instance status to up in the config.

    @type instance_name: string
    @param instance_name: the name of the instance to mark as up

    """
    self._SetInstanceStatus(instance_name, True)
740

741
  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, instance_name):
    """Remove the instance from the configuration.

    @type instance_name: string
    @param instance_name: the name of the instance to remove

    """
    try:
      self._config_data.instances.pop(instance_name)
    except KeyError:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

752
  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
    inst = self._config_data.instances.pop(old_name)
    inst.name = new_name

    for disk in inst.disks:
      if disk.dev_type == constants.LD_FILE:
        # rename the file paths in logical and physical id
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        new_id = (disk.logical_id[0],
                  os.path.join(file_storage_dir, inst.name, disk.iv_name))
        disk.logical_id = new_id
        disk.physical_id = new_id

    self._config_data.instances[inst.name] = inst
    self._WriteConfig()

779
  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Mark the status of an instance to down in the configuration.

    @type instance_name: string
    @param instance_name: the name of the instance to mark as down

    """
    self._SetInstanceStatus(instance_name, False)
Iustin Pop's avatar
Iustin Pop committed
785

786
787
788
789
790
791
792
793
  def _UnlockedGetInstanceList(self):
    """Get the list of instances.

    This function is for internal use, when the config lock is already held.

    """
    return list(self._config_data.instances)

794
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances.

    @return: array of instances, ex. ['instance2.example.com',
        'instance1.example.com']

    """
    # locked wrapper around the internal helper
    names = self._UnlockedGetInstanceList()
    return names
Iustin Pop's avatar
Iustin Pop committed
803

804
  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    all_names = self._config_data.instances.keys()
    return utils.MatchNameComponent(short_name, all_names)

812
  def _UnlockedGetInstanceInfo(self, instance_name):
Michael Hanselmann's avatar
Michael Hanselmann committed
813
    """Returns information about an instance.
814
815
816
817
818
819
820
821
822

    This function is for internal use, when the config lock is already held.

    """
    if instance_name not in self._config_data.instances:
      return None

    return self._config_data.instances[instance_name]

823
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfo(self, instance_name):
    """Returns information about an instance.

    It takes the information from the configuration file. Other information of
    an instance are taken from the live systems.

    @param instance_name: name of the instance, e.g.
        I{instance1.example.com}

    @rtype: L{objects.Instance}
    @return: the instance object

    """
    # simple locked wrapper around the internal accessor
    info = self._UnlockedGetInstanceInfo(instance_name)
    return info
Iustin Pop's avatar
Iustin Pop committed
838

839
840
841
842
843
  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllInstancesInfo(self):
    """Get the configuration of all instances.

    @rtype: dict
    @return: dict of (instance, instance_info), where instance_info is what
              would GetInstanceInfo return for the node

    """
    result = {}
    for name in self._UnlockedGetInstanceList():
      result[name] = self._UnlockedGetInstanceInfo(name)
    return result

852
  @locking.ssynchronized(_config_lock)
  def AddNode(self, node):
    """Add a node to the configuration.

    @type node: L{objects.Node}
    @param node: a Node instance

    """
    # lazy %-style args: let logging do the interpolation only if the
    # record is actually emitted (the original eagerly built the string)
    logging.info("Adding node %s to configuration", node.name)

    # fresh nodes start their per-object change counter at 1
    node.serial_no = 1
    self._config_data.nodes[node.name] = node
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

867
  @locking.ssynchronized(_config_lock)
  def RemoveNode(self, node_name):
    """Remove a node from the configuration.

    @type node_name: string
    @param node_name: the name of the node to remove

    @raise errors.ConfigurationError: if the node is not in the
        configuration

    """
    # lazy %-style args instead of eager "%" interpolation
    logging.info("Removing node %s from configuration", node_name)

    if node_name not in self._config_data.nodes:
      raise errors.ConfigurationError("Unknown node '%s'" % node_name)

    del self._config_data.nodes[node_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

881
  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete node name.

    """
    return utils.MatchNameComponent(short_name,
                                    self._config_data.nodes.keys())

889
  def _UnlockedGetNodeInfo(self, node_name):
Iustin Pop's avatar
Iustin Pop committed
890
891
    """Get the configuration of a node, as stored in the config.

Iustin Pop's avatar
Iustin Pop committed
892
893
    This function is for internal use, when the config lock is already
    held.
894

Iustin Pop's avatar
Iustin Pop committed
895
    @param node_name: the node name, e.g. I{node1.example.com}
Iustin Pop's avatar
Iustin Pop committed
896

Iustin Pop's avatar
Iustin Pop committed
897
898
    @rtype: L{objects.Node}
    @return: the node object
Iustin Pop's avatar
Iustin Pop committed
899
900
901
902
903
904
905

    """
    if node_name not in self._config_data.nodes:
      return None

    return self._config_data.nodes[node_name]

906
907
908
909
910

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This is just a locked wrapper over L{_UnlockedGetNodeInfo}.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: L{objects.Node}
    @return: the node object

    """
    node = self._UnlockedGetNodeInfo(node_name)
    return node

  def _UnlockedGetNodeList(self):
Iustin Pop's avatar
Iustin Pop committed
922
923
    """Return the list of nodes which are in the configuration.

Iustin Pop's avatar
Iustin Pop committed
924
925
926
927
    This function is for internal use, when the config lock is already
    held.

    @rtype: list
928

Iustin Pop's avatar
Iustin Pop committed
929
930
931
    """
    return self._config_data.nodes.keys()

932
933
934
935
936
937
938
939

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeList(self):
    """Return the list of nodes which are in the configuration.

    """
    # locked wrapper around the internal helper
    node_names = self._UnlockedGetNodeList()
    return node_names

Iustin Pop's avatar
Iustin Pop committed
940
941
942
943
944
945
946
947
948
  @locking.ssynchronized(_config_lock, shared=1)
  def GetOnlineNodeList(self):
    """Return the list of nodes which are online.

    """
    # resolve every name to its node object and keep only the online ones
    return [ninfo.name
            for ninfo in [self._UnlockedGetNodeInfo(name)
                          for name in self._UnlockedGetNodeList()]
            if not ninfo.offline]

949
950
951
952
953
  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodesInfo(self):
    """Get the configuration of all nodes.

    @rtype: dict
    @return: dict of (node, node_info), where node_info is what
              would GetNodeInfo return for the node

    """
    result = {}
    for name in self._UnlockedGetNodeList():
      result[name] = self._UnlockedGetNodeInfo(name)
    return result

962
  def _UnlockedGetMasterCandidateStats(self, exceptions=None):
963
964
    """Get the number of current and maximum desired and possible candidates.

965
966
    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
967
968
969
970
971
    @rtype: tuple
    @return: tuple of (current, desired and possible)

    """
    mc_now = mc_max = 0
972
973
974
    for node in self._config_data.nodes.values():
      if exceptions and node.name in exceptions:
        continue
975
      if not (node.offline or node.drained):
976
977
978
979
980
981
982
        mc_max += 1
      if node.master_candidate:
        mc_now += 1
    mc_max = min(mc_max, self._config_data.cluster.candidate_pool_size)
    return (mc_now, mc_max)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterCandidateStats(self, exceptions=None):
    """Get the number of current and maximum possible candidates.

    This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored

    @rtype: tuple
    @return: tuple of (current, max)

    """
    stats = self._UnlockedGetMasterCandidateStats(exceptions)
    return stats
995
996
997
998
999
1000

  @locking.ssynchronized(_config_lock)
  def MaintainCandidatePool(self):
    """Try to grow the candidate pool to the desired size.

    Promotes eligible nodes (online, not drained, not already candidates)
    to master candidate status until the pool reaches the maximum computed
    by L{_UnlockedGetMasterCandidateStats}, then writes the configuration
    if anything changed.

    @rtype: list
    @return: list with the adjusted nodes (L{objects.Node} instances)

    """
    mc_now, mc_max = self._UnlockedGetMasterCandidateStats()
    mod_list = []
    if mc_now < mc_max:
      node_list = self._config_data.nodes.keys()
      # shuffle so promotion is not biased by dict iteration order
      random.shuffle(node_list)
      for name in node_list:
        if mc_now >= mc_max:
          break
        node = self._config_data.nodes[name]
        # skip nodes that are already candidates or are not eligible
        if node.master_candidate or node.offline or node.drained:
          continue
        mod_list.append(node)
        node.master_candidate = True
        # per-node serial bump records the modification of this node
        node.serial_no += 1
        mc_now += 1
      if mc_now != mc_max:
        # this should not happen
        logging.warning("Warning: MaintainCandidatePool didn't manage to"
                        " fill the candidate pool (%d/%d)", mc_now, mc_max)
      if mod_list:
        # only persist (and bump the cluster serial) if we changed anything
        self._config_data.cluster.serial_no += 1
        self._WriteConfig()

    return mod_list

Iustin Pop's avatar
Iustin Pop committed
1029
1030
1031
1032
  def _BumpSerialNo(self):
    """Bump up the serial number of the config.

    """
1033
    self._config_data.serial_no += 1
Iustin Pop's avatar
Iustin Pop committed
1034
1035
1036
1037
1038
1039
1040
1041

  def _OpenConfig(self):
    """Read the config data from disk.

    Loads and deserializes the configuration file (self._cfg_file),
    validates it, and stores the result in self._config_data.

    @raise errors.ConfigurationError: if the file cannot be parsed or
        the data is incomplete

    """
    f = open(self._cfg_file, 'r')
    try:
      try:
        data = objects.ConfigData.FromDict(serializer.Load(f.read()))
      except Exception, err:
        # wrap any parse/build failure into a configuration error
        raise errors.ConfigurationError(err)
    finally:
      f.close()

    # Make sure the configuration has the right version
    _ValidateConfig(data)

    if (not hasattr(data, 'cluster') or
        not hasattr(data.cluster, 'rsahostkeypub')):
      raise errors.ConfigurationError("Incomplete configuration"
                                      " (missing cluster.rsahostkeypub)")
    self._config_data = data
    # reset the last serial as -1 so that the next write will cause
    # ssconf update
    self._last_cluster_serial = -1
Iustin Pop's avatar
Iustin Pop committed
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070

  def _DistributeConfig(self):
    """Distribute the configuration to the other nodes.

    Currently, this only copies the configuration file. In the future,
    it could be used to encapsulate the 2/3-phase update mechanism.

    @rtype: boolean
    @return: True if the copy succeeded everywhere (or we are offline),
        False otherwise

    """
    if self._offline:
      return True

    success = True
    node_list = []
    addr_list = []
    myhostname = self._my_hostname
    # we can skip checking whether _UnlockedGetNodeInfo returns None
    # since the node list comes from _UnlockedGetNodeList, and we are
    # called with the lock held, so no modifications should take place
    # in between
    for node_name in self._UnlockedGetNodeList():
      if node_name == myhostname:
        continue
      node_info = self._UnlockedGetNodeInfo(node_name)
      if not node_info.master_candidate:
        continue
      node_list.append(node_info.name)
      addr_list.append(node_info.primary_ip)

    result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
                                            address_list=addr_list)
    for to_node, to_result in result.items():
      msg = to_result.RemoteFailMsg()
      if msg:
        logging.error("Copy of file %s to node %s failed: %s",
                      self._cfg_file, to_node, msg)
        success = False
    return success

  def _WriteConfig(self, destination=None):
    """Write the configuration data to persistent storage.

    Verifies the in-memory data, bumps the serial number, writes the
    serialized configuration atomically (temp file + rename), then
    distributes it to the master candidates and refreshes the ssconf
    files if the cluster serial changed.

    @type destination: string or None
    @param destination: alternative path to write to; defaults to the
        standard configuration file (self._cfg_file)

    @raise errors.ConfigurationError: if the configuration fails the
        internal consistency check

    """
    config_errors = self._UnlockedVerifyConfig()
    if config_errors:
      raise errors.ConfigurationError("Configuration data is not"
                                      " consistent: %s" %
                                      (", ".join(config_errors)))
    if destination is None:
      destination = self._cfg_file
    self._BumpSerialNo()
    txt = serializer.Dump(self._config_data.ToDict())
    # write to a temp file in the same directory, fsync, then rename:
    # this makes the on-disk update atomic
    dir_name, file_name = os.path.split(destination)
    fd, name = tempfile.mkstemp('.newconfig', file_name, dir_name)
    f = os.fdopen(fd, 'w')
    try:
      f.write(txt)
      os.fsync(f.fileno())
    finally:
      f.close()
    # we don't need to do os.close(fd) as f.close() did it
    os.rename(name, destination)
    self.write_count += 1

    # and redistribute the config file to master candidates
    self._DistributeConfig()

    # Write ssconf files on all nodes (including locally)
    if self._last_cluster_serial < self._config_data.cluster.serial_no:
      if not self._offline:
        result = rpc.RpcRunner.call_write_ssconf_files(\
          self._UnlockedGetNodeList(),
          self._UnlockedGetSsconfValues())
        for nname, nresu in result.items():
          msg = nresu.RemoteFailMsg()
          if msg:
            # best-effort: log the failure but keep going
            logging.warning("Error while uploading ssconf files to"
                            " node %s: %s", nname, msg)
      # remember what we distributed, to avoid redundant ssconf pushes
      self._last_cluster_serial = self._config_data.cluster.serial_no
1138

1139
  def _UnlockedGetSsconfValues(self):
Iustin Pop's avatar
Iustin Pop committed
1140
1141
1142
1143
1144
1145
1146
    """Return the values needed by ssconf.

    @rtype: dict
    @return: a dictionary with keys the ssconf names and values their
        associated value

    """
1147
    fn = "\n".join
1148
    instance_names = utils.NiceSort(self._UnlockedGetInstanceList())
1149
1150
    node_names = utils.NiceSort(self._UnlockedGetNodeList())
    node_info = [self._UnlockedGetNodeInfo(name) for name in node_names]
1151
    node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
1152
                    for ninfo in node_info]
1153
    node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
1154
                    for ninfo in node_info]
1155

1156
    instance_data = fn(instance_names)
1157
    off_data = fn(node.name for node in node_info if node.offline)
1158
    on_data = fn(node.name for node in node_info if not node.offline)
1159
    mc_data = fn(node.name for node in node_info if node.master_candidate)
1160
1161
    mc_ips_data = fn(node.primary_ip for node in node_info
                     if node.master_candidate)
1162
    node_data = fn(node_names)
1163
1164
    node_pri_ips_data = fn(node_pri_ips)
    node_snd_ips_data = fn(node_snd_ips)
1165

Iustin Pop's avatar
Iustin Pop committed
1166
    cluster = self._config_data.cluster
1167
    cluster_tags = fn(cluster.GetTags())
1168
    return {
Iustin Pop's avatar
Iustin Pop committed
1169
      constants.SS_CLUSTER_NAME: cluster.cluster_name,
1170
      constants.SS_CLUSTER_TAGS: cluster_tags,
Iustin Pop's avatar
Iustin Pop committed
1171
      constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
1172
      constants.SS_MASTER_CANDIDATES: mc_data,
1173
      constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
Iustin Pop's avatar
Iustin Pop committed
1174
1175
1176
      constants.SS_MASTER_IP: cluster.master_ip,
      constants.SS_MASTER_NETDEV: cluster.master_netdev,
      constants.SS_MASTER_NODE: cluster.master_node,
1177
      constants.SS_NODE_LIST: node_data,
1178
1179
      constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
      constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
1180
      constants.SS_OFFLINE_NODES: off_data,
1181
1182
      constants.SS_ONLINE_NODES: on_data,
      constants.SS_INSTANCE_LIST: instance_data,