#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
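
# Illustrative value (documentation only, not part of the check): the type
# above accepts e.g.
#   [("modify", {"mode": "bridged"}), ("remove", None)]
# i.e. an optional list of two-item pairs whose first element is a non-empty
# string describing the change and whose second element can be any payload.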


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
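
# For illustration (hypothetical names): resolving "inst1" to
# "inst1.example.com" passes the prefix check above, while a name that
# resolves to "web7.example.com" raises OpPrereqError.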


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)
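
# Illustrative sketch: an opcode with opportunistic_locking=True must also
# set iallocator (e.g. "hail"); combining opportunistic locking with explicit
# pnode/snode placement trips this check. The allocator name is an example,
# not mandated by this function.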


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
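
# Illustrative example (hypothetical values): with op.beparams of
# {"vcpus": "auto"} and a cluster default profile of {"vcpus": 2, ...}, the
# "auto" value is replaced from the defaults before the dict is type-checked
# and completed by SimpleFillBE.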


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
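
# Illustrative input (hypothetical values): op.nics of
#   [{"network": "net1", "ip": "pool"}]
# builds one NIC with empty nicparams (mode/link may not be combined with a
# network) and the IP left as "pool", to be assigned later from the
# network's address pool once the primary node is known.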


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)
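
# Illustrative example (hypothetical address): if 198.51.100.10 lies in a
# network connected to the node's group, CheckIPInNodeGroup returns that
# network and the check above raises; an address outside any connected
# network yields (None, None) and is accepted.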


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
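
# Illustrative example (hypothetical spec): an instance_spec such as
#   {"memory-size": 512, "cpu-count": 1, "disk-count": 1,
#    "disk-size": [1024], "nic-count": 1, "spindle-use": 1}
# is unpacked above and checked field by field against the ipolicy bounds;
# the result is a list of human-readable violations, empty if the spec fits.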


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks: parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """ Check validity of VLANs if given

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                             errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                           " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                       " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_DEFAULT

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice than to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        set(self.owned_locks(locking.LEVEL_NODE)) &
        set(self.owned_locks(locking.LEVEL_NODE_RES)))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't override some instance parameters, try to
    use them from the export information, if it declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
          disk = {
            constants.IDISK_SIZE: disk_sz,
            constants.IDISK_NAME: disk_name
            }
          disks.append(disk)
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in [constants.INIC_IP,
                       constants.INIC_MAC, constants.INIC_NAME]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          network = einfo.get(constants.INISECT_INS,
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
          # in case network is given link and mode are inherited
          # from nodegroup's netparams and thus should not be passed here
          if network:
            ndict[constants.INIC_NETWORK] = network
          else:
            for name in list(constants.NICS_PARAMETERS):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
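
      # Illustrative result (hypothetical paths): with a cluster storage dir
      # of "/srv/ganeti/file-storage", op.file_storage_dir="web" and an
      # instance named "inst1", the final directory is
      # "/srv/ganeti/file-storage/web/inst1".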

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " are '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    disk_params = self.cfg.GetGroupDiskParams(node_group)
    access_type = disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )

    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                            self.op.disk_template,
                                            access_type):
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                 " used with %s disk access param" %
                                 (self.op.hypervisor, access_type),
                                  errors.ECODE_STATE)

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }
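    # an assembled spec might look like (values purely illustrative):
    #   {ISPEC_MEM_SIZE: 1024, ISPEC_CPU_COUNT: 2, ISPEC_DISK_COUNT: 1,
    #    ISPEC_DISK_SIZE: [10240], ISPEC_NIC_COUNT: 1, ISPEC_SPINDLE_USE: 1}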

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

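    # Hypervisors that expose the instance console over the network (e.g.
    # KVM, Xen HVM) need a cluster-wide unique port allocated here.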
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly, but we have a chicken-and-egg problem here: we can only
    # take the group disk parameters, as the instance has no disks yet (we
    # are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
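        # tmp_disks now carry the names of the LVs being adopted, while
        # rename_to holds the freshly generated logical IDs; the RPC below
        # renames each existing LV to its new Ganeti-managed name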
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adoped LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
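    # adopted disks keep their existing data, so only freshly created disks
    # are candidates for the optional pre-allocation wipe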
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
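    # otherwise, either wait for the full resync, or (for internally
    # mirrored templates such as DRBD) just check that the mirrors are not
    # degraded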
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
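          # while the mirrors are still syncing, the OS create scripts would
          # compete with the resync for I/O, so the sync is paused for the
          # duration of the install and resumed afterwards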
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               ((iobj.disks[idx], iobj), idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
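        # (this gives the OS definition a chance to adapt the instance's
        # internal configuration, e.g. its hostname, to the new name)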
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
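      # record the intended administrative state in the configuration
      # before actually starting the instance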
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return self.cfg.GetNodeNames(list(iobj.all_nodes))