Commit 3a46edfa authored by Ioannis Tsafaras's avatar Ioannis Tsafaras
Browse files

Complete single VM provisioning functions

parent bb75f497
---
- hosts: central-vm
user: root
gather_facts: no
roles:
- wait_for_ssh
- hosts: central-vm
user: root
roles:
......
---
# wait_for_ssh role: block until the freshly created VM is actually
# reachable over SSH before any configuration tasks run against it.

# Runs on the control machine (local_action); polls port 22 until the
# OpenSSH banner appears in the response.
- name: Wait for port 22 to be ready
  local_action: wait_for port=22 host="{{ inventory_hostname }}" search_regex=OpenSSH

# The SSH daemon may restart (SIGHUP) right after first boot while
# cloud-init finishes; a short grace period avoids dropped connections.
- name: Wait for SSH SIGHUPs to stop
  pause: seconds=15
from kamaki.clients import ClientError
import logging
from fokia.provisioner_base import ProvisionerBase
from fokia.cluster_error_constants import *
from fokia.vmprovisioner import VMProvisioner
from fokia.ansible_manager_minimal import Manager
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class VMProvisioner(ProvisionerBase):
    """Provisioner for the central-service VM on ~okeanos.

    Extends ProvisionerBase with a project-quota check and a one-shot
    workflow that looks up a flavor, reserves a public IP and creates
    the VM.
    """

    def __init__(self, auth_token, cloud_name=None):
        super(VMProvisioner, self).__init__(auth_token=auth_token,
                                            cloud_name=cloud_name)

    def create_central_vm(self, vm_name, wait=True, **kwargs):
        """Create the central service VM.

        :param vm_name: name of the server to create
        :param wait: when True, block until the server finishes building
        :param kwargs: must contain 'vcpus', 'ram', 'disk' and
                       'project_name'; everything is forwarded to
                       create_vm as well
        :raises ClientError: when a quota is exceeded or no flavor
                             matches the requested resources
        """
        quotas = self.get_quotas()
        project_id = self.find_project_id(**kwargs)['id']
        response = self.check_all_resources(quotas,
                                            vcpus=kwargs['vcpus'],
                                            ram=kwargs['ram'],
                                            disk=kwargs['disk'],
                                            project_name=kwargs['project_name'])
        if response:
            # Check flavors for VM.
            # BUG FIX: the flavor lookup previously read the
            # 'vcpus_master'/'ram_master'/'disk_master' keys (a leftover
            # from the cluster provisioner), so the quota check above and
            # the flavor search used different values.  Use the same
            # 'vcpus'/'ram'/'disk' keys consistently.
            flavor = self.find_flavor(vcpus=kwargs['vcpus'],
                                      ram=kwargs['ram'],
                                      disk=kwargs['disk'])
            if not flavor:
                msg = 'This flavor does not allow create.'
                raise ClientError(msg, error_flavor_list)
            public_ip = self.reserve_ip(project_id=project_id)
            vm = self.create_vm(vm_name=vm_name,
                                ip=public_ip,
                                flavor=flavor,
                                **kwargs)
            # Wait for VM to complete being built.
            if wait:
                self.cyclades.wait_server(server_id=vm['id'])

    def check_all_resources(self, quotas, **kwargs):
        """
        Checks user's quota for every requested resource.
        Returns True if everything available.

        :param quotas: quota mapping as returned by get_quotas(),
                       keyed by project id then resource name
        :param **kwargs: resource requests ('vcpus', 'ram', 'disk') and
                         'project_name' used to resolve the project
        :raises ClientError: when any resource is out of quota
        """
        project_id = self.find_project_id(**kwargs)['id']
        # Check for VM
        pending_vm = quotas[project_id]['cyclades.vm']['project_pending']
        limit_vm = quotas[project_id]['cyclades.vm']['project_limit']
        usage_vm = quotas[project_id]['cyclades.vm']['project_usage']
        available_vm = limit_vm - usage_vm - pending_vm
        if available_vm < 1:
            msg = 'Cyclades VMs out of limit'
            raise ClientError(msg, error_quotas_cluster_size)
        # Check for CPUs
        pending_cpu = quotas[project_id]['cyclades.cpu']['project_pending']
        limit_cpu = quotas[project_id]['cyclades.cpu']['project_limit']
        usage_cpu = quotas[project_id]['cyclades.cpu']['project_usage']
        available_cpu = limit_cpu - usage_cpu - pending_cpu
        if available_cpu < kwargs['vcpus']:
            msg = 'Cyclades cpu out of limit'
            raise ClientError(msg, error_quotas_cpu)
        # Check for RAM (quota values are in bytes, request is in MB)
        pending_ram = quotas[project_id]['cyclades.ram']['project_pending']
        limit_ram = quotas[project_id]['cyclades.ram']['project_limit']
        usage_ram = quotas[project_id]['cyclades.ram']['project_usage']
        available_ram = (limit_ram - usage_ram - pending_ram) / self.Bytes_to_MB
        if available_ram < kwargs['ram']:
            msg = 'Cyclades ram out of limit'
            raise ClientError(msg, error_quotas_ram)
        # Check for Disk space (quota values are in bytes, request is in GB)
        # BUG FIX: pending disk was read from the 'cyclades.ram' counter.
        pending_cd = quotas[project_id]['cyclades.disk']['project_pending']
        limit_cd = quotas[project_id]['cyclades.disk']['project_limit']
        usage_cd = quotas[project_id]['cyclades.disk']['project_usage']
        available_cyclades_disk_GB = (limit_cd - usage_cd - pending_cd) / self.Bytes_to_GB
        if available_cyclades_disk_GB < kwargs['disk']:
            msg = 'Cyclades disk out of limit'
            raise ClientError(msg, error_quotas_cyclades_disk)
        # Check for authorized IPs
        pending_ips = quotas[project_id]['cyclades.floating_ip']['project_pending']
        limit_ips = quotas[project_id]['cyclades.floating_ip']['project_limit']
        usage_ips = quotas[project_id]['cyclades.floating_ip']['project_usage']
        available_ips = limit_ips - usage_ips - pending_ips
        # TODO: figure out how to handle unassigned floating ips
        # for d in list_float_ips:
        #     if d['instance_id'] is None and d['port_id'] is None:
        #         available_ips += 1
        # BUG FIX: the condition was inverted ('if available_ips:'),
        # which raised whenever IPs WERE available and passed when the
        # quota was exhausted.
        if available_ips < 1:
            msg = 'authorized IPs out of limit'
            raise ClientError(msg, error_get_ip)
        return True
class CentralServiceManager:
"""
Class deploying dynamically the central service VM
on the ~okeanos infrastructure.
"""
def central_service_create(self, auth_token):
    """
    Creates the central service vm and installs the relevant s/w.

    :param auth_token: ~okeanos API token used to authenticate the
                       provisioner
    :return: result of running the setup playbook on the new VM
    """
    provisioner = VMProvisioner(auth_token=auth_token)
    # Fixed specification of the central service VM.
    vm_name = 'central_service'
    vcpus = 4
    ram = 4096
    disk = 40
    project_name = 'lambda.grnet.gr'
    server_id = provisioner.create_single_vm(vm_name=vm_name,
                                             vcpus=vcpus, ram=ram, disk=disk,
                                             project_name=project_name)
    # ~okeanos VM DNS names are derived from the server id.
    hostname = 'snf-' + str(server_id) + '.vm.okeanos.grnet.gr'
    group = 'central-vm'
    ansible_manager = Manager(hostname, group)
    ansible_result = ansible_manager.run_playbook(
        playbook_file='../../central_service/ansible/playbooks/setup.yml')
    return ansible_result
def central_service_destroy(self):
"""
......
import os
import ansible
from ansible.playbook import PlayBook
from ansible import callbacks
from ansible import utils
class Manager:
    """Minimal wrapper around the pre-2.0 Ansible Python API.

    Builds a one-host inventory, places the host into a named group,
    and runs playbooks against it.
    """

    def __init__(self, host, group):
        # Relax host-key checking and use the default SSH key so freshly
        # created VMs are reachable without manual intervention.
        key_path = os.path.expanduser('~/.ssh/id_rsa')
        ansible.constants.DEFAULT_TIMEOUT = 30
        ansible.constants.DEFAULT_PRIVATE_KEY_FILE = key_path
        ansible.constants.HOST_KEY_CHECKING = False

        # Single-host inventory; attach the host to the requested group,
        # registered as a child of the implicit 'all' group.
        self.ansible_inventory = ansible.inventory.Inventory(host_list=[host])
        root_group = self.ansible_inventory.get_group('all')
        target_host = root_group.get_hosts()[0]
        named_group = ansible.inventory.group.Group(name=group)
        named_group.add_host(target_host)
        self.ansible_inventory.add_group(named_group)
        root_group.add_child_group(named_group)

    def run_playbook(self, playbook_file, tags=None):
        """
        Run the playbook_file using created inventory and tags specified
        :return: aggregated per-host playbook results
        """
        aggregate = callbacks.AggregateStats()
        pb_callbacks = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
        run_callbacks = callbacks.PlaybookRunnerCallbacks(aggregate,
                                                          verbose=utils.VERBOSITY)
        playbook = PlayBook(playbook=playbook_file,
                            inventory=self.ansible_inventory,
                            stats=aggregate,
                            callbacks=pb_callbacks,
                            runner_callbacks=run_callbacks,
                            only_tags=tags)
        return playbook.run()
......@@ -148,7 +148,8 @@ class ProvisionerBase:
ip_obj['uuid'] = ip['floating_network_id']
ip_obj['fixed_ip'] = ip['floating_ip_address']
networks.append(ip_obj)
networks.append({'uuid': kwargs['net_id']})
if kwargs.get('net_id'):
networks.append({'uuid': kwargs['net_id']})
if personality is None:
personality = []
try:
......
import os
from base64 import b64encode

from kamaki.clients import ClientError

from fokia.provisioner_base import ProvisionerBase
from fokia.cluster_error_constants import *
class VMProvisioner(ProvisionerBase):
    """Provisioner for single (non-cluster) ~okeanos VMs.

    Checks project quotas, reserves a public IP, injects the local SSH
    public key, and creates one VM.
    """

    def __init__(self, auth_token, cloud_name=None):
        super(VMProvisioner, self).__init__(auth_token=auth_token,
                                            cloud_name=cloud_name)

    def create_single_vm(self, vm_name, wait=True, **kwargs):
        """Create a single VM with the caller's public SSH key installed.

        :param vm_name: name of the server to create
        :param wait: when True, block until the server finishes building
        :param kwargs: must contain 'vcpus', 'ram', 'disk' and
                       'project_name'; everything is forwarded to
                       create_vm as well
        :return: id of the created server, or None when the quota check
                 does not pass
        :raises ClientError: when a quota is exceeded or no flavor
                             matches the requested resources
        """
        quotas = self.get_quotas()
        project_id = self.find_project_id(**kwargs)['id']
        response = self.check_all_resources(quotas,
                                            vcpus=kwargs['vcpus'],
                                            ram=kwargs['ram'],
                                            disk=kwargs['disk'],
                                            project_name=kwargs['project_name'])
        if response:
            # Check flavors for VM
            flavor = self.find_flavor(vcpus=kwargs['vcpus'],
                                      ram=kwargs['ram'],
                                      disk=kwargs['disk'])
            if not flavor:
                msg = 'This flavor does not allow create.'
                raise ClientError(msg, error_flavor_list)
            public_ip = self.reserve_ip(project_id=project_id)
            # Inject the local public key so the new VM is reachable over
            # SSH without a password.  Requires 'import os' at file level.
            with open(os.path.expanduser('~/.ssh/id_rsa.pub'), 'r') as public_key_file:
                public_key = public_key_file.read()
            # NOTE(review): b64encode expects bytes on Python 3; this code
            # base targets Python 2 where str is bytes — confirm before a
            # Python 3 migration.
            # BUG FIX: mode was written as the Python-2-only literal 0600;
            # 0o600 is equivalent and valid on Python 2.6+ and Python 3.
            authorized = {'contents': b64encode(public_key),
                          'path': '/root/.ssh/authorized_keys',
                          'owner': 'root', 'group': 'root', 'mode': 0o600}
            vm = self.create_vm(vm_name=vm_name,
                                ip=public_ip,
                                personality=[authorized],
                                flavor=flavor,
                                **kwargs)
            server_id = vm['id']
            # Wait for VM to complete being built
            if wait:
                self.cyclades.wait_server(server_id=server_id)
            return server_id
        return None

    def check_all_resources(self, quotas, **kwargs):
        """
        Checks user's quota for every requested resource.
        Returns True if everything available.

        :param quotas: quota mapping as returned by get_quotas(),
                       keyed by project id then resource name
        :param **kwargs: resource requests ('vcpus', 'ram', 'disk') and
                         'project_name' used to resolve the project
        :raises ClientError: when any resource is out of quota
        """
        project_id = self.find_project_id(**kwargs)['id']
        # Check for VM
        pending_vm = quotas[project_id]['cyclades.vm']['project_pending']
        limit_vm = quotas[project_id]['cyclades.vm']['project_limit']
        usage_vm = quotas[project_id]['cyclades.vm']['project_usage']
        available_vm = limit_vm - usage_vm - pending_vm
        if available_vm < 1:
            msg = 'Cyclades VMs out of limit'
            raise ClientError(msg, error_quotas_cluster_size)
        # Check for CPUs
        pending_cpu = quotas[project_id]['cyclades.cpu']['project_pending']
        limit_cpu = quotas[project_id]['cyclades.cpu']['project_limit']
        usage_cpu = quotas[project_id]['cyclades.cpu']['project_usage']
        available_cpu = limit_cpu - usage_cpu - pending_cpu
        if available_cpu < kwargs['vcpus']:
            msg = 'Cyclades cpu out of limit'
            raise ClientError(msg, error_quotas_cpu)
        # Check for RAM (quota values are in bytes, request is in MB)
        pending_ram = quotas[project_id]['cyclades.ram']['project_pending']
        limit_ram = quotas[project_id]['cyclades.ram']['project_limit']
        usage_ram = quotas[project_id]['cyclades.ram']['project_usage']
        available_ram = (limit_ram - usage_ram - pending_ram) / self.Bytes_to_MB
        if available_ram < kwargs['ram']:
            msg = 'Cyclades ram out of limit'
            raise ClientError(msg, error_quotas_ram)
        # Check for Disk space (quota values are in bytes, request is in GB)
        # BUG FIX: pending disk was read from the 'cyclades.ram' counter.
        pending_cd = quotas[project_id]['cyclades.disk']['project_pending']
        limit_cd = quotas[project_id]['cyclades.disk']['project_limit']
        usage_cd = quotas[project_id]['cyclades.disk']['project_usage']
        available_cyclades_disk_gb = (limit_cd - usage_cd - pending_cd) / self.Bytes_to_GB
        if available_cyclades_disk_gb < kwargs['disk']:
            msg = 'Cyclades disk out of limit'
            raise ClientError(msg, error_quotas_cyclades_disk)
        # Check for authorized IPs
        pending_ips = quotas[project_id]['cyclades.floating_ip']['project_pending']
        limit_ips = quotas[project_id]['cyclades.floating_ip']['project_limit']
        usage_ips = quotas[project_id]['cyclades.floating_ip']['project_usage']
        available_ips = limit_ips - usage_ips - pending_ips
        # TODO: figure out how to handle unassigned floating ips
        # for d in list_float_ips:
        #     if d['instance_id'] is None and d['port_id'] is None:
        #         available_ips += 1
        if available_ips < 1:
            msg = 'authorized IPs out of limit'
            raise ClientError(msg, error_get_ip)
        return True
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment