Commit b782f338 authored by Themis Zamani

Merge pull request #38 from efikalti/fixes

[LAM-88] As a ~okeanos user, I want to be able to destroy a λ instance I own
parents 48da9c40 bd935851
@@ -7,7 +7,7 @@ The libraries contained in the core package are responsible for creating a clust
### provisioner
The library is responsible for creating a VM cluster, using the Kamaki python API. It reads the authentication info from the .kamakirc, and accepts the cluster specs as arguments.
The library is responsible for creating/deleting a VM cluster, using the Kamaki python API. It reads the authentication info from the .kamakirc, and accepts the cluster specs as arguments.
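For programmatic use, a minimal sketch looks like this (the values are illustrative and simply mirror the defaults of `cluster_creator.py`; deletion expects the compact dictionary format kept in the database):

```
from fokia.provisioner import Provisioner

provisioner = Provisioner(cloud_name='~okeanos')
provisioner.create_lambda_cluster('lambda-master', slaves=1,
                                  vcpus_master=4, vcpus_slave=4,
                                  ram_master=4096, ram_slave=4096,
                                  disk_master=40, disk_slave=40,
                                  ip_request=1, network_request=1,
                                  project_name='lambda.grnet.gr')
details = provisioner.get_cluster_details()
private_key = provisioner.get_private_key()

# Deletion works on the compact form kept in the database:
# {'nodes': [master_id, slave1_id, ...], 'vpn': vpn_id}
node_ids = [details['nodes']['master']['id']] + \
           [slave['id'] for slave in details['nodes']['slaves']]
provisioner.delete_lambda_cluster({'nodes': node_ids, 'vpn': details['vpn']['id']})
```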
### ansible_manager
@@ -20,12 +20,22 @@ The library is responsible for managing the ansible, that will run on the cluste
### cluster_creator
The script is responsible for creating the entire lambda instance.
The script is responsible for creating/deleting the entire lambda instance.
Run the script as `cluster_creator.py --action=create` to create a lambda cluster.
Run the script as `cluster_creator.py --action=delete --cluster_id=<id>` to delete a lambda cluster.
Depending on the selected action, different arguments are required.
If action is CREATE
* It sets the provisioner arguments (cluster specs), then calls the provisioner to create the cluster.
* It then takes the provisioner's output dictionary and enriches it with additional values (e.g. internal IPs) obtained from the provisioner after the cluster has been created.
* It calls the ansible_manager, to create the inventory, using the dictionary as input.
* Finally, it uses the created manager object (containing the inventory and constants), to run the required playbooks in the correct order, to create the lambda instance.
If action is DELETE
* It reads the cluster id from the arguments.
* It queries the database for the cluster information associated with this id.
* It calls the provisioner's delete_lambda_cluster method with the information retrieved from the database (a minimal sketch follows below).
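A minimal sketch of that delete path (the `delete_instance` helper name is illustrative, not part of the package; `details` is assumed to be the dictionary read from the database):

```
from fokia.provisioner import Provisioner

def delete_instance(details, cloud_name='~okeanos'):
    """Tear down the cluster described by details, e.g.
    {'nodes': [master_id, slave1_id, ...], 'vpn': vpn_id}."""
    if details is None:
        return False
    provisioner = Provisioner(cloud_name=cloud_name)
    return provisioner.delete_lambda_cluster(details)
```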
## Prerequisites
* kamaki 0.13.4 or later
@@ -46,7 +56,7 @@ default_cloud = lambda
url = https://accounts.okeanos.grnet.gr/identity/v2.0
token = your-okeanos-token
```
Note that you may retrieve your ~okeanos API token, after logging into the service, by visiting [this page][api_link].
- Install required packages. Within the `core` directory execute `sudo pip install -r requirements.txt`.
- Install the package using `sudo python setup.py install`.
@@ -54,7 +64,7 @@ Note that you may retrieve your ~okeanos API token, after logging into the servi
## Usage
To create a lambda instance, one must run `python cluster_creator.py` from within the `core/fokia` directory. To change the default settings (one master instance and one slave instance) one has to edit the `cluster_creator.py` script prior to executing it.
@@ -67,4 +77,4 @@ To test the library we use `tox`. In order to run the tests:
This will automatically create the required testing environments and run the tests.
[api_link]: https://accounts.okeanos.grnet.gr/ui/api_access
@@ -5,6 +5,20 @@ import inspect
from fokia.provisioner import Provisioner
from fokia.ansible_manager import Manager
def get_cluster_details(cluster_id):
"""
:param cluster_id: id of the cluster
:returns: the details of the cluster after retrieving them from the database.
"""
#TODO
#1. create a query for the table cluster requesting the cluster info with this id
#2. parse the answer, create a dictionary object with this format:
"""
{'nodes':[master_id,node1_id,node2_id,...], 'vpn':vpn_id}
"""
#3. return dictionary, return null if the query did not return any answer.
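# A possible shape for this helper, purely as an illustrative sketch: the
# project's database layer does not exist yet, so the sqlite3 file and the
# 'clusters' / 'cluster_nodes' tables below are hypothetical.
#
#     import sqlite3
#     conn = sqlite3.connect('lambda_instances.db')
#     row = conn.execute('SELECT vpn_id FROM clusters WHERE id = ?',
#                        (cluster_id,)).fetchone()
#     if row is None:
#         return None
#     nodes = [r[0] for r in conn.execute(
#         'SELECT node_id FROM cluster_nodes WHERE cluster_id = ?',
#         (cluster_id,))]
#     return {'nodes': nodes, 'vpn': row[0]}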
if __name__ == "__main__":
start_time = time.time()
script_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
@@ -24,46 +38,53 @@ if __name__ == "__main__":
parser.add_argument('--ip_request', type=int, dest='ip_request', default=1)
parser.add_argument('--network_request', type=int, dest='network_request', default=1)
parser.add_argument('--image_name', type=str, dest='image_name', default='debian')
parser.add_argument('--cluster_size', type=int, dest='cluster_size', default=2)
parser.add_argument('--action', type=str, dest='action', default='create')
parser.add_argument('--cluster_id', type=int, dest='cluster_id', default=0)
args = parser.parse_args()
provisioner = Provisioner(cloud_name=args.cloud)
provisioner.create_lambda_cluster('lambda-master', slaves=args.slaves,
cluster_size=args.cluster_size,
vcpus_master=args.vcpus_master,
vcpus_slave=args.vcpus_slave,
ram_master=args.ram_master,
ram_slave=args.ram_slave,
disk_master=args.disk_master,
disk_slave=args.disk_slave,
ip_request=args.ip_request,
network_request=args.network_request,
project_name=args.project_name)
provisioner_response = provisioner.get_cluster_details()
master_id = provisioner_response['nodes']['master']['id']
master_ip = provisioner.get_server_private_ip(master_id)
provisioner_response['nodes']['master']['internal_ip'] = master_ip
slave_ids = [slave['id'] for slave in provisioner_response['nodes']['slaves']]
for i, slave in enumerate(provisioner_response['nodes']['slaves']):
slave_ip = provisioner.get_server_private_ip(slave['id'])
provisioner_response['nodes']['slaves'][i]['internal_ip'] = slave_ip
provisioner_response['pk'] = provisioner.get_private_key()
if args.action == 'create':
provisioner.create_lambda_cluster('lambda-master', slaves=args.slaves,
vcpus_master=args.vcpus_master,
vcpus_slave=args.vcpus_slave,
ram_master=args.ram_master,
ram_slave=args.ram_slave,
disk_master=args.disk_master,
disk_slave=args.disk_slave,
ip_request=args.ip_request,
network_request=args.network_request,
project_name=args.project_name)
provisioner_response = provisioner.get_cluster_details()
master_id = provisioner_response['nodes']['master']['id']
master_ip = provisioner.get_server_private_ip(master_id)
provisioner_response['nodes']['master']['internal_ip'] = master_ip
slave_ids = [slave['id'] for slave in provisioner_response['nodes']['slaves']]
for i, slave in enumerate(provisioner_response['nodes']['slaves']):
slave_ip = provisioner.get_server_private_ip(slave['id'])
provisioner_response['nodes']['slaves'][i]['internal_ip'] = slave_ip
provisioner_response['pk'] = provisioner.get_private_key()
print 'response =', provisioner_response
provisioner_time = time.time()

manager = Manager(provisioner_response)
manager.create_inventory()
# manager.run_playbook(playbook_file=script_path + "/../../ansible/playbooks/test/testinventory.yml", tags=['hosts'])
# manager.run_playbook(playbook_file=script_path + "/../../ansible/playbooks/test/testproxy.yml", tags=['install'])
manager.run_playbook(playbook_file=script_path + "/../../ansible/playbooks/cluster-install.yml")

manager.cleanup()

provisioner_duration = provisioner_time - start_time
ansible_duration = time.time() - provisioner_time

print 'VM provisioning took', round(provisioner_duration), 'seconds'
print 'Ansible playbooks took', round(ansible_duration), 'seconds'
elif args.action == 'delete':
details = get_cluster_details(args.cluster_id)
if details is not None:
provisioner.delete_lambda_cluster(details)
@@ -82,6 +82,9 @@ class Provisioner:
self.private_key = None
self.image_id = 'c6f5adce-21ad-4ce3-8591-acfe7eb73c02'
"""
FIND RESOURCES
"""
def find_flavor(self, **kwargs):
"""
@@ -128,37 +131,9 @@ class Provisioner:
logger.info("Retrieving project")
return self.astakos.get_projects(**filter)[0]
def create_vm(self, vm_name=None, image_id=None, ip=None, personality=None, **kwargs):
"""
:param vm_name: Name of the virtual machine to create
:param image_id: image id if you want another image than the default
:param kwargs: passed to the functions called for detail options
:return:
"""
flavor_id = self.find_flavor(**kwargs)['id']
# Get image
if image_id == None:
image_id = self.image_id
else:
image_id = self.find_image(**kwargs)['id']
project_id = self.find_project_id(**kwargs)['id']
networks = list()
if ip != None:
ip_obj = dict()
ip_obj['uuid'] = ip['floating_network_id']
ip_obj['fixed_ip'] = ip['floating_ip_address']
networks.append(ip_obj)
networks.append({'uuid': kwargs['net_id']})
if personality == None:
personality = []
try:
okeanos_response = self.cyclades.create_server(name=vm_name, flavor_id=flavor_id,
image_id=image_id,
project_id=project_id,
networks=networks, personality=personality)
except ClientError as ex:
raise ex
return okeanos_response
"""
CREATE RESOURCES
"""
def create_lambda_cluster(self, vm_name, **kwargs):
"""
@@ -171,13 +146,15 @@ class Provisioner:
ram = kwargs['slaves'] * kwargs['ram_slave'] + kwargs['ram_master']
disk = kwargs['slaves'] * kwargs['disk_slave'] + kwargs['disk_master']
project_id = self.find_project_id(**kwargs)['id']
response = self.check_all_resources(quotas, cluster_size=kwargs['cluster_size'],
cluster_size = kwargs['slaves'] + 1
response = self.check_all_resources(quotas, cluster_size=cluster_size,
vcpus=vcpus,
ram=ram,
disk=disk,
ip_request=kwargs['ip_request'],
network_request=kwargs['network_request'],
project_name=kwargs['project_name'])
if response:
# Get ssh keys
key = RSA.generate(2048)
@@ -199,8 +176,6 @@ class Provisioner:
master_personality.append(private)
slave_personality = []
slave_personality.append(authorized)
print(master_personality)
print(slave_personality)
# Create private network for cluster
self.vpn = self.create_vpn('lambda-vpn', project_id=project_id)
@@ -254,56 +229,37 @@ class Provisioner:
inventory["slaves"] = self.slaves
return inventory
def get_cluster_details(self):
def create_vm(self, vm_name=None, image_id=None, ip=None, personality=None, **kwargs):
"""
:returns: dictionary of basic details for the cluster
:param vm_name: Name of the virtual machine to create
:param image_id: image id if you want another image than the default
:param kwargs: passed to the functions called for detail options
:return:
"""
details = dict()
nodes = dict()
master = dict()
master['id'] = self.master['id']
master['name'] = self.master['name']
master['adminPass'] = self.master['adminPass']
nodes['master'] = master
slaves = list()
for slave in self.slaves:
slave_obj = dict()
slave_obj['id'] = slave['id']
slave_obj['name'] = slave['name']
name = slave_obj['name']
slaves.append(slave_obj)
nodes['slaves'] = slaves
details['nodes'] = nodes
vpn = dict()
vpn['id'] = self.vpn['id']
vpn['type'] = self.vpn['type']
details['vpn'] = vpn
details['ips'] = self.ips
ips_list = list()
for ip in self.ips:
flavor_id = self.find_flavor(**kwargs)['id']
# Get image
if image_id == None:
image_id = self.image_id
else:
image_id = self.find_image(**kwargs)['id']
project_id = self.find_project_id(**kwargs)['id']
networks = list()
if ip != None:
ip_obj = dict()
ip_obj['floating_network_id'] = ip['floating_network_id']
ip_obj['floating_ip_address'] = ip['floating_ip_address']
ip_obj['id'] = ip['id']
ips_list.append(ip_obj)
details['ips'] = ips_list
subnet = dict()
subnet['id'] = self.subnet['id']
subnet['cidr'] = self.subnet['cidr']
subnet['gateway_ip'] = self.subnet['gateway_ip']
details['subnet'] = subnet
return details
def get_private_key(self):
"""
:returns: Private key of master
"""
return self.private_key
ip_obj['uuid'] = ip['floating_network_id']
ip_obj['fixed_ip'] = ip['floating_ip_address']
networks.append(ip_obj)
networks.append({'uuid': kwargs['net_id']})
if personality == None:
personality = []
try:
okeanos_response = self.cyclades.create_server(name=vm_name, flavor_id=flavor_id,
image_id=image_id,
project_id=project_id,
networks=networks, personality=personality)
except ClientError as ex:
raise ex
return okeanos_response
def create_vpn(self, network_name, project_id):
"""
@@ -322,24 +278,15 @@ class Provisioner:
raise ex
return okeanos_response
def destroy_vpn(self, id):
"""
Destroy a virtual private network
:param id: id of the network we want to destroy
:return: True if successfull
"""
try:
self.network_client.delete_network(id)
return True
except ClientError as ex:
raise ex
return okeanos_response
def reserve_ip(self,project_id):
"""
Reserve ip
:return: the ip object if successful
"""
list_float_ips = self.network_client.list_floatingips()
for ip in list_float_ips:
if ip['instance_id'] is None and ip['port_id'] is None:
return ip
try:
ip = self.network_client.create_floatingip(project_id=project_id)
return ip
@@ -363,6 +310,7 @@ class Provisioner:
raise ex
return okeanos_response
def connect_vm(self, vm_id, net_id):
"""
Connects the vm with this id to the network with the net_id
@@ -394,6 +342,117 @@ class Provisioner:
raise ex
return okeanos_response
"""
DELETE RESOURCES
"""
def delete_lambda_cluster(self, details):
"""
Delete a lambda cluster
:param details: details of the cluster we want to delete
:return: True if successful
"""
# Delete every node
nodes = details['nodes']
for node in nodes:
if not self.delete_vm(node):
msg = 'Error deleting node with id ' + str(node)
raise ClientError(msg, error_fatal)
# Wait for the node deletions to complete
for node in nodes:
self.cyclades.wait_server(server_id=node, current_status='ACTIVE')
# Delete vpn
vpn = details['vpn']
if not self.delete_vpn(vpn):
msg = 'Error deleting vpn with id ' + str(vpn)
raise ClientError(msg, error_fatal)
return True
def delete_vm(self, vm_id):
"""
Delete a vm
:param vm_id: id of the vm we want to delete
:return: True if successful
"""
try:
self.cyclades.delete_server(vm_id)
return True
except ClientError as ex:
raise ex
return False
def delete_vpn(self, net_id):
"""
Delete a virtual private network
:param net_id: id of the network we want to delete
:return: True if successful
"""
try:
self.network_client.delete_network(net_id)
return True
except ClientError as ex:
raise ex
return False
"""
GET RESOURCES
"""
def get_cluster_details(self):
"""
:returns: dictionary of basic details for the cluster
"""
details = dict()
nodes = dict()
master = dict()
master['id'] = self.master['id']
master['name'] = self.master['name']
master['adminPass'] = self.master['adminPass']
nodes['master'] = master
slaves = list()
for slave in self.slaves:
slave_obj = dict()
slave_obj['id'] = slave['id']
slave_obj['name'] = slave['name']
name = slave_obj['name']
slaves.append(slave_obj)
nodes['slaves'] = slaves
details['nodes'] = nodes
vpn = dict()
vpn['id'] = self.vpn['id']
vpn['type'] = self.vpn['type']
details['vpn'] = vpn
details['ips'] = self.ips
ips_list = list()
for ip in self.ips:
ip_obj = dict()
ip_obj['floating_network_id'] = ip['floating_network_id']
ip_obj['floating_ip_address'] = ip['floating_ip_address']
ip_obj['id'] = ip['id']
ips_list.append(ip_obj)
details['ips'] = ips_list
subnet = dict()
subnet['id'] = self.subnet['id']
subnet['cidr'] = self.subnet['cidr']
subnet['gateway_ip'] = self.subnet['gateway_ip']
details['subnet'] = subnet
return details
def get_private_key(self):
"""
:returns: Private key of master
"""
return self.private_key
def get_quotas(self, **kwargs):
"""
Get the user quotas for the defined project.
@@ -430,6 +489,9 @@ class Provisioner:
return ip
return None
"""
CHECK RESOURCES
"""
def check_all_resources(self, quotas, **kwargs):
"""
Checks user's quota for every requested resource.
@@ -437,8 +499,12 @@ class Provisioner:
:param **kwargs: arguments
"""
project_id = self.find_project_id(**kwargs)['id']
# quotas = self.get_quotas()
flavor = self.find_flavor(**kwargs)
#check flavor
if not flavor['SNF:allow_create']:
msg = 'This flavor does not allow create.'
raise ClientError(msg, error_flavor_list)
return False
# Check for VMs
pending_vm = quotas[project_id]['cyclades.vm']['project_pending']
limit_vm = quotas[project_id]['cyclades.vm']['project_limit']
@@ -498,48 +564,3 @@ class Provisioner:
raise ClientError(msg, error_get_network_quota)
return False
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Okeanos VM provisioning")
parser.add_argument('--cloud', type=str, dest="cloud", default="~okeanos")
parser.add_argument('--project-name', type=str, dest="project_name",
default="lambda.grnet.gr")
parser.add_argument('--name', type=str, dest='name', default="to mikro debian sto livadi")
parser.add_argument('--slaves', type=int, dest='slaves', default=1)
parser.add_argument('--vcpus_master', type=int, dest='vcpus_master', default=4)
parser.add_argument('--vcpus_slave', type=int, dest='vcpus_slave', default=4)
parser.add_argument('--ram_master', type=int, dest='ram_master', default=4096) # in MB
parser.add_argument('--ram_slave', type=int, dest='ram_slave', default=4096) # in MB
parser.add_argument('--disk_master', type=int, dest='disk_master', default=40) # in GB
parser.add_argument('--disk_slave', type=int, dest='disk_slave', default=40) # in GB
parser.add_argument('--ip_request', type=int, dest='ip_request', default=1)
parser.add_argument('--network_request', type=int, dest='network_request', default=1)
parser.add_argument('--image_name', type=str, dest='image_name', default="debian")
parser.add_argument('--cluster_size', type=int, dest='cluster_size', default=2)
args = parser.parse_args()
provisioner = Provisioner(cloud_name=args.cloud)
"""
print(provisioner.create_vm(vm_name=args.name, project_name=args.project_name,
image_name="debian"))
"""
response = provisioner.create_lambda_cluster(vm_name="lambda-master" , slaves=args.slaves,
cluster_size=args.cluster_size,
vcpus_master=args.vcpus_master,
vcpus_slave=args.vcpus_slave,
ram_master=args.ram_master,
ram_slave=args.ram_slave,
disk_master=args.disk_master,
disk_slave=args.disk_slave,
ip_request=args.ip_request,
network_request=args.network_request,
project_name=args.project_name)
# print(response)
# print(provisioner.get_cluster_details())
# print(provisioner.get_private_key())