Unverified Commit 078e9265 authored by Ioannis Tsafaras's avatar Ioannis Tsafaras Committed by Avraam Tsantekidis
Browse files

LAM-40 Refactor ansible inventory input, make cluster manager script

parent a628292b
......@@ -3,18 +3,25 @@ from ansible.playbook import PlayBook
from ansible import callbacks
from ansible import utils
# NOTE(review): this span is a unified-diff hunk whose +/- markers were stripped
# by the page scrape, so it interleaves the REMOVED __init__ body with the ADDED
# one. It is not runnable as-is; the labels below mark the two halves.
class Manager:
def __init__(self, provisioner_response):
# OLD (removed) body: build {group: {"hosts": [fqdn, ...]}} straight from the
# raw Okeanos server responses and remember the master's FQDN for SSH proxying.
self.inventory = {}
for group in provisioner_response.keys():
self.inventory[group] = {"hosts": []}
for response in provisioner_response[group]:
self.inventory[group]["hosts"].append(response[u'SNF:fqdn'])
if group == "master":
self.master_fqdn = response[u'SNF:fqdn']
# NEW (added) body: expect the refactored provisioner response shape
# {"nodes": {"master": {...}, "slaves": [...]}, "subnet": {...}} and keep only
# a name ("snf-<id>") and internal IP per node, plus the subnet CIDR.
self.inventory["master"] = {
"name": "snf-" + str(provisioner_response["nodes"]["master"]["id"]),
"ip": provisioner_response["nodes"]["master"]["internal_ip"]}
self.inventory["slaves"] = []
for response in provisioner_response["nodes"]["slaves"]:
self.inventory["slaves"].append(
{"name": "snf-" + str(response["id"]),
"ip": response["internal_ip"]})
self.cidr = provisioner_response["subnet"]["cidr"]
# Route all ansible SSH through the master as a jump host ("-A -W %h:%p").
# NOTE(review): the % substitution binds to the quoted format string only, and
# "vm.okeanos.grnet.gr" is then appended AFTER the closing double quote with no
# leading "." — the produced ProxyCommand is '... root@snf-<id>"vm.okeanos.grnet.gr',
# which looks wrong; the suffix probably belongs inside the %s expansion as
# ".vm.okeanos.grnet.gr". Confirm against a working run.
ansible.constants.ANSIBLE_SSH_ARGS = '-o "ProxyCommand ssh -A -W %%h:%%p root@%s"' \
% self.inventory["master"]["name"] + "vm.okeanos.grnet.gr"
# ansible.constants.ANSIBLE_SSH_ARGS = '-o "ProxyCommand ssh root@%s nc %%h %%p"' \
# % self.inventory["master"]["name"] + "vm.okeanos.grnet.gr"
# Freshly provisioned VMs have unknown host keys; skip strict checking.
ansible.constants.HOST_KEY_CHECKING = False
# NOTE(review): diff hunk with +/- markers stripped — removed and added lines of
# create_inventory are interleaved below (and two "......@@" hunk headers remain
# embedded). Old/new pairs are labelled; the span is not runnable as-is.
def create_inventory(self):
"""
......@@ -22,15 +29,14 @@ class Manager:
:return:
"""
# OLD (removed): hard-coded hostname -> private-IP map used before the
# provisioner supplied internal IPs directly.
ipdict = {"snf-661243": "192.168.0.3", "snf-661526" : "192.168.0.2", "snf-661527" : "192.168.0.4"}
inventory_groups = []
host_vars = {}
master_group = ansible.inventory.group.Group(name="master")
# OLD (removed): look the master up by FQDN and map its IP via ipdict.
host = self.inventory["master"]["hosts"][0]
ansible_host = ansible.inventory.host.Host(name=host)
host_vars["internal_ip"] = ipdict[host.split('.')[0]]
host_vars["local_net"] = "192.168.0.0/24"
# NEW (added): take name/ip straight from the refactored inventory dict and
# use the real subnet CIDR captured in __init__.
host = self.inventory["master"]
ansible_host = ansible.inventory.host.Host(name=host["name"] + ".vm.okeanos.grnet.gr")
host_vars["internal_ip"] = self.inventory["master"]["ip"]
host_vars["local_net"] = self.cidr
for var_key, var_value in host_vars.iteritems():
ansible_host.set_variable(var_key, var_value)
# Master always gets id 0; slaves get 1..N below.
ansible_host.set_variable("id", 0)
......@@ -38,21 +44,21 @@ class Manager:
inventory_groups.append(master_group)
slave_group = ansible.inventory.group.Group(name="slaves")
# OLD (removed): proxy via the master's FQDN short name; iterate FQDN strings.
host_vars["proxy_env"] = {"http_proxy": "http://"+self.master_fqdn.split('.')[0]+":3128"}
for host_id, host in enumerate(self.inventory["slaves"]["hosts"], start=1):
ansible_host = ansible.inventory.host.Host(name=host.split('.')[0]+'.local')
host_vars["internal_ip"] = ipdict[host.split('.')[0]]
# NEW (added): proxy via the master's .local name (squid on 3128); iterate the
# {"name", "ip"} slave dicts built in __init__.
host_vars["proxy_env"] = {"http_proxy": "http://" + self.inventory["master"]["name"]+".local:3128"}
for host_id, host in enumerate(self.inventory["slaves"], start=1):
ansible_host = ansible.inventory.host.Host(name=host["name"] + ".local")
host_vars["internal_ip"] = host["ip"]
for var_key, var_value in host_vars.iteritems():
ansible_host.set_variable(var_key, var_value)
ansible_host.set_variable("id", host_id)
slave_group.add_host(ansible_host)
inventory_groups.append(slave_group)
# OLD stored the ansible Inventory over self.inventory (clobbering the source
# dict); NEW keeps it separately as self.ansible_inventory.
self.inventory = ansible.inventory.Inventory(host_list=None)
self.ansible_inventory = ansible.inventory.Inventory(host_list=None)
for group in inventory_groups:
self.inventory.add_group(group)
self.ansible_inventory.add_group(group)
return self.inventory
return self.ansible_inventory
# NOTE(review): diff hunk residue — the hunk header on the line after `"""`
# replaced the docstring body, leaving the triple quote unterminated in this
# view, and the two `pb = PlayBook(...)` lines are the removed (self.inventory)
# and added (self.ansible_inventory) variants of the same call. Runs the given
# playbook against the built inventory with ansible 1.x aggregate/runner
# callbacks, optionally restricted to `tags`. No comments are added past the
# `"""` below because everything after it is textually inside the broken string.
def run_playbook(self, playbook_file, tags=None):
"""
......@@ -62,7 +68,7 @@ class Manager:
stats = callbacks.AggregateStats()
playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)
pb = PlayBook(playbook=playbook_file, inventory=self.inventory, stats=stats,
pb = PlayBook(playbook=playbook_file, inventory=self.ansible_inventory, stats=stats,
callbacks=playbook_cb,
runner_callbacks=runner_cb, only_tags=tags)
pb.run()
......@@ -70,147 +76,12 @@ class Manager:
if __name__ == "__main__":
inv = test_provisioner_response = \
{"master": [{u'addresses': {}, u'links': [
{u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/servers/664664',
u'rel': u'self'},
{u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/servers/664664',
u'rel': u'bookmark'}],
u'image': {
u'id': u'0e399015-8723-4c78-8198-75bdf693cdde',
u'links': [{
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/images/0e399015-8723-4c78-8198-75bdf693cdde',
u'rel': u'self'}, {
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/images/0e399015-8723-4c78-8198-75bdf693cdde',
u'rel': u'bookmark'}, {
u'href': u'https://cyclades.okeanos.grnet.gr/image/v1.0/images/0e399015-8723-4c78-8198-75bdf693cdde',
u'rel': u'alternate'}]},
u'suspended': False,
u'flavor': {u'id': 3, u'links': [
{
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/flavors/3',
u'rel': u'self'},
{
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/flavors/3',
u'rel': u'bookmark'}]},
u'id': 664664,
u'security_groups': [{u'name': u'default'}],
u'attachments': [],
u'user_id': u'19e4daba-20e2-4d57-a6aa-92ba1c982fd9',
u'accessIPv4': u'', u'accessIPv6': u'',
u'progress': 0, u'config_drive': u'',
u'status': u'BUILD',
u'updated': u'2015-07-08T10:15:38.936455+00:00',
u'hostId': u'',
u'SNF:fqdn': u'snf-661243.vm.okeanos.grnet.gr',
u'deleted': False, u'key_name': None,
u'name': u'to mikro debian sto livadi',
u'adminPass': u'X9yqjSTAFO',
u'tenant_id': u'6ff62e8e-0ce9-41f7-ad99-13a18ecada5f',
u'created': u'2015-07-08T10:15:37.837229+00:00',
u'SNF:task_state': u'BUILDING',
u'volumes': [50369], u'diagnostics': [],
u'metadata': {u'os': u'debian',
u'users': u'root ckaner'},
u'SNF:port_forwarding': {}}],
"slaves": [{u'addresses': {}, u'links': [
{
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/servers/664664',
u'rel': u'self'},
{
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/servers/664664',
u'rel': u'bookmark'}],
u'image': {
u'id': u'0e399015-8723-4c78-8198-75bdf693cdde',
u'links': [{
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/images/0e399015-8723-4c78-8198-75bdf693cdde',
u'rel': u'self'}, {
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/images/0e399015-8723-4c78-8198-75bdf693cdde',
u'rel': u'bookmark'}, {
u'href': u'https://cyclades.okeanos.grnet.gr/image/v1.0/images/0e399015-8723-4c78-8198-75bdf693cdde',
u'rel': u'alternate'}]},
u'suspended': False,
u'flavor': {u'id': 3, u'links': [
{
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/flavors/3',
u'rel': u'self'},
{
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/flavors/3',
u'rel': u'bookmark'}]},
u'id': 664664,
u'security_groups': [{u'name': u'default'}],
u'attachments': [],
u'user_id': u'19e4daba-20e2-4d57-a6aa-92ba1c982fd9',
u'accessIPv4': u'', u'accessIPv6': u'',
u'progress': 0, u'config_drive': u'',
u'status': u'BUILD',
u'updated': u'2015-07-08T10:15:38.936455+00:00',
u'hostId': u'',
u'SNF:fqdn': u'snf-661526.vm.okeanos.grnet.gr',
u'deleted': False, u'key_name': None,
u'name': u'to mikro debian sto livadi',
u'adminPass': u'X9yqjSTAFO',
u'tenant_id': u'6ff62e8e-0ce9-41f7-ad99-13a18ecada5f',
u'created': u'2015-07-08T10:15:37.837229+00:00',
u'SNF:task_state': u'BUILDING',
u'volumes': [50369], u'diagnostics': [],
u'metadata': {u'os': u'debian',
u'users': u'root ckaner'},
u'SNF:port_forwarding': {}},
{u'addresses': {}, u'links': [
{
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/servers/664664',
u'rel': u'self'},
{
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/servers/664664',
u'rel': u'bookmark'}], u'image': {
u'id': u'0e399015-8723-4c78-8198-75bdf693cdde',
u'links': [{
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/images/0e399015-8723-4c78-8198-75bdf693cdde',
u'rel': u'self'}, {
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/images/0e399015-8723-4c78-8198-75bdf693cdde',
u'rel': u'bookmark'}, {
u'href': u'https://cyclades.okeanos.grnet.gr/image/v1.0/images/0e399015-8723-4c78-8198-75bdf693cdde',
u'rel': u'alternate'}]},
u'suspended': False,
u'flavor': {u'id': 3, u'links': [
{
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/flavors/3',
u'rel': u'self'},
{
u'href': u'https://cyclades.okeanos.grnet.gr/compute/v2.0/flavors/3',
u'rel': u'bookmark'}]},
u'id': 664664,
u'security_groups': [{u'name': u'default'}],
u'attachments': [],
u'user_id': u'19e4daba-20e2-4d57-a6aa-92ba1c982fd9',
u'accessIPv4': u'',
u'accessIPv6': u'', u'progress': 0,
u'config_drive': u'', u'status': u'BUILD',
u'updated': u'2015-07-08T10:15:38.936455+00:00',
u'hostId': u'',
u'SNF:fqdn': u'snf-661527.vm.okeanos.grnet.gr',
u'deleted': False, u'key_name': None,
u'name': u'to mikro debian sto livadi',
u'adminPass': u'X9yqjSTAFO',
u'tenant_id': u'6ff62e8e-0ce9-41f7-ad99-13a18ecada5f',
u'created': u'2015-07-08T10:15:37.837229+00:00',
u'SNF:task_state': u'BUILDING',
u'volumes': [50369], u'diagnostics': [],
u'metadata': {u'os': u'debian',
u'users': u'root ckaner'},
u'SNF:port_forwarding': {}}]}
# from provisioner import Provisioner
# provisioner = Provisioner("lambda")
# inv = provisioner.create_lambda_cluster("test_vm")
manager = Manager(inv)
manager.create_inventory()
response = {u'ips': [{u'floating_network_id': u'2216',
u'floating_ip_address': u'83.212.118.6',
u'id': u'686825'}],
u'nodes': {u'master': {u'id': 666355,
u'name': u'lambda-master'},
u'slaves': [{u'id': 666356, u'name': u'lambda-node1'}]},
u'vpn': {u'type': u'MAC_FILTERED', u'id': u'143499'},
u'subnet': {u'cidr': u'192.168.0.0/24', u'gateway_ip': u'192.168.0.1', u'id': u'142564'}}
ansible.constants.HOST_KEY_CHECKING = False
ansible.constants.ANSIBLE_SSH_ARGS = '-o "ProxyCommand ssh -A -W %%h:%%p root@%s"' % manager.master_fqdn
# ansible.constants.ANSIBLE_SSH_ARGS = '-o "ProxyCommand ssh root@%s nc %%h %%p"' % manager.master_fqdn
manager.run_playbook(playbook_file="../../ansible/playbooks/testinventory.yml", tags=["rm"])
# manager.run_playbook(playbook_file="../../ansible/playbooks/testproxy.yml", tags=["install"])
import argparse
from provisioner import Provisioner
from ansible_manager import Manager
if __name__ == "__main__":
    # Command-line front end: provision an Okeanos "lambda" cluster, collect
    # each node's private IP, then exercise the generated ansible inventory
    # with the test playbook.
    parser = argparse.ArgumentParser(description="Okeanos VM provisioning")
    parser.add_argument('--cloud', type=str, dest="cloud", default="lambda")
    parser.add_argument('--project-name', type=str, dest="project_name",
                        default="lambda.grnet.gr")
    # NOTE(review): --name and --image_name are parsed but never used below;
    # the cluster is always created as "test_vm". Confirm whether args.name /
    # args.image_name should be forwarded to create_lambda_cluster.
    parser.add_argument('--name', type=str, dest='name', default="to mikro debian sto livadi")
    parser.add_argument('--slaves', type=int, dest='slaves', default=1)
    parser.add_argument('--vcpus_master', type=int, dest='vcpus_master', default=4)
    parser.add_argument('--vcpus_slave', type=int, dest='vcpus_slave', default=4)
    parser.add_argument('--ram_master', type=int, dest='ram_master', default=4096)  # in MB
    parser.add_argument('--ram_slave', type=int, dest='ram_slave', default=4096)  # in MB
    parser.add_argument('--disk_master', type=int, dest='disk_master', default=40)  # in GB
    parser.add_argument('--disk_slave', type=int, dest='disk_slave', default=40)  # in GB
    parser.add_argument('--ip_request', type=int, dest='ip_request', default=1)
    parser.add_argument('--network_request', type=int, dest='network_request', default=1)
    parser.add_argument('--image_name', type=str, dest='image_name', default="debian")
    parser.add_argument('--cluster_size', type=int, dest='cluster_size', default=2)
    args = parser.parse_args()

    # Create the cluster on the selected Okeanos cloud/project.
    provisioner = Provisioner(cloud_name=args.cloud)
    provisioner.create_lambda_cluster("test_vm", slaves=args.slaves,
                                      cluster_size=args.cluster_size,
                                      vcpus_master=args.vcpus_master,
                                      vcpus_slave=args.vcpus_slave,
                                      ram_master=args.ram_master,
                                      ram_slave=args.ram_slave,
                                      disk_master=args.disk_master,
                                      disk_slave=args.disk_slave,
                                      ip_request=args.ip_request,
                                      network_request=args.network_request,
                                      project_name=args.project_name)

    # Augment the provisioner response in place with the private IP of every
    # node; Manager.create_inventory reads "internal_ip" from each node dict.
    # (The original also built an unused slave_ids list — dropped.)
    provisioner_response = provisioner.get_cluster_details()
    master = provisioner_response["nodes"]["master"]
    master["internal_ip"] = provisioner.get_server_private_ip(master["id"])
    for slave in provisioner_response["nodes"]["slaves"]:
        slave["internal_ip"] = provisioner.get_server_private_ip(slave["id"])

    # Build the ansible inventory and run the smoke-test playbook.
    manager = Manager(provisioner_response)
    manager.create_inventory()
    manager.run_playbook(playbook_file="../../ansible/playbooks/testinventory.yml", tags=["touch"])
    # manager.run_playbook(playbook_file="../../ansible/playbooks/testproxy.yml", tags=["install"])
......@@ -100,7 +100,7 @@ class Provisioner:
return None
def find_project_id(self, **kwargs):
""ii"
"""
:param kwargs: name, state, owner and mode to filter project by
:return: first project_id that matches the project name
"""
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment