Commit 0141c833 authored by Ilias Tsitsimpis's avatar Ilias Tsitsimpis
Browse files

ci: Redesign CI scripts

Spawn VM to use as builder-slave.
Build packages inside this VM.
Deploy Synnefo using snf-deploy.
Run unit tests.
Run functional tests using burnin.
parent f8892eeb
#!/usr/bin/env python
"""
Build Synnefo packages for debian
"""
from utils import SynnefoCI
def autopkg_debian():
    """Build the Synnefo debian packages on the builder slave"""
    ci = SynnefoCI()
    ci.build_synnefo()


if __name__ == "__main__":
    autopkg_debian()
#!/usr/bin/env python
"""
Deploy Synnefo using snf-deploy
"""
from utils import SynnefoCI
def deploy_synnefo():
    """Deploy Synnefo on the slave server using snf-deploy"""
    ci = SynnefoCI()
    ci.deploy_synnefo()


if __name__ == "__main__":
    deploy_synnefo()
#!/usr/bin/env python
"""
Download Synnefo packages
"""
from utils import SynnefoCI
def fetch_packages():
    """Fetch the built Synnefo packages from the slave server"""
    ci = SynnefoCI()
    ci.fetch_packages()


if __name__ == "__main__":
    fetch_packages()
[Global]
# Timeouts in seconds
build_timeout = 240
# Apt repository to use
apt_repo = http://apt.dev.grnet.gr squeeze/
# Synnefo git repo
synnefo_repo = https://code.grnet.gr/git/synnefo
# Git branch to test (specify sha1 or branch name)
synnefo_branch = HEAD
# snf-deploy git repo
deploy_repo = https://code.grnet.gr/git/snf-deploy
# Defines the schema that snf-deploy will use
schema = one_node
# Local dir to save built packages
pkgs_dir = /tmp/synnefo_pkgs
# If True patch the pydist.py module (see Debian bug #657665)
patch_pydist = True
# Configuration of git (on remote server)
git_config_name = Buildbot
git_config_mail = synnefo@builder.dev.grnet.gr
# Url to fetch ssh public keys
public_ssh_keys_url =
# Network address from which we allow access to server
filter_access_network = 195.251.29.0/24,62.217.123.39
# Config file to save temporary options (eg IPs, passwords etc)
temporary_config = /tmp/ci_temp_conf
[Deployment]
# Credentials
auth_url = https://accounts.okeanos.io/identity/v2.0/
token =
# If we deploy on okeanos.io we have to set this to True
# The server will reside besides a NAT and we have to compute ssh port
deploy_on_io = True
# Server name to use for our machine
server_name = Synnefo Deployment
# Flavor ID to use
# (149 for production, 639 for okeanos.io)
flavor_id = 639
# Image to use (name must contain this)
image_name = OldStable
# UUID of owner of system images
# (25ecced9-bf53-4145-91ee-cf47377e9fb2 for production,
# 04cbe33f-29b7-4ef1-94fb-015929e5fc06 for okeanos.io)
system_uuid = 04cbe33f-29b7-4ef1-94fb-015929e5fc06
[Burnin]
# Maybe add some burnin options
# (e.g. tests to run/ignore, timeouts etc)
cmd_options = --nofailfast --no-ipv6 --action-timeout=240
[Unit Tests]
component = astakos cyclades pithos
[Repository]
# Projects reside on this repo
projects =
snf-common
astakosclient
snf-django-lib
snf-webproject
snf-branding
snf-astakos-app
snf-pithos-backend
snf-cyclades-gtools
snf-cyclades-app
snf-pithos-app
snf-tools
#!/usr/bin/env python
"""
Run burnin functional test suite
"""
from utils import SynnefoCI
def run_burnin():
    """Run the burnin functional test suite against the deployment"""
    ci = SynnefoCI()
    ci.run_burnin()


if __name__ == "__main__":
    run_burnin()
[packages]
# whether to use apt-get or local generated package found in packages dir
use_local_packages = True
# url to obtain latest synnefo packages.
# To use them change USE_LOCAL_PACKAGES setting to yes
# To get them run: snf-deploy packages
package_url = http://builder.dev.grnet.gr/synnefo/packages/Squeeze/40/
[dirs]
# dir to find all template files used to customize setup
# in case you want to add another setting please modify the corresponding file
templates = /var/lib/snf-deploy/files
# dir to store local images (disk0, disk1 of the virtual cluster)
images = /var/lib/snf-deploy/images
# dir to store/find local packages
# dir to locally save packages that will be downloaded from package_url
# put here any locally created packages (useful for development)
packages = /var/lib/snf-deploy/packages
# dir to store pidfiles (dnsmasq, kvm)
run = /var/run/snf-deploy
# dir to store dnsmasq related files
dns = /var/lib/snf-deploy/dnsmasq
# dir to lookup fabfile and ifup script
lib = /usr/lib/snf-deploy
# dir to store executed commands (to enforce sequential execution)
cmd = /var/run/snf-deploy/cmd
[ganeti1]
cluster_nodes = node1
master_node = node1
cluster_netdev = eth0
cluster_name = ganeti1
cluster_ip = 192.168.0.13
vg = autovg
synnefo_public_network_subnet = 10.2.1.0/24
synnefo_public_network_gateway = 10.2.1.1
synnefo_public_network_type = CUSTOM
image_dir = /srv/okeanos
# To add another cluster repeat the above section
# with different header and nodes
# please note that currently only deployments with nodes
# (both ganeti and synnefo) residing in the same subnet/domain are supported
[network]
domain = synnefo.live
subnet = 192.168.0.0/28
gateway = 192.168.0.14
[hostnames]
node1 = auto1
# node2 = auto2
[ips]
node1 = 192.168.0.1
# node2 = 192.168.0.2
# This is used only in case of vcluster
# needed to pass the correct dhcp responses to the virtual nodes
[macs]
node1 = 52:54:00:00:00:01
# node2 = 52:54:00:00:00:02
[info]
# Here we define which nodes from the predefined ones to use
nodes = node1
# login credentials for the nodes
# please note that in case of vcluster these are preconfigured
# and not editable.
# in case of physical nodes all nodes should have the same login account
user = root
password = 12345
public_iface = eth0
vm_public_iface = eth1
vm_private_iface = eth2
# extra disk name inside the nodes
# if defined, snf-deploy will create a VG for ganeti in order to support lvm storage
# if not then only file disk template will be supported
extra_disk = /dev/vdb
[debian]
rabbitmq-server = testing
gunicorn = squeeze-backports
qemu-kvm = squeeze-backports
qemu = squeeze-backports
python-gevent = squeeze-backports
apache2 =
postgresql =
python-psycopg2 =
python-argparse =
nfs-kernel-server = squeeze-backports
nfs-common = squeeze-backports
bind9 =
vlan =
vlan =
lvm2 =
curl =
memcached =
python-memcache =
bridge-utils =
python-progress =
ganeti-instance-debootstrap =
[synnefo]
snf-astakos-app = stable
snf-common = stable
snf-cyclades-app = stable
snf-cyclades-gtools = stable
snf-django-lib = stable
python-astakosclient = stable
python-objpool = stable
snf-branding = stable
snf-webproject = stable
snf-pithos-app = stable
snf-pithos-backend = stable
snf-tools = stable
[ganeti]
snf-ganeti = 2.6.2+ippool11+hotplug5+extstorage3+rbdfix1+kvmfix2+nolvm+netxen-1~squeeze
ganeti-htools = 2.6.2+ippool11+hotplug5+extstorage3+rbdfix1+kvmfix2+nolvm+netxen-1~squeeze
[other]
snf-cloudcms = stable
snf-vncauthproxy = stable
snf-pithos-webclient = stable
snf-image = stable
snf-network = stable
nfdhcpd = stable
kamaki = stable
python-bitarray = stable
nfqueue-bindings-python = stable
[cred]
synnefo_user = synnefo
synnefo_db_passwd = example_passw0rd
synnefo_rapi_passwd = example_rapi_passw0rd
synnefo_rabbitmq_passwd = example_rabbitmq_passw0rd
user_email = user@synnefo.org
user_name = John
user_lastname = Doe
user_passwd = 12345
[roles]
accounts = node1
compute = node1
object-store = node1
cyclades = node1
pithos = node1
cms = node1
db = node1
mq = node1
ns = node1
client = node1
router = node1
[synnefo]
pithos_dir = /srv/pithos
vm_public_bridge = br0
vm_private_bridge = prv0
common_bridge = br0
debian_base_url = https://pithos.okeanos.grnet.gr/public/RDISy7sNVIJ9KIm4JkmbX4
[image]
# url to get the base image. This is a debian base image with preconfigured
# root password and installed rsa/dsa keys. Plus a NetworkManager hook that
# changes the VM's name based on info provided by dhcp response.
# To create it run: snf-deploy image
squeeze_image_url = https://pithos.okeanos.grnet.gr/public/832xv
ubuntu_image_url =
# In order for ganeti nodes to support lvm storage (plain disk template), an
# extra disk is needed so that a VG can eventually be created. Ganeti requires
# this VG to be at least 30GB. To this end, for the virtual nodes to have this
# extra disk, an image should be created locally. There are three options:
# 1. not create an extra disk (only file storage template will be supported)
# 2. create an image of 30G in image dir (default /var/lib/snf-deploy/images)
# using dd if=/dev/zero of=squeeze.disk1
# 3. create this image in a local VG using lvcreate -L30G squeeze.disk1 lvg
# and create a symbolic link in /var/lib/snf-deploy/images
# Whether to create an extra disk or not
create_extra_disk = False
# lvg is the name of the local VG if any
lvg =
# OS installed in the virtual cluster
os = squeeze
[cluster]
# the bridge to use for the virtual cluster
# on this bridge we will launch a dnsmasq and provide
# fqdns needed to the cluster.
# In order for cluster nodes to have internet access, the host must do NAT.
# iptables -t nat -A POSTROUTING -s 192.0.0.0/28 -j MASQUERADE
# ip addr add 192.0.0.14/28 dev auto_nodes_br
# To create run: snf-deploy cluster
bridge = auto_nodes_br
#!/usr/bin/env python
"""
Setup slave server
"""
from utils import SynnefoCI
def setup_slave():
    """Setup slave server

    Create the builder VM (starting from a clean temporary config),
    optionally pick up the deployment token from the shared NFS mount,
    and copy the Synnefo repository to the new server.
    """
    synnefo_ci = SynnefoCI(cleanup_config=True)
    # Get token from /nfs/synnefo_token.  The token file is optional, so
    # only a missing/unreadable file is ignored; the original bare
    # `except' also hid genuine errors from write_config().
    try:
        with open("/nfs/synnefo_token") as token_file:
            token = token_file.read().strip()
        synnefo_ci.write_config('token', token, 'Deployment')
    except IOError:
        pass
    # Build slave server
    synnefo_ci.create_server()
    # Copy synnefo repo to server
    synnefo_ci.clone_repo()


if __name__ == "__main__":
    setup_slave()
#!/usr/bin/env python
"""
Run Synnefo unit test suite
"""
from utils import SynnefoCI
def unit_test():
    """Run the Synnefo unit test suite on the slave server"""
    ci = SynnefoCI()
    ci.unit_test()


if __name__ == "__main__":
    unit_test()
#!/usr/bin/env python
"""
Synnefo ci utils module
"""
import os
import sys
import time
import logging
import fabric.api as fabric
from ConfigParser import ConfigParser, DuplicateSectionError
from kamaki.clients.astakos import AstakosClient
from kamaki.clients.cyclades import CycladesClient
from kamaki.clients.image import ImageClient
def _run(cmd, verbose):
    """Run a remote command through fabric, honoring the verbosity level

    When `verbose' is False the command's stdout is hidden as well as
    fabric's "running" banner; otherwise only the banner is hidden.
    """
    hidden = ('running',) if verbose else ('running', 'stdout')
    with fabric.hide(*hidden):
        return fabric.run(cmd)
def _red(msg):
"""Red color"""
#return "\x1b[31m" + str(msg) + "\x1b[0m"
return str(msg)
def _yellow(msg):
"""Yellow color"""
#return "\x1b[33m" + str(msg) + "\x1b[0m"
return str(msg)
def _green(msg):
"""Green color"""
#return "\x1b[32m" + str(msg) + "\x1b[0m"
return str(msg)
def _check_fabric(fun):
"""Check if fabric env has been set"""
def wrapper(self, *args):
"""wrapper function"""
if not self.fabric_installed:
self.setup_fabric()
return fun(self, *args)
return wrapper
def _check_kamaki(fun):
"""Check if kamaki has been initialized"""
def wrapper(self, *args):
"""wrapper function"""
if not self.kamaki_installed:
self.setup_kamaki()
return fun(self, *args)
return wrapper
class _MyFormatter(logging.Formatter):
    """Logging formatter that picks a format string per log level"""

    def format(self, record):
        """Format `record', temporarily swapping the format string

        DEBUG lines are indented, INFO lines are plain, WARNING and
        ERROR lines get a [W]/[E] prefix (colored when coloring is
        enabled).  The original format string is restored afterwards.
        """
        saved_fmt = self._fmt
        per_level = {
            logging.DEBUG: " %(msg)s",
            logging.INFO: "%(msg)s",
            logging.WARNING: _yellow("[W] %(msg)s"),
            logging.ERROR: _red("[E] %(msg)s"),
        }
        if record.levelno in per_level:
            self._fmt = per_level[record.levelno]
        result = logging.Formatter.format(self, record)
        self._fmt = saved_fmt
        return result
class SynnefoCI(object):
"""SynnefoCI python class"""
def __init__(self, cleanup_config=False):
    """Initialize SynnefoCI

    Set up the logger, locate the local/repo directories, read the
    configuration files and initialize the fabric/kamaki state flags.

    @param cleanup_config: if True, delete the temporary config file
        instead of merging it into the configuration
    """
    # Setup logger
    self.logger = logging.getLogger('synnefo-ci')
    self.logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    handler.setFormatter(_MyFormatter())
    self.logger.addHandler(handler)

    # Get our local dir
    self.ci_dir = os.path.dirname(os.path.abspath(__file__))
    self.repo_dir = os.path.dirname(self.ci_dir)

    # Read config file
    self.conffile = os.path.join(self.ci_dir, "new_config")
    self.config = ConfigParser()
    # Keep option names case-sensitive (default lower-cases them)
    self.config.optionxform = str
    self.config.read(self.conffile)
    temp_config = self.config.get('Global', 'temporary_config')
    if cleanup_config:
        # Start from a clean state: drop stale temporary options.
        # Only a missing file is ignored; the original bare `except'
        # hid every error (e.g. permission problems).
        try:
            os.remove(temp_config)
        except OSError:
            pass
    else:
        self.config.read(temp_config)

    # Initialize variables
    self.fabric_installed = False
    self.kamaki_installed = False
    self.cyclades_client = None
    self.image_client = None
def setup_kamaki(self):
    """Initialize the kamaki clients

    Authenticate against astakos and create the cyclades and image
    clients from the service endpoints astakos advertises.
    """
    self.logger.info("Setup kamaki client..")
    auth_url = self.config.get('Deployment', 'auth_url')
    self.logger.debug("Authentication URL is %s" % _green(auth_url))
    token = self.config.get('Deployment', 'token')
    # Deliberately not logging the token: it is a credential.

    astakos_client = AstakosClient(auth_url, token)

    cyclades_url = \
        astakos_client.get_service_endpoints('compute')['publicURL']
    self.logger.debug("Cyclades API url is %s" % _green(cyclades_url))
    self.cyclades_client = CycladesClient(cyclades_url, token)
    self.cyclades_client.CONNECTION_RETRY_LIMIT = 2

    image_url = \
        astakos_client.get_service_endpoints('image')['publicURL']
    self.logger.debug("Images API url is %s" % _green(image_url))
    # Bug fix: the image client must use the image endpoint; the
    # original passed cyclades_url here.
    self.image_client = ImageClient(image_url, token)
    self.image_client.CONNECTION_RETRY_LIMIT = 2
def _wait_transition(self, server_id, current_status, new_status):
    """Wait for server to go from current_status to new_status

    Poll cyclades every `sleep_time' seconds until the server reaches
    `new_status'.  If the overall 'build_timeout' from the config is
    exceeded, or the server enters any status other than
    `current_status'/`new_status' (e.g. ERROR), the slave server is
    destroyed and the whole process exits with -1.

    @param server_id: id of the server to poll
    @param current_status: status the server is expected to report
        while still in transition (e.g. "BUILD")
    @param new_status: status to wait for (e.g. "ACTIVE")
    @return: the server details dict once `new_status' is reached
    """
    self.logger.debug("Waiting for server to become %s" % new_status)
    timeout = self.config.getint('Global', 'build_timeout')
    sleep_time = 5
    while True:
        server = self.cyclades_client.get_server_details(server_id)
        if server['status'] == new_status:
            return server
        elif timeout < 0:
            # Out of time: clean up the half-built server and abort.
            # destroy_server(False) avoids waiting (and recursing here).
            self.logger.error(
                "Waiting for server to become %s timed out" % new_status)
            self.destroy_server(False)
            sys.exit(-1)
        elif server['status'] == current_status:
            # Sleep for #n secs and continue
            timeout = timeout - sleep_time
            time.sleep(sleep_time)
        else:
            # Unexpected status (e.g. ERROR): give up and clean up
            self.logger.error(
                "Server failed with status %s" % server['status'])
            self.destroy_server(False)
            sys.exit(-1)
@_check_kamaki
def destroy_server(self, wait=True):
    """Destroy the slave server

    The server id is read from the temporary config written by
    create_server().

    @param wait: if True, block until the server reaches DELETED
    """
    server_id = self.config.getint('Temporary Options', 'server_id')
    # Fixed typo in log message ("Destoying" -> "Destroying")
    self.logger.info("Destroying server with id %s " % server_id)
    self.cyclades_client.delete_server(server_id)
    if wait:
        self._wait_transition(server_id, "ACTIVE", "DELETED")
@_check_kamaki
def create_server(self):
    """Create the slave server

    Create the VM from the configured flavor/image, save its id and
    admin credentials in the temporary config (so later stages and
    cleanup can reuse them), wait for it to become ACTIVE and then
    restrict ssh access to the configured networks.
    """
    self.logger.info("Create a new server..")
    image = self._find_image()
    self.logger.debug("Will use image \"%s\"" % _green(image['name']))
    self.logger.debug("Image has id %s" % _green(image['id']))
    server = self.cyclades_client.create_server(
        self.config.get('Deployment', 'server_name'),
        self.config.getint('Deployment', 'flavor_id'),
        image['id'])

    # Persist everything needed to reach the server later
    server_id = server['id']
    self.write_config('server_id', server_id)
    self.logger.debug("Server got id %s" % _green(server_id))
    server_user = server['metadata']['users']
    self.write_config('server_user', server_user)
    self.logger.debug("Server's admin user is %s" % _green(server_user))
    server_passwd = server['adminPass']
    self.write_config('server_passwd', server_passwd)
    self.logger.debug(
        "Server's admin password is %s" % _green(server_passwd))

    # Wait for the server to boot, then find out how to reach it
    server = self._wait_transition(server_id, "BUILD", "ACTIVE")
    self._get_server_ip_and_port(server)

    self._setup_server_firewall()

def _setup_server_firewall(self):
    """Allow ssh to the slave only from the configured networks"""
    self.setup_fabric()
    self.logger.info("Setup firewall")
    accept_ssh_from = self.config.get('Global', 'filter_access_network')
    self.logger.debug("Block ssh except from %s" % accept_ssh_from)
    cmd = """
    iptables -A INPUT -s localhost -j ACCEPT
    iptables -A INPUT -s {0} -p tcp --dport 22 -j ACCEPT
    iptables -A INPUT -p tcp --dport 22 -j DROP
    """.format(accept_ssh_from)
    _run(cmd, False)
def _find_image(self):
"""Find a suitable image to use
It has to belong to the `system_uuid' user and
contain the word `image_name'
"""
system_uuid = self.config.get('Deployment', 'system_uuid')
image_name = self.config.get('Deployment', 'image_name').lower()