Commit 99210353 authored by Ilias Tsitsimpis's avatar Ilias Tsitsimpis Committed by Christos Stavrakakis

ci: Redesign CI scripts

Spawn VM to use as builder-slave.
Build packages inside this VM.
Deploy Synnefo using snf-deploy.
Run unit tests.
Run functional tests using burnin.
parent a3092cf9
#!/usr/bin/env python
Build Synnefo packages for debian
from utils import SynnefoCI
def autopkg_debian():
"""Build synnefo packages for debian"""
synnefo_ci = SynnefoCI()
if __name__ == "__main__":
#!/usr/bin/env python
Deploy Synnefo using snf-deploy
from utils import SynnefoCI
def deploy_synnefo():
"""Deploy Synnefo using snf-deploy"""
synnefo_ci = SynnefoCI()
if __name__ == "__main__":
#!/usr/bin/env python
Download Synnefo packages
from utils import SynnefoCI
def fetch_packages():
"""Download Synnefo packages"""
synnefo_ci = SynnefoCI()
if __name__ == "__main__":
# Timeouts in seconds
build_timeout = 240
# Apt repository to use
apt_repo = squeeze/
# Synnefo git repo
synnefo_repo =
# Git branch to test (specify sha1 or branch name)
synnefo_branch = HEAD
# snf-deploy git repo
deploy_repo =
# Defines the schema that snf-deploy will use
schema = one_node
# Local dir to save built packages
pkgs_dir = /tmp/synnefo_pkgs
# If True patch the module (see Debian bug #657665)
patch_pydist = True
# Configuration of git (on remote server)
git_config_name = Buildbot
git_config_mail =
# Url to fetch ssh public keys
public_ssh_keys_url =
# Network address from which we allow access to server
filter_access_network =,
# Config file to save temporary options (eg IPs, passwords etc)
temporary_config = /tmp/ci_temp_conf
# Credentials
auth_url =
token =
# If we deploy on we have to set this to True
# The server will reside behind a NAT and we have to compute the ssh port
deploy_on_io = True
# Server name to use for our machine
server_name = Synnefo Deployment
# Flavor ID to use
# (149 for production, 639 for
flavor_id = 639
# Image to use (name must contain this)
image_name = OldStable
# UUID of owner of system images
# (25ecced9-bf53-4145-91ee-cf47377e9fb2 for production,
# 04cbe33f-29b7-4ef1-94fb-015929e5fc06 for
system_uuid = 04cbe33f-29b7-4ef1-94fb-015929e5fc06
# Maybe add some burnin options
# (e.g. tests to run/ignore, timeouts etc)
cmd_options = --nofailfast --no-ipv6 --action-timeout=240
[Unit Tests]
component = astakos cyclades pithos
# Projects reside on this repo
projects =
#!/usr/bin/env python
Run burnin functional test suite
from utils import SynnefoCI
def run_burnin():
"""Run burnin functional test suite"""
synnefo_ci = SynnefoCI()
if __name__ == "__main__":
# whether to use apt-get or locally generated packages found in the packages dir
use_local_packages = True
# url to obtain latest synnefo packages.
# To use them change USE_LOCAL_PACKAGES setting to yes
# To get them run: snf-deploy packages
package_url =
# dir to find all template files used to customize setup
# in case you want to add another setting please modify the corresponding file
templates = /var/lib/snf-deploy/files
# dir to store local images (disk0, disk1 of the virtual cluster)
images = /var/lib/snf-deploy/images
# dir to store/find local packages
# dir to locally save packages that will be downloaded from package_url
# put here any locally created packages (useful for development)
packages = /var/lib/snf-deploy/packages
# dir to store pidfiles (dnsmasq, kvm)
run = /var/run/snf-deploy
# dir to store dnsmasq related files
dns = /var/lib/snf-deploy/dnsmasq
# dir to lookup fabfile and ifup script
lib = /usr/lib/snf-deploy
# dir to store executed commands (to enforce sequential execution)
cmd = /var/run/snf-deploy/cmd
cluster_nodes = node1
master_node = node1
cluster_netdev = eth0
cluster_name = ganeti1
cluster_ip =
vg = autovg
synnefo_public_network_subnet =
synnefo_public_network_gateway =
synnefo_public_network_type = CUSTOM
image_dir = /srv/okeanos
# To add another cluster repeat the above section
# with different header and nodes
# please note that currently only deployment with nodes
# (both ganeti and synnefo) residing in the same subnet/domain is supported
domain =
subnet =
gateway =
node1 = auto1
# node2 = auto2
node1 =
# node2 =
# This is used only in case of vcluster
# needed to pass the correct dhcp responses to the virtual nodes
node1 = 52:54:00:00:00:01
# node2 = 52:54:00:00:00:02
# Here we define which nodes from the predefined ones to use
nodes = node1
# login credentials for the nodes
# please note that in case of vcluster these are preconfigured
# and not editable.
# in case of physical nodes all nodes should have the same login account
user = root
password = 12345
public_iface = eth0
vm_public_iface = eth1
vm_private_iface = eth2
# extra disk name inside the nodes
# if defined, snf-deploy will create a VG for ganeti in order to support lvm storage
# if not then only file disk template will be supported
extra_disk = /dev/vdb
rabbitmq-server = testing
gunicorn = squeeze-backports
qemu-kvm = squeeze-backports
qemu = squeeze-backports
python-gevent = squeeze-backports
apache2 =
postgresql =
python-psycopg2 =
python-argparse =
nfs-kernel-server = squeeze-backports
nfs-common = squeeze-backports
bind9 =
vlan =
vlan =
lvm2 =
curl =
memcached =
python-memcache =
bridge-utils =
python-progress =
ganeti-instance-debootstrap =
snf-astakos-app = stable
snf-common = stable
snf-cyclades-app = stable
snf-cyclades-gtools = stable
snf-django-lib = stable
python-astakosclient = stable
python-objpool = stable
snf-branding = stable
snf-webproject = stable
snf-pithos-app = stable
snf-pithos-backend = stable
snf-tools = stable
snf-ganeti = 2.6.2+ippool11+hotplug5+extstorage3+rbdfix1+kvmfix2+nolvm+netxen-1~squeeze
ganeti-htools = 2.6.2+ippool11+hotplug5+extstorage3+rbdfix1+kvmfix2+nolvm+netxen-1~squeeze
snf-cloudcms = stable
snf-vncauthproxy = stable
snf-pithos-webclient = stable
snf-image = stable
snf-network = stable
nfdhcpd = stable
kamaki = stable
python-bitarray = stable
nfqueue-bindings-python = stable
synnefo_user = synnefo
synnefo_db_passwd = example_passw0rd
synnefo_rapi_passwd = example_rapi_passw0rd
synnefo_rabbitmq_passwd = example_rabbitmq_passw0rd
user_email =
user_name = John
user_lastname = Doe
user_passwd = 12345
accounts = node1
compute = node1
object-store = node1
cyclades = node1
pithos = node1
cms = node1
db = node1
mq = node1
ns = node1
client = node1
router = node1
pithos_dir = /srv/pithos
vm_public_bridge = br0
vm_private_bridge = prv0
common_bridge = br0
debian_base_url =
# url to get the base image. This is a debian base image with preconfigured
# root password and installed rsa/dsa keys. Plus a NetworkManager hook that
# changes the VM's name based on info provided by dhcp response.
# To create it run: snf-deploy image
squeeze_image_url =
ubuntu_image_url =
# in order for ganeti nodes to support lvm storage (plain disk template),
# an extra disk is needed to eventually be able to create a VG. Ganeti requires
# this VG to be at least 30GB. To this end, in order for the virtual nodes to
# have this extra disk, an image should be created locally. There are three options:
# 1. not create an extra disk (only file storage template will be supported)
# 2. create an image of 30G in image dir (default /var/lib/snf-deploy/images)
# using dd if=/dev/zero of=squeeze.disk1
# 3. create this image in a local VG using lvcreate -L30G squeeze.disk1 lvg
# and create a symbolic link in /var/lib/snf-deploy/images
# Whether to create an extra disk or not
create_extra_disk = False
# lvg is the name of the local VG if any
lvg =
# OS installed in the virtual cluster
os = squeeze
# the bridge to use for the virtual cluster
# on this bridge we will launch a dnsmasq and provide
# fqdns needed to the cluster.
# In order for cluster nodes to have internet access, the host must do NAT.
# iptables -t nat -A POSTROUTING -s -j MASQUERADE
# ip addr add dev auto_nodes_br
# To create run: snf-deploy cluster
bridge = auto_nodes_br
#!/usr/bin/env python
Setup slave server
from utils import SynnefoCI
def setup_slave():
"""Setup slave server"""
synnefo_ci = SynnefoCI(cleanup_config=True)
# Get token from /nfs/token
token = open("/nfs/synnefo_token").read().strip()
synnefo_ci.write_config('token', token, 'Deployment')
# Build slave server
# Copy synnefo repo to server
if __name__ == "__main__":
#!/usr/bin/env python
Run Synnefo unit test suite
from utils import SynnefoCI
def unit_test():
"""Run Synnefo unit test suite"""
synnefo_ci = SynnefoCI()
if __name__ == "__main__":
#!/usr/bin/env python
Synnefo ci utils module
import os
import sys
import time
import logging
import fabric.api as fabric
from ConfigParser import ConfigParser, DuplicateSectionError
from kamaki.clients.astakos import AstakosClient
from kamaki.clients.cyclades import CycladesClient
from kamaki.clients.image import ImageClient
def _run(cmd, verbose):
"""Run fabric with verbose level"""
if verbose:
args = ('running',)
args = ('running', 'stdout',)
with fabric.hide(*args):
def _red(msg):
"""Red color"""
#return "\x1b[31m" + str(msg) + "\x1b[0m"
return str(msg)
def _yellow(msg):
"""Yellow color"""
#return "\x1b[33m" + str(msg) + "\x1b[0m"
return str(msg)
def _green(msg):
"""Green color"""
#return "\x1b[32m" + str(msg) + "\x1b[0m"
return str(msg)
def _check_fabric(fun):
"""Check if fabric env has been set"""
def wrapper(self, *args):
"""wrapper function"""
if not self.fabric_installed:
return fun(self, *args)
return wrapper
def _check_kamaki(fun):
"""Check if kamaki has been initialized"""
def wrapper(self, *args):
"""wrapper function"""
if not self.kamaki_installed:
return fun(self, *args)
return wrapper
class _MyFormatter(logging.Formatter):
    """Logging formatter that selects a format string per log level.

    DEBUG and INFO records are emitted bare, while WARNING and ERROR
    records are prefixed with "[W]" / "[E]" tags (passed through the
    color helpers, which are currently no-ops).  The level-specific
    format is installed on ``self._fmt`` only for the duration of one
    ``format()`` call, then the original format is restored.
    """
    def format(self, record):
        saved_fmt = self._fmt
        # Map each handled level to its format; unknown levels keep
        # the formatter's configured format unchanged.
        per_level = {
            logging.DEBUG: " %(msg)s",
            logging.INFO: "%(msg)s",
            logging.WARNING: _yellow("[W] %(msg)s"),
            logging.ERROR: _red("[E] %(msg)s"),
        }
        self._fmt = per_level.get(record.levelno, saved_fmt)
        formatted = logging.Formatter.format(self, record)
        self._fmt = saved_fmt
        return formatted
class SynnefoCI(object):
"""SynnefoCI python class"""
def __init__(self, cleanup_config=False):
""" Initialize SynnefoCI python class
Setup logger, local_dir, config and kamaki
# Setup logger
self.logger = logging.getLogger('synnefo-ci')
handler = logging.StreamHandler()
# Get our local dir
self.ci_dir = os.path.dirname(os.path.abspath(__file__))
self.repo_dir = os.path.dirname(self.ci_dir)
# Read config file
self.conffile = os.path.join(self.ci_dir, "new_config")
self.config = ConfigParser()
self.config.optionxform = str
temp_config = self.config.get('Global', 'temporary_config')
if cleanup_config:
else:'Global', 'temporary_config'))
# Initialize variables
self.fabric_installed = False
self.kamaki_installed = False
self.cyclades_client = None
self.image_client = None
def setup_kamaki(self):
"""Initialize kamaki
Setup cyclades_client and image_client
""""Setup kamaki client..")
auth_url = self.config.get('Deployment', 'auth_url')
self.logger.debug("Authentication URL is %s" % _green(auth_url))
token = self.config.get('Deployment', 'token')
#self.logger.debug("Token is %s" % _green(token))
astakos_client = AstakosClient(auth_url, token)
cyclades_url = \
self.logger.debug("Cyclades API url is %s" % _green(cyclades_url))
self.cyclades_client = CycladesClient(cyclades_url, token)
self.cyclades_client.CONNECTION_RETRY_LIMIT = 2
image_url = \
self.logger.debug("Images API url is %s" % _green(image_url))
self.image_client = ImageClient(cyclades_url, token)
self.image_client.CONNECTION_RETRY_LIMIT = 2
def _wait_transition(self, server_id, current_status, new_status):
"""Wait for server to go from current_status to new_status"""
self.logger.debug("Waiting for server to become %s" % new_status)
timeout = self.config.getint('Global', 'build_timeout')
sleep_time = 5
while True:
server = self.cyclades_client.get_server_details(server_id)
if server['status'] == new_status:
return server
elif timeout < 0:
"Waiting for server to become %s timed out" % new_status)
elif server['status'] == current_status:
# Sleep for #n secs and continue
timeout = timeout - sleep_time
"Server failed with status %s" % server['status'])
def destroy_server(self, wait=True):
"""Destroy slave server"""
server_id = self.config.getint('Temporary Options', 'server_id')"Destoying server with id %s " % server_id)
if wait:
self._wait_transition(server_id, "ACTIVE", "DELETED")
def create_server(self):
"""Create slave server""""Create a new server..")
image = self._find_image()
self.logger.debug("Will use image \"%s\"" % _green(image['name']))
self.logger.debug("Image has id %s" % _green(image['id']))
server = self.cyclades_client.create_server(
self.config.get('Deployment', 'server_name'),
self.config.getint('Deployment', 'flavor_id'),
server_id = server['id']
self.write_config('server_id', server_id)
self.logger.debug("Server got id %s" % _green(server_id))
server_user = server['metadata']['users']
self.write_config('server_user', server_user)
self.logger.debug("Server's admin user is %s" % _green(server_user))
server_passwd = server['adminPass']
self.write_config('server_passwd', server_passwd)
"Server's admin password is %s" % _green(server_passwd))
server = self._wait_transition(server_id, "BUILD", "ACTIVE")
self.setup_fabric()"Setup firewall")
accept_ssh_from = self.config.get('Global', 'filter_access_network')
self.logger.debug("Block ssh except from %s" % accept_ssh_from)
cmd = """
iptables -A INPUT -s localhost -j ACCEPT
iptables -A INPUT -s {0} -p tcp --dport 22 -j ACCEPT
iptables -A INPUT -p tcp --dport 22 -j DROP
_run(cmd, False)
def _find_image(self):
"""Find a suitable image to use
It has to belong to the `system_uuid' user and
contain the word `image_name'