Commit e25a5749 authored by Georgios Ouzounis's avatar Georgios Ouzounis

LAM-109 Added supervisord for Apache Flink, Zookeeper and Kafka.

parent aafe40f4
---
# Entry point task list. Per-application users are only created on slave
# nodes; the common task files run everywhere this list is applied.
- name: Create users for each application.
  include: users.yml
  when: "'slaves' in group_names"

- name: Include common tasks.
  include: common-1.yml

- name: Include more common tasks.
  include: common-2.yml
---
# Base OS setup shared by all nodes.
- name: Fix locale problem.
  command: update-locale LANGUAGE="en_US.UTF-8" LC_ALL="en_US.UTF-8"

- name: Copy sources list.
  copy:
    src: sources.list
    dest: /etc/apt/sources.list
    owner: root
    group: root
    # Quoted so YAML keeps the octal mode as a string instead of parsing
    # it as an integer.
    mode: "0640"
......@@ -32,3 +35,15 @@
# Give the kafka user sudo rights and install/configure supervisord, which
# the lambda init script later drives through supervisorctl.
- name: Add kafka user to sudo group.
  user:
    name: kafka
    # The original used `group: sudo`, which REPLACES the user's primary
    # group. `groups` + `append` adds sudo as a secondary group, matching
    # the task name's stated intent.
    groups: sudo
    append: true

- name: Install supervisord with apt.
  apt:
    name: supervisor
    state: latest
  # Jinja form instead of a bare variable name; bare variables for
  # `environment` are deprecated.
  environment: "{{ proxy_env }}"

# The same destination path is used on master and slaves; the `when`
# guards make the two templates mutually exclusive per host.
- name: Configure supervisord for master.
  template:
    src: supervisord-master.conf.j2
    dest: /etc/supervisor/supervisord.conf
    owner: root
    group: root
    mode: "0600"
  when: "'master' in group_names"

- name: Configure supervisord for slaves.
  template:
    src: supervisord-slaves.conf.j2
    dest: /etc/supervisor/supervisord.conf
    owner: root
    group: root
    mode: "0600"
  when: "'slaves' in group_names"
......@@ -5,7 +5,12 @@
tags:
- master
- name: Include tasks for all nodes.
  include: all.yml
  tags:
    - all

# Per-application users are only needed on slave nodes.
- name: Create users for each application.
  include: users.yml
  when: "'slaves' in group_names"

- name: Include common tasks.
  include: common-1.yml

- name: Include more common tasks.
  include: common-2.yml
......@@ -2,14 +2,6 @@
- name: Create users for each application.
include: users.yml
# - name: Generate ssh key for root.
# shell: cat /dev/zero | ssh-keygen -q -N ""
# args:
# creates: /root/.ssh/id_rsa.pub
# - name: Fetch id_rsa.pub file from root.
# fetch: src=/root/.ssh/id_rsa.pub dest=/tmp/fetched/root_id_rsa.pub flat=yes
- name: Generate ssh key for hduser.
shell: cat /dev/zero | ssh-keygen -q -N ""
args:
......@@ -43,3 +35,6 @@
# a Lambda Instance running.
# The lock file marks the host as an active Lambda Instance.
- name: Create Lambda Instance lock file.
  file:
    path: /root/lambda-lock
    state: touch
    owner: root
    group: lambda
    # Quoted so YAML keeps the octal mode as a string instead of parsing
    # it as an integer.
    mode: "0640"

- name: Put lambda init script on boot and shutdown sequence.
  command: update-rc.d lambda-init defaults
......@@ -32,42 +32,88 @@ start(){
echo "Apache Yarn has failed to start with returned code $returnedValue."
fi
# Start Apache Zookeeper.
echo "Starting Apache Zookeeper..."
/etc/init.d/zookeeper-init start
# Start supervisord on master node.
echo "Starting Supervisord..."
supervisord -c /etc/supervisor/supervisord.conf --logfile=/root/supervisord.log
returnedValue=$?
if [ $returnedValue -eq 0 ]
then
echo "Supervisord on master node has been started!"
else
echo "Supervisord on master node has failed to start with returned code $returnedValue."
fi
# Start Apache Zookeeper.
echo "Starting Apache Zookeeper..."
supervisorctl start apache_zookeeper
# Wait for Apache Zookeeper to start.
while [ "$(supervisorctl status apache_zookeeper | tr -s ' ' | cut -f2 -d' ')" == "STARTING" ]
do
sleep 10
done
apache_zookeeper_status=$(supervisorctl status apache_zookeeper | tr -s ' ' | cut -f2 -d' ')
if [ "$apache_zookeeper_status" != "RUNNING" ]
then
echo "Apache Zookeeper has failed to start with code $apache_zookeeper_status."
else
echo "Apache Zookeeper has been started!"
fi
# Start Apache Kafka on master node.
echo "Starting Apache kafka..."
supervisorctl start apache_kafka
# Wait for Apache Kafka to start.
while [ "$(supervisorctl status apache_kafka | tr -s ' ' | cut -f2 -d' ')" == "STARTING" ]
do
sleep 10
done
apache_kafka_status=$(supervisorctl status apache_kafka | tr -s ' ' | cut -f2 -d' ')
if [ "$apache_kafka_status" != "RUNNING" ]
then
echo "Apache kafka has failed to start with code $apache_kafka_status."
else
echo "Apache Zookeeper has failed to start with returned code $returnedValue."
echo "Apache kafka has been started!"
fi
# Start Apache Kafka on master node and on each slave node.
for node in $(cat /etc/hosts | grep "192.168." | cut -f 1)
do
echo "Starting Apache Kafka at $node..."
ssh -l root $node /etc/init.d/kafka-init start
returnedValue=$?
if [ $returnedValue -eq 0 ]
# Start Apache Kafka on each slave node.
for node in $(cat /etc/hosts | grep "snf" | cut -f2)
do
if [ "$node" == "$(hostname).local" ]
then
echo "Apache Kafka at $node has been started!"
else
echo "Apache Kafka at $node has failed to start with returned code $returnedValue."
continue
fi
echo "Starting Apache Kafka at $node..."
ssh -l root $node supervisord -c /etc/supervisor/supervisord.conf --logfile=/root/supervisord.log
echo "Supervisord on $node has been started!"
ssh -l root $node supervisorctl start apache_kafka
# Wait for Apache kafka to start.
while [ "$(ssh -l root $node supervisorctl status apache_kafka | tr -s ' ' | cut -f2 -d' ')" == "STARTING" ]
do
sleep 10
done
apache_kafka_status=$(ssh -l root $node supervisorctl status apache_kafka | tr -s ' ' | cut -f2 -d' ')
if [ "$apache_kafka_status" != "RUNNING" ]
then
echo "Apache Kafka at $node has failed to start with code $apache_kafka_status."
else
echo "Apache Kafka at $node has been started!"
fi
done
# Start Apache Flink.
# Note that Apache Flink might take some time before being fully operational due to the fact
# that it runs as an Apache Yarn application.
echo "Starting Apache Flink..."
/etc/init.d/flink-init start
returnedValue=$?
if [ $returnedValue -eq 0 ]
supervisorctl start apache_flink
# Wait for Apache Flink to start.
while [ "$(supervisorctl status apache_flink | tr -s ' ' | cut -f2 -d' ')" == "STARTING" ]
do
sleep 10
done
apache_flink_status=$(supervisorctl status apache_flink | tr -s ' ' | cut -f2 -d' ')
if [ "$apache_flink_status" != "RUNNING" ]
then
echo "Apache Flink has been started!"
echo "Apache Flink has failed to start with code $apache_flink_status."
else
echo "Apache Flink has failed to start with returned code $returnedValue."
echo "Apache Flink has been started!"
fi
# Create a lock file to prevent multiple instantiations.
......@@ -79,13 +125,13 @@ start(){
stop(){
# Stop Apache Flink.
echo "Stopping Apache Flink..."
/etc/init.d/flink-init stop
returnedValue=$?
if [ $returnedValue -eq 0 ]
supervisorctl stop apache_flink
apache_flink_status=$(supervisorctl status apache_flink | tr -s ' ' | cut -f2 -d' ')
if [ "$apache_flink_status" == "STOPPED" ]
then
echo "Apache Flink has been stopped!"
else
echo "Apache Flink has failed to stop with returned code $returnedValue."
echo "Apache Flink has failed to stop with returned code $apache_flink_status."
fi
# Stop Apache Yarn.
......@@ -110,29 +156,54 @@ stop(){
echo "Apache HDFS has failed to stop with returned code $returnedValue."
fi
# Stop Apache Kafka on master node and on each slave node.
for node in $(cat /etc/hosts | grep "192.168." | cut -f 1)
# Stop Apache Kafka on master node.
supervisorctl stop apache_kafka
apache_kafka_status=$(supervisorctl status apache_kafka | tr -s ' ' | cut -f2 -d' ')
if [ "$apache_kafka_status" == "STOPPED" ]
then
echo "Apache kafka has been stopped!"
else
echo "Apache kafka has failed to stop with returned code $apache_kafka_status"
fi
# Stop Apache Kafka on each slave node.
for node in $(cat /etc/hosts | grep "snf" | cut -f2)
do
if [ "$node" == "$(hostname).local" ]
then
continue
fi
echo "Stopping Apache Kafka at $node..."
ssh -l root $node /etc/init.d/kafka-init stop
returnedValue=$?
if [ $returnedValue -eq 0 ]
ssh -l root $node supervisorctl stop apache_kafka
apache_kafka_status=$(ssh -l root $node supervisorctl status apache_kafka | tr -s ' ' | cut -f2 -d' ')
echo "Stopping Supervisord at $node..."
ssh -l root $node supervisorctl shutdown
if [ "$apache_kafka_status" == "STOPPED" ]
then
echo "Apache Kafka at $node has been stopped!"
else
echo "Apache Kafka at $node has failed to stop with returned code $returnedValue."
echo "Apache Kafka at $node has failed to stop with returned code $apache_kafka_status"
fi
done
# Stop Apache Zookeeper.
echo "Stopping Apache Zookeeper..."
/etc/init.d/zookeeper-init stop
supervisorctl stop apache_zookeeper
apache_zookeeper_status=$(supervisorctl status apache_zookeeper | tr -s ' ' | cut -f2 -d' ')
if [ "$apache_zookeeper_status" == "STOPPED" ]
then
echo "Apache Zookeeper has been stopped!"
else
echo "Apache Zookeeper has failed to stop with returned code $apache_zookeeper_status"
fi
# Stop Supervisord on master node.
supervisorctl shutdown
returnedValue=$?
if [ $returnedValue -eq 0 ]
then
echo "Apache Zookeeper has been stopped!"
echo "Supervisord on master node has been stopped!"
else
echo "Apache Zookeeper has failed to stop with returned code $returnedValue."
echo "Supervisord on master node has failed to stop with returned code $returnedValue."
fi
# Remove lock file.
......
; Sample supervisor config file.
;
; For more information on the config file, please see:
; http://supervisord.org/configuration.html
;
; Notes:
; - Shell expansion ("~" or "$HOME") is not supported. Environment
; variables can be expanded using this syntax: "%(ENV_HOME)s".
; - Comments must have a leading space: "a=b ;comment" not "a=b;comment".
[unix_http_server]
file=/tmp/supervisor.sock ; (the path to the socket file)
;chmod=0700 ; socket file mode (default 0700)
;chown=nobody:nogroup ; socket file uid:gid owner
;username=user ; (default is no username (open server))
;password=123 ; (default is no password (open server))
;[inet_http_server] ; inet (TCP) server disabled by default
;port=127.0.0.1:9001 ; (ip_address:port specifier, *:port for all iface)
;username=user ; (default is no username (open server))
;password=123 ; (default is no password (open server))
[supervisord]
logfile=/tmp/supervisord.log ; (main log file;default $CWD/supervisord.log)
logfile_maxbytes=50MB ; (max main logfile bytes b4 rotation;default 50MB)
logfile_backups=10 ; (num of main logfile rotation backups;default 10)
loglevel=info ; (log level;default info; others: debug,warn,trace)
pidfile=/tmp/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
nodaemon=false ; (start in foreground if true;default false)
minfds=1024 ; (min. avail startup file descriptors;default 1024)
minprocs=200 ; (min. avail process descriptors;default 200)
;umask=022 ; (process file creation umask;default 022)
;user=chrism ; (default is current user, required if root)
;identifier=supervisor ; (supervisord identifier, default is 'supervisor')
;directory=/tmp ; (default is not to cd during start)
;nocleanup=true ; (don't clean up tempfiles at start;default false)
;childlogdir=/tmp ; ('AUTO' child log dir, default $TEMP)
;environment=KEY="value" ; (key value pairs to add to environment)
;strip_ansi=false ; (strip ansi escape codes in logs; def. false)
; the below section must remain in the config file for RPC
; (supervisorctl/web interface) to work, additional interfaces may be
; added by defining them in separate rpcinterface: sections
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl=unix:///tmp/supervisor.sock ; use a unix:// URL for a unix socket
;serverurl=http://127.0.0.1:9001 ; use an http:// url to specify an inet socket
;username=chris ; should be same as http_username if set
;password=123 ; should be same as http_password if set
;prompt=mysupervisor ; cmd line prompt (default "supervisor")
;history_file=~/.sc_history ; use readline history if available
; The below sample program section shows all possible program subsection values,
; create one or more 'real' program: sections to be able to control them under
; supervisor.
; Services managed by supervisord on the master node. All use
; autostart=false: the lambda init script starts them explicitly via
; supervisorctl in the required order (Zookeeper, Kafka, Flink).
[program:apache_zookeeper]
command={{ kafka_home }}/bin/zookeeper-server-start.sh {{ kafka_home }}/config/zookeeper.properties
autostart=false
user=kafka
stdout_logfile=/home/kafka/supervisord_zookeeper_logs.log

[program:apache_kafka]
command={{ kafka_home }}/bin/kafka-server-start.sh {{ kafka_home }}/config/server.properties
autostart=false
user=kafka
stdout_logfile=/home/kafka/supervisord_kafka_logs.log

; Flink runs as a Yarn session, so it may take a while before it is fully
; operational even after supervisord reports RUNNING.
[program:apache_flink]
command={{ flink_home }}/bin/yarn-session.sh -n {{ flink_number_of_task_managers }} -tm {{ flink_ram_per_task_manager }}
autostart=false
user=flink
; No spaces around '=', consistent with every other key in this file.
stdout_logfile=/home/flink/supervisord_flink_logs.log
;[program:theprogramname]
;command=/bin/cat ; the program (relative uses PATH, can take args)
;process_name=%(program_name)s ; process_name expr (default %(program_name)s)
;numprocs=1 ; number of processes copies to start (def 1)
;directory=/tmp ; directory to cwd to before exec (def no cwd)
;umask=022 ; umask for process (default None)
;priority=999 ; the relative start priority (default 999)
;autostart=true ; start at supervisord start (default: true)
;autorestart=unexpected ; whether/when to restart (default: unexpected)
;startsecs=1 ; number of secs prog must stay running (def. 1)
;startretries=3 ; max # of serial start failures (default 3)
;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
;stopsignal=QUIT ; signal used to kill process (default TERM)
;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
;stopasgroup=false ; send stop signal to the UNIX process group (default false)
;killasgroup=false ; SIGKILL the UNIX process group (def false)
;user=chrism ; setuid to this UNIX account to run the program
;redirect_stderr=true ; redirect proc stderr to stdout (default false)
;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO
;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)
;stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
;stdout_events_enabled=false ; emit events on stdout writes (default false)
;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO
;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
;stderr_logfile_backups=10 ; # of stderr logfile backups (default 10)
;stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
;stderr_events_enabled=false ; emit events on stderr writes (default false)
;environment=A="1",B="2" ; process environment additions (def no adds)
;serverurl=AUTO ; override serverurl computation (childutils)
; The below sample eventlistener section shows all possible
; eventlistener subsection values, create one or more 'real'
; eventlistener: sections to be able to handle event notifications
; sent by supervisor.
;[eventlistener:theeventlistenername]
;command=/bin/eventlistener ; the program (relative uses PATH, can take args)
;process_name=%(program_name)s ; process_name expr (default %(program_name)s)
;numprocs=1 ; number of processes copies to start (def 1)
;events=EVENT ; event notif. types to subscribe to (req'd)
;buffer_size=10 ; event buffer queue size (default 10)
;directory=/tmp ; directory to cwd to before exec (def no cwd)
;umask=022 ; umask for process (default None)
;priority=-1 ; the relative start priority (default -1)
;autostart=true ; start at supervisord start (default: true)
;autorestart=unexpected ; whether/when to restart (default: unexpected)
;startsecs=1 ; number of secs prog must stay running (def. 1)
;startretries=3 ; max # of serial start failures (default 3)
;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
;stopsignal=QUIT ; signal used to kill process (default TERM)
;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
;stopasgroup=false ; send stop signal to the UNIX process group (default false)
;killasgroup=false ; SIGKILL the UNIX process group (def false)
;user=chrism ; setuid to this UNIX account to run the program
;redirect_stderr=true ; redirect proc stderr to stdout (default false)
;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO
;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)
;stdout_events_enabled=false ; emit events on stdout writes (default false)
;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO
;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
;stderr_logfile_backups ; # of stderr logfile backups (default 10)
;stderr_events_enabled=false ; emit events on stderr writes (default false)
;environment=A="1",B="2" ; process environment additions
;serverurl=AUTO ; override serverurl computation (childutils)
; The below sample group section shows all possible group values,
; create one or more 'real' group: sections to create "heterogeneous"
; process groups.
;[group:thegroupname]
;programs=progname1,progname2 ; each refers to 'x' in [program:x] definitions
;priority=999 ; the relative start priority (default 999)
; The [include] section can just contain the "files" setting. This
; setting can list multiple files (separated by whitespace or
; newlines). It can also contain wildcards. The filenames are
; interpreted as relative to this file. Included files *cannot*
; include files themselves.
;[include]
;files = relative/directory/*.ini
; Sample supervisor config file.
;
; For more information on the config file, please see:
; http://supervisord.org/configuration.html
;
; Notes:
; - Shell expansion ("~" or "$HOME") is not supported. Environment
; variables can be expanded using this syntax: "%(ENV_HOME)s".
; - Comments must have a leading space: "a=b ;comment" not "a=b;comment".
[unix_http_server]
file=/tmp/supervisor.sock ; (the path to the socket file)
;chmod=0700 ; socket file mode (default 0700)
;chown=nobody:nogroup ; socket file uid:gid owner
;username=user ; (default is no username (open server))
;password=123 ; (default is no password (open server))
;[inet_http_server] ; inet (TCP) server disabled by default
;port=127.0.0.1:9001 ; (ip_address:port specifier, *:port for all iface)
;username=user ; (default is no username (open server))
;password=123 ; (default is no password (open server))
[supervisord]
logfile=/tmp/supervisord.log ; (main log file;default $CWD/supervisord.log)
logfile_maxbytes=50MB ; (max main logfile bytes b4 rotation;default 50MB)
logfile_backups=10 ; (num of main logfile rotation backups;default 10)
loglevel=info ; (log level;default info; others: debug,warn,trace)
pidfile=/tmp/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
nodaemon=false ; (start in foreground if true;default false)
minfds=1024 ; (min. avail startup file descriptors;default 1024)
minprocs=200 ; (min. avail process descriptors;default 200)
;umask=022 ; (process file creation umask;default 022)
;user=chrism ; (default is current user, required if root)
;identifier=supervisor ; (supervisord identifier, default is 'supervisor')
;directory=/tmp ; (default is not to cd during start)
;nocleanup=true ; (don't clean up tempfiles at start;default false)
;childlogdir=/tmp ; ('AUTO' child log dir, default $TEMP)
;environment=KEY="value" ; (key value pairs to add to environment)
;strip_ansi=false ; (strip ansi escape codes in logs; def. false)
; the below section must remain in the config file for RPC
; (supervisorctl/web interface) to work, additional interfaces may be
; added by defining them in separate rpcinterface: sections
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl=unix:///tmp/supervisor.sock ; use a unix:// URL for a unix socket
;serverurl=http://127.0.0.1:9001 ; use an http:// url to specify an inet socket
;username=chris ; should be same as http_username if set
;password=123 ; should be same as http_password if set
;prompt=mysupervisor ; cmd line prompt (default "supervisor")
;history_file=~/.sc_history ; use readline history if available
; The below sample program section shows all possible program subsection values,
; create one or more 'real' program: sections to be able to control them under
; supervisor.
; Apache Kafka broker on a slave node. autostart=false: the master's
; lambda init script starts it remotely via ssh + supervisorctl.
[program:apache_kafka]
command={{ kafka_home }}/bin/kafka-server-start.sh {{ kafka_home }}/config/server.properties
autostart=false
user=kafka
stdout_logfile=/home/kafka/supervisord_kafka_logs.log
;[program:theprogramname]
;command=/bin/cat ; the program (relative uses PATH, can take args)
;process_name=%(program_name)s ; process_name expr (default %(program_name)s)
;numprocs=1 ; number of processes copies to start (def 1)
;directory=/tmp ; directory to cwd to before exec (def no cwd)
;umask=022 ; umask for process (default None)
;priority=999 ; the relative start priority (default 999)
;autostart=true ; start at supervisord start (default: true)
;autorestart=unexpected ; whether/when to restart (default: unexpected)
;startsecs=1 ; number of secs prog must stay running (def. 1)
;startretries=3 ; max # of serial start failures (default 3)
;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
;stopsignal=QUIT ; signal used to kill process (default TERM)
;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
;stopasgroup=false ; send stop signal to the UNIX process group (default false)
;killasgroup=false ; SIGKILL the UNIX process group (def false)
;user=chrism ; setuid to this UNIX account to run the program
;redirect_stderr=true ; redirect proc stderr to stdout (default false)
;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO
;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)
;stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
;stdout_events_enabled=false ; emit events on stdout writes (default false)
;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO
;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
;stderr_logfile_backups=10 ; # of stderr logfile backups (default 10)
;stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
;stderr_events_enabled=false ; emit events on stderr writes (default false)
;environment=A="1",B="2" ; process environment additions (def no adds)
;serverurl=AUTO ; override serverurl computation (childutils)
; The below sample eventlistener section shows all possible
; eventlistener subsection values, create one or more 'real'
; eventlistener: sections to be able to handle event notifications
; sent by supervisor.
;[eventlistener:theeventlistenername]
;command=/bin/eventlistener ; the program (relative uses PATH, can take args)
;process_name=%(program_name)s ; process_name expr (default %(program_name)s)
;numprocs=1 ; number of processes copies to start (def 1)
;events=EVENT ; event notif. types to subscribe to (req'd)
;buffer_size=10 ; event buffer queue size (default 10)
;directory=/tmp ; directory to cwd to before exec (def no cwd)
;umask=022 ; umask for process (default None)
;priority=-1 ; the relative start priority (default -1)
;autostart=true ; start at supervisord start (default: true)
;autorestart=unexpected ; whether/when to restart (default: unexpected)
;startsecs=1 ; number of secs prog must stay running (def. 1)
;startretries=3 ; max # of serial start failures (default 3)
;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
;stopsignal=QUIT ; signal used to kill process (default TERM)
;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
;stopasgroup=false ; send stop signal to the UNIX process group (default false)
;killasgroup=false ; SIGKILL the UNIX process group (def false)
;user=chrism ; setuid to this UNIX account to run the program
;redirect_stderr=true ; redirect proc stderr to stdout (default false)
;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO
;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)
;stdout_events_enabled=false ; emit events on stdout writes (default false)
;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO
;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
;stderr_logfile_backups ; # of stderr logfile backups (default 10)
;stderr_events_enabled=false ; emit events on stderr writes (default false)
;environment=A="1",B="2" ; process environment additions
;serverurl=AUTO ; override serverurl computation (childutils)
; The below sample group section shows all possible group values,
; create one or more 'real' group: sections to create "heterogeneous"
; process groups.
;[group:thegroupname]
;programs=progname1,progname2 ; each refers to 'x' in [program:x] definitions
;priority=999 ; the relative start priority (default 999)
; The [include] section can just contain the "files" setting. This
; setting can list multiple files (separated by whitespace or
; newlines). It can also contain wildcards. The filenames are
; interpreted as relative to this file. Included files *cannot*
; include files themselves.
;[include]
;files = relative/directory/*.ini
---
# Installation prefixes used by the task files and templates above.
java_home: "/usr"
hadoop_home: "/usr/local/hadoop"
kafka_home: "/usr/local/kafka"
flink_home: "/usr/local/flink"
# One Flink task manager per slave node in the inventory.
flink_number_of_task_managers: "{{ groups['slaves']|count }}"
# RAM per task manager, passed to yarn-session.sh -tm
# (presumably megabytes -- confirm against the Flink version in use).
flink_ram_per_task_manager: 768
from celery import shared_task
from kamaki.clients import astakos, cyclades
@shared_task
def lambda_instance_start(token, uuid):
"""
Starts the VMs of a lambda instance using kamaki.
token: The token of the owner of the lambda instance. The validity check of the token should
have already been done.
uuid: The uuid of the lambda instance.
"""
def lambda_instance_start(auth_token, uuid):
"""
Starts the VMs of a lambda instance using kamaki. Starting the master node will cause the lambda
services to start. That is why all slave nodes must be started before starting the master node.
token: The token of the owner of the lambda instance. The validity check of the token should
have already been done.
uuid: The uuid of the lambda instance.
"""
compute_url = astakos.get_endpoint_url(
cyclades.CycladesComputeClient.service_type)
cycladesClient = cyclades.CycladesComputeClient(compute_url, auth_token)
# Start all slave nodes.
# Start master node.
@shared_task
def lambda_instance_stop(token, uuid):
"""
Stops the VMs of a lambda instance using kamaki.
token: The token of the owner of the lambda instance. The validity check of the token should
have already been done.
uuid: The uuid of the lambda instance.
"""
def lambda_instance_stop(auth_token, uuid):
"""
Stops the VMs of a lambda instance using kamaki. Stopping the master node will cause the lambda
services to stop. That is why the master node must be stopped before stopping any of the slave
nodes.
token: The token of the owner of the lambda instance. The validity check of the token should
have already been done.
uuid: The uuid of the lambda instance.
"""
compute_url = astakos.get_endpoint_url(
cyclades.CycladesComputeClient.service_type)
cycladesClient = cyclades.CycladesComputeClient(compute_url, auth_token)
# Stop master node.
# Stop all slave nodes.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment