Commit ad53b410 authored by Christos Stavrakakis's avatar Christos Stavrakakis

Merge branch 'develop' of https://github.com/cnanakos/synnefo into develop

This merge introduces the changes required to better integrate
Archipelago v2 mapfiles with Pithos and Cyclades.
parents 894441e2 2dec3707
[debian]
python-nfqueue = 0.4+physindev-1~wheezy
python-scapy = 2.2.0+rfc6355-1
blktap-utils = 2.0.90next~21~3a70c2f~1452b09-1
blktap-utils = 2.0.90next~29~e51cf34~93577a5-1~wheezy
libcurl4-openssl-dev = 7.26.0-1+wheezy9
curl = 7.26.0-1+wheezy9
snf-image = 0.14-1~wheezy
......
......@@ -964,6 +964,7 @@ The ``X-Object-Sharing`` header may include either a ``read=...`` comma-separate
Return Code Description
============================== ==============================
201 (Created) The object has been created
403 (Forbidden) If ``X-Copy-From`` is given and the source object is not available in the storage backend
409 (Conflict) The object cannot be created from the provided hashmap (a list of missing hashes will be included in the reply)
411 (Length Required) Missing ``Content-Length`` or ``Content-Type`` in the request
413 (Request Entity Too Large) Insufficient quota to complete the request
......@@ -1015,6 +1016,7 @@ X-Object-Version The object's new version
Return Code Description
============================== ==============================
201 (Created) The object has been created
403 (Forbidden) If the source object is not available in the storage backend.
413 (Request Entity Too Large) Insufficient quota to complete the request
============================== ==============================
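To illustrate the new 403 behaviour documented above, here is a minimal client-side sketch. The endpoint, token, account, container and object names are placeholders (not part of this merge), and the snippet assumes the python-requests library is available:

# Hedged sketch: create an object as a server-side copy and handle the
# new 403 case. All names below (endpoint, token, account, containers,
# objects) are illustrative placeholders.
import requests

PITHOS_URL = "https://pithos.example.org/object-store/v1"  # hypothetical endpoint
TOKEN = "user-token"                                        # hypothetical token

resp = requests.put(
    "%s/account/container/new-object" % PITHOS_URL,
    headers={
        "X-Auth-Token": TOKEN,
        "X-Copy-From": "/source-container/source-object",
        "Content-Length": "0",
    },
)

if resp.status_code == 201:
    print("object created")
elif resp.status_code == 403:
    # Introduced by this change: the source object is not available
    # in the storage backend, so the copy is refused.
    print("source object not available in the storage backend")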
......
......@@ -5,7 +5,6 @@
#
## Backend settings
#BACKEND_DB_CONNECTION = 'sqlite:////usr/share/synnefo/pithos/backend.db'
#BACKEND_BLOCK_PATH = '/usr/share/synnefo/pithos/data/'
#PITHOS_BACKEND_POOL_SIZE = 8
#
## The Pithos container where images will be stored by default
......@@ -19,3 +18,11 @@
#
## The owner of the images that will be marked as "system images" by the UI
#SYSTEM_IMAGES_OWNER = 'okeanos'
## Archipelago configuration file
#PITHOS_BACKEND_ARCHIPELAGO_CONF = '/etc/archipelago/archipelago.conf'
#
## Archipelago xseg pool size
#PITHOS_BACKEND_XSEG_POOL_SIZE = 8
#
## The maximum interval (in seconds) between consecutive backend object map checks
#PITHOS_BACKEND_MAP_CHECK_INTERVAL = 1
......@@ -5,7 +5,6 @@
# Backend settings
BACKEND_DB_CONNECTION = 'sqlite:////usr/share/synnefo/pithos/backend.db'
BACKEND_BLOCK_PATH = '/usr/share/synnefo/pithos/data/'
PITHOS_BACKEND_POOL_SIZE = 8
# The Pithos container where images will be stored by default
......@@ -19,3 +18,12 @@ DEFAULT_CONTAINER_FORMAT = 'bare'
# The owner of the images that will be marked as "system images" by the UI
SYSTEM_IMAGES_OWNER = 'okeanos'
# Archipelago Configuration File
PITHOS_BACKEND_ARCHIPELAGO_CONF = '/etc/archipelago/archipelago.conf'
# Archipelago xseg pool size
PITHOS_BACKEND_XSEG_POOL_SIZE = 8
# The maximum interval (in seconds) between consecutive backend object map checks
PITHOS_BACKEND_MAP_CHECK_INTERVAL = 1
......@@ -81,7 +81,9 @@ def get_pithos_backend():
service_token=settings.CYCLADES_SERVICE_TOKEN,
astakosclient_poolsize=settings.CYCLADES_ASTAKOSCLIENT_POOLSIZE,
db_connection=settings.BACKEND_DB_CONNECTION,
block_path=settings.BACKEND_BLOCK_PATH)
archipelago_conf_file=settings.PITHOS_BACKEND_ARCHIPELAGO_CONF,
xseg_pool_size=settings.PITHOS_BACKEND_XSEG_POOL_SIZE,
map_check_interval=settings.PITHOS_BACKEND_MAP_CHECK_INTERVAL)
return _pithos_backend_pool.pool_get()
......
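As a usage note, the pool above hands out backend instances per request. A rough sketch of the intended get/return cycle follows; it assumes the standard synnefo ObjectPool pool_get/pool_put pairing, and the body of the try block is only a placeholder:

# Hedged sketch of how a caller might use the pooled backend returned by
# get_pithos_backend(). pool_put() is assumed to be the counterpart of the
# pool_get() call shown in the hunk above.
backend = get_pithos_backend()
try:
    pass  # ... perform Pithos backend operations (e.g. read image data) ...
finally:
    _pithos_backend_pool.pool_put(backend)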
[debian]
python-nfqueue = 0.4+physindev-1~wheezy
python-scapy = 2.2.0+rfc6355-1
blktap-utils = 2.0.90next~21~3a70c2f~1452b09-1
blktap-utils = 2.0.90next~29~e51cf34~93577a5-1~wheezy
libcurl4-openssl-dev = 7.26.0-1+wheezy9
curl = 7.26.0-1+wheezy9
snf-image = 0.14-1~wheezy
......
[ARCHIPELAGO]
# Switch peer processes to run as this user
USER=root
# Switch peer processes to run as this group
GROUP=root
# xseg
[XSEG]
# Max xseg ports supported by segment
......
......@@ -14,63 +14,108 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pithos.workers import glue
import pickle
from svipc import sem_init, sem_take, sem_give
import os
from pithos.workers import glue
from multiprocessing import Lock
def find_hole(WORKERS, FOLLOW_WORKERS):
old_key = []
old_age = []
for key in FOLLOW_WORKERS:
if key not in WORKERS.keys():
old_age.append(FOLLOW_WORKERS[key] )
old_key.append( key )
break
if len(old_age) and len(old_key):
for key in old_key:
del FOLLOW_WORKERS[key]
return old_age
old_key = []
old_age = []
for key in FOLLOW_WORKERS:
if key not in WORKERS.keys():
old_age.append(FOLLOW_WORKERS[key])
old_key.append(key)
break
if len(old_age) and len(old_key):
for key in old_key:
del FOLLOW_WORKERS[key]
return old_age
return old_age
def follow_workers(pid, wid, WORKERS):
hole = None
try:
fd = open('/dev/shm/wid','rb')
f = pickle.load(fd)
hole = find_hole(WORKERS, f)
if len(hole) > 0:
k = {pid: int(hole[0])}
else:
k = {pid: wid}
f.update(k)
fd.close()
fd = open('/dev/shm/wid','wb')
pickle.dump(f, fd)
fd.close()
except:
fd = open('/dev/shm/wid','wb')
pickle.dump({pid:wid}, fd)
fd.close()
return hole
hole = None
if os.path.isfile('/dev/shm/wid'):
fd = open('/dev/shm/wid', 'rb')
f = pickle.load(fd)
hole = find_hole(WORKERS, f)
if len(hole) > 0:
k = {pid: int(hole[0])}
else:
k = {pid: wid}
f.update(k)
fd.close()
fd = open('/dev/shm/wid', 'wb')
pickle.dump(f, fd)
fd.close()
else:
fd = open('/dev/shm/wid', 'wb')
pickle.dump({pid: wid}, fd)
fd.close()
return hole
def allocate_wid(pid, wid, WORKERS):
d = {pid: wid}
hole = None
if sem_init(88,nums=1) == 0:
hole = follow_workers(pid, wid, WORKERS)
sem_give(88,0)
else:
sem_take(88,0)
hole = follow_workers(pid, wid, WORKERS)
sem_give(88,0)
return hole
hole = None
hole = follow_workers(pid, wid, WORKERS)
return hole
def post_fork(server,worker):
wid = allocate_wid(worker.pid,worker.worker_id, server.WORKERS)
if wid:
glue.WorkerGlue.setmap(worker.pid,wid[0])
else:
glue.WorkerGlue.setmap(worker.pid,worker.worker_id)
def when_ready(server):
server.lock = Lock()
def update_workers(pid, wid):
if os.path.isfile('/dev/shm/wid'):
fd = open('/dev/shm/wid', 'rb')
f = pickle.load(fd)
for k, v in f.items():
if wid == v:
del f[k]
break
k = {pid: wid}
f.update(k)
fd.close()
fd = open('/dev/shm/wid', 'wb')
pickle.dump(f, fd)
fd.close()
else:
fd = open('/dev/shm/wid', 'wb')
pickle.dump({pid: wid}, fd)
fd.close()
def resize_workers(no_workers):
if os.path.isfile('/dev/shm/wid'):
fd = open('/dev/shm/wid', 'rb')
f = pickle.load(fd)
for k, v in f.items():
if v > no_workers:
del f[k]
fd.close()
fd = open('/dev/shm/wid', 'wb')
pickle.dump(f, fd)
fd.close()
def post_fork(server, worker):
server.lock.acquire()
if worker.worker_id <= server.num_workers:
update_workers(worker.pid, worker.worker_id)
glue.WorkerGlue.setmap(worker.pid, worker.worker_id)
else:
wid = allocate_wid(worker.pid, worker.worker_id, server.WORKERS)
glue.WorkerGlue.setmap(worker.pid, wid[0])
resize_workers(server.num_workers)
server.lock.release()
def worker_exit(server, worker):
if glue.WorkerGlue.ioctx_pool:
glue.WorkerGlue.ioctx_pool._shutdown_pool()
def on_exit(server):
os.unlink('/dev/shm/wid')
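For context, the hooks above keep a pickled pid-to-wid mapping in /dev/shm/wid so that a worker id freed by a dead worker can be handed to a newly forked one. A small self-contained sketch of that bookkeeping follows; the helper is a simplified stand-in for find_hole (it collects every freed wid instead of stopping at the first), and all pids, wids and the file path are illustrative:

# Hedged sketch of the wid-reuse bookkeeping performed by the hooks above.
import os
import pickle
import tempfile

def reusable_wids(workers, follow_workers):
    # Collect the wids whose owning pid is gone, pruning them from the mapping.
    dead = {pid: wid for pid, wid in follow_workers.items() if pid not in workers}
    for pid in dead:
        del follow_workers[pid]
    return list(dead.values())

# Hypothetical state: worker pid 100 has exited, so its wid 1 may be reused.
workers = {200: object(), 300: object()}       # pids gunicorn still tracks
follow_workers = {100: 1, 200: 2, 300: 3}      # pid -> wid, as kept in the wid file
print(reusable_wids(workers, follow_workers))  # -> [1]

# The hooks persist the pid -> wid mapping with pickle; same idea here,
# but written to a temporary file instead of /dev/shm/wid.
path = os.path.join(tempfile.mkdtemp(), 'wid')
with open(path, 'wb') as fd:
    pickle.dump(follow_workers, fd)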
......@@ -14,63 +14,108 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pithos.workers import glue
import pickle
from svipc import sem_init, sem_take, sem_give
import os
from pithos.workers import glue
from multiprocessing import Lock
def find_hole(WORKERS, FOLLOW_WORKERS):
old_key = []
old_age = []
for key in FOLLOW_WORKERS:
if key not in WORKERS.keys():
old_age.append(FOLLOW_WORKERS[key] )
old_key.append( key )
break
if len(old_age) and len(old_key):
for key in old_key:
del FOLLOW_WORKERS[key]
return old_age
old_key = []
old_age = []
for key in FOLLOW_WORKERS:
if key not in WORKERS.keys():
old_age.append(FOLLOW_WORKERS[key])
old_key.append(key)
break
if len(old_age) and len(old_key):
for key in old_key:
del FOLLOW_WORKERS[key]
return old_age
return old_age
def follow_workers(pid, wid, WORKERS):
hole = None
try:
fd = open('/dev/shm/wid','rb')
f = pickle.load(fd)
hole = find_hole(WORKERS, f)
if len(hole) > 0:
k = {pid: int(hole[0])}
else:
k = {pid: wid}
f.update(k)
fd.close()
fd = open('/dev/shm/wid','wb')
pickle.dump(f, fd)
fd.close()
except:
fd = open('/dev/shm/wid','wb')
pickle.dump({pid:wid}, fd)
fd.close()
return hole
hole = None
if os.path.isfile('/dev/shm/wid'):
fd = open('/dev/shm/wid', 'rb')
f = pickle.load(fd)
hole = find_hole(WORKERS, f)
if len(hole) > 0:
k = {pid: int(hole[0])}
else:
k = {pid: wid}
f.update(k)
fd.close()
fd = open('/dev/shm/wid', 'wb')
pickle.dump(f, fd)
fd.close()
else:
fd = open('/dev/shm/wid', 'wb')
pickle.dump({pid: wid}, fd)
fd.close()
return hole
def allocate_wid(pid, wid, WORKERS):
d = {pid: wid}
hole = None
if sem_init(88,nums=1) == 0:
hole = follow_workers(pid, wid, WORKERS)
sem_give(88,0)
else:
sem_take(88,0)
hole = follow_workers(pid, wid, WORKERS)
sem_give(88,0)
return hole
hole = None
hole = follow_workers(pid, wid, WORKERS)
return hole
def post_fork(server,worker):
wid = allocate_wid(worker.pid,worker.worker_id, server.WORKERS)
if wid:
glue.WorkerGlue.setmap(worker.pid,wid[0])
else:
glue.WorkerGlue.setmap(worker.pid,worker.worker_id)
def when_ready(server):
server.lock = Lock()
def update_workers(pid, wid):
if os.path.isfile('/dev/shm/wid'):
fd = open('/dev/shm/wid', 'rb')
f = pickle.load(fd)
for k, v in f.items():
if wid == v:
del f[k]
break
k = {pid: wid}
f.update(k)
fd.close()
fd = open('/dev/shm/wid', 'wb')
pickle.dump(f, fd)
fd.close()
else:
fd = open('/dev/shm/wid', 'wb')
pickle.dump({pid: wid}, fd)
fd.close()
def resize_workers(no_workers):
if os.path.isfile('/dev/shm/wid'):
fd = open('/dev/shm/wid', 'rb')
f = pickle.load(fd)
for k, v in f.items():
if v > no_workers:
del f[k]
fd.close()
fd = open('/dev/shm/wid', 'wb')
pickle.dump(f, fd)
fd.close()
def post_fork(server, worker):
server.lock.acquire()
if worker.worker_id <= server.num_workers:
update_workers(worker.pid, worker.worker_id)
glue.WorkerGlue.setmap(worker.pid, worker.worker_id)
else:
wid = allocate_wid(worker.pid, worker.worker_id, server.WORKERS)
glue.WorkerGlue.setmap(worker.pid, wid[0])
resize_workers(server.num_workers)
server.lock.release()
def worker_exit(server, worker):
if glue.WorkerGlue.ioctx_pool:
glue.WorkerGlue.ioctx_pool._shutdown_pool()
def on_exit(server):
os.unlink('/dev/shm/wid')
......@@ -1677,8 +1677,6 @@ class Archip(base.Component):
"blktap-utils",
"archipelago",
"archipelago-dbg",
"archipelago-modules-dkms",
"archipelago-modules-source",
"archipelago-rados",
"archipelago-rados-dbg",
"libxseg0",
......@@ -1708,9 +1706,7 @@ class Archip(base.Component):
class ArchipSynnefo(base.Component):
REQUIRED_PACKAGES = [
"python-svipc",
]
REQUIRED_PACKAGES = []
def _configure(self):
r1 = {"HOST": self.node.fqdn}
......
......@@ -10,7 +10,6 @@
# Block storage.
#PITHOS_BACKEND_BLOCK_MODULE = 'pithos.backends.lib.hashfiler'
#PITHOS_BACKEND_BLOCK_UMASK = 0o022
# Default setting for new accounts.
#PITHOS_BACKEND_VERSIONING = 'auto'
......@@ -65,4 +64,4 @@
#PITHOS_BACKEND_XSEG_POOL_SIZE = 8
#
# The maximum interval (in seconds) between consecutive backend object map checks
#PITHOS_BACKEND_MAP_CHECK_INTERVAL = 5
#PITHOS_BACKEND_MAP_CHECK_INTERVAL = 1
......@@ -14,63 +14,108 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pithos.workers import glue
import pickle
from svipc import sem_init, sem_take, sem_give
import os
from pithos.workers import glue
from multiprocessing import Lock
def find_hole(WORKERS, FOLLOW_WORKERS):
old_key = []
old_age = []
for key in FOLLOW_WORKERS:
if key not in WORKERS.keys():
old_age.append(FOLLOW_WORKERS[key] )
old_key.append( key )
break
if len(old_age) and len(old_key):
for key in old_key:
del FOLLOW_WORKERS[key]
return old_age
old_key = []
old_age = []
for key in FOLLOW_WORKERS:
if key not in WORKERS.keys():
old_age.append(FOLLOW_WORKERS[key])
old_key.append(key)
break
if len(old_age) and len(old_key):
for key in old_key:
del FOLLOW_WORKERS[key]
return old_age
return old_age
def follow_workers(pid, wid, WORKERS):
hole = None
try:
fd = open('/dev/shm/wid','rb')
f = pickle.load(fd)
hole = find_hole(WORKERS, f)
if len(hole) > 0:
k = {pid: int(hole[0])}
else:
k = {pid: wid}
f.update(k)
fd.close()
fd = open('/dev/shm/wid','wb')
pickle.dump(f, fd)
fd.close()
except:
fd = open('/dev/shm/wid','wb')
pickle.dump({pid:wid}, fd)
fd.close()
return hole
hole = None
if os.path.isfile('/dev/shm/wid'):
fd = open('/dev/shm/wid', 'rb')
f = pickle.load(fd)
hole = find_hole(WORKERS, f)
if len(hole) > 0:
k = {pid: int(hole[0])}
else:
k = {pid: wid}
f.update(k)
fd.close()
fd = open('/dev/shm/wid', 'wb')
pickle.dump(f, fd)
fd.close()
else:
fd = open('/dev/shm/wid', 'wb')
pickle.dump({pid: wid}, fd)
fd.close()
return hole
def allocate_wid(pid, wid, WORKERS):
d = {pid: wid}
hole = None
if sem_init(88,nums=1) == 0:
hole = follow_workers(pid, wid, WORKERS)
sem_give(88,0)
else:
sem_take(88,0)
hole = follow_workers(pid, wid, WORKERS)
sem_give(88,0)
return hole
hole = None
hole = follow_workers(pid, wid, WORKERS)
return hole
def post_fork(server,worker):
wid = allocate_wid(worker.pid,worker.worker_id, server.WORKERS)
if wid:
glue.WorkerGlue.setmap(worker.pid,wid[0])
else:
glue.WorkerGlue.setmap(worker.pid,worker.worker_id)
def when_ready(server):
server.lock = Lock()
def update_workers(pid, wid):
if os.path.isfile('/dev/shm/wid'):
fd = open('/dev/shm/wid', 'rb')
f = pickle.load(fd)
for k, v in f.items():
if wid == v:
del f[k]
break
k = {pid: wid}
f.update(k)
fd.close()
fd = open('/dev/shm/wid', 'wb')
pickle.dump(f, fd)
fd.close()
else:
fd = open('/dev/shm/wid', 'wb')
pickle.dump({pid: wid}, fd)
fd.close()
def resize_workers(no_workers):
if os.path.isfile('/dev/shm/wid'):
fd = open('/dev/shm/wid', 'rb')
f = pickle.load(fd)
for k, v in f.items():
if v > no_workers:
del f[k]
fd.close()
fd = open('/dev/shm/wid', 'wb')
pickle.dump(f, fd)