Commit 400ce4ea authored by Sofia Papagiannaki's avatar Sofia Papagiannaki

Migration Tools: Progress IV

Refs #1171
parents 051c7450 739c2e9d
......@@ -124,6 +124,56 @@ Useful alias to add in ``~/.bashrc``::
alias pithos-sync='cd /pithos && git pull && python setup.py build_sphinx && /etc/init.d/apache2 restart'
Gunicorn Setup
--------------
Add in ``/etc/apt/sources.list``::
deb http://backports.debian.org/debian-backports squeeze-backports main
Then::
apt-get update
apt-get -t squeeze-backports install gunicorn
apt-get -t squeeze-backports install python-gevent
Create ``/etc/gunicorn.d/pithos``::
CONFIG = {
'mode': 'django',
'working_dir': '/pithos/pithos',
'user': 'www-data',
'group': 'www-data',
'args': (
'--bind=[::]:8080',
'--worker-class=egg:gunicorn#gevent',
'--workers=4',
'--log-level=debug',
'/pithos/pithos/settings.py',
),
}
Replace the ``WSGI*`` directives in ``/etc/apache2/sites-available/pithos`` and ``/etc/apache2/sites-available/pithos-ssl`` with::
<Proxy *>
Order allow,deny
Allow from all
</Proxy>
SetEnv proxy-sendchunked
SSLProxyEngine off
ProxyErrorOverride off
ProxyPass /api http://localhost:8080 retry=0
ProxyPassReverse /api http://localhost:8080
Configure and run::
/etc/init.d/gunicorn restart
a2enmod proxy
a2enmod proxy_http
/etc/init.d/apache2 restart
Shibboleth Setup
----------------
......@@ -135,15 +185,15 @@ Setup the files in ``/etc/shibboleth``.
Add in ``/etc/apache2/sites-available/pithos-ssl``::
ShibConfig /etc/shibboleth/shibboleth2.xml
Alias /shibboleth-sp /usr/share/shibboleth
ShibConfig /etc/shibboleth/shibboleth2.xml
Alias /shibboleth-sp /usr/share/shibboleth
<Location /api/login>
AuthType shibboleth
ShibRequireSession On
ShibUseHeaders On
require valid-user
</Location>
<Location /api/login>
AuthType shibboleth
ShibRequireSession On
ShibUseHeaders On
require valid-user
</Location>
Configure and run apache::
......
......@@ -500,7 +500,7 @@ def socket_read_iterator(request, length=0, blocksize=4096):
sock = raw_input_socket(request)
if length < 0: # Chunked transfers
# Small version (server does the dechunking).
if request.environ.get('mod_wsgi.input_chunked', None):
if request.environ.get('mod_wsgi.input_chunked', None) or request.META['SERVER_SOFTWARE'].startswith('gunicorn'):
while length < MAX_UPLOAD_SIZE:
data = sock.read(blocksize)
if data == '':
......
......@@ -138,7 +138,7 @@ class BaseBackend(object):
"""
return
def put_account(self, user, account, policy=None):
def put_account(self, user, account, policy={}):
"""Create a new account with the given name.
Raises:
......@@ -237,7 +237,7 @@ class BaseBackend(object):
"""
return
def put_container(self, user, account, container, policy=None):
def put_container(self, user, account, container, policy={}):
"""Create a new container with the given name.
Raises:
......
......@@ -243,7 +243,7 @@ class ModularBackend(BaseBackend):
self._put_policy(node, policy, replace)
@backend_method
def put_account(self, user, account, policy=None):
def put_account(self, user, account, policy={}):
"""Create a new account with the given name."""
logger.debug("put_account: %s %s", account, policy)
......@@ -353,7 +353,7 @@ class ModularBackend(BaseBackend):
self._put_policy(node, policy, replace)
@backend_method
def put_container(self, user, account, container, policy=None):
def put_container(self, user, account, container, policy={}):
"""Create a new container with the given name."""
logger.debug("put_container: %s %s %s", account, container, policy)
......@@ -541,8 +541,8 @@ class ModularBackend(BaseBackend):
# Check quota.
size_delta = size # Change with versioning.
if size_delta > 0:
account_quota = self._get_policy(account_node)['quota']
container_quota = self._get_policy(container_node)['quota']
account_quota = long(self._get_policy(account_node)['quota'])
container_quota = long(self._get_policy(container_node)['quota'])
if (account_quota > 0 and self._get_statistics(account_node)[1] + size_delta > account_quota) or \
(container_quota > 0 and self._get_statistics(container_node)[1] + size_delta > container_quota):
# This must be executed in a transaction, so the version is never created if it fails.
......
......@@ -45,13 +45,16 @@ import datetime
# Map of HTTP status codes to their standard reason phrases (RFC 2616 /
# WebDAV 422), used when rendering Fault responses to clients.
# Note: the scraped diff had a duplicated, unterminated 503 entry that made
# the literal syntactically invalid; this is the reconstructed mapping.
ERROR_CODES = {304:'Not Modified',
               400:'Bad Request',
               401:'Unauthorized',
               403:'Forbidden',
               404:'Not Found',
               409:'Conflict',
               411:'Length Required',
               412:'Precondition Failed',
               413:'Request Entity Too Large',
               416:'Range Not Satisfiable',
               422:'Unprocessable Entity',
               503:'Service Unavailable',
               }
class Fault(Exception):
def __init__(self, data='', status=None):
......
......@@ -34,7 +34,8 @@
# or implied, of GRNET S.A.
from sqlalchemy import create_engine
from sqlalchemy import Table, MetaData
from sqlalchemy import Table, Column, String, MetaData
from sqlalchemy.sql import select
from django.conf import settings
......@@ -51,4 +52,35 @@ class Migration(object):
self.backend = ModularBackend(*options)
def execute(self):
pass
\ No newline at end of file
pass
class Cache():
    """Persistent path -> hash lookup table backed by a SQLAlchemy database.

    Maintains a single ``files`` table mapping an object path (primary key,
    up to 2048 chars) to the hex digest computed for it, so repeated
    migration runs can skip files that have not changed.
    """

    def __init__(self, db):
        # Connect to *db* (a SQLAlchemy URL) and ensure the table exists.
        self.engine = create_engine(db)
        metadata = MetaData(self.engine)
        cols = [Column('path', String(2048), primary_key=True),
                Column('hash', String(255))]
        self.files = Table('files', metadata, *cols)
        self.conn = self.engine.connect()
        self.engine.echo = True  # log emitted SQL for debugging
        metadata.create_all(self.engine)

    def put(self, path, hash):
        """Store *hash* for *path*, replacing any previous entry."""
        # Emulate "insert or replace": delete any old row, then insert.
        result = self.conn.execute(
            self.files.delete().where(self.files.c.path == path))
        result.close()
        result = self.conn.execute(self.files.insert(),
                                   {'path': path, 'hash': hash})
        result.close()

    def get(self, path):
        """Return the stored hash for *path*, or None when absent."""
        query = select([self.files.c.hash], self.files.c.path == path)
        result = self.conn.execute(query)
        row = result.fetchone()
        result.close()
        return row[0] if row else row
\ No newline at end of file
......@@ -35,64 +35,47 @@
from binascii import hexlify
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, String, MetaData
from sqlalchemy import Table
from sqlalchemy.sql import select
from pithos import settings
from pithos.backends.modular import ModularBackend
from lib.hashmap import HashMap
from lib.migrate import Migration
from lib.migrate import Migration, Cache
import os
class DataMigration(Migration):
def __init__(self, pithosdb, db):
Migration.__init__(self, pithosdb)
# XXX Need more columns for primary key - last modified timestamp...
engine = create_engine(db)
metadata = MetaData(engine)
columns=[]
columns.append(Column('path', String(2048), primary_key=True))
columns.append(Column('hash', String(255)))
self.files = Table('files', metadata, *columns)
metadata.create_all(engine)
def cache_put(self, path, hash):
# Insert or replace.
s = self.files.delete().where(self.files.c.path==path)
r = self.conn.execute(s)
r.close()
s = self.files.insert()
r = self.conn.execute(s, {'path': path, 'hash': hash})
r.close()
self.cache = Cache(db)
def cache_get(self, path):
    """Look up the cached hash for *path*; return None when not cached."""
    query = select([self.files.c.hash], self.files.c.path == path)
    result = self.conn.execute(query)
    row = result.fetchone()
    result.close()
    # Preserve the original falsy-row contract: pass back None unchanged.
    if not row:
        return row
    return row[0]
def execute(self):
blocksize = self.backend.block_size
blockhash = self.backend.hash_algorithm
def retrieve_files(self):
    """Generator over all legacy file paths.

    Yields one ``(storedfilepath,)`` row at a time from the ``filebody``
    table so the caller never holds the full result set in memory.
    """
    filebody = Table('filebody', self.metadata, autoload=True)
    rp = self.conn.execute(select([filebody.c.storedfilepath]))
    try:
        # Stream rows one at a time. The previous code called fetchall()
        # first, which both materialized every row in memory and drained
        # the cursor so the fetchone() loop below yielded nothing.
        path = rp.fetchone()
        while path:
            yield path
            path = rp.fetchone()
    finally:
        # Release the cursor even if the caller abandons the generator.
        rp.close()
def execute(self):
blocksize = self.backend.block_size
blockhash = self.backend.hash_algorithm
for path in paths:
for (path,) in self.retrieve_files():
map = HashMap(blocksize, blockhash)
map.load(path)
try:
map.load(open(path))
except Exception, e:
print e
continue
hash = hexlify(map.hash())
if hash != self.cache_get(path):
if hash != self.cache.get(path):
missing = self.backend.blocker.block_ping(map) # XXX Backend hack...
status = '[>] ' + path
if missing:
......@@ -105,7 +88,7 @@ class DataMigration(Migration):
self.backend.put_block(block)
else:
status += ' - no blocks missing'
self.cache_put(path, hash)
self.cache.put(path, hash)
else:
status = '[-] ' + path
print status
......
This diff is collapsed.
......@@ -65,6 +65,6 @@ class UserMigration(Migration):
user.save(update_timestamps=False)
if __name__ == "__main__":
    # Connection URL for the legacy GSS database. The previous, dead
    # assignment hard-coded real credentials and a public host; never
    # commit secrets in this URL — keep it environment-local.
    db = 'postgresql://gss@localhost/pithos'
    m = UserMigration(db)
    m.execute()
\ No newline at end of file
......@@ -69,6 +69,7 @@ class BaseTestCase(unittest.TestCase):
def setUp(self):
self.client = Pithos_Client(get_server(), get_auth(), get_user(),
get_api())
self._clean_account()
self.invalid_client = Pithos_Client(get_server(), get_auth(), 'invalid',
get_api())
#self.headers = {
......@@ -127,6 +128,9 @@ class BaseTestCase(unittest.TestCase):
self.return_codes = (400, 401, 404, 503,)
def tearDown(self):
    # Remove every container/object the test created so cases stay
    # independent of each other (same cleanup also runs in setUp).
    self._clean_account()
def _clean_account(self):
for c in self.client.list_containers():
while True:
#list objects returns at most 10000 objects
......@@ -817,6 +821,24 @@ class ContainerPut(BaseTestCase):
self.client.create_container(self.containers[0])
self.assertTrue(not self.client.create_container(self.containers[0]))
def test_quota(self):
    # Quota policy round-trip: set a 100-byte quota on 'c1', check it is
    # reported back in the container metadata, verify an oversized upload
    # is rejected, then reset the quota to unlimited (0).
    self.client.create_container(self.containers[0])
    policy = {'quota':100}
    self.client.set_container_policies('c1', **policy)
    meta = self.client.retrieve_container_metadata('c1')
    self.assertTrue('x-container-policy-quota' in meta)
    self.assertEqual(meta['x-container-policy-quota'], '100')
    args = ['c1', 'o1']
    kwargs = {'length':101}
    # 101 bytes > 100-byte quota -> expect 413 Request Entity Too Large.
    self.assert_raises_fault(413, self.upload_random_data, *args, **kwargs)
    #reset quota
    policy = {'quota':0}
    self.client.set_container_policies('c1', **policy)
class ContainerPost(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
......@@ -839,13 +861,13 @@ class ContainerDelete(BaseTestCase):
self.containers = ['c1', 'c2']
for c in self.containers:
self.client.create_container(c)
self.upload_random_data(self.containers[1], o_names[0])
def test_delete(self):
    # Deleting an empty container must succeed with 204 No Content.
    status = self.client.delete_container(self.containers[0])[0]
    self.assertEqual(status, 204)
def test_delete_non_empty(self):
    # A container that still holds an object may not be deleted:
    # upload one object, then expect 409 Conflict on delete.
    self.upload_random_data(self.containers[1], o_names[0])
    self.assert_raises_fault(409, self.client.delete_container,
                             self.containers[1])
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment