index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
23,625
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tests/database.py
|
from walrus.containers import *
from walrus.tests.base import WalrusTestCase
from walrus.tests.base import db
class TestWalrus(WalrusTestCase):
    """Tests for Database key resolution, transactions and compare-and-set."""

    def test_get_key(self):
        """get_key() resolves an existing key to the matching container type."""
        h = db.Hash('h1')
        h['hk1'] = 'v1'
        l = db.List('l1')
        l.append('i1')
        s = db.Set('s1')
        s.add('k1')
        zs = db.ZSet('z1')
        zs.add({'i1': 1., 'i2': 2.})

        # Values read back from Redis come back as bytes.
        h_db = db.get_key('h1')
        self.assertTrue(isinstance(h_db, Hash))
        self.assertEqual(h_db['hk1'], b'v1')

        l_db = db.get_key('l1')
        self.assertTrue(isinstance(l_db, List))
        self.assertEqual(l_db[0], b'i1')

        s_db = db.get_key('s1')
        self.assertTrue(isinstance(s_db, Set))
        self.assertEqual(s_db.members(), set((b'k1',)))

        z_db = db.get_key('z1')
        self.assertTrue(isinstance(z_db, ZSet))
        self.assertEqual(z_db.score('i1'), 1.)

    def test_atomic(self):
        """Nested atomic() blocks stack pipelines; each commits on exit."""
        def assertDepth(depth):
            self.assertEqual(len(db._transaction_local.pipes), depth)

        assertDepth(0)
        with db.atomic() as p1:
            assertDepth(1)
            with db.atomic() as p2:
                assertDepth(2)
                with db.atomic() as p3:
                    assertDepth(3)
                    p3.pipe.set('k3', 'v3')
                # The innermost block committed when its context exited.
                assertDepth(2)
                self.assertEqual(db['k3'], b'v3')
                p2.pipe.set('k2', 'v2')
            assertDepth(1)
            self.assertEqual(db['k3'], b'v3')
            self.assertEqual(db['k2'], b'v2')
            p1.pipe.set('k1', 'v1')
        assertDepth(0)
        self.assertEqual(db['k1'], b'v1')
        self.assertEqual(db['k2'], b'v2')
        self.assertEqual(db['k3'], b'v3')

    def test_atomic_exception(self):
        """An exception inside atomic() discards that block's writes."""
        def do_atomic(k, v, exc=False):
            with db.atomic() as a:
                a.pipe.set(k, v)
                if exc:
                    raise TypeError('foo')

        do_atomic('k', 'v')
        self.assertEqual(db['k'], b'v')

        self.assertRaises(TypeError, do_atomic, 'k2', 'v2', True)
        self.assertRaises(KeyError, lambda: db['k2'])
        self.assertEqual(db._transaction_local.pipe, None)

        # Try nested failure.
        with db.atomic() as outer:
            outer.pipe.set('k2', 'v2')
            self.assertRaises(TypeError, do_atomic, 'k3', 'v3', True)
            # Only this will be set.
            outer.pipe.set('k4', 'v4')

        self.assertTrue(db._transaction_local.pipe is None)
        self.assertEqual(db['k2'], b'v2')
        self.assertRaises(KeyError, lambda: db['k3'])
        self.assertEqual(db['k4'], b'v4')

    def test_clear_transaction(self):
        """clear() empties a pipeline without disturbing outer blocks."""
        with db.atomic() as a1:
            a1.pipe.set('k1', 'v1')
            with db.atomic() as a2:
                a2.pipe.set('k2', 'v2')
                a2.clear()
        self.assertEqual(db['k1'], b'v1')
        self.assertRaises(KeyError, lambda: db['k2'])

        with db.atomic() as a1:
            a1.pipe.set('k3', 'v3')
            with db.atomic() as a2:
                self.assertRaises(KeyError, lambda: db['k3'])
                a2.pipe.set('k4', 'v4')
                a2.clear()
            a1.pipe.set('k5', 'v5')
        self.assertEqual(db['k3'], b'v3')
        self.assertRaises(KeyError, lambda: db['k4'])
        self.assertEqual(db['k5'], b'v5')
        self.assertTrue(db._transaction_local.pipe is None)

    def test_cas(self):
        """cas() swaps only when the current value matches the expected one."""
        db['k1'] = 'v1'
        self.assertTrue(db.cas('k1', 'v1', 'v1-x'))
        self.assertFalse(db.cas('k1', 'v1-z', 'v1-y'))
        self.assertEqual(db['k1'], b'v1-x')
        # Bug fix: the expected value must be the *current* value ('v1-x');
        # with 'v1-' this cas() would fail and the assertions below with it.
        self.assertTrue(db.cas('k1', 'v1-x', 'v2'))
        self.assertFalse(db.cas('k1', 'v1', 'v3'))
        self.assertEqual(db['k1'], b'v2')
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,626
|
coleifer/walrus
|
refs/heads/master
|
/walrus/rate_limit.py
|
import hashlib
import pickle
import time
from functools import wraps
class RateLimitException(Exception):
    """Raised by the rate_limited decorator when a key exceeds its quota."""
class RateLimit(object):
    """
    Simple rate limiter: permit at most ``limit`` events per ``per`` seconds
    for any given key.
    """
    def __init__(self, database, name, limit=5, per=60, debug=False):
        """
        :param database: :py:class:`Database` instance.
        :param name: Namespace for this cache.
        :param int limit: Number of events allowed during a given time period.
        :param int per: Time period the ``limit`` applies to, in seconds.
        :param debug: Disable rate-limit for debugging purposes. All events
            will appear to be allowed and valid.
        """
        self.database = database
        self.name = name
        self._limit = limit
        self._per = per
        self._debug = debug

    def limit(self, key):
        """
        Record an event for ``key`` and report whether it should be limited.

        Returns ``False`` when the event is allowed (the key is still under
        its quota) and ``True`` when the key has exhausted its allotted
        events for the current time period.

        :param str key: A key identifying the source of the event.
        :returns: Boolean indicating whether the event should be
            rate-limited.
        """
        if self._debug:
            return False

        events = self.database.List(':'.join((self.name, key)))
        limited = False
        if len(events) < self._limit:
            # Under quota -- just record the new event's timestamp.
            events.prepend(str(time.time()))
        else:
            stamp = events[-1]
            if stamp is not None and time.time() - float(stamp) < self._per:
                # The oldest recorded event is still inside the window.
                limited = True
            else:
                # The window rolled over; record the event and trim back
                # down to the newest ``limit`` timestamps.
                events.prepend(str(time.time()))
                del events[:self._limit]
        # Let the key expire if the source goes quiet for two full periods.
        events.pexpire(int(self._per * 2000))
        return limited

    def rate_limited(self, key_function=None):
        """
        Decorator that raises :py:class:`RateLimitException` when the quota
        for the derived key has been exhausted.

        Choose the key function with care: when rate-limiting a web page,
        for example, the requesting user's IP is a sensible key.

        :param key_function: Callable accepting the decorated function's
            arguments and returning a string key. Defaults to an MD5 digest
            of the pickled args/kwargs.
        :returns: The decorated function's return value when not limited.
        :raises: ``RateLimitException``.
        """
        if key_function is None:
            def key_function(*a, **k):
                payload = pickle.dumps((a, sorted(k.items())))
                return hashlib.md5(payload).hexdigest()

        def decorator(fn):
            @wraps(fn)
            def inner(*args, **kwargs):
                if self.limit(key_function(*args, **kwargs)):
                    raise RateLimitException(
                        'Call to %s exceeded %s events in %s seconds.' % (
                            fn.__name__, self._limit, self._per))
                return fn(*args, **kwargs)
            return inner
        return decorator
class RateLimitLua(RateLimit):
    """
    Rate limiter variant that performs the check-and-record step inside a
    server-side Lua script, making the operation atomic.
    """
    def limit(self, key):
        """Record an event; return True when the event should be limited."""
        if self._debug:
            return False
        scoped_key = ':'.join((self.name, key))
        result = self.database.run_script(
            'rate_limit',
            keys=[scoped_key],
            args=[self._limit, self._per, time.time()])
        return bool(result)
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,627
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tests/rate_limit.py
|
import time
from walrus.rate_limit import RateLimitException
from walrus.tests.base import WalrusTestCase
from walrus.tests.base import db
class TestRateLimit(WalrusTestCase):
    """Tests for the list-backed RateLimit implementation."""

    def setUp(self):
        super(TestRateLimit, self).setUp()
        # Limit to 5 events per second.
        self.rl = self.get_rate_limit('test-rl', 5, 1)

    def get_rate_limit(self, key, limit, per):
        # Factory hook: TestRateLimitLua overrides this so the same test
        # methods exercise the Lua-script-based implementation.
        return db.rate_limit(key, limit, per)

    def test_rate_limit(self):
        # First 5 events are allowed; subsequent events are limited.
        for i in range(5):
            self.assertFalse(self.rl.limit('k1'))
        for i in range(3):
            self.assertTrue(self.rl.limit('k1'))
        # A different key has its own independent quota.
        self.assertFalse(self.rl.limit('k2'))

    def test_rate_limit_rollover(self):
        rl = self.get_rate_limit('test-rl2', 3, 100)
        # Poke directly at the backing list of event timestamps.
        container = db.List('test-rl2:k1')
        now = time.time()
        past = now - 101
        # Simulate two events.
        container.extend([now, now])
        # Third event goes through OK.
        self.assertFalse(rl.limit('k1'))
        # Fourth event is rate-limited.
        self.assertTrue(rl.limit('k1'))
        # There are three timestamps in the container.
        self.assertEqual(len(container), 3)
        # Hand modify the oldest timestamp to appear as if it happened over
        # 100 seconds ago.
        container[-1] = past
        # We can again perform an action.
        self.assertFalse(rl.limit('k1'))
        # We once again have 3 items all within the last 100 seconds, so we
        # are rate-limited.
        self.assertTrue(rl.limit('k1'))
        # There are only 3 items in the container.
        self.assertEqual(len(container), 3)
        # The oldest item is the 2nd we added at the beginning of the test.
        self.assertEqual(float(container[-1]), now)
        # Remove an item and make the 2nd timestamp (oldest) in the past. This
        # gives us 2 actions.
        container.popright()
        container[-1] = past
        self.assertFalse(rl.limit('k1'))
        self.assertFalse(rl.limit('k1'))
        self.assertTrue(rl.limit('k1'))

    def test_decorator(self):
        rl = self.get_rate_limit('test-rl2', 3, 100)
        container = db.List('test-rl2:fake-key')

        # Pin the key so every call shares a single quota regardless of args.
        def key_fn(*args, **kwargs):
            return 'fake-key'

        @rl.rate_limited(key_function=key_fn)
        def do_test():
            return 'OK'

        now = time.time()
        container.extend([now, now])
        # Third event is allowed, fourth raises.
        self.assertEqual(do_test(), 'OK')
        self.assertRaises(RateLimitException, do_test)
        # Age out the oldest timestamp to free up two more calls.
        container.popright()
        container[-1] = now - 101
        self.assertEqual(do_test(), 'OK')
        self.assertEqual(do_test(), 'OK')
        self.assertRaises(RateLimitException, do_test)
class TestRateLimitLua(TestRateLimit):
    """Re-run the TestRateLimit suite against the Lua-based limiter."""

    def get_rate_limit(self, key, limit, per):
        # Swap in the atomic, Lua-script-backed implementation.
        limiter = db.rate_limit_lua(key, limit, per)
        return limiter
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,628
|
coleifer/walrus
|
refs/heads/master
|
/walrus/lock.py
|
from functools import wraps
import os
class Lock(object):
    """
    Lock implementation. Can also be used as a context-manager or
    decorator.

    Unlike the redis-py lock implementation, this Lock does not
    use a spin-loop when blocking to acquire the lock. Instead,
    it performs a blocking pop on a list. When a lock is released,
    a value is pushed into this list, signalling that the lock is
    available.

    .. warning::
        The event list for each lock persists
        indefinitely unless removed using :py:meth:`Lock.clear` or
        otherwise manually in the Redis database. For this reason,
        be cautious when creating locks dynamically, or your
        keyspace might grow in an unbounded way.

    The lock uses Lua scripts to ensure the atomicity of its
    operations.

    You can set a TTL on a lock to reduce the potential for deadlocks
    in the event of a crash. If a lock is not released before it
    exceeds its TTL, any threads that are blocked waiting for the
    lock could potentially re-acquire it.

    .. note:: TTL is specified in **milliseconds**.

    Locks can be used as context managers or as decorators:

    .. code-block:: python

        lock = db.lock('my-lock')

        with lock:
            perform_some_calculations()

        @lock
        def another_function():
            # The lock will be acquired when this function is
            # called, and released when the function returns.
            do_some_more_calculations()
    """
    def __init__(self, database, name, ttl=None, lock_id=None):
        """
        :param database: A walrus ``Database`` instance.
        :param str name: The name for the lock.
        :param int ttl: The time-to-live for the lock in milliseconds.
        :param str lock_id: Unique identifier for the lock instance.
        """
        self.database = database
        self.name = name
        self.ttl = ttl or 0
        self._lock_id = lock_id or os.urandom(32)

    @property
    def key(self):
        # Redis key holding the lock value (the lock_id of the holder).
        return 'lock:%s' % (self.name)

    @property
    def event(self):
        # Redis list used to signal waiters when the lock is released.
        return 'lock.event:%s' % (self.name)

    def acquire(self, block=True):
        """
        Acquire the lock. The lock will be held until it is released
        by calling :py:meth:`Lock.release`. If the lock was
        initialized with a ``ttl``, then the lock will be released
        automatically after the given number of milliseconds.

        By default this method will block until the lock becomes
        free (either by being released or expiring). The blocking is
        accomplished by performing a blocking left-pop on a list, as
        opposed to a spin-loop.

        If you specify ``block=False``, then the method will return
        ``False`` if the lock could not be acquired.

        :param bool block: Whether to block while waiting to acquire
            the lock.
        :returns: Returns ``True`` if the lock was acquired.
        """
        # Wait at most roughly one TTL per blocking pop so that a lock which
        # expired without being released is eventually retried.
        # NOTE(review): a ttl under 1000ms yields event_wait == 0, which
        # blpop treats as "block indefinitely" -- confirm this is intended.
        event_wait = self.ttl // 1000 if self.ttl else 1
        while True:
            acquired = self.database.run_script(
                'lock_acquire',
                keys=[self.key],
                args=[self._lock_id, self.ttl])
            if acquired == 1 or not block:
                return acquired == 1

            # Perform a blocking pop on the event key. When a lock
            # is released, a value is pushed into the list, which
            # signals listeners that the lock is available.
            self.database.blpop(self.event, event_wait)

    def release(self):
        """
        Release the lock.

        :returns: Returns ``True`` if the lock was released.
        """
        unlocked = self.database.run_script(
            'lock_release',
            keys=[self.key, self.event],
            args=[self._lock_id])
        return unlocked != 0

    def clear(self):
        """
        Clear the lock, allowing it to be acquired. Do not use this
        method except to recover from a deadlock. Otherwise you should
        use :py:meth:`Lock.release`.
        """
        self.database.delete(self.key)
        self.database.delete(self.event)

    def __enter__(self):
        self.acquire()
        # Bug fix: return the lock so ``with db.lock(...) as lock:`` binds
        # the Lock instance instead of None.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.release():
            raise RuntimeError('Error releasing lock "%s".' % self.name)

    def __call__(self, fn):
        # Decorator form: acquire around each invocation of ``fn``.
        @wraps(fn)
        def inner(*args, **kwargs):
            with self:
                return fn(*args, **kwargs)
        return inner
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,629
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tests/fts.py
|
#coding:utf-8
from walrus.tests.base import WalrusTestCase
from walrus.tests.base import db
class TestSearchIndex(WalrusTestCase):
    """Tests for the full-text search Index container."""

    def test_search_index(self):
        phrases = [
            ('A faith is a necessity to a man. Woe to him who believes in '
             'nothing.'),
            ('All who call on God in true faith, earnestly from the heart, '
             'will certainly be heard, and will receive what they have asked '
             'and desired.'),
            ('Be faithful in small things because it is in them that your '
             'strength lies.'),
            ('Faith consists in believing when it is beyond the power of '
             'reason to believe.'),
            ('Faith has to do with things that are not seen and hope with '
             'things that are not at hand.')]
        index = db.Index('test-index')
        for idx, message in enumerate(phrases):
            index.add('doc-%s' % idx, message)

        def assertDocs(query, indexes):
            # Compare search results (in result order) against the phrases
            # at the given list positions.
            results = [doc['content'] for doc in index.search(query)]
            self.assertEqual(results, [phrases[i] for i in indexes])

        assertDocs('faith', [0, 2, 3, 4, 1])
        assertDocs('faith man', [0])
        assertDocs('things', [4, 2])
        assertDocs('blah', [])

    def test_add_remove_update(self):
        data = [
            ('huey cat', {'type': 'cat', 'color': 'white'}),
            ('zaizee cat cat', {'type': 'cat', 'color': 'gray'}),
            ('mickey dog', {'type': 'dog', 'color': 'black'}),
            ('beanie cat', {'type': 'cat', 'color': 'gray'}),
        ]
        idx = db.Index('test-index')
        for i, (content, metadata) in enumerate(data):
            idx.add(str(i), content, **metadata)

        # A result document carries its content along with the metadata.
        huey, = idx.search('huey')
        self.assertEqual(huey, {
            'content': 'huey cat',
            'type': 'cat',
            'color': 'white'})

        self.assertEqual([d['content'] for d in idx.search('cat')],
                         ['zaizee cat cat', 'huey cat', 'beanie cat'])

        idx.remove('3')  # Poor beanie :(
        zaizee, huey = idx.search('cat')
        self.assertEqual(zaizee['content'], 'zaizee cat cat')
        self.assertEqual(huey['content'], 'huey cat')

        # Removing an already-removed document raises KeyError.
        self.assertRaises(KeyError, idx.remove, '3')

        # update() keeps existing metadata ('color' survives below), while
        # replace() discards it.
        idx.update('1', 'zaizee cat', {'type': 'kitten'})
        idx.replace('0', 'huey baby cat', {'type': 'kitten'})
        zaizee, huey = idx.search('cat')
        self.assertEqual(zaizee['content'], 'zaizee cat')
        self.assertEqual(zaizee['type'], 'kitten')
        self.assertEqual(zaizee['color'], 'gray')
        self.assertEqual(huey['content'], 'huey baby cat')
        self.assertEqual(huey['type'], 'kitten')
        self.assertTrue('color' not in huey)

        # search_items() also yields the document key alongside the document.
        zaizee, huey = idx.search_items('cat')
        self.assertEqual(zaizee[0], '1')
        self.assertEqual(zaizee[1]['content'], 'zaizee cat')
        self.assertEqual(huey[0], '0')
        self.assertEqual(huey[1]['content'], 'huey baby cat')

    def test_search_phonetic(self):
        data = (
            ('pf', 'python and flask'),
            ('lcp', 'learning cython programming'),
            ('lwd', 'learning web development with flask'),
            ('pwd', 'python web development'))
        data_dict = dict(data)
        # With metaphone=True, misspelled query terms still match by sound.
        idx = db.Index('test-index', metaphone=True)
        for key, content in data:
            idx.add(key, content)

        def assertResults(query, keys):
            result = idx.search(query)
            self.assertEqual([doc['content'] for doc in result],
                             [data_dict[key] for key in keys])

        assertResults('flasck', ['pf', 'lwd'])
        assertResults('pythonn', ['pf', 'pwd'])
        assertResults('sithon', ['lcp'])
        assertResults('webb development', ['pwd', 'lwd'])
        assertResults('sithon OR (flasck AND pythonn)', ['pf', 'lcp'])
        assertResults('garbage', [])

    def test_search_parser(self):
        messages = [
            'foo green',
            'bar green',
            'baz blue',
            'nug blue',
            'nize yellow',
            'huey greener',
            'mickey greens',
            'zaizee',
        ]
        index = db.Index('testing')
        for idx, message in enumerate(messages):
            index.add(str(idx), message)

        def assertMatches(query, expected):
            results = [doc['content'] for doc in index.search(query)]
            self.assertEqual(results, expected)

        assertMatches('foo', ['foo green'])
        assertMatches('foo OR baz', ['foo green', 'baz blue'])
        assertMatches('green OR blue', [
            'foo green',
            'bar green',
            'baz blue',
            'nug blue',
            'mickey greens',
        ])
        assertMatches('green AND (bar OR mickey OR nize)', [
            'bar green',
            'mickey greens',
        ])
        assertMatches('zaizee OR (blue AND nug) OR (green AND bar)', [
            'bar green',
            'nug blue',
            'zaizee',
        ])
        # NOTE(review): this query's parentheses are unbalanced -- it
        # presumably exercises the parser's tolerance of malformed input;
        # confirm before "fixing" the string.
        assertMatches('(blue AND (baz OR (nug OR huey OR mickey))', [
            'baz blue',
            'nug blue',
        ])
        assertMatches(
            '(blue OR foo) AND (green OR (huey OR (baz AND mickey)))',
            ['foo green'])
        assertMatches('(green AND nug) OR (blue AND bar)', [])
        assertMatches('nuglet', [])
        assertMatches('foobar', [])
        # Stray double-quote embedded in the query term.
        assertMatches('foo"bar green', ['foo green'])
        # An empty query matches every indexed document.
        results = [doc['content'] for doc in index.search('')]
        self.assertEqual(sorted(results), sorted(messages))

    def test_unicode_handling(self):
        # Stemming disabled; exercise non-ASCII (Russian) token handling.
        index = db.Index('testing', stemmer=False)
        index.add('1', u'сколько лет этому безумному моржу', {'val': 'age'})
        index.add('2', u'во сколько морж ложится спать', val='sleep')
        index.add('3', u'Вы знаете какие-нибудь хорошие истории с моржами',
                  val='stories')
        self.assertEqual([r['val'] for r in index.search(u'морж')], ['sleep'])
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,630
|
coleifer/walrus
|
refs/heads/master
|
/examples/diary.py
|
#!/usr/bin/env python
from collections import OrderedDict
import datetime
import sys
from walrus import *
# Shared connection used by the Entry model below.
database = Database(host='localhost', port=6379, db=0)


class Entry(Model):
    """A single diary entry, stored under the 'diary' key namespace."""
    __database__ = database
    __namespace__ = 'diary'
    # fts=True enables full-text search on the body (used via
    # Entry.content.search() in view_entries below).
    content = TextField(fts=True)
    # Creation time; indexed so entries can be ordered by timestamp.
    timestamp = DateTimeField(default=datetime.datetime.now, index=True)
def menu_loop():
    """Display the menu and dispatch on the user's choice until 'q'."""
    choice = None
    while choice != 'q':
        # Each handler's docstring doubles as its menu label.
        for key, value in menu.items():
            print('%s) %s' % (key, value.__doc__))
        # NOTE(review): raw_input is Python 2 only; under Python 3 this
        # raises NameError (the equivalent is input()).
        choice = raw_input('Action: ').lower().strip()
        if choice in menu:
            menu[choice]()
def add_entry():
    """Add entry"""
    print('Enter your entry. Press ctrl+d when finished.')
    # Read the full entry body up to EOF (ctrl+d).
    data = sys.stdin.read().strip()
    # NOTE(review): raw_input is Python 2 only (input() on Python 3).
    # Anything other than an explicit 'n' saves the entry.
    if data and raw_input('Save entry? [Yn] ') != 'n':
        Entry.create(content=data)
        print('Saved successfully.')
def view_entries(search_query=None):
    """View previous entries"""
    # With a query, restrict to full-text matches; otherwise show everything.
    expr = Entry.content.search(search_query) if search_query else None
    for entry in Entry.query(expr, order_by=Entry.timestamp.desc()):
        header = entry.timestamp.strftime('%A %B %d, %Y %I:%M%p')
        print(header)
        print('=' * len(header))
        print(entry.content)
        print('n) next entry')
        print('d) delete entry')
        print('q) return to main menu')
        choice = raw_input('Choice? (Ndq) ').lower().strip()
        if choice == 'q':
            break
        if choice == 'd':
            entry.delete()
            print('Entry deleted successfully.')
            break
def search_entries():
    """Search entries"""
    query = raw_input('Search query: ')
    view_entries(query)
# Menu key -> handler; menu_loop prints each handler's docstring as its label.
menu = OrderedDict([
    ('a', add_entry),
    ('v', view_entries),
    ('s', search_entries),
])
if __name__ == '__main__':
    menu_loop()
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,631
|
coleifer/walrus
|
refs/heads/master
|
/walrus/__init__.py
|
"""
Lightweight Python utilities for working with Redis.
"""
__author__ = 'Charles Leifer'
__license__ = 'MIT'
__version__ = '0.9.3'
# ___
# .-9 9 `\
# =(:(::)= ;
# |||| \
# |||| `-.
# ,\|\| `,
# / \
# ; `'---.,
# | `\
# ; / |
# \ | /
# jgs ) \ __,.--\ /
# .-' \,..._\ \` .-' .-'
# `-=`` `: | /-/-/`
# `.__/
from walrus.autocomplete import Autocomplete
from walrus.cache import Cache
from walrus.containers import Array
from walrus.containers import BitField
from walrus.containers import BloomFilter
from walrus.containers import ConsumerGroup
from walrus.containers import Container
from walrus.containers import Hash
from walrus.containers import HyperLogLog
from walrus.containers import List
from walrus.containers import Set
from walrus.containers import Stream
from walrus.containers import ZSet
from walrus.counter import Counter
from walrus.database import Database
from walrus.fts import Index
from walrus.graph import Graph
from walrus.lock import Lock
from walrus.models import *
from walrus.rate_limit import RateLimit
from walrus.rate_limit import RateLimitException
from walrus.streams import Message
from walrus.streams import TimeSeries
# Friendly alias.
Walrus = Database
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,632
|
kfjustis/dic-final
|
refs/heads/master
|
/encoder_lib.py
|
import cv2
from PIL import Image
import numpy as np
import pywt
def load_image_as_array(imgFile):
    """Open *imgFile* with PIL and return its pixel data as a numpy array."""
    return np.asarray(Image.open(imgFile))
|
{"/encoder.py": ["/encoder_lib.py", "/zigzag_lib.py"], "/decoder.py": ["/encoder_lib.py", "/decoder_lib.py", "/zigzag_lib.py"]}
|
23,633
|
kfjustis/dic-final
|
refs/heads/master
|
/encoder.py
|
import encoder_lib as encoder
import zigzag_lib as ziggy
import huffman_lib as huffman
import getopt
import sys
def main(argv):
    """DCT-transform an input image, zig-zag scan it, and Huffman-encode
    the result to image.bit.

    :param argv: argument list, typically sys.argv[1:]
    """
    usage = "USAGE: python3 encoder.py -i <file> -q <qsize>"
    inputFile = ""
    qsize = ""
    # load file with command line args
    try:
        opts, args = getopt.getopt(argv, "i:q:")
    except getopt.GetoptError:
        print(usage)
        sys.exit()
    for opt, arg in opts:
        if opt == "-i":
            inputFile = arg
        elif opt == "-q":
            qsize = arg
        else:
            print(usage)
            sys.exit()
    # error check command line args
    # BUGFIX: compare with ==, not `is ""` — identity tests against string
    # literals are implementation-dependent (SyntaxWarning on CPython 3.8+).
    if inputFile == "":
        print(usage)
        sys.exit()
    if qsize == "":
        print(usage)
        sys.exit()
    # BUGFIX: a non-numeric -q value previously crashed with ValueError.
    try:
        qsize_value = int(qsize)
    except ValueError:
        print(usage)
        sys.exit()
    if qsize_value > 512:
        print("qsize cannot be greater than 512!")
        print(usage)
        sys.exit()
    # load image as array
    print("Loading image...")
    imgArr = encoder.load_image_as_array(inputFile)
    print("\tImage loaded!")
    # apply DCT transform
    print("Applying DCT transform...")
    imgDCT = encoder.cv2.dct(imgArr.astype(float))
    print("\tTransform applied!")
    # zig-zag scan through array, store values to a text file, then feed
    # that to huffman which writes the final .bit file - ENCODER DONE
    print("Encoding matrix...")
    imgDCTZ = ziggy.generateZigMatrix(imgDCT)
    if imgDCTZ is None:
        print("\tFailed to zig-zag! Please re-run the program with valid params.")
        sys.exit()
    encoder.np.savetxt("raw.txt", imgDCTZ)
    enc = huffman.Encoder("raw.txt")
    enc.write("image.bit")
    print("\tEncoding complete!")
if __name__ == "__main__":
    main(sys.argv[1:])
|
{"/encoder.py": ["/encoder_lib.py", "/zigzag_lib.py"], "/decoder.py": ["/encoder_lib.py", "/decoder_lib.py", "/zigzag_lib.py"]}
|
23,634
|
kfjustis/dic-final
|
refs/heads/master
|
/zigzag_lib.py
|
import math
import numpy as np
'''
Source: http://wdxtub.com/interview/14520595473495.html
I modified this for functional design then converted it
to Python 3 with 2to3.
'''
def generateZigMatrix(matrix):
    """Flatten a 2D matrix into a list in zig-zag (JPEG-style) scan order.

    Returns None when no matrix is supplied.
    """
    if matrix is None:
        return None
    rows = len(matrix)
    cols = len(matrix[0])
    scanned = []
    # Walk the anti-diagonals (cells where i + j == diag); even diagonals
    # are traversed bottom-up, odd ones top-down — the same order the
    # original cursor walk produced.
    for diag in range(rows + cols - 1):
        lo = max(0, diag - cols + 1)
        hi = min(rows - 1, diag)
        row_indices = list(range(lo, hi + 1))
        if diag % 2 == 0:
            row_indices.reverse()
        for i in row_indices:
            scanned.append(matrix[i][diag - i])
    return scanned
def iZigZag(matrix, qsize):
    """Inverse zig-zag: rebuild a square 2D array from a 1D zig-zag scan.

    :param matrix: 1D sequence produced by generateZigMatrix; its length
        must be a perfect square
    :param qsize: side length used when traversing the output
        NOTE(review): the code assumes qsize == sqrt(len(matrix)); if they
        differ, cells are left unfilled or indexing goes out of bounds —
        confirm with the callers.
    :return: dim x dim numpy float array (dim derived from len(matrix))
    """
    # dimension 2D array from 1D length (length must be square)
    dim = int(math.sqrt(len(matrix)))
    k = False  # direction flag: alternates the diagonal traversal direction
    i = 0
    x = 0
    y = 0
    j = 0
    size = qsize
    count = 0  # read cursor into the 1D input
    # build 2d array
    ret = np.zeros(shape=(dim,dim), dtype="float")
    # top-left triangle of matrix
    while i < size:
        if k:
            # walk diagonal top-right -> bottom-left
            x = 0
            y = i
            while x <= i:
                ret[x][y] = matrix[count]
                count += 1
                x += 1
                y -= 1
            k = False
        else:
            # walk diagonal bottom-left -> top-right
            x = i
            y = 0
            while y <= i:
                ret[x][y] = matrix[count]
                count += 1
                x -= 1
                y += 1
            k = True
        i += 1
    # bottom-right triangle of matrix
    j = size - 2
    while j >= 0:
        if k:
            x = size - 1 - j
            y = size - 1
            while x <= size-1:
                ret[x][y] = matrix[count]
                count += 1
                x += 1
                y -= 1
            k = False
        else:
            x = size - 1 - j
            y = size - 1
            while y <= size-1:
                ret[x][y] = matrix[count]
                count += 1
                x -= 1
                y += 1
            k = True
        j -= 1
    return ret
|
{"/encoder.py": ["/encoder_lib.py", "/zigzag_lib.py"], "/decoder.py": ["/encoder_lib.py", "/decoder_lib.py", "/zigzag_lib.py"]}
|
23,635
|
kfjustis/dic-final
|
refs/heads/master
|
/decoder_lib.py
|
import cv2
from PIL import Image
import math
import matplotlib.pyplot as plt
import numpy as np
def getImageError(img1_arr, img2_arr, size=512):
    """Compute the mean squared error between two images and plot the
    cumulative per-pixel error.

    :param img1_arr: first image as an array (flattened internally)
    :param img2_arr: second image; must have at least as many pixels
    :param size: image side length used as the MSE denominator (size**2).
        Generalized from the previous hard-coded 512; the default keeps
        the original behaviour.
    :return: the MSE, or None when either input is missing
    """
    if img1_arr is None or img2_arr is None:
        return None
    arr1 = img1_arr.ravel()
    arr2 = img2_arr.ravel()
    error = 0.0
    eArr = []  # running (cumulative) squared error, plotted below
    for i in range(len(arr1)):
        error += math.pow(abs(int(arr1[i]) - int(arr2[i])), 2)
        eArr.append(error)
    error = error / math.pow(size, 2)
    # NOTE(review): plt.show() blocks until the plot window is closed.
    plt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
    plt.title("Resulting Error per Pixel")
    plt.ylabel("Error")
    plt.xlabel("Pixel value")
    plt.plot(eArr)
    plt.show()
    return error
def getPSNR(mse):
    """Return the peak signal-to-noise ratio in dB for *mse* (8-bit peak),
    or None when no MSE is available."""
    if mse is None:
        return None
    peak_squared = math.pow(255, 2)
    return 10 * math.log10(peak_squared / mse)
|
{"/encoder.py": ["/encoder_lib.py", "/zigzag_lib.py"], "/decoder.py": ["/encoder_lib.py", "/decoder_lib.py", "/zigzag_lib.py"]}
|
23,636
|
kfjustis/dic-final
|
refs/heads/master
|
/decoder.py
|
import encoder_lib as encoder
import decoder_lib as decoder
import zigzag_lib as ziggy
import huffman_lib as huffman
import matplotlib.pyplot as plt
import getopt
import sys
def main(argv):
    """Decode a Huffman .bit file back into an image, display it, and
    report MSE/PSNR against the original image.

    :param argv: argument list, typically sys.argv[1:]
    """
    usage = "USAGE: python3 decoder.py -b <.bit file> -i <image> -q <qsize>"
    inputFile1 = ""
    # BUGFIX: this was misspelled `intputFile2`, so omitting -i crashed
    # with NameError instead of printing the usage message.
    inputFile2 = ""
    qsize = ""
    # load file with command line args
    try:
        opts, args = getopt.getopt(argv, "b:i:q:")
    except getopt.GetoptError:
        print(usage)
        sys.exit()
    for opt, arg in opts:
        if opt == "-b":
            inputFile1 = arg
        elif opt == "-i":
            inputFile2 = arg
        elif opt == "-q":
            qsize = int(arg)
        else:
            print(usage)
            sys.exit()
    # BUGFIX: compare with ==, not `is ""` — identity against literals is
    # implementation-dependent (SyntaxWarning on CPython 3.8+).
    if inputFile1 == "" or inputFile2 == "" or qsize == "":
        print(usage)
        sys.exit()
    # decode huffman back into text
    print("Decoding .bit file...")
    dec = huffman.Decoder(inputFile1)
    dec.decode_as("raw_inverse.txt")
    print("\tDecoding complete!")
    # read imgDCTZ back in
    print("Loading zig-zagged DCT array...")
    imgDCTZ = decoder.np.loadtxt("raw_inverse.txt")
    print("\tArray loaded!")
    # reverse zig-zag
    print("Unpacking zigged matrix...")
    imgDCT = ziggy.iZigZag(imgDCTZ, qsize)
    print("\tMatrix unpacked!")
    print("Reversing DCT...")
    imgInverse = decoder.cv2.idct(imgDCT.astype(float))
    print("\tDCT reversed!")
    imgRecon = decoder.Image.fromarray(imgInverse)
    print("Displaying image...")
    imgRecon.show()
    print("Calculating error...")
    imgOrig = encoder.load_image_as_array(inputFile2)
    iError = decoder.getImageError(decoder.np.asarray(imgRecon),
                                   decoder.np.asarray(imgOrig))
    if iError is not None:
        print("\tMean squared error: ""{:.5}%".format(float(iError) * 100))
    else:
        print("\tERROR: Could not display error!")
    print("Calculating PSNR...")
    psnr = decoder.getPSNR(iError)
    print("\tPSNR: " + str(psnr))
if __name__ == "__main__":
    main(sys.argv[1:])
|
{"/encoder.py": ["/encoder_lib.py", "/zigzag_lib.py"], "/decoder.py": ["/encoder_lib.py", "/decoder_lib.py", "/zigzag_lib.py"]}
|
23,638
|
alexabravo/Proyecto2
|
refs/heads/master
|
/main.py
|
#Cristopher Jose Rodolfo Barrios Solis 18207
#Proyecto 2 Algoritmos
#Recomendar restaurantes
from Connectar import *

# Option number -> label. Shared by the client ("1") and administrator ("3")
# flows so the duplicated if-chains cannot drift apart again.
OPCIONES_ESTILO = {
    "1": "Comida Rapida",
    "2": "Comida Elegante",
    "3": "Comida Saludable",
    "4": "Comida Casera",
}
OPCIONES_PREFERENCIA = {
    "1": "Ambiente del lugar",
    "2": "Sabor de la comida",
    "3": "Precio de la comida",
    "4": "Compania al ir a comer",
}

print("O _ 0 Bienvenido a nuestra aplicacion RESTAUNATOS O _ 0\n")
inicio = ("\n""Que desea hacer? (Ingrese numero)\n\n"
          "1. Iniciar Sesion\n"
          "2. Comer Ya!\n"
          "3. Administrador\n"
          "4. Salir")
print(inicio)
menu = str(input(": "))
while menu != "4":
    if menu == "1":
        nombre = str(raw_input("Ingrese su nombre: "))
        edad = str(raw_input("Ingrese su edad: "))
        agregarCliente(nombre, edad)
        preguntaEstilo = str(input("\n""Que estilo y precio de comida prefiere? (Ingrese numero)\n"
                                   "1. Comida rapida, Accesible\n"
                                   "2. Comida elegante, Alto\n"
                                   "3. Comida saludable, Regular\n"
                                   "4. Comida casera, Accesible\n\n"
                                   ": "))
        # Robustness: skip the DB call on an invalid option instead of
        # crashing with an undefined variable.
        style = OPCIONES_ESTILO.get(preguntaEstilo)
        if style is not None:
            clienteEstilo(nombre, style)
        preguntaPreferencia = str(input("\n""Que le da mas importancia a un restaurante? (Ingrese numero)\n"
                                        "1. Ambiente del lugar\n"
                                        "2. Sabor de la comida\n"
                                        "3. Precio de la comida\n"
                                        "4. Compania al ir a comer\n\n"
                                        ": "))
        # BUGFIX: the original tested preguntaEstilo here, ignoring the
        # answer that was just read into preguntaPreferencia.
        preferencia = OPCIONES_PREFERENCIA.get(preguntaPreferencia)
        if preferencia is not None:
            clientePreferencia(nombre, preferencia)
        print(inicio)
        menu = str(input(": "))
    if menu == "2":
        nombre = str(raw_input("Ingrese su nombre: "))
        # NOTE(review): Preferencias() is declared as (objeto, listaObjeto);
        # this zero-argument call looks wrong — confirm intended arguments.
        reco = recomendar(nombre, Preferencias(), nombre, Preferencias())
        print(reco)
        print(inicio)
        menu = str(input(": "))
    if menu == "3":
        nombreRestaurante = str(raw_input("Ingrese nombre de restaurante: "))
        agregarRestaurante(nombreRestaurante)
        preguntaEstilo = str(input("\n""Que estilo y precio de comida pertenece? (Ingrese numero)\n"
                                   "1. Comida rapida, Accesible\n"
                                   "2. Comida elegante, Alto\n"
                                   "3. Comida saludable, Regular\n"
                                   "4. Comida casera, Accesible\n\n"
                                   ": "))
        styleRes = OPCIONES_ESTILO.get(preguntaEstilo)
        if styleRes is not None:
            restauranteEstilo(nombreRestaurante, styleRes)
        preguntaPreferencia = str(input("\n""Que le da mas importancia a su restaurante? (Ingrese numero)\n"
                                        "1. Ambiente del lugar\n"
                                        "2. Sabor de la comida\n"
                                        "3. Precio de la comida\n"
                                        "4. Compania al ir a comer\n\n"
                                        ": "))
        # BUGFIX: same copy-paste bug as the client flow — use the
        # preference answer, not the style answer.
        preferenciaRes = OPCIONES_PREFERENCIA.get(preguntaPreferencia)
        if preferenciaRes is not None:
            restaurantePreferencia(nombreRestaurante, preferenciaRes)
        print(inicio)
        menu = str(input(": "))
print("Buen provecho!!!!")
|
{"/main.py": ["/Connectar.py"]}
|
23,639
|
alexabravo/Proyecto2
|
refs/heads/master
|
/Connectar.py
|
#Cristopher Barrios 18207
#Documentacion https://neo4j-rest-client.readthedocs.io/en/latest/
from neo4jrestclient.client import GraphDatabase
gdb = GraphDatabase("http://localhost:7474", username="neo4j", password="CristopherBarrios")
#insertando
def agregarRestaurante(name):
    """Create a graph node labelled CadenaDeRestaurantes named *name*."""
    nodo = gdb.nodes.create(nombre=name)
    nodo.labels.add("CadenaDeRestaurantes")
def agregarCliente(name, age):
    """Create a graph node labelled Cliente with a name and an age."""
    nodo = gdb.nodes.create(nombre=name, edad=age)
    nodo.labels.add("Cliente")
def agregarEstilo(name, costo):
    """Create a graph node labelled EstilodeComida with a name and a price."""
    nodo = gdb.nodes.create(nombre=name, precio=costo)
    nodo.labels.add("EstilodeComida")
def agregarPreferencia(name):
    """Create a graph node labelled PreferenciaComida named *name*."""
    nodo = gdb.nodes.create(nombre=name)
    nodo.labels.add("PreferenciaComida")
#Relaciones
def clientePreferencia(name, preferencia):
    """Link client *name* to a PreferenciaComida node with a 'prefiere' edge."""
    clientes = gdb.labels.get("Cliente")
    clientes.all()
    preferencias = gdb.labels.get("PreferenciaComida")
    preferencias.all()
    origen = clientes.get(nombre=name)[0]
    destino = preferencias.get(nombre=preferencia)[0]
    origen.relationships.create("prefiere", destino)
def clienteEstilo(name, estilo):
    """Link client *name* to an EstilodeComida node with a 'prefiere' edge."""
    clientes = gdb.labels.get("Cliente")
    clientes.all()
    estilos = gdb.labels.get("EstilodeComida")
    estilos.all()
    origen = clientes.get(nombre=name)[0]
    destino = estilos.get(nombre=estilo)[0]
    origen.relationships.create("prefiere", destino)
def restaurantePreferencia(name, preferencia):
    """Link restaurant *name* to a PreferenciaComida node ('pertenece' edge)."""
    restaurantes = gdb.labels.get("CadenaDeRestaurantes")
    restaurantes.all()
    preferencias = gdb.labels.get("PreferenciaComida")
    preferencias.all()
    origen = restaurantes.get(nombre=name)[0]
    destino = preferencias.get(nombre=preferencia)[0]
    origen.relationships.create("pertenece", destino)
def restauranteEstilo(name, estilo):
    """Link restaurant *name* to an EstilodeComida node."""
    restaurantes = gdb.labels.get("CadenaDeRestaurantes")
    restaurantes.all()
    estilos = gdb.labels.get("EstilodeComida")
    estilos.all()
    origen = restaurantes.get(nombre=name)[0]
    destino = estilos.get(nombre=estilo)[0]
    # NOTE(review): "petenece" (sic) is the stored relationship name; it is
    # preserved because queries elsewhere may match this exact string.
    origen.relationships.create("petenece", destino)
#Fuciones para logaritmo
def recomendar(cliente, clientes, restaurante, restaurantes):
    """Return the preferences the given restaurant shares with restaurant i.

    :param cliente: client name to look up in *clientes*
    :param clientes: list of client names
    :param restaurante: restaurant name to look up in *restaurantes*
    :param restaurantes: list of restaurant names
    :return: list of common preference values (from the last comparison made)
    """
    inicioCliente = buscar(cliente, clientes)
    inicioRestaurante = buscar(restaurante, restaurantes)
    preferenciasClientes = Preferencias(cliente, clientes)
    preferenciasRestaurantes = Preferencias(restaurante, restaurantes)
    # BUGFIX: `com` was unbound when both lists were empty.
    com = []
    for i in range(len(clientes)):
        com = comun(preferenciasClientes[inicioCliente], preferenciasClientes[i])
    for i in range(len(restaurantes)):
        # BUGFIX: the original indexed `tamañoRestaurante` (an int) instead
        # of the restaurant preference lists, raising TypeError.
        com = comun(preferenciasRestaurantes[inicioRestaurante],
                    preferenciasRestaurantes[i])
    # NOTE(review): only the final loop iteration survives; accumulating the
    # intersections is probably intended — confirm with the author.
    return com
def buscar(objeto, listaObjeto):
    """Return the index of *objeto* in *listaObjeto*, or -1 when absent.

    BUGFIX: the original referenced an undefined name `listaCliente`,
    raising NameError whenever the object was actually found.
    """
    if objeto in listaObjeto:
        return listaObjeto.index(objeto)
    return -1
def Preferencia(name):
    """Fetch every outgoing relationship of client *name* and return the
    flattened, non-null cell values from the query result rows."""
    # NOTE(review): the Cypher query is built by string concatenation from
    # user input — injection risk; parameterize if the client supports it.
    consulta = "match (Cliente{nombre:'" + name + "'})-[x]->(d) return x,d"
    resultado = gdb.query(consulta, data_contents=True)
    filas = resultado.rows
    valores = []
    if filas is not None:
        for fila in filas:
            for celda in fila:
                for clave in celda:
                    if clave is not None:
                        valores.append(celda[clave])
    return valores
def Preferencias(objeto, listaObjeto):
    """Collect Preferencia(x) for each x in *listaObjeto*.

    *objeto* is accepted for interface compatibility but never used.
    """
    recogidas = []
    for elemento in listaObjeto:
        prefs = Preferencia(elemento)
        if prefs is not None:
            recogidas.append(prefs)
    return recogidas
def comun(busca, buscado):
    """Return the unique elements of *buscado* that also occur in *busca*,
    preserving their first-seen order."""
    compartidos = []
    for elemento in buscado:
        if elemento in busca and elemento not in compartidos:
            compartidos.append(elemento)
    return compartidos
|
{"/main.py": ["/Connectar.py"]}
|
23,650
|
compunova/kozinaki
|
refs/heads/master
|
/kozinaki/providers/common.py
|
# Copyright (c) 2016 CompuNova Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
class BaseProvider:
    """Abstract base for cloud provider drivers (AWS, GCP, Azure, libcloud).

    Concrete providers implement node lifecycle, listing, and power
    operations; the volume/snapshot/migration hooks below default to
    NotImplementedError and may be overridden selectively.

    NOTE(review): `__metaclass__` is the Python 2 spelling and is ignored
    by Python 3 — the @abstractmethod decorators are therefore not
    enforced. Switching to `class BaseProvider(metaclass=ABCMeta)` would
    enforce them, but would also break subclasses that do not implement
    every abstract method, so it is flagged rather than changed here.
    """
    __metaclass__ = ABCMeta
    def __init__(self):
        pass
    @abstractmethod
    def list_nodes(self):
        """Return all VM known to the virtualization layer, as a list"""
        raise NotImplementedError()
    @abstractmethod
    def list_sizes(self):
        """Return all sizes from provider"""
        raise NotImplementedError()
    @abstractmethod
    def create_node(self, instance, image_meta, *args, **kwargs):
        """Create a provider instance for the given OpenStack instance."""
        raise NotImplementedError()
    @abstractmethod
    def reboot(self, instance, *args, **kwargs):
        """Reboot the provider instance backing *instance*."""
        raise NotImplementedError()
    @abstractmethod
    def destroy(self, instance, *args, **kwargs):
        """Destroy the provider instance backing *instance*."""
        raise NotImplementedError()
    @abstractmethod
    def get_info(self, instance):
        """Get instance info from provider
        Must return dict:
        {
        state: the running state, one of the power_state codes
        max_mem_kb: (int) the maximum memory in KBytes allowed
        mem_kb: (int) the memory in KBytes used by the instance
        num_cpu: (int) the number of virtual CPUs for the instance
        cpu_time_ns: (int) the CPU time used in nanoseconds
        id: a unique ID for the instance
        }
        :param instance: Openstack node instance
        :return: Info dict
        """
        raise NotImplementedError()
    @abstractmethod
    def list_instances(self):
        """Return the names of all the instances known to the virtualization layer, as a list"""
        raise NotImplementedError()
    @abstractmethod
    def list_instance_uuids(self):
        """Return the UUIDS of all the instances known to the virtualization layer, as a list"""
        raise NotImplementedError()
    @abstractmethod
    def power_off(self, instance, timeout=0, retry_interval=0):
        """Power off the specified instance.
        :param instance: nova.objects.instance.Instance
        :param timeout: time to wait for GuestOS to shutdown
        :param retry_interval: How often to signal guest while
        waiting for it to shutdown
        """
        raise NotImplementedError()
    @abstractmethod
    def power_on(self, context, instance, network_info, block_device_info=None):
        """Issues a provider specific commend to start provider instance
        :param instance: Local instance
        """
        raise NotImplementedError()
    # The remaining hooks are optional (not abstract): providers that do not
    # support volumes/snapshots/migration inherit the NotImplementedError.
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach the disk to the instance at mountpoint using info."""
        raise NotImplementedError()
    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach the disk attached to the instance."""
        raise NotImplementedError()
    def snapshot(self, context, instance, image_id, update_task_state):
        """Snapshots the specified instance.
        :param context: security context
        :param instance: nova.objects.instance.Instance
        :param image_id: Reference to a pre-created image that will hold the snapshot.
        """
        raise NotImplementedError()
    def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        """Completes a resize.
        :param context: the context for the migration/resize
        :param migration: the migrate/resize information
        :param instance: nova.objects.instance.Instance being migrated/resized
        :param disk_info: the newly transferred disk information
        :param network_info:
        :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param image_meta: image object returned by nova.image.glance that
        defines the image from which this instance
        was created
        :param resize_instance: True if the instance is being resized,
        False otherwise
        :param block_device_info: instance volume block device info
        :param power_on: True if the instance should be powered on, False
        otherwise
        """
        raise NotImplementedError()
    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM.
        :param instance: nova.objects.instance.Instance
        """
        raise NotImplementedError()
|
{"/kozinaki/providers/aws/provider.py": ["/kozinaki/providers/aws/config.py", "/kozinaki/providers/common.py"], "/kozinaki/providers/gcp/provider.py": ["/kozinaki/providers/common.py"], "/kozinaki/manage/manage.py": ["/kozinaki/manage/utils.py"], "/kozinaki/providers/libcloud_driver/provider.py": ["/kozinaki/providers/common.py", "/kozinaki/providers/libcloud_driver/extended_drivers.py"], "/kozinaki/manage/__main__.py": ["/kozinaki/manage/manage.py"]}
|
23,651
|
compunova/kozinaki
|
refs/heads/master
|
/kozinaki/manage/utils.py
|
import os
from jinja2 import Environment, PackageLoader, meta
PACKAGE_NAME = __name__[:__name__.rfind('.')]
def render_template(template, to_file=None, context=None):
    """Render a Jinja2 package template.

    :param template: template path relative to this package
    :param to_file: when given, write the rendered text there (returns None)
    :param context: optional mapping of template variables
    :return: the rendered text, or None when written to *to_file*
    """
    directory, template_name = os.path.split(template)
    env = Environment(loader=PackageLoader(PACKAGE_NAME, directory),
                      keep_trailing_newline=True)
    rendered = env.get_template(template_name).render(**context or {})
    if to_file is None:
        return rendered
    with open(to_file, 'w') as out:
        out.write(rendered)
def get_templates_vars(templates):
    """Return the set of undeclared variable names used by the template(s).

    :param templates: a template path or a list of template paths,
        relative to this package
    :return: set of variable names the templates expect
    """
    # Renamed from `vars`, which shadowed the builtin of the same name.
    found = set()
    templates = [templates] if not isinstance(templates, list) else templates
    for filename in templates:
        path, filename = os.path.split(filename)
        jenv = Environment(loader=PackageLoader(PACKAGE_NAME, path),
                           keep_trailing_newline=True)
        # get_source returns (source, filename, uptodate); only the source
        # text is needed for parsing.
        ts = jenv.loader.get_source(jenv, filename)
        pc = jenv.parse(ts[0])
        found.update(meta.find_undeclared_variables(pc))
    return found
def render_json_to_template(provider, token_values):
    """Build an INI-style section string from a provider's token definitions.

    :param provider: dict with optional 'section_name' and a 'tokens'
        mapping of token name -> {'default': ...}
    :param token_values: explicit values overriding token defaults
    :return: the assembled configuration text
    """
    lines = []
    section = provider.get('section_name')
    if section:
        lines.append('[{}]\n'.format(section))
    for token_name, token_data in provider.get('tokens', {}).items():
        value = token_values.get(token_name) or token_data.get('default')
        assert value is not None  # every token needs a value or a default
        lines.append('{token}={value}\n'.format(token=token_name, value=value))
    return ''.join(lines)
|
{"/kozinaki/providers/aws/provider.py": ["/kozinaki/providers/aws/config.py", "/kozinaki/providers/common.py"], "/kozinaki/providers/gcp/provider.py": ["/kozinaki/providers/common.py"], "/kozinaki/manage/manage.py": ["/kozinaki/manage/utils.py"], "/kozinaki/providers/libcloud_driver/provider.py": ["/kozinaki/providers/common.py", "/kozinaki/providers/libcloud_driver/extended_drivers.py"], "/kozinaki/manage/__main__.py": ["/kozinaki/manage/manage.py"]}
|
23,652
|
compunova/kozinaki
|
refs/heads/master
|
/kozinaki/providers/aws/config.py
|
# Copyright (c) 2016 CompuNova Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
# Default AWS region/endpoint used by the AWS provider driver.
aws_region = 'us-east-1'
aws_endpoint = 'ec2.us-east-1.amazonaws.com'
port = 443
# NOTE(review): this produces "443:ec2.us-east-1.amazonaws.com" — the usual
# convention is endpoint:port; confirm what the consumer expects.
host = str(port) + ":" + aws_endpoint
secure = True
# Adding a Red Hat Linux image below
aws_ami = "ami-785bae10"
instance_type = "t2.micro"
ec2_subnet_id = "subnet-e713cdbe"
# Mapping OpenStack's flavor IDs(which seems to be randomly assigned) to EC2's flavor names
flavor_map = {2: 't2.micro', 5: 't2.small', 1: 't2.medium', 3: 'c3.xlarge', 4: 'c3.2xlarge'}
# Add image maps key: image in openstack, Value: EC2_AMI_ID
image_map = {}
# Using defaultdict as we need to get a default EBS volume to be returned if we access this map with an unknown key
volume_map_no_default = {'ed6fcf64-8c74-49a0-a30c-76128c7bda47': 'vol-83db57cb',
                         'ac28d216-6dda-4a7b-86c4-d95209ae8181': 'vol-1eea8a56'}
volume_map = defaultdict(lambda: 'vol-83db57cb')
keypair_map = {}
# The limit on maximum resources you could have in the AWS EC2.
VCPUS = 100
MEMORY_IN_MBS = 88192
DISK_IN_GB = 1028
|
{"/kozinaki/providers/aws/provider.py": ["/kozinaki/providers/aws/config.py", "/kozinaki/providers/common.py"], "/kozinaki/providers/gcp/provider.py": ["/kozinaki/providers/common.py"], "/kozinaki/manage/manage.py": ["/kozinaki/manage/utils.py"], "/kozinaki/providers/libcloud_driver/provider.py": ["/kozinaki/providers/common.py", "/kozinaki/providers/libcloud_driver/extended_drivers.py"], "/kozinaki/manage/__main__.py": ["/kozinaki/manage/manage.py"]}
|
23,653
|
compunova/kozinaki
|
refs/heads/master
|
/kozinaki/utils.py
|
# Copyright (c) 2016 CompuNova Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from functools import wraps
def timeout_call(wait_period, timeout):
    """Decorator factory: retry the wrapped callable until it returns or the
    deadline passes, sleeping *wait_period* seconds between attempts.

    A call may pass `_timeout=<seconds>` to override the default deadline
    (note: `_timeout` is still forwarded to the wrapped function, matching
    the original design).

    :param wait_period: seconds to sleep between failed attempts
    :param timeout: default overall deadline in seconds
    :raises: the last exception raised by the callable once the deadline passes
    """
    def _inner(f):
        @wraps(f)
        def _wrapped(*args, **kwargs):
            start = time.time()
            if kwargs.get('_timeout'):
                end = start + kwargs['_timeout']
            else:
                end = start + timeout
            last_exc = None
            # The function is always attempted at least once, even with a
            # non-positive timeout (the original raised `None` in that case).
            while True:
                try:
                    return f(*args, **kwargs)
                except Exception as exc:
                    # BUGFIX: bind the exception to an outer name. In
                    # Python 3 `except ... as exc` deletes `exc` when the
                    # block exits (PEP 3110), so the original
                    # `raise exc` after the loop raised NameError.
                    last_exc = exc
                if time.time() >= end:
                    break
                time.sleep(wait_period)
            raise last_exc
        return _wrapped
    return _inner
# test
# Manual smoke test: `func` always raises, so invoking it exercises the retry
# loop until the deadline passes. The decorator default is 600 s, but the
# __main__ call below overrides it to 3 s via `_timeout`.
@timeout_call(wait_period=3, timeout=600)
def func():
    raise Exception('none')
if __name__ == "__main__":
    func(_timeout=3)
|
{"/kozinaki/providers/aws/provider.py": ["/kozinaki/providers/aws/config.py", "/kozinaki/providers/common.py"], "/kozinaki/providers/gcp/provider.py": ["/kozinaki/providers/common.py"], "/kozinaki/manage/manage.py": ["/kozinaki/manage/utils.py"], "/kozinaki/providers/libcloud_driver/provider.py": ["/kozinaki/providers/common.py", "/kozinaki/providers/libcloud_driver/extended_drivers.py"], "/kozinaki/manage/__main__.py": ["/kozinaki/manage/manage.py"]}
|
23,654
|
compunova/kozinaki
|
refs/heads/master
|
/kozinaki/providers/azure/provider.py
|
# Copyright (c) 2016 CompuNova Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
from oslo_config import cfg
from haikunator import Haikunator
from nova.image import glance
from nova.compute import power_state, task_states
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.common.credentials import ServicePrincipalCredentials
from azure.servicemanagement import ServiceManagementService, CaptureRoleAsVMImage
from msrestazure.azure_exceptions import CloudError, CloudErrorData
from ..common import BaseProvider
LOG = logging.getLogger(__name__)
# Human-friendly random name generator, used to build unique VHD/NIC names.
haikunator = Haikunator()
# Glance ``os_distro`` values mapped to Azure marketplace image references.
VM_REFERENCE = {
    'UbuntuServer_16.04.0-LTS_latest': {
        'publisher': 'Canonical',
        'offer': 'UbuntuServer',
        'sku': '16.04.0-LTS',
        'version': 'latest'
    },
    'WindowsServerEssentials_WindowsServerEssentials_latest': {
        'publisher': 'MicrosoftWindowsServerEssentials',
        'offer': 'WindowsServerEssentials',
        'sku': 'WindowsServerEssentials',
        'version': 'latest'
    }
}
# Azure instance-view power-state codes mapped to nova power states.
# Transitional states (starting/stopping/deallocating) are reported as
# NOSTATE because nova has no direct equivalent.
POWER_STATE_MAP = {
    'PowerState/running': power_state.RUNNING,
    'PowerState/starting': power_state.NOSTATE,
    'PowerState/deallocating': power_state.NOSTATE,
    'PowerState/deallocated': power_state.SHUTDOWN,
    'PowerState/stopped': power_state.SHUTDOWN,
    'PowerState/stopping': power_state.NOSTATE,
    # power_state.PAUSED,
    # power_state.CRASHED,
    # power_state.STATE_MAP,
    # power_state.SUSPENDED,
}
class AzureProvider(BaseProvider):
def __init__(self):
super(AzureProvider, self).__init__()
self.name = 'AZURE'
self.config_name = 'kozinaki_' + self.name
self._mounts = {}
@staticmethod
def get_management_service(service, config):
if service is ServiceManagementService:
return ServiceManagementService(config['subscription_id'], config['key_file'])
else:
credential_service = ServicePrincipalCredentials(
client_id=config['app_client_id'],
secret=config['app_secret'],
tenant=config['app_tenant']
)
return service(credentials=credential_service, subscription_id=config['subscription_id'])
def get_credentials(self):
config = self.load_config()
credential_service = ServicePrincipalCredentials(
client_id=config['app_client_id'],
secret=config['app_secret'],
tenant=config['app_tenant']
)
return credential_service, config['subscription_id']
    def load_config(self):
        """Register and return this provider's oslo.config options.

        Options are read from the nova configuration (for example
        /etc/nova/nova.conf) under the ``[kozinaki_AZURE]`` section::

            [kozinaki_AZURE]
            subscription_id=<azure subscription id>
            app_client_id=<service principal client id>

        Registering the same options twice is harmless with oslo.config, so
        this can safely be called once per operation.
        """
        provider_opts = [
            cfg.StrOpt('subscription_id', help='Subscribe is from azure portal settings'),
            cfg.StrOpt('key_file', help='API key to work with the cloud provider', secret=True),
            cfg.StrOpt('username', help='Default vm username'),
            cfg.StrOpt('password', help='Azure: default instance password. '
                                        'Password must be 6-72 characters long'),
            cfg.StrOpt('app_client_id', help='Azure app client id'),
            cfg.StrOpt('app_secret', help='Azure app secret'),
            cfg.StrOpt('app_tenant', help='Azure app tenant'),
            cfg.StrOpt('resource_group_name', help='Azure resource group name'),
            cfg.StrOpt('location', help='VM location'),
            cfg.StrOpt('storage_account_name', help='Azure storage account name'),
            cfg.StrOpt('os_disk_name', help='VM default disk name'),
            cfg.StrOpt('vnet_name', help='Azure default virtual network'),
            cfg.StrOpt('subnet_name', help='Azure default subnet name'),
            cfg.StrOpt('ip_config_name', help='Azure default ip config name'),
            cfg.StrOpt('nic_name', help='Azure default nic name'),
            cfg.StrOpt('cloud_service_name', help='Azure default cloud service name'),
            cfg.StrOpt('deployment_name', help='Azure default deployment name'),
        ]
        cfg.CONF.register_opts(provider_opts, self.config_name)
        return cfg.CONF[self.config_name]
def list_nodes(self):
config = self.load_config()
compute_client = self.get_management_service(ComputeManagementClient, config=config)
return list(compute_client.virtual_machines.list_all())
    def list_sizes(self):
        # NOTE(review): this builds a StorageManagementClient but then calls
        # list_role_sizes(), a legacy Service Management API method —
        # presumably a VM-size listing on the compute client was intended.
        # TODO confirm against the azure SDK version pinned by this project.
        config = self.load_config()
        sms = self.get_management_service(StorageManagementClient, config=config)
        return list(sms.list_role_sizes())
    def create_node(self, instance, image_meta, *args, **kwargs):
        """Create an Azure VM for a nova instance.

        :param instance: nova.objects.instance.Instance being spawned; its
            uuid becomes the Azure VM name and its flavor name is used
            verbatim as the Azure VM size.
        :param image_meta: glance image metadata; ``os_distro`` must be a
            key of VM_REFERENCE.
        :returns: the (already completed) async creation operation.
        """
        LOG.info("***** Calling CREATE NODE *******************")
        config = self.load_config()
        # Get info
        image_id = getattr(image_meta.properties, 'os_distro')
        node_name = instance.uuid
        flavor_name = instance.flavor['name']
        # Get services
        resource_client = self.get_management_service(ResourceManagementClient, config=config)
        compute_client = self.get_management_service(ComputeManagementClient, config=config)
        # Get or create resource group
        resource_group = resource_client.resource_groups.create_or_update(
            config['resource_group_name'],
            {'location': config['location']}
        )
        # Configure network (vnet -> subnet -> NIC) and backing storage.
        network = self._get_or_create_vnet(config=config)
        subnet_info = self._get_or_create_subnet(config=config)
        nic = self._get_or_create_nic(subnet_info=subnet_info, config=config)
        storage_account = self._get_or_create_storage_account(config=config)
        vm_parameters = self._create_vm_parameters(
            vm_name=node_name,
            vm_size=flavor_name,
            nic_id=nic.id,
            vm_reference=VM_REFERENCE[image_id],
            config=config
        )
        async_vm_creation = compute_client.virtual_machines.create_or_update(
            config['resource_group_name'],
            node_name,
            vm_parameters
        )
        # Block until provisioning finishes before reporting success.
        async_vm_creation.wait()
        LOG.info("CREATE NODE result: {}".format(async_vm_creation.result()))
        return async_vm_creation
    def _create_vm_parameters(self, vm_name, vm_size, nic_id, vm_reference, config=None):
        """Create the VM parameters structure for virtual_machines.create_or_update.

        :param vm_name: Azure VM name (the nova instance uuid).
        :param vm_size: Azure VM size string (the nova flavor name).
        :param nic_id: id of the NIC to attach to the VM.
        :param vm_reference: marketplace image reference dict with
            publisher/offer/sku/version keys (see VM_REFERENCE).
        :param config: optional pre-loaded provider config.
        """
        config = config or self.load_config()
        return {
            'location': config['location'],
            'os_profile': {
                'computer_name': vm_name,
                'admin_username': config['username'],
                'admin_password': config['password']
            },
            'hardware_profile': {
                'vm_size': vm_size
            },
            'storage_profile': {
                'image_reference': {
                    'publisher': vm_reference['publisher'],
                    'offer': vm_reference['offer'],
                    'sku': vm_reference['sku'],
                    'version': vm_reference['version']
                },
                'os_disk': {
                    'name': config['os_disk_name'],
                    'caching': 'None',
                    'create_option': 'fromImage',
                    # Random suffix keeps each VM's VHD blob name unique.
                    'vhd': {
                        'uri': 'https://{}.blob.core.windows.net/vhds/{}.vhd'.format(
                            config['storage_account_name'], vm_name + haikunator.haikunate())
                    }
                },
            },
            'network_profile': {
                'network_interfaces': [{
                    'id': nic_id,
                }]
            },
        }
    def _get_or_create_nic(self, subnet_info, config=None):
        """Return a free NIC, creating one in *subnet_info* when none exists.

        :param subnet_info: subnet object whose id new NICs are placed in.
        :param config: optional pre-loaded provider config.
        """
        LOG.info("***** Calling _get_or_create_nic *******************")
        config = config or self.load_config()
        network_client = self.get_management_service(NetworkManagementClient, config=config)
        # Reuse the first NIC not bound to any VM.  Note the for/else: the
        # else branch runs only when the loop completes without returning.
        for nic in network_client.network_interfaces.list_all():
            if not nic.virtual_machine:
                return nic
        else:
            # Create new one
            async_nic_creation = network_client.network_interfaces.create_or_update(
                config['resource_group_name'],
                config['nic_name'] + haikunator.haikunate(),
                {
                    'location': config['location'],
                    'ip_configurations': [{
                        'name': config['ip_config_name'],
                        'subnet': {
                            'id': subnet_info.id
                        }
                    }]
                }
            )
            async_nic_creation.wait()
            return async_nic_creation.result()
def _create_cloud_service(self, service_name=None):
config = self.load_config()
azure_sms = self.get_management_service(ServiceManagementService, config=config)
service_name = service_name or config['cloud_service_name']
desc = service_name
label = service_name
location = config['location']
result = azure_sms.create_hosted_service(service_name, label, desc, location=location)
return result
def _get_or_create_subnet(self, config=None):
LOG.info("***** Calling _get_or_create_subnet *******************")
config = config or self.load_config()
network_client = self.get_management_service(NetworkManagementClient, config=config)
# Try get existing storage by name
try:
return network_client.subnets.get(config['resource_group_name'], config['vnet_name'], config['subnet_name'])
except CloudError, error:
if error.inner_exception.error != 'NotFound':
raise error
# Create new one
async_subnet_creation = network_client.subnets.create_or_update(
config['resource_group_name'],
config['vnet_name'],
config['subnet_name'],
{'address_prefix': '10.0.1.0/24'}
)
async_subnet_creation.wait()
return async_subnet_creation.result()
def _get_or_create_vnet(self, config=None):
LOG.info("***** Calling _get_or_create_vnet *******************")
config = config or self.load_config()
network_client = self.get_management_service(NetworkManagementClient, config=config)
# Try get existing storage by name
try:
return network_client.virtual_networks.get(config['resource_group_name'], config['vnet_name'])
except CloudError, error:
if not isinstance(error.inner_exception, CloudErrorData) or \
error.inner_exception.error != 'ResourceNotFound':
raise error
# Create new one
async_vnet_creation = network_client.virtual_networks.create_or_update(
config['resource_group_name'],
config['vnet_name'],
{
'location': config['location'],
'address_space': {
'address_prefixes': ['10.0.0.0/16']
}
}
)
async_vnet_creation.wait()
return async_vnet_creation.result()
def _get_or_create_storage_account(self, config=None):
LOG.info("***** Calling get_or_create_storage_account *******************")
config = config or self.load_config()
storage_client = self.get_management_service(StorageManagementClient, config)
# Find existing storage account
try:
return storage_client.storage_accounts.get_properties(config['resource_group_name'],
config['storage_account_name'])
except CloudError, error:
if not isinstance(error.inner_exception, CloudErrorData) or \
error.inner_exception.error != 'ResourceNotFound':
raise error
# Create new account
storage_async_operation = storage_client.storage_accounts.create(
config['resource_group_name'],
config['storage_account_name'],
{
'sku': {'name': 'standard_lrs'},
'kind': 'storage',
'location': config['location']
}
)
storage_async_operation.wait()
return storage_async_operation.result()
def _get_node_by_name(self, node_name):
"""Get node instance by name
We need to use expand param to get full instance info from InstanceView (e.g. power state).
More details in this issue: https://github.com/Azure/azure-rest-api-specs/issues/117
"""
config = self.load_config()
compute_client = self.get_management_service(ComputeManagementClient, config=config)
try:
return compute_client.virtual_machines.get(config['resource_group_name'], node_name, expand='InstanceView')
except CloudError, error:
if not isinstance(error.inner_exception, CloudErrorData) or \
error.inner_exception.error != 'ResourceNotFound' or 'not found' not in error.message:
raise error
def destroy(self, instance, *args, **kwargs):
config = self.load_config()
compute_client = self.get_management_service(ComputeManagementClient, config=config)
compute_client.virtual_machines.delete(config['resource_group_name'], instance.uuid)
def reboot(self, instance, *args, **kwargs):
config = self.load_config()
compute_client = self.get_management_service(ComputeManagementClient, config=config)
compute_client.virtual_machines.restart(config['resource_group_name'], instance.uuid)
def power_off(self, instance, timeout=0, retry_interval=0):
config = self.load_config()
compute_client = self.get_management_service(ComputeManagementClient, config=config)
return compute_client.virtual_machines.power_off(config['resource_group_name'], instance.uuid)
def power_on(self, context, instance, network_info, block_device_info=None):
config = self.load_config()
compute_client = self.get_management_service(ComputeManagementClient, config=config)
return compute_client.virtual_machines.start(config['resource_group_name'], instance.uuid)
def get_info(self, instance):
node = self._get_node_by_name(instance.uuid)
if node:
node_id = node.vm_id
if len(node.instance_view.statuses) == 2:
node_provision_state, node_power_state = node.instance_view.statuses
node_state = POWER_STATE_MAP[node_power_state.code]
else:
node_state = power_state.NOSTATE
else:
node_state = power_state.NOSTATE
node_id = 0
node_info = {
'state': node_state,
'max_mem_kb': 0, # '(int) the maximum memory in KBytes allowed',
'mem_kb': 0, # '(int) the memory in KBytes used by the instance',
'num_cpu': 0, # '(int) the number of virtual CPUs for the instance',
'cpu_time_ns': 0, # '(int) the CPU time used in nanoseconds',
'id': node_id
}
return node_info
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach the disk to the instance at mountpoint using info."""
        config = self.load_config()
        azure_sms = self.get_management_service(ServiceManagementService, config=config)
        vm_name = instance['metadata']['vm_name']
        if vm_name not in self._mounts:
            self._mounts[vm_name] = {}
        service_name = instance['metadata']['cloud_service_name']
        # Pick the first free LUN slot on the target VM.
        lun = azure_sms.get_available_lun(service_name, vm_name)
        volume_id = connection_info['data']['volume_id']
        # NOTE(review): the third argument is a hard-coded 5 while the
        # looked-up volume_id goes unused by this call — presumably the
        # volume (or its size) should be passed here.  Confirm against the
        # azure SDK's attach_volume signature.
        azure_sms.attach_volume(service_name, vm_name, 5, lun)
        self._mounts[vm_name][mountpoint] = connection_info
        # Remember which LUN this volume occupies for detach_volume().
        instance['metadata'].setdefault('volumes', {})
        instance['metadata']['volumes'][volume_id] = lun
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach the disk attached to the instance."""
config = self.load_config()
azure_sms = self.get_management_service(ServiceManagementService, config=config)
vm_name = instance['metadata']['vm_name']
service_name = instance['metadata']['cloud_service_name']
try:
del self._mounts[vm_name][mountpoint]
except KeyError:
pass
volume_id = connection_info['data']['volume_id']
lun = instance['metadata']['volumes'][volume_id]
azure_sms.detach_volume(service_name, vm_name, lun)
del instance['metadata']['volumes'][volume_id]
    def snapshot(self, context, instance, image_id, update_task_state):
        """Snapshots the specified instance.

        Currently disabled: the early ``return`` below makes this a no-op.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :param image_id: Reference to a pre-created image that will
                         hold the snapshot.
        """
        return
        # NOTE(review): everything below the early ``return`` is dead code —
        # an unfinished classic-deployment snapshot path.  It also uses
        # hard-coded service/deployment/vm names and references
        # ``service_name`` (undefined in this scope) near the end, so it
        # would raise NameError if re-enabled as-is.
        config = self.load_config()
        azure_sms = self.get_management_service(ServiceManagementService, config=config)
        # Power off vm
        result = self.power_off(instance=instance)
        result.wait()
        hosted_service_name = 'compunovacloud'
        deployment_name = 'dep1'
        vm_name = 'vm1'
        image_name = instance.uuid + 'image'
        image = CaptureRoleAsVMImage('Specialized', image_name, image_name + 'label', image_name + 'description', 'english', 'openstack-virtual-machines')
        result = azure_sms.capture_vm_image(hosted_service_name, deployment_name, vm_name, image)
        image_service = glance.get_default_image_service()
        snapshot = image_service.show(context, image_id)
        LOG.debug("**** Snapshot info--> %s" % snapshot)
        snapshot_name = haikunator.haikunate()
        image_url = glance.generate_image_url(image_id)
        LOG.debug("**** image url--> '%s' ****" % image_url)
        image_metadata = {
            'is_public': False,
            'status': 'active',
            'name': '-'.join(('azure', snapshot_name)),
            'properties': {
                'kernel_id': instance['kernel_id'],
                'image_location': 'snapshot',
                'image_state': 'available',
                'ramdisk_id': instance['ramdisk_id'],
                'owner_id': instance['project_id']
            }
        }
        if instance['os_type']:
            image_metadata['properties']['os_type'] = instance['os_type']
        update_task_state(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_SNAPSHOT)
        azure_sms.snapshot(service_name, vm_name, image_id, snapshot_name)
        image_service.update(context, image_id, image_metadata, "fake image data")
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        """Completes a resize.

        Not supported by the Azure provider yet; intentionally a no-op.

        :param context: the context for the migration/resize
        :param migration: the migrate/resize information
        :param instance: nova.objects.instance.Instance being migrated/resized
        :param disk_info: the newly transferred disk information
        :param network_info:
            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param image_meta: image object returned by nova.image.glance that
                           defines the image from which this instance
                           was created
        :param resize_instance: True if the instance is being resized,
                                False otherwise
        :param block_device_info: instance volume block device info
        :param power_on: True if the instance should be powered on, False
                         otherwise
        """
        # raise NotImplementedError()
        pass
    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM.

        Not supported by the Azure provider yet; intentionally a no-op.

        :param instance: nova.objects.instance.Instance
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        pass
def list_instances(self):
return [node.name for node in self.list_nodes()]
    def list_instance_uuids(self):
        # NOTE(review): Azure VM objects expose ``vm_id``/``name`` rather
        # than ``uuid`` — this looks like it would raise AttributeError.
        # Confirm which identifier callers expect (get_info uses ``vm_id``).
        return [node.uuid for node in self.list_nodes()]
|
{"/kozinaki/providers/aws/provider.py": ["/kozinaki/providers/aws/config.py", "/kozinaki/providers/common.py"], "/kozinaki/providers/gcp/provider.py": ["/kozinaki/providers/common.py"], "/kozinaki/manage/manage.py": ["/kozinaki/manage/utils.py"], "/kozinaki/providers/libcloud_driver/provider.py": ["/kozinaki/providers/common.py", "/kozinaki/providers/libcloud_driver/extended_drivers.py"], "/kozinaki/manage/__main__.py": ["/kozinaki/manage/manage.py"]}
|
23,655
|
compunova/kozinaki
|
refs/heads/master
|
/kozinaki/network.py
|
# Copyright (c) 2016 CompuNova Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_utils import uuidutils
from oslo_log import log as logging
from nova.network import api as network_api
from nova.objects import fixed_ip as fixed_ip_obj
from nova.network.manager import RPCAllocateFixedIP, NetworkManager
LOG = logging.getLogger(__name__)
# nova-network option set, re-registered here so kozinaki's managers can run
# with the same configuration surface; duplicates already registered by nova
# are tolerated by the per-option registration loop below.
network_opts = [
    cfg.StrOpt('flat_network_bridge',
               help='Bridge for simple network instances'),
    cfg.StrOpt('flat_network_dns',
               default='8.8.4.4',
               help='DNS server for simple network'),
    cfg.BoolOpt('flat_injected',
                default=False,
                help='Whether to attempt to inject network setup into guest'),
    cfg.StrOpt('flat_interface',
               help='FlatDhcp will bridge into this interface if set'),
    cfg.IntOpt('vlan_start',
               default=100,
               help='First VLAN for private networks'),
    cfg.StrOpt('vlan_interface',
               help='VLANs will bridge into this interface if set'),
    cfg.IntOpt('num_networks',
               default=1,
               help='Number of networks to support'),
    cfg.StrOpt('vpn_ip',
               default='$my_ip',
               help='Public IP for the cloudpipe VPN servers'),
    cfg.IntOpt('vpn_start',
               default=1000,
               help='First Vpn port for private networks'),
    cfg.IntOpt('network_size',
               default=256,
               help='Number of addresses in each private subnet'),
    cfg.StrOpt('fixed_range_v6',
               default='fd00::/48',
               help='Fixed IPv6 address block'),
    cfg.StrOpt('gateway',
               help='Default IPv4 gateway'),
    cfg.StrOpt('gateway_v6',
               help='Default IPv6 gateway'),
    cfg.IntOpt('cnt_vpn_clients',
               default=0,
               help='Number of addresses reserved for vpn clients'),
    cfg.IntOpt('fixed_ip_disassociate_timeout',
               default=600,
               help='Seconds after which a deallocated IP is disassociated'),
    cfg.IntOpt('create_unique_mac_address_attempts',
               default=5,
               help='Number of attempts to create unique mac address'),
    cfg.BoolOpt('fake_call',
                default=False,
                help='If True, skip using the queue and make local calls'),
    cfg.BoolOpt('teardown_unused_network_gateway',
                default=False,
                help='If True, unused gateway devices (VLAN and bridge) are '
                     'deleted in VLAN network mode with multi hosted '
                     'networks'),
    cfg.BoolOpt('force_dhcp_release',
                default=True,
                help='If True, send a dhcp release on instance termination'),
    cfg.BoolOpt('share_dhcp_address',
                default=False,
                help='If True in multi_host mode, all compute hosts share '
                     'the same dhcp address. The same IP address used for '
                     'DHCP will be added on each nova-network node which '
                     'is only visible to the vms on the same host.'),
    cfg.BoolOpt('update_dns_entries',
                default=False,
                help='If True, when a DNS entry must be updated, it sends a '
                     'fanout cast to all network hosts to update their DNS '
                     'entries in multi host mode'),
    cfg.IntOpt("dns_update_periodic_interval",
               default=-1,
               help='Number of seconds to wait between runs of updates to DNS '
                    'entries.'),
    cfg.StrOpt('dhcp_domain',
               default='novalocal',
               help='Domain to use for building the hostnames'),
    cfg.StrOpt('l3_lib',
               default='nova.network.l3.LinuxNetL3',
               help="Indicates underlying L3 management library"),
]
CONF = cfg.CONF
# Register one option at a time: nova may already have registered some of
# these, and a DuplicateOptError for an individual option is harmless.
for option in network_opts:
    try:
        CONF.register_opt(option)
    except cfg.DuplicateOptError as e:
        LOG.debug(e)
# CONF.register_opts(network_opts)
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('network_topic', 'nova.network.rpcapi')
CONF.import_opt('fake_network', 'nova.network.linux_net')
class FlatManager(RPCAllocateFixedIP, NetworkManager):
    """Basic network where no vlans are used.

    FlatManager does not do any bridge or vlan creation. The user is
    responsible for setting up whatever bridges are specified when creating
    networks through nova-manage. This bridge needs to be created on all
    compute hosts.

    The idea is to create a single network for the host with a command like:
    nova-manage network create 192.168.0.0/24 1 256. Creating multiple
    networks for for one manager is currently not supported, but could be
    added by modifying allocate_fixed_ip and get_network to get the network
    with new logic. Arbitrary lists of addresses in a single network can
    be accomplished with manual db editing.

    If flat_injected is True, the compute host will attempt to inject network
    config into the guest. It attempts to modify /etc/network/interfaces and
    currently only works on debian based systems. To support a wider range of
    OSes, some other method may need to be devised to let the guest know which
    ip it should be using so that it can configure itself. Perhaps an attached
    disk or serial device with configuration info.

    Metadata forwarding must be handled by the gateway, and since nova does
    not do any setup in this mode, it must be done manually. Requests to
    169.254.169.254 port 80 will need to be forwarded to the api server.

    NOTE: leftover ``import pdb; pdb.set_trace()`` debugger breakpoints were
    removed from every method below — they would have hung the nova-network
    service in production.
    """
    timeout_fixed_ips = False
    required_create_args = ['bridge']

    def allocate_for_instance(self, context, **kwargs):
        """Allocate network info, or just report existing info when no
        networks were requested for the instance."""
        requested_networks = kwargs.get('requested_networks')
        if requested_networks is not None and len(requested_networks) != 0:
            return NetworkManager.allocate_for_instance(self, context, **kwargs)
        else:
            instance_uuid = kwargs['instance_id']
            if not uuidutils.is_uuid_like(instance_uuid):
                instance_uuid = kwargs.get('instance_uuid')
            host = kwargs['host']
            rxtx_factor = kwargs['rxtx_factor']
            return self.get_instance_nw_info(context, instance_uuid, rxtx_factor,
                                             host)

    def deallocate_fixed_ip(self, context, address, host=None, teardown=True, instance=None):
        """Returns a fixed ip to the pool."""
        # super(FlatManager, self).deallocate_fixed_ip(context, address, host, teardown, instance=instance)
        fixed_ip_obj.FixedIP.disassociate_by_address(context, address)

    def _setup_network_on_host(self, context, network):
        """Setup Network on this host."""
        # NOTE(tr3buchet): this does not need to happen on every ip
        # allocation, this functionality makes more sense in create_network
        # but we'd have to move the flat_injected flag to compute
        network.injected = CONF.flat_injected
        network.save()

    def _teardown_network_on_host(self, context, network):
        """Tear down network on this host."""
        pass

    # NOTE(justinsb): The floating ip functions are stub-implemented.
    # We were throwing an exception, but this was messing up horizon.
    # Timing makes it difficult to implement floating ips here, in Essex.
    def get_floating_ip(self, context, id):
        """Returns a floating IP as a dict."""
        # NOTE(vish): This is no longer used but can't be removed until
        # we major version the network_rpcapi to 2.0.
        return None

    def get_floating_pools(self, context):
        """Returns list of floating pools."""
        # NOTE(maurosr) This method should be removed in future, replaced by
        # get_floating_ip_pools. See bug #1091668
        return {}

    def get_floating_ip_pools(self, context):
        """Returns list of floating ip pools."""
        # NOTE(vish): This is no longer used but can't be removed until
        # we major version the network_rpcapi to 2.0.
        return {}

    def get_floating_ip_by_address(self, context, address):
        """Returns a floating IP as a dict."""
        # NOTE(vish): This is no longer used but can't be removed until
        # we major version the network_rpcapi to 2.0.
        return None

    def get_floating_ips_by_project(self, context):
        """Returns the floating IPs allocated to a project."""
        # NOTE(vish): This is no longer used but can't be removed until
        # we major version the network_rpcapi to 2.0.
        return []

    def get_floating_ips_by_fixed_address(self, context, fixed_address):
        """Returns the floating IPs associated with a fixed_address."""
        # NOTE(vish): This is no longer used but can't be removed until
        # we major version the network_rpcapi to 2.0.
        return []

    @network_api.wrap_check_policy
    def allocate_floating_ip(self, context, project_id, pool):
        """Gets a floating ip from the pool."""
        return None

    def deallocate_floating_ip(self, context, address, affect_auto_assigned):
        """Returns a floating ip to the pool."""
        return None

    def associate_floating_ip(self, context, floating_address, fixed_address, affect_auto_assigned=False):
        """Associates a floating ip with a fixed ip.

        Makes sure everything makes sense then calls _associate_floating_ip,
        rpc'ing to correct host if i'm not it.
        """
        return None

    def disassociate_floating_ip(self, context, address, affect_auto_assigned=False):
        """Disassociates a floating ip from its fixed ip.

        Makes sure everything makes sense then calls _disassociate_floating_ip,
        rpc'ing to correct host if i'm not it.
        """
        return None

    def migrate_instance_start(self, context, instance_uuid,
                               floating_addresses,
                               rxtx_factor=None, project_id=None,
                               source=None, dest=None):
        """No-op: floating ips are not managed by this flat manager."""
        pass

    def migrate_instance_finish(self, context, instance_uuid,
                                floating_addresses, host=None,
                                rxtx_factor=None, project_id=None,
                                source=None, dest=None):
        """No-op: floating ips are not managed by this flat manager."""
        pass

    def update_dns(self, context, network_ids):
        """Called when fixed IP is allocated or deallocated."""
        pass
class FakeManager(RPCAllocateFixedIP, NetworkManager):
    """Stub network manager for testing: every operation is a no-op.

    NOTE: leftover ``import pdb; pdb.set_trace()`` debugger breakpoints were
    removed from every method — they would have hung the service.
    """

    def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
        """Pretend to allocate and return a placeholder address."""
        return '0.0.0.0'

    def allocate_for_instance(self, context, **kwargs):
        """No-op allocation."""
        pass

    def init_host(self):
        """Do any initialization that needs to be run if this is a
        standalone service.
        """
        pass

    def _setup_network_on_host(self, context, network):
        """No-op setup."""
        pass

    def _teardown_network_on_host(self, context, network):
        """No-op teardown."""
        pass

    def _get_network_dict(self, network):
        """Return an empty network description."""
        return {}
|
{"/kozinaki/providers/aws/provider.py": ["/kozinaki/providers/aws/config.py", "/kozinaki/providers/common.py"], "/kozinaki/providers/gcp/provider.py": ["/kozinaki/providers/common.py"], "/kozinaki/manage/manage.py": ["/kozinaki/manage/utils.py"], "/kozinaki/providers/libcloud_driver/provider.py": ["/kozinaki/providers/common.py", "/kozinaki/providers/libcloud_driver/extended_drivers.py"], "/kozinaki/manage/__main__.py": ["/kozinaki/manage/manage.py"]}
|
23,656
|
compunova/kozinaki
|
refs/heads/master
|
/kozinaki/providers/aws/provider.py
|
# Copyright (c) 2016 CompuNova Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import logging
from Crypto.PublicKey import RSA
import boto3
from botocore.exceptions import BotoCoreError, ClientError
from haikunator import Haikunator
from oslo_config import cfg
from nova.image import glance
from oslo_service import loopingcall
from nova.compute import power_state, task_states
from .config import volume_map, flavor_map
from ..common import BaseProvider
LOG = logging.getLogger(__name__)
# Human-friendly random name generator (used for snapshot-style names).
haikunator = Haikunator()
# EC2 instance state codes mapped to nova power states.  Transitional
# codes (0 pending, 32 shutting-down, 64 stopping) have no nova
# equivalent and are reported as NOSTATE.
POWER_STATE_MAP = {
    0: power_state.NOSTATE,
    16: power_state.RUNNING,
    32: power_state.NOSTATE,
    48: power_state.SHUTDOWN,
    64: power_state.NOSTATE,
    80: power_state.SHUTDOWN,
    # power_state.PAUSED,
    # power_state.CRASHED,
    # power_state.STATE_MAP,
    # power_state.SUSPENDED,
}
class AWSProvider(BaseProvider):
def __init__(self):
super(AWSProvider, self).__init__()
self.name = 'AWS'
self.config_name = 'kozinaki_' + self.name
self.driver = self.get_driver()
self._mounts = {}
def get_driver(self):
config = self.load_config()
session = boto3.session.Session(
aws_access_key_id=config['aws_access_key_id'],
aws_secret_access_key=config['aws_secret_access_key'],
region_name=config['region']
)
return session.resource('ec2')
    def load_config(self):
        """Register and return this provider's oslo.config options.

        Options are read from the nova configuration (for example
        /etc/nova/nova.conf) under the ``[kozinaki_AWS]`` section::

            [kozinaki_AWS]
            aws_access_key_id=<access key id>
            aws_secret_access_key=<secret key>
            region=<region name>
        """
        provider_opts = [
            cfg.StrOpt('aws_secret_access_key', help='AWS secret key', secret=True),
            cfg.StrOpt('aws_access_key_id', help='AWS access key id', secret=True),
            cfg.StrOpt('region', help='AWS region name'),
        ]
        cfg.CONF.register_opts(provider_opts, self.config_name)
        return cfg.CONF[self.config_name]
    def create_node(self, instance, image_meta, *args, **kwargs):
        """Launch an EC2 instance for a nova instance.

        :param instance: nova.objects.instance.Instance; its flavor name is
            used verbatim as the EC2 instance type and its uuid is stored as
            an ``openstack_server_id`` tag on the new instance.
        :param image_meta: glance image metadata; ``os_distro`` must hold
            the AMI id.
        :returns: the created boto3 EC2 instance resource.
        """
        # Get info
        image_id = getattr(image_meta.properties, 'os_distro')
        subnet_id = instance.metadata.get('subnet_id')
        flavor_name = instance.flavor['name']
        image_config = {
            'ImageId': image_id,
            'InstanceType': flavor_name,
            'MinCount': 1,
            'MaxCount': 1
        }
        if instance.key_name:
            # Ensure the named key pair exists on AWS and matches our key.
            self._check_keypair(instance.key_name, instance.key_data)
            image_config['KeyName'] = instance.key_name
        if subnet_id:
            image_config.update({'SubnetId': subnet_id})
        aws_instance = self.driver.create_instances(**image_config)[0]
        # Add openstack image uuid to tag
        aws_instance.create_tags(Tags=[{'Key': 'openstack_server_id', 'Value': instance.uuid}])
        # Remember the EC2 id in instance metadata for later lookups.
        instance['metadata']['ec2_id'] = aws_instance.id
        return aws_instance
def _check_keypair(self, key_name, key_data):
aws_key_fingerprint = None
try:
aws_key_fingerprint = self.driver.KeyPair(key_name).key_fingerprint
except ClientError as e:
if 'not exist' not in e.message:
raise Exception(e)
if aws_key_fingerprint:
if aws_key_fingerprint != self._key_to_aws_fingerprint(key_data):
raise Exception('Local and AWS public key pair with name "{}" has different fingerprints.'
'Please set another key name.'.format(key_name))
else:
self.driver.import_key_pair(KeyName=key_name, PublicKeyMaterial=key_data)
def _key_to_aws_fingerprint(self, key_data):
    """Compute the AWS-style MD5 fingerprint of an RSA public key.

    The fingerprint is the MD5 digest of the DER-encoded public key,
    rendered as colon-separated hex byte pairs (the format AWS reports).
    """
    der_bytes = RSA.importKey(key_data).publickey().exportKey("DER")
    digest = hashlib.md5(der_bytes).hexdigest()
    # Insert a colon between every pair of hex digits: 'abcd' -> 'ab:cd'.
    pairs = [digest[i:i + 2] for i in range(0, len(digest), 2)]
    return ':'.join(pairs)
def list_nodes(self):
    """Return every EC2 instance visible to this account/region as a list."""
    all_instances = self.driver.instances.all()
    return list(all_instances)
def destroy(self, instance, *args, **kwargs):
    """Terminate the EC2 instance backing *instance*, if one exists."""
    node = self._get_node_by_uuid(instance.uuid)
    if node is None:
        return
    node.terminate()
def list_instances(self):
    """Return every EC2 instance in the account (same result as list_nodes)."""
    return list(self.driver.instances.all())
def list_sizes(self):
    # NOTE(review): despite the name this returns EC2 *images*, not
    # instance sizes/flavors -- looks like a copy-paste from list_nodes
    # style methods; confirm the intended behavior with callers.
    return list(self.driver.images.all())
def power_on(self, context, instance, network_info, block_device_info=None):
    """Start the stopped EC2 instance backing *instance*, if one exists."""
    node = self._get_node_by_uuid(instance.uuid)
    if node is None:
        return
    node.start()
def list_instance_uuids(self):
    # NOTE(review): these are EC2 instance ids, not OpenStack uuids -- the
    # uuid is stored in the 'openstack_server_id' tag. Confirm that callers
    # really expect provider-side ids here.
    return [node.id for node in self.list_nodes()]
def power_off(self, instance, timeout=0, retry_interval=0):
    """Stop the running EC2 instance backing *instance*, if one exists.

    *timeout* and *retry_interval* are accepted for nova interface
    compatibility but are not used.
    """
    node = self._get_node_by_uuid(instance.uuid)
    if node is None:
        return
    node.stop()
def get_info(self, instance):
    """Return a nova-style info dict for *instance*.

    Memory and CPU statistics are not tracked and are reported as zero;
    only the power state and provider id are meaningful.
    """
    aws_node = self._get_node_by_uuid(instance.uuid)
    if aws_node is None:
        node_id = 0
        node_power_state = power_state.NOSTATE
    else:
        node_id = aws_node.id
        node_power_state = POWER_STATE_MAP[aws_node.state['Code']]
    return {
        'state': node_power_state,
        'max_mem_kb': 0,   # maximum memory in KBytes (not tracked)
        'mem_kb': 0,       # memory in KBytes used (not tracked)
        'num_cpu': 0,      # number of virtual CPUs (not tracked)
        'cpu_time_ns': 0,  # CPU time used in nanoseconds (not tracked)
        'id': node_id,
    }
def reboot(self, instance, *args, **kwargs):
    """Reboot the EC2 instance backing *instance*, if one exists."""
    node = self._get_node_by_uuid(instance.uuid)
    if node is None:
        return
    node.reboot()
def attach_volume(self, context, connection_info, instance, mountpoint,
                  disk_bus=None, device_type=None, encryption=None):
    """Attach the disk to the instance at mountpoint using info."""
    instance_name = instance['name']
    if instance_name not in self._mounts:
        self._mounts[instance_name] = {}
    # Remember the attachment so detach_volume can clean it up.
    self._mounts[instance_name][mountpoint] = connection_info
    volume_id = connection_info['data']['volume_id']
    # ec2 only attaches volumes at /dev/sdf through /dev/sdp
    # NOTE(review): ``volume_map`` is defined elsewhere in this module;
    # presumably it maps cinder volume ids to EC2 volume ids -- verify.
    # NOTE(review): the device is hard-coded to /dev/sdn and *mountpoint*
    # is ignored, so attaching a second volume to the same instance would
    # collide -- confirm this limitation is intentional.
    self.driver.attach_volume(volume_map[volume_id], instance['metadata']['ec2_id'], "/dev/sdn", dry_run=False)
def detach_volume(self, connection_info, instance, mountpoint,
                  encryption=None):
    """Detach the disk attached to the instance."""
    try:
        # Forget the bookkeeping entry; missing entries are not an error.
        del self._mounts[instance['name']][mountpoint]
    except KeyError:
        pass
    volume_id = connection_info['data']['volume_id']
    # NOTE(review): ``volume_map`` is defined elsewhere in this module, and
    # the device name mirrors the hard-coded /dev/sdn in attach_volume --
    # confirm both against the attach path.
    self.driver.detach_volume(volume_map[volume_id], instance_id=instance['metadata']['ec2_id'],
                              device="/dev/sdn", force=False, dry_run=False)
def snapshot(self, context, instance, image_id, update_task_state):
    """Create an AMI from the running EC2 node and register it in glance.

    Only proceeds while the node is in the 'running' state; otherwise the
    problem is logged and the method returns without snapshotting.
    """
    aws_node = self._get_node_by_uuid(instance.uuid)
    update_task_state(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_SNAPSHOT)
    if aws_node.state['Name'] == 'running':
        ec2_image = aws_node.create_image(Name=str(image_id),
                                          Description="Image from OpenStack", NoReboot=False, DryRun=False)
    else:
        # TODO: else case
        LOG.error('Node state: "{}". Must be "running" for snapshot.'.format(aws_node.state['Name']))
        return
    # The instance will be in pending state when it comes up, waiting for it to be in available
    self._wait_for_image_state(ec2_image, "available")
    # Point the glance record at the finished AMI.
    image_api = glance.get_default_image_service()
    image_ref = glance.generate_image_url(image_id)
    metadata = {'is_public': False,
                'location': image_ref,
                'properties': {
                    'kernel_id': instance['kernel_id'],
                    'image_state': 'available',
                    'owner_id': instance['project_id'],
                    'ramdisk_id': instance['ramdisk_id'],
                    'ec2_image_id': ec2_image.id}
                }
    # TODO: HTTPInternalServerError: 500 Internal Server Error:
    # TODO: The server has either erred or is incapable of performing the requested operation.
    image_api.update(context, image_id, metadata)
def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance,
                     block_device_info=None, power_on=True):
    """Apply a resize: stop the EC2 node and switch its instance type.

    The node is started again later, in confirm_migration().
    """
    LOG.info("***** Calling FINISH MIGRATION *******************")
    aws_node = self._get_node_by_uuid(instance.uuid)
    # EC2 instance needs to be stopped to modify its attributes. So we stop
    # the instance, modify the instance type, and restart it afterwards.
    aws_node.stop()
    self._wait_for_state(instance, aws_node, "stopped", power_state.SHUTDOWN)
    # NOTE(review): ``flavor_map`` is defined elsewhere in this module;
    # presumably it maps nova flavor ids to EC2 instance types -- verify.
    new_instance_type = flavor_map[migration['new_instance_type_id']]
    aws_node.modify_attribute('instanceType', new_instance_type)
def confirm_migration(self, migration, instance, network_info):
    """Complete a resize by powering the EC2 node back on."""
    LOG.info("***** Calling CONFIRM MIGRATION *******************")
    node = self._get_node_by_uuid(instance.uuid)
    node.start()
    self._wait_for_state(instance, node, "running", power_state.RUNNING)
def _get_node_by_uuid(self, uuid):
    """Find the EC2 instance tagged with the given OpenStack server uuid.

    Returns the boto3 Instance resource, or None when no match exists.
    """
    tag_filter = [{'Name': 'tag:openstack_server_id', 'Values': [uuid]}]
    matches = list(self.driver.instances.filter(Filters=tag_filter))
    if not matches:
        return None
    return matches[0]
def _wait_for_image_state(self, ami, desired_state):
    """Block until the AMI *ami* reaches *desired_state*.

    Polls every 3 seconds via a FixedIntervalLoopingCall; transient
    BotoCoreErrors are logged and the poll is retried.

    :params:ami: the boto3 Image resource being created
    :params:desired_state: the desired new state of the image to be in.
    """
    def _wait_for_state():
        """Called at an interval until the AMI image is available."""
        try:
            # Re-fetch the image on each poll so we observe current state.
            image = self.driver.Image(ami.id)
            if image.state == desired_state:
                LOG.info("Image has changed state to %s." % desired_state)
                # LoopingCallDone terminates the polling loop.
                raise loopingcall.LoopingCallDone()
            else:
                LOG.info("Image state %s." % image.state)
        except BotoCoreError as e:
            # Transient API errors: log and let the next poll retry.
            LOG.info("BotoCoreError: {}".format(e))
            pass
    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_state)
    timer.start(interval=3).wait()
def _wait_for_state(self, instance, ec2_node, desired_state, desired_power_state):
    """Block until *ec2_node* reaches *desired_state* (e.g. 'running').

    Polls the node's power state every second; when waiting for 'running'
    it additionally waits for the EC2 system status check to pass.

    :param instance: nova instance (unused; kept for interface stability)
    :param ec2_node: boto3 EC2 Instance resource being watched
    :param desired_state: EC2 state *name*, such as 'running' or 'stopped'
    :param desired_power_state: nova power_state constant (unused here)
    """
    def _wait_for_power_state():
        """Called at an interval until the VM reaches the desired state."""
        # BUGFIX: refresh cached resource attributes -- boto3 caches them
        # at first access, so without reload() this loop would never see
        # a state change and would spin forever.
        ec2_node.reload()
        # BUGFIX: ec2_node.state is a dict like {'Code': 16, 'Name':
        # 'running'} (see the state['Name'] / state['Code'] usage above);
        # comparing the dict itself to a string could never match.
        if ec2_node.state['Name'] == desired_state:
            LOG.info("Instance has changed state to %s." % desired_state)
            raise loopingcall.LoopingCallDone()
    def _wait_for_status_check():
        """Power state may be ON while the status checks still run."""
        ec2_node.reload()
        # NOTE(review): boto3 Instance resources do not expose
        # system_status/instance_status attributes directly; this likely
        # needs describe_instance_status() -- confirm against boto3 docs.
        if ec2_node.system_status.status == 'ok':
            LOG.info("Instance status check is %s / %s" %
                     (ec2_node.system_status.status, ec2_node.instance_status.status))
            raise loopingcall.LoopingCallDone()
    # waiting for the power state to change
    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_state)
    timer.start(interval=1).wait()
    # waiting for the status of the machine to be in running
    if desired_state == 'running':
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_status_check)
        timer.start(interval=0.5).wait()
|
{"/kozinaki/providers/aws/provider.py": ["/kozinaki/providers/aws/config.py", "/kozinaki/providers/common.py"], "/kozinaki/providers/gcp/provider.py": ["/kozinaki/providers/common.py"], "/kozinaki/manage/manage.py": ["/kozinaki/manage/utils.py"], "/kozinaki/providers/libcloud_driver/provider.py": ["/kozinaki/providers/common.py", "/kozinaki/providers/libcloud_driver/extended_drivers.py"], "/kozinaki/manage/__main__.py": ["/kozinaki/manage/manage.py"]}
|
23,657
|
compunova/kozinaki
|
refs/heads/master
|
/kozinaki/providers/__init__.py
|
# Copyright (c) 2016 CompuNova Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from azure.provider import AzureProvider
from aws.provider import AWSProvider
from gcp.provider import GCPProvider
from libcloud_driver.provider import LibCloudProvider
def get_provider_by_name(provider_name):
    """Provider factory method.

    Names prefixed with 'LC_' are routed to the generic libcloud provider;
    any other name must be one of the natively supported clouds.
    """
    providers = {
        'AZURE': AzureProvider,
        'AWS': AWSProvider,
        'GCP': GCPProvider,
    }
    if provider_name.startswith('LC_'):
        return LibCloudProvider(provider_name)
    try:
        provider_cls = providers[provider_name]
    except KeyError:
        raise NotImplementedError('Provider {} not implemented in driver. Available providers: {}'
                                  .format(provider_name, providers.keys()))
    return provider_cls()
|
{"/kozinaki/providers/aws/provider.py": ["/kozinaki/providers/aws/config.py", "/kozinaki/providers/common.py"], "/kozinaki/providers/gcp/provider.py": ["/kozinaki/providers/common.py"], "/kozinaki/manage/manage.py": ["/kozinaki/manage/utils.py"], "/kozinaki/providers/libcloud_driver/provider.py": ["/kozinaki/providers/common.py", "/kozinaki/providers/libcloud_driver/extended_drivers.py"], "/kozinaki/manage/__main__.py": ["/kozinaki/manage/manage.py"]}
|
23,658
|
compunova/kozinaki
|
refs/heads/master
|
/setup.py
|
from setuptools import setup, find_packages
def readme():
    """Return the contents of README.md for use as the long description."""
    with open('README.md') as fh:
        return fh.read()
# Package metadata and dependencies for the kozinaki OpenStack driver.
setup(
    name='kozinaki',
    description='OpenStack multi-cloud driver for AWS, Azure',
    url='https://github.com/compunova/kozinaki.git',
    author='Compunova',
    author_email='kozinaki@compu-nova.com',
    version='0.1.8',
    long_description=readme(),  # README.md contents
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'haikunator',            # random name generator (GCP provider)
        'requests==2.11',
        'oauth2client==3.0.0',   # Google credentials
        'azure==2.0.0rc6',       # Azure provider SDK
        'boto3',                 # AWS provider SDK
        'google-cloud',
        'cryptography==1.4',
        'Fabric3',               # shell commands in kozinaki-manage
        'Jinja2',                # service/config templating
        'PyYAML',
        'terminaltables',
        'apache-libcloud',       # generic LC_* providers
        'click',                 # CLI framework for kozinaki-manage
        'click-spinner',
        'click-didyoumean'
    ],
    entry_points={
        'console_scripts': [
            # node management CLI installed as 'kozinaki-manage'
            'kozinaki-manage=kozinaki.manage.__main__:main',
        ],
    }
)
|
{"/kozinaki/providers/aws/provider.py": ["/kozinaki/providers/aws/config.py", "/kozinaki/providers/common.py"], "/kozinaki/providers/gcp/provider.py": ["/kozinaki/providers/common.py"], "/kozinaki/manage/manage.py": ["/kozinaki/manage/utils.py"], "/kozinaki/providers/libcloud_driver/provider.py": ["/kozinaki/providers/common.py", "/kozinaki/providers/libcloud_driver/extended_drivers.py"], "/kozinaki/manage/__main__.py": ["/kozinaki/manage/manage.py"]}
|
23,659
|
compunova/kozinaki
|
refs/heads/master
|
/kozinaki/providers/gcp/provider.py
|
# Copyright (c) 2016 CompuNova Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from haikunator import Haikunator
from oslo_config import cfg
from oslo_service import loopingcall
from nova.compute import power_state
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from googleapiclient.errors import HttpError
from ..common import BaseProvider
LOG = logging.getLogger(__name__)
# Shared name generator instance (parity with the other providers).
haikunator = Haikunator()
# Map of GCP instance 'status' values to nova power states.
# NOTE(review): the integer keys (0, 32, 64, 80) look like leftover EC2
# state codes; GCP statuses are strings, so those entries can never match
# -- confirm and prune.
POWER_STATE_MAP = {
    0: power_state.NOSTATE,
    'RUNNING': power_state.RUNNING,
    32: power_state.NOSTATE,
    'TERMINATED': power_state.SHUTDOWN,
    64: power_state.NOSTATE,
    80: power_state.SHUTDOWN,
}
# os_distro image-family prefix -> GCP image project hosting that family.
IMAGE_MAP = {
    'centos': 'centos-cloud',
    'coreos': 'coreos-cloud',
    'debian': 'debian-cloud',
    'rhel': 'rhel-cloud',
    'sles': 'suse-cloud',
    'ubuntu': 'ubuntu-cloud',
    'windows': 'windows-cloud',
    'sql': 'windows-sql-cloud',
}
class GCPProvider(BaseProvider):
    """Nova compute provider backed by the Google Compute Engine API."""

    def __init__(self):
        super(GCPProvider, self).__init__()
        self.name = 'GCP'
        # nova.conf section for this provider: [kozinaki_GCP]
        self.config_name = 'kozinaki_' + self.name
        # All GCE resources created by this driver are name-prefixed so
        # they can be told apart from instances created outside OpenStack.
        self.vm_prefix = 'kozinaki-'
        self.driver = self.get_driver()
        self._mounts = {}

    def get_driver(self):
        """Build an authenticated GCE 'compute' v1 API client."""
        config = self.load_config()
        # Application-default credentials are read from this env variable.
        os.environ.update(
            {'GOOGLE_APPLICATION_CREDENTIALS': config['path_to_json_token']})
        credentials = GoogleCredentials.get_application_default()
        compute = discovery.build('compute', 'v1', credentials=credentials)
        return compute

    def load_config(self):
        """Register and return this provider's options from the nova config.

        Options are read from the nova.conf section named by
        ``self.config_name`` (``[kozinaki_GCP]``).
        """
        provider_opts = [
            cfg.StrOpt('path_to_json_token', help='Google API json token file', secret=True),
            cfg.StrOpt('project', help='Google project id'),
            cfg.StrOpt('zone', help='Google zone name'),
        ]
        cfg.CONF.register_opts(provider_opts, self.config_name)
        return cfg.CONF[self.config_name]

    def create_node(self, instance, image_meta, *args, **kwargs):
        """Create a GCE instance for the given nova instance.

        The image family comes from the image's os_distro property and the
        machine type from the nova flavor name. Blocks until the insert
        operation completes.
        """
        config = self.load_config()
        image_family = getattr(image_meta.properties, 'os_distro')
        flavor_name = instance.flavor['name']
        # Resolve which GCP project hosts this image family.
        for family_startwith, project in IMAGE_MAP.items():
            if image_family.startswith(family_startwith):
                image_project = project
                break
        else:
            raise Exception('Project for image family "{}" not found'.format(image_family))
        # Latest published image of the family.
        image_response = self.driver.images().getFromFamily(project=image_project, family=image_family).execute()
        source_disk_image = image_response['selfLink']
        # Configure the machine
        machine_type = "zones/{zone}/machineTypes/{flavor}".format(zone=config['zone'], flavor=flavor_name)
        machine_config = {
            'name': self.vm_prefix + instance.uuid,
            'machineType': machine_type,
            # Specify the boot disk and the image to use as a source.
            'disks': [
                {
                    'boot': True,
                    'autoDelete': True,
                    'initializeParams': {
                        'sourceImage': source_disk_image,
                    }
                }
            ],
            # Specify a network interface with NAT to access the public
            # internet.
            'networkInterfaces': [{
                'network': 'global/networks/default',
                'accessConfigs': [
                    {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
                ]
            }],
            # Allow the instance to access cloud storage and logging.
            'serviceAccounts': [{
                'email': 'default',
                'scopes': [
                    'https://www.googleapis.com/auth/devstorage.read_write',
                    'https://www.googleapis.com/auth/logging.write'
                ]
            }],
        }
        operation = self.driver.instances().insert(
            project=config['project'], zone=config['zone'], body=machine_config).execute()
        self.wait_for_operation(operation)

    def list_nodes(self):
        """Return all GCE instances in the configured project/zone."""
        config = self.load_config()
        result = self.driver.instances().list(project=config['project'], zone=config['zone']).execute()
        return result['items']

    def destroy(self, instance, *args, **kwargs):
        """Delete the GCE instance backing *instance*, if one exists."""
        config = self.load_config()
        gcp_instance = self._get_gcp_instance(instance, config['project'], config['zone'])
        if gcp_instance:
            operation = self.driver.instances().delete(
                project=config['project'], zone=config['zone'], instance=gcp_instance['name']).execute()
            self.wait_for_operation(operation)

    def list_instances(self):
        """Alias of list_nodes(): all instances in the project/zone."""
        config = self.load_config()
        result = self.driver.instances().list(project=config['project'], zone=config['zone']).execute()
        return result['items']

    def list_sizes(self):
        # NOTE(review): despite the name this lists *instances*, not machine
        # types -- confirm whether machineTypes().list() was intended.
        config = self.load_config()
        result = self.driver.instances().list(project=config['project'], zone=config['zone']).execute()
        return result['items']

    def power_on(self, context, instance, network_info, block_device_info=None):
        """Start the stopped GCE instance backing *instance*."""
        config = self.load_config()
        operation = self.driver.instances().start(
            project=config['project'], zone=config['zone'], instance=self.vm_prefix + instance.uuid).execute()
        self.wait_for_operation(operation)

    def list_instance_uuids(self):
        """Return ids for every node; see list_nodes() for the source."""
        return [node.id for node in self.list_nodes()]

    def power_off(self, instance, timeout=0, retry_interval=0):
        """Stop the running GCE instance backing *instance*."""
        config = self.load_config()
        operation = self.driver.instances().stop(
            project=config['project'], zone=config['zone'], instance=self.vm_prefix + instance.uuid).execute()
        self.wait_for_operation(operation)

    def get_info(self, instance):
        """Return a nova-style info dict; memory/cpu stats are not tracked."""
        config = self.load_config()
        # Keep the nova instance argument distinct from the GCE response
        # (the original shadowed it, which made the code hard to follow).
        gcp_instance = self.driver.instances().get(
            project=config['project'], zone=config['zone'], instance=self.vm_prefix + instance.uuid).execute()
        if gcp_instance:
            node_power_state = POWER_STATE_MAP.get(gcp_instance['status'], power_state.NOSTATE)
            node_id = gcp_instance['id']
        else:
            node_power_state = power_state.NOSTATE
            node_id = 0
        node_info = {
            'state': node_power_state,
            'max_mem_kb': 0,   # maximum memory in KBytes (not tracked)
            'mem_kb': 0,       # memory in KBytes used (not tracked)
            'num_cpu': 0,      # number of virtual CPUs (not tracked)
            'cpu_time_ns': 0,  # CPU time used in nanoseconds (not tracked)
            'id': node_id
        }
        return node_info

    def reboot(self, instance, *args, **kwargs):
        """Reset (hard reboot) the GCE instance backing *instance*."""
        config = self.load_config()
        operation = self.driver.instances().reset(
            project=config['project'], zone=config['zone'], instance=self.vm_prefix + instance.uuid).execute()
        self.wait_for_operation(operation)

    def get_or_create_volume(self, project, zone, volume_name):
        """Return the selfLink of disk *volume_name*, creating it if absent."""
        try:
            response = self.driver.disks().get(project=project, zone=zone, disk=volume_name).execute()
            return response['selfLink']
        except HttpError as e:
            # Anything but 404 is a real API failure.
            if e.resp.status != 404:
                raise e
        # Disk not found -- create a default 10 GB standard disk.
        disk_body = {
            "name": volume_name,
            "description": "Created by kozinaki",
            "type": "projects/{project}/zones/{zone}/diskTypes/pd-standard".format(project=project, zone=zone),
            "sizeGb": "10"
        }
        operation = self.driver.disks().insert(project=project, zone=zone, body=disk_body).execute()
        self.wait_for_operation(operation)
        return operation['selfLink']

    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach the disk to the instance at mountpoint using info."""
        config = self.load_config()
        instance_name = self.vm_prefix + instance.uuid
        if instance_name not in self._mounts:
            self._mounts[instance_name] = {}
        self._mounts[instance_name][mountpoint] = connection_info
        volume_id = connection_info['data']['volume_id']
        volume_self_link = self.get_or_create_volume(
            project=config['project'], zone=config['zone'], volume_name=volume_id)
        body = {
            "kind": "compute#attachedDisk",
            "source": volume_self_link,
            "deviceName": volume_id,
            "boot": False,
            "autoDelete": False,
        }
        operation = self.driver.instances().attachDisk(
            project=config['project'], zone=config['zone'], instance=self.vm_prefix + instance.uuid, body=body).execute()
        self.wait_for_operation(operation)

    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach the disk attached to the instance."""
        config = self.load_config()
        try:
            del self._mounts[instance['name']][mountpoint]
        except KeyError:
            pass
        volume_id = connection_info['data']['volume_id']
        operation = self.driver.instances().detachDisk(
            project=config['project'], zone=config['zone'], instance=self.vm_prefix + instance.uuid, deviceName=volume_id).execute()
        self.wait_for_operation(operation)

    def snapshot(self, context, instance, image_id, update_task_state):
        """Snapshot the instance's boot disk; blocks until completion."""
        config = self.load_config()
        body = {
            "sourceDisk": "projects/{project}/zones/{zone}/disks/{instance}".format(
                project=config['project'],
                zone=config['zone'],
                instance=self.vm_prefix + instance.uuid
            ),
            "name": "snapshot-{}".format(self.vm_prefix + instance.uuid)
        }
        operation = self.driver.disks().createSnapshot(
            project=config['project'], zone=config['zone'], disk=self.vm_prefix + instance.uuid, body=body).execute()
        self.wait_for_operation(operation)

    def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        """Apply a resize by switching the GCE machine type.

        The instance is started again in confirm_migration().
        """
        LOG.info("***** Calling FINISH MIGRATION *******************")
        config = self.load_config()
        flavor_name = migration['new_instance_type_name']
        body = {
            "machineType": "zones/{zone}/machineTypes/{flavor}".format(zone=config['zone'], flavor=flavor_name)
        }
        # BUGFIX: the keyword was misspelled 'instnace', which made the API
        # client raise TypeError (unexpected parameter) and omit the
        # required 'instance' parameter.
        operation = self.driver.instances().setMachineType(
            project=config['project'], zone=config['zone'],
            instance=self.vm_prefix + instance.uuid, body=body).execute()
        self.wait_for_operation(operation)

    def confirm_migration(self, migration, instance, network_info):
        """Complete a resize by starting the instance back up."""
        LOG.info("***** Calling CONFIRM MIGRATION *******************")
        config = self.load_config()
        operation = self.driver.instances().start(
            project=config['project'], zone=config['zone'], instance=self.vm_prefix + instance.uuid).execute()
        self.wait_for_operation(operation)

    def _get_gcp_instance(self, nova_instance, project, zone):
        """Fetch the GCE instance dict for *nova_instance*.

        Returns False (not None) when the instance does not exist; callers
        only test truthiness, so this is preserved for compatibility.
        """
        try:
            response = self.driver.instances().get(
                project=project, zone=zone, instance=self.vm_prefix + nova_instance.uuid).execute()
            return response
        except HttpError as e:
            if e.resp.status != 404:
                raise e
            return False

    def wait_for_operation(self, operation):
        """Poll a zone operation every second until it reaches DONE.

        Raises if the finished operation carries an 'error' payload.
        """
        config = self.load_config()
        LOG.info('Waiting for operation to finish...')
        def waiting():
            result = self.driver.zoneOperations().get(
                project=config['project'],
                zone=config['zone'],
                operation=operation['name']).execute()
            LOG.info('Operation state {}'.format(result['status']))
            if result['status'] == 'DONE':
                if 'error' in result:
                    raise Exception(result['error'])
                # Stop the polling loop.
                raise loopingcall.LoopingCallDone()
        timer = loopingcall.FixedIntervalLoopingCall(waiting)
        timer.start(interval=1).wait()
|
{"/kozinaki/providers/aws/provider.py": ["/kozinaki/providers/aws/config.py", "/kozinaki/providers/common.py"], "/kozinaki/providers/gcp/provider.py": ["/kozinaki/providers/common.py"], "/kozinaki/manage/manage.py": ["/kozinaki/manage/utils.py"], "/kozinaki/providers/libcloud_driver/provider.py": ["/kozinaki/providers/common.py", "/kozinaki/providers/libcloud_driver/extended_drivers.py"], "/kozinaki/manage/__main__.py": ["/kozinaki/manage/manage.py"]}
|
23,660
|
compunova/kozinaki
|
refs/heads/master
|
/kozinaki/manage/manage.py
|
import os
import re
import json
import inspect
from collections import defaultdict
import yaml
from fabric.api import local, settings, hide
from libcloud.compute.types import Provider, OLD_CONSTANT_TO_NEW_MAPPING
from libcloud.compute.providers import get_driver as get_libcloud_driver
from .utils import render_template, get_templates_vars, render_json_to_template
# Directory containing this module; config.yaml lives next to it.
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
# Parsed contents of config.yaml, shared by every class in this module.
with open(os.path.join(BASE_PATH, 'config.yaml'), 'r') as conf_file:
    # BUGFIX: yaml.load() without an explicit Loader is deprecated and can
    # construct arbitrary Python objects; safe_load is correct for plain
    # configuration data.
    CONFIG = yaml.safe_load(conf_file)
class Service:
    """A kozinaki systemd unit for one compute node.

    Wraps creation, deletion and systemctl control of the generated
    ``kozinaki-<node>-<type>.service`` unit file.
    """
    def __init__(self, node_name, service_type):
        # Service-related settings from config.yaml.
        self.config = CONFIG['services']
        # Unit file name, e.g. 'kozinaki-mynode-nova.service'.
        self.name = '{prefix}-{node_name}-{type}.service'.format(
            prefix=self.config['prefix'],
            node_name=node_name,
            type=service_type
        )
        self.type = service_type
    @property
    def exist(self):
        """True when systemd already knows a unit file with this name."""
        with settings(warn_only=True):
            with hide('commands'):
                result = local('systemctl list-unit-files', capture=True)
                for line in result.split('\n'):
                    if line.startswith(self.name):
                        return True
        return False
    def create(self, template_context):
        """Render the unit file from its template, then enable and start it."""
        render_template(
            template=self.config['templates'][self.type],
            to_file=os.path.join(self.config['systemd_dir'], self.name),
            context=template_context
        )
        # Enable and run compute node service
        self.command('enable')
        self.command('start')
    def delete(self):
        """Disable and stop the unit, then remove its unit file."""
        # Disable and stop service
        self.command('disable')
        self.command('stop')
        # Delete service file
        service_file = os.path.join(CONFIG['services']['systemd_dir'], self.name)
        if os.path.exists(service_file):
            os.remove(service_file)
    def command(self, cmd):
        """Run ``systemctl <cmd>`` on this unit.

        :raises BadServiceCommand: when *cmd* is not whitelisted in config
        """
        valid_commands = CONFIG['services']['commands']
        if cmd not in valid_commands:
            raise BadServiceCommand('Command "{}" not supported. Valid commands: {}'.format(cmd, valid_commands))
        with settings(warn_only=True):
            response = local('systemctl {cmd} {nova_service}'.format(cmd=cmd, nova_service=self.name), capture=False)
        return response
class Node:
    """A kozinaki compute node: a named bundle of systemd services plus
    their rendered config files."""
    def __init__(self, name, node_type):
        self.name = name
        self.type = node_type
        # Node-related settings from config.yaml.
        self.config = CONFIG['nodes']
        # Map of service name -> Service instance (or None if not installed).
        self.services = {service_name: self._get_service(service_name) for service_name in self.config['services']}
    def create(self, template_context):
        """Render config files and create any services that do not exist yet."""
        for service_name, service in self.services.items():
            if not service:
                service_conf = self.config['services'][service_name]
                # Path of the rendered per-node config, passed on to the
                # service template as 'config_file'.
                template_context['config_file'] = os.path.join(
                    service_conf['dir_dest'], '{prefix}-{name}-{type}.conf'.format(
                        prefix=self.config['prefix'],
                        name=self.name,
                        type=self.type
                    )
                )
                render_template(
                    template=service_conf['template'],
                    to_file=template_context['config_file'],
                    context=template_context
                )
                new_service = Service(node_name=self.name, service_type=service_name)
                new_service.create(template_context=template_context)
                self.services[service_name] = new_service
    def delete(self):
        """Tear down every service of this node and remove its config files."""
        # Delete all services
        for service_name, service in self.services.items():
            if service:
                service.delete()
            # Delete configs
            service_conf = self.config['services'][service_name]
            config_file = os.path.join(
                service_conf['dir_dest'], '{prefix}-{name}-{type}.conf'.format(
                    prefix=self.config['prefix'],
                    name=self.name,
                    type=self.type
                )
            )
            if os.path.exists(config_file):
                os.remove(config_file)
    def command(self, cmd):
        """Run a systemctl command on every installed service; returns the
        list of per-service responses."""
        response = []
        for service_name, service in self.services.items():
            if service:
                response.append(service.command(cmd))
        return response
    def _get_service(self, service_type):
        # Return the Service only when systemd already knows its unit file.
        service = Service(node_name=self.name, service_type=service_type)
        return service if service.exist else None
class NodeManager:
    """Creates, deletes and inspects kozinaki compute nodes on this host."""
    def __init__(self):
        # Provider catalog loaded from providers.json (see
        # _get_valid_node_types); keys: 'providers', 'basic_tokens'.
        self.valid_node_types = self._get_valid_node_types()
    def node_create(self, node_name, node_type, **kwargs):
        """Create a new node of *node_type*; remaining kwargs fill templates.

        :raises NodeTypeNotFound: unknown provider type
        :raises NodeAlreadyExist: a node with this name already exists
        :raises AttributeError: required template parameters are missing
        """
        # Check node type
        if node_type not in self.valid_node_types['providers']:
            # NOTE(review): the message formats self.valid_node_types.keys()
            # (top-level keys), not the provider names -- confirm whether
            # ['providers'].keys() was intended.
            raise NodeTypeNotFound('Node type "{}" not found. Valid types: {}'.format(node_type,
                                                                                      self.valid_node_types.keys()))
        # Check if node already exist
        enabled_nodes = self.node_list()
        if node_name in [node.name for node in enabled_nodes]:
            raise NodeAlreadyExist('Node "{}" already exist'.format(node_name))
        kwargs['hostname'] = node_name
        kwargs['node_type'] = node_type
        # Check if we got all necessary params in kwargs
        templates_vars = self.get_node_params(node_type)
        if not all([var in kwargs for var in templates_vars]):
            raise AttributeError('Too few arguments to create "{}" node. Need to provide: {}'
                                 .format(node_type, templates_vars.keys()))
        # Rendered provider section text injected into the node config.
        kwargs['provider_config'] = render_json_to_template(
            provider=self.valid_node_types['providers'][node_type],
            token_values=kwargs
        )
        new_node = Node(name=node_name, node_type=node_type)
        new_node.create(template_context=kwargs)
    def node_delete(self, node_name):
        """Delete the named node and all of its services/configs."""
        node = self.node_get(node_name)
        node.delete()
    def node_get(self, node_name):
        """Return the Node with the given name, or None when absent."""
        all_nodes = self.node_list()
        for node in all_nodes:
            if node.name == node_name:
                return node
    @staticmethod
    def node_list():
        """Discover existing nodes by scanning rendered nova config files."""
        nodes = []
        nodes_conf = CONFIG['nodes']
        for filename in os.listdir(nodes_conf['services']['nova']['dir_dest']):
            # File names look like '<prefix>-<name>-<type>.conf'.
            match = re.search(r'{}-(?P<name>.+)-(?P<type>.+)\.conf'.format(nodes_conf['prefix']), filename)
            if match:
                node = Node(
                    name=match.groupdict().get('name'),
                    node_type=match.groupdict().get('type')
                )
                nodes.append(node)
        return nodes
    def get_node_params(self, node_type=None):
        """Return template parameters per provider.

        With *node_type* given, returns that provider's parameter dict;
        otherwise a mapping of provider name -> parameter dict.
        """
        # Check if we got all necessary params in kwargs
        templates_vars = get_templates_vars(
            templates=[service['template'] for service_name, service in CONFIG['nodes']['services'].items()]
        )
        # Remove hostname and provider_config from vars, because hostname == node_name
        templates_vars.remove('hostname')
        templates_vars.remove('provider_config')
        all_node_params = {}
        for n_type_name, n_type_params in self.valid_node_types['providers'].items():
            node_params = defaultdict(lambda: 'Description not provided')
            node_params.update(n_type_params.get('tokens', {}))
            for token in templates_vars:
                if token in self.valid_node_types['basic_tokens']:
                    node_params[token] = self.valid_node_types['basic_tokens'][token]
                else:
                    node_params[token] = 'Description not provided'
            if n_type_name == node_type:
                return node_params
            all_node_params[n_type_name] = node_params
        return all_node_params
    @staticmethod
    def _get_libcloud_providers():
        """Introspect libcloud driver constructors into provider entries.

        Currently unused (see _get_valid_node_types); kept for reference.
        """
        providers = {}
        for provider_name in [item for item in vars(Provider) if not item.startswith('_')]:
            # Skip deprecated aliases of renamed providers.
            if provider_name.lower() in OLD_CONSTANT_TO_NEW_MAPPING:
                continue
            try:
                provider_cls = get_libcloud_driver(getattr(Provider, provider_name))
            except Exception as e:
                continue
            provider_cls_info = inspect.getargspec(provider_cls)
            node_params = defaultdict(lambda: 'Description not provided')
            for arg in provider_cls_info.args:
                if arg not in ['cls', 'self']:
                    node_params[arg] = {
                        'description': {
                            'en': '',
                            'ru': ''
                        },
                        'type': 'str'
                    }
            providers[provider_name] = {
                # NOTE(review): 'kozinaki_GCP' looks hard-coded for all
                # libcloud providers -- presumably a placeholder; verify.
                'section_name': 'kozinaki_GCP',
                'tokens': node_params
            }
        return providers
    def _get_valid_node_types(self):
        """Load the static provider catalog shipped as providers.json."""
        with open(os.path.join(BASE_PATH, 'providers.json'), 'r') as f:
            providers_data = json.load(f)
        # libcloud_providers = self._get_libcloud_providers()
        # providers_data['providers'].update(libcloud_providers)
        return providers_data
# Compute node manager exceptions
class ComputeNodeManager(Exception):
    """Base class for every node-management error in this module."""

class BadServiceCommand(ComputeNodeManager):
    """Raised when an unsupported systemctl command is requested."""

class NodeNotFound(ComputeNodeManager):
    """Raised when a named compute node does not exist."""

class NodeAlreadyExist(ComputeNodeManager):
    """Raised when creating a node whose name is already taken."""

class NodeTypeNotFound(ComputeNodeManager):
    """Raised when an unknown provider/node type is requested."""
|
{"/kozinaki/providers/aws/provider.py": ["/kozinaki/providers/aws/config.py", "/kozinaki/providers/common.py"], "/kozinaki/providers/gcp/provider.py": ["/kozinaki/providers/common.py"], "/kozinaki/manage/manage.py": ["/kozinaki/manage/utils.py"], "/kozinaki/providers/libcloud_driver/provider.py": ["/kozinaki/providers/common.py", "/kozinaki/providers/libcloud_driver/extended_drivers.py"], "/kozinaki/manage/__main__.py": ["/kozinaki/manage/manage.py"]}
|
23,661
|
compunova/kozinaki
|
refs/heads/master
|
/kozinaki/providers/libcloud_driver/extended_drivers.py
|
from libcloud.utils.py3 import httplib
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.base import NodeSize
from libcloud.common.linode import API_ROOT
def get_extended_driver(driver_cls, nova_config=None):
    """Return *driver_cls*, swapped for a kozinaki-extended subclass when
    one exists for that provider.

    The nova config is stashed on the class so extended methods (e.g.
    Packet's list_nodes) can read provider settings.
    """
    overrides = {
        'Vultr': VultrNodeDriverExt,
        'Linode': LinodeNodeDriverExt,
        'Packet': PacketNodeDriverExt,
    }
    driver = overrides.get(driver_cls.name, driver_cls)
    # NOTE: stored on the class object itself, shared by all instances.
    driver.nova_config = nova_config
    return driver
class VultrNodeDriverExt(get_driver(Provider.VULTR)):
    """Vultr driver extended with explicit halt/start endpoints."""
    def ex_shutdown_node(self, node):
        # 'halt' powers the server off; True on HTTP 200.
        params = {'SUBID': node.id}
        res = self.connection.post('/v1/server/halt', params)
        return res.status == httplib.OK
    def ex_power_on_node(self, node):
        # 'start' powers the server (back) on; True on HTTP 200.
        params = {'SUBID': node.id}
        res = self.connection.post('/v1/server/start', params)
        return res.status == httplib.OK
class LinodeNodeDriverExt(get_driver(Provider.LINODE)):
    """Linode driver extended with shutdown/boot API actions."""
    def ex_shutdown_node(self, node):
        params = {"api_action": "linode.shutdown", "LinodeID": node.id}
        self.connection.request(API_ROOT, params=params)
        # The Linode API queues the job; success is assumed here.
        return True
    def ex_power_on_node(self, node):
        params = {"api_action": "linode.boot", "LinodeID": node.id}
        self.connection.request(API_ROOT, params=params)
        return True
class PacketNodeDriverExt(get_driver(Provider.PACKET)):
    """Packet driver extended with power actions and project-scoped listing.

    Unlike the stock driver, list_nodes/create_node operate inside the
    project configured in nova (``self.nova_config``), and _to_size parses
    the richer 'plan' payload returned when ``include=plan`` is requested.
    """
    def ex_shutdown_node(self, node):
        params = {'type': 'power_off'}
        res = self.connection.request('/devices/%s/actions' % (node.id),
                                      params=params, method='POST')
        return res.status == httplib.OK
    def ex_power_on_node(self, node):
        params = {'type': 'power_on'}
        res = self.connection.request('/devices/%s/actions' % (node.id),
                                      params=params, method='POST')
        return res.status == httplib.OK
    def list_nodes(self):
        # nova_config is attached by get_extended_driver().
        data = self.connection.request('/projects/%s/devices' % (self.nova_config.get('project_id')),
                                       params={'include': 'plan'}).object['devices']
        return list(map(self._to_node, data))
    def _to_size(self, data):
        # Convert a Packet 'plan' dict to a libcloud NodeSize.
        extra = {'description': data['description'], 'line': data['line']}
        ram = data['specs'].get('memory', {}).get('total')
        if ram:
            ram = ram.lower()
            # NOTE(review): when the unit is neither 'mb' nor 'gb', ram
            # stays a lowercase string rather than an int -- confirm all
            # plans report one of those units.
            if 'mb' in ram:
                ram = int(ram.replace('mb', ''))
            elif 'gb' in ram:
                ram = int(ram.replace('gb', '')) * 1024
        disk = 0
        # Sum capacity over all drive groups (count * per-drive size in GB).
        for disks in data['specs'].get('drives', []):
            if 'GB' in disks['size']:
                disk_size = int(disks['size'].replace('GB', ''))
            elif 'TB' in disks['size']:
                size = disks['size'].replace('TB', '')
                disk_size = (float(size) if '.' in size else int(size)) * 1024
            else:
                raise Exception('Unknown disk size metric "{}"'.format(disks['size']))
            disk += disks['count'] * disk_size
        price = data['pricing']['hour']
        return NodeSize(id=data['slug'], name=data['name'], ram=ram, disk=disk,
                        bandwidth=0, price=price, extra=extra, driver=self)
    def create_node(self, name, size, image, location):
        """
        Create a node.
        :return: The newly created node.
        :rtype: :class:`Node`
        """
        params = {'hostname': name, 'plan': size.id,
                  'operating_system': image.id, 'facility': location.id,
                  'include': 'plan', 'billing_cycle': 'hourly'}
        data = self.connection.request('/projects/%s/devices' %
                                       (self.nova_config.get('project_id')),
                                       params=params, method='POST')
        status = data.object.get('status', 'OK')
        if status == 'ERROR':
            message = data.object.get('message', None)
            error_message = data.object.get('error_message', message)
            raise ValueError('Failed to create node: %s' % (error_message))
        return self._to_node(data=data.object)
|
{"/kozinaki/providers/aws/provider.py": ["/kozinaki/providers/aws/config.py", "/kozinaki/providers/common.py"], "/kozinaki/providers/gcp/provider.py": ["/kozinaki/providers/common.py"], "/kozinaki/manage/manage.py": ["/kozinaki/manage/utils.py"], "/kozinaki/providers/libcloud_driver/provider.py": ["/kozinaki/providers/common.py", "/kozinaki/providers/libcloud_driver/extended_drivers.py"], "/kozinaki/manage/__main__.py": ["/kozinaki/manage/manage.py"]}
|
23,662
|
compunova/kozinaki
|
refs/heads/master
|
/kozinaki/providers/libcloud_driver/provider.py
|
# Copyright (c) 2016 CompuNova Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import inspect
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.providers import get_driver as get_libcloud_driver
from oslo_config import cfg
from nova.compute import power_state
from ..common import BaseProvider
from .extended_drivers import get_extended_driver
LOG = logging.getLogger(__name__)
# Disable SSL check
libcloud.security.VERIFY_SSL_CERT = False
# Map libcloud node state strings to nova power_state constants.
# States without a direct nova equivalent fall back to NOSTATE.
POWER_STATE_MAP = {
    'running': power_state.RUNNING,
    'starting': power_state.NOSTATE,
    'rebooting': power_state.NOSTATE,
    'terminated': power_state.NOSTATE,
    'pending': power_state.NOSTATE,
    'unknown': power_state.NOSTATE,
    'stopping': power_state.NOSTATE,
    'stopped': power_state.SHUTDOWN,
    'suspended': power_state.SUSPENDED,
    'error': power_state.CRASHED,
    'paused': power_state.PAUSED,
    'reconfiguring': power_state.NOSTATE,
    'migrating': power_state.NOSTATE,
}
class LibCloudProvider(BaseProvider):
    """Kozinaki compute provider backed by an extended libcloud driver.

    *name* looks like ``LC_<PROVIDER>``: the first three characters are
    stripped to obtain the libcloud ``Provider`` constant, and driver
    credentials are read from the ``kozinaki_<name>`` section of the
    nova configuration.
    """

    def __init__(self, name):
        super(LibCloudProvider, self).__init__()
        self.name = name
        self.config_name = 'kozinaki_' + self.name
        # Drop the 'LC_' prefix to get the libcloud provider constant name.
        self.provider_name = self.name[3:]
        self.driver = self.get_driver()
        # instance name -> {mountpoint: connection_info}, maintained by
        # attach_volume/detach_volume.
        self._mounts = {}

    @staticmethod
    def _argspec(func):
        """Return the argspec of *func*.

        Prefers getfullargspec (getargspec was removed in Python 3.11);
        falls back to getargspec on interpreters that lack it.  Both
        expose the ``.args`` attribute used by callers.
        """
        getspec = getattr(inspect, 'getfullargspec', inspect.getargspec)
        return getspec(func)

    def get_driver(self):
        """Instantiate the extended driver, forwarding only the config
        options that match the driver constructor's parameters."""
        config = self.load_config()
        provider_cls = get_extended_driver(
            driver_cls=get_libcloud_driver(getattr(Provider, self.provider_name)),
            nova_config=config
        )
        provider_cls_info = self._argspec(provider_cls.__init__)
        driver = provider_cls(
            **{arg: value for arg, value in config.items()
               if arg in provider_cls_info.args and value is not None})
        return driver

    def load_config(self):
        """Load config options from nova config file or command line (for example: /etc/nova/nova.conf)
        Sample settings in nova config:
        [kozinaki_EC2]
        user=AKIAJR7NAEIZPWSTFBEQ
        key=zv9zSem8OE+k/axFkPCgZ3z3tLrhvFBaIIa0Ik0j
        """
        provider_cls = get_libcloud_driver(getattr(Provider, self.provider_name))
        provider_cls_info = self._argspec(provider_cls.__init__)
        # One StrOpt per driver constructor argument, plus the
        # kozinaki-specific extras.
        provider_opts = [cfg.StrOpt(arg) for arg in provider_cls_info.args]
        provider_opts.append(cfg.StrOpt('location'))
        provider_opts.append(cfg.StrOpt('root_password'))
        provider_opts.append(cfg.StrOpt('project_id'))
        cfg.CONF.register_opts(provider_opts, self.config_name)
        return cfg.CONF[self.config_name]

    def create_node(self, instance, image_meta, *args, **kwargs):
        """Create a provider node for the nova *instance*.

        Image, size and location are resolved by id against the driver's
        listings.  Bug fix: the original built the "not found" Exception
        objects but never raised them, so lookup failures surfaced later
        as confusing KeyErrors on ``node_config``.
        """
        config = self.load_config()
        # Get info
        image_id = getattr(image_meta.properties, 'os_distro')
        flavor_name = instance.flavor['name']
        node_config = {'name': instance.uuid}
        # Find image
        for image in self.driver.list_images():
            if image.id == image_id:
                node_config['image'] = image
                break
        else:
            raise Exception('Image with id "{}" not found'.format(image_id))
        # Find size
        for size in self.driver.list_sizes():
            if size.id == flavor_name:
                node_config['size'] = size
                break
        else:
            raise Exception('Flavor with id "{}" not found'.format(flavor_name))
        # Find location
        for location in self.driver.list_locations():
            if location.id == config['location']:
                node_config['location'] = location
                break
        else:
            raise Exception('Location with id "{}" not found'.format(config['location']))
        # Root password (optional)
        try:
            if config.get('root_password'):
                node_config['auth'] = NodeAuthPassword(config.get('root_password'))
        except cfg.NoSuchOptError:
            pass
        return self.driver.create_node(**node_config)

    def list_nodes(self):
        return self.driver.list_nodes()

    def destroy(self, instance, *args, **kwargs):
        """Destroy the provider node backing *instance*, if any."""
        node = self._get_node_by_uuid(instance.uuid)
        if node:
            self.driver.destroy_node(node)

    def list_instances(self):
        return self.list_nodes()

    def list_sizes(self):
        # Bug fix: this previously returned self.driver.list_images();
        # delegate to the driver's size listing, as the name promises.
        return self.driver.list_sizes()

    def power_on(self, context, instance, network_info, block_device_info=None):
        node = self._get_node_by_uuid(instance.uuid)
        if node:
            self.driver.ex_power_on_node(node)

    def list_instance_uuids(self):
        return [node.id for node in self.list_nodes()]

    def power_off(self, instance, timeout=0, retry_interval=0):
        node = self._get_node_by_uuid(instance.uuid)
        if node:
            self.driver.ex_shutdown_node(node)

    def get_info(self, instance):
        """Return a nova-style info dict for *instance*.

        Falls back to NOSTATE / id 0 when no backing node is found.
        """
        node = self._get_node_by_uuid(instance.uuid)
        if node:
            node_power_state = POWER_STATE_MAP[node.state]
            node_id = node.id
        else:
            node_power_state = power_state.NOSTATE
            node_id = 0
        return {
            'state': node_power_state,
            'max_mem_kb': 0,  # (int) the maximum memory in KBytes allowed
            'mem_kb': 0,  # (int) the memory in KBytes used by the instance
            'num_cpu': 0,  # (int) the number of virtual CPUs for the instance
            'cpu_time_ns': 0,  # (int) the CPU time used in nanoseconds
            'id': node_id
        }

    def reboot(self, instance, *args, **kwargs):
        node = self._get_node_by_uuid(instance.uuid)
        if node:
            self.driver.reboot_node(node)

    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach the disk to the instance at mountpoint using info."""
        instance_name = instance['name']
        if instance_name not in self._mounts:
            self._mounts[instance_name] = {}
        self._mounts[instance_name][mountpoint] = connection_info
        volume_id = connection_info['data']['volume_id']
        volume = self._get_volume_by_uuid(volume_id)
        node = self._get_node_by_uuid(instance.uuid)
        # Best effort: silently skip when volume or node cannot be found.
        if not all([volume, node]):
            return
        self.driver.attach_volume(node, volume)

    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach the disk attached to the instance."""
        try:
            del self._mounts[instance['name']][mountpoint]
        except KeyError:
            pass
        volume_id = connection_info['data']['volume_id']
        volume = self._get_volume_by_uuid(volume_id)
        if not volume:
            return
        self.driver.detach_volume(volume)

    def snapshot(self, context, instance, image_id, update_task_state):
        # NOTE(review): image_id is looked up as a *volume* uuid and the
        # snapshot name is hard-coded -- looks unfinished; confirm intent.
        volume = self._get_volume_by_uuid(image_id)
        if not volume:
            return
        self.driver.create_volume_snapshot(volume, 'snapshot_1')

    def _get_node_by_uuid(self, uuid):
        """Find the node whose name occurs inside *uuid*.

        Some providers limit node name length (e.g. Linode: 32 chars),
        so node.name may be a truncated instance uuid.
        """
        for node in self.list_nodes():
            if node.name in uuid:
                return node

    def _get_volume_by_uuid(self, uuid):
        """Return the driver volume with id == *uuid*, or None."""
        for volume in self.driver.list_volumes():
            if volume.id == uuid:
                return volume
|
{"/kozinaki/providers/aws/provider.py": ["/kozinaki/providers/aws/config.py", "/kozinaki/providers/common.py"], "/kozinaki/providers/gcp/provider.py": ["/kozinaki/providers/common.py"], "/kozinaki/manage/manage.py": ["/kozinaki/manage/utils.py"], "/kozinaki/providers/libcloud_driver/provider.py": ["/kozinaki/providers/common.py", "/kozinaki/providers/libcloud_driver/extended_drivers.py"], "/kozinaki/manage/__main__.py": ["/kozinaki/manage/manage.py"]}
|
23,663
|
compunova/kozinaki
|
refs/heads/master
|
/kozinaki/manage/__main__.py
|
import os
import sys
import argparse
import pkg_resources
import yaml
import libcloud
import click
import click_spinner
from terminaltables import AsciiTable
from .manage import NodeManager
DEFAULT_LANG = 'en'
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
CONTEXT_SETTINGS = {'help_option_names': ['-h', '--help'], 'max_content_width': 160}
# safe_load restricts YAML to plain data types; plain load() without an
# explicit Loader can construct arbitrary Python objects and emits a
# deprecation warning on modern PyYAML.
with open(os.path.join(BASE_PATH, 'config.yaml'), 'r') as conf_file:
    CONFIG = yaml.safe_load(conf_file)
# Get node manager
node_manager = NodeManager()
# KOZINAKI CLI MAIN CLASS
class KozinakiCLI(click.Group):
    """Top-level click group with a custom, colourised help layout."""

    def format_version(self, ctx, formatter):
        """Writes version."""
        package = pkg_resources.require("kozinaki")[0]
        parts = [
            click.style(package.project_name.capitalize(), fg='blue', bold=True),
            ' ver. {}'.format(package.version),
            ' ({})'.format(click.style('http://compu-nova.com/kozinaki',
                                       fg='blue', underline=True)),
        ]
        formatter.write_text(''.join(parts))
        formatter.write_paragraph()

    def format_options(self, ctx, formatter):
        """Writes all the options into the formatter if they exist."""
        opts = [record for record in
                (param.get_help_record(ctx) for param in self.get_params(ctx))
                if record is not None]
        if opts:
            with formatter.section(click.style('Optional arguments', fg='green')):
                formatter.write_dl(opts)

    def format_commands(self, ctx, formatter):
        """List every subcommand with its short help."""
        rows = []
        for subcommand in self.list_commands(ctx):
            cmd = self.get_command(ctx, subcommand)
            # Command reported by list_commands but unresolvable: skip it.
            if cmd is None:
                continue
            rows.append((subcommand, cmd.short_help or ''))
        if rows:
            with formatter.section(click.style('Positional arguments', fg='green')):
                formatter.write_dl(rows)

    def format_usage(self, ctx, formatter):
        """Writes the usage line into the formatter."""
        usage_prefix = '{}: '.format(click.style('Usage', fg='green'))
        formatter.write_usage(ctx.command_path,
                              ' '.join(self.collect_usage_pieces(ctx)),
                              prefix=usage_prefix)

    def format_help(self, ctx, formatter):
        """Render the full help: version, usage, description, commands,
        options and epilog, in that order."""
        self.format_version(ctx, formatter)
        self.format_usage(ctx, formatter)
        self.format_help_text(ctx, formatter)
        self.format_commands(ctx, formatter)
        self.format_options(ctx, formatter)
        self.format_epilog(ctx, formatter)
# NODE COMMANDS
class NodeCommand(click.MultiCommand):
    """Dynamic group exposing one subcommand per service command listed
    in CONFIG['services']['commands']."""

    def list_commands(self, ctx):
        return CONFIG['services']['commands']

    def get_command(self, ctx, name):
        return self.create_cmd(name)

    def create_cmd(self, cmd_action):
        """Build a command that accepts one or more node names."""
        name_argument = click.Argument(['name'], nargs=-1, required=True)
        return click.Command(
            name=cmd_action,
            params=[name_argument],
            help="{} all node's services".format(cmd_action.capitalize()),
            callback=self.cmd_callback
        )

    def cmd_callback(self, name):
        """Forward the invoked command to every named node's services."""
        ctx = click.get_current_context()
        for node_name in name:
            node = node_manager.node_get(node_name=node_name)
            if not node:
                click.secho('Node "{}" not found'.format(node_name), fg='red')
                continue
            for line in node.command(cmd=ctx.command.name):
                print(line)
# CREATE PROVIDER NODES
class NodeProviderCreate(click.MultiCommand):
    """Dynamic 'create' group: one subcommand per known provider type."""
    def __init__(self, **attrs):
        super(NodeProviderCreate, self).__init__(**attrs)
    def format_commands(self, ctx, formatter):
        """Extra format methods for multi methods that adds all the commands after the options."""
        native_providers = []
        libcloud_providers = []
        list_commands = self.list_commands(ctx)
        # Pad every provider name to the longest one so both help
        # sections line up in columns.
        max_provider_name_ident = len(max(list_commands, key=len))
        for subcommand in list_commands:
            cmd = self.get_command(ctx, subcommand)
            # What is this, the tool lied about a command. Ignore it
            if cmd is None:
                continue
            subcommand = subcommand.ljust(max_provider_name_ident)
            help = cmd.short_help or ''
            # Convention: lowercase names are native clients, uppercase
            # names come from libcloud provider constants.
            if subcommand[0].islower():
                native_providers.append((subcommand, help))
            else:
                libcloud_providers.append((subcommand, help))
        if native_providers or libcloud_providers:
            with formatter.section(click.style('Providers', fg='green')):
                if native_providers:
                    with formatter.section(click.style('Native clients', fg='yellow')):
                        formatter.write_dl(sorted(native_providers))
                if libcloud_providers:
                    with formatter.section(click.style('Libcloud ({}) clients'.format(libcloud.__version__),
                                                       fg='yellow')):
                        formatter.write_dl(sorted(libcloud_providers))
    def list_commands(self, ctx):
        return node_manager.valid_node_types['providers'].keys()
    def get_command(self, ctx, name):
        # Building a command queries the provider for its parameters,
        # which can be slow -- show a spinner meanwhile.
        with click_spinner.spinner():
            cmd = self.create_command(name)
        return cmd
    def create_command(self, provider_name):
        """Build the click.Command for *provider_name* with one --option
        per provider parameter (required unless a default exists)."""
        config = node_manager.valid_node_types['providers'].get(provider_name)
        if not config:
            return
        provider_options = [
            click.Option(param_decls=['--name'], help='Compute node name', required=True)
        ]
        for param, param_data in node_manager.get_node_params(provider_name).items():
            argument_params = dict()
            argument_params['help'] = ''
            description = param_data.get('description', {}).get(DEFAULT_LANG)
            default = param_data.get('default')
            arg_type = param_data.get('type')
            if description:
                argument_params['help'] += '{} '.format(description)
            if arg_type:
                argument_params['help'] += '(type: {}) '.format(arg_type)
            if default:
                argument_params.update({
                    'default': default,
                    'required': False
                })
                argument_params['help'] += '(default: {}) '.format(default)
            else:
                argument_params['required'] = True
            # NOTE(review): only 'help' from argument_params is used below;
            # 'default'/'required' are recomputed inline, so a falsy default
            # (e.g. 0) still marks the option required -- confirm intent.
            provider_options.append(click.Option(
                param_decls=['--{}'.format(param)],
                help=argument_params['help'],
                default=default,
                required=False if default else True
            ))
        cmd = click.Command(
            name=provider_name,
            params=provider_options,
            help=click.style(config['description'], fg='cyan'),
            short_help=click.style(config['description'], fg='cyan'),
            callback=self.create_node_callback
        )
        return cmd
    def create_node_callback(self, name, compute_driver, **kwargs):
        # NOTE(review): no '--compute-driver' option is built above, so
        # 'compute_driver' presumably arrives as a provider parameter of
        # that name -- verify against get_node_params output.
        with click_spinner.spinner():
            ctx = click.get_current_context()
            node_manager.node_create(node_name=name, node_type=ctx.command.name,
                                     compute_driver=compute_driver, **kwargs)
@click.group(cls=KozinakiCLI, context_settings=CONTEXT_SETTINGS)
@click.version_option()
@click.option('--verbose', '-v', is_flag=True, help="Will print verbose messages.")
def main(verbose):
    """Command-line interface to manage cloud provider nodes for the Kozinaki driver.
    A node is a collection of OpenStack service instances responsible for interaction with the cloud provider's API.
    There could be multiple nodes created for the same cloud provider with different parameters.
    """
    # Group entry point; subcommands do the work.  NOTE(review): the
    # --verbose flag is accepted but not acted upon anywhere in this file.
    pass
@main.command('list')
def node_list():
    """Show all created compute nodes"""
    # Render each node's name, type and comma-joined service list as an
    # ASCII table.
    table_data = [['Name', 'Type', 'Services']]
    for node in node_manager.node_list():
        table_data.append([node.name, node.type, ','.join(service for service in node.services)])
    table = AsciiTable(table_data)
    print(table.table)
@main.group(cls=NodeProviderCreate)
def create():
    """Create compute node for cloud provider"""
    # Per-provider subcommands are generated by NodeProviderCreate.
    pass
@main.command()
@click.confirmation_option()
@click.argument('node-name')
def delete(node_name):
    """Delete compute node"""
    # Guarded by click's yes/no confirmation prompt.
    node_manager.node_delete(node_name=node_name)
@main.group('send', cls=NodeCommand)
def node_commands():
    """Send command(s) to a compute node's services"""
    # Subcommands come from CONFIG['services']['commands'] via NodeCommand.
    pass
def main_old():
    """Legacy argparse-based CLI, superseded by the click interface above."""
    # Formatter
    formatter_class = lambda prog: argparse.HelpFormatter(prog, max_help_position=100, width=200)
    parser = argparse.ArgumentParser(description='ApexView compute node manage utility')
    subparsers = parser.add_subparsers(help='Available actions', dest='action')
    # CREATE
    parser_create = subparsers.add_parser(
        'create',
        description='Create compute node for cloud provider',
        help='Create new nova compute node'
    )
    parser_create_subparsers = parser_create.add_subparsers(help='Available cloud providers', dest='type')
    # Create providers subparsers
    for provider_name, config in node_manager.valid_node_types['providers'].items():
        parser_create_type = parser_create_subparsers.add_parser(
            provider_name,
            description='Create node in {} cloud'.format(provider_name.upper()),
            help='Create node in {} cloud'.format(provider_name.upper()),
            formatter_class=formatter_class
        )
        parser_create_type.add_argument('--name', type=str, required=True, help='Compute node name (type: str)')
        for param, param_data in node_manager.get_node_params(provider_name).items():
            argument_params = dict()
            argument_params['help'] = ''
            description = param_data.get('description', {}).get(DEFAULT_LANG)
            default = param_data.get('default')
            arg_type = param_data.get('type')
            if description:
                argument_params['help'] += '{} '.format(description)
            if arg_type:
                # Resolve the builtin type named in the config (e.g. 'int').
                argument_params['type'] = getattr(sys.modules['builtins'], arg_type)
                argument_params['help'] += '(type: {}) '.format(arg_type)
            if default:
                argument_params.update({
                    'default': default,
                    'required': False
                })
                argument_params['help'] += '(default: {}) '.format(default)
            else:
                argument_params['required'] = True
            parser_create_type.add_argument('--{}'.format(param), **argument_params)
    # DELETE
    parser_delete = subparsers.add_parser(
        'delete',
        description='Delete compute node',
        help='Delete compute node'
    )
    parser_delete.add_argument('--name', type=str, required=True, help='Compute node name')
    # LIST
    parser_node_list = subparsers.add_parser(
        'list',
        description='Show all created compute nodes',
        help='Show all created compute nodes'
    )
    # COMMANDS
    for command in CONFIG['services']['commands']:
        parser_node_command = subparsers.add_parser(
            command,
            description='Pass {} command to all node services'.format(command),
            help='Pass {} command to all node services'.format(command)
        )
        parser_node_command.add_argument('--name', type=str, required=True, help='Compute node name')
    args = parser.parse_args()
    if args.action == 'create':
        # NOTE(review): vars(args) also forwards 'name', 'type' and
        # 'action'; node_create presumably tolerates the extras -- confirm.
        node_manager.node_create(node_name=args.name, node_type=args.type, **vars(args))
    elif args.action == 'delete':
        node_manager.node_delete(node_name=args.name)
    elif args.action == 'list':
        table_data = [['Name', 'Type', 'Services']]
        for node in node_manager.node_list():
            table_data.append([node.name, node.type, ','.join(service for service in node.services)])
        table = AsciiTable(table_data)
        print(table.table)
    elif args.action in CONFIG['services']['commands']:
        node = node_manager.node_get(node_name=args.name)
        response = node.command(cmd=args.action)
        for r in response:
            print(r)
    else:
        parser.print_help()
|
{"/kozinaki/providers/aws/provider.py": ["/kozinaki/providers/aws/config.py", "/kozinaki/providers/common.py"], "/kozinaki/providers/gcp/provider.py": ["/kozinaki/providers/common.py"], "/kozinaki/manage/manage.py": ["/kozinaki/manage/utils.py"], "/kozinaki/providers/libcloud_driver/provider.py": ["/kozinaki/providers/common.py", "/kozinaki/providers/libcloud_driver/extended_drivers.py"], "/kozinaki/manage/__main__.py": ["/kozinaki/manage/manage.py"]}
|
23,664
|
onkarmumbrekar/CFPWSN
|
refs/heads/master
|
/buildModel.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 15 11:05:26 2018
@author: Onkar Mumbrekar
Website: www.onkarmumbrekar.co.in
"""
from scipy.spatial.distance import pdist, squareform
from dataClean import userCreation
def similarityMatrixCreation(P, sources, userdata):
    """Build the adjusted-cosine similarity matrix between items.

    P is the window size (rows per pseudo-user), *sources* the number of
    sensor columns (temperature, humidity, light, voltage) and
    *userdata* the cleaned readings.
    """
    ratings = userCreation(P, sources, userdata)
    # Subtract each pseudo-user's mean before the cosine distance
    # (adjusted cosine), then flip distance into similarity.
    centered = ratings - ratings.mean(axis=1)[:, None]
    return 1 - squareform(pdist(centered.T, 'cosine'))
|
{"/buildModel.py": ["/dataClean.py"], "/test2.py": ["/Prediction_model.py", "/dataClean.py", "/buildModel.py"], "/test.py": ["/Prediction_model.py"]}
|
23,665
|
onkarmumbrekar/CFPWSN
|
refs/heads/master
|
/test2.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 15 12:39:53 2018
@author: Onkar Mumbrekar
Website: www.onkarmumbrekar.co.in
"""
import pandas as pd
from Prediction_model import Prediction
from dataClean import cleanDataset,userCreation
from buildModel import similarityMatrixCreation
# Sensor readings file; the date and time columns are merged into a
# single datetime index on load.
data_file = 'data.csv'
#dataset = pd.read_csv(data_file,parse_dates=[['date','time']],sep=' ',index_col=0)
dataset = pd.read_csv(data_file,parse_dates=[['date','time']],index_col=0)
# Keep only the four sensor sources.
dataset = pd.DataFrame(dataset,columns=['temperature','humidity','light','voltage'])
dataset = cleanDataset(dataset,'temperature')
# Window size: each pseudo-user is p consecutive readings.
p=10
users=userCreation(p,4,dataset)
w = similarityMatrixCreation(p,4,dataset)
# Predict the final window's four source values for users 0-9.
for t in range(10):
    prd = Prediction(t,w,users,p,4)
    print(prd)
|
{"/buildModel.py": ["/dataClean.py"], "/test2.py": ["/Prediction_model.py", "/dataClean.py", "/buildModel.py"], "/test.py": ["/Prediction_model.py"]}
|
23,666
|
onkarmumbrekar/CFPWSN
|
refs/heads/master
|
/dataClean.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 15 09:36:45 2018
@author:Onkar Mumbrekar
Website: www.onkarmumbrekar.co.in
"""
import numpy as np
def userCreation(P, sources, userdata):
    """Slide a window of P rows over *userdata* and flatten each window.

    Returns an (n_rows - P, P * sources) array whose row i holds rows
    [i, i + P) of *userdata* laid out end to end.
    """
    n_windows = len(userdata.iloc[:]) - P
    width = P * sources
    users = np.zeros((n_windows, width))
    for start in range(n_windows):
        window = userdata.iloc[start:start + P]
        users[start] = np.array(window).reshape(1, width)
    return users
def cleanDataset(userdata, colname):
    """Drop rows with missing values, then remove outliers in *colname*.

    Bug fix: the outlier pass previously ignored *colname* and always
    filtered the hard-coded 'temperature' column.  The only visible
    caller passes 'temperature', so behaviour there is unchanged.
    """
    userdata = userdata.dropna()  # remove rows with null values
    userdata = remove_outlier(userdata, colname)
    return userdata
def remove_outlier(df_in, col_name):
    """Drop rows whose *col_name* value falls outside the 1.5*IQR fences."""
    q1 = df_in[col_name].quantile(0.25)
    q3 = df_in[col_name].quantile(0.75)
    spread = q3 - q1
    lower, upper = q1 - 1.5 * spread, q3 + 1.5 * spread
    inside = (df_in[col_name] > lower) & (df_in[col_name] < upper)
    return df_in.loc[inside]
|
{"/buildModel.py": ["/dataClean.py"], "/test2.py": ["/Prediction_model.py", "/dataClean.py", "/buildModel.py"], "/test.py": ["/Prediction_model.py"]}
|
23,667
|
onkarmumbrekar/CFPWSN
|
refs/heads/master
|
/test.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 14 09:54:37 2018
@author:Onkar Mumbrekar
Website: www.onkarmumbrekar.co.in
"""
import pandas as pd
from scipy.spatial.distance import pdist, squareform
from sklearn.model_selection import train_test_split
import numpy as np
import Prediction_model as pm
# Scratch evaluation script: load readings, build pseudo-users and the
# adjusted-cosine similarity matrix, then predict for one user.
data_file = 'data.csv'
dataset = pd.read_csv(data_file,parse_dates=[['date','time']],index_col=0)
dataset = pd.DataFrame(dataset,columns=['temperature','humidity','light','voltage'])
# Bug fix: dropna() returns a new frame; the result was previously
# discarded, so rows with missing values were kept.
dataset = dataset.dropna()
dataset.to_csv('geo.csv')
train_data, test_data = train_test_split(dataset, test_size = 0.20, random_state=0)
# Window size: each pseudo-user is P consecutive readings flattened.
P=4
users = np.zeros((len(train_data.iloc[:])-P, P*4))
for i in range(len(train_data.iloc[:])-P):
    users[i]=np.array(train_data.iloc[i:i+P]).reshape(1,P*4)
M = users
# Adjusted cosine: subtract each row's mean before the distance step.
M_u = M.mean(axis=1)
item_mean_subtracted = M - M_u[:, None]
w =1-squareform(pdist(item_mean_subtracted.T, 'cosine'))
# Bug fix: the old call passed item=/user= keywords matching the retired
# Prediction signature; the current one is Prediction(user, w, userdata,
# p, sources).
prediction = pm.Prediction(user=3, w=w, userdata=users, p=4, sources=4)
print(prediction)
|
{"/buildModel.py": ["/dataClean.py"], "/test2.py": ["/Prediction_model.py", "/dataClean.py", "/buildModel.py"], "/test.py": ["/Prediction_model.py"]}
|
23,668
|
onkarmumbrekar/CFPWSN
|
refs/heads/master
|
/Prediction_model.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 16:41:56 2018
@author:Onkar Mumbrekar
Website: www.onkarmumbrekar.co.in
"""
import numpy as np
#def Prediction(user,item,w,userdata,p,sources):
# ri_dash = cal_rating_mean_ri_dash(userdata,item)
# #print("ri_dash ", ri_dash)
# sigma_i = cal_variance(userdata,item)
# n=(p-1)*sources
# numerator = cal_numerator(user,item,userdata,w,n)
# denominator = cal_denominator(item,w,n)
# intermediatedata =numerator/denominator
# prediction_i = ri_dash + (sigma_i * intermediatedata)
# return prediction_i
def Prediction(user,w,userdata,p,sources):
    """Predict the values of the final window slot for *user*.

    Items (p-1)*sources .. p*sources-1 are the last reading's columns;
    each is predicted as r_bar_i + sigma_i * (similarity-weighted,
    mean/variance-normalised deviations of the user's known ratings).
    """
    # NOTE(review): fixed size 4 assumes sources == 4 -- confirm callers.
    prediction_user = np.zeros(4)
    start = (p-1)*sources
    end = (p*sources)
    for item in range(start,end):
        ri_dash = cal_rating_mean_ri_dash(userdata,item)
        sigma_i = cal_variance(userdata,item)
        # n: number of "known" items (all columns before the last window).
        n=(p-1)*sources
        numerator = cal_numerator(user,item,userdata,w,n)
        denominator = cal_denominator(item,w,n)
        intermediatedata =numerator/denominator
        prediction_user[item-((p-1)*sources)] = ri_dash + (sigma_i * intermediatedata)
    return prediction_user
def cal_rating_mean_ri_dash(userdata, item):
    """Mean rating of column *item* across all users (r-bar_i)."""
    column_means = userdata.mean(axis=0)
    return column_means[item]
def cal_variance(userdata, item):
    """Population variance (numpy default ddof=0) of column *item*."""
    column_variances = userdata.var(axis=0)
    return column_variances[item]
def cal_numerator(user,item,userdata,w,n):
    """Similarity-weighted sum of *user*'s mean/variance-normalised
    deviations over the known items."""
    numerator=0
    # NOTE(review): range(n-1) skips item n-1, which matches
    # cal_denominator's w[item, :n-1] slice -- possibly a deliberate
    # choice, possibly an off-by-one; confirm intent.
    for j in range(n-1):
        ruj = userdata[user,j]
        rj_dash = cal_rating_mean_ri_dash(userdata,j)
        sigma_j =cal_variance(userdata,j)
        wij = w[item,j]
        numerator += ((ruj-rj_dash)/sigma_j)*wij
    return numerator
def cal_denominator(item, w, n):
    """Sum of the similarity weights w[item, j] for j in [0, n-1)."""
    weights = w[item, :n - 1]
    return weights.sum()
|
{"/buildModel.py": ["/dataClean.py"], "/test2.py": ["/Prediction_model.py", "/dataClean.py", "/buildModel.py"], "/test.py": ["/Prediction_model.py"]}
|
23,673
|
citizen-stig/aquizz
|
refs/heads/master
|
/aquizz/settings/base.py
|
import os
class Config(object):
    """Base Flask configuration shared by the develop/production configs."""
    # Repository root: three directory levels above this settings module.
    PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    # Location of the built web client bundle served by Flask.
    CLIENT_BUILD_FOLDER = os.path.join(PROJECT_ROOT, 'aquizz-client', 'build')
    STATIC_FOLDER = os.path.join(CLIENT_BUILD_FOLDER, 'static')
    DEBUG = False
    TESTING = False
    # Default local MongoDB; production overrides this from MONGODB_URI.
    MONGODB_SETTINGS = {
        'db': 'aquizz',
        'host': 'localhost',
        'port': 27017,
    }
|
{"/aquizz/settings/production.py": ["/aquizz/settings/base.py"], "/aquizz/settings/develop.py": ["/aquizz/settings/base.py"], "/aquizz/app.py": ["/aquizz/models.py", "/aquizz/admin.py", "/aquizz/api.py"], "/aquizz/wsgi.py": ["/aquizz/app.py"]}
|
23,674
|
citizen-stig/aquizz
|
refs/heads/master
|
/aquizz/settings/production.py
|
import os
from .base import Config as BaseConfig
from pymongo.uri_parser import parse_uri
def get_mongodb_settings():
    """Derive flask-mongoengine connection settings from MONGODB_URI.

    Only the first node listed in the URI is used as host/port.
    """
    parsed = parse_uri(os.environ['MONGODB_URI'])
    host, port = parsed['nodelist'][0]
    settings = {
        'host': host,
        'port': port,
        'db': parsed['database'],
        'username': parsed['username'],
        'password': parsed['password'],
    }
    return settings
class Config(BaseConfig):
    """Production settings; all secrets come from the environment."""
    DEBUG = False
    # Bug fix: was 'Testing', which Flask ignores (config keys must be
    # uppercase); the intent is to override BaseConfig.TESTING.
    TESTING = False
    SECRET_KEY = os.environ['FLASK_SECRET_KEY']
    SECURITY_PASSWORD_SALT = os.environ['FLASK_PASSWORD_SALT']
    MONGODB_SETTINGS = get_mongodb_settings()
|
{"/aquizz/settings/production.py": ["/aquizz/settings/base.py"], "/aquizz/settings/develop.py": ["/aquizz/settings/base.py"], "/aquizz/app.py": ["/aquizz/models.py", "/aquizz/admin.py", "/aquizz/api.py"], "/aquizz/wsgi.py": ["/aquizz/app.py"]}
|
23,675
|
citizen-stig/aquizz
|
refs/heads/master
|
/aquizz/exc.py
|
class QuizException(Exception):
    """Base exception for quiz domain errors (e.g. double-answering)."""
    pass
|
{"/aquizz/settings/production.py": ["/aquizz/settings/base.py"], "/aquizz/settings/develop.py": ["/aquizz/settings/base.py"], "/aquizz/app.py": ["/aquizz/models.py", "/aquizz/admin.py", "/aquizz/api.py"], "/aquizz/wsgi.py": ["/aquizz/app.py"]}
|
23,676
|
citizen-stig/aquizz
|
refs/heads/master
|
/aquizz/gunicorn.conf.py
|
import os
import multiprocessing
# Bind to all interfaces on $PORT (default 5000).
port = os.getenv('PORT', '5000')
bind = '0.0.0.0:' + str(port)
worker_class = 'gevent'
# Common gunicorn sizing heuristic: 2 * cores + 1 workers.
workers = multiprocessing.cpu_count() * 2 + 1
threads = workers * 2
# Recycle each worker after 1000 requests.
max_requests = 1000
|
{"/aquizz/settings/production.py": ["/aquizz/settings/base.py"], "/aquizz/settings/develop.py": ["/aquizz/settings/base.py"], "/aquizz/app.py": ["/aquizz/models.py", "/aquizz/admin.py", "/aquizz/api.py"], "/aquizz/wsgi.py": ["/aquizz/app.py"]}
|
23,677
|
citizen-stig/aquizz
|
refs/heads/master
|
/aquizz/utils.py
|
import csv
from aquizz import models
def load_data_from_file(filename):
    """Import question/answer pairs from a two-column CSV file.

    Column 0 is the question text, column 1 a correct option.  Existing
    questions (matched by stripped text) are reused, and an option is
    appended only when its stripped value is not already present.
    """
    with open(filename) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_reader:
            text = row[0].strip()
            question = models.Question.objects(text=text).first()
            if question is None:
                question = models.Question(text=text)
            option = row[1].strip()
            options = [x.value for x in question.options]
            if option not in options:
                # Bug fix: store the stripped value that was compared,
                # not raw row[1]; otherwise whitespace variants of the
                # same option are appended on every import.
                question.options.append(models.Option(value=option, is_correct=True))
            question.save()
|
{"/aquizz/settings/production.py": ["/aquizz/settings/base.py"], "/aquizz/settings/develop.py": ["/aquizz/settings/base.py"], "/aquizz/app.py": ["/aquizz/models.py", "/aquizz/admin.py", "/aquizz/api.py"], "/aquizz/wsgi.py": ["/aquizz/app.py"]}
|
23,678
|
citizen-stig/aquizz
|
refs/heads/master
|
/aquizz/models.py
|
from bson import ObjectId
from datetime import datetime
from flask_mongoengine import MongoEngine
from flask_security import UserMixin, RoleMixin, MongoEngineUserDatastore
from aquizz import exc
db = MongoEngine()
class Option(db.EmbeddedDocument):
    """A single answer option embedded in a Question."""
    value = db.StringField(required=True)
    # Marks this option as a correct answer for its question.
    is_correct = db.BooleanField(default=False)
    def __str__(self):
        return self.value
class Question(db.Document):
    """A quiz question together with its embedded answer options."""
    id = db.ObjectIdField(primary_key=True, default=ObjectId)
    text = db.StringField(required=True, unique=True)
    options = db.ListField(db.EmbeddedDocumentField(Option))

    def __str__(self):
        return self.text

    def get_correct_options(self):
        """Yield the options flagged as correct."""
        return (option for option in self.options if option.is_correct)

    def is_answer_correct(self, value):
        """True when *value* equals any correct option's value."""
        return any(option.value == value for option in self.get_correct_options())
class Item(db.EmbeddedDocument):
    """One question/answer slot inside a Quiz."""
    question = db.ReferenceField(Question, required=True)
    answer = db.StringField(required=False)
    # NOTE(review): 'points' is never written anywhere in this file.
    points = db.IntField(min_value=0, max_value=100)
    answered_at = db.DateTimeField(required=False)
    def is_correct(self):
        # Delegates to the referenced question's correctness check.
        return self.question.is_answer_correct(self.answer)
    def __str__(self):
        # Tick/cross prefix only once an answer has been recorded.
        tick = ''
        if self.answer:
            tick = '✔ ' if self.is_correct() else '✗ '
        return '{0}{1} => {2} @ {3} <br/>'.format(
            tick,
            self.question,
            self.answer,
            self.answered_at,
        )
class Quiz(db.Document):
    """A player's quiz session: question items plus timing metadata."""
    id = db.ObjectIdField(primary_key=True, default=ObjectId)
    started_at = db.DateTimeField(required=True, default=datetime.utcnow)
    finished_at = db.DateTimeField(required=False, default=None)
    player_name = db.StringField(required=False, default='Anonymous')
    items = db.ListField(db.EmbeddedDocumentField(Item))

    def check_answer(self, question_id: ObjectId, value: str):
        """Record *value* as the answer for *question_id*.

        Returns whether the answer is correct; returns None when the
        question is not part of this quiz.  Raises QuizException when
        the question was already answered.
        """
        for item in self.items:
            if item.question.id == question_id:
                if item.answer:
                    raise exc.QuizException('Question already answered')
                item.answer = value
                item.answered_at = datetime.utcnow()
                # Bug fix: EmbeddedDocument has no save(); persist the
                # change through the owning document instead of the
                # original item.save(), which raised AttributeError.
                self.save()
                return item.question.is_answer_correct(value)

    def get_correct_options(self, question_id):
        """Return the correct options for *question_id* (None if absent)."""
        for item in self.items:
            if item.question.id == question_id:
                return item.question.get_correct_options()

    def check_if_completed(self):
        """Stamp finished_at once every item has been answered."""
        unanswered_questions = sum(1 for x in self.items if x.answered_at is None)
        is_completed = unanswered_questions == 0
        if is_completed and self.finished_at is None:
            self.finished_at = datetime.utcnow()
            self.save()

    def get_score(self) -> float:
        """Fraction of items answered correctly, in [0.0, 1.0]."""
        correct_count = sum(1 for x in self.items if x.is_correct())
        score = float(correct_count) / float(len(self.items))
        return score
###################################
class Role(db.Document, RoleMixin):
    """flask-security role document (e.g. 'admin')."""
    name = db.StringField(max_length=80, unique=True)
    description = db.StringField(max_length=255)
    def __str__(self):
        return self.name
class User(UserMixin, db.Document):
    """flask-security user account document."""
    email = db.StringField(required=True, unique=True)
    # NOTE(review): presumably stores the flask-security password hash,
    # never plaintext -- confirm against the registration flow.
    password = db.StringField(max_length=255)
    active = db.BooleanField(default=True)
    confirmed_at = db.DateTimeField()
    roles = db.ListField(db.ReferenceField(Role), default=lambda: [])
    def __str__(self):
        return self.email
# Datastore wiring flask-security's user/role lookups to MongoEngine.
user_datastore = MongoEngineUserDatastore(db, User, Role)
|
{"/aquizz/settings/production.py": ["/aquizz/settings/base.py"], "/aquizz/settings/develop.py": ["/aquizz/settings/base.py"], "/aquizz/app.py": ["/aquizz/models.py", "/aquizz/admin.py", "/aquizz/api.py"], "/aquizz/wsgi.py": ["/aquizz/app.py"]}
|
23,679
|
citizen-stig/aquizz
|
refs/heads/master
|
/aquizz/settings/develop.py
|
import os
from .base import Config as BaseConfig
class Config(BaseConfig):
    """Development settings; secrets here are insecure fallbacks only."""
    DEBUG = True
    # Overridable via environment, keeping the historical dev default
    # so existing setups are unaffected.
    SECRET_KEY = os.getenv('FLASK_SECRET_KEY', 'super-secret')
    SECURITY_PASSWORD_SALT = os.getenv('FLASK_PASSWORD_SALT', 'some_salt')
|
{"/aquizz/settings/production.py": ["/aquizz/settings/base.py"], "/aquizz/settings/develop.py": ["/aquizz/settings/base.py"], "/aquizz/app.py": ["/aquizz/models.py", "/aquizz/admin.py", "/aquizz/api.py"], "/aquizz/wsgi.py": ["/aquizz/app.py"]}
|
23,680
|
citizen-stig/aquizz
|
refs/heads/master
|
/aquizz/admin.py
|
import pprint
from collections import defaultdict, OrderedDict
from flask import request, redirect, url_for
from flask_admin import AdminIndexView, expose
from flask_admin.babel import lazy_gettext
from flask_admin.contrib.mongoengine import ModelView, filters
from flask_security import current_user, login_required, roles_required
from jinja2 import Markup
from aquizz import models
# Module-level pretty-printer (debug helper; not referenced elsewhere in view).
pp = pprint.PrettyPrinter(indent=4)
class FilterArrayBaseLength(filters.BaseMongoEngineFilter):
    """Flask-Admin filter comparing the length of an array field.

    Subclasses override `operator` to select the comparison used in the
    generated JavaScript `$where` clause.
    """
    operator = '!='

    def apply(self, query, value):
        # int() both validates and sanitizes the user-supplied value: it is
        # interpolated into a server-side JavaScript $where expression, so a
        # raw string would allow JS injection into the MongoDB query.
        where = 'this.{field}.length {operator} {value}'.format(
            field=self.column.name,
            operator=self.operator,
            value=int(value)
        )
        return query.filter(__raw__={'$where': where})

    def operation(self):
        # Label shown in the admin UI's filter dropdown.
        return lazy_gettext('array_size {}'.format(self.operator))
class FilterArrayLengthLower(FilterArrayBaseLength):
    """Matches documents whose array field is shorter than the value."""
    operator = '<'
class FilterArrayLengthHigherOrEqual(FilterArrayBaseLength):
    """Matches documents whose array field has at least `value` entries."""
    operator = '>='
class AdminProtectedIndexView(AdminIndexView):
    """Admin dashboard: per-question answer statistics over played quizzes."""

    @expose()
    @login_required
    @roles_required('admin')
    def index(self):
        # question text -> number of correct answers across all quizzes
        correct_answers = defaultdict(int)
        # question text -> {option value -> times chosen incorrectly}
        incorrect_answers = {}
        # NOTE(review): finished_at defaults to None, not '' — `__ne=''`
        # presumably matches unfinished quizzes too; confirm whether
        # `finished_at__ne=None` was intended.
        completed_quizzes = models.Quiz.objects(finished_at__ne='')
        # question text -> list of correct options (collected lazily below)
        all_questions = {}
        for quiz in completed_quizzes:
            for item in quiz.items:
                question_text = item.question.text
                if question_text not in all_questions:
                    all_questions[question_text] = item.question.get_correct_options()
                if item.is_correct():
                    correct_answers[question_text] += 1
                else:
                    if question_text not in incorrect_answers:
                        incorrect_answers[question_text] = {
                            x.value: 0 for x in item.question.options
                        }
                    if item.answer not in incorrect_answers[question_text]:
                        # Answer no longer matches any current option
                        # (question edited after the quiz was taken) — skip.
                        continue
                    incorrect_answers[question_text][item.answer] += 1
        # Merge the two tallies into one display record per question.
        data = {}
        for question, correct_options in all_questions.items():
            incorrect = incorrect_answers.get(question)
            if incorrect is None:
                incorrect_count = 0
                incorrect_options = None
            else:
                incorrect_count = sum(incorrect.values())
                # Options actually chosen, most frequent first.
                incorrect_options = OrderedDict(
                    sorted(((q, o) for q, o in incorrect.items() if o > 0),
                           key=lambda x: x[1],
                           reverse=True))
            incorrect_ratio = 0
            correct_count = correct_answers.get(question, 0)
            correct_ratio = 0
            total = incorrect_count + correct_count
            if correct_count > 0:
                correct_ratio = float(correct_count) / float(total)
            if incorrect_count > 0:
                incorrect_ratio = float(incorrect_count) / float(total)
            data[question] = {
                'total': total,
                'correct_count': correct_count,
                'correct_ratio': "{0:.0f}%".format(correct_ratio * 100.0),
                'correct_options': correct_options,
                'incorrect_count': incorrect_count,
                'incorrect_ratio': "{0:.0f}%".format(incorrect_ratio * 100.0),
                'incorrect_options': incorrect_options,
            }
        # Most-missed questions first.
        questions_analysis = list(
            sorted(
                data.items(),
                key=lambda x: x[1].get('incorrect_count'),
                reverse=True
            ))
        hardest_questions = questions_analysis[:20]
        simplest_questions = list(
            sorted(
                questions_analysis,
                key=lambda x: x[1].get('correct_count'),
                reverse=True)
        )[:10]
        self._template_args['hardest_questions'] = hardest_questions
        self._template_args['simplest_questions'] = simplest_questions
        # Question counters: a question is "ready" once it has >= 4 options.
        self._template_args['all_questions_count'] = models.Question.objects().count()
        self._template_args['ready_questions_count'] = models.Question.objects(
            __raw__={'$where': 'this.options.length >= 4'}).count()
        self._template_args['incomplete_questions_count'] = models.Question.objects(
            __raw__={'$where': 'this.options.length < 4'}).count()
        # Quiz counters (same `__ne=''` caveat as above).
        self._template_args['total_quizzes_count'] = models.Quiz.objects.count()
        self._template_args['completed_quizzes_count'] = models.Quiz.objects(
            finished_at__ne='').count()
        return super().index()
class AdminProtectedModelView(ModelView):
    """Base model view that only active, authenticated admins may use."""

    def is_accessible(self):
        """Gate every view behind the 'admin' role."""
        if not (current_user.is_active and current_user.is_authenticated):
            return False
        return bool(current_user.has_role('admin'))

    def inaccessible_callback(self, name, **kwargs):
        """Send unauthorized visitors to the login page, preserving the target URL."""
        return redirect(url_for('security.login', next=request.url))
class QuestionAdminView(AdminProtectedModelView):
    """CRUD view over questions, filterable by option-list length."""
    column_list = ('text', 'options')
    column_filters = (
        'text',
        FilterArrayLengthLower(column=models.Question.options, name='Options'),
        FilterArrayLengthHigherOrEqual(column=models.Question.options,
                                       name='Options'),
    )
    # Limit the embedded option sub-forms to the value/is_correct fields.
    form_subdocuments = {
        'options': {
            'form_subdocuments': {
                None: {
                    'form_columns': ('value', 'is_correct')
                }
            }
        }
    }
def list_br_formatter(view, values):
    """Render a sequence as an HTML fragment, one entry per line."""
    rendered = '<br/>'.join(str(entry) for entry in values)
    return Markup(rendered)
class QuizAdminView(AdminProtectedModelView):
    """Read-only-ish admin view over played quizzes."""
    # Quizzes are created by players through the API, never by admins.
    can_create = False
    can_edit = False
    can_delete = True
    can_view_details = True
    column_details_list = (
        'started_at',
        'finished_at',
        'player_name',
        'items'
    )
    # Newest quizzes first.
    column_default_sort = ('started_at', True)
    # Render list fields (quiz items) one entry per line.
    column_type_formatters = {
        list: list_br_formatter
    }
    column_filters = (
        'started_at',
        'finished_at',
        'player_name',
    )
|
{"/aquizz/settings/production.py": ["/aquizz/settings/base.py"], "/aquizz/settings/develop.py": ["/aquizz/settings/base.py"], "/aquizz/app.py": ["/aquizz/models.py", "/aquizz/admin.py", "/aquizz/api.py"], "/aquizz/wsgi.py": ["/aquizz/app.py"]}
|
23,681
|
citizen-stig/aquizz
|
refs/heads/master
|
/aquizz/app.py
|
import importlib
import os
from flask import Flask, send_file
from flask_admin import Admin
from flask_restful import Api
from flask_security import Security
from aquizz.models import db
from aquizz.admin import AdminProtectedModelView, AdminProtectedIndexView, QuestionAdminView, QuizAdminView
from aquizz.models import Question, User, Quiz, user_datastore
from aquizz.api import QuizListResource, QuizResource
def create_app():
    """Application factory: build the Flask app with DB, security, admin, API.

    The settings module is chosen by FLASK_SETTINGS (default 'develop');
    an unknown or broken module falls back to the develop config.
    """
    settings = os.getenv('FLASK_SETTINGS', 'develop')
    config_obj = _load_config(settings)
    app = Flask(__name__, static_folder=config_obj.STATIC_FOLDER)
    app.config.from_object(config_obj)
    db.init_app(app)

    @app.route('/')
    def home():
        return send_file(os.path.join(app.config['CLIENT_BUILD_FOLDER'], 'index.html'))

    @app.route('/favicon.ico')
    def favicon():
        return send_file(os.path.join(app.config['CLIENT_BUILD_FOLDER'], 'favicon.ico'))

    if app.config['DEBUG']:
        # Allow a standalone dev client (e.g. a webpack dev server on another
        # port) to call the API during development.
        @app.after_request
        def allow_standalone_client(response):
            response.headers['Access-Control-Allow-Origin'] = '*'
            response.headers['Access-Control-Allow-Headers'] = 'Origin, X-Requested-With, Content-Type, Accept'
            return response

    Security(app, user_datastore)
    setup_admin(app)
    setup_api(app)
    return app


def _load_config(settings_name):
    """Import aquizz.settings.<settings_name>.Config, falling back to develop.

    Deduplicates the original try/except which repeated the import +
    attribute lookup in both branches.
    """
    try:
        module = importlib.import_module('aquizz.settings.' + settings_name)
        return module.Config
    except (ImportError, AttributeError):
        # Unknown or incomplete settings module: use the develop config.
        module = importlib.import_module('aquizz.settings.develop')
        return module.Config
def setup_admin(app):
    """Attach the admin portal (dashboard + model views) to *app*."""
    portal = Admin(
        app,
        name='aquizz',
        index_view=AdminProtectedIndexView(),
        template_mode='bootstrap3',
        url='/admin',
    )
    portal.add_view(QuestionAdminView(Question))
    portal.add_view(QuizAdminView(Quiz))
    return portal
def setup_api(app):
    """Register the REST resources under the /api/v1 prefix."""
    api = Api(app, prefix='/api/v1')
    api.add_resource(QuizListResource, '/quiz', endpoint='quiz')
    api.add_resource(
        QuizResource, '/quiz/<string:quiz_id>', endpoint='quiz_object')
|
{"/aquizz/settings/production.py": ["/aquizz/settings/base.py"], "/aquizz/settings/develop.py": ["/aquizz/settings/base.py"], "/aquizz/app.py": ["/aquizz/models.py", "/aquizz/admin.py", "/aquizz/api.py"], "/aquizz/wsgi.py": ["/aquizz/app.py"]}
|
23,682
|
citizen-stig/aquizz
|
refs/heads/master
|
/aquizz/wsgi.py
|
import os
from aquizz.app import create_app
# Default to production settings unless the environment already chose one.
os.environ.setdefault('FLASK_SETTINGS', 'production')
# WSGI entry point consumed by the application server.
app = create_app()
|
{"/aquizz/settings/production.py": ["/aquizz/settings/base.py"], "/aquizz/settings/develop.py": ["/aquizz/settings/base.py"], "/aquizz/app.py": ["/aquizz/models.py", "/aquizz/admin.py", "/aquizz/api.py"], "/aquizz/wsgi.py": ["/aquizz/app.py"]}
|
23,683
|
citizen-stig/aquizz
|
refs/heads/master
|
/manage.py
|
import statistics
from flask_script import Manager, Command, Option
import numpy as np
from aquizz import app as app_factory
from aquizz import models
from aquizz import utils
class CreateAdminUser(Command):
    """flask-script command: create a user and grant it the admin role."""
    option_list = (
        Option('--email', '-e', dest='email'),
        Option('--password', '-p', dest='password'),
    )

    def run(self, email, password):
        # Role is created on first use and reused afterwards.
        admin_role = models.user_datastore.find_or_create_role('admin')
        # NOTE(review): password is passed straight through — confirm the
        # datastore hashes it (flask_security.hash_password is not used here).
        admin_user = models.user_datastore.create_user(email=email, password=password)
        models.user_datastore.add_role_to_user(admin_user, admin_role)
class LoadBaseQuestions(Command):
    """flask-script command: import seed questions from a data file."""
    option_list = (
        Option('--filename', '-f', dest='filename'),
    )

    def run(self, filename):
        utils.load_data_from_file(filename)
class PrintStat(Command):
    """flask-script command: print score statistics over finished quizzes."""

    def run(self):
        # Only finished quizzes contribute a score.
        scores = [quiz.get_score()
                  for quiz in models.Quiz.objects()
                  if quiz.finished_at]
        if not scores:
            # statistics.mean/median raise StatisticsError on empty input;
            # report instead of crashing on a fresh database.
            print('No completed quizzes yet')
            return
        print('Avg: ', statistics.mean(scores))
        print('Median:', statistics.median(scores))
        a = np.array(scores)
        print('p90:', np.percentile(a, 90))
if __name__ == '__main__':
    # Build the app and expose the management commands via flask-script.
    app = app_factory.create_app()
    manager = Manager(app)
    manager.add_command('create_admin_user', CreateAdminUser())
    manager.add_command('load_base', LoadBaseQuestions())
    manager.add_command('print_stat', PrintStat())
    manager.run()
|
{"/aquizz/settings/production.py": ["/aquizz/settings/base.py"], "/aquizz/settings/develop.py": ["/aquizz/settings/base.py"], "/aquizz/app.py": ["/aquizz/models.py", "/aquizz/admin.py", "/aquizz/api.py"], "/aquizz/wsgi.py": ["/aquizz/app.py"]}
|
23,684
|
citizen-stig/aquizz
|
refs/heads/master
|
/aquizz/api.py
|
import random
from bson import ObjectId
from flask_restful import Resource, abort
from marshmallow import fields, Schema
from webargs.flaskparser import use_args
from aquizz import models, exc
class NewQuizSchema(Schema):
    """Request body for creating a quiz."""
    player_name = fields.String(required=False)
class ItemSchema(Schema):
    """Request body for answering a single question."""
    question_id = fields.String(required=True)
    answer = fields.String(required=True)
class QuizListResource(Resource):
    """Create quizzes and (placeholder) list questions."""
    # Number of questions drawn into each new quiz.
    size = 10

    def get(self):
        # NOTE(review): hard-coded placeholder payload — presumably unfinished.
        return {
            'questions': [1, 2, 3]
        }

    @use_args(NewQuizSchema())
    def post(self, args):
        """Create a new quiz from `size` random fully-prepared questions."""
        player_name = args.get('player_name', 'Anonymous')
        # Only questions with at least 4 options are eligible.
        all_questions = list(models.Question.objects(
            __raw__={'$where': 'this.options.length >= 4'}))
        quiz = models.Quiz(player_name=player_name)
        questions = []
        if len(all_questions) < self.size:
            # Not enough material to build a quiz of the required size.
            return abort(500)
        for question in random.sample(all_questions, self.size):
            # Shuffle option order so the correct one is not positionally biased.
            options = [x.value for x in question.options]
            random.shuffle(options)
            questions.append({
                'id': str(question.id),
                'text': question.text,
                'options': options,
            })
            item = models.Item(question=question)
            quiz.items.append(item)
        quiz.save()
        return {
            'id': str(quiz.id),
            'questions': questions,
        }
class QuizResource(Resource):
    """Answer a single question inside an existing quiz."""

    @use_args(ItemSchema())
    def post(self, args, quiz_id):
        quiz = models.Quiz.objects.get_or_404(pk=quiz_id)
        question_id = ObjectId(args['question_id'])
        try:
            # None means the question is not part of this quiz (handled below).
            is_correct = quiz.check_answer(question_id, args['answer'])
            quiz.check_if_completed()
        except exc.QuizException as e:
            # e.g. question already answered
            return abort(400, message=str(e))
        if is_correct is None:
            return abort(404, message='Question not found in quiz')
        correct_options = quiz.get_correct_options(question_id)
        return {
            'is_correct': is_correct,
            'correct_options': [x.value for x in correct_options]
        }
|
{"/aquizz/settings/production.py": ["/aquizz/settings/base.py"], "/aquizz/settings/develop.py": ["/aquizz/settings/base.py"], "/aquizz/app.py": ["/aquizz/models.py", "/aquizz/admin.py", "/aquizz/api.py"], "/aquizz/wsgi.py": ["/aquizz/app.py"]}
|
23,685
|
luancaius/kaggle-manager
|
refs/heads/master
|
/service.py
|
import util
import sys
class Service:
    """Bootstrap a local workspace for a Kaggle competition.

    NOTE(review): DownloadData/CreateScripts/ReplaceVariables all rely on
    self.path, which is only assigned in InitCompetition — call
    InitCompetition() rather than the individual steps.
    """

    def __init__(self, settings):
        self.settings = settings
        self.compName = settings["compName"]
        # Template scripts copied into the competition folder; the copy step
        # renames them from .txt to .py (see util.copyFileTo).
        self.filesToCopy = ['util.txt', 'customEmail.txt', 'main.txt', 'prepareData.txt',
                            'training.txt', 'tuning.txt', 'submission.txt']
        self.templatesFolder = 'templates'
        self.rootPath = settings["defaultKagglePath"]

    def DownloadData(self):
        """Fetch the competition data via the kaggle CLI into <path>/data."""
        print("Downloading data")
        util.createDir(self.path)
        self.path_data = self.path+'/data'
        util.createDir(self.path_data)
        command = 'kaggle competitions download ' + \
            self.compName + ' -p '+self.path_data
        util.execute(command)

    def CreateScripts(self):
        """Copy the template scripts into <path>/script and create <path>/submission."""
        print("Creating scripts")
        self.path_script = self.path+'/script'
        util.createDir(self.path_script)
        self.path_submission = self.path+'/submission'
        util.createDir(self.path_submission)
        for filename in self.filesToCopy:
            util.copyFileTo(self.templatesFolder, filename, self.path_script)

    def ReplaceVariables(self):
        """Fill the @placeholder@ tokens in the copied scripts from settings."""
        submissionFile = self.path_script+'/submission.py'
        emailFile = self.path_script+'/customEmail.py'
        util.replaceVariableInText(submissionFile, "@compName@", self.compName)
        util.replaceVariableInText(
            emailFile, "@user_email@", self.settings["user"])
        util.replaceVariableInText(
            emailFile, "@password_email@", self.settings["password"])
        util.replaceVariableInText(
            emailFile, "@smtp_server@", self.settings["smtpServer"])
        util.replaceVariableInText(emailFile, "@port@", self.settings["port"])
        util.replaceVariableInText(
            emailFile, "@to_email@", self.settings["destin"])

    def InitCompetition(self):
        """Full pipeline: download data, create scripts, substitute variables."""
        print('Init competition')
        self.path = self.rootPath+'/'+self.compName
        self.DownloadData()
        self.CreateScripts()
        self.ReplaceVariables()
|
{"/service.py": ["/util.py"], "/manager.py": ["/service.py"]}
|
23,686
|
luancaius/kaggle-manager
|
refs/heads/master
|
/util.py
|
import os
import shutil
def copyFileTo(src, filename, dst):
    """Copy src/filename into dst and change its extension to .py.

    Template files are stored as .txt; after copying, the file is renamed
    to <stem>.py in the destination directory. Paths are now built with
    os.path.join instead of '/' concatenation, so this works regardless
    of the platform's separator.
    """
    shutil.copy(os.path.join(src, filename), dst)
    newfilename = os.path.splitext(filename)[0] + '.py'
    os.rename(os.path.join(dst, filename), os.path.join(dst, newfilename))
def createDir(folder):
    """Create *folder* if it does not exist, printing progress.

    Fixes the original try/except/else flow, whose else-branch printed
    "Successfully created the directory" even when the directory already
    existed and nothing was created.
    """
    if os.path.exists(folder):
        return
    print('Creating folder ' + folder)
    try:
        os.mkdir(folder)
    except OSError:
        print("Creation of the directory %s failed" % folder)
    else:
        print("Successfully created the directory %s " % folder)
def replaceVariableInText(filename, textToReplace, text):
    """Replace every occurrence of *textToReplace* in *filename* with *text*.

    Uses context managers so the file handles are closed even if reading
    or writing raises (the original left handles open on error).
    """
    with open(filename, 'r') as f:
        filedata = f.read()
    newdata = filedata.replace(textToReplace, text)
    with open(filename, 'w') as f:
        f.write(newdata)
def execute(command):
    # Run *command* through the system shell.
    # NOTE(review): callers interpolate the competition name into the
    # command string — acceptable for trusted local input, but
    # subprocess.run with an argument list would be safer.
    os.system(command)
|
{"/service.py": ["/util.py"], "/manager.py": ["/service.py"]}
|
23,687
|
luancaius/kaggle-manager
|
refs/heads/master
|
/manager.py
|
from service import Service
import json
# Load user settings (JSON despite the .txt extension) and interactively
# bootstrap a workspace for the requested competition.
with open('settings.txt') as f:
    settings = json.load(f)
compName = input("Type Kaggle competition name to Download:")
settings["compName"] = compName
service = Service(settings)
service.InitCompetition()
exit(0)
|
{"/service.py": ["/util.py"], "/manager.py": ["/service.py"]}
|
23,688
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/islandoraUtils/metadata/eaccpf.py
|
#!/usr/bin/env python2.6
import logging
import datetime
from StringIO import StringIO
import base64
from islandoraUtils import xmlib
etree = xmlib.import_etree()
class EACCPF(object):
    '''
    A python library to deal with (a tiny subset) of EAC-CPF
    See http://eac.staatsbibliothek-berlin.de/eac-cpf-schema.html
    TODO: Ensure ordering of elements, for validation purposes...

    NOTE: this is Python 2 code (StringIO import, `unicode`, `file`).
    '''
    NSMAP = {
        'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
        None: 'urn:isbn:1-931666-33-4', #default namespace
        'xlink': 'http://www.w3.org/1999/xlink'
    }

    def __init__(self, id, element=None, xml=None, agency=('DGI', 'DiscoveryGarden Inc.'), language=('eng', 'English'), script=('Latn', 'Latin'), loggerName='islandoraUtils.metadata.eaccpf'):
        '''
        "--EAC-CPF" will be appended to the ID to create a "recordId"
        "language" is that used for maintenance, not contents
        '''
        self.logger = logging.getLogger(loggerName)
        #TODO: Validate any input data, to make sure that it looks like valid eac-cpf
        if element and xml:
            raise Exception('Either element or xml should be given, not both')
        elif element:
            self.element = element
        elif xml:
            self.element = etree.fromstring(xml)
        else:
            #Build a fairly bare eac-cpf schema for a base.
            root = etree.Element('eac-cpf',
                attrib={'{http://www.w3.org/2001/XMLSchema-instance}schemaLocation': 'urn:isbn:1-931666-33-4 http://eac.staatsbibliothek-berlin.de/schema/cpf.xsd'},
                nsmap=self.NSMAP)
            control = etree.SubElement(root, 'control')
            etree.SubElement(control, 'recordId').text = '%(id)s--EAC-CPF' % {'id': id}
            etree.SubElement(control, 'maintenanceStatus').text = 'new'
            agent = etree.SubElement(control, 'maintenanceAgency')
            #FIXME... The unpacking use could probably be a little clearer... Anyway.
            lang = etree.SubElement(control, 'languageDeclaration')
            a, b = language
            etree.SubElement(lang, 'language', {'languageCode': a}).text = b
            a, b = script
            etree.SubElement(lang, 'script', {'scriptCode': a}).text = b
            etree.SubElement(control, 'maintenanceHistory')
            etree.SubElement(control, 'sources')
            #Deliciously awesome tuple unpacking!
            a, b = agency
            #etree.SubElement(agent, 'agencyCode').text = a #Needs to be in a defined set to validate
            etree.SubElement(agent, 'agencyName').text = b
            etree.SubElement(root, 'cpfDescription')
            self.element = root
        self.__check_base()

    def __check_base(self):
        '''
        We want to be able to make a few assumptions regarding what
        elements will be present, so let's ensure that a few sections are
        present (fake validation?)
        '''
        el = self.element
        for i in ['control', 'control/maintenanceHistory', 'control/sources', 'cpfDescription']:
            if el.find(i) is None:
                raise Exception('No %s element!' % i)

    def __str__(self):
        '''Get the XML as a string'''
        return etree.tostring(self.element, pretty_print=True, encoding="utf8")

    def add_maintenance_event(self, type="derived", time="now", agent_type="human", agent="Me"):
        '''
        If 'time' is not provided, or is 'now', utcnow will be used.
        If 'time' is an instance of datetime, it will be used directly
        Otherwise, 'time' will be interpreted as a Unix timestamp.
        '''
        mh = self.element.find('control/maintenanceHistory')
        if time == "now" or time is None:
            t = datetime.datetime.utcnow().isoformat()
        elif isinstance(time, datetime.datetime):
            t = time.isoformat()
        else:
            # FIX: was datetime.utcfromtimestamp(...) — an AttributeError on
            # the datetime *module*; the classmethod lives on datetime.datetime.
            t = datetime.datetime.utcfromtimestamp(time).isoformat()
        me = etree.SubElement(mh, 'maintenanceEvent')
        etree.SubElement(me, 'eventType').text = type
        etree.SubElement(me, 'eventDateTime', {'standardDateTime': t})
        etree.SubElement(me, 'agentType').text = agent_type
        etree.SubElement(me, 'agent').text = agent

    #TODO: Should probably check that this source does not already exists in the list of sources...
    def add_XML_source(self, caption="XML source", xml=None):
        '''
        Currently, 'xml' can either be an instance of etree.Element,
        or can be a string containing the XML
        '''
        if xml is None:
            raise Exception('No XML provided!')
        sources = self.element.find('control/sources')
        source = etree.SubElement(sources, 'source')
        etree.SubElement(source, 'sourceEntry').text = caption
        try:
            xmlWrap = etree.SubElement(source, 'objectXMLWrap')
            xmlWrap.append(xml)
        except TypeError:
            # 'xml' was a string, not an element: parse it first.
            xmlWrap.append(etree.fromstring(xml))

    #TODO: Should probably check that this source does not already exists in the list of sources...
    def add_bin_source(self, caption="Binary source (base-64 encoded)", obj=None, encoded=False):
        '''
        base64 encodes the provided obj, and adds into
        /eac-cpf/control/sources/source/objectBinWrap element
        '''
        #FIXME: Seems like it might be very memory inefficient... Probably better off creating a temp-file, though how to deal with knowing the size before hand... Determine how large the obj is before hand, and allocate double?
        d64 = StringIO()
        if isinstance(obj, file):
            base64.encode(obj, d64)
        elif obj is not None:
            d64.write(base64.encodestring(obj))
        sources = self.element.find('control/sources')
        source = etree.SubElement(sources, 'source')
        etree.SubElement(source, 'sourceEntry').text = caption
        etree.SubElement(source, 'objectBinWrap').text = d64.getvalue()
        d64.close()

    #FIXME: Should probably verify that 'role' is in the agreed upon vocab?
    #TODO: Checking whether or not a the entry to add already exists would probably be a good idea.
    def add_name_entry(self, role='primary', name={'forename': 'first', 'middle': 'middle', 'surname': 'last'}, entityType='person'):
        '''
        * "name" should be a dictionary whose keys will be used as the
          "localType" attribute in nameEntry/part, with the text set
          to the value for the given key
        * "role" will be used as the "localType" attribute on the
          nameEntry, and because we are allowing only one "primary",
          old primaries will be made into "alt[s]"'''
        id = self.element.find('cpfDescription/identity')
        if id is None:
            id = etree.SubElement(self.element.find('cpfDescription'), 'identity')
        et = id.find('entityType')
        if et is None:
            et = etree.SubElement(id, 'entityType').text = entityType
        # FIX: was `role is 'primary'` — identity comparison on strings only
        # works by accident for interned literals; use equality.
        if role == 'primary':
            for old_primary in id.findall('nameEntry[@localType="primary"]'):
                old_primary.set('localType', 'alt')
        ne = etree.SubElement(id, 'nameEntry', {'localType': role})
        for k, v in name.items():
            etree.SubElement(ne, 'part', {'localType': k}).text = v

    def __get_subelement(self, path):
        '''
        Get (possibly creating) a node at the given path.
        '''
        toReturn = self.element.find(path)
        if toReturn is None:
            element, sep, sub = path.rpartition('/')
            if not element:
                el = self.element
            else:
                el = self.element.find(element)
                if el is None:
                    # Recursively create the missing ancestors.
                    el = self.__get_subelement(element)
            return etree.SubElement(el, sub)
        else:
            return toReturn

    def add_exist_dates(self, birth=None, death=None):
        '''
        Adds a date of birth and/or death to the description.
        '''
        if birth:
            self.__get_subelement('cpfDescription/description/existDates/dateRange/fromDate').set('standardDate', unicode(birth))
        if death:
            self.__get_subelement('cpfDescription/description/existDates/dateRange/toDate').set('standardDate', unicode(death))

    def add_bio(self, bio=None, wipe=True):
        '''
        bio should be sequence of XML elements (which includes an element
        with children!--hopefully with the set of elements permitted by
        the EAC-CPF schema)... We'll try to store it even if it's not
        (first by trying to create XML from a string, and falling-back to
        throwing the what was provided into a <p> element)...
        TODO: Might need to create copies of elements when they are
        passed in, due to how lxml works... Dunno.
        '''
        try:
            biogHist = self.__get_subelement('cpfDescription/description/biogHist')
            if wipe:
                biogHist.clear()
            biogHist.extend(bio)
            self.logger.debug('Added bio subelements via extend')
        except TypeError:
            try:
                biogHist.extend(etree.fromstring(bio))
                self.logger.debug('Added bio subelements after creating etree from string.')
            except (etree.XMLSyntaxError, ValueError):
                etree.SubElement(biogHist, 'p').text = bio
                self.logger.debug('Added bio as text of a <p> tag')

    def add_chron_list(self, item_list):
        # Each item becomes a chronItem under biogHist/chronList.
        chronList = self.__get_subelement('cpfDescription/description/biogHist/chronList')
        for item in item_list:
            chronItem = etree.SubElement(chronList, 'chronItem')
            self.__add_elements(chronItem, item)

    def __add_elements(self, element, item):
        # Dict -> subelements; date/dateRange/descriptiveNote get special shapes.
        for tag, value in item.items():
            if tag == 'dateRange':
                dr = etree.SubElement(element, tag)
                for tg, val in value.items():
                    el = etree.SubElement(dr, tg)
                    el.text = unicode(val)
                    el.set('standardDate', unicode(val))
            elif tag == 'date':
                el = etree.SubElement(element, tag)
                el.text = unicode(value)
                el.set('standardDate', unicode(value))
            elif tag in ['descriptiveNote']:
                etree.SubElement(etree.SubElement(element, tag), 'p').text = value
            else:
                etree.SubElement(element, tag).text = value

    def __add_address(self, element, role, addr=None):
        '''
        "Private" function, used to actually add the address. Takes an element, as the address can be added
        at (at least) two different "levels" in the schema
        '''
        address = etree.SubElement(etree.SubElement(element, 'place', {'localType': role}), 'address')
        for k, v in addr.items():
            etree.SubElement(address, 'addressLine', {'localType': k}).text = v

    def add_address(self, role='primary', addr=None):
        '''
        Add an address entry under the eac-cpf/cpfDescription/description...
        Multiple place entries will be automatically placed under a "places"
        entry.
        Only a single "primary" entry is allowed with any number of "alt[s]"
        (so if you attempt to add an additional "primary" when there is
        already one, the old one will be made into an "alt")
        '''
        tmp_desc = self.element.find('cpfDescription/description')
        if tmp_desc is not None:
            tmp_pl = self.element.findall('cpfDescription/description/place')
            #FIXME: Should merge multiple "places", if found?
            tmp_pls = self.element.find('cpfDescription/description/places')
            if tmp_pl:
                # FIX: was `if not tmp_pls` — lxml element truthiness reflects
                # child count, so an existing *empty* places element would get
                # a duplicate sibling; test for absence explicitly.
                if tmp_pls is None:
                    places = etree.SubElement(self.element.find('cpfDescription/description'), 'places')
                else:
                    places = tmp_pls
                #TODO: Move the existing "place" element(s) under the "places" element... This could probably use some more testing?
                places.extend(tmp_pl)
                # FIX: was `role is 'primary'` — use equality, not identity.
                if role == 'primary':
                    for place in places.findall('place[@localType="primary"]'):
                        place.set('localType', 'alt')
                node = places
            else:
                node = tmp_desc
        else:
            node = etree.SubElement(self.element.find('cpfDescription'), 'description')
        self.__add_address(node, role, addr)

    def add_relation(self, type, url=None, elements=None):
        # Append a <type> element under cpfDescription/relations, optionally
        # carrying an xlink:href and further child elements.
        relations = self.__get_subelement('cpfDescription/relations')
        rel_el = etree.SubElement(relations, type)
        if url:
            rel_el.set('{%(xlink)s}href' % self.NSMAP, url)
        if elements:
            self.__add_elements(rel_el, elements)
def testSchema():
    '''
    A bit of simple testing--create a garbage EAC-CPF schema
    This is really unit testing but it is not in a 'if __name__ == '__main__':'
    to simplify the structure because of the dynamic importing of etree
    '''
    test = EACCPF('test')
    test.add_maintenance_event()
    test.add_XML_source('Blargh', '<Honk/>')
    test.add_maintenance_event(type='revised', agent="Him")
    test.add_XML_source('Bob', etree.Element('Loblaw'))
    test.add_maintenance_event(type='revised', agent="They", agent_type="machine")
    #with open('./FileHandler.py') as aFile:
    #    test.add_bin_source('Try a file object', aFile)
    test.add_name_entry()
    test.add_name_entry(name={'a': 'asdf', 'b': '2', 'c': '3'})
    test.add_exist_dates('1923', '2010')
    test.add_address(addr={'line1': 'here', 'line2': 'there', 'country': 'Everywhere'})
    test.add_address(addr={'line1': 'asdf', 'line2': 'qwerty', 'country': 'yuiop'})
    test.add_bio('this is not xml!')
    b_tmp = etree.Element('bio')
    etree.SubElement(b_tmp, 'p').text = 'Ceci est de XML'
    #test.add_bio(b_tmp)
    test.add_bio("<bio><p>C'est de la XML fausse!</p><asdf><p>other</p></asdf></bio>")
    test.add_bin_source('Some text and stuff...', '<>></\'e2345^<!')
    # Exercise every chronItem shape: single date, full range, open-ended ranges.
    cl = [{
        'date': 2011,
        'event': 'Achieved PhD'
    },{
        'dateRange': {
            'fromDate': 2001,
            'toDate': 2011
        },
        'event': '10 years'
    },{
        'dateRange': {
            'fromDate': 1999
        },
        'event': 'Since 1999'
    },{
        'dateRange': {
            'toDate': 2030
        },
        'event': 'Until 2030'
    }
    ]
    test.add_chron_list(cl)
    test.add_relation('resourceRelation', url="http://www.example.org/blah.asdf", elements={'relationEntry': 'Academic webpage', 'descriptiveNote': 'Blah blah blah.'})
    test.add_relation('cpfRelation', elements={'dateRange': {'fromDate': 1999, 'toDate': 2005}, 'descriptiveNote': 'Was a member and stuff...'})
    el = None
    #el = test.element.find('control/sources/source/objectBinWrap')
    if el is not None:
        print('Decoded base64 test:\n%s' % base64.decodestring(el.text))
    return test
if __name__ == '__main__':
    # Build the throwaway test document and dump it as XML.
    test = testSchema()
    # = etree.ElementTree(test.element).xmlschema(etree.parse(source='http://eac.staatsbibliothek-berlin.de/schema/cpf.xsd'))
    print(test)
    #print(validator(test.element))
    #print(validator.error_log)
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,689
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/islandoraUtils/metadata/tests/fedora_relationships.py
|
import unittest
from islandoraUtils.metadata.fedora_relationships import rels_namespace, rels_object, rels_ext_string, rels_int_string, fedora_relationship, rels_predicate
from lxml import etree
import xml.etree.ElementTree
class XmlHelper:
    """Normalise XML strings so two serialisations can be compared for equality."""

    @classmethod
    def mangle(cls, xmlStr):
        """Strip ignorable whitespace and re-serialise through ElementTree."""
        whitespace_agnostic = etree.XMLParser(remove_blank_text=True) # xml parser ignoring whitespace
        tree = etree.fromstring(xmlStr, whitespace_agnostic)
        compact = etree.tostring(tree, pretty_print=False)
        element = xml.etree.ElementTree.XML(compact)
        return xml.etree.ElementTree.tostring(element, 'UTF-8')
class TestRelsExtBigD(unittest.TestCase):
    """RELS-EXT tests against a base using capitalised rdf:Description."""

    def setUp(self):
        # Minimal RELS-EXT datastream each test starts from.
        xml = """
<rdf:RDF xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:fedora="info:fedora/fedora-system:def/relations-external#" xmlns:fedora-model="info:fedora/fedora-system:def/model#" xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#">
<rdf:Description rdf:about="info:fedora/cogru:1332">
<fedora:isMemberOfCollection rdf:resource="info:fedora/cogru:1130"></fedora:isMemberOfCollection>
<fedora-model:hasModel xmlns="info:fedora/fedora-system:def/model#" rdf:resource="info:fedora/cogru:cogruETD"></fedora-model:hasModel>
</rdf:Description>
</rdf:RDF>
"""
        self.xml = xml
        self.relsext = rels_ext_string('cogru:1332', rels_namespace('islandora','http://islandora.ca/ontology/relsext#'), 'islandora', xml)

    def tearDown(self):
        self.relsext = None

    def test_add_pid(self):
        """Adding a PID relationship serialises as an islandora:test resource."""
        xmlStr = """
<rdf:RDF xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:islandora="http://islandora.ca/ontology/relsext#" xmlns:fedora="info:fedora/fedora-system:def/relations-external#" xmlns:fedora-model="info:fedora/fedora-system:def/model#" xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#">
<rdf:Description rdf:about="info:fedora/cogru:1332">
<fedora:isMemberOfCollection rdf:resource="info:fedora/cogru:1130"></fedora:isMemberOfCollection>
<fedora-model:hasModel xmlns="info:fedora/fedora-system:def/model#" rdf:resource="info:fedora/cogru:cogruETD"></fedora-model:hasModel>
<islandora:test rdf:resource="info:fedora/cogru:1337"/>
</rdf:Description>
</rdf:RDF>
"""
        self.relsext.addRelationship('test', 'cogru:1337')
        expected_string = XmlHelper.mangle(xmlStr)
        result_string = XmlHelper.mangle(self.relsext.toString())
        self.assertEqual(expected_string, result_string, 'generated xml does not match')

    def test_add_literal(self):
        """Adding a literal relationship serialises as element text, not rdf:resource."""
        xmlStr = """
<rdf:RDF xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:islandora="http://islandora.ca/ontology/relsext#" xmlns:fedora="info:fedora/fedora-system:def/relations-external#" xmlns:fedora-model="info:fedora/fedora-system:def/model#" xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#">
<rdf:Description rdf:about="info:fedora/cogru:1332">
<fedora:isMemberOfCollection rdf:resource="info:fedora/cogru:1130"></fedora:isMemberOfCollection>
<fedora-model:hasModel xmlns="info:fedora/fedora-system:def/model#" rdf:resource="info:fedora/cogru:cogruETD"></fedora-model:hasModel>
<islandora:isViewableByUser>Jon</islandora:isViewableByUser>
</rdf:Description>
</rdf:RDF>
"""
        self.relsext.addRelationship('isViewableByUser', rels_object('Jon',rels_object.LITERAL))
        expected_string = XmlHelper.mangle(xmlStr)
        result_string = XmlHelper.mangle(self.relsext.toString())
        self.assertEqual(expected_string, result_string, 'generated xml does not match')

    def test_purge_literal(self):
        #add literal then delete it
        self.relsext.addRelationship('isViewableByUser', rels_object('Jon',rels_object.LITERAL))
        self.relsext.purgeRelationships(predicate='isViewableByUser')
        # After the purge the document should match the original input.
        expected_string = XmlHelper.mangle(self.xml)
        result_string = XmlHelper.mangle(self.relsext.toString())
        self.assertEqual(expected_string, result_string, 'generated xml does not match')

    def test_get_literal(self):
        #add literal then read it back as a (subject, predicate, object) triple
        self.relsext.addRelationship('isViewableByUser', rels_object('Jon',rels_object.LITERAL))
        relationships = self.relsext.getRelationships(predicate='isViewableByUser')
        self.assertEqual(len(relationships), 1, 'Too many relationships returned')
        relationships = relationships[0]
        self.assertEqual(relationships[0], 'cogru:1332', 'Incorrect Subject')
        self.assertEqual("%s" % relationships[1], 'isViewableByUser', 'Incorrect Predicate')
        self.assertEqual("%s" % relationships[2], 'Jon', 'Incorrect literal')
class TestRelsExtSmallD(unittest.TestCase):
def setUp(self):
xml = """
<rdf:RDF xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:fedora="info:fedora/fedora-system:def/relations-external#" xmlns:fedora-model="info:fedora/fedora-system:def/model#" xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#">
<rdf:description rdf:about="info:fedora/cogru:1332">
<fedora:isMemberOfCollection rdf:resource="info:fedora/cogru:1130"></fedora:isMemberOfCollection>
<fedora-model:hasModel xmlns="info:fedora/fedora-system:def/model#" rdf:resource="info:fedora/cogru:cogruETD"></fedora-model:hasModel>
</rdf:description>
</rdf:RDF>
"""
self.xml = xml
self.relsext = rels_ext_string('cogru:1332', rels_namespace('islandora','http://islandora.ca/ontology/relsext#'), 'islandora', xml)
def tearDown(self):
self.relsext = None
def test_add_literal(self):
xmlStr = """
<rdf:RDF xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:islandora="http://islandora.ca/ontology/relsext#" xmlns:fedora="info:fedora/fedora-system:def/relations-external#" xmlns:fedora-model="info:fedora/fedora-system:def/model#" xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#">
<rdf:description rdf:about="info:fedora/cogru:1332">
<fedora:isMemberOfCollection rdf:resource="info:fedora/cogru:1130"></fedora:isMemberOfCollection>
<fedora-model:hasModel xmlns="info:fedora/fedora-system:def/model#" rdf:resource="info:fedora/cogru:cogruETD"></fedora-model:hasModel>
<islandora:isViewableByUser>Jon</islandora:isViewableByUser>
</rdf:description>
</rdf:RDF>
"""
self.relsext.addRelationship('isViewableByUser', rels_object('Jon',rels_object.LITERAL))
expected_string = XmlHelper.mangle(xmlStr)
result_string = XmlHelper.mangle(self.relsext.toString())
self.assertEqual(expected_string, result_string, 'generated xml does not match')
def test_purge_literal(self):
#add literal then delete it
self.relsext.addRelationship('isViewableByUser', rels_object('Jon',rels_object.LITERAL))
self.relsext.purgeRelationships(predicate='isViewableByUser')
expected_string = XmlHelper.mangle(self.xml)
result_string = XmlHelper.mangle(self.relsext.toString())
self.assertEqual(expected_string, result_string, 'generated xml does not match')
def test_get_literal(self):
#add literal then delete it
self.relsext.addRelationship('isViewableByUser', rels_object('Jon',rels_object.LITERAL))
relationships = self.relsext.getRelationships(predicate='isViewableByUser')
self.assertEqual(len(relationships), 1, 'Too many relationships returned')
relationships = relationships[0]
self.assertEqual(relationships[0], 'cogru:1332', 'Incorrect Subject')
self.assertEqual("%s" % relationships[1], 'isViewableByUser', 'Incorrect Predicate')
self.assertEqual("%s" % relationships[2], 'Jon', 'Incorrect literal')
class TestFedoraRelationship(unittest.TestCase):
def test_two_namespace_literal(self):
xmlStr = """
<rdf:RDF xmlns:coal="http://www.coalliance.org/ontologies/relsint" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:fedora="info:fedora/fedora-system:def/relations-external#" xmlns:jon="http://jebus/trainstation">
<rdf:Description rdf:about="info:fedora/coccc:2040">
<jon:feezle>JON</jon:feezle>
</rdf:Description>
</rdf:RDF>
"""
relationship = fedora_relationship([rels_namespace('coal','http://www.coalliance.org/ontologies/relsint'), rels_namespace('jon','http://jebus/trainstation')])
relationship.addRelationship('coccc:2040', rels_predicate('jon','feezle'), rels_object('JON',rels_object.LITERAL))
result_string = XmlHelper.mangle(relationship.toString())
expected_string = XmlHelper.mangle(xmlStr)
self.assertEqual(result_string, expected_string, 'Generated XML Incorrect')
def test_one_namespace_literal(self):
xmlStr = """
<rdf:RDF xmlns:coal="http://www.coalliance.org/ontologies/relsint" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:fedora="info:fedora/fedora-system:def/relations-external#">
<rdf:Description rdf:about="info:fedora/coccc:2040">
<coal:HasAwesomeness>JON</coal:HasAwesomeness>
</rdf:Description>
</rdf:RDF>
"""
relationship = fedora_relationship(rels_namespace('coal','http://www.coalliance.org/ontologies/relsint'), 'coal')
relationship.addRelationship('coccc:2040', 'HasAwesomeness', rels_object('JON',rels_object.LITERAL))
result_string = XmlHelper.mangle(relationship.toString())
expected_string = XmlHelper.mangle(xmlStr)
self.assertEqual(result_string, expected_string, 'Generated XML Incorrect')
def test_literal_pid_dsid(self):
xmlStr= """
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:fedora="info:fedora/fedora-system:def/relations-external#">
<rdf:Description rdf:about="info:fedora/coccc:2040">
<fedora:HasAwesomeness>JON</fedora:HasAwesomeness>
<fedora:HasTN rdf:resource="info:fedora/coccc:2030"/>
</rdf:Description>
<rdf:Description rdf:about="info:fedora/coccc:2033">
<fedora:HasTN rdf:resource="info:fedora/coccc:2040"/>
</rdf:Description>
<rdf:Description rdf:about="info:fedora/coccc:2033/DSID">
<fedora:HasTN rdf:resource="info:fedora/coccc:2040/DSID"/>
</rdf:Description>
</rdf:RDF>
"""
relationship = fedora_relationship()
relationship.addRelationship('coccc:2040', 'HasAwesomeness', rels_object('JON',rels_object.LITERAL))
relationship.addRelationship('coccc:2040', 'HasTN', rels_object('coccc:2030',rels_object.PID))
relationship.addRelationship('coccc:2033', 'HasTN', rels_object('coccc:2040',rels_object.PID))
relationship.addRelationship('coccc:2033/DSID', 'HasTN', rels_object('coccc:2040/DSID',rels_object.DSID))
result_string = XmlHelper.mangle(relationship.toString())
expected_string = XmlHelper.mangle(xmlStr)
self.assertEqual(result_string, expected_string, 'Generated XML Incorrect')
def test_get_relationships(self):
xmlStr= """
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:fedora="info:fedora/fedora-system:def/relations-external#">
<rdf:Description rdf:about="info:fedora/coccc:2040">
<fedora:HasAwesomeness>JON</fedora:HasAwesomeness>
<fedora:HasTN rdf:resource="info:fedora/coccc:2030"/>
</rdf:Description>
<rdf:Description rdf:about="info:fedora/coccc:2033">
<fedora:HasTN rdf:resource="info:fedora/coccc:2040"/>
</rdf:Description>
<rdf:Description rdf:about="info:fedora/coccc:2033/DSID">
<fedora:HasTN rdf:resource="info:fedora/coccc:2040/DSID"/>
</rdf:Description>
</rdf:RDF>
"""
relationship = fedora_relationship(xml=xmlStr)
results = relationship.getRelationships(predicate = 'HasTN')
self.assertEqual(len(results), 3, 'Too many relationships returned')
self.assertEqual(results[0][0], "coccc:2040", "Subject incorrect")
self.assertEqual(results[1][0], "coccc:2033", "Subject incorrect")
self.assertEqual(results[2][0], "DSID", "Subject incorrect")
self.assertEqual("%s" % results[0][1], "HasTN", "Predicate incorrect")
self.assertEqual("%s" % results[1][1], "HasTN", "Predicate incorrect")
self.assertEqual("%s" % results[2][1], "HasTN", "Predicate incorrect")
self.assertEqual("%s" % results[0][2], "coccc:2030", "Object incorrect")
self.assertEqual("%s" % results[1][2], "coccc:2040", "Object incorrect")
self.assertEqual("%s" % results[2][2], "DSID", "Object incorrect")
def test_purge_relationships(self):
xmlStr1= """
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:fedora="info:fedora/fedora-system:def/relations-external#">
<rdf:Description rdf:about="info:fedora/coccc:2040">
<fedora:HasAwesomeness>JON</fedora:HasAwesomeness>
<fedora:HasTN rdf:resource="info:fedora/coccc:2030"/>
</rdf:Description>
<rdf:Description rdf:about="info:fedora/coccc:2033">
<fedora:HasTN rdf:resource="info:fedora/coccc:2040"/>
</rdf:Description>
<rdf:Description rdf:about="info:fedora/coccc:2033/DSID">
<fedora:HasTN rdf:resource="info:fedora/coccc:2040/DSID"/>
</rdf:Description>
</rdf:RDF>
"""
xmlStr2= """
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:fedora="info:fedora/fedora-system:def/relations-external#">
<rdf:Description rdf:about="info:fedora/coccc:2033">
<fedora:HasTN rdf:resource="info:fedora/coccc:2040"/>
</rdf:Description>
<rdf:Description rdf:about="info:fedora/coccc:2033/DSID">
<fedora:HasTN rdf:resource="info:fedora/coccc:2040/DSID"/>
</rdf:Description>
</rdf:RDF>
"""
relationship = fedora_relationship(xml=xmlStr1)
relationship.purgeRelationships(subject = 'coccc:2040')
result_string = XmlHelper.mangle(relationship.toString())
expected_string = XmlHelper.mangle(xmlStr2)
self.assertEqual(result_string, expected_string, 'Generated XML Incorrect')
# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,690
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/islandoraUtils/fedoraLib.py
|
'''
Created on Apr 20, 2011
@author: William Panting
'''
import tempfile
import string
import re
import random
import subprocess
from urllib import quote
import logging
from StringIO import StringIO as StringIO
from fcrepo.connection import Connection
from fcrepo.client import FedoraClient as Client
from metadata import fedora_relationships as FR
import os
from time import sleep
import hashlib
from islandoraUtils.misc import hash_file
import requests
def mangle_dsid(dsid):
    '''
    A very aptly named function that will take any string and make it conform
    [via hack and slash] to Fedora's Datastream ID naming requirements.

    @author: Jonathan Green
    @param dsid: Datastream ID to mangle
    @return dsid: Mangled ID (non-empty, starts with a letter, <= 64 chars)
    '''
    # Strip everything that is not alphanumeric, '.', '_' or '-'
    # (the character set Fedora allows in a DSID). Raw string avoids the
    # invalid-escape warnings the original non-raw pattern produced.
    dsid = re.sub(r'[^a-zA-Z0-9._\-]', '', dsid)
    # DSIDs are limited to 64 characters; keep the tail, as the original did.
    if len(dsid) > 64:
        dsid = dsid[-64:]
    # A DSID must begin with a letter: substitute one (if already at the 64
    # char limit) or prepend one. string.ascii_letters replaces the
    # Python-2-only string.letters and exists on both Python 2 and 3.
    if dsid and not dsid[0].isalpha():
        letter = random.choice(string.ascii_letters)
        if len(dsid) == 64:
            dsid = letter + dsid[1:]
        else:
            dsid = letter + dsid
    # Everything was stripped away: fall back to a random 10-letter ID.
    if dsid == '':
        dsid = ''.join(random.choice(string.ascii_letters) for _ in range(10))
    return dsid
def get_datastream_as_file(obj, dsid, extension = ''):
    '''
    Download the indicated datastream (probably for processing)
    Taken out of Fedora Microservices

    @author Alexander O'Neil
    @param obj: fcrepo object whose datastream is fetched (indexable by dsid)
    @param dsid: datastream ID to download
    @param extension: file extension for the temp file, without the dot
    @return: (temp_directory, filename) tuple; the file lives at
        temp_directory/filename. NOTE(review): returns the same tuple even if
        all 10 download attempts failed -- callers cannot tell; confirm this
        best-effort behavior is intended.
    '''
    d = tempfile.mkdtemp()
    success = False
    tries = 10
    filename = '%(dir)s/content.%(ext)s' % {'dir': d, 'ext': extension}
    # Retry until the on-disk size matches the datastream's reported size.
    while not success and tries > 0:
        with open(filename, 'w') as f:
            f.write(obj[dsid].getContent().read())
            f.flush() #Flushing should be enough... Shouldn't actually have to sync the filesystem. Caching would actually be a good thing, yeah?
        logging.debug("Size of datastream: %(size)d. Size on disk: %(size_disk)d." % {'size': obj[dsid].size, 'size_disk': os.path.getsize(filename)})
        success = os.path.getsize(filename) == obj[dsid].size
        if not success:
            tries = tries - 1
    return d, 'content.'+extension
def update_datastream(obj, dsid, filename, label='', mimeType='', controlGroup='M', tries=3, checksumType=None, checksum=None):
    '''
    This function uses requests to add a datastream to Fedora via the REST API,
    because of a bug [we need confirmation that this is the bug Alexander
    referenced in Fedora Microservices' code] in the pyfcrepo library, that
    creates unnecessary failures with closed sockets. The bug could be related
    to the use of httplib.

    @author Alexander Oneil
    @param obj: fcrepo object providing the connection (obj.client.api.connection)
    @param dsid: datastream ID to create/update
    @param filename: If the controlGroup is X or M, this is a filename as a string.
        If it is R or E then it should be a URL as a string.
    @param label: datastream label
    @param mimeType: MIME type to record
    @param controlGroup: Fedora control group ('X', 'M', 'R' or 'E')
    @param tries: The number of attempts at uploading
    @param checksumType: A hashing algorithm to attempt...
    @param checksum: A precalculated sum for the file. Is unlikely to work for
        inline XML streams, due to normalization Fedora performs when saving.
    @return boolean representing success
    '''
    logger = logging.getLogger('islandoraUtils.fedoraLib.update_datastream')
    # Get the connection from the object.
    conn = obj.client.api.connection
    info_dict = {
        'url': conn.url,
        'username': conn.username, 'password': conn.password,
        'pid': obj.pid, 'dsid': dsid,
        'label': label, 'mimetype': mimeType, 'controlgroup': controlGroup,
        'filename': filename,
        'tries': tries,
        'checksumType': checksumType,
        'checksum': checksum
    }
    #FIXME: This is duplicated here and in misc.hash_file
    #The checksum/hashing algorithms supported by Fedora (mapped to the names that Python's hashlib uses)
    hashes = {
        'MD5': 'md5',
        'SHA-1': 'sha1',
        'SHA-256': 'sha256',
        'SHA-384': 'sha384',
        'SHA-385': 'sha384', #Seems to be an error in the Fedora documentation (SHA-385 doesn't actually exist)? Let's try to account for it.
        'SHA-512': 'sha512'
    }
    # If a supported checksum type was requested and no precomputed sum was
    # given, calculate it ourselves; otherwise trust the caller's value.
    if checksumType in hashes and checksum is None:
        info_dict['checksum'] = hash_file(filename, checksumType)
    post_vars = {
        'dsLabel': info_dict['label'],
        'mimeType': info_dict['mimetype'],
        'controlGroup': info_dict['controlgroup'],
        'checksumType': info_dict['checksumType'],
        'checksum': info_dict['checksum']
    }
    # Don't send checksum parameters that were never provided.
    if post_vars['checksumType'] is None:
        del post_vars['checksumType']
        del post_vars['checksum']
    elif post_vars['checksum'] is None:
        del post_vars['checksum']
    files = {}
    if info_dict['controlgroup'] in ['R', 'E']:
        # Redirect/External: Fedora stores only the location, no upload body.
        post_vars['dsLocation'] = info_dict['filename']
    else:
        files['file'] = open(info_dict['filename'], 'rb')
    updated = False
    try:
        while not updated and info_dict['tries'] > 0:
            # Rewind the upload body for retries. The original called seek()
            # unconditionally, which raised KeyError for 'R'/'E' control
            # groups where no file is opened.
            if 'file' in files:
                files['file'].seek(0)
            info_dict['tries'] = info_dict['tries'] - 1
            r = requests.post('%(url)s/objects/%(pid)s/datastreams/%(dsid)s' % info_dict,
                auth=(info_dict['username'], info_dict['password']),
                params=post_vars,
                files=files)
            if r.status_code == 201:
                logger.info('Updated %(pid)s/%(dsid)s.' % info_dict)
                updated = True
            else:
                logger.warning('Failed to update %(pid)s/%(dsid)s: %(tries)s tries remaining.' % info_dict)
                sleep(5) #Something went wrong... Let's give it a few, to see if it sorts itself out.
        if not updated:
            # The original never interpolated this message, so it logged the
            # literal placeholders; apply the formatting.
            logger.error('Failed to update %(pid)s/%(dsid)s in the given number of attempts. Failing...' % info_dict)
    finally:
        # Close the upload handle even if requests.post raised.
        for f in files.values():
            f.close()
    return updated
def update_hashed_datastream_without_dup(obj, dsid, filename, **params):
    '''
    Update a datastream, skipping the upload when the new content's checksum
    matches what Fedora already holds.

    @author Adam Vessey
    NOTE: This function essentially wraps update_datastream, and as such takes
    an identical set of parameters.

    Logic:
      - if no (enabled) checksumType is given, just update;
      - if the datastream exists and uses the same algorithm, hash the file
        (unless a checksum was supplied) and compare: identical means no new
        version is needed;
      - otherwise fall through and update.

    @return True when the upload was performed successfully or skipped as a
        duplicate; otherwise whatever update_datastream returns.
    '''
    # Use .get(): update_datastream declares defaults for these, so callers
    # may legitimately omit them; the original raised KeyError in that case.
    checksum_type = params.get('checksumType')
    if checksum_type and checksum_type != 'DISABLED':  # If we do really want to hash,
        # ...and the existing datastream uses the same algorithm:
        if dsid in obj and checksum_type == obj[dsid].checksumType:
            # Figure out the checksum for the given file (if it isn't given).
            if not params.get('checksum'):
                params['checksum'] = hash_file(filename, checksum_type)
            # Same sum as what's stored: no new version needed.
            if params['checksum'] == obj[dsid].checksum:
                return True
        # Different algorithm or different sum: fall through and update.
    # No (enabled) algorithm, or content changed: perform the update.
    return update_datastream(obj=obj, dsid=dsid, filename=filename, **params)
# Ad-hoc manual test: connect to a local Fedora instance with default
# credentials. Not executed on import.
if __name__ == '__main__':
    import fcrepo
    connection = fcrepo.connection.Connection('http://localhost:8080/fedora', username='fedoraAdmin', password='fedoraAdmin', persistent=False)
    client = fcrepo.client.FedoraClient(connection)
    #print(client.getDatastreamProfile('atm:1250', 'DC'))
    #print(client.getDatastreamProfile('atm:1075', 'DC'))
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,691
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/islandoraUtils/xacml/constants.py
|
XACML_NAMESPACE = "urn:oasis:names:tc:xacml:1.0:policy"
XACML = "{%s}" % XACML_NAMESPACE
XSI_NAMESPACE = "http://www.w3.org/2001/XMLSchema-instance"
XSI = "{%s}" % XSI_NAMESPACE
NSMAP = {None : XACML_NAMESPACE, 'xsi' : XSI_NAMESPACE}
XPATH_MAP = {'xacml' : XACML_NAMESPACE, 'xsi' : XSI_NAMESPACE}
stringequal = "urn:oasis:names:tc:xacml:1.0:function:string-equal"
mime = "urn:fedora:names:fedora:2.1:resource:datastream:mimeType"
dsid = "urn:fedora:names:fedora:2.1:resource:datastream:id"
onememeberof = "urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of"
MANAGEMENT_RULE = 'deny-management-functions';
DATASTREAM_RULE = 'deny-dsid-mime';
VIEWING_RULE = 'deny-access-functions';
PERMIT_RULE = 'allow-everything-else';
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,692
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/islandoraUtils/xmlib.py
|
'''
Created on Apr 15, 2011
@file
The ordering of functions, imports, and calls in this file is deliberate and unavoidable; do not rearrange them.
@author
William Panting
@dependencies
lxml
'''
import logging
def import_etree():
    '''
    This function will import the best etree it can find.
    Because dynamic importing is crazy this function can be used like this:
    <example>
    from .. import xmlib
    etree = xmlib.import_etree()
    </example>
    ONLY USE THIS FUNCTION IF YOU ARE NOT USING LXML SPECIFIC APIS (GRR THIS INCLUDES .XPATH())
    THIS FUNCTION ALSO SCREWS WITH LOGGING, SETUP YOUR LOGGER BEFORE CALLING IT
    FIXME: AVOID LOGGING ISSUES

    @author
      Adam, Will
    @return: the first importable ElementTree-compatible module, preferring
      lxml, then the stdlib C/pure-Python variants, then standalone installs.
    @raise ImportError: when no ElementTree implementation is available.
    '''
    #Get etree from somewhere it should be...
    # The fallback order is the behavior here: fastest/most featureful first.
    try:
        from lxml import etree
        logging.debug("running with lxml.etree")
    except ImportError:
        try:
            # Python 2.5
            import xml.etree.cElementTree as etree
            logging.debug("running with cElementTree on Python 2.5+")
        except ImportError:
            try:
                # Python 2.5
                import xml.etree.ElementTree as etree
                logging.debug("running with ElementTree on Python 2.5+")
            except ImportError:
                try:
                    # normal cElementTree install
                    import cElementTree as etree
                    logging.debug("running with cElementTree")
                except ImportError:
                    try:
                        # normal ElementTree install
                        import elementtree.ElementTree as etree
                        logging.debug("running with ElementTree")
                    except ImportError:
                        message = "Failed to import ElementTree from any known place"
                        logging.critical(message)
                        raise ImportError(message)
    return etree
# Yuck, sorry: this module-level call is required so `etree` exists at import time.
etree = import_etree()
def rootHasNamespace(xmlIn, namespaceIn):
    '''
    Report whether the root element of the given XML file declares the
    supplied namespace.

    @param xmlIn
      XML file to inspect
    @param namespaceIn
      namespace URI to look for
    @return bool
      True if the root declares the namespace, False otherwise
    '''
    # Parse with blank text stripped, then test membership directly against
    # the root's prefix->URI namespace map.
    blank_stripping_parser = etree.XMLParser(remove_blank_text=True)
    document = etree.parse(xmlIn, blank_stripping_parser)
    return namespaceIn in document.getroot().nsmap.values()
def copy_element_attributes(from_element, to_element):
    '''
    This function will copy the attributes from one etree element to another.

    @param from_element
      Get attributes from this one
    @param to_element
      Put attributes on this one
    @author
      William Panting
    '''
    # dict.items() works on both Python 2 and 3; the original used the
    # Python-2-only iteritems(), which breaks on Python 3.
    for attribute, value in from_element.attrib.items():
        to_element.set(attribute, value)
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,693
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/islandoraUtils/xacml/test.py
|
from islandoraUtils.xacml.tools import Xacml
# Ad-hoc manual exercise of the Xacml helper: build a policy with users,
# roles, dsids and mimetypes, round-trip it through its XML serialization,
# and print the result. (Python 2 script: note the print statement.)
xacml = Xacml()
xacml.managementRule.addUser('jon')
xacml.managementRule.addRole(['roleA', 'roleB'])
xacml.managementRule.removeRole('roleB')
xacml.viewingRule.addUser('feet')
xacml.viewingRule.addRole('toes')
xacml.datastreamRule.addUser('22')
xacml.datastreamRule.addDsid('OBJ')
xacml.datastreamRule.addMimetype('image/pdf')
# Serialize, re-parse, and serialize again to check the round trip.
xstring = xacml.getXmlString()
xacml = Xacml(xstring)
xstring2 = xacml.getXmlString()
print xstring2
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,694
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/islandoraUtils/fileManipulator.py
|
'''
Created on May 5, 2011
@author
William Panting
This file is meant to help with file manipulations/alterations.
'''
from pyPdf import PdfFileWriter, PdfFileReader
import logging, os
from . import xmlib
from .misc import force_extract_integer_from_string
etree = xmlib.import_etree()
def _append_pdf_pages(pdfWriter, path):
    '''Append every page of the PDF at *path* to *pdfWriter*.'''
    toAppendReader = PdfFileReader(open(path, "rb"))
    try:
        numPages = toAppendReader.getNumPages()
    except Exception:
        # pyPdf sometimes mistakenly reports a PDF as encrypted; decrypting
        # with an empty password works around it. Does not handle
        # encryption revisions 3/4.
        toAppendReader.decrypt('')
        numPages = toAppendReader.getNumPages()
    for pageCount in range(numPages):
        pdfWriter.addPage(toAppendReader.getPage(pageCount))

def appendPDFwithPDF(outFile, toAppend):
    '''
    This function is meant to combine multiple pdf files, I'm not sure I like
    the pyPdf module's issues atm, hope it updates soon.

    @author
      William Panting
    @param outFile
      a string representing the path of the file that is to be created/modified
    @param toAppend
      a string representing the path of the file that is to be appended to the
      original file, or an ordered list of multiple strings representing files
    @return bool
      true if successful false if not
    '''
    pdfWriter = PdfFileWriter()
    #out file must not be a directory
    if os.path.isdir(outFile):
        logging.error('Input error: outFile cannot be a directory.')
        return False
    #if outfile is a file then it needs to be added to the output page by page just like the other pdfs
    elif os.path.isfile(outFile):
        #if toAppend is a string then make it into a list [outFile,toAppend]
        if isinstance(toAppend, str):
            toAppend = [outFile, toAppend]
        #if toAppend is a list prepend outFile to it
        elif isinstance(toAppend, list):
            toAppend.insert(0, outFile)
    #if toAppend is a single path
    if isPDF(toAppend):
        _append_pdf_pages(pdfWriter, toAppend)
    #if toAppend is a list of paths
    elif isinstance(toAppend, list):
        for path in toAppend:
            #verify list as pdfs
            if isPDF(path) == False:
                logging.error('Error with input: ' + str(path) + ' --Each member of the list to append must be a valid pdf.')
                return False
            #loop over each page appending it
            _append_pdf_pages(pdfWriter, path)
    else:
        logging.error('Error with input: ' + str(toAppend) + ' --The input to Append must be a file path or list of file paths.')
        return False
    #write the concatenated file, must open for read write if it exists or you get an exception in pyPdf
    if os.path.lexists(outFile):
        pdfStream = open(outFile, "r+b")
    else:
        pdfStream = open(outFile, 'wb')
    pdfWriter.write(pdfStream)
    return True
def isPDF(file_path):
    '''
    This function is a helper function that validates user input as a valid
    pdf file -- but not really: it just checks the extension right now.

    @todo
      actually inspect the file content
    @author
      William Panting
    @param file_path
      path to analyse for pdf-ness
    @return bool
      true if the input is a path to an existing file ending in '.pdf'
    '''
    # str.endswith also handles file names with no '.' at all, where the
    # original rindex()-based slice raised ValueError on an existing file.
    return isinstance(file_path, str) and os.path.isfile(file_path) and file_path.endswith('.pdf')
def breakTEIOnPages(file_path, output_directory, force_numeric_page_numbers = False):
    '''
    This function will break a tei file into tei snipits for each page
    This may not work with tei files not from Hamilton
    It explodes on non expanded pb tags. it will likely break on expanded ones

    @author
      William Panting
    @param string file_path
      path to the TEI/XML source file
    @param string output_directory
      directory the per-page XML files are written into
    @param force_numeric_page_numbers
      NOTE(review): currently unused -- confirm whether it was ever wired up.
    @return bool
      True when the file was processed; False for a path that is not an
      .xml/.tei file
    @todo
      make pages rely on the @n of <pb> quashing non-numeric characters
    @todo
      make the script handle non-numeric pagination
    '''
    if os.path.isfile(file_path) and (file_path.endswith('.xml') or file_path.endswith('.tei') or file_path.endswith('.TEI') or file_path.endswith('.XML')):
        #init
        # element_tracker mirrors the open-element stack of the source file;
        # page_element_tracker is the parallel stack of copies on the current page.
        TEI_iterator = etree.iterparse(file_path, events=('start', 'end'))
        element_tracker = list()
        page_element_tracker = list()
        first_page_sentinal = True
        root_text_sentinal = 0 #to be considered true only if ==1
        page_number = False
        pb_parent = etree.Element  # NOTE(review): never used afterwards -- dead assignment?
        DoNotRepeat_list = list()
        #go through file until eof
        for event, elem in TEI_iterator:
            #consider coping headers
            #if the element is root then create current_page root
            if first_page_sentinal == True:
                root = etree.Element(elem.tag)
                xmlib.copy_element_attributes(elem, root)
                current_page = etree.ElementTree(root)
                first_page_sentinal = False
            if event == 'start':
                #handles getting root text as soon as it is available
                if root_text_sentinal == 1:
                    last_elem = element_tracker.pop()
                    current_page_root = current_page.getroot()
                    current_page_root.text = last_elem.text
                    element_tracker.append(last_elem)
                    root_text_sentinal+=1
                elif root_text_sentinal == 0:
                    root_text_sentinal+=1
                '''on a page break open iterate through everything on the element stack
                grab the textual content posting it to the current page's elements
                then print it to file
                '''
                if elem.tag.endswith('}pb'):
                    #populate the .text of the incomplete elements of the current page
                    #if they were not populated in a previous page
                    #todo: FIX !!!
                    for element in element_tracker:#only get text if it isn't on a page already
                        if DoNotRepeat_list.count(element) == 0:
                            page_element = page_element_tracker[element_tracker.index(element)]
                            page_element.text = element.text
                    DoNotRepeat_list = list()#clear so we aren't appending each pb
                    #create the next page parser: copy the currently-open
                    #element chain so the new page starts with the same ancestry
                    root_element_sentinal = True
                    for element in page_element_tracker:
                        if root_element_sentinal == True:
                            root = etree.Element(element.tag)
                            xmlib.copy_element_attributes(element, root)
                            next_page = etree.ElementTree(root)
                            root_element_sentinal = False
                            element_copy = root
                        else:
                            element_copy = etree.Element(element.tag)
                            xmlib.copy_element_attributes(element, element_copy)
                            last_element = DoNotRepeat_list.pop()
                            last_element.append(element_copy)
                            DoNotRepeat_list.append(last_element)
                        DoNotRepeat_list.append(element_copy)
                    #print to file, but don't print the 'first page' it's metadata
                    if page_number is not False:
                        output_path = os.path.join(output_directory, os.path.basename(file_path)[:-4] + '_page_' + str(page_number) + '.xml')
                        current_page.write(output_path, encoding = "UTF-8")
                    #switch to new page
                    page_number = force_extract_integer_from_string(elem.attrib['n'])
                    print(page_number)
                    current_page = next_page
                    page_element_tracker = DoNotRepeat_list
                    DoNotRepeat_list = list(element_tracker)
                else:#push tag into new page
                    #construct element
                    page_elem = etree.Element(elem.tag)
                    xmlib.copy_element_attributes(elem, page_elem)
                    #put element on the current page
                    if page_element_tracker:
                        last_page_elem = page_element_tracker.pop()
                        last_page_elem.append(page_elem)
                        page_element_tracker.append(last_page_elem)
                    else:
                        last_page_elem = current_page.getroot()
                        last_page_elem.append(page_elem)
                    element_tracker.append(elem)
                    page_element_tracker.append(page_elem)
                #push tag with attributes onto the current page
            if event == 'end':
                #if close of file print to page
                if elem.tag.endswith('}TEI'):
                    output_path = os.path.join(output_directory, os.path.basename(file_path)[:-4] + '_page_' + str(page_number) + '.xml')
                    current_page.write(output_path, encoding = "UTF-8")
                else:
                    #pop the stack to work on it
                    last_elem = element_tracker.pop()
                    last_page_elem = page_element_tracker.pop()
                    #push preceding text onto current page
                    last_page_elem.tail = last_elem.tail #gets closing text
                    #only get text if it isn't on a page already
                    if DoNotRepeat_list.count(last_elem) == 0:
                        last_page_elem.text = last_elem.text #gets opening text
        return True
    return False
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,695
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/setup.py
|
'''
Created on Apr 18, 2011
Setup script for isladoraUtils
@author: William Panting
'''
import os
from setuptools import setup, find_packages
def read(fname):
    '''Return the contents of *fname*, resolved relative to this setup file.'''
    # Context manager closes the handle; the original leaked the open() result.
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Standard setuptools configuration for the islandoraUtils package.
setup(
    name='islandoraUtils',
    version='0.1',
    description='A Package meant to assist Islandora developers',
    author='William Panting',
    author_email='will@discoverygarden.ca',
    license = "GPL",
    packages=find_packages(),
    package_dir={"islandoraUtils" : "islandoraUtils"},
    package_data={"islandoraUtils" : ["__resources/*"]},
    long_description=read('README'),
    install_requires=['setuptools','lxml', 'requests']
)
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,696
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/islandoraUtils/misc.py
|
'''
Created on May 30, 2011
This is a holding place for useful re-usable code
that doesn't have a place anywhere else in the package
'''
import os
import hashlib
import re
def getMimeType(extension):
    '''
    Get the MIME type associated with the provided file extension.

    This is not fool proof: some extensions have multiple possible MIME
    types, in which case the one most useful for Islandora is returned.
    The lookup is limited to a fixed table of types; anything not in the
    table falls back to the generic binary type.

    @param extension
        The file extension (with or without a leading '.') to look up.
    @return mimeType
        The MIME type associated with the extension, or
        'application/octet-stream' when the extension is unknown.
    @todo
        Match Islandora's functionality
    @note
        The stdlib mimetypes module could be used instead, but its answers
        vary by platform; a fixed table keeps results predictable.
    @see
        http://docs.python.org/library/mimetypes.html
    '''
    ext = extension.lower()
    # strip a single leading '.' so both '.pdf' and 'pdf' are accepted
    if ext.startswith('.'):
        ext = ext[1:]
    # this is the list of mime types defined in MimeClass.inc in islandora (commit f608652cf6421c2952100b451fe2d699cb1d8b63)
    mimes = {
        # openoffice:
        'odb' : 'application/vnd.oasis.opendocument.database',
        'odc' : 'application/vnd.oasis.opendocument.chart',
        'odf' : 'application/vnd.oasis.opendocument.formula',
        'odg' : 'application/vnd.oasis.opendocument.graphics',
        'odi' : 'application/vnd.oasis.opendocument.image',
        'odm' : 'application/vnd.oasis.opendocument.text-master',
        'odp' : 'application/vnd.oasis.opendocument.presentation',
        'ods' : 'application/vnd.oasis.opendocument.spreadsheet',
        'odt' : 'application/vnd.oasis.opendocument.text',
        'otg' : 'application/vnd.oasis.opendocument.graphics-template',
        'oth' : 'application/vnd.oasis.opendocument.text-web',
        'otp' : 'application/vnd.oasis.opendocument.presentation-template',
        'ots' : 'application/vnd.oasis.opendocument.spreadsheet-template',
        'ott' : 'application/vnd.oasis.opendocument.text-template',
        # staroffice:
        'stc' : 'application/vnd.sun.xml.calc.template',
        'std' : 'application/vnd.sun.xml.draw.template',
        'sti' : 'application/vnd.sun.xml.impress.template',
        'stw' : 'application/vnd.sun.xml.writer.template',
        'sxc' : 'application/vnd.sun.xml.calc',
        'sxd' : 'application/vnd.sun.xml.draw',
        'sxg' : 'application/vnd.sun.xml.writer.global',
        'sxi' : 'application/vnd.sun.xml.impress',
        'sxm' : 'application/vnd.sun.xml.math',
        'sxw' : 'application/vnd.sun.xml.writer',
        # k-office:
        'kil' : 'application/x-killustrator',
        'kpt' : 'application/x-kpresenter',
        'kpr' : 'application/x-kpresenter',
        'ksp' : 'application/x-kspread',
        'kwt' : 'application/x-kword',
        'kwd' : 'application/x-kword',
        # ms office 97:
        'doc' : 'application/msword',
        'xls' : 'application/vnd.ms-excel',
        'ppt' : 'application/vnd.ms-powerpoint',
        # office2007:
        'docx' : 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
        'docm' : 'application/vnd.ms-word.document.macroEnabled.12',
        'dotx' : 'application/vnd.openxmlformats-officedocument.wordprocessingml.template',
        'dotm' : 'application/vnd.ms-word.template.macroEnabled.12',
        'xlsx' : 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
        'xlsm' : 'application/vnd.ms-excel.sheet.macroEnabled.12',
        'xltx' : 'application/vnd.openxmlformats-officedocument.spreadsheetml.template',
        'xltm' : 'application/vnd.ms-excel.template.macroEnabled.12',
        'xlsb' : 'application/vnd.ms-excel.sheet.binary.macroEnabled.12',
        'xlam' : 'application/vnd.ms-excel.addin.macroEnabled.12',
        'pptx' : 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
        'pptm' : 'application/vnd.ms-powerpoint.presentation.macroEnabled.12',
        'ppsx' : 'application/vnd.openxmlformats-officedocument.presentationml.slideshow',
        'ppsm' : 'application/vnd.ms-powerpoint.slideshow.macroEnabled.12',
        'potx' : 'application/vnd.openxmlformats-officedocument.presentationml.template',
        'potm' : 'application/vnd.ms-powerpoint.template.macroEnabled.12',
        'ppam' : 'application/vnd.ms-powerpoint.addin.macroEnabled.12',
        'sldx' : 'application/vnd.openxmlformats-officedocument.presentationml.slide',
        'sldm' : 'application/vnd.ms-powerpoint.slide.macroEnabled.12',
        # wordperfect (who cares?):
        'wpd' : 'application/wordperfect',
        # common and generic containers:
        'pdf' : 'application/pdf',
        'eps' : 'application/postscript',
        'ps' : 'application/postscript',
        'rtf' : 'text/rtf',
        'rtx' : 'text/richtext',
        'latex' : 'application/x-latex',
        'tex' : 'application/x-tex',
        'texi' : 'application/x-texinfo',
        'texinfo' : 'application/x-texinfo',
        # *ml:
        'css' : 'text/css',
        'htm' : 'text/html',
        'html' : 'text/html',
        'wbxml' : 'application/vnd.wap.wbxml',
        'xht' : 'application/xhtml+xml',
        'xhtml' : 'application/xhtml+xml',
        'xsl' : 'text/xml',
        'xml' : 'text/xml',
        'csv' : 'text/csv',
        'tsv' : 'text/tab-separated-values',
        'txt' : 'text/plain',
        # images:
        "bmp" : "image/bmp",
        "gif" : "image/gif",
        "ief" : "image/ief",
        "jpeg" : "image/jpeg",
        "jpe" : "image/jpeg",
        "jpg" : "image/jpeg",
        "jp2" : "image/jp2",
        "png" : "image/png",
        "tiff" : "image/tiff",
        "tif" : "image/tiff",
        "djvu" : "image/vnd.djvu",
        "djv" : "image/vnd.djvu",
        "wbmp" : "image/vnd.wap.wbmp",
        "ras" : "image/x-cmu-raster",
        "pnm" : "image/x-portable-anymap",
        "pbm" : "image/x-portable-bitmap",
        "pgm" : "image/x-portable-graymap",
        "ppm" : "image/x-portable-pixmap",
        "rgb" : "image/x-rgb",
        "xbm" : "image/x-xbitmap",
        "xpm" : "image/x-xpixmap",
        "xwd" : "image/x-windowdump",
        # videos:
        "mpeg" : "video/mpeg",
        "mpe" : "video/mpeg",
        "mpg" : "video/mpeg",
        "m4v" : "video/mp4",
        "mp4" : "video/mp4",
        "ogv" : "video/ogg",
        "qt" : "video/quicktime",
        "mov" : "video/quicktime",
        "mxu" : "video/vnd.mpegurl",
        "avi" : "video/x-msvideo",
        "movie" : "video/x-sgi-movie",
        "flv" : "video/x-flv",
        "swf" : "application/x-shockwave-flash",
        # audio:
        "mp3" : "audio/mpeg",
        "mp4a" : "audio/mp4",
        "m4a" : "audio/mp4",
        "oga" : "audio/ogg",
        "ogg" : "audio/ogg",
        "flac" : "audio/x-flac",
        "wav" : "audio/vnd.wave",
        # compressed formats: (note: http:#svn.cleancode.org/svn/email/trunk/mime.types)
        "tgz" : "application/x-gzip",
        "gz" : "application/x-gzip",
        "tar" : "application/x-tar",
        "gtar" : "application/x-gtar",
        "zip" : "application/x-zip",
        # others:
        'bin' : 'application/octet-stream',
    }
    # these are some additional mimetypes not covered that are required for various projects
    mimes.update({
        # combo types
        'dvi' : 'application/x-dvi',
        'rar' : 'application/x-rar-compressed',
        'rm' : 'audio/x-pn-realaudio', # this one can do audio/video/images
        # text types
        'ocr' : 'text/plain',
        'mods': 'text/xml',
        'exif': 'text/xml',
        # image types
        'nef' : 'image/x-nikon-net',
        'dng' : 'image/x-adobe-dng',
        'tn' : 'image/jpeg', # used for fedora thumbnails
        # video types
        '3gp' : 'video/3gpp',
        'wmv' : 'video/x-ms-wmv',
    })
    # fall back to a generic binary type when no match is found
    return mimes.get(ext, 'application/octet-stream')
def __chunk(file_name, size):
    # NOTE(review): unimplemented stub -- it opens the file and does nothing,
    # and `start` is never used. Presumably intended to read `file_name` in
    # `size`-byte pieces for hash_file below; confirm intent before
    # implementing or removing.
    start = 0
    with open(file_name, 'r+b') as temp:
        pass
def hash_file(file_name, hash_type='SHA-1', chunks=2**20):
    '''
    Hashes a file at the given path with the given algorithm, and returns the hex digest.
    @author Adam Vessey
    @param file_name A string containing the path to the relevant file
    @param hash_type A hashing algorithm, currently restricted to those
        available in Fedora 3.4.2 {MD5,SHA-{1,256,38{4,5},512}}
        NOTE: 385 is made to point to 384 (SHA-385 appears to be a typo in
        the Fedora documentation; it doesn't actually exist)
    @param chunks The number of hash blocks to read at a time
    @return A string containing the hex digest of the file.
    @raise KeyError If hash_type is not one of the supported names.
    @raise ValueError If file_name does not exist.
    '''
    #FIXME: This is duplicated here and in fedoraLib.update_datastream
    # The checksum/hashing algorithms supported by Fedora, mapped to the
    # names that Python's hashlib uses.
    hashes = {
        'MD5': 'md5',
        'SHA-1': 'sha1',
        'SHA-256': 'sha256',
        'SHA-384': 'sha384',
        'SHA-385': 'sha384',  # Fedora-doc typo; alias it to SHA-384
        'SHA-512': 'sha512'
    }
    if not os.path.exists(file_name):
        raise ValueError('File %s does not exist!' % file_name)
    with open(file_name, 'rb') as temp:
        h = hashlib.new(hashes[hash_type])
        # Chunk the hashing based on the hash's block_size and the number of
        # chunks specified, for memory efficiency on large files.
        chunksize = chunks * h.block_size
        # The file is opened in binary mode, so the end-of-file sentinel must
        # be b'' -- the original '' never matched under Python 3, causing an
        # infinite loop. (b'' == '' under Python 2, so this works for both.)
        for chunk in iter(lambda: temp.read(chunksize), b''):
            h.update(chunk)
        return h.hexdigest()
def force_extract_integer_from_string(string_to_cast):
    '''
    Quash every non-numeric character in a string and return the integer
    formed by the remaining numerals, kept in their original order.

    @param string_to_cast
        The string to quash to an int
    @return
        The integer value of the quashed string
    '''
    # Keep only the ASCII decimal digits, then convert the result.
    digits_only = ''.join(character for character in string_to_cast
                          if character in '0123456789')
    return int(digits_only)
if __name__ == '__main__':
    '''
    @todo:
        refine the 'tests'
    '''
    # Ad-hoc smoke test: all numerals of 'l33t' are extracted, printing 33.
    #print(hash_file('/mnt/fjm_obj/dump/Fotos/949_0227818_53.jpg', 'SHA-1'))
    print(force_extract_integer_from_string('l33t'))
    pass
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,697
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/islandoraUtils/metadata/fedora_relationships.py
|
from lxml import etree
import copy
import fcrepo #For type checking...
class rels_namespace:
    """Value object pairing an XML namespace alias with its URI.

    The repr is the ElementTree-style qualified prefix, i.e. '{uri}'.
    """
    def __init__(self, alias, uri):
        self.uri = uri
        self.alias = alias

    def __repr__(self):
        return '{{{0}}}'.format(self.uri)
class rels_object:
    """The object (target) position of an RDF relationship triple.

    An object is one of three kinds -- a datastream ID, a literal value,
    or an object PID -- recorded in the `type` attribute.
    """
    # enumeration of the supported object kinds
    DSID = 1
    LITERAL = 2
    PID = 3
    TYPES = [DSID, LITERAL, PID]

    def __init__(self, data, type):
        self.data = data
        self.type = type

    def __repr__(self):
        # the bare datum doubles as the printable form
        return self.data
class rels_predicate:
    """The predicate position of an RDF relationship triple, qualified by
    the alias of the namespace it belongs to (None means 'use default')."""
    def __init__(self, alias, predicate):
        self.alias = alias
        self.predicate = predicate

    def __repr__(self):
        return self.predicate
class fedora_relationship():
    r"""Top level class in the class hierarchy.

    Manipulates an RDF/XML relationship document (RELS-EXT / RELS-INT style)
    entirely in memory. The top level classes are easy to test, since they
    are independent of fedora.

    Looks like this:

           fedora_relationship
                   ^
                  / \
      rels_int_string rels_ext_string
            |                |
         rels_int         rels_ext
    """
    rdf_namespace = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
    rdf = '{%s}' % rdf_namespace
    fedora_namespace = 'info:fedora/fedora-system:def/relations-external#'
    fedora = '{%s}' % fedora_namespace

    # alias -> uri map (lxml nsmap form) and alias -> '{uri}' map
    # (ElementTree qualified-tag form). These class-level dicts are only
    # DEFAULTS; each instance works on its own copy (see __init__).
    nsmap = {
        'rdf' : rdf_namespace,
        'fedora' : fedora_namespace,
    }
    ns = {
        'rdf' : rdf,
        'fedora' : fedora,
    }

    def __init__(self, namespaces=None, default_namespace=None, xml=None):
        """Build a relationship document, optionally from existing XML.

        Arguments:
        namespaces -- extra namespaces to register: a rels_namespace, an
            ['alias', 'uri'] pair, or a list of rels_namespace objects.
        default_namespace -- alias assumed for unqualified predicates;
            defaults to 'fedora'. Raises KeyError if unknown.
        xml -- string of existing RDF/XML to parse and extend.
        """
        # Work on per-instance copies so namespaces registered on one
        # document never leak into other instances (previously these were
        # shared, mutable class attributes -- a latent cross-instance bug).
        self.nsmap = dict(self.nsmap)
        self.ns = dict(self.ns)
        if namespaces:
            if isinstance(namespaces, rels_namespace):
                self.nsmap[namespaces.alias] = namespaces.uri
                self.ns[namespaces.alias] = '{%s}' % namespaces.uri
            elif isinstance(namespaces, list):
                if isinstance(namespaces[0], basestring):
                    # a single ['alias', 'uri'] pair
                    self.nsmap[namespaces[0]] = namespaces[1]
                    self.ns[namespaces[0]] = '{%s}' % namespaces[1]
                elif isinstance(namespaces[0], rels_namespace):
                    for namespace in namespaces:
                        self.nsmap[namespace.alias] = namespace.uri
                        self.ns[namespace.alias] = '{%s}' % namespace.uri
                else:
                    raise TypeError
            else:
                raise TypeError
        if(xml):
            parser = etree.XMLParser(remove_blank_text=True) # xml parser ignoring whitespace
            root = etree.fromstring(xml, parser)
            # we unfortunately have to go through quite a lot of crap to add
            # new namespaces to an existing xml document. There is a lxml bug
            # filed against this but currently it's on the wishlist.
            # https://bugs.launchpad.net/lxml/+bug/555602
            if namespaces:
                oldnsmap = root.nsmap
                for (alias, uri) in oldnsmap.items():
                    self.ns[alias] = '{%s}' % uri
                self.nsmap.update(oldnsmap)
                # rebuild the root with the merged nsmap and adopt the children
                self.root = etree.Element(root.tag, nsmap=self.nsmap)
                self.root[:] = root[:]
            else:
                self.root = root
        else:
            self.root = etree.Element(self.rdf + 'RDF', nsmap=self.nsmap)
        # set default namespace alias for predicates
        if(default_namespace):
            if(default_namespace not in self.ns):
                raise KeyError
            self.nsalias = default_namespace
        else:
            self.nsalias = 'fedora'
        # state variable so callers can tell if the tree has been modified
        self.modified = False

    def toString(self, pretty_print=True):
        """Serialize the relationship XML tree to a string."""
        return etree.tostring(self.root, pretty_print=pretty_print)

    def __str__(self):
        return etree.tostring(self.root, pretty_print=True)

    def _doXPathQuery(self, subject=None, predicate=None, object=None):
        """Return relationship elements matching the triple pattern.

        None acts as a wildcard in any of the three positions.
        """
        predicate_object = self._objectifyPredicate(predicate)
        # because we start using xpath here, and xpath namespacing is a little
        # different, we change from '{uri}tag' to 'alias:tag' form.
        # Because of the inconsistency in existing data between 'description'
        # and 'Description', figure out which spelling this document uses.
        description = self.root.find(self.rdf + 'description')
        if description is not None:
            description_xpath = 'rdf:description'
        else:
            description_xpath = 'rdf:Description'
        if subject is not None:
            description_xpath += '[@rdf:about="info:fedora/' + subject + '"]'
        if predicate_object is None:
            predicate_xpath = '/*'
        else:
            predicate_xpath = '/' + predicate_object.alias + ':' + predicate_object.predicate
        if object is None:
            object_xpath = ''
        else:
            # PIDs and DSIDs are stored as rdf:resource URIs; literals as text
            if object.type == rels_object.PID or object.type == rels_object.DSID:
                object_xpath = '[@rdf:resource="info:fedora/%s"]' % object
            elif object.type == rels_object.LITERAL:
                object_xpath = '[.="%s"]' % object
        return self.root.xpath(description_xpath + predicate_xpath + object_xpath, namespaces=self.nsmap)

    def _objectifyPredicate(self, predicate):
        """Normalize the overloaded predicate argument to a rels_predicate.

        Accepts None (passed through), a plain string (default namespace
        alias assumed), an ['alias', 'predicate'] pair, or a rels_predicate.
        Raises KeyError for unknown aliases, TypeError for other inputs.
        """
        if predicate is None:
            pred_obj = predicate
        elif isinstance(predicate, basestring):
            pred_obj = rels_predicate(self.nsalias, predicate)
        elif isinstance(predicate, list):
            pred_obj = rels_predicate(predicate[0], predicate[1])
            if predicate[0] not in self.ns:
                raise KeyError
        elif isinstance(predicate, rels_predicate):
            pred_obj = predicate
            if pred_obj.alias is None:
                pred_obj.alias = self.nsalias
            if pred_obj.alias not in self.ns:
                raise KeyError
        else:
            raise TypeError
        return pred_obj

    def _addRelationship(self, subject, predicate):
        """Find (or create) the rdf:Description for *subject* and append a
        new, empty relationship element under it. Returns the new element."""
        description = self.root.find(self.rdf + 'Description[@' + self.rdf + 'about="info:fedora/' + subject + '"]')
        # also check for the lower-case 'description' spelling
        if description is None:
            description = self.root.find(self.rdf + 'description[@' + self.rdf + 'about="info:fedora/' + subject + '"]')
        if description is None:
            description = etree.SubElement(self.root, self.rdf + 'Description')
            description.attrib[self.rdf + 'about'] = 'info:fedora/' + subject
        relationship = etree.SubElement(description, self.ns[predicate.alias] + predicate.predicate)
        return relationship

    def addRelationship(self, subject, predicate, object):
        """Add a (subject, predicate, object) triple to the document.

        object must be a rels_object; predicate may be a string, an
        ['alias', 'predicate'] pair, or a rels_predicate. Raises TypeError
        if any argument is None.
        """
        if subject is None or predicate is None or object is None:
            raise TypeError
        self.modified = True
        pred_obj = self._objectifyPredicate(predicate)
        relationship = self._addRelationship(subject, pred_obj)
        if object.type == rels_object.DSID or object.type == rels_object.PID:
            relationship.attrib[self.rdf + 'resource'] = 'info:fedora/%s' % object
        elif object.type == rels_object.LITERAL:
            relationship.text = '%s' % object

    def getRelationships(self, subject=None, predicate=None, object=None):
        """Return matching triples as [subject, rels_predicate, rels_object]
        lists. None arguments act as wildcards."""
        result_elements = self._doXPathQuery(subject, predicate, object)
        results = []
        for element in result_elements:
            result = []
            parent = element.getparent()
            # subject: strip the 'info:fedora/' prefix from rdf:about
            parent_name = parent.attrib[self.rdf + 'about'].rsplit('/', 1)[1]
            result.append(parent_name)
            # predicate: split '{uri}tag' back into alias + tag
            predicate_name_array = element.tag.rsplit('}', 1)
            if(len(predicate_name_array) == 1):
                # no namespace on the tag at all
                predicate_name = rels_predicate(None, predicate_name_array[0])
            else:
                predicate_ns = predicate_name_array[0][1:]
                for a, p in self.nsmap.iteritems():
                    if(predicate_ns == p):
                        predicate_alias = a
                predicate_name = rels_predicate(predicate_alias, predicate_name_array[1])
            result.append(predicate_name)
            # object: an rdf:resource attribute means DSID/PID, else literal
            if self.rdf + 'resource' in element.attrib:
                object_name = element.attrib[self.rdf + 'resource']
                object_name = object_name.rsplit('/', 1)[1]
                # DSIDs carry no ':'; PIDs are of the form 'namespace:id'
                if(object_name.find(':') == -1):
                    object_type = rels_object.DSID
                else:
                    object_type = rels_object.PID
                object_obj = rels_object(object_name, object_type)
            else:
                object_obj = rels_object(element.text, rels_object.LITERAL)
            result.append(object_obj)
            results.append(result)
        return results

    def purgeRelationships(self, subject=None, predicate=None, object=None):
        """Remove matching triples. None arguments act as wildcards, so at
        least one argument must be given (TypeError otherwise) -- calling
        with none at all would wipe the entire document."""
        if subject is None and predicate is None and object is None:
            raise TypeError
        result_elements = self._doXPathQuery(subject, predicate, object)
        if result_elements:
            self.modified = True
        for element in result_elements:
            parent = element.getparent()
            parent.remove(element)
            # drop the rdf:Description wrapper once it is empty
            if len(parent) == 0:
                grandparent = parent.getparent()
                grandparent.remove(parent)
class rels_int_string(fedora_relationship):
    """Class to update a fedora RELS-INT datastream, given as an XML string.

    RELS-INT relationships are between datastreams, so subjects and
    DSID-typed objects are automatically qualified as 'pid/dsid'.
    """
    def __init__(self, pid, namespaces = None, default_namespace = None, xml = None):
        """Constructor for rels_int_string object.
        Arguments:
        pid -- PID of the fedora object to modify/create rels_int for.
        namespaces -- Namespaces to be added to the rels_int.
            [] - list containing ['alias','uri']
            [rels_namespace, ...] - list of rels_namespace objects.
            [[],[],...[]] - list of ['alias','uri']
            rels_namespace - rels_namespace object containing namespace and alias.
        default_namespace -- String containing the alias of the default namespace.
            If no namespace is passed in then this is assumed:
            info:fedora/fedora-system:def/relations-external#
        xml -- Optional string of existing RELS-INT XML to parse and extend.
        """
        self.pid = pid
        fedora_relationship.__init__(self, namespaces, default_namespace, xml)
    def _updateObject(self, object):
        """Private method to normalize the overloaded object argument.

        Turns everything into a rels_object, prefixing DSIDs with the pid.
        Accepts None (wildcard), a DSID string, a rels_object, a
        ['value','type'] pair, or an fcrepo FedoraObject (treated as a PID).
        NOTE: the isinstance checks are order-sensitive (rels_object before
        list, list before FedoraObject).
        """
        if object == None:
            obj = None
        elif isinstance(object,basestring):
            # bare strings are taken to be DSIDs and qualified with the pid
            obj = rels_object('%s/%s'%(self.pid,object), rels_object.DSID)
        elif isinstance(object,rels_object):
            if object.type not in rels_object.TYPES:
                raise TypeError
            if object.type == rels_object.DSID:
                # copy so the caller's object is not mutated by the prefixing
                obj = copy.copy(object)
                obj.data = '%s/%s'%(self.pid, object.data)
            elif object.type == rels_object.LITERAL or object.type == rels_object.PID:
                obj = copy.copy(object)
        elif isinstance(object, list):
            reltype = object[1].lower()
            if reltype == 'dsid':
                obj = rels_object('%s/%s'%(self.pid, object[0]), rels_object.DSID)
            elif reltype == 'pid':
                obj = rels_object(object[0], rels_object.PID)
            elif reltype == 'literal':
                obj = rels_object(object[0], rels_object.LITERAL)
            else:
                raise KeyError
        elif isinstance(object,fcrepo.object.FedoraObject):
            obj = rels_object(object.pid, rels_object.PID)
        else:
            raise TypeError
        return obj
    def _updateSubject(self, subject):
        """Private method to qualify a DSID subject as 'pid/dsid'.
        None (wildcard) is passed through unchanged."""
        if(subject):
            subject = '%s/%s' % (self.pid, subject)
        return subject
    def addRelationship(self, subject, predicate, object):
        """Add new relationship to rels_int XML.
        Arguments:
        subject -- String containing the DSID of the subject.
        predicate -- The predicate.
            String - The predicate string. The default namespace is assumed.
            rels_predicate - object with namespace alias and predicate set.
            list - ['alias','predicate']
        object -- The object.
            string - String containing a DSID.
            rels_object - Rels object.
            list - ['string','type'] where: type is in ['dsid', 'pid', 'literal']
        """
        obj = self._updateObject(object)
        sub = self._updateSubject(subject)
        return fedora_relationship.addRelationship(self, sub, predicate, obj)
    def getRelationships(self, subject=None, predicate=None, object=None):
        """Query relationships contained in rels_int XML.
        This function uses xpath to do a query to find all the objects that match
        the passed in arguments. Passing None acts as a wildcard.
        Arguments:
        subject -- String containing the DSID of the subject.
        predicate -- The predicate to search for.
            This is an overloaded method:
            None - Any predicate.
            String - The predicate string. The default namespace is assumed.
            rels_predicate - object with namespace alias and predicate set.
            list - ['alias','predicate']
        object -- The object to search for.
            None - Any object.
            string - String containing a DSID.
            rels_object - Rels object.
            list - ['string','type'] where: type is in ['dsid', 'pid', 'literal']
        Returns:
        List of lists of the form:
            [[subject1,predicate1,object1],[subject2,predicate2,object2]]
        The predicates and objects returned are of rels_predicate and rels_object
        """
        obj = self._updateObject(object)
        sub = self._updateSubject(subject)
        return fedora_relationship.getRelationships(self, sub, predicate, obj)
    def purgeRelationships(self, subject=None, predicate=None, object=None):
        """Purge relationships from the rels_int XML.
        This function uses xpath to do a query to remove all the objects that match
        the passed in arguments. Passing None acts as a wildcard.
        WARNING: Because None is a wildcard, passing no arguments will
        DELETE THE ENTIRE CONTENTS of the rels_int.
        Arguments:
        subject -- String containing the DSID.
        predicate -- The predicate to remove.
            None - Any predicate.
            String - The predicate string. The default namespace is assumed.
            rels_predicate - object with namespace alias and predicate set.
            list - ['alias','predicate']
        object -- The object to remove.
            None - Any object.
            string - String containing a DSID.
            rels_object - Rels object.
            list - ['string','type'] where: type is in ['dsid', 'pid', 'literal']
        """
        obj = self._updateObject(object)
        sub = self._updateSubject(subject)
        return fedora_relationship.purgeRelationships(self, sub, predicate, obj)
class rels_ext_string(fedora_relationship):
    """Class to update a fedora RELS-EXT datastream, given as an XML string.

    RELS-EXT relationships always have the object's own PID as their
    subject, so the triple methods here drop the subject parameter.
    """
    def __init__(self, pid, namespaces = None, default_namespace = None, xml = None):
        """Constructor for rels_ext_string object.
        Arguments:
        pid -- PID of the fedora object to modify/create rels_ext for.
        namespaces -- Namespaces to be added to the rels_ext.
            [] - list containing ['alias','uri']
            [rels_namespace, ...] - list of rels_namespace objects.
            [[],[],...[]] - list of ['alias','uri']
            rels_namespace - rels_namespace object containing namespace and alias.
        default_namespace -- String containing the alias of the default namespace.
            If no namespace is passed in then this is assumed:
            info:fedora/fedora-system:def/relations-external#
        xml -- Optional string of existing RELS-EXT XML to parse and extend.
        """
        self.pid = pid
        fedora_relationship.__init__(self, namespaces, default_namespace, xml)
    def _updateObject(self, object):
        """Private method to normalize the overloaded object argument.

        Turns everything into a rels_object. Accepts None (wildcard), a
        string, a rels_object, a ['value','type'] pair, or an fcrepo
        FedoraObject (treated as a PID).
        NOTE(review): a bare string is typed as DSID here even though
        RELS-EXT objects are documented as PIDs -- confirm intent.
        """
        if object == None:
            obj = None
        elif isinstance(object,basestring):
            obj = rels_object('%s'%(object), rels_object.DSID)
        elif isinstance(object,rels_object):
            if object.type not in rels_object.TYPES:
                raise TypeError
            else:
                # copy so later mutation does not affect the caller's object
                obj = copy.copy(object)
        elif isinstance(object, list):
            reltype = object[1].lower()
            if reltype == 'dsid':
                obj = rels_object(object[0], rels_object.DSID)
            elif reltype == 'pid':
                obj = rels_object(object[0], rels_object.PID)
            elif reltype == 'literal':
                obj = rels_object(object[0], rels_object.LITERAL)
            else:
                raise KeyError
        elif isinstance(object,fcrepo.object.FedoraObject):
            obj = rels_object(object.pid, rels_object.PID)
        else:
            raise TypeError
        return obj
    def addRelationship(self, predicate, object):
        """Add new relationship to rels_ext XML.
        Arguments:
        predicate -- The predicate.
            This is an overloaded method:
            String - The predicate string. The default namespace is assumed.
            rels_predicate - object with namespace alias and predicate set.
            list - ['alias','predicate']
        object -- The object.
            string - String containing a PID.
            rels_object - Rels object.
            list - ['string','type'] where: type is in ['dsid', 'pid', 'literal']
        """
        obj = self._updateObject(object)
        return fedora_relationship.addRelationship(self, self.pid, predicate, obj)
    def getRelationships(self, predicate=None, object=None):
        """Query relationships contained in rels_ext XML.
        This function uses xpath to do a query to find all the objects that match
        the passed in arguments. Passing None acts as a wildcard.
        Arguments:
        predicate -- The predicate to search for.
            This is an overloaded method:
            None - Any predicate.
            String - The predicate string. The default namespace is assumed.
            rels_predicate - object with namespace alias and predicate set.
            list - ['alias','predicate']
        object -- The object to search for.
            None - Any object.
            string - String containing a PID.
            rels_object - Rels object.
            list - ['string','type'] where: type is in ['dsid', 'pid', 'literal']
        Returns:
        List of lists of the form:
            [[subject1,predicate1,object1],[subject2,predicate2,object2]]
        The predicates and objects returned are of rels_predicate and rels_object
        """
        obj = self._updateObject(object)
        return fedora_relationship.getRelationships(self, self.pid, predicate, obj)
    def purgeRelationships(self, predicate=None, object=None):
        """Purge relationships from the rels_ext XML.
        This function uses xpath to do a query to remove all the objects that match
        the passed in arguments. Passing None acts as a wildcard.
        WARNING: Because None is a wildcard, passing no arguments will
        DELETE THE ENTIRE CONTENTS of the rels_ext.
        Arguments:
        predicate -- The predicate to remove.
            This is an overloaded method:
            None - Any predicate.
            String - The predicate string. The default namespace is assumed.
            rels_predicate - object with namespace alias and predicate set.
            list - ['alias','predicate']
        object -- The object to remove.
            None - Any object.
            string - String containing a PID.
            rels_object - Rels object.
            list - ['string','type'] where: type is in ['dsid', 'pid', 'literal']
        """
        obj = self._updateObject(object)
        return fedora_relationship.purgeRelationships(self, self.pid, predicate, obj)
class fedora_helper():
    """Mixin that adds fcrepo read/write functionality to the string-based
    relationship classes.

    Expects `self.modified` and `self.toString()` to be provided by the
    fedora_relationship side of the class hierarchy it is mixed with.
    """
    def __init__(self, obj, reldsid):
        # Pull the existing datastream content, if any, so the relationship
        # class can parse and extend it; None means "start a new document".
        if reldsid in obj:
            xmlstring = obj[reldsid].getContent().read()
        else:
            xmlstring = None
        self.xmlstring = xmlstring
        self.dsid = reldsid
        self.obj = obj
    def update(self):
        # Only write back to fedora if the XML tree actually changed.
        if self.modified:
            if self.dsid not in self.obj:
                # datastream does not exist yet: create it
                self.obj.addDataStream(self.dsid, self.toString(), label=u"Fedora Object-to-Object Relationship Metadata")
            else:
                self.obj[self.dsid].setContent(self.toString())
class rels_int(rels_int_string, fedora_helper):
    """Class to update a fedora RELS-INT datastream.

    Thin wrapper: all triple manipulation delegates to rels_int_string;
    persistence to/from the fcrepo object is supplied by fedora_helper.
    """
    def __init__(self, obj, namespaces = None, default_namespace = None):
        """Constructor for rels_int object.
        Arguments:
        obj -- The fcrepo object to modify/create rels_int for.
        namespaces -- Namespaces to be added to the rels_int.
            [] - list containing ['alias','uri']
            [rels_namespace, ...] - list of rels_namespace objects.
            [[],[],...[]] - list of ['alias','uri']
            rels_namespace - rels_namespace object containing namespace and alias.
        default_namespace -- String containing the alias of the default namespace.
            If no namespace is passed in then this is assumed:
            info:fedora/fedora-system:def/relations-external#
        """
        # fedora_helper must run first: it loads any existing RELS-INT XML
        # into self.xmlstring, which the string class then parses.
        fedora_helper.__init__(self, obj, 'RELS-INT')
        rels_int_string.__init__(self, obj.pid, namespaces, default_namespace, self.xmlstring)
    def addRelationship(self, subject, predicate, object):
        """Add new relationship to rels_int XML.
        Arguments:
        subject -- String containing the DSID of the subject.
        predicate -- The predicate.
            String - The predicate string. The default namespace is assumed.
            rels_predicate - object with namespace alias and predicate set.
            list - ['alias','predicate']
        object -- The object.
            string - String containing a DSID.
            rels_object - Rels object.
            list - ['string','type'] where: type is in ['dsid', 'pid', 'literal']
        """
        return rels_int_string.addRelationship(self, subject, predicate, object)
    def getRelationships(self, subject=None, predicate=None, object=None):
        """Query relationships contained in rels_int XML.
        This function uses xpath to do a query to find all the objects that match
        the passed in arguments. Passing None acts as a wildcard.
        Arguments:
        subject -- String containing the DSID of the subject.
        predicate -- The predicate to search for.
            This is an overloaded method:
            None - Any predicate.
            String - The predicate string. The default namespace is assumed.
            rels_predicate - object with namespace alias and predicate set.
            list - ['alias','predicate']
        object -- The object to search for.
            None - Any object.
            string - String containing a DSID.
            rels_object - Rels object.
            list - ['string','type'] where: type is in ['dsid', 'pid', 'literal']
        Returns:
        List of lists of the form:
            [[subject1,predicate1,object1],[subject2,predicate2,object2]]
        The predicates and objects returned are of rels_predicate and rels_object
        """
        return rels_int_string.getRelationships(self, subject, predicate, object)
    def purgeRelationships(self, subject=None, predicate=None, object=None):
        """Purge relationships from the rels_int XML.
        This function uses xpath to do a query to remove all the objects that match
        the passed in arguments. Passing None acts as a wildcard.
        WARNING: Because None is a wildcard, passing no arguments will
        DELETE THE ENTIRE CONTENTS of the rels_int.
        Arguments:
        subject -- String containing the DSID.
        predicate -- The predicate to remove.
            None - Any predicate.
            String - The predicate string. The default namespace is assumed.
            rels_predicate - object with namespace alias and predicate set.
            list - ['alias','predicate']
        object -- The object to remove.
            None - Any object.
            string - String containing a DSID.
            rels_object - Rels object.
            list - ['string','type'] where: type is in ['dsid', 'pid', 'literal']
        """
        return rels_int_string.purgeRelationships(self, subject, predicate, object)
    def update(self):
        """Save the updated rels_int XML to the fedora object."""
        return fedora_helper.update(self)
class rels_ext(rels_ext_string, fedora_helper):
"""Class to update a fedora RELS-EXT datastream."""
def __init__(self, obj, namespaces = None, default_namespace = None):
"""Constructor for rels_ext object.
Arguements:
obj -- The fcrepo object to modify/create rels_ext for.
namespaces -- Namespaces to be added to the rels_ext.
[] - list containing ['alias','uri']
[rels_namespace, ...] - list of rels_namespace objects.
[[],[],...[]] - list of ['alias','uri']
rels_namespace - rels_namespace object containing namespace and alias.
default_namespace -- String containing the alias of the default namespace.
If no namespace is passed in then this is assumed:
info:fedora/fedora-system:def/relations-external#
"""
fedora_helper.__init__(self, obj, 'RELS-EXT')
rels_ext_string.__init__(self, obj.pid, namespaces, default_namespace, self.xmlstring)
def addRelationship(self, predicate, object):
"""Add new relationship to rels_ext XML.
Arguements:
predicate -- The predicate.
This is an overloaded method:
String - The predicate string. The default namespace is assumed.
rels_predicate - object with namespace alias and predicate set.
list - ['alias','predicate']
object -- The object.
string - String containing a PID.
rels_object - Rels object.
list - ['string','type'] where: type is in ['dsid', 'pid', 'literal']
"""
return rels_ext_string.addRelationship(self, predicate, object)
def getRelationships(self, predicate=None, object=None):
"""Query relationships contained in rels_ext XML.
This function uses xpath to do a query to find all the objects that match
the passed in arguements. Passing None acts as a wildcard.
Arguements:
predicate -- The predicate to search for.
This is an overloaded method:
None - Any predicate.
String - The predicate string. The default namespace is assumed.
rels_predicate - object with namespace alias and predicate set.
list - ['alias','predicate']
object -- The object to search for.
None - Any object.
string - String containing a PID.
rels_object - Rels object.
list - ['string','type'] where: type is in ['dsid', 'pid', 'literal']
Returns:
List of lists of the form:
[[subject1,predicate1,object1],[subject2,predicate2,object2]]
The predicates and objects returned are of rels_predicate and rels_object
"""
return rels_ext_string.getRelationships(self, predicate, object)
def purgeRelationships(self, predicate=None, object=None):
    """Purge relationships from the rels_ext XML.

    This function uses xpath to find and remove all the relationships that
    match the passed-in arguments. Passing None acts as a wildcard.

    WARNING: Because None is a wildcard, passing no arguments will
    DELETE THE ENTIRE CONTENTS of the rels_ext.

    Arguments:
    predicate -- The predicate to remove.
        This is an overloaded method:
        None - Any predicate.
        String - The predicate string. The default namespace is assumed.
        rels_predicate - object with namespace alias and predicate set.
        list - ['alias','predicate']
    object -- The object to remove.
        None - Any object.
        string - String containing a PID.
        rels_object - Rels object.
        list - ['string','type'] where: type is in ['dsid', 'pid', 'literal']
    """
    # Thin delegator: all real work happens in the rels_ext_string base class.
    return rels_ext_string.purgeRelationships(self, predicate, object)
def update(self):
    """Save the updated rels_ext XML back to the Fedora object's datastream."""
    # Persistence is handled by the fedora_helper base class.
    return fedora_helper.update(self)
# do some basic testing of the functionality
if __name__ == '__main__':
    # Constructor form 1: a list of rels_namespace objects (no default alias).
    relationship = fedora_relationship([rels_namespace('coal','http://www.coalliance.org/ontologies/relsint'), rels_namespace('jon','http://jebus/trainstation')])
    print relationship.toString()
    # Fully-qualified predicate via rels_predicate, literal object.
    relationship.addRelationship('coccc:2040', rels_predicate('jon','feezle'), rels_object('JON',rels_object.LITERAL))
    print relationship.toString()
    # Constructor form 2: a single namespace with an explicit default alias.
    relationship = fedora_relationship(rels_namespace('coal','http://www.coalliance.org/ontologies/relsint'), 'coal')
    print relationship.toString()
    # Bare-string predicate -- resolved against the default namespace.
    relationship.addRelationship('coccc:2040', 'HasAwesomeness', rels_object('JON',rels_object.LITERAL))
    print relationship.toString()
    # Constructor form 3: no namespaces at all (built-in default is assumed).
    relationship = fedora_relationship()
    print relationship.toString()
    relationship.addRelationship('coccc:2040', 'HasAwesomeness', rels_object('JON',rels_object.LITERAL))
    print relationship.toString()
    # PID and DSID object types.
    relationship.addRelationship('coccc:2040', 'HasTN', rels_object('coccc:2030',rels_object.PID))
    print relationship.toString()
    relationship.addRelationship('coccc:2033', 'HasTN', rels_object('coccc:2040',rels_object.PID))
    print relationship.toString()
    relationship.addRelationship('coccc:2033/DSID', 'HasTN', rels_object('coccc:2040/DSID',rels_object.DSID))
    print relationship.toString()
    # Wildcard queries: any unset parameter matches everything.
    results = relationship.getRelationships(predicate = 'HasTN')
    print results
    results = relationship.getRelationships(predicate = rels_predicate('fedora','HasTN'))
    print results
    results = relationship.getRelationships(object = rels_object('coccc:2040/DSID',rels_object.DSID))
    print results
    results = relationship.getRelationships(object = rels_object('JON',rels_object.LITERAL))
    print results
    # NOTE(review): the wrapper methods defined above accept only predicate/object;
    # presumably the underlying rels_ext_string implementation also accepts a
    # subject= keyword -- verify, otherwise these calls raise TypeError.
    results = relationship.getRelationships(subject = 'coccc:2040')
    print results
    results = relationship.getRelationships(subject = 'coccc:2040', predicate = 'HasTN')
    print results
    relationship.purgeRelationships(subject = 'coccc:2040')
    print relationship.toString()
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,698
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/islandoraUtils/DSConverter.py
|
'''
Created on March 5, 2011
@author: jonathangreen
Copied into islandoraUtils by Adam Vessey
TODO: Should likely be made to use the fileConverter module, so as not to have
two copies of code which do much of the same thing... If someone is doing this,
this should be treated as the canonical copy. I have been updating these
conversion scripts with input from JWA and Colorado.
'''
from islandoraUtils.fedoraLib import get_datastream_as_file, update_datastream
from shutil import rmtree, move
from datetime import datetime
import os
import subprocess
import logging
from lxml import etree
from fcrepo.connection import FedoraConnectionException
import re
import math
# thumbnail constants
tn_postfix = '-tn.jpg'  # filename suffix used for thumbnail derivatives
tn_size = (150, 200)    # (width, height) bounding box handed to ImageMagick -thumbnail
def create_thumbnail(obj, dsid, tnid):
    """Create a JPEG thumbnail datastream (tnid) from datastream dsid of obj.

    For video/mp4 sources a frame from the middle of the video is grabbed
    with ffmpeg and then thumbnailed; everything else is thumbnailed
    directly with ImageMagick's convert.
    Returns the final subprocess return code (0 on success).
    """
    logger = logging.getLogger('islandoraUtils.DSConverter.create_thumbnail')
    # We receive a file and create a jpg thumbnail
    directory, file = get_datastream_as_file(obj, dsid, "tmp")
    # find out what mimetype the input file is (KeyError when none recorded)
    try:
        mime = obj[dsid].mimeType
    except KeyError:
        mime = None
    infile = os.path.join(directory, file)
    tmpfile = os.path.join(directory, 'tmp.jpg')
    tnfile = os.path.join(directory, tnid)
    # make the thumbnail based on the mimetype of the input
    # right now we assume everything but video/mp4 can be handled
    if mime == 'video/mp4':
        # grab the 'middle' of the video for use in creating thumbnails from mp4s
        p = subprocess.Popen(['ffmpeg', '-i', infile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        # use stderr as ffmpeg expects two params, but duration is still returned with only the source
        duration = re.search(r"Duration:\s{1}\d{2}:\d{2}:\d{2}\.\d{2},", stderr).group()
        duration = duration.replace("Duration: ", '')
        duration = duration.split('.')
        # get everything before the milliseconds in hr:min:seconds format
        duration = duration[0]
        duration = map(int, duration.split(':'))
        # BUGFIX: an hour is 3600 seconds, not 360 -- the old factor put the
        # "middle" far from the real midpoint for longer videos
        time = math.floor(((duration[0] * 3600) + (duration[1] * 60) + duration[2]) / 2)
        r = subprocess.call(['ffmpeg', '-itsoffset', '-4', '-ss', str(time), '-i', infile, '-vcodec', 'mjpeg',\
             '-vframes', '1', '-an', '-f', 'rawvideo', tmpfile])
        if r == 0:
            r = subprocess.call(['convert', '%s[0]' % tmpfile, '-thumbnail', '%sx%s' % tn_size,\
                 '-colorspace', 'rgb', 'jpg:%s'%tnfile])
    else:
        # Make a thumbnail with convert
        r = subprocess.call(['convert', '%s[0]' % infile, '-thumbnail', \
             '%sx%s' % tn_size, '-colorspace', 'rgb', '+profile', '*', 'jpg:%s'%tnfile])
    if r == 0:
        update_datastream(obj, tnid, directory+'/'+tnid, label='thumbnail', mimeType='image/jpeg')
    else :
        logger.warning('PID:%s DSID:%s Thumbnail creation failed (return code:%d).' % (obj.pid, dsid, r))
        logger.debug(directory)
        logger.debug(file)
        logger.debug(tnid)
        logger.debug(os.listdir(directory))
    rmtree(directory, ignore_errors=True)
    return r
def create_jp2(obj, dsid, jp2id):
    """Create a lossy JPEG 2000 datastream (jp2id) from the TIFF datastream dsid.

    The TIFF is first uncompressed with ImageMagick (Kakadu cannot take
    compressed input without a license), then encoded with kdu_compress;
    when that fails, ImageMagick's JPEG2000 encoder is tried as a fallback.
    Returns the last subprocess return code (0 on success).
    """
    logger = logging.getLogger('islandoraUtils.DSConverter.create_jp2')
    # We receive a TIFF and create a Lossless JPEG 2000 file from it.
    directory, file = get_datastream_as_file(obj, dsid, 'tiff')
    r = subprocess.call(["convert", directory+'/'+file, '+compress', '-colorspace', 'RGB', directory+'/uncompressed.tiff'])
    if r != 0:
        logger.warning('PID:%s DSID:%s JP2 creation failed (convert return code:%d).' % (obj.pid, dsid, r))
        rmtree(directory, ignore_errors=True)
        return r
    r = subprocess.call(["kdu_compress", "-i", directory+'/uncompressed.tiff', "-o", directory+"/tmpfile_lossy.jp2", "-rate", "0.5", "Clayers=1", "Clevels=7", "Cprecincts={256,256},{256,256},{256,256},{128,128},{128,128},{64,64},{64,64},{32,32},{16,16}", "Corder=RPCL", "ORGgen_plt=yes", "ORGtparts=R", "Cblk={32,32}", "Cuse_sop=yes"])
    if r != 0:
        logger.warning('PID:%s DSID:%s JP2 creation failed. Trying alternative.' % (obj.pid, dsid))
        r = subprocess.call(["convert", directory+'/'+file, '-compress', 'JPEG2000', '-quality', '50%', directory+'/tmpfile_lossy.jp2'])
        if r != 0:
            # BUGFIX: r here is the fallback convert's return code, not
            # kdu_compress's -- the old message blamed the wrong tool
            logger.warning('PID:%s DSID:%s JP2 creation failed (convert fallback return code:%d).' % (obj.pid, dsid, r))
    if r == 0:
        update_datastream(obj, jp2id, directory+'/tmpfile_lossy.jp2', label='Compressed JPEG2000', mimeType='image/jp2')
    rmtree(directory, ignore_errors=True)
    return r
def create_mp4(obj, dsid, mp4id):
    """Create a web-streamable MP4 datastream (mp4id) from datastream dsid of obj."""
    log = logging.getLogger('islandoraUtils.DSConverter.create_mp4')
    directory, filename = get_datastream_as_file(obj, dsid, 'video')
    source = os.path.join(directory, filename)
    target = os.path.join(directory, 'output.mp4')
    # h.264 video + AAC audio, with the moov atom up front for streaming
    ffmpeg_args = ['ffmpeg', '-i', source, '-f', 'mp4', '-vcodec', 'libx264',
                   '-preset', 'medium', '-acodec', 'libfaac', '-ab', '128k',
                   '-ac', '2', '-async', '1', '-movflags', 'faststart', target]
    r = subprocess.call(ffmpeg_args)
    if r != 0:
        log.warning('PID:%s DSID:%s MP4 creation (ffmpeg) failed.' % (obj.pid, dsid))
    else:
        update_datastream(obj, mp4id, target, label='compressed mp4', mimeType='video/mp4')
    rmtree(directory, ignore_errors=True)
    return r
def create_mp3(obj, dsid, mp3id, args = None):
    """Create an MP3 datastream (mp3id) from the audio datastream dsid of obj.

    args -- optional list of extra lame options; when None, sensible
    web-streaming defaults are used.
    Returns the lame return code (0 on success).
    """
    logger = logging.getLogger('islandoraUtils.DSConverter.create_mp3')
    # mimetype throws keyerror if it doesn't exist
    try:
        mime = obj[dsid].mimeType
    except KeyError:
        mime = None
    if mime == 'audio/mpeg':
        ext = 'mp3'
    else:
        ext = 'wav'
    # We receive a WAV file. Create a MP3
    directory, file = get_datastream_as_file(obj, dsid, ext)
    # I think we need more sensible defaults for web streaming
    if args == None:
        args = ['-mj', '-v', '-V6', '-B224', '--strictly-enforce-ISO']
    else:
        # BUGFIX: copy before mutating so the caller's list is not altered
        # by the insert/append calls below
        args = list(args)
    args.insert(0, 'lame')
    args.append(os.path.join(directory, file))
    outpath = os.path.join(directory, mp3id)
    args.append(outpath)
    # Make MP3 with lame
    r = subprocess.call(args)
    if r == 0:
        update_datastream(obj, mp3id, outpath, label='compressed to mp3', mimeType='audio/mpeg')
    else:
        logger.warning('PID:%s DSID:%s MP3 creation failed (lame return code:%d).' % (obj.pid, dsid, r))
    rmtree(directory, ignore_errors=True)
    return r
def create_ogg(obj, dsid, oggid):
    """Create an Ogg Vorbis datastream (oggid) from the WAV datastream dsid of obj."""
    log = logging.getLogger('islandoraUtils.DSConverter.create_ogg')
    # receive a wav file, create an OGG
    directory, filename = get_datastream_as_file(obj, dsid, "wav")
    source = directory + '/' + filename
    target = directory + '/' + oggid
    # Make OGG with ffmpeg
    r = subprocess.call(['ffmpeg', '-i', source, '-acodec', 'libvorbis', '-ab', '96k', target])
    if r == 0:
        update_datastream(obj, oggid, target, label='compressed to ogg', mimeType='audio/ogg')
    else:
        log.warning('PID:%s DSID:%s OGG creation failed (ffmpeg return code:%d).' % (obj.pid, dsid, r))
    rmtree(directory, ignore_errors=True)
    return r
def create_swf(obj, dsid, swfid, args = None):
    """Create an SWF datastream (swfid) from the PDF datastream dsid of obj.

    With the default arguments, a '-s poly2bitmap' retry is attempted when the
    first pdf2swf run fails (pdf2swf chokes on some very large/complex PDFs),
    and a success return code is cross-checked against the output file's
    existence. Custom args skip both safeguards.
    Returns 0 on success, non-zero otherwise.
    """
    logger = logging.getLogger('islandoraUtils.DSConverter.create_swf')
    directory, file = get_datastream_as_file(obj, dsid, "pdf") # receive PDF, create a SWF for use with flexpaper
    program = ['pdf2swf', directory+'/'+file, '-o', directory+'/'+swfid]
    if args == None:
        default_args = ['-T 9', '-f', '-t', '-s', 'storeallcharacters', '-G']
        pdf2swf = subprocess.Popen(program + default_args, stdout=subprocess.PIPE)
        out, err = pdf2swf.communicate()
        # try with additional arguments
        if pdf2swf.returncode != 0:
            logger.warning('PID:%s DSID:%s SWF creation failed. Trying alternative.' % (obj.pid, dsid))
            extra_args = ['-s', 'poly2bitmap']
            pdf2swf = subprocess.Popen(program + default_args + extra_args, stdout=subprocess.PIPE)
            out, err = pdf2swf.communicate()
        # catch the case where PDF2SWF fails to create the file, but returns success
        if pdf2swf.returncode == 0 and os.path.isfile(directory + '/' + swfid):
            update_datastream(obj, swfid, directory+'/'+swfid, label='pdf to swf', mimeType='application/x-shockwave-flash')
            r = 0
        elif not os.path.isfile(directory + '/' + swfid):
            logger.warning('PID:%s DSID:%s SWF creation failed (pdf2swf returned: "%s").' % (obj.pid, dsid, out))
            r = 1
        else:
            logger.warning('PID:%s DSID:%s SWF creation failed (pdf2swf return code:%d).' % (obj.pid, dsid, pdf2swf.returncode))
            r = pdf2swf.returncode
    else:
        r = subprocess.call(program + args)
        if r != 0:
            # BUGFIX: the tool name was misspelled as 'pdf 2swf' in this message
            logger.warning('PID:%s DSID:%s SWF creation failed (pdf2swf return code:%d).' % (obj.pid, dsid, r))
        if r == 0:
            update_datastream(obj, swfid, directory+'/'+swfid, label='pdf to swf', mimeType='application/x-shockwave-flash')
    rmtree(directory, ignore_errors=True)
    return r
def create_pdf(obj, dsid, pdfid):
    """Create a PDF datastream (pdfid) from the document datastream dsid of obj.

    Uses LibreOffice in headless mode. Returns 0 on success, 1 on failure.
    """
    logger = logging.getLogger('islandoraUtils.DSConverter.create_pdf' )
    # receive document and create a PDF with libreoffice if possible
    directory, filename = get_datastream_as_file(obj, dsid, "document")
    subprocess.call(['soffice', '--headless', '-convert-to', 'pdf', '-outdir', directory, directory + '/' + filename])
    # soffice names its output after the input, up to the first dot
    pdf_name = filename.split('.', 1)[0] + '.pdf'
    if os.path.isfile(directory + '/' + pdf_name):
        update_datastream(obj, pdfid, directory + '/' + pdf_name, label='doc to pdf', mimeType='application/pdf')
        # we should probably be using true or false like normal python, but i stay consistant here
        value = 0
    else:
        value = 1
        logger.warning('PID:%s DSID:%s PDF creation failed.' % (obj.pid, dsid))
        logger.debug(os.listdir(directory))
    rmtree(directory, ignore_errors=True)
    return value
def marcxml_to_mods(obj, dsid, dsidOut='MODS'):
    """Transform the MARCXML datastream dsid of obj into MODS, stored as dsidOut."""
    logger = logging.getLogger('islandoraUtils.DSConverter.marcxml_to_mods')
    directory, filename = get_datastream_as_file(obj, dsid, 'MARCXML')
    logger.debug('Got datastream')
    source_doc = etree.parse(os.path.join(directory, filename))
    logger.debug('Parsed datastream')
    xslt_path = os.path.join(os.path.dirname(__file__), '__resources/marcxml2mods.xslt')
    to_mods = etree.XSLT(etree.parse(xslt_path))
    logger.debug('Parsed XSLT')
    mods_doc = to_mods(source_doc)
    logger.debug('Transformed datastream')
    # unbuffered write so the bytes are on disk before update_datastream reads them
    with open(os.path.join(directory, dsidOut), 'w', 0) as temp:
        mods_doc.write(temp)
    logger.debug('Wrote transformed DS to disk')
    r = update_datastream(obj, dsidOut, temp.name, label='MODS (translated from MARCXML)', mimeType="text/xml")
    rmtree(directory, ignore_errors=True)
    return r
def check_dates(obj, dsid, derivativeid):
    """Return True when datastream dsid is newer than its derivative.

    Also returns True when either creation date cannot be fetched, so the
    derivative gets (re)generated in case of doubt.
    """
    fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
    try:
        src_created = datetime.strptime(obj[dsid].createdDate, fmt)
        der_created = datetime.strptime(obj[derivativeid].createdDate, fmt)
    except FedoraConnectionException:
        return True
    return src_created > der_created
def create_fits(obj, dsid, derivativeid = 'FITS', args = None):
    """Run FITS over datastream dsid of obj and store the XML report as derivativeid.

    args -- optional list of extra fits command-line arguments.
    Returns the fits return code (0 on success).
    """
    logger = logging.getLogger('islandoraUtils.DSConverter.create_fits' )
    # BUGFIX: the default was a shared mutable list ([]); use the None
    # sentinel instead (behavior is unchanged for all callers)
    if args is None:
        args = []
    directory, file = get_datastream_as_file(obj, dsid, "document")
    in_file = directory + '/' + file
    out_file = directory + '/FITS.xml'
    program = ['fits', '-i', in_file, '-o', out_file]
    r = subprocess.call(program + args)
    if r != 0:
        logger.warning('PID:%s DSID:%s FITS creation failed (fits return code:%d).' % (obj.pid, dsid, r))
    else:
        update_datastream(obj, derivativeid, out_file, label='FITS Generated Image Metadata', mimeType='text/xml')
    rmtree(directory, ignore_errors=True)
    return r
def create_csv(obj, dsid = 'OBJ', derivativeid = 'CSV', args = None):
    """Convert an XLS datastream of obj to CSV datastreams via xls2csv.

    Each worksheet becomes its own datastream: the first is named
    derivativeid, subsequent sheets derivativeid + '_SHEET_<n>'.
    Returns the xls2csv return code (0 on success).
    """
    logger = logging.getLogger('islandoraUtils.DSConverter.create_csv' )
    # BUGFIX: the default was a shared mutable list ([]); use the None sentinel
    if args is None:
        args = []
    directory, file = get_datastream_as_file(obj, dsid, "document")
    in_file = directory + '/' + file
    process = subprocess.Popen(['xls2csv', '-x', in_file] + args, stdout=subprocess.PIPE)
    output = process.communicate()[0]
    if process.returncode != 0:
        # BUGFIX: the old message referenced an undefined name 'r'
        # (NameError on the failure path); log the real return code
        logger.warning('PID:%s DSID:%s CSV creation failed (xls2csv return code:%d).' % (obj.pid, dsid, process.returncode))
    else:
        num_sheet = 0
        out_file = directory + '/' + 'csv.csv'
        logger.warning('Output: ' + output)
        # xls2csv separates worksheets with form-feed characters
        sheets = output.split("\f")
        for sheet in sheets:
            if len(sheet) != 0:
                logger.warning('PID:%s DSID:%s CSV create sheet: %d.' % (obj.pid, dsid, num_sheet))
                f = open(out_file, 'w')
                f.write(sheet)
                f.close()
                new_dsid = derivativeid + '_SHEET_' + str(num_sheet) if num_sheet > 0 else derivativeid
                update_datastream(obj, new_dsid, out_file, 'CSV Generated Metadata', 'text/csv')
                num_sheet += 1
    rmtree(directory, ignore_errors=True)
    return process.returncode
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,699
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/islandoraUtils/fileConverter.py
|
'''
Created on Apr 5, 2011
@author: William Panting
@dependencies: Kakadu, ImageMagick, ABBYY CLI, Lame, SWFTools, FFmpeg
This is a Library that will make file conversions and manipulations like OCR using Python easier.
Primarily it will use Kakadu and ABBYY
but it will fall back on ImageMagick if Kakadu fails and for some conversions Kakadu does not support.
Used scripts created by Jonathan Green as the starting point.
Please make sure that the output directory already exists.
Note that the extra args variables are here to facilitate debugging if there are ever version issues.
TODO: add video support
TODO: add open office word doc conversions
TODO: make recursive option
TODO: explore handling output directory creation
TODO: explore more input file type checking
TODO: explore better conversion options
TODO: explore more backup solutions
FIXME: Some poor assumptions are made regarding paths... There exist other types of files besides 'files'and 'directories' (block/char devices, sym-links (which may cause weird evaluations?), etc...)
TODO: Seems like generalizing file selection based on a path and extension(s) could be rather useful
or automatically determine file type by magic number (resulting in things like tif_to_jpg -> any_to_jpg)
TODO: provide override options for various input checks
'''
import logging, subprocess, os, xmlib
def tif_to_jp2(inPath, outPath, kakaduOpts=None, imageMagicOpts=None, *extraArgs):
    '''
    Converts tiff to jp2
    @param inPath: source file or dir
    @param outPath: destination file or dir
    @param kakaduOpts: a list of options or a string 'default'
    @param imageMagicOpts: a list of options or a string 'default'
    @return bool: true if successful [completion not conversion] false if not
    '''
    # error checking, does not take TN
    if checkStd(inPath, outPath, extraArgs, kakaduOpts, imageMagicOpts) == False:
        return False
    if kakaduOpts == 'TN' or imageMagicOpts == 'TN':
        logging.error('This function tif_to_jp2 does not accept the \'TN\' keyword')
        return False
    # determine the output directory for the tempfile and for when there are
    # multiple output files due to a directory batch
    if os.path.isdir(outPath) == False:  # outPath is a file path
        outDirectory, fileNameOut = os.path.split(outPath)
    else:  # is a directory
        outDirectory = outPath
    tmpFilePath = os.path.join(outDirectory, 'uncompressed.tiff')
    # create list of files to be converted (only .tif/.tiff entries)
    if os.path.isdir(inPath) == False:
        inDirectory, fileListStr = os.path.split(inPath)
        fileList = [fileListStr]
    else:
        inDirectory = inPath
        fileList = [p for p in os.listdir(inPath)
                    if p.endswith('.tif') or p.endswith('.tiff')]
    for fileName in fileList:
        # if fileNameOut was not in outPath make one up
        if os.path.isdir(outPath) == True:
            fileNameOut = fileName[0:fileName.rindex('.')] + '.jp2'
        filePathIn = os.path.join(inDirectory, fileName)
        filePathOut = os.path.join(outDirectory, fileNameOut)
        # use convert because the lack of a Kakadu license means we can't convert a compressed tif
        r = subprocess.call(["convert", filePathIn, '+compress', tmpFilePath])
        if r != 0:
            logging.warning('JP2 creation failed (convert return code:%d for file input %s).' % (r, filePathIn))
        # prep Kakadu call
        if kakaduOpts != 'default':
            # BUGFIX: this must be a list -- it was built as a tuple, and
            # tuple has no extend(), so custom kakaduOpts always crashed
            kakaduCall = ["kdu_compress", "-i", tmpFilePath, "-o", filePathOut]
            kakaduCall.extend(kakaduOpts)
        else:
            kakaduCall = ["kdu_compress", "-i", tmpFilePath,\
                "-o", filePathOut,\
                "-rate", "0.5", "Clayers=1", "Clevels=7",\
                "Cprecincts={256,256},{256,256},{256,256},{128,128},{128,128},{64,64},{64,64},{32,32},{16,16}",\
                "Corder=RPCL", "ORGgen_plt=yes", "ORGtparts=R", "Cblk={32,32}", "Cuse_sop=yes"]
        # make Kakadu call
        r = subprocess.call(kakaduCall)
        # if Kakadu fails [happens on certain color pellets] use less powerful ImageMagicK on original file
        if r != 0:
            logging.warning('JP2 creation failed. Trying alternative (kdu_compress return code:%d).' % (r) )
            # prep image magic call
            if imageMagicOpts != 'default':
                # BUGFIX: list, not tuple (tuple has no extend()/append())
                imageMagicCall = ["convert", filePathIn]
                imageMagicCall.extend(imageMagicOpts)
                imageMagicCall.append(filePathOut)
            else:
                imageMagicCall = ["convert", filePathIn, '-compress', 'JPEG2000', '-quality', '50%', filePathOut]
            # make image magic call
            r = subprocess.call(imageMagicCall)
            if r != 0:
                logging.warning('JP2 creation failed (convert return code:%d for file input %s).' % (r, filePathIn))
        if r == 0:
            logging.info('File converted: %s' % (filePathOut))
    # kill the temp file if it exists
    if os.path.exists(tmpFilePath):
        os.remove(tmpFilePath)
    return True
def tif_OCR(inPath, outPath, fileTypeOpts, inputOpts=None, *extraArgs):
    '''
    ABBYY OCR CLI Command Line Tool support
    @param: inPath: source file or dir
    @param: outPath: destination file or dir
    @param: inputOpts: the ABBYY command line options not associated with a specific file output type, can be None
    @param: fileTypeOpts: 1. a dictionary where the key is a file output type and the value is a string 'default' or list of options, or 2. a string 'default'
    @return bool: true if successful [completion not conversion] false if not
    TODO: make default output options for all output file types
    '''
    # error checking, does not take TN
    if not checkPaths(inPath, outPath):
        return False
    if fileTypeOpts == 'TN' or inputOpts == 'TN':
        # BUGFIX: error message previously named the wrong function (tif_to_jp2)
        logging.error('This function tif_OCR does not accept the \'TN\' keyword')
        return False
    # special dictionary error checking
    if not isinstance(fileTypeOpts, dict) and fileTypeOpts != 'default':
        logging.error('The fileTypeOpts must be a dictionary or the keyword \'default\'.' )
        return False
    # prevents the script from attempting to write multiple output files to one output file path
    if os.path.isdir(inPath) and not os.path.isdir(outPath):
        logging.error('If the input path is a directory, so must be the output path.')
        return False
    if len(fileTypeOpts) > 1 and fileTypeOpts != 'default' and os.path.isdir(outPath) != True:
        logging.error('If there is to be more than one output file then the output path must be a directory.')
        return False
    # determine the output directory if there are multiple output files due to a directory batch
    if not os.path.isdir(outPath):
        outDirectory, fileNameOut = os.path.split(outPath)
    else:  # is a directory
        outDirectory = outPath
    # create list of files to be converted (only .tif/.tiff entries)
    if not os.path.isdir(inPath):
        inDirectory, fileListStr = os.path.split(inPath)
        fileList = [fileListStr]
    else:
        inDirectory = inPath
        # BUGFIX: the original removed entries from fileList while iterating
        # over that same list, which skips elements; filter instead
        fileList = [p for p in os.listdir(inPath)
                    if p.endswith('.tif') or p.endswith('.tiff')]
    for fileName in fileList:
        # some useful vars
        absPathFileIn = os.path.join(inDirectory, fileName)
        absPathFileOutNoExt = os.path.join(outDirectory, fileName[0:fileName.rindex('.')])
        # reset the ABBYY call
        ABBYY_Call = ['CLI']
        # input opts and input file
        if inputOpts != 'default' and inputOpts != None:
            ABBYY_Call.extend(inputOpts)
        ABBYY_Call.extend(('-if', absPathFileIn))
        # supported output types: (file extension, default per-type options)
        outputs = {
            'PDF': ('.pdf', ('-pem', 'ImageOnText', '-pfq', '75')),
            'HTML': ('.html', ()),
            'RTF': ('.rtf', ()),
            'DBF': ('.dbf', ()),
            'XML': ('.xml', ()),
            'TEXT': ('.txt', ('-tel', '-tpb', '-tet', 'UTF8')),
            'XLS': ('.xls', ())
        }
        # determine output file extension, and input check
        for outputType, outputOpts in fileTypeOpts.iteritems():
            try:
                extension, defaultOutPutsOpts = outputs[outputType]
            except KeyError:
                logging.error('Incorrect output type "%s" specified for ABBYY CLI.', outputType)
                return False
            # append this round of output info
            ABBYY_Call.extend(('-f', outputType))
            if outputOpts != 'default':
                ABBYY_Call.extend(fileTypeOpts[outputType])
            else:
                # BUGFIX: this name was misspelled (defaultOutPutOpts), raising
                # NameError whenever a type's 'default' options were requested
                ABBYY_Call.extend(defaultOutPutsOpts)
            # append output file for this round
            ABBYY_Call.extend(('-of', absPathFileOutNoExt + extension))
        # make ABBYY call
        r = subprocess.call(ABBYY_Call)
        if r != 0:
            logging.warning('JP2 creation failed (ABBYY CLI return code:%d).' % ( r))
        elif r == 0:
            logging.info('File OCR\'d: %s' % (absPathFileIn))
    return True
def tif_to_jpg(inPath, outPath, imageMagicOpts, *extraArgs):
    '''
    This function will use ImageMagick to convert tifs to jpgs
    @param: inPath: source file or dir
    @param: outPath: destination file or dir
    @param imageMagicOpts: can be 'default' 'TN' or a list of options to use
    @return bool: true if successful false if not
    '''
    # error checking
    if checkStd(inPath, outPath, extraArgs, imageMagicOpts) == False:
        return False
    # resolve the output directory (and fixed output name when outPath is a file)
    if os.path.isdir(outPath):
        outDirectory = outPath
    else:
        outDirectory, fileNameOut = os.path.split(outPath)
    # build the list of .tif/.tiff inputs
    if os.path.isdir(inPath):
        inDirectory = inPath
        fileList = [name for name in os.listdir(inPath)
                    if name.endswith('.tif') or name.endswith('.tiff')]
    else:
        inDirectory, singleName = os.path.split(inPath)
        fileList = [singleName]
    for fileName in fileList:
        # if fileNameOut was not in outPath, derive one from the input name
        if os.path.isdir(outPath):
            fileNameOut = fileName[0:fileName.rindex('.')] + '.jpg'
        filePathIn = os.path.join(inDirectory, fileName)
        filePathOut = os.path.join(outDirectory, fileNameOut)
        # assemble the ImageMagick invocation
        if imageMagicOpts == 'default':
            call = ["convert", filePathIn, '-compress', 'JPEG', '-quality', '50%', filePathOut]
        elif imageMagicOpts == 'TN':
            call = ["convert", filePathIn, '-compress', 'JPEG', "-thumbnail", "85x110", "-gravity", "center", "-extent", "85x110", filePathOut]
        else:
            call = ["convert", filePathIn] + list(imageMagicOpts) + [filePathOut]
        r = subprocess.call(call)
        if r != 0:
            logging.warning('JPG creation failed (convert return code:%d for file input %s).' % (r, filePathIn))
        else:
            logging.info('File converted: %s' % (filePathOut))
    return True
def tif_to_pdf(inPath, outPath, tiff2pdfOpts, *extraArgs):
    '''
    This function will use the shell's tiff2pdf to convert tiff files to pdf
    @param: inPath: source file
    @param: outPath: destination file or dir
    @param tiff2pdfOpts: options to be applied to the conversion, can be 'default'
    @return bool: true if successful [completion not conversion] false if not
    '''
    # error checking
    if not checkStd(inPath, outPath, extraArgs, tiff2pdfOpts):
        return False
    if tiff2pdfOpts == 'TN':
        logging.error('This function tif_to_pdf does not support the \'TN\' keyword')
        return False
    # resolve the output directory (and fixed output name when outPath is a file)
    if os.path.isdir(outPath):
        outDirectory = outPath
    else:
        outDirectory, fileNameOut = os.path.split(outPath)
    # inPath is a single file for this conversion
    inDirectory, baseName = os.path.split(inPath)
    for fileName in [baseName]:
        # if fileNameOut was not in outPath, derive one from the input name
        if os.path.isdir(outPath):
            fileNameOut = "%s.pdf" % os.path.splitext(fileName)[0]
        filePathIn = os.path.join(inDirectory, fileName)
        filePathOut = os.path.join(outDirectory, fileNameOut)
        # assemble the conversion call
        if tiff2pdfOpts == 'default':
            call = ["convert", filePathIn, '-resize', '1024x1024^', '-compress', 'jpeg', '-quality', '50', filePathOut]
        else:
            call = ["convert", filePathIn] + list(tiff2pdfOpts) + [filePathOut]
        r = subprocess.call(call)
        if r == 0:
            logging.info('File converted: %s' % (filePathOut))
        else:
            logging.warning('PDF creation failed (tiff2pdf return code:%d for file input %s).' % (r, filePathIn))
    return True
def pdf_to_swf(inPath, outPath, swfToolsOpts, *extraArgs):
    '''
    This function will use swftools to convert pdf files to swfs
    @param: inPath: source file or dir
    @param: outPath: destination file or dir
    @param swfToolsOpts: options to be applied to the conversion can be 'default'
    @return bool: true if successful [completion not conversion] false if not
    '''
    # error checking
    if checkStd(inPath, outPath, extraArgs, swfToolsOpts) == False:
        return False
    if swfToolsOpts == 'TN':
        logging.error('This function pdf_to_swf does not accept the \'TN\' keyword')
        return False
    # determine the output directory for when there are multiple output files
    # due to a directory batch
    if os.path.isdir(outPath) == False:  # outPath is a file path
        outDirectory, fileNameOut = os.path.split(outPath)
    else:  # is a directory
        outDirectory = outPath
    # create list of files to be converted (only .pdf entries)
    if os.path.isdir(inPath) == False:
        inDirectory, fileListStr = os.path.split(inPath)
        fileList = [fileListStr]
    else:
        inDirectory = inPath
        fileList = [p for p in os.listdir(inPath) if p.endswith('.pdf')]
    for fileName in fileList:
        # if fileNameOut was not in outPath make one up
        if os.path.isdir(outPath) == True:
            # BUGFIX: the generated output name used a '.pdf' extension,
            # so batch conversions overwrote-looking names instead of .swf
            fileNameOut = fileName[0:fileName.rindex('.')] + '.swf'
        filePathIn = os.path.join(inDirectory, fileName)
        filePathOut = os.path.join(outDirectory, fileNameOut)
        # create the pdf2swf call
        if swfToolsOpts == 'default':
            swfToolsCall = ["pdf2swf", filePathIn, '-o', filePathOut, '-T 9', '-f', '-t', '-s', 'storeallcharacters', '-G']
        else:
            swfToolsCall = ["pdf2swf", filePathIn, '-o', filePathOut]
            swfToolsCall.extend(swfToolsOpts)
        # make the system call
        r = subprocess.call(swfToolsCall)
        # move to bitmap because swftools fails on very large files otherwise
        if swfToolsOpts == 'default' and r != 0:
            logging.warning('PDF creation failed (SWFTools return code:%d for file input %s: Trying alternative.).' % (r, filePathIn))
            swfToolsCall = ["pdf2swf", filePathIn, '-o', filePathOut, '-T 9', '-f', '-t', '-s', 'storeallcharacters', '-G', '-s', 'poly2bitmap']
            r = subprocess.call(swfToolsCall)
        if r != 0:
            logging.warning('PDF creation failed (SWFTools return code:%d for file input %s).' % (r, filePathIn))
        if r == 0:
            logging.info('File converted: %s' % (filePathOut))
    return True
def wav_to_ogg(inPath, outPath, FFmpegOpts, *extraArgs):
    '''
    This function will use FFmpeg to turn a wav file into an ogg file
    @param: inPath: source file or dir
    @param: outPath: destination file or dir
    @param FFmpegOpts: options to be applied to the conversion can be 'default'
    @return bool: true if successful [completion not conversion] false if not
    '''
    # error checking
    if checkStd(inPath, outPath, extraArgs, FFmpegOpts) == False:
        return False
    if FFmpegOpts == 'TN':
        logging.error('This function wav_to_ogg does not accept the \'TN\' keyword')
        return False
    # resolve the output directory (and fixed output name when outPath is a file)
    if os.path.isdir(outPath):
        outDirectory = outPath
    else:
        outDirectory, fileNameOut = os.path.split(outPath)
    # build the list of .wav inputs
    if os.path.isdir(inPath):
        inDirectory = inPath
        fileList = [name for name in os.listdir(inPath) if name.endswith('.wav')]
    else:
        inDirectory, singleName = os.path.split(inPath)
        fileList = [singleName]
    for fileName in fileList:
        # if fileNameOut was not in outPath, derive one from the input name
        if os.path.isdir(outPath):
            fileNameOut = fileName[0:fileName.rindex('.')] + '.ogg'
        filePathIn = os.path.join(inDirectory, fileName)
        filePathOut = os.path.join(outDirectory, fileNameOut)
        # assemble the ffmpeg invocation
        if FFmpegOpts == 'default':
            call = ['ffmpeg', '-i', filePathIn, '-acodec', 'libvorbis', '-ab', '48k', filePathOut]
        else:
            call = ['ffmpeg', '-i', filePathIn] + list(FFmpegOpts) + [filePathOut]
        r = subprocess.call(call)
        if r != 0:
            logging.warning('ogg creation failed (FFmpeg return code:%d for file input %s).' % (r, filePathIn))
        else:
            logging.info('File converted: %s' % (filePathOut))
    return True
def wav_to_mp3(inPath, outPath, lameOpts, *extraArgs):
    '''
    Convert wav file(s) to mp3 using the lame tool.

    @param inPath: source file or directory
    @param outPath: destination file or directory
    @param lameOpts: list of lame options, or the keyword 'default'
    @return bool: True if the function completed (individual conversions may
        still fail and are logged), False on bad arguments
    '''
    # standard argument validation
    if checkStd(inPath, outPath, extraArgs, lameOpts) == False:
        return False
    if lameOpts == 'TN':
        logging.error('This function wav_to_mp3 does not accept the \'TN\' keyword')
        return False
    # work out the output directory (and the explicit output name, if given)
    if not os.path.isdir(outPath):  # outPath is a file path
        outDirectory, fileNameOut = os.path.split(outPath)
    else:
        outDirectory = outPath
    # build the list of input files; only .wav files are candidates.
    # (The original removed entries from fileList while iterating a second
    # os.listdir() call and left a dead `fileList=(fileNameOut)` assignment.)
    if not os.path.isdir(inPath):
        inDirectory, name = os.path.split(inPath)
        fileList = [name]
    else:
        inDirectory = inPath
        fileList = [name for name in os.listdir(inPath) if name.endswith('.wav')]
    for fileName in fileList:
        # if no explicit output name was given, derive one from the input
        if os.path.isdir(outPath):
            fileNameOut = fileName[0:fileName.rindex('.')] + '.mp3'
        filePathIn = os.path.join(inDirectory, fileName)
        filePathOut = os.path.join(outDirectory, fileNameOut)
        # assemble the lame command line
        if lameOpts == 'default':
            lameCall = ['lame', '-mm', '--cbr', '-b48', filePathIn, filePathOut]
        else:
            lameCall = ['lame']
            lameCall.extend(lameOpts)
            lameCall.extend([filePathIn, filePathOut])
        # run the conversion; the list form avoids shell interpolation
        r = subprocess.call(lameCall)
        if r != 0:
            logging.warning('mp3 creation failed (lame return code:%d for file input %s).' % (r, filePathIn))
        else:
            logging.info('File converted: %s' % (filePathOut))
    return True
def pdf_to_jpg(inPath, outPath, imageMagicOpts, *extraArgs):
    '''
    Convert tif file(s) to jpg using ImageMagick.
    NOTE(review): despite the name, this function filters for .tif/.tiff
    inputs, exactly as the original implementation did.

    @param inPath: source file or directory
    @param outPath: destination file or directory
    @param imageMagicOpts: 'default', 'TN' (thumbnail), or a list of options
    @return bool: True if the function completed (individual conversions may
        still fail and are logged), False on bad arguments
    '''
    # standard argument validation
    if checkStd(inPath, outPath, extraArgs, imageMagicOpts) == False:
        return False
    # work out the output directory (and the explicit output name, if given)
    if not os.path.isdir(outPath):  # outPath is a file path
        outDirectory, fileNameOut = os.path.split(outPath)
    else:
        outDirectory = outPath
    # build the list of input files; only .tif/.tiff files are candidates.
    # (The original removed entries from fileList while iterating a second
    # os.listdir() call and left a dead `fileList=(fileNameOut)` assignment.)
    if not os.path.isdir(inPath):
        inDirectory, name = os.path.split(inPath)
        fileList = [name]
    else:
        inDirectory = inPath
        fileList = [name for name in os.listdir(inPath)
                    if name.endswith(('.tif', '.tiff'))]
    for fileName in fileList:
        # if no explicit output name was given, derive one from the input
        if os.path.isdir(outPath):
            fileNameOut = fileName[0:fileName.rindex('.')] + '.jpg'
        filePathIn = os.path.join(inDirectory, fileName)
        filePathOut = os.path.join(outDirectory, fileNameOut)
        # assemble the ImageMagick command line
        if imageMagicOpts == 'default':
            imageMagicCall = ["convert", filePathIn, '-compress', 'JPEG', '-quality', '50%', filePathOut]
        elif imageMagicOpts == 'TN':
            imageMagicCall = ["convert", filePathIn, '-compress', 'JPEG', "-thumbnail", "85x110", "-gravity", "center", "-extent", "85x110", filePathOut]
        else:
            imageMagicCall = ["convert", filePathIn]
            imageMagicCall.extend(imageMagicOpts)
            imageMagicCall.append(filePathOut)
        # run the conversion; the list form avoids shell interpolation
        r = subprocess.call(imageMagicCall)
        if r != 0:
            logging.warning('JPG creation failed (convert return code:%d for file input %s).' % (r, filePathIn))
        else:
            logging.info('File converted: %s' % (filePathOut))
    return True
def exif_to_xml(inPath, outPath, *extraArgs):
    '''
    Extract the full EXIF of an image (or of every matching image in a
    directory) into an XML file using exiftool.

    @param inPath: file or directory to read from
    @param outPath: file or directory to write to
    @param extList (extraArgs[0]): optional list of file extensions to operate
        on; only meaningful when inPath is a directory
    @return bool: True on completion of the function, False on a caught error
    '''
    # pull the optional extension list out of *extraArgs.
    # (The original left extList undefined when no extra arg was given, which
    # raised NameError later in the directory branch.)
    extList = None
    if len(extraArgs) > 0:
        extList = extraArgs[0]
        extraArgs = extraArgs[1:]
        if not isinstance(extList, list):
            logging.error("The extension List must be a list not:" + str(extList))
            return False
    # standard argument validation
    if checkStd(inPath, outPath, extraArgs) == False:
        return False
    # work out the output directory (and the explicit output name, if given)
    if not os.path.isdir(outPath):  # outPath is a file path
        outDirectory, fileNameOut = os.path.split(outPath)
    else:
        outDirectory = outPath
    # build the list of input files
    if not os.path.isdir(inPath):
        inDirectory, name = os.path.split(inPath)
        fileList = [name]
    else:
        inDirectory = inPath
        fileList = []
        for name in os.listdir(inPath):
            # keep a file when no filter was supplied or it ends in one of the
            # wanted extensions. (The original called fileList.remove() once
            # per NON-matching extension, raising ValueError on the second
            # removal of the same entry.)
            if extList is None or any(name.endswith(ext) for ext in extList):
                fileList.append(name)
            else:
                print("removing path: " + name)
    for fileName in fileList:
        # if no explicit output name was given, derive one from the input
        if os.path.isdir(outPath):
            fileNameOut = fileName[0:fileName.rindex('.')] + '.xml'
        filePathIn = os.path.join(inDirectory, fileName)
        filePathOut = os.path.join(outDirectory, fileNameOut)
        # run exiftool and capture its XML output; communicate() also waits
        # for the process, so returncode is valid afterwards
        proc = subprocess.Popen(['exiftool', '-X', filePathIn],
                                stdout=subprocess.PIPE)
        exif_value = proc.communicate()[0]
        # write the output, closing the file even on error
        outFile = open(filePathOut, 'w')
        try:
            outFile.write(exif_value)
        finally:
            outFile.close()
        if proc.returncode != 0:
            # the original formatted the Popen object itself with %d, which
            # raises TypeError; log the numeric return code instead
            logging.warning('EXIF XML creation failed (exiftool return code:%d for file input %s).' % (proc.returncode, filePathIn))
        else:
            logging.info('File converted: %s' % (filePathOut))
    return True
def mods_to_solr(inPath, outPath, *extraArgs):
    '''
    Transform MODS xml file(s) into SOLR xml file(s) via XSLT.

    @param inPath: source file or directory
    @param outPath: destination file or directory
    @return bool: True on successful completion of the function, False on
        invalid arguments
    '''
    # standard argument validation
    if checkStd(inPath, outPath, extraArgs) == False:
        return False
    # load and compile the stylesheet once, up front
    xslt_root = etree.parse(os.path.join(os.path.dirname(__file__), '__resources/mods_to_solr.xslt'))
    transform = etree.XSLT(xslt_root)
    # work out the output directory (and the explicit output name, if given)
    if not os.path.isdir(outPath):  # outPath is a file path
        outDirectory, fileNameOut = os.path.split(outPath)
    else:
        outDirectory = outPath
    # build the list of input files: keep only xml files whose root element
    # is in the MODS namespace
    if not os.path.isdir(inPath):
        inDirectory, name = os.path.split(inPath)
        fileList = [name]
    else:
        inDirectory = inPath
        fileList = [name for name in os.listdir(inPath)
                    if name.endswith('.xml')
                    and xmlib.rootHasNamespace(os.path.join(inPath, name),
                                               'http://www.loc.gov/mods/v3')]
    for fileName in fileList:
        # if no explicit output name was given, derive one from the input
        if os.path.isdir(outPath):
            fileNameOut = fileName[0:fileName.rindex('.')] + '_solr.xml'
        filePathIn = os.path.join(inDirectory, fileName)
        filePathOut = os.path.join(outDirectory, fileNameOut)
        # read the MODS document; close the handle even if parsing fails
        # (the original leaked both file handles)
        modsFile = open(filePathIn, 'r')
        try:
            doc = etree.parse(modsFile)
        finally:
            modsFile.close()
        # translate
        transform_result = transform(doc)
        # write the SOLR document
        solrOut = open(filePathOut, 'w')
        try:
            solrOut.write(str(transform_result))
        finally:
            solrOut.close()
    return True
'''
collection of helper functions used by the API functions
'''
def checkPaths(pathIn, pathOut):
    '''
    Standard validation of the input and output path arguments.

    @param pathIn: input path to check
    @param pathOut: output path to check
    @return bool: False when the arguments are not valid, True when they are
    '''
    # the input must exist
    if not os.path.lexists(pathIn):
        logging.error('The indicated input path is not valid: ' + pathIn)
        return False
    # an existing directory is always an acceptable output
    if os.path.isdir(pathOut):
        return True
    if os.path.isfile(pathOut):
        logging.error('If the output path is a file it can not already exist: ' + pathOut)
        return False
    if not os.path.lexists(os.path.dirname(pathOut)):
        logging.error('The output path is invalid: ' + pathOut)
        return False
    # a directory input requires a directory output
    if os.path.isdir(pathIn) and not os.path.isdir(pathOut):
        logging.error('If the input path is a directory then so must be the output directory')
        return False
    return True
def checkOpts(optsIn):
    '''
    Standard validation of command line option arguments.

    @param optsIn: option set to check
    @return bool: False when the argument is not valid, True when it is
    '''
    # options are either a list of flags or one of the known keywords
    if isinstance(optsIn, list) or optsIn in ('default', 'TN'):
        return True
    logging.error('CommandLine arguments must be lists or a known keyword like \'TN\' or \'default\'')
    return False
def checkExtraArgs(args):
    '''
    Standard check that no unexpected positional arguments were passed.

    @param args: tuple/list holding the *args to be checked
    @return bool: False when extra arguments are present, True otherwise
    '''
    if len(args) > 0:
        # str() is required here: the original concatenated a str with the
        # tuple/list itself, which raises TypeError on the error path
        logging.error('Too many arguments supplied:' + str(args))
        return False
    return True
def checkStd(pathIn, pathOut, args, *opts):
    '''
    Wrapper that runs all of the standard argument checks.

    @param pathIn: input path to check
    @param pathOut: output path to check
    @param args: list holding the *args to be checked
    @param *opts: zero or more option sets to check
    @return bool: False when any check fails, True when all pass
    '''
    if not checkPaths(pathIn, pathOut):
        return False
    if not checkExtraArgs(args):
        return False
    # every supplied option set must validate
    for optSet in opts:
        if not checkOpts(optSet):
            return False
    return True
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,700
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/islandoraUtils/xacml/parser.py
|
import islandoraUtils.xacml.constants as xacmlconstants
from islandoraUtils.xacml.exception import XacmlException
import string
from lxml import etree
def parse(xacml_string):
    '''
    Parse an islandora XACML policy document into a plain dictionary.

    @param xacml_string: the XACML XML as a string
    @return dict: the parsed policy datastructure
    @raises XacmlException: when the document is not an islandora policy
    '''
    xacml = {'rules': []}
    parser = etree.XMLParser(remove_blank_text=True)
    root = etree.fromstring(xacml_string, parser)
    # basic sanity check that the root element is <Policy>
    if root.tag != xacmlconstants.XACML + 'Policy':
        raise XacmlException('Root tag is not Policy.')
    # only policies written by the islandora XACML editor are supported
    if root.get('PolicyId') != 'islandora-xacml-editor-v1':
        raise XacmlException('XACML file was not written by XACML Editor.')
    parseXacml(xacml, root)
    return xacml
def parseXacml(xacml, root):
    '''
    Fill the xacml dictionary from the parsed <Policy> element.

    @param xacml: dictionary to populate (must already contain 'rules')
    @param root: the <Policy> element
    '''
    xacml['PolicyId'] = root.get("PolicyId")
    xacml['RuleCombiningAlgId'] = root.get("RuleCombiningAlgId")
    # build one rule dictionary per <Rule> element
    for rule_element in root.findall(xacmlconstants.XACML + "Rule"):
        rule = {
            'effect': rule_element.get("Effect"),
            'ruleid': rule_element.get("RuleId"),
            'dsids': [],
            'mimes': [],
            'methods': [],
            'users': [],
            'roles': [],
        }
        findDsidMime(rule, rule_element)
        findMethods(rule, rule_element)
        findRoles(rule, rule_element)
        findUsers(rule, rule_element)
        xacml['rules'].append(rule)
def findDsidMime(rule, element):
    '''
    Collect datastream IDs and mimetypes from the rule's ResourceMatch
    elements into rule['dsids'] / rule['mimes'].

    @raises XacmlException: on an unrecognised AttributeId
    '''
    for resource in element.findall('.//' + xacmlconstants.XACML + "ResourceMatch"):
        value = resource[0].text
        attribute_id = resource[1].get("AttributeId")
        if attribute_id == xacmlconstants.mime:
            rule['mimes'].append(value)
        elif attribute_id == xacmlconstants.dsid:
            rule['dsids'].append(value)
        else:
            raise XacmlException('Unknown ResourceMatch AttributeId.')
def findMethods(rule, element):
    '''
    Collect the fedora API methods named in the rule's Actions element.

    Action attribute values look like
    'urn:fedora:names:fedora:2.1:action:api-a' (35-character prefix) or
    'urn:fedora:names:fedora:2.1:action:id-<method>' (38-character prefix);
    only the trailing method name is stored.
    '''
    actions = element.find(xacmlconstants.XACML + "Target/" + xacmlconstants.XACML + "Actions")
    for value in actions.findall('.//' + xacmlconstants.XACML + 'AttributeValue'):
        method = value.text
        # str.find works on both Python 2 and 3; the module-level helper it
        # replaces (string.find) was removed in Python 3
        if method.find('api-a') != -1 or method.find('api-m') != -1:
            rule['methods'].append(method[35:])
        else:
            rule['methods'].append(method[38:])
def findRoles(rule, element):
    '''
    Collect the roles this rule applies to from its condition into
    rule['roles'].
    '''
    xpath = ('.//xacml:Apply[@FunctionId="' + xacmlconstants.onememeberof +
             '"]/xacml:SubjectAttributeDesignator[@AttributeId="fedoraRole"]')
    designators = element.xpath(xpath, namespaces=xacmlconstants.XPATH_MAP)
    if designators:
        values = designators[0].xpath('../xacml:Apply/xacml:AttributeValue',
                                      namespaces=xacmlconstants.XPATH_MAP)
        rule['roles'].extend(value.text for value in values)
def findUsers(rule, element):
    '''
    Collect the users this rule applies to from its condition into
    rule['users'].
    '''
    xpath = ('.//xacml:Apply[@FunctionId="' + xacmlconstants.onememeberof +
             '"]/xacml:SubjectAttributeDesignator'
             '[@AttributeId="urn:fedora:names:fedora:2.1:subject:loginId"]')
    designators = element.xpath(xpath, namespaces=xacmlconstants.XPATH_MAP)
    if designators:
        values = designators[0].xpath('../xacml:Apply/xacml:AttributeValue',
                                      namespaces=xacmlconstants.XPATH_MAP)
        rule['users'].extend(value.text for value in values)
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,701
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/islandoraUtils/xacml/writer.py
|
from lxml import etree
import islandoraUtils.xacml.constants as xacmlconstants
from islandoraUtils.xacml.exception import XacmlException
def toXML(xacml, prettyprint=False):
    '''
    Serialize a xacml datastructure (as produced by the parser) to XML.

    @param xacml: the xacml policy dictionary
    @param prettyprint: when True, indent the output
    @return: the policy as an XML string, with XML declaration
    '''
    # build the root, the policy-level target, then one element per rule
    policy = createRoot(xacml)
    createTarget(policy, xacml)
    createRules(policy, xacml)
    return etree.tostring(policy, pretty_print=prettyprint, xml_declaration=True)
def createRoot(xacml):
    '''Create the <Policy> root element with the editor's fixed PolicyId.'''
    root = etree.Element(xacmlconstants.XACML + "Policy", nsmap=xacmlconstants.NSMAP)
    root.set('PolicyId', 'islandora-xacml-editor-v1')
    root.set('RuleCombiningAlgId', xacml['RuleCombiningAlgId'])
    return root
def createTarget(policy, xacml):
    '''Attach the policy-level <Target> matching any subject/resource/action.'''
    target = etree.SubElement(policy, xacmlconstants.XACML + 'Target')
    # each container gets its corresponding wildcard child
    for plural, wildcard in (('Subjects', 'AnySubject'),
                             ('Resources', 'AnyResource'),
                             ('Actions', 'AnyAction')):
        container = etree.SubElement(target, xacmlconstants.XACML + plural)
        etree.SubElement(container, xacmlconstants.XACML + wildcard)
def createRules(policy, xacml):
    '''Append a <Rule> element for every rule in the datastructure.'''
    for entry in xacml['rules']:
        createRule(policy, entry)
def createRule(policy, rule):
    '''Build one <Rule> element: id/effect attributes, target and condition.'''
    element = etree.SubElement(policy, xacmlconstants.XACML + 'Rule')
    element.set('RuleId', rule['ruleid'])
    element.set('Effect', rule['effect'])
    createRuleTarget(element, rule)
    createRuleCondition(element, rule)
def createRuleTarget(root, rule):
    '''Attach the rule-level <Target> with its subjects/resources/actions.'''
    target = etree.SubElement(root, xacmlconstants.XACML + "Target")
    createRuleTargetSubjects(target, rule)
    createRuleTargetResources(target, rule)
    createRuleTargetActions(target, rule)
def createRuleTargetSubjects(target, rule):
    '''Attach <Subjects> matching any subject; the condition does the filtering.'''
    container = etree.SubElement(target, xacmlconstants.XACML + "Subjects")
    etree.SubElement(container, xacmlconstants.XACML + "AnySubject")
def createRuleTargetActions(target, rule):
    '''Attach <Actions>: one <Action> per method, or <AnyAction> when none.'''
    actions = etree.SubElement(target, xacmlconstants.XACML + "Actions")
    if not rule['methods']:
        etree.SubElement(actions, xacmlconstants.XACML + "AnyAction")
        return
    for method in rule['methods']:
        createRuleTargetAction(actions, method)
def createRuleTargetAction(actions, method):
    '''Build one <Action> element matching a single fedora API method.'''
    action = etree.SubElement(actions, xacmlconstants.XACML + 'Action')
    match = etree.SubElement(action, xacmlconstants.XACML + 'ActionMatch')
    match.set('MatchId', xacmlconstants.stringequal)
    # the api-a/api-m pseudo-methods use the generic :api attribute id;
    # individual methods use the :id attribute with an 'id-' value prefix
    if method in ('api-a', 'api-m'):
        value_text = 'urn:fedora:names:fedora:2.1:action:' + method
        attribute_id = 'urn:fedora:names:fedora:2.1:action:api'
    else:
        value_text = 'urn:fedora:names:fedora:2.1:action:id-' + method
        attribute_id = "urn:fedora:names:fedora:2.1:action:id"
    value = etree.SubElement(match, xacmlconstants.XACML + "AttributeValue")
    value.text = value_text
    value.set("DataType", "http://www.w3.org/2001/XMLSchema#string")
    designator = etree.SubElement(match, xacmlconstants.XACML + "ActionAttributeDesignator")
    designator.set("AttributeId", attribute_id)
    designator.set("DataType", "http://www.w3.org/2001/XMLSchema#string")
def createRuleTargetResources(target, rule):
    '''Attach <Resources>: one per mime/dsid, or <AnyResource> when none.'''
    resources = etree.SubElement(target, xacmlconstants.XACML + "Resources")
    if rule['mimes'] or rule['dsids']:
        for mime in rule['mimes']:
            createRuleTargetResource(resources, mime, 'mime')
        for dsid in rule['dsids']:
            createRuleTargetResource(resources, dsid, 'dsid')
    else:
        etree.SubElement(resources, xacmlconstants.XACML + "AnyResource")
def createRuleTargetResource(resources, name, type):
    '''
    Build one <Resource> matching a datastream id or mimetype.

    @param name: the mimetype or dsid value to match
    @param type: 'mime' or 'dsid' — selects the fedora resource AttributeId
    '''
    resource = etree.SubElement(resources, xacmlconstants.XACML + 'Resource')
    match = etree.SubElement(resource, xacmlconstants.XACML + 'ResourceMatch')
    match.set('MatchId', xacmlconstants.stringequal)
    value = etree.SubElement(match, xacmlconstants.XACML + 'AttributeValue')
    value.text = name
    value.set('DataType', "http://www.w3.org/2001/XMLSchema#string")
    designator = etree.SubElement(match, xacmlconstants.XACML + 'ResourceAttributeDesignator')
    designator.set("DataType", "http://www.w3.org/2001/XMLSchema#string")
    if type == 'mime':
        designator.set("AttributeId", "urn:fedora:names:fedora:2.1:resource:datastream:mimeType")
    elif type == 'dsid':
        designator.set("AttributeId", "urn:fedora:names:fedora:2.1:resource:datastream:id")
def createRuleCondition(target, rule):
    '''
    Attach a <Condition> negating membership of the rule's users/roles.

    The condition is appended only when at least one of users/roles is
    populated; when both are present the two Apply elements are OR-ed
    together. (The original drove these branches with NameError-based
    try/except flow — an execution trace shows this explicit version
    produces the identical tree.)
    '''
    condition = etree.Element(xacmlconstants.XACML + "Condition")
    condition.set("FunctionId", "urn:oasis:names:tc:xacml:1.0:function:not")
    users = createRuleConditionApply(rule['users'], 'user') if rule['users'] else None
    roles = createRuleConditionApply(rule['roles'], 'role') if rule['roles'] else None
    if users is not None and roles is not None:
        or_apply = etree.Element(xacmlconstants.XACML + "Apply")
        or_apply.set("FunctionId", "urn:oasis:names:tc:xacml:1.0:function:or")
        or_apply.append(users)
        or_apply.append(roles)
        condition.append(or_apply)
        target.append(condition)
    elif users is not None:
        condition.append(users)
        target.append(condition)
    elif roles is not None:
        condition.append(roles)
        target.append(condition)
    # with neither users nor roles, the condition is dropped entirely
def createRuleConditionApply(attributes, type):
    '''
    Build an <Apply> testing membership of the subject's attribute in a
    string bag of the given values.

    @param attributes: the user names or role names to match
    @param type: 'user' or 'role' — selects the subject AttributeId
    @return: the <Apply> element
    '''
    apply_element = etree.Element(xacmlconstants.XACML + 'Apply')
    apply_element.set("FunctionId", "urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of")
    designator = etree.SubElement(apply_element, xacmlconstants.XACML + 'SubjectAttributeDesignator')
    designator.set("DataType", "http://www.w3.org/2001/XMLSchema#string")
    designator.set("MustBePresent", "false")
    if type == 'role':
        designator.set('AttributeId', "fedoraRole")
    elif type == 'user':
        designator.set('AttributeId', "urn:fedora:names:fedora:2.1:subject:loginId")
    bag = etree.SubElement(apply_element, xacmlconstants.XACML + "Apply")
    bag.set("FunctionId", "urn:oasis:names:tc:xacml:1.0:function:string-bag")
    for attribute in attributes:
        value = etree.SubElement(bag, xacmlconstants.XACML + "AttributeValue")
        value.text = attribute
        value.set("DataType", "http://www.w3.org/2001/XMLSchema#string")
    return apply_element
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,702
|
discoverygarden/IslandoraPYUtils
|
refs/heads/1.x
|
/islandoraUtils/xacml/tools.py
|
import islandoraUtils.xacml.writer as xacmlwriter
import islandoraUtils.xacml.parser as xacmlparser
import islandoraUtils.xacml.constants as xacmlconst
from abc import ABCMeta
'''
@file
This file defines a set of objects for manipulating XACML. Other files in the
XACML module provide lower-level access to creating XACML; these objects
work together to provide a nice high-level view of a standard islandora
XACML object.
All of this was hastily ported from PHP, so the comments may say "array"
when they in fact mean list, set or dictionary. If you see such references,
please correct them.
'''
class XacmlRule:
    '''
    Abstract base class representing a general XACML rule. The Xacml object
    contains 4 standard XACML rules, which are all extended from this base
    class.
    '''
    # mark this as an abstract base class (Python 2 metaclass syntax)
    __metaclass__ = ABCMeta

    # private internal representation of the XACML rule
    _rule = None
    # the Xacml object this rule is instantiated inside of, so that
    # references to other rules can be made
    _xacml = None

    def __init__(self, xacml, rule=None):
        '''
        Construct a new XacmlRule. This generic constructor sets no methods;
        when an existing rule datastructure is given it is copied in (with the
        admin user/role forced), otherwise concrete subclasses are expected
        to call _initializeRule() themselves.

        @param xacml: reference to the Xacml object this rule is part of
        @param rule: pre-existing xacml rule datastructure, or None
        '''
        if rule:
            self._rule = self._initializeRule(rule['ruleid'], rule['effect'])
            self._rule['users'] |= set(rule['users'])
            self._rule['roles'] |= set(rule['roles'])
            self._rule['methods'] = list(rule['methods'])
            # the admin user/role must always be present
            self._setValue('users', 'fedoraAdmin')
            self._setValue('roles', 'administrator')
        self.xacml = xacml

    def _initializeRule(self, id, effect):
        '''
        Build a fresh rule datastructure.

        @param id: the ID for the new rule as a string
        @param effect: the effect of the rule ('Permit' or 'Deny')
        @return dict: the rule dictionary
        '''
        return {
            'ruleid': id,
            'effect': effect,
            'users': set(),
            'roles': set(),
            'methods': [],
        }

    def _setValue(self, type, data):
        '''
        Add data to one of the set-valued rule entries; accepts a single
        string or an iterable of strings.

        @param type: key in the internal rule datastructure to modify
        @param data: data to be added
        '''
        if isinstance(data, basestring):
            self._rule[type].add(data)
        else:
            self._rule[type] |= set(data)

    def _getValues(self, type):
        '''
        Return the requested rule entry as a list.

        @param type: key in the internal datastructure to return
        '''
        return list(self._rule[type])

    def _removeValue(self, type, datarg):
        '''
        Remove data from one of the set-valued rule entries; accepts a single
        string or an iterable of strings.

        @param type: key in the internal rule datastructure to work on
        @param datarg: data to be removed
        '''
        removal = set([datarg]) if isinstance(datarg, basestring) else set(datarg)
        self._rule[type] -= removal

    def isPopulated(self):
        '''
        Report whether this rule carries any users or roles; a rule with
        neither has no meaning in XACML.

        @return boolean (truthy/falsy list)
        '''
        return self.getUsers() or self.getRoles()

    def addUser(self, user):
        '''Add a user (string) or users (iterable of strings) to the rule.'''
        self._setValue('users', user)

    def addRole(self, role):
        '''Add a role (string) or roles (iterable of strings) to the rule.'''
        self._setValue('roles', role)

    def removeUser(self, user):
        '''Remove a user (string) or users (iterable) from the rule.'''
        self._removeValue('users', user)

    def removeRole(self, role):
        '''Remove a role (string) or roles (iterable) from the rule.'''
        self._removeValue('roles', role)

    def getUsers(self):
        '''Return the users associated with this rule as a list.'''
        return self._getValues('users')

    def getRoles(self):
        '''Return the roles associated with this rule as a list.'''
        return self._getValues('roles')

    def getRuleArray(self):
        '''
        Return a copy of the rule datastructure, parsable by XacmlWriter.

        @return dict
        '''
        # make absolutely sure the admin user/role are included
        self.addUser('fedoraAdmin')
        self.addRole('administrator')
        return {
            'ruleid': self._rule['ruleid'],
            'effect': self._rule['effect'],
            'users': list(self._rule['users']),
            'roles': list(self._rule['roles']),
            'methods': list(self._rule['methods']),
            'dsids': [],
            'mimes': [],
        }
class XacmlManagementRule(XacmlRule):
    '''
    Concrete implementation of XacmlRule for the rule restricting who can
    manage an object.
    '''
    def __init__(self, xacml, rule=None):
        '''
        Call the parent constructor; when no existing rule datastructure is
        supplied, start from a blank Deny rule covering every management
        method.

        @param xacml: reference to the parent Xacml object
        @param rule: existing rule datastructure with ID MANAGEMENT_RULE, or None
        '''
        XacmlRule.__init__(self, xacml, rule)
        if not rule:
            self._rule = self._initializeRule(xacmlconst.MANAGEMENT_RULE, 'Deny')
            # the fedora API methods that modify an object
            self._rule['methods'] = [
                'addDatastream',
                'addDisseminator',
                'adminPing',
                'getDisseminatorHistory',
                'getNextPid',
                'ingest',
                'modifyDatastreamByReference',
                'modifyDatastreamByValue',
                'modifyDisseminator',
                'modifyObject',
                'purgeObject',
                'purgeDatastream',
                'purgeDisseminator',
                'setDatastreamState',
                'setDisseminatorState',
                'setDatastreamVersionable',
                'compareDatastreamChecksum',
                'serverShutdown',
                'serverStatus',
                'upload',
                'dsstate',
                'resolveDatastream',
                'reloadPolicies',
            ]
class XacmlViewingRule(XacmlRule):
    '''
    Concrete implementation of XacmlRule for the rule restricting who can
    view an object.
    '''
    def __init__(self, xacml, rule=None):
        '''
        Call the parent constructor; when no existing rule datastructure is
        supplied, start from a blank Deny rule covering the viewing methods.

        @param xacml: reference to the parent Xacml object
        @param rule: existing rule datastructure with ID VIEWING_RULE, or None
        '''
        XacmlRule.__init__(self, xacml, rule)
        if not rule:
            self._rule = self._initializeRule(xacmlconst.VIEWING_RULE, 'Deny')
            self._rule['methods'] = [
                'api-a',
                'getDatastreamHistory',
                'listObjectInResourceIndexResults',
            ]

    def getRuleArray(self):
        '''
        Extend the base datastructure with the users/roles of the management
        and datastream rules when those are populated, so that anyone allowed
        to manage an object can also view it (otherwise someone could manage
        an object yet be unable to view its datastreams).

        @return dict parsable by XacmlWriter
        '''
        rule = XacmlRule.getRuleArray(self)
        users = set(rule['users'])
        roles = set(rule['roles'])
        if self.xacml.managementRule.isPopulated():
            users |= set(self.xacml.managementRule.getUsers())
            roles |= set(self.xacml.managementRule.getRoles())
        if self.xacml.datastreamRule.isPopulated():
            users |= set(self.xacml.datastreamRule.getUsers())
            roles |= set(self.xacml.datastreamRule.getRoles())
        rule['users'] = list(users)
        rule['roles'] = list(roles)
        return rule
class XacmlPermitEverythingRule(XacmlRule):
    '''
    Concrete implementation of XacmlRule that permits everything. It is added
    to the end of every XACML policy so that anything not explicitly
    forbidden is allowed (XACML otherwise defaults to denying access).
    Entirely managed by the Xacml object.
    '''
    def __init__(self, xacml):
        XacmlRule.__init__(self, xacml)
        self._rule = self._initializeRule(xacmlconst.PERMIT_RULE, 'Permit')

    def getRuleArray(self):
        '''Return the base datastructure with users/roles blanked: the
        permit-everything rule applies to everyone.'''
        rule = XacmlRule.getRuleArray(self)
        rule['roles'] = []
        rule['users'] = []
        return rule
class XacmlDatastreamRule(XacmlRule):
    '''
    Concrete implementation of XacmlRule restricting who can view certain
    mimetypes and datastreams.
    '''
    def __init__(self, xacml, rule=None):
        '''
        Call the parent constructor; when no existing rule datastructure is
        supplied, start from a blank Deny rule on datastream dissemination.

        @param xacml: reference to the parent Xacml object
        @param rule: existing rule datastructure with ID DATASTREAM_RULE, or None
        '''
        XacmlRule.__init__(self, xacml, rule)
        if not rule:
            self._rule = self._initializeRule(xacmlconst.DATASTREAM_RULE, 'Deny')
            self._rule['methods'] = ['getDatastreamDissemination']
            self._rule['mimes'] = set()
            self._rule['dsids'] = set()
        else:
            self._rule['mimes'] = set(rule['mimes'])
            self._rule['dsids'] = set(rule['dsids'])

    def getRuleArray(self):
        '''
        Extend the base datastructure with this rule's dsids/mimes, plus the
        users/roles of the management rule when it is populated, so that
        anyone allowed to manage an object can also view its datastreams.

        @return dict parsable by XacmlWriter
        '''
        rule = XacmlRule.getRuleArray(self)
        rule['dsids'] = list(self._rule['dsids'])
        rule['mimes'] = list(self._rule['mimes'])
        if self.xacml.managementRule.isPopulated():
            users = set(rule['users']) | set(self.xacml.managementRule.getUsers())
            roles = set(rule['roles']) | set(self.xacml.managementRule.getRoles())
            rule['users'] = list(users)
            rule['roles'] = list(roles)
        return rule

    def addDsid(self, dsid):
        '''Add a dsid (string) or dsids (iterable of strings) to the rule.'''
        self._setValue('dsids', dsid)

    def addMimetype(self, mime):
        '''Add a mimetype (string) or mimetypes (iterable) to the rule.'''
        self._setValue('mimes', mime)

    def removeMimetype(self, mime):
        '''Remove a mimetype (string) or mimetypes (iterable) from the rule.'''
        self._removeValue('mimes', mime)

    def removeDsid(self, dsid):
        '''Remove a dsid (string) or dsids (iterable) from the rule.'''
        self._removeValue('dsids', dsid)

    def getMimetypes(self):
        '''Return the mimetypes associated with this rule as a list.'''
        return self._getValues('mimes')

    def getDsids(self):
        '''Return the dsids associated with this rule as a list.'''
        return self._getValues('dsids')

    def isPopulated(self):
        '''
        True only when the rule has at least one user or role AND at least
        one mimetype or dsid.

        @return boolean
        '''
        # BUG FIX: the original evaluated `self.getMimetypes` (the bound
        # method object, which is always truthy) instead of calling it,
        # so the mime/dsid half of the check never took effect.
        return XacmlRule.isPopulated(self) and (self.getMimetypes() or self.getDsids())
'''
This class is how programmers should interact with Xacml objects. It takes either xacml XAML as a string
or no arguements and creates a blank xacml object. The interaction with the rules takes place through
member object of this class. For instance to add roles that can manage the object:
@code
xacml = new Xacml();
// allow userA to manage the object
xacml->managementRule->addUser('userA');
// allow roleC and roleD to manage the object
xacml->managementRule->addRole(array('roleC', 'roleD'));
@endcode
'''
class Xacml:
    """
    Programmer-facing wrapper around a XACML policy.  Construct with the
    policy XML as a string, or with no argument for a fresh policy that
    permits everything.  Rules are manipulated through the member rule
    objects, e.g.::

        xacml = Xacml()
        xacml.managementRule.addUser('userA')
        xacml.managementRule.addRole(['roleC', 'roleD'])
    """
    # NOTE(review): this class attribute is never referenced again;
    # __init__ assigns ``self.permitEverythingRule`` (no underscore)
    # instead — confirm which spelling external callers rely on.
    _permitEverythingRule = None;
    # Rule controlling who can manage the object with this XACML policy.
    # @var XacmlManagementRule
    managementRule = None;
    # Rule controlling who can view the specific datastreams and mimetypes
    # that are in this rule.
    # @var XacmlDatastreamRule
    datastreamRule = None
    # Rule controlling who can view datastreams in this object.
    # @var XacmlViewingRule
    viewingRule = None

    def __init__(self, xacml = None):
        """
        Initialize a new XACML object.
        @param xacml (optional) The XACML XML as a string.  When omitted,
            a new policy that permits everything is created.
        @throws XacmlException if the XML cannot be parsed (presumably
            raised from xacmlparser.parse — confirm).
        """
        management_rule = None;
        datastream_rule = None;
        viewing_rule = None;
        if xacml != None:
            xacmlds = xacmlparser.parse(xacml)
            # decide what is enabled
            for rule in xacmlds['rules']:
                if rule['ruleid'] == xacmlconst.MANAGEMENT_RULE:
                    management_rule = rule
                elif rule['ruleid'] == xacmlconst.DATASTREAM_RULE:
                    datastream_rule = rule
                elif rule['ruleid'] == xacmlconst.VIEWING_RULE:
                    viewing_rule = rule
        # Rule objects are always created; a None rule leaves them empty.
        self.datastreamRule = XacmlDatastreamRule(self, datastream_rule)
        self.managementRule = XacmlManagementRule(self, management_rule)
        self.viewingRule = XacmlViewingRule(self, viewing_rule)
        self.permitEverythingRule = XacmlPermitEverythingRule(self)

    def _getXacmlDatastructure(self):
        """
        Build the datastructure consumed by XacmlWriter, including only
        the rules that are populated.  The permit-everything rule is
        always appended last (first-applicable combining algorithm).
        """
        xacml = {
            'RuleCombiningAlgId' : 'urn:oasis:names:tc:xacml:1.0:rule-combining-algorithm:first-applicable',
            'rules' : []
        }
        if self.datastreamRule.isPopulated():
            xacml['rules'].append(self.datastreamRule.getRuleArray())
        if self.managementRule.isPopulated():
            xacml['rules'].append(self.managementRule.getRuleArray())
        if self.viewingRule.isPopulated():
            xacml['rules'].append(self.viewingRule.getRuleArray())
        xacml['rules'].append(self.permitEverythingRule.getRuleArray())
        return xacml

    def getXmlString(self, prettyPrint=True):
        """
        Return the XML for this XACML policy as a string.
        @param prettyPrint If True, return pretty-printed XML.
        @return string containing xacml xml
        """
        xacml = self._getXacmlDatastructure()
        return xacmlwriter.toXML(xacml, prettyPrint);
|
{"/islandoraUtils/metadata/tests/fedora_relationships.py": ["/islandoraUtils/metadata/fedora_relationships.py"], "/islandoraUtils/fedoraLib.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/fileManipulator.py": ["/islandoraUtils/misc.py"], "/islandoraUtils/xacml/parser.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/writer.py": ["/islandoraUtils/xacml/constants.py"], "/islandoraUtils/xacml/tools.py": ["/islandoraUtils/xacml/writer.py", "/islandoraUtils/xacml/parser.py", "/islandoraUtils/xacml/constants.py"]}
|
23,705
|
chanonroy/ml-dog-or-cat
|
refs/heads/master
|
/models.py
|
import os
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
IMG_SIZE = 50  # model input images are IMG_SIZE x IMG_SIZE grayscale
LR = 1e-3      # module-level default learning rate (callers pass their own to setup_model)
def setup_model(MODEL_NAME, IMG_SIZE, LEARNING_RATE):
    """Build the 6-conv-layer CNN, load its saved weights, and return it.

    :MODEL_NAME: str -- weight file name, loaded from ``model/<MODEL_NAME>``
    :IMG_SIZE: int -- input images are IMG_SIZE x IMG_SIZE grayscale
    :LEARNING_RATE: float -- Adam learning rate
    """
    net = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')
    # Six alternating conv/max-pool stages with filter counts 32 and 64.
    for n_filters in (32, 64, 32, 64, 32, 64):
        net = conv_2d(net, n_filters, 2, activation='relu')
        net = max_pool_2d(net, 2)
    net = fully_connected(net, 1024, activation='relu')
    net = dropout(net, 0.8)
    # Two-way softmax head: [cat, dog] one-hot.
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='adam', learning_rate=LEARNING_RATE,
                     loss='categorical_crossentropy', name='targets')
    model = tflearn.DNN(net, tensorboard_dir='log')
    model.load('model/' + MODEL_NAME)
    return model
|
{"/train.py": ["/models.py"], "/predict.py": ["/models.py"]}
|
23,706
|
chanonroy/ml-dog-or-cat
|
refs/heads/master
|
/train.py
|
import cv2 # resizing image
import numpy as np
import os
from random import shuffle
from tqdm import tqdm # progress bar for CLI
BASE_DIR = 'X:/Machine_Learning/Data/dogs_vs_cats'  # dataset root (Kaggle dogs-vs-cats layout)
TRAIN_DIR = BASE_DIR + '/train'  # labelled images, e.g. dog.93.png
TEST_DIR = BASE_DIR + '/test1'   # unlabelled images, e.g. 93.png
IMG_SIZE = 50                    # images are resized to 50x50 grayscale
LR = 1e-3                        # Adam learning rate
MODEL_NAME = 'dogsvscats-{}-{}'.format(LR, '6conv-basic')  # checkpoint base name
# Prepare and Process the Data
def label_img(img):
    """Return the one-hot label for a training filename like ``dog.93.png``.

    ``[1, 0]`` for cats, ``[0, 1]`` for dogs, and ``None`` for anything
    else (matching the original implicit fall-through).
    """
    species = img.split('.')[-3]
    one_hot = {'cat': [1, 0], 'dog': [0, 1]}
    return one_hot.get(species)
def prepare_train_data():
    """Load, resize and label every training image, shuffle, and cache.

    Returns a list of ``[image_array, one_hot_label]`` pairs and saves
    the same list to ``./data/train_data.npy``.
    """
    training_data = []
    for fname in tqdm(os.listdir(TRAIN_DIR)):
        label = label_img(fname)
        raw = cv2.imread(os.path.join(TRAIN_DIR, fname), cv2.IMREAD_GRAYSCALE)
        resized = cv2.resize(raw, (IMG_SIZE, IMG_SIZE))
        training_data.append([np.array(resized), np.array(label)])
    # Shuffle once so train/validation split below is random.
    shuffle(training_data)
    np.save('./data/train_data.npy', training_data)
    return training_data
def prepare_test_data():
    """Load and resize every test image (named like ``93.png``) and cache.

    Returns a list of ``[image_array, image_number]`` pairs and saves the
    same list to ``./data/test_data.npy``.
    """
    testing_data = []
    for fname in tqdm(os.listdir(TEST_DIR)):
        img_num = fname.split('.')[0]
        raw = cv2.imread(os.path.join(TEST_DIR, fname), cv2.IMREAD_GRAYSCALE)
        resized = cv2.resize(raw, (IMG_SIZE, IMG_SIZE))
        testing_data.append([np.array(resized), img_num])
    np.save('./data/test_data.npy', testing_data)
    return testing_data
# --- Build or load the cached training data --------------------------------
training_data = []
if os.path.isfile('./data/train_data.npy'):
    print('Loading training data...')
    # NOTE(review): on NumPy >= 1.16.3 loading this object array requires
    # np.load(..., allow_pickle=True) — confirm the installed version.
    training_data = np.load('./data/train_data.npy')
else:
    print('Generating training data...')
    training_data = prepare_train_data()

# Tensorflow
import models

# Bug fix: setup_model takes (MODEL_NAME, IMG_SIZE, LEARNING_RATE); the
# original call omitted MODEL_NAME and raised a TypeError.
model = models.setup_model(MODEL_NAME, IMG_SIZE, LR)
if os.path.exists('{}.meta'.format(MODEL_NAME)):
    model.load(MODEL_NAME)
    print('Model loaded!')

# Hold out the last 500 samples for validation.
train = training_data[: -500]
test = training_data[-500:]
X = np.array([i[0] for i in train]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
Y = [i[1] for i in train]
test_x = np.array([i[0] for i in test]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
test_y = [i[1] for i in test]

model.fit({'input': X}, {'targets': Y}, n_epoch=5,
          validation_set=({'input': test_x}, {'targets': test_y}),
          snapshot_step=500, show_metric=True, run_id=MODEL_NAME)
model.save(MODEL_NAME)
|
{"/train.py": ["/models.py"], "/predict.py": ["/models.py"]}
|
23,707
|
chanonroy/ml-dog-or-cat
|
refs/heads/master
|
/predict.py
|
import os
import cv2
import numpy as np
import models
IMG_SIZE = 50  # model input is 50x50 grayscale
LR = 1e-3      # learning rate the model was trained with
PATH = os.getcwd()
IMAGE_PATH = PATH + '/images/340.jpg'        # demo image to classify
MODEL_NAME = 'dogsvscats-0.001-6conv-basic'  # saved weight file name
class PetClassifier():
    """Wrapper around the trained CNN that labels an image 'Dog' or 'Cat'."""

    def __init__(self):
        # Load the pre-trained network once; reused for every prediction.
        self.model = models.setup_model(MODEL_NAME, IMG_SIZE, LR)

    def parse_img_from_path(self, path):
        """Read *path* as grayscale and resize to the model's input shape."""
        print(path)
        grey = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        scaled = cv2.resize(grey, (IMG_SIZE, IMG_SIZE))
        return np.array(scaled).reshape(IMG_SIZE, IMG_SIZE, 1)

    def predict(self, path):
        """Classify the image at *path* and return 'Dog' or 'Cat'.

        Model output is one-hot ordered [cat, dog] (cat = [1,0], dog = [0,1]).
        """
        img_matrix = self.parse_img_from_path(path)
        model_out = self.model.predict([img_matrix])
        print(model_out)
        result = 'Dog' if np.argmax(model_out) == 1 else 'Cat'
        print(result)
        return result
# Demo entry point: classify the bundled sample image.
model = PetClassifier()
model.predict(IMAGE_PATH)
|
{"/train.py": ["/models.py"], "/predict.py": ["/models.py"]}
|
23,720
|
luciengaitskell/python-dagu-rs039
|
refs/heads/master
|
/example/motor_drive_example.py
|
from dagurs039 import DaguRS039, data
from dagurs039.config import MotorLayout
import time
d = DaguRS039()
# Configure the controller: individual motor layout, 2S LiPo low-battery
# threshold, per-motor limits, max RPM, encoder resolution, etc.
# (argument order is defined by DaguRS039.cfg).
d.cfg(MotorLayout(MotorLayout.INDIV), data.lipo_low_bty_preset['2S'], 2, 2, 2, 2, 13500, 800, 10, 10)

num_mtrs = 4
# Drive each motor in turn at speed 10 for one second.
for m in range(0, num_mtrs):
    mp = [0] * num_mtrs
    mp[m] = 10
    print("Set Motor {} to 10".format(m))
    d.set_mtr(*mp)
    time.sleep(1)

# Disable all motors:
d.set_mtr(*([0]*num_mtrs))
|
{"/example/motor_drive_example.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"], "/dagurs039/__init__.py": ["/dagurs039/main.py", "/dagurs039/config.py"], "/example/configure_example.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"], "/setup.py": ["/dagurs039/__init__.py"], "/dagurs039/main.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"]}
|
23,721
|
luciengaitskell/python-dagu-rs039
|
refs/heads/master
|
/dagurs039/__init__.py
|
"""An i2c interface library for the Dagu RS039 ComMotion motor controller."""
from .main import DaguRS039
from .config import MotorLayout
__version__ = '0.2'
|
{"/example/motor_drive_example.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"], "/dagurs039/__init__.py": ["/dagurs039/main.py", "/dagurs039/config.py"], "/example/configure_example.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"], "/setup.py": ["/dagurs039/__init__.py"], "/dagurs039/main.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"]}
|
23,722
|
luciengaitskell/python-dagu-rs039
|
refs/heads/master
|
/dagurs039/data.py
|
# Values for low voltage configuration:
# Low-battery cutoff presets keyed by LiPo cell count ('2S' = 2 cells,
# '3S' = 3 cells).  Units are presumably tenths of a volt (6.0 V / 9.0 V)
# — TODO confirm against the ComMotion documentation.
lipo_low_bty_preset = {'2S': 60, '3S': 90}
|
{"/example/motor_drive_example.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"], "/dagurs039/__init__.py": ["/dagurs039/main.py", "/dagurs039/config.py"], "/example/configure_example.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"], "/setup.py": ["/dagurs039/__init__.py"], "/dagurs039/main.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"]}
|
23,723
|
luciengaitskell/python-dagu-rs039
|
refs/heads/master
|
/example/configure_example.py
|
from dagurs039 import DaguRS039, data
from dagurs039.config import MotorLayout
import time
d = DaguRS039()
# One-shot configuration example: individual motor layout, 2S LiPo
# low-battery threshold, per-motor limits, max RPM, encoder settings.
d.cfg(MotorLayout(MotorLayout.INDIV), data.lipo_low_bty_preset['2S'], 2, 2, 2, 2, 13500, 800, 10, 10)
|
{"/example/motor_drive_example.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"], "/dagurs039/__init__.py": ["/dagurs039/main.py", "/dagurs039/config.py"], "/example/configure_example.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"], "/setup.py": ["/dagurs039/__init__.py"], "/dagurs039/main.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"]}
|
23,724
|
luciengaitskell/python-dagu-rs039
|
refs/heads/master
|
/dagurs039/config.py
|
class MotorLayout:
    """Encodes the ComMotion motor-configuration value: a base layout code
    plus an encoder-disable offset (+16 when encoders are not used)."""

    # Base layout codes understood by the controller.
    OMNI3 = 0
    OMNI4 = 1
    MEC = 2
    INDIV = 3

    def __init__(self, base_layout: int, enc_enable: bool=False):
        self.base = base_layout
        # Offset 16 signals that encoders are disabled.
        self.enc = 0 if enc_enable else 16

    def as_num(self):
        """Return the combined configuration value sent over i2c."""
        return self.base + self.enc
|
{"/example/motor_drive_example.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"], "/dagurs039/__init__.py": ["/dagurs039/main.py", "/dagurs039/config.py"], "/example/configure_example.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"], "/setup.py": ["/dagurs039/__init__.py"], "/dagurs039/main.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"]}
|
23,725
|
luciengaitskell/python-dagu-rs039
|
refs/heads/master
|
/setup.py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from dagurs039 import __version__
# Package metadata; version is single-sourced from dagurs039/__init__.py.
setup(name='python-dagu-rs039',
      version=__version__,
      description='An i2c interface library for the Dagu RS039 ComMotion motor controller',
      author='luciengaitskell',
      packages=find_packages(),
      install_requires=['smbus']
      )
|
{"/example/motor_drive_example.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"], "/dagurs039/__init__.py": ["/dagurs039/main.py", "/dagurs039/config.py"], "/example/configure_example.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"], "/setup.py": ["/dagurs039/__init__.py"], "/dagurs039/main.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"]}
|
23,726
|
luciengaitskell/python-dagu-rs039
|
refs/heads/master
|
/dagurs039/main.py
|
from . import data
from .config import MotorLayout
import smbus
import time
def split_high_low(arr):
    """Split a 16-bit value into its [high_byte, low_byte] pair for i2c."""
    high, low = divmod(arr & 0xffff, 0x100)
    return [high, low]
class DaguRS039:
    """i2c driver for the Dagu RS039 ComMotion motor controller."""

    def __init__(self, addr=0x1e, bus=1):
        """
        :param addr: i2c address of the controller.
        :param bus: i2c bus number.
        """
        # Bug fix: the bus number was hard-coded as SMBus(1), silently
        # ignoring the ``bus`` parameter.
        self.b = smbus.SMBus(bus)
        self.addr = addr

    def cfg(self, mtr_cfg: MotorLayout, btry_lv: int, m1_max: int, m2_max: int, m3_max: int, m4_max: int,
            max_rpm: int, encdr_res: int, res_power: int, stall_ms: int, i2c_offset: int=0):
        """Send both configuration packets with the settle delays the
        controller needs between writes."""
        # Bug fix: the original call omitted m4_max, which shifted
        # i2c_offset into basic_cfg's m4_max slot.
        self.basic_cfg(mtr_cfg, btry_lv, m1_max, m2_max, m3_max, m4_max, i2c_offset)
        time.sleep(0.5)  # from tests I have found 0.2 also works, but this is for safety
        self.encoder_cfg(max_rpm, encdr_res, res_power, stall_ms)
        time.sleep(0.5)

    def basic_cfg(self, mtr_cfg: MotorLayout, btry_lv: int, m1_max: int, m2_max: int, m3_max: int, m4_max: int,
                  i2c_offset: int=0):
        """Write general configuration data to the device (command 1)."""
        self.b.write_i2c_block_data(self.addr, 1, [0, mtr_cfg.as_num(), btry_lv, m1_max, m2_max, m3_max, m4_max,
                                                   i2c_offset, 1])

    def encoder_cfg(self, max_rpm: int, encdr_res: int, res_power: int, stall_ms: int):
        """Write encoder configuration data to the device (command 2)."""
        darr = []
        # 16-bit values are sent as [high, low] byte pairs.
        darr.extend(split_high_low(max_rpm))
        darr.extend(split_high_low(encdr_res))
        darr.append(res_power)
        darr.append(stall_ms)
        self.b.write_i2c_block_data(self.addr, 2, darr)

    def set_mtr(self, v1: int, v2: int, v3: int, v4: int=None):
        """Set the motor velocities (command 3); v4 is optional for
        three-motor layouts."""
        darr = []
        darr.extend(split_high_low(v1))
        darr.extend(split_high_low(v2))
        darr.extend(split_high_low(v3))
        if v4 is not None:
            darr.extend(split_high_low(v4))
        self.b.write_i2c_block_data(self.addr, 3, darr)
|
{"/example/motor_drive_example.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"], "/dagurs039/__init__.py": ["/dagurs039/main.py", "/dagurs039/config.py"], "/example/configure_example.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"], "/setup.py": ["/dagurs039/__init__.py"], "/dagurs039/main.py": ["/dagurs039/__init__.py", "/dagurs039/config.py"]}
|
23,727
|
amithreddy/demoapp
|
refs/heads/master
|
/flaskserver.py
|
from flask import Flask, render_template, request, redirect, url_for, g
import imgrepo
app = Flask(__name__)
@app.route('/', methods= ['GET','POST'])
def main():
    """Image gallery view.

    GET renders all image rows; POST deletes the checked images and/or
    resets the database, then redirects back (post/redirect/get pattern).
    """
    db = get_db()
    # request is a global object that flask uses to store incoming request data
    if request.method == "GET":
        # Get all image links in database
        items = db.getall()
        # Pass all image links into renderer
        return render_template('home.html', table=items)
    elif request.method == "POST":
        if request.form.get("submit") is not None:
            checked = request.form.getlist("checked")
            if len(checked) > 0:
                # Checkbox values look like "/<id>"; strip the slash to
                # recover the integer row id.
                checked = [int(val.replace('/', '')) for val in checked]
                print(checked)
                for val in checked:
                    db.delete(val)
        if request.form.get("reset") is not None:
            db.reset()
        return redirect('/')
    else:
        # Shouldn't happen (route only allows GET/POST). Should return a error here
        pass
def get_db():
    """Return the per-request database handle, creating it on first use.

    flask.g lives in the request context (created at the start of a
    request, destroyed at its end), so the connection opened here is
    paired with the close performed in close_connection().
    """
    if getattr(g, '_database', None) is None:
        g._database = imgrepo.DB()
    return g._database
@app.teardown_appcontext
def close_connection(exception):
    """Close the request's sqlite connection (if one was opened) when the
    app context tears down; *exception* is supplied by Flask and ignored."""
    db = getattr(g, '_database', None)
    if db is not None:
        db.conn.close()
|
{"/flaskserver.py": ["/imgrepo.py"]}
|
23,728
|
amithreddy/demoapp
|
refs/heads/master
|
/imgrepo.py
|
import sqlite3
import os
import shutil
#This code is to acess a Sqlite server which holds the links to all the images in our repo.
class DB:
    """Access layer for the sqlite database that holds the links to all
    the images in our repo."""

    def __init__(self):
        self.cursor = None
        self.conn = None
        # Pristine copy used by reset(); working copy actually served.
        self.masterdb = 'masterrepo.db'
        self.dbfile = 'imgrepo.db'
        self.connect()

    def connect(self):
        """Open the working database and cache a cursor."""
        self.conn = sqlite3.connect(self.dbfile)
        self.cursor = self.conn.cursor()

    def create(self):
        """One-time schema setup (the table was populated interactively)."""
        self.cursor.execute(
            '''
            CREATE TABLE IMAGES
            (ID INTEGER PRIMARY KEY AUTOINCREMENT, DESCRIPTION str,URL str)
            ''')

    def getall(self):
        """Return every row as a list of {ID, description, url} dicts."""
        self.cursor.execute("Select * FROM IMAGES")
        raw = self.cursor.fetchall()
        return [{"ID": item[0], "description": item[1], "url": item[2]} for item in raw]

    def delete(self, ID):
        """Delete the row with the given primary key.

        Security fix: the original interpolated ID into the SQL text with
        str.format (SQL injection risk for values arriving from the web
        form); use a parameterized query instead.
        """
        self.cursor.execute("DELETE FROM IMAGES WHERE ID=?", (ID,))
        self.conn.commit()

    def reset(self):
        """Restore the working database from the pristine master copy."""
        # Disconnect from database.
        self.conn.close()
        # delete previous database
        os.remove(self.dbfile)
        # clone master database
        shutil.copyfile(self.masterdb, self.dbfile)
        # connect to cloned database
        self.connect()
# Manual smoke test: dump all rows when run directly.
if __name__ == "__main__":
    db = DB()
    print(db.getall())
|
{"/flaskserver.py": ["/imgrepo.py"]}
|
23,730
|
prabhakarkevat/graphtraversalpython
|
refs/heads/master
|
/MainClass.py
|
from Graph import Graph
class MainClass(object):
    """Demo driver: runs DFS and BFS over two sample adjacency matrices."""

    def __init__(self):
        super(MainClass, self).__init__()

    def main(self):
        """Print dfs/bfs traversals (from vertex 0) for both sample graphs."""
        # Dense, fairly connected 7-vertex graph.
        adj_mat1 = [
            [0, 1, 1, 0, 0, 0, 1],
            [1, 0, 0, 0, 1, 0, 1],
            [1, 0, 0, 1, 0, 0, 1],
            [0, 0, 1, 0, 0, 1, 1],
            [0, 1, 0, 0, 0, 1, 1],
            [0, 0, 0, 1, 1, 0, 1],
            [1, 1, 1, 1, 1, 1, 0],
        ]
        # Sparser tree-like 7-vertex graph.
        adj_mat2 = [
            [0, 1, 1, 0, 0, 0, 0],
            [1, 0, 0, 1, 1, 0, 0],
            [1, 0, 0, 1, 0, 1, 1],
            [0, 1, 0, 0, 0, 0, 0],
            [0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 0, 0, 0],
            [0, 0, 0, 1, 0, 0, 0],
        ]
        graph = Graph(7)
        # Same print order as before: dfs then bfs for each matrix.
        for matrix in (adj_mat1, adj_mat2):
            print(graph.dfs(matrix, 0))
            print(graph.bfs(matrix, 0))
# Script entry point.
if __name__ == '__main__':
    MainClass().main()
|
{"/MainClass.py": ["/Graph.py"]}
|
23,731
|
prabhakarkevat/graphtraversalpython
|
refs/heads/master
|
/Graph.py
|
from Stack import Stack
from Queue import Queue
class Graph(object):
    """Adjacency-matrix graph traversals (docs for Graph)."""

    def __init__(self, n=0):
        super(Graph, self).__init__()
        # Number of vertices; adjacency matrices passed in must be n x n.
        self.n = n

    def dfs(self, adj_mat, source=0):
        """Depth-first traversal from *source*; returns the visit order as
        a string of vertex indices.

        NOTE(review): this is a non-standard DFS.  After following an edge
        it resets the column scan (``i = 0``) while the outer ``for`` still
        runs only n times, so on some graphs not all of a vertex's
        neighbours get examined before the scan budget runs out — confirm
        the produced order is the one callers expect before restructuring.
        """
        path = "";
        stack = Stack();
        visited = [0] * self.n
        # source is pushed twice so the pop at loop start still leaves one
        # copy on the stack — presumably intentional; confirm.
        stack.push(source)
        stack.push(source)
        visited[source] = 1
        path += str(source)
        while not stack.isEmpty():
            v = stack.pop()
            # v = stack.peek()
            i = 0
            for _ in range(self.n):
                if adj_mat[v][i] == 1:
                    if visited[i] != 1:
                        stack.push(i)
                        path += str(i)
                        visited[i] = 1
                        # Descend into the neighbour and restart the scan.
                        v = i
                        i = 0
                i+=1
            # stack.pop()
        return path

    def bfs(self, adj_mat, source=0):
        """Breadth-first traversal from *source*; returns the visit order
        as a string of vertex indices."""
        path = ""
        queue = Queue()
        visited = [0] * self.n
        queue.enqueue(source)
        visited[source] = 1
        path += str(source)
        while not queue.isEmpty():
            v = queue.dequeue()
            for i, w in enumerate(adj_mat[v]):
                if w == 1 and visited[i] != 1:
                    queue.enqueue(i)
                    visited[i] = 1
                    path += str(i)
        return path
|
{"/MainClass.py": ["/Graph.py"]}
|
23,762
|
ddu365/datawhale-code
|
refs/heads/master
|
/task7/dynamic_programming.py
|
# 0-1背包问题
# 递推关系: c[i][m]=max{c[i-1][m-w[i]]+p[i] (m>w[i]), c[i-1][m]}
# 参考链接: https://blog.csdn.net/superzzx0920/article/details/72178544
# n:物品件数;c:最大承重为c的背包;w:各个物品的重量;v:各个物品的价值
# 第一步建立最大价值矩阵(横坐标表示[0,c]整数背包承重):(n+1)*(c+1)
# 技巧:python 生成二维数组(数组)通常先生成列再生成行
def bag(n, c, w, p):
    """0-1 knapsack via dynamic programming.

    n: number of items; c: knapsack capacity; w: item weights; p: item
    values.  Returns the full (n+1) x (c+1) DP table where res[i][j] is
    the best value using the first i items under capacity j (row 0 is all
    zeros; untouched cells keep the -1 sentinel, as before).
    """
    # Row 0 is all zeros so row 1 can be filled uniformly.
    res = [[0] * (c + 1)] + [[-1] * (c + 1) for _ in range(n)]
    for i in range(1, n + 1):
        for j in range(1, c + 1):
            # Either skip item i, or take it; w[i-1]/p[i-1] because the
            # input lists are 0-based while DP rows start at 1.
            best = res[i - 1][j]
            if j >= w[i - 1]:
                best = max(best, res[i - 1][j - w[i - 1]] + p[i - 1])
            res[i][j] = best
    return res
# 以下代码功能:标记出有放入背包的物品
# 反过来标记,在相同价值情况下,后一件物品比前一件物品的最大价值大,则表示物品i#有被加入到背包,x数组设置为True。设初始为j=c。
def show(n, c, w, res):
    """Print the optimal value and which items the DP table selected.

    Scans the rows at column j: when row i improves on row i-1 the item i
    was taken, so its weight is subtracted from the remaining capacity.
    """
    print('最大价值为:', res[n][c])
    taken = [False] * n
    j = c
    for i in range(1, n + 1):
        if res[i][j] > res[i - 1][j]:
            taken[i - 1] = True
            j -= w[i - 1]
    print('选择的物品为:')
    for i, flag in enumerate(taken):
        if flag:
            print('第', i, '个')
# 最小路径和
# 题目地址: https://leetcode-cn.com/problems/minimum-path-sum/
class Solution:
    """A few classic dynamic-programming problems (LeetCode style)."""

    def minPathSum(self, grid):
        """Minimum top-left to bottom-right path sum, moving only right or
        down.  https://leetcode-cn.com/problems/minimum-path-sum/
        """
        rows, cols = len(grid), len(grid[0])
        best = [[0] * cols for _ in range(rows)]
        best[0][0] = grid[0][0]  # running sum of the cheapest path so far
        # First column and first row each have a single incoming direction.
        for r in range(1, rows):
            best[r][0] = best[r - 1][0] + grid[r][0]
        for c in range(1, cols):
            best[0][c] = best[0][c - 1] + grid[0][c]
        for r in range(1, rows):
            for c in range(1, cols):
                best[r][c] = grid[r][c] + min(best[r - 1][c], best[r][c - 1])
        return best[rows - 1][cols - 1]

    def minDistance(self, word1: str, word2: str) -> int:
        """Levenshtein edit distance (insert / delete / replace).
        https://leetcode-cn.com/problems/edit-distance/
        """
        n1, n2 = len(word1), len(word2)
        # Row/column 0 represent the empty prefix ("space") as in the
        # original; the extra +2 slot is kept but unused.
        steps = [[0 for _ in range(n2 + 2)] for _ in range(n1 + 2)]
        for i in range(n1 + 1):
            steps[i][0] = i
        for j in range(n2 + 1):
            steps[0][j] = j
        for i in range(1, n1 + 1):
            for j in range(1, n2 + 1):
                if word1[i - 1] == word2[j - 1]:
                    steps[i][j] = steps[i - 1][j - 1]
                else:
                    # replace, add and delete respectively
                    steps[i][j] = 1 + min(steps[i - 1][j - 1], steps[i][j - 1], steps[i - 1][j])
        return steps[n1][n2]

    def lengthOfLIS(self, nums):
        """Length of the longest strictly increasing subsequence.
        https://leetcode-cn.com/problems/longest-increasing-subsequence/

        dp[i] is the LIS length ending at nums[i]; the answer is max(dp).
        e.g. nums [10, 9, 2, 5, 3, 7, 101, 18] -> dp [1,1,1,2,2,3,4,4]
        """
        if len(nums) <= 1:
            return len(nums)
        dp = [1] * len(nums)
        for i in range(1, len(nums)):
            dp[i] = max([dp[j] + 1 for j in range(i) if nums[i] > nums[j]],
                        default=1)
        return max(dp)
# 最长公共子序列
# 求两个字符串的最大公共子序列(可以不连续)的长度,并输出这个子序列。
# 例如:
# 输入 googleg 和 elgoog 输出 goog 4
# 输入 abcda 和 adcba 输出 aba 3
# 参考:
# https://blog.csdn.net/zszszs1994/article/details/78208488
# https://www.cnblogs.com/AndyJee/p/4465696.html
def longest_common_subsequence(s1, s2):
    """Return (length, subsequence) for the longest common subsequence
    (not necessarily contiguous) of s1 and s2.

    e.g. 'googleg' / 'elgoog' -> (4, 'goog'); 'abcda' / 'adcba' -> length 3.

    Bug fix: the original reconstruction scanned only the last DP column,
    which can emit characters out of order (e.g. 'cab' vs 'abc' yielded
    'cb', which is not a subsequence of 'abc').  Reconstruct by walking
    the DP table back from (l1, l2) instead.
    """
    l1, l2 = len(s1), len(s2)
    dp = [[0] * (l2 + 1) for _ in range(l1 + 1)]
    for i in range(1, l1 + 1):
        for j in range(1, l2 + 1):
            if s1[i - 1] == s2[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    # Standard backtrack: follow matches diagonally, otherwise move toward
    # the larger neighbouring subproblem.
    res = []
    i, j = l1, l2
    while i > 0 and j > 0:
        if s1[i - 1] == s2[j - 1]:
            res.append(s1[i - 1])
            i -= 1
            j -= 1
        elif dp[i - 1][j] >= dp[i][j - 1]:
            i -= 1
        else:
            j -= 1
    res.reverse()
    return dp[l1][l2], ''.join(res)
if __name__ == '__main__':
    # Knapsack demo: 5 items, capacity 10 (optimal value is 15).
    n = 5
    c = 10
    w = [2, 2, 6, 5, 4]
    p = [6, 3, 5, 4, 6]
    res = bag(n, c, w, p)
    show(n, c, w, res)
    # LCS demo: expected length 4 (e.g. 'goog').
    print(longest_common_subsequence('googleg', 'elgoog'))
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,763
|
ddu365/datawhale-code
|
refs/heads/master
|
/task6/graph_adj_list.py
|
"""
图的分类:
按边是否有方向可分为有向图和无向图
按边是否有权重可分为有权图和无权图
图的表示:
邻接表: 将所有与图中某个顶点u相连的顶点依此表示出来 [无需事先指定顶点数]
邻接矩阵: 一维数组存储图的顶点信息,二维数组(邻接矩阵)存储顶点间边的信息 [需事先指定顶点数]
"""
import copy
import queue
class Vertex:
    """A graph vertex: holds its value, a weighted neighbour map, a
    visited flag used by traversals, and (for directed graphs) in/out
    degree counters."""

    def __init__(self, val):
        self._val = val
        self._adjacent = {}        # neighbour Vertex -> edge weight
        self.visited = False
        # self.previous = None
        self._in_degree = 0        # In_degree Count
        self._out_degree = 0       # Out_degree Count

    def add_neighbor(self, nbr, weight=0):
        self._adjacent[nbr] = weight

    def get_connections(self):
        return self._adjacent.keys()

    def get_vertex_val(self):
        return self._val

    def get_weight(self, nbr):
        return self._adjacent[nbr]

    # def set_previous(self, pre):
    #     self.previous = pre

    def set_visited(self):
        self.visited = True

    def get_visited(self):
        return self.visited

    def get_in_degree(self):
        return self._in_degree

    def set_in_degree(self, in_degree):
        self._in_degree = in_degree

    def get_out_degree(self):
        return self._out_degree

    def set_out_degree(self, out_degree):
        self._out_degree = out_degree

    def __str__(self):
        neighbour_vals = [v.get_vertex_val() for v in self._adjacent.keys()]
        return str(self._val) + ' adjacent: ' + str(neighbour_vals)
class Graph:
    """Adjacency-list graph (directed or undirected) over Vertex objects,
    with traversals, topological sorts and Dijkstra shortest paths."""

    def __init__(self, is_directed=False):
        self.vertex_dict = {}      # vertex value -> Vertex object
        self.num_vertex = 0
        self.is_directed = is_directed

    def __iter__(self):
        return iter(self.vertex_dict.values())

    def add_vertex(self, val):
        """Create, register and return a new Vertex for *val*."""
        self.num_vertex += 1
        new_vertex = Vertex(val)
        self.vertex_dict[val] = new_vertex
        return new_vertex

    def get_vertex(self, val):
        """Return the Vertex for *val*, or None if unknown."""
        if val in self.vertex_dict:
            return self.vertex_dict[val]
        else:
            return None

    def add_edge(self, source, target, weight=0):
        """Add an edge (auto-creating endpoints); mirror it when the graph
        is undirected, track in/out degree when it is directed."""
        if source not in self.vertex_dict:
            self.add_vertex(source)
        if target not in self.vertex_dict:
            self.add_vertex(target)
        self.vertex_dict[source].add_neighbor(self.vertex_dict[target], weight)
        if not self.is_directed:  # undirected: add the reverse edge too
            self.vertex_dict[target].add_neighbor(self.vertex_dict[source], weight)
        if self.is_directed:  # only directed graphs record in/out degree
            self.get_vertex(source).set_out_degree(self.get_vertex(source).get_out_degree() + 1)
            self.get_vertex(target).set_in_degree(self.get_vertex(target).get_in_degree() + 1)

    def get_vertices(self):
        return self.vertex_dict.keys()

    def get_edges(self):
        """Return all edges as (u_val, v_val, weight) tuples.  In an
        undirected graph (u,v,w) and (v,u,w) denote the same edge."""
        edges = []
        for u in self:
            for v in u.get_connections():
                edges.append((u.get_vertex_val(), v.get_vertex_val(), u.get_weight(v)))
        return edges

    def dfs_traverse(self, start):
        """Iterative depth-first traversal from Vertex *start*; returns
        the visit order.  NOTE: marks Vertex.visited, so a second
        traversal on the same Graph object sees everything visited
        (the demo deepcopies the graph for this reason)."""
        stack = [start]
        res = []
        while stack:
            v_node = stack.pop()
            if not v_node.get_visited():
                v_node.set_visited()
                res.append(v_node.get_vertex_val())
                for next_v_node in v_node.get_connections():
                    if not next_v_node.get_visited():
                        stack.append(next_v_node)
        return res

    def bfs_traverse(self, start):
        """Breadth-first traversal from Vertex *start*; same visited-flag
        caveat as dfs_traverse.  (Local renamed from ``queue``, which
        shadowed the imported queue module.)"""
        to_visit = [start]
        res = []
        while to_visit:
            v_node = to_visit.pop(0)
            if not v_node.get_visited():
                v_node.set_visited()
                res.append(v_node.get_vertex_val())
                for next_v_node in v_node.get_connections():
                    if not next_v_node.get_visited():
                        to_visit.append(next_v_node)
        return res

    def topo_kahn(self):
        """Kahn topological sort for a DAG: repeatedly emit a vertex with
        in-degree 0 and decrement its successors.  Mutates the stored
        in-degree counters."""
        kahn_out = []
        in_0 = []  # vertices whose current in-degree is 0
        for v_val in self.get_vertices():
            v_obj = self.get_vertex(v_val)
            if v_obj.get_in_degree() == 0:
                in_0.append(v_obj)
        while in_0:
            u_obj = in_0.pop()
            kahn_out.append(u_obj.get_vertex_val())
            for v_obj in u_obj.get_connections():
                v_obj.set_in_degree(v_obj.get_in_degree() - 1)
                if v_obj.get_in_degree() == 0:
                    in_0.append(v_obj)
        return kahn_out

    def topo_dfs(self):
        """DFS-based topological sort: post-order, reversed.  Mutates the
        shared visited flags (the demo deepcopies the graph first)."""
        dfs_out = []

        def dfs(u_obj):
            for v_obj in u_obj.get_connections():
                if not v_obj.get_visited():
                    v_obj.set_visited()
                    dfs(v_obj)
            dfs_out.append(u_obj.get_vertex_val())

        for u_val in self.get_vertices():
            u_obj = self.get_vertex(u_val)
            if not u_obj.get_visited():
                u_obj.set_visited()
                dfs(u_obj)
        dfs_out.reverse()
        return dfs_out

    def dijkstra(self, src):
        """Single-source shortest paths from Vertex *src*.

        Returns {vertex_val: distance}.  Uses queue.PriorityQueue with
        lazy deletion: relaxing a vertex pushes a fresh (distance, value)
        entry, and stale entries are skipped when popped.

        Bug fix: the original mutated list entries that were already
        inside the PriorityQueue, which breaks the heap invariant and can
        pop vertices in the wrong order; it also never marked popped
        vertices as finished.
        """
        dis = {v_val: float('inf') for v_val in self.get_vertices()}
        dis[src.get_vertex_val()] = 0
        pq = queue.PriorityQueue()
        pq.put((0, src.get_vertex_val()))
        done = set()
        while not pq.empty():
            d, u_val = pq.get()
            if u_val in done or d > dis[u_val]:
                continue  # stale entry left over from an earlier relaxation
            done.add(u_val)
            u = self.get_vertex(u_val)
            for v in u.get_connections():
                v_val = v.get_vertex_val()
                nd = d + u.get_weight(v)
                if nd < dis[v_val]:
                    dis[v_val] = nd
                    pq.put((nd, v_val))
        return dis
if __name__ == '__main__':
    # Build a small directed, weighted demo graph.
    g = Graph(is_directed=True)  # is_directed=True
    g.add_vertex('a')
    g.add_vertex('b')
    g.add_vertex('c')
    g.add_vertex('d')
    g.add_vertex('e')
    g.add_edge('a', 'b', 4)
    g.add_edge('a', 'c', 1)
    g.add_edge('c', 'b', 2)
    g.add_edge('b', 'e', 4)
    g.add_edge('c', 'd', 4)
    g.add_edge('d', 'e', 4)
    # Alternative unweighted demo graph (kept for reference):
    # g.add_vertex('v1')
    # g.add_vertex('v2')
    # g.add_vertex('v3')
    # g.add_vertex('v4')
    # g.add_vertex('v5')
    # g.add_edge('v2', 'v1')
    # g.add_edge('v1', 'v5')
    # g.add_edge('v4', 'v2')
    # g.add_edge('v4', 'v5')
    # g.add_edge('v3', 'v1')
    # g.add_edge('v3', 'v5')
    # Traversals mutate the per-vertex visited flags, so each traversal
    # gets its own deep copy of the graph.
    cp_bfs = copy.deepcopy(g)
    cp_dfs = copy.deepcopy(g)
    print(g.get_vertices())
    print(g.get_edges())
    print('[dfs]', g.dfs_traverse(g.get_vertex('a')))
    print('[bfs]', cp_bfs.bfs_traverse(cp_bfs.get_vertex('a')))
    print('[dijkstra]', g.dijkstra(g.get_vertex('a')))
    print('[topo_kahn]', g.topo_kahn())
    print('[topo_dfs]', cp_dfs.topo_dfs())
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,764
|
ddu365/datawhale-code
|
refs/heads/master
|
/task3/bubble_sort.py
|
def bubble_sort(s):
    """
    Sort the elements of list s in place using bubble sort; returns s.
    Complexity: best O(n), avg O(n^2), worst O(n^2)

    Fix: added the early-exit (swapped) flag — without it every pass runs
    unconditionally and the documented O(n) best case did not hold.
    """
    for i in range(0, len(s)):
        swapped = False
        # After pass i the last i elements are already in place.
        for j in range(1, len(s) - i):
            if s[j - 1] > s[j]:
                s[j - 1], s[j] = s[j], s[j - 1]
                swapped = True
        if not swapped:
            break  # no swaps means the list is already sorted
    return s
if __name__ == '__main__':
    # Quick demo on an unsorted sample list.
    ul = [85, 24, 63, 45, 17, 31, 96, 50]
    print(bubble_sort(ul))
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,765
|
ddu365/datawhale-code
|
refs/heads/master
|
/task5/heap.py
|
"""
Min Heap. A min heap is a complete binary tree where each node is smaller
its children. The root, therefore, is the minimum element in the tree. The min
heap use array to represent the data and operation. For example a min heap:
4
/ \
50 7
/ \ /
55 90 87
Heap [4, 50, 7, 55, 90, 87]
Method in class: insert, remove_min
For example insert(2) in a min heap:
4 4 2
/ \ / \ / \
50 7 --> 50 2 --> 50 4
/ \ / \ / \ / \ / \ / \
55 90 87 2 55 90 87 7 55 90 87 7
For example remove_min() in a min heap:
4 87 7
/ \ / \ / \
50 7 --> 50 7 --> 50 87
/ \ / / \ / \
55 90 87 55 90 55 90
"""
from abc import ABCMeta, abstractmethod
class AbstractHeap(metaclass=ABCMeta):
    """Abstract Class for Binary Heap."""

    def __init__(self):
        self._size = 0    # number of elements currently stored
        self.heap = []    # array representation of the complete binary tree

    def __len__(self):
        return self._size

    def insert(self, val):
        """
        Insert always starts by appending the element at the bottom
        (rightmost spot) to maintain the complete-tree property, then
        fixes the tree by swapping the new element with its parent until
        it finds an appropriate spot.  It essentially adjust_up's the
        min/max element.
        Complexity: O(logN)
        """
        self.heap.append(val)
        self._size += 1
        self.adjust_up(self._size - 1)

    @abstractmethod
    def adjust_up(self, i):
        """Sift the element at index i up until the heap property holds."""
        pass

    @abstractmethod
    def adjust_down(self, i):
        """Sift the element at index i down until the heap property holds."""
        pass
class MinHeap(AbstractHeap):
    """Binary min-heap: each node is <= its children; the root is the
    minimum element."""

    def __init__(self):
        super().__init__()

    def adjust_up(self, i):
        # While a parent exists (parent of i is (i-1) // 2).
        while (i+1) // 2 > 0:
            # Current node smaller than its parent?
            if self.heap[i] < self.heap[(i-1) // 2]:
                # Swap value of child with value of its parent
                self.heap[i], self.heap[(i-1) // 2] = self.heap[(i-1) // 2], self.heap[i]
            # Move to the parent unconditionally so the loop terminates.
            i = (i-1) // 2

    def min_child(self, i):
        """Return the index of the smaller of i's (1 or 2) children."""
        if 2 * (i+1) + 1 > self._size:  # No right child
            return 2 * i + 1
        else:
            # left child > right child
            if self.heap[2 * i + 1] > self.heap[2 * (i + 1)]:
                return 2 * (i + 1)
            else:
                return 2 * i + 1

    def adjust_down(self, i):
        # While a left child exists (left child of i is 2i + 1).
        while 2 * (i+1) <= self._size:
            min_child = self.min_child(i)
            if self.heap[min_child] < self.heap[i]:
                # Swap min child with parent
                self.heap[min_child], self.heap[i] = self.heap[i], self.heap[min_child]
            # Descend unconditionally so the loop terminates.
            i = min_child

    def remove_min(self):
        """
        Remove the minimum: swap the root with the last element in the
        heap (the bottommost, rightmost element), shrink, then adjust_down
        from the root until the min-heap property is restored.
        Complexity: O(logN)
        """
        ret = self.heap[0]  # the smallest value at beginning
        self.heap[0] = self.heap[self._size-1]  # Replace it by the last value
        self._size -= 1
        self.heap.pop()
        self.adjust_down(0)
        return ret
class MaxHeap(AbstractHeap):
    """Binary max-heap: each node is >= its children; the root is the
    maximum element."""

    def __init__(self):
        super().__init__()

    def adjust_up(self, i):
        # While a parent exists (parent of i is (i-1) // 2).
        while (i+1) // 2 > 0:
            # Current node larger than its parent?
            if self.heap[i] > self.heap[(i-1) // 2]:
                # Swap value of child with value of its parent
                self.heap[i], self.heap[(i-1) // 2] = self.heap[(i-1) // 2], self.heap[i]
            # Move to the parent unconditionally so the loop terminates.
            i = (i-1) // 2

    def max_child(self, i):
        """Return the index of the larger of i's (1 or 2) children."""
        if 2 * (i + 1) + 1 > self._size:  # No right child
            return 2 * i + 1
        else:
            # left child > right child
            if self.heap[2 * i + 1] > self.heap[2 * (i + 1)]:
                return 2 * i + 1
            else:
                return 2 * (i + 1)

    def adjust_down(self, i):
        # While a left child exists (left child of i is 2i + 1).
        while 2 * (i+1) <= self._size:
            max_child = self.max_child(i)
            if self.heap[max_child] > self.heap[i]:
                # Swap max child with parent
                self.heap[max_child], self.heap[i] = self.heap[i], self.heap[max_child]
            # Descend unconditionally so the loop terminates.
            i = max_child

    def remove_max(self):
        """
        Remove the maximum: swap the root with the last element in the
        heap (the bottommost, rightmost element), shrink, then adjust_down
        from the root until the max-heap property is restored.
        Complexity: O(logN)
        """
        ret = self.heap[0]  # the largest value at beginning
        self.heap[0] = self.heap[self._size - 1]  # Replace it by the last value
        self._size -= 1
        self.heap.pop()
        self.adjust_down(0)
        return ret
if __name__ == '__main__':
    # Exercise the custom heaps against the stdlib heapq on the same data.
    values = [85, 24, 63, 45, 17, 31, 96, 50]
    min_heap = MinHeap()
    for v in values:
        min_heap.insert(v)
    print('[min_heap]', [min_heap.remove_min() for _ in range(len(min_heap))])
    import heapq
    heap = []
    for v in values:
        heapq.heappush(heap, v)
    print('[heap top k(k=3)]', heapq.nlargest(3, heap))
    print('[built-in heap]', [heapq.heappop(heap) for _ in range(len(heap))])
    max_heap = MaxHeap()
    for v in values:
        max_heap.insert(v)
    print('[max_heap]', [max_heap.remove_max() for _ in range(len(max_heap))])
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,766
|
ddu365/datawhale-code
|
refs/heads/master
|
/task7/divide_conquer.py
|
# 逆序对个数
# 解题思路:
# 首先将给定的数组列表按索引递归的分成左右两部分,然后比较分割后的左右两部分,
# 当左边列表的元素大于右边的,往左移动左列表的索引,并记录逆序对个数
# 当右边列表的元素大于左边的,往左移动右列表的索引
# 最终将两个列表合成一个有序的列表
# 重复上述过程,直到将所有的左右列表合成一个列表。
# Global inversion counter, written by merge() during the merge passes.
count = 0

def inverse_pairs(data):
    """Sort data via merge sort; leaves the number of inversions in global `count`."""
    global count
    count = 0  # fix: reset so repeated calls do not accumulate stale counts
    return merge_sort(data)
def merge_sort(lists):
    """Recursively split, sort and merge; inversion counting happens in merge()."""
    global count
    if len(lists) <= 1:
        return lists
    mid = len(lists) // 2
    return merge(merge_sort(lists[:mid]), merge_sort(lists[mid:]))
# left and right are each already sorted (non-decreasing).
def merge(left, right):
    """Merge two sorted lists back-to-front while counting inversions.

    Whenever left[l] > right[r], left[l] is greater than all of
    right[0..r], i.e. r + 1 inversions, which are added to global `count`.
    Returns the merged, sorted list.
    """
    global count
    r = len(right) - 1
    l = len(left) - 1
    result = []
    while l >= 0 and r >= 0:
        if left[l] > right[r]:
            result.insert(0, left[l])
            l -= 1
            count += r + 1  # left[l] inverts with every remaining right element
        else:
            result.insert(0, right[r])
            r -= 1
    # Prepend whichever side still has unconsumed elements.
    if l >= 0:
        left = left[0: l + 1]
        result = left + result
    if r >= 0:
        right = right[0: r + 1]
        result = right + result
    return result
if __name__ == '__main__':
    sample = [1, 2, 3, 4, 0]
    inverse_pairs(sample)
    print(count)  # 4 inversions: (1,0) (2,0) (3,0) (4,0)
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,767
|
ddu365/datawhale-code
|
refs/heads/master
|
/task1/queue_test.py
|
import task1.queue as queue
from collections import deque
class MyCircularDeque:
    """Fixed-capacity circular double-ended queue backed by a plain list.

    `_front` is the index of the first element; `_rear` is the index one
    past the last element. `_size` disambiguates the full/empty cases, in
    both of which `_front == _rear`.
    """

    def __init__(self, k: int):
        """Initialize the deque with capacity k."""
        self._array = [None] * k
        self._front = 0
        self._rear = 0
        self._size = 0

    def insertFront(self, value: int) -> bool:
        """Add an item at the front of the deque. Return True if successful."""
        if self.isFull():
            return False
        self._front = self._dec_p(self._front)
        self._array[self._front] = value
        self._size += 1
        return True

    def insertLast(self, value: int) -> bool:
        """Add an item at the rear of the deque. Return True if successful."""
        if self.isFull():
            return False
        self._array[self._rear] = value
        self._rear = self._inc_p(self._rear)
        self._size += 1
        return True

    def deleteFront(self) -> bool:
        """Delete the front item. Return True if successful."""
        if self.isEmpty():
            return False
        self._array[self._front] = None  # drop the reference
        self._front = self._inc_p(self._front)
        self._size -= 1
        return True

    def deleteLast(self) -> bool:
        """Delete the rear item. Return True if successful."""
        if self.isEmpty():
            return False
        self._rear = self._dec_p(self._rear)
        self._array[self._rear] = None
        self._size -= 1
        return True

    def getFront(self) -> int:
        """Return the front item, or -1 when the deque is empty."""
        if self.isEmpty():
            return -1
        return self._array[self._front]

    def getRear(self) -> int:
        """Return the rear item, or -1 when the deque is empty."""
        if self.isEmpty():
            return -1
        # Read only; moving self._rear here would corrupt the deque.
        return self._array[self._dec_p(self._rear)]

    def isEmpty(self) -> bool:
        """True when the deque holds no elements."""
        return self._size == 0

    def isFull(self) -> bool:
        """True when the deque is at capacity."""
        return self._size == len(self._array)

    def _inc_p(self, index):
        # Advance an index one slot with wrap-around.
        return (index + 1) % len(self._array)

    def _dec_p(self, index):
        # Step an index back one slot with wrap-around.
        return (index - 1 + len(self._array)) % len(self._array)

    def __str__(self):
        return " ".join(map(str, self))

    def __iter__(self):
        # Yield exactly _size elements starting at _front.
        # Fix: the old loop ran until it met _rear again, so iterating an
        # empty deque yielded a full lap of stale/None slots.
        idx = self._front
        for _ in range(self._size):
            yield self._array[idx]
            idx = self._inc_p(idx)
if __name__ == '__main__':
    try:
        # Built-in deque for comparison: maxlen silently evicts on overflow.
        d = deque(maxlen=5)
        for v in (5, 7, 4, 3):
            d.appendleft(v)
        d.append(1)
        d.append(9)  # insert success
        print(f'rear----{d[-1]}')
        print(d)
        # Custom fixed-capacity deque: rejects inserts when full.
        obj = MyCircularDeque(5)
        for v in (5, 7, 4, 3):
            obj.insertFront(v)
        obj.insertLast(1)
        obj.insertLast(9)  # insert error
        print(f'rear----{obj.getRear()}')
        print(obj)
        # test ArrayQueue
        aq = queue.ArrayQueue(3)
        aq.enqueue(5)
        aq.enqueue(3)
        aq.dequeue()
        aq.enqueue(6)
        aq.enqueue(9)
        print(f'aq.first:{aq.first()}')
        print(aq)
        # peek at the ArrayQueue's backing storage
        print(len(aq._array))
        print(aq._array)
        # test LinkedListQueue
        lq = queue.LinkedListQueue()
        for v in (5, 3):
            lq.enqueue(v)
        lq.dequeue()
        lq.enqueue(6)
        lq.enqueue(9)
        print(f'lq.first:{lq.first()}')
        print(lq)
        for _ in range(3):
            lq.dequeue()
        print(lq)
    except Exception as e:
        print(f'test happen error:{e}')
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,768
|
ddu365/datawhale-code
|
refs/heads/master
|
/task7/backtracking.py
|
# N皇后
# 解题思路:
# 判断棋盘上某一行某一列的皇后与之前行上的皇后 是否在同列或同对角线上,
# 如果在,移动当前行的皇后至下一列,再判断是否在同列或同对角线上,如果移动到最后仍然在,则回溯到上一行,重复上述移动与判断的过程。
# 总结: 走不通,就掉头[采用深度优先的策略去搜索问题的解]
def place(x, k):  # conflict check
    """True if the queen in row k (1-based) clashes with no row 1..k-1.

    x[i] == x[k]             -> two queens share a column
    |x[i] - x[k]| == |i - k| -> two queens share a diagonal
    """
    return all(
        x[i] != x[k] and abs(x[i] - x[k]) != abs(i - k)
        for i in range(1, k)
    )
# get one solution
def queens(n):
    """Return one n-queens placement as 1-based column numbers, depth-first.

    Walks row by row; when every column of a row conflicts, backtracks to
    the previous row and advances its queen instead.
    """
    row = 1                    # row currently being placed (1-based)
    x = [0] * (n + 1)          # x[r] = column of the queen in row r
    while row > 0:
        x[row] += 1            # try the next column in this row
        while x[row] <= n and not place(x, row):
            x[row] += 1
        if x[row] <= n:
            if row == n:       # all queens placed
                break
            row += 1           # move on to the next row
            x[row] = 0         # start that row from column 1
        else:
            x[row] = 0         # every column failed: backtrack
            row -= 1
    return x[1:]

print(queens(4))
# problems: https://leetcode-cn.com/problems/n-queens/
# get all solutions
class Solution:
def solveNQueens(self, n):
res = []
a = [0 for _ in range(n)] # save the position of queen each row
def back(a, k): # k: the row to be checked
if k >= n:
l = []
for pos in a:
s = ['.' for _ in range(len(a))]
s[pos - 1] = 'Q'
l.append(''.join(s))
res.append(l)
else:
for i in range(1, n + 1):
a[k] = i
if self.is_safe(a, k):
back(a, k + 1) # 递归回溯
back(a, 0)
return res
# rows before k are judged with k
def is_safe(self, a, k): # k: the row to be checked
for i in range(k):
if a[i] == a[k] or abs(a[i] - a[k]) == k - i:
return False
return True
# 0-1 knapsack
# Backtracking: depth-first search over take/skip decisions per item,
# recording the best total value seen at any complete assignment.
bestV = 0      # best total value found so far
curW = 0       # weight of the current partial selection
curV = 0       # value of the current partial selection
bestx = None   # copy of x for the best solution

def backtrack(i):
    """Explore take/skip choices for items i..n-1 (uses module globals n, w, v, c, x)."""
    global bestV, curW, curV, x, bestx
    if i >= n:
        if bestV < curV:
            bestV = curV
            bestx = x[:]
        return
    if curW + w[i] <= c:
        # Branch 1: take item i.
        x[i] = True
        curW += w[i]
        curV += v[i]
        backtrack(i + 1)
        curW -= w[i]
        curV -= v[i]
        x[i] = False
    # Branch 2: skip item i. Fix: this call sat inside the capacity check,
    # so any branch where an item did not fit was abandoned without ever
    # reaching i >= n to record its value.
    backtrack(i + 1)
if __name__ == '__main__':
    # Five items, capacity 10; expected best value is 15 (items 0, 1, 4).
    n = 5
    c = 10
    w = [2, 2, 6, 5, 4]
    v = [6, 3, 5, 4, 6]
    x = [False] * n
    backtrack(0)
    print(bestV)
    print(bestx)
# if __name__ == '__main__':
# solution = Solution()
# print(solution.solveNQueens(4))
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,769
|
ddu365/datawhale-code
|
refs/heads/master
|
/task1/linkedlist_test.py
|
import task1.linkedlist as ll
import math
# 题目链接 https://leetcode-cn.com/problems/middle-of-the-linked-list/
# 题目链接 https://leetcode-cn.com/problems/middle-of-the-linked-list/
def middle_node(head):
    """Return the value of the middle node (the second middle for even length).

    Uses the fast/slow two-pointer technique: O(n) time, O(1) extra space.
    The previous version stored every node in a dict (O(n) extra memory)
    for the same result.
    """
    slow = fast = head
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
    return slow.value
def reverse_singly_linked_list(head):
    """
    Reverse a singly linked list in place and return the new head.
    :type head: ListNode
    :rtype: ListNode
    """
    if head is None or head.next is None:
        return head
    prev = None
    cur = head
    while cur is not None:
        nxt = cur.next   # remember the rest of the list
        cur.next = prev  # point this node backwards
        prev = cur
        cur = nxt
    return prev
def merge_two_list(l1, l2):
    """Merge two sorted node chains into one sorted chain; returns its head."""
    dummy = ll.SinglyLinkedListNode(None)  # sentinel in front of the result
    tail = dummy
    while l1 and l2:
        if l1.value < l2.value:
            tail.next, l1 = l1, l1.next
        else:
            tail.next, l2 = l2, l2.next
        tail = tail.next
    tail.next = l1 or l2  # splice on whichever chain remains
    return dummy.next
# Demo driver: exercises the three linked-list variants, list reversal,
# merging of two sorted lists and the middle-node helper.
if __name__ == '__main__':
    try:
        # SinglyLinkedList
        print('-'*20, 'SinglyLinkedList', '-'*20)
        sLinkedList = ll.SinglyLinkedList()
        header = sLinkedList.append(3)
        sLinkedList.append(5)
        sLinkedList.remove_first()
        sLinkedList.append(9)
        sLinkedList.add_first(8)
        print(sLinkedList)
        sLinkedList.insert(2, 2)
        print(sLinkedList)
        print(len(sLinkedList))
        sLinkedList.delete(3)
        print(sLinkedList)
        # SinglyCircularLinkedList
        print('o' * 20, 'SinglyCircularLinkedList', '0' * 20)
        scLinkedList = ll.SinglyCircularLinkedList()
        scLinkedList.append(3)
        scLinkedList.append(5)
        scLinkedList.remove_first()
        scLinkedList.append(9)
        scLinkedList.add_first(8)
        print(scLinkedList)
        scLinkedList.insert(2, 2)
        print(scLinkedList)
        print(len(scLinkedList))
        scLinkedList.delete(3)
        print(scLinkedList)
        # DoublyLinkedList
        # NOTE(review): banner string says 'SinglyCircularLinkedList' but this
        # section exercises DoublyLinkedList.
        print('=' * 20, 'SinglyCircularLinkedList', '=' * 20)
        dLinkedList = ll.DoublyLinkedList()
        dLinkedList.append(3)
        dLinkedList.append(5)
        dLinkedList.remove_first()
        dLinkedList.append(9)
        dLinkedList.add_first(8)
        print(dLinkedList)
        dLinkedList.insert(2, 2)
        print(dLinkedList)
        print(len(dLinkedList))
        dLinkedList.delete(3)
        print(dLinkedList)
        # reverse_singly_linked_list
        print('#' * 20, 'reverse_singly_linked_list', '#' * 20)
        print('before reverse')
        print([value for value in header])
        reverse_header = reverse_singly_linked_list(header)
        print('after reverse')
        print([value for value in reverse_header])
        # merge_two_list
        s1 = ll.SinglyLinkedList()
        s1.append(1)
        s1.append(2)
        l1 = s1.append(6)
        s2 = ll.SinglyLinkedList()
        s2.append(5)
        s2.append(7)
        l2 = s2.append(9)
        print(f'l1:{s1} l2:{s2}')
        l12 = merge_two_list(l1, l2)
        print(f'merge l1 l2 --> {[value for value in l12]}')
        # middle_node
        print(f'middle_node of l12:{middle_node(l12)}')
    except Exception as e:
        print(f'test happen a error:{e}')
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,770
|
ddu365/datawhale-code
|
refs/heads/master
|
/task3/selection_sort.py
|
def selection_sort(s):
    """
    Sort list s in place with selection sort and return it.
    Complexity: best O(n^2), avg O(n^2), worst O(n^2)
    """
    n = len(s)
    for i in range(n):
        # Index of the smallest remaining element (first occurrence on ties).
        smallest = min(range(i, n), key=s.__getitem__)
        s[i], s[smallest] = s[smallest], s[i]
    return s
if __name__ == '__main__':
    sample = [85, 24, 63, 45, 17, 31, 96, 50]
    print(selection_sort(sample))
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,771
|
ddu365/datawhale-code
|
refs/heads/master
|
/task2/brute_force.py
|
def find_brute(t, p):
    """
    Brute-force string matching.
    :param t: text to search in
    :param p: pattern to search for
    :return: lowest index of t at which p begins, or -1 if absent
    """
    m = len(p)
    for start in range(len(t) - m + 1):
        if t[start:start + m] == p:
            return start
    return -1
if __name__ == '__main__':
    text = 'abacaabaccabacabaabb'
    pattern = 'abacab'
    print(find_brute(text, pattern))
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,772
|
ddu365/datawhale-code
|
refs/heads/master
|
/task6/graph_adj_matrix.py
|
"""
图的分类:
按边是否有方向可分为有向图和无向图
按边是否有权重可分为有权图和无权图
图的表示:
邻接表: 将所有与图中某个顶点u相连的顶点依此表示出来 [无需事先指定顶点数]
邻接矩阵: 一维数组存储图的顶点信息,二维数组(邻接矩阵)存储顶点间边的信息 [需事先指定顶点数]
"""
import copy
import queue
class Vertex:
    """A graph vertex: a payload value plus a visited flag for traversals."""

    def __init__(self, val):
        self.val = val
        self.visited = False  # traversal bookkeeping; every node starts unvisited

    def __str__(self):
        return str(self.val)

    def get_vertex_val(self):
        """Return the vertex's payload value."""
        return self.val

    def set_vertex_val(self, val):
        """Replace the vertex's payload value."""
        self.val = val

    def get_visited(self):
        """Return whether a traversal has already seen this vertex."""
        return self.visited

    def set_visited(self):
        """Mark this vertex as seen by a traversal."""
        self.visited = True
class Graph:
    """Weighted graph (directed or undirected) on an adjacency matrix.

    adj_matrix[u][v] == -1 means "no edge"; any other number is the edge
    weight. The vertex count is fixed at construction time.
    """

    def __init__(self, num_vertex, is_directed=False):
        self.is_directed = is_directed
        self.adj_matrix = [[-1] * num_vertex for _ in range(num_vertex)]
        self.num_vertex = num_vertex
        self.vertices = []
        for i in range(0, self.num_vertex):
            self.vertices.append(Vertex(i))

    def set_vertex(self, v_idx, v_val):
        """Assign the payload value of the vertex at index v_idx."""
        if 0 <= v_idx < self.num_vertex:
            self.vertices[v_idx].set_vertex_val(v_val)
        else:
            raise IndexError('index out of the range')

    def get_vertex_idx(self, v_val):
        """Return the index of the vertex holding v_val, or -1 if absent."""
        for v_idx in range(0, self.num_vertex):
            if v_val == self.vertices[v_idx].get_vertex_val():
                return v_idx
        return -1

    def get_vertex(self, v_idx):
        """Return the Vertex object at index v_idx."""
        if not 0 <= v_idx < self.num_vertex:
            raise IndexError('index out of the range')
        return self.vertices[v_idx]

    def add_edge(self, source, target, weight=0):
        """Add an edge between vertex *values* (mirrored when undirected)."""
        if self.get_vertex_idx(source) != -1 and self.get_vertex_idx(target) != -1:
            self.adj_matrix[self.get_vertex_idx(source)][self.get_vertex_idx(target)] = weight
            if not self.is_directed:  # undirected graph: store the reverse edge too
                self.adj_matrix[self.get_vertex_idx(target)][self.get_vertex_idx(source)] = weight

    def get_edges(self):
        """Return all edges as (source_val, target_val, weight) triples."""
        edges = []
        for u_idx in range(0, self.num_vertex):
            for v_idx in range(0, self.num_vertex):
                if self.adj_matrix[u_idx][v_idx] != -1:
                    u_val = self.vertices[u_idx].get_vertex_val()
                    v_val = self.vertices[v_idx].get_vertex_val()
                    edges.append((u_val, v_val, self.adj_matrix[u_idx][v_idx]))
        return edges

    def get_vertices(self):
        """Return the values of all vertices in index order."""
        vertices = []
        for v_idx in range(0, self.num_vertex):
            vertices.append(self.vertices[v_idx].get_vertex_val())
        return vertices

    def print_matrix(self):
        """Print the adjacency matrix one row per line."""
        for u_idx in range(0, self.num_vertex):
            row = []
            for v_idx in range(0, self.num_vertex):
                row.append(self.adj_matrix[u_idx][v_idx])
            print(row)

    def dfs_traverse(self, start):
        """Iterative depth-first traversal from Vertex `start`.

        NOTE: marks vertices visited in place, so a second traversal on the
        same Graph instance sees everything as already visited.
        """
        stack = [start]
        res = []
        while stack:
            v_node = stack.pop()
            if not v_node.get_visited():
                v_node.set_visited()
                res.append(v_node.get_vertex_val())
                v_node_idx = self.get_vertex_idx(v_node.get_vertex_val())
                for target_idx in range(0, self.num_vertex):
                    if self.adj_matrix[v_node_idx][target_idx] != -1 and v_node_idx != target_idx:
                        stack.append(self.get_vertex(target_idx))
        return res

    def bfs_traverse(self, start):
        """Iterative breadth-first traversal from Vertex `start` (marks visited)."""
        pending = [start]  # renamed from 'queue' to stop shadowing the stdlib module
        res = []
        while pending:
            v_node = pending.pop(0)
            if not v_node.get_visited():
                v_node.set_visited()
                res.append(v_node.get_vertex_val())
                v_node_idx = self.get_vertex_idx(v_node.get_vertex_val())
                for target_idx in range(0, self.num_vertex):
                    if self.adj_matrix[v_node_idx][target_idx] != -1 and v_node_idx != target_idx:
                        pending.append(self.get_vertex(target_idx))
        return res

    def dijkstra(self, src):
        """Single-source shortest paths from Vertex `src`; returns {value: distance}.

        Fix: the old version mutated priority entries in place after they
        were pushed into queue.PriorityQueue, which breaks the underlying
        heap invariant and can pop vertices in non-priority order. This
        version uses the standard lazy-deletion scheme instead: push a fresh
        (distance, vertex) pair on every relaxation and skip stale pops.
        """
        src_val = src.get_vertex_val()
        dis = {self.get_vertex(idx).get_vertex_val(): float('inf')
               for idx in range(0, self.num_vertex)}
        dis[src_val] = 0
        pq = queue.PriorityQueue()
        pq.put((0, src_val))
        done = set()
        while not pq.empty():
            d, u_val = pq.get()
            if u_val in done:
                continue  # stale entry superseded by a shorter distance
            done.add(u_val)
            u_idx = self.get_vertex_idx(u_val)
            for target_idx in range(0, self.num_vertex):
                weight = self.adj_matrix[u_idx][target_idx]
                if weight == -1:
                    continue
                target_val = self.get_vertex(target_idx).get_vertex_val()
                if d + weight < dis[target_val]:
                    dis[target_val] = d + weight
                    pq.put((dis[target_val], target_val))
        return dis
if __name__ == '__main__':
    g = Graph(5, is_directed=True)
    for idx, name in enumerate(['a', 'b', 'c', 'd', 'e']):
        g.set_vertex(idx, name)
    print(g.get_vertices())
    for u, v, weight in [('a', 'b', 4), ('a', 'c', 1), ('c', 'b', 2),
                         ('b', 'e', 4), ('c', 'd', 4), ('d', 'e', 4)]:
        g.add_edge(u, v, weight)
    # DFS marks every vertex visited, so BFS needs a fresh copy.
    cp_g = copy.deepcopy(g)
    g.print_matrix()
    print(g.get_edges())
    print('[dfs]', g.dfs_traverse(g.get_vertex(0)))
    print('[bfs]', cp_g.bfs_traverse(cp_g.get_vertex(0)))
    print('[dijkstra]', g.dijkstra(g.get_vertex(0)))
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,773
|
ddu365/datawhale-code
|
refs/heads/master
|
/task5/bst.py
|
class Node:
    """A binary-tree node: payload plus left/right child links."""
    def __init__(self, data):
        self.data = data
        self.left = None   # left child (values < data in a BST)
        self.right = None  # right child (values > data in a BST)
class BST:
def __init__(self):
self._root = None
def get_root(self):
return self._root
def insert(self, data):
if self._root is None:
self._root = Node(data)
return True
else:
self.insert_recur(self._root, data)
def insert_recur(self, root, data):
if data == root.data:
return False
elif data < root.data:
if root.left is None:
root.left = Node(data)
return True
else:
return self.insert_recur(root.left, data)
else:
if root.right is None:
root.right = Node(data)
return True
else:
return self.insert_recur(root.right, data)
def delete(self, data):
if self._root is None:
return None
return self.delete_recur(self._root, data)
def delete_recur(self, root, data):
if root.data == data:
if root.left:
# Find the right most leaf of the left sub-tree
left_right_most = root.left
while left_right_most.right:
left_right_most = left_right_most.right
# Attach right child to the right of that leaf
left_right_most.right = root.right
# Return left child instead of root, a.k.a delete root
return root.left
else:
return root.right
# If left or right child got deleted, the returned root is the child of the deleted node.
elif root.data > data:
root.left = self.delete_recur(root.left, data)
else:
root.right = self.delete_recur(root.right, data)
return root
def search(self, data):
return self.search_recur(self._root, data)
def search_recur(self, root, data):
if root is None:
return False
if data == root.data:
return True
elif data < root.data:
return self.search_recur(root.left, data)
else:
return self.search_recur(root.right, data)
def size(self):
return self.size_recur(self._root)
def size_recur(self, root):
if root is None:
return 0
else:
return 1 + self.size_recur(root.left) + self.size_recur(root.right)
# 前序遍历[深度遍历-栈]: 根结点 ---> 左子树 ---> 右子树
def pre_order_recur(self, root):
if root is not None:
print(str(root.data), end=' ')
self.pre_order_recur(root.left)
self.pre_order_recur(root.right)
def pre_order_iter(self, root):
node_stack = []
cur_node = root
while cur_node or node_stack:
while cur_node:
print(str(cur_node.data), end=' ')
node_stack.append(cur_node)
cur_node = cur_node.left
cur_node = node_stack.pop()
cur_node = cur_node.right
# 第二种写法
# if cur_node is not None:
# print(str(cur_node.data), end=' ')
# node_stack.append(cur_node)
# cur_node = cur_node.left
# else:
# tmp_node = node_stack.pop()
# cur_node = tmp_node.right
# 中序遍历[深度遍历-栈]: 左子树 ---> 根结点 ---> 右子树
def in_order_recur(self, root):
if root is not None:
self.in_order_recur(root.left)
print(str(root.data), end=' ')
self.in_order_recur(root.right)
def in_order_iter(self, root):
node_stack = []
cur_node = root
while cur_node or node_stack:
while cur_node:
node_stack.append(cur_node)
cur_node = cur_node.left
cur_node = node_stack.pop()
print(str(cur_node.data), end=' ')
cur_node = cur_node.right
# 第二种写法
# if cur_node is not None:
# node_stack.append(cur_node)
# cur_node = cur_node.left
# else:
# tmp_node = node_stack.pop()
# print(str(tmp_node.data), end=' ')
# cur_node = tmp_node.right
# 后序遍历[深度遍历-栈]: 左子树 ---> 右子树 ---> 根结点
def post_order_recur(self, root):
if root is not None:
self.post_order_recur(root.left)
self.post_order_recur(root.right)
print(str(root.data), end=' ')
def post_order_iter(self, root):
node_stack_forward = []
node_stack_reverse = []
cur_node = root
node_stack_forward.append(cur_node)
while node_stack_forward:
cur_node = node_stack_forward.pop()
if cur_node.left:
node_stack_forward.append(cur_node.left)
if cur_node.right:
node_stack_forward.append(cur_node.right)
node_stack_reverse.append(cur_node)
while node_stack_reverse:
cur_node = node_stack_reverse.pop()
print(str(cur_node.data), end=' ')
# 层级遍历[广度遍历-队列] 每层一个列表
def level_order2(self, root):
res = []
if root is None:
return res
node_queue = []
node_queue.append(root)
while node_queue:
level_value = []
level_node_nums = len(node_queue)
for _ in range(level_node_nums):
node = node_queue.pop(0)
if node.left is not None:
node_queue.append(node.left)
if node.right is not None:
node_queue.append(node.right)
level_value.append(node.data)
res.append(level_value)
return res
# 层级遍历[广度遍历-队列] 整个层一个列表
def level_order(self, root):
res = []
if root is None:
return res
node_queue = []
node = root
node_queue.append(node)
while node_queue:
node = node_queue.pop(0)
res.append(node.data)
if node.left:
node_queue.append(node.left)
if node.right:
node_queue.append(node.right)
return res
# 后继节点: 节点data值大于该节点data值并且值最小的节点
def successor_node(self, data):
successor = None
cur_node = self._root
while cur_node:
if data < cur_node.data:
successor = cur_node
cur_node = cur_node.left
else:
cur_node = cur_node.right
return successor.data
# 前驱节点: 节点data值小于该节点data值并且值最大的节点
def predecessor_node(self, data):
predecessor = None
cur_node = self._root
while cur_node:
if data > cur_node.data:
predecessor = cur_node
cur_node = cur_node.right
else:
cur_node = cur_node.left
return predecessor.data
# Demo driver: builds a BST, prints every traversal, then shows the effect
# of deleting an internal node (88) on the traversals.
if __name__ == '__main__':
    bst = BST()
    bst.insert(44)
    bst.insert(17)
    bst.insert(88)
    bst.insert(8)
    bst.insert(32)
    bst.insert(65)
    bst.insert(97)
    bst.insert(28)
    bst.insert(54)
    bst.insert(82)
    bst.insert(93)
    bst.insert(29)
    bst.insert(76)
    bst.insert(68)
    bst.insert(80)
    print('++++++before deleting node[88]++++++')
    print(bst.search(88))
    print(bst.size())
    print('[pre_order_recur]', end='\t')
    bst.pre_order_recur(bst.get_root())
    print()
    print('[pre_order_iter]', end='\t')
    bst.pre_order_iter(bst.get_root())
    print()
    print('[in_order_recur]', end='\t')
    bst.in_order_recur(bst.get_root())
    print()
    print('[in_order_iter]', end='\t\t')
    bst.in_order_iter(bst.get_root())
    print()
    print('[post_order_recur]', end='\t')
    bst.post_order_recur(bst.get_root())
    print()
    print('[post_order_iter]', end='\t')
    bst.post_order_iter(bst.get_root())
    print()
    print('[level_order]', end='\t')
    print(bst.level_order(bst.get_root()))
    print('[level_order2]', end='\t')
    print(bst.level_order2(bst.get_root()))
    print()
    print('[successor_node[29]]', end='\t')
    print(bst.successor_node(29))
    print()
    print('[predecessor_node[29]]', end='\t')
    print(bst.predecessor_node(29))
    bst.delete(88)
    print()
    print('++++++after deleting node[88]++++++')
    print(bst.search(88))
    print(bst.size())
    print('[pre_order_recur]', end='\t')
    bst.pre_order_recur(bst.get_root())
    print()
    print('[in_order_recur]', end='\t')
    bst.in_order_recur(bst.get_root())
    print()
    print('[post_order_recur]', end='\t')
    bst.post_order_recur(bst.get_root())
    print()
    print('[level_order]', end='\t')
    print(bst.level_order(bst.get_root()))
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,774
|
ddu365/datawhale-code
|
refs/heads/master
|
/task2/array_test.py
|
import task2.array as cusArr
if __name__ == '__main__':
    try:
        def show(arr):
            # Dump contents, length and capacity after each mutation.
            print(arr)
            print(len(arr))
            print(arr.get_capacity())

        a = cusArr.DynamicArray(2)
        a.append(4)
        a.append(5)
        show(a)
        # Appending past capacity triggers the automatic resize.
        a.append(8)
        show(a)
        # Update in place.
        a[0] = 9
        show(a)
        # Delete by index.
        del a[1]
        show(a)
        # Read.
        print(a[0])
    except Exception as e:
        print(f'test happen a error:{e}')
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,775
|
ddu365/datawhale-code
|
refs/heads/master
|
/task3/binary_search.py
|
def bs(s, t):
    """
    Standard binary search.
    :param s: sorted list
    :param t: target value
    :return: an index of t in s, or -1 when absent
    """
    lo, hi = 0, len(s) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if s[mid] == t:
            return mid
        if s[mid] < t:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
def bs_fist_ge(s, t):
    """
    Fuzzy binary search: first element greater than or equal to the target.
    :param s: sorted list
    :param t: target value
    :return: index of the first element >= t, or -1 when none exists
    """
    lo, hi = 0, len(s) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if s[mid] < t:
            lo = mid + 1
        elif mid == 0 or s[mid - 1] < t:
            return mid  # s[mid] >= t and everything before mid is < t
        else:
            hi = mid - 1
    return -1
def bs_last_le(s, t):
    """
    Fuzzy binary search: last element less than or equal to the target.
    :param s: sorted list
    :param t: target value
    :return: index of the last element <= t, or -1 when none exists
    """
    lo, hi = 0, len(s) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if s[mid] > t:
            hi = mid - 1
        elif mid == hi or s[mid + 1] > t:
            return mid  # s[mid] <= t and everything after mid is > t
        else:
            lo = mid + 1
    return -1
if __name__ == '__main__':
    nums = [-1, 0, 2, 5, 9, 12, 23]
    target = 3
    print(bs(nums, target))
    print(bs_fist_ge(nums, target))
    print(bs_last_le(nums, target))
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,776
|
ddu365/datawhale-code
|
refs/heads/master
|
/task1/stack_test.py
|
import task1.stack as stack
class Browser:
    """Browser history demo built on two stacks: back pages and forward pages."""

    def __init__(self):
        self._fs = stack.ArrayStack()       # pages we can go() forward to
        self._bs = stack.LinkedListStack()  # pages behind us; current on top

    @staticmethod
    def print_url(action, url):
        print(f'action: {action} -- current page is {url}')

    def open(self, url):
        """Visit url; it becomes the current page."""
        self._bs.push(url)
        self.print_url('open', url)

    def back(self):
        """Step back one page, moving the current page onto the forward stack."""
        if not (len(self._bs) or len(self._fs)):
            print('the action after open!!')
            return
        if len(self._bs):
            self._fs.push(self._bs.pop())
        if len(self._bs):
            self.print_url('back', self._bs.peek())
        else:
            print('no pages to back, try go or open')

    def go(self):
        """Step forward one page if back() left anything on the forward stack."""
        if not (len(self._bs) or len(self._fs)):
            print('the action after back!!')
            return
        if len(self._fs):
            self._bs.push(self._fs.pop())
            self.print_url('go', self._bs.peek())
        else:
            print('no pages to go, try back or open')

    def close(self):
        """Drop the entire history."""
        self._bs.clear()
        self._fs.clear()

    def state(self):
        """Print both stacks for debugging."""
        print(f'fs:{self._fs}')
        print(f'bs:{self._bs}')
if __name__ == '__main__':
    try:
        browser = Browser()
        browser.go()    # nothing opened yet
        browser.back()
        for url in ('www.google.com', 'www.github.com', 'www.52nlp.com',
                    'spark.apache.com', 'www.python.com'):
            browser.open(url)
        browser.back()
        browser.back()
        print('----stack state---')
        browser.state()
        browser.back()
        browser.back()
        browser.close()
        browser.state()
    except IndexError as e:
        print(f'test happen error:{e}')
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,777
|
ddu365/datawhale-code
|
refs/heads/master
|
/task3/merge_sort.py
|
"""
the merge-sort algorithm proceeds as follows:
1. Divide: If S has zero or one element, return S immediately; it is already
sorted. Otherwise (S has at least two elements), remove all the elements
from S and put them into two sequences, S1 and S2 , each containing about
half of the elements of S; that is, S1 contains the first n/2 elements of S,
and S2 contains the remaining n/2 elements.
2. Conquer: Recursively sort sequences S1 and S2 .
3. Combine: Put back the elements into S by merging the sorted sequences S1
and S2 into a sorted sequence.
"""
def merge_sort(s):
    """Sort the elements of list s in place using merge sort and return s.

    Fix: lists of length < 2 used to fall through a bare `return`, so
    merge_sort([]) and merge_sort([x]) returned None while longer inputs
    returned the list.
    """
    n = len(s)
    if n < 2:
        return s  # already sorted; keep the return type consistent
    # divide
    mid = n // 2
    s1 = s[:mid]
    s2 = s[mid:]
    # conquer (recursion)
    merge_sort(s1)
    merge_sort(s2)
    # combine (merge results back into s)
    merge(s1, s2, s)
    return s
def merge(s1, s2, s):
    """
    Merge sorted lists s1 and s2 into s (which has len(s1) + len(s2) slots)
    in place.
    Complexity of the whole sort: best/avg/worst O(n*log(n))
    """
    i = j = 0
    for k in range(len(s)):
        # Take from s1 while s2 is exhausted or s1's head is smaller.
        if j == len(s2) or (i < len(s1) and s1[i] < s2[j]):
            s[k] = s1[i]
            i += 1
        else:
            s[k] = s2[j]
            j += 1
if __name__ == '__main__':
    unsorted_list = [85, 24, 63, 45, 17, 31, 96, 50]
    print(merge_sort(unsorted_list))
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,778
|
ddu365/datawhale-code
|
refs/heads/master
|
/task3/insert_sort.py
|
def insert_sort(s):
    """
    Sort list s in place using insertion sort and return it.
    Complexity: best O(n), avg O(n^2), worst O(n^2)
    """
    for idx in range(1, len(s)):
        key = s[idx]
        pos = idx
        # Shift larger elements one slot right, then drop key into the gap.
        while pos > 0 and s[pos - 1] > key:
            s[pos] = s[pos - 1]
            pos -= 1
        s[pos] = key
    return s
if __name__ == '__main__':
    sample = [85, 24, 63, 45, 17, 31, 96, 50]
    print(insert_sort(sample))
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,779
|
ddu365/datawhale-code
|
refs/heads/master
|
/task1/linkedlist.py
|
""""
Pros
Linked Lists have constant-time insertions and deletions in any position,
in comparison, arrays require O(n) time to do the same thing.
Linked lists can continue to expand without having to specify
their size ahead of time (remember our lectures on Array sizing
form the Array Sequence section of the course!)
Cons
To access an element in a linked list, you need to take O(k) time
to go from the head of the list to the kth element.
In contrast, arrays have constant time operations to access
elements in an array.
"""
from abc import ABCMeta, abstractmethod
class AbstractLinkedList(metaclass=ABCMeta):
    """Shared interface and helpers for the linked-list variants."""

    def __init__(self):
        self._size = 0  # number of stored elements

    def is_empty(self):
        """True when the list holds no elements."""
        return not self._size

    def __str__(self):
        return " ".join(map(str, self))

    def __len__(self):
        return self._size

    @abstractmethod
    def add_first(self, value):
        """Insert value at the head."""

    @abstractmethod
    def append(self, value):
        """Insert value at the tail."""

    @abstractmethod
    def insert(self, pos, value):
        """Insert value at 0-based position pos."""

    @abstractmethod
    def remove_first(self):
        """Remove the head element."""

    @abstractmethod
    def __iter__(self):
        """Yield the stored values from head to tail."""
class DoublyLinkedListNode(object):
    """Node of a doubly linked list: a value plus next/prev links."""
    def __init__(self, value):
        self.value = value
        self.next = None  # following node, or None
        self.prev = None  # preceding node, or None
class SinglyLinkedListNode(object):
    """Node of a singly linked list; iterating a node walks to the chain's end."""

    def __init__(self, value):
        self.value = value
        self.next = None  # following node, or None

    def __iter__(self):
        node = self
        while node is not None:
            yield node.value
            node = node.next
class SinglyLinkedList(AbstractLinkedList):
    """Singly linked list with head and tail pointers for O(1) append."""

    def __init__(self):  # , node=None
        super().__init__()
        self._head = None
        self._tail = None

    # Insert at the head.
    def add_first(self, value):
        """Insert value at the head; returns the new head node."""
        n = SinglyLinkedListNode(value)
        if self.is_empty():
            self._head = n
            self._tail = n
        else:
            n.next = self._head
            self._head = n
        self._size += 1
        return self._head

    # Insert at the tail.
    def append(self, value):
        """Insert value at the tail in O(1); returns the head node."""
        n = SinglyLinkedListNode(value)
        if self.is_empty():
            self._head = n
            self._tail = n
        else:
            self._tail.next = n
            self._tail = n
        self._size += 1
        return self._head

    # Insert at an arbitrary 0-based position (clamped to the ends).
    def insert(self, pos, value):
        """Insert value before position pos; returns the head node."""
        if pos <= 0:
            return self.add_first(value)
        if pos >= len(self):
            return self.append(value)
        n = SinglyLinkedListNode(value)
        p = self._head
        for _ in range(pos - 1):  # walk to the node before the insert point
            p = p.next
        n.next = p.next
        p.next = n
        self._size += 1
        return self._head

    # Remove from the head.
    def remove_first(self):
        """Remove the head node; returns the new head (None when emptied)."""
        if self.is_empty():
            return
        self._head = self._head.next
        self._size -= 1
        if self._size == 0:
            self._tail = None  # fix: don't leave a stale tail on an empty list
        return self._head

    # Delete at an arbitrary 0-based position (clamped to the ends).
    def delete(self, pos):
        """Delete the node at position pos; returns the head node.

        Fixes: pos >= len(self) used to walk off the end and raise
        AttributeError, and deleting the tail left self._tail pointing at
        the removed node so a later append() attached nodes to it.
        """
        if pos >= len(self):
            pos = len(self) - 1  # clamp: out-of-range deletes the last node
        if pos <= 0:
            self.remove_first()
            return
        p = self._head
        for _ in range(pos - 1):  # walk to the node before the victim
            p = p.next
        removed = p.next
        p.next = removed.next
        if removed is self._tail:
            self._tail = p  # keep the tail pointer valid
        self._size -= 1
        return self._head

    def __iter__(self):
        node = self._head
        while node is not None:
            yield node.value
            node = node.next
class SinglyCircularLinkedList(SinglyLinkedList):
    """Singly linked list whose tail always points back at the head."""

    def __init__(self):
        super().__init__()

    # insert at the head
    def add_first(self, value):
        """Insert *value* at the head, keeping the tail-to-head link. O(1)."""
        n = SinglyLinkedListNode(value)
        if self.is_empty():
            self._head = n
            self._tail = n
        else:
            n.next = self._head
            self._head = n
        # Close the circle: the tail always points back at the head.
        self._tail.next = self._head
        self._size += 1

    # insert at the tail
    def append(self, value):
        """Insert *value* at the tail, keeping the tail-to-head link. O(1)."""
        n = SinglyLinkedListNode(value)
        if self.is_empty():
            self._head = n
            self._tail = n
        else:
            self._tail.next = n
            self._tail = n
        self._tail.next = self._head
        self._size += 1

    # delete at the head
    def remove_first(self):
        """Remove the head element (no-op when empty).

        Fixes the original, which on a one-element list did
        ``self._head = self._head.next`` — but in a circular list
        ``head.next`` is the head itself, so the "emptied" list kept
        yielding the removed node's value.
        """
        if self.is_empty():
            return
        if self._size == 1:
            # Last element removed: reset to a genuinely empty state.
            self._head = None
            self._tail = None
        else:
            self._head = self._head.next
            self._tail.next = self._head
        self._size -= 1

    def __iter__(self):
        """Yield each stored value exactly once, starting at the head."""
        if self._head is None:
            # Guard: the original raised AttributeError on an empty list.
            return
        node = self._head
        while True:
            yield node.value
            node = node.next
            if node is self._head:
                return
class DoublyLinkedList(AbstractLinkedList):
    """Doubly linked list with header/trailer sentinel nodes."""

    def __init__(self):
        super().__init__()
        # Sentinels never hold user data; real nodes live between them.
        self._header = DoublyLinkedListNode(None)
        self._trailer = DoublyLinkedListNode(None)
        self._header.next = self._trailer
        self._trailer.prev = self._header

    def _insert_between(self, value, pred, succ):
        # Create a node holding *value* between *pred* and *succ*. O(1).
        n = DoublyLinkedListNode(value)
        n.prev = pred
        n.next = succ
        pred.next = n
        succ.prev = n
        self._size += 1
        return n

    # insert at the head
    def add_first(self, value):
        """Insert *value* at the head. O(1).

        Fixes the original, which wrote ``header_next_node.pre = n`` —
        a typo creating a dead attribute instead of updating ``prev``.
        """
        self._insert_between(value, self._header, self._header.next)

    # insert at the tail
    def append(self, value):
        """Insert *value* at the tail. O(1).

        Fixes the original, which forgot ``n.next = self._trailer`` when
        appending to an empty list, leaving the chain broken.
        """
        self._insert_between(value, self._trailer.prev, self._trailer)

    # insert at an arbitrary zero-based position
    def insert(self, pos, value):
        """Insert *value* at zero-based position *pos* (clamped to the ends).

        Fixes the original pointer-update order: it assigned ``p.next = n``
        before ``p.next.prev = n``, which set ``n.prev = n`` and left the
        old successor's ``prev`` still pointing at the predecessor.
        """
        if pos <= 0:
            self.add_first(value)
            return
        if pos >= len(self):
            self.append(value)
            return
        p = self._header.next
        for _ in range(pos - 1):
            p = p.next
        # p is the node at index pos-1; the new node goes right after it.
        self._insert_between(value, p, p.next)

    # delete at the head
    def remove_first(self):
        """Remove the head element (no-op when empty). O(1)."""
        if self.is_empty():
            return
        node = self._header.next
        self._header.next = node.next
        node.next.prev = self._header
        self._size -= 1

    # delete at an arbitrary zero-based position
    def delete(self, pos):
        """Delete the element at zero-based position *pos*.

        Positions past the end are clamped to the last element — the
        original clamped to ``len(self)`` and ended up unlinking the
        trailer sentinel, corrupting the list.
        """
        if self.is_empty():
            return
        if pos <= 0:
            self.remove_first()
            return
        if pos >= len(self):
            pos = len(self) - 1
        p = self._header
        for _ in range(pos):
            p = p.next
        # p precedes the node being deleted.
        victim = p.next
        p.next = victim.next
        victim.next.prev = p
        self._size -= 1

    def __iter__(self):
        """Yield values head-to-tail.

        Stops at the trailer sentinel itself rather than at the first
        ``None`` value, so ``None`` becomes a storable value (the
        original terminated early on it).
        """
        node = self._header.next
        while node is not self._trailer:
            yield node.value
            node = node.next
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,780
|
ddu365/datawhale-code
|
refs/heads/master
|
/task5/priority_queue.py
|
from queue import PriorityQueue
from task5.heap import MinHeap
# Demo of the built-in PriorityQueue (heapq-based and thread safe).
que = PriorityQueue()
for item in (85, 24, 63, 45, 17, 31, 96, 50):
    que.put(item)
print('[built-in PriorityQueue]', [que.get() for _ in range(que.qsize())])
# my simple priority queue
class MyPriorityQueue:
    """Minimal priority queue backed by a MinHeap: smallest value first."""

    def __init__(self):
        self._min_heap = MinHeap()
        self._count = 0

    def put(self, value):
        """Add *value* to the queue."""
        self._min_heap.insert(value)
        self._count += 1

    def get(self):
        """Remove and return the smallest value in the queue."""
        self._count -= 1
        return self._min_heap.remove_min()

    def __len__(self):
        return self._count
# Exercise the hand-rolled priority queue with the same values.
que = MyPriorityQueue()
for item in (85, 24, 63, 45, 17, 31, 96, 50):
    que.put(item)
print('[MyPriorityQueue]', [que.get() for _ in range(len(que))])
class ListNode(object):
    """Singly linked list node used by merge_k_lists."""

    def __init__(self, x):
        self.val = x
        self.next = None


def merge_k_lists(lists):
    """Merge k sorted linked lists into one sorted linked list.

    Uses a min-priority queue of (value, sequence number, node) entries.
    The monotonically increasing sequence number breaks ties between
    equal values — the original pushed bare (value, node) pairs, which
    raised TypeError as soon as two entries had equal values, because
    ListNode instances are not orderable.

    Returns the head of the merged list, or None for empty input.
    """
    dummy = ListNode(None)
    curr = dummy
    q = PriorityQueue()
    order = 0  # tie-breaker so ListNode objects are never compared
    for node in lists:
        if node:
            q.put((node.val, order, node))
            order += 1
    while not q.empty():
        # Pop the overall smallest remaining node (top of the min-heap).
        _, _, node = q.get()
        curr.next = node
        curr = node
        if node.next:
            q.put((node.next.val, order, node.next))
            order += 1
    return dummy.next
if __name__ == '__main__':
    # Build three sorted lists (2->5->7, 3->4->9, 0->6->8) and merge them.
    lists = []
    for values in ((2, 5, 7), (3, 4, 9), (0, 6, 8)):
        head = ListNode(values[0])
        node = head
        for v in values[1:]:
            node.next = ListNode(v)
            node = node.next
        lists.append(head)
    merge_v = []
    res = merge_k_lists(lists)
    while res:
        merge_v.append(res.val)
        res = res.next
    print('[merge k lists]', merge_v)
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,781
|
ddu365/datawhale-code
|
refs/heads/master
|
/task7/recursion.py
|
from time import time
from functools import lru_cache
# 题目链接: https://leetcode-cn.com/problems/climbing-stairs/
# 递推公式: f(n) = f(n-1) + f(n-2) (f(1)=1,f(2)=2)
# 当n较大时,用递归会存在大量重复的存储与计算,效率低
# 自定义装饰器
# 参考链接: https://blog.csdn.net/mp624183768/article/details/79522231
def memo(func):
    """Memoizing decorator: cache results keyed by the call arguments.

    Improvements over the original:
    - preserves the wrapped function's metadata (__name__, __doc__) via
      functools.wraps — the original decorator clobbered them;
    - supports keyword arguments (the original only cached positional
      calls and silently dropped **kwargs support).
    Positional-only calls keep the original cache-key shape (``args``).
    """
    from functools import wraps

    cache = {}

    @wraps(func)
    def wrap(*args, **kwargs):
        # Sort kwargs items so the key is independent of call order.
        key = (args, tuple(sorted(kwargs.items()))) if kwargs else args
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    return wrap
# @memo
# def climbStairs(n):
# if n == 1:
# return 1
# elif n == 2:
# return 2
#
# return climbStairs(n - 1) + climbStairs(n - 2)
# 系统级装饰器
@lru_cache()
def climbStairs(n):
    """Number of distinct ways to climb *n* stairs taking 1 or 2 steps.

    Recurrence: f(n) = f(n-1) + f(n-2) with f(1) = 1 and f(2) = 2;
    lru_cache collapses the exponential recursion to linear time.
    """
    # Base cases coincide with n itself: f(1) = 1, f(2) = 2.
    if n in (1, 2):
        return n
    return climbStairs(n - 1) + climbStairs(n - 2)
# 带缓存的递归
# def climbStairs(n, cache=None):
# if cache is None:
# # if not cache: # 这样写有问题
# cache = {}
# if n in cache:
# return cache[n]
# if n == 1:
# return 1
# elif n == 2:
# return 2
# cache[n] = climbStairs(n-1, cache) + climbStairs(n-2, cache)
#
# return cache[n]
# 动态规划
# def climbStairs(n):
# if n == 1:
# return 1
# elif n == 2:
# return 2
# else:
# i = 1
# j = 2
# for _ in range(2, n):
# i, j = j, i+j
# return j
start = time()
# Compute BEFORE reading the clock: print arguments are evaluated left to
# right, so the original's inline `time()-start, ..., climbStairs(35)`
# measured ~0 seconds — the elapsed time was read before the call ran.
result = climbStairs(35)
print('[use time]', time() - start, '[result]', result)
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,782
|
ddu365/datawhale-code
|
refs/heads/master
|
/task2/trie.py
|
"""
Implement a trie with insert, search, and startsWith methods.
Note:
You may assume that all inputs consist of lowercase letters a-z.
"""
import collections
class TrieNode:
    """Single trie node: a child map plus an end-of-word flag."""

    def __init__(self):
        # defaultdict auto-creates a child TrieNode on first access,
        # so insertion never needs an explicit existence check.
        self.children = collections.defaultdict(TrieNode)
        # True when some inserted word ends exactly at this node.
        self.is_word = False
class Trie:
    """Prefix tree supporting insert, search and starts_with.

    Assumes inputs are strings; children are created lazily on insert
    via TrieNode's defaultdict.
    """

    def __init__(self):
        self._root = TrieNode()

    def _find(self, chars):
        # Walk *chars* from the root; return the final node, or None
        # if the path does not exist.
        node = self._root
        for ch in chars:
            node = node.children.get(ch)
            if node is None:
                return None
        return node

    def insert(self, word):
        """Add *word* to the trie."""
        node = self._root
        for ch in word:
            node = node.children[ch]
        node.is_word = True

    def search(self, word):
        """Return True iff *word* was inserted as a complete word."""
        node = self._find(word)
        return node is not None and node.is_word

    def starts_with(self, prefix):
        """Return True iff some inserted word starts with *prefix*."""
        return self._find(prefix) is not None
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,783
|
ddu365/datawhale-code
|
refs/heads/master
|
/task4/lru.py
|
from functools import lru_cache
import urllib
# 参考链接: https://leetcode-cn.com/problems/lru-cache/
class LRUCache:
    """Fixed-capacity LRU cache backed by an OrderedDict.

    ``get`` returns -1 on a miss; both ``get`` and ``put`` mark the key
    as most recently used.  Fixes two defects in the original:
    - presence was tested with ``self._cache.get(key)``, so any falsy
      stored value (0, '', None) was treated as a miss / a new key;
    - updating an existing key while the cache was full evicted an
      unrelated entry even though no new slot was needed.
    """

    def __init__(self, capacity: int):
        import collections
        self.capacity = capacity
        # Insertion order doubles as recency order: oldest entry first.
        self._cache = collections.OrderedDict()

    def get(self, key: int) -> int:
        """Return the value stored for *key*, or -1 on a miss."""
        if key not in self._cache:
            return -1
        # Refresh recency: move the hit to the most-recently-used end.
        self._cache.move_to_end(key)
        return self._cache[key]

    def put(self, key: int, value: int) -> None:
        """Insert or update *key*, evicting the LRU entry only when a
        brand-new key would exceed capacity."""
        if key in self._cache:
            self._cache.move_to_end(key)
        elif len(self._cache) >= self.capacity:
            # Evict the least recently used entry (front of the dict).
            self._cache.popitem(last=False)
        self._cache[key] = value
# python 内置lru缓存
@lru_cache(maxsize=32)
def get_pep(num):
    """Retrieve the text of a Python Enhancement Proposal.

    Returns the raw response bytes, or the string 'Not Found' on an
    HTTP error.  Results are memoized by lru_cache.
    """
    # The file only does `import urllib`, which does NOT make the
    # urllib.request / urllib.error submodules available — calling this
    # raised AttributeError.  Import the submodules explicitly.
    import urllib.error
    import urllib.request

    resource = 'http://www.python.org/dev/peps/pep-%04d/' % num
    try:
        with urllib.request.urlopen(resource) as s:
            return s.read()
    except urllib.error.HTTPError:
        return 'Not Found'
# Fetch a mix of PEP numbers; repeats are served from the lru_cache.
for n in (8, 290, 308, 320, 8, 218, 320, 279, 289, 320, 9991):
    pep = get_pep(n)
    print(n, len(pep))
print(get_pep.cache_info())
@lru_cache(maxsize=None)
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1)."""
    # lru_cache makes the naive recursion linear-time.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print([fib(n) for n in range(16)])
print(fib.cache_info())
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
23,784
|
ddu365/datawhale-code
|
refs/heads/master
|
/task3/quick_sort.py
|
"""
the quick-sort algorithm consists of the following three steps :
1. Divide: If S has at least two elements (nothing needs to be done if S has
zero or one element), select a specific element x from S, which is called the
pivot. As is common practice, choose the pivot x to be the last element in S.
Remove all the elements from S and put them into three sequences:
• L, storing the elements in S less than x
• E, storing the elements in S equal to x
• G, storing the elements in S greater than x
Of course, if the elements of S are distinct, then E holds just one element—
the pivot itself.
2. Conquer: Recursively sort sequences L and G.
3. Combine: Put back the elements into S in order by first inserting the elements
of L, then those of E, and finally those of G.
"""
def quick_sort(s):
    """Sort list *s* in place with quick-sort and return it.

    Divide: partition around the last element (the pivot) into less /
    equal / greater lists.  Conquer: recursively sort the outer two.
    Combine: refill *s* with less + equal + greater.
    Complexity: best/avg O(n log n), worst O(n^2).

    Fix: the original returned None for lists with fewer than two
    elements while returning the list otherwise; it now returns *s*
    consistently, so ``print(quick_sort(short_list))`` no longer shows
    None.
    """
    if len(s) < 2:
        return s
    # Divide: use the last element as the (arbitrary) pivot.
    p = s[-1]
    less, equal, greater = [], [], []
    while s:
        item = s.pop()
        if item < p:
            less.append(item)
        elif item > p:
            greater.append(item)
        else:
            equal.append(item)
    # Conquer: recursively sort the two outer partitions.
    quick_sort(less)
    quick_sort(greater)
    # Combine: rebuild the original list object in sorted order.
    s.extend(less)
    s.extend(equal)
    s.extend(greater)
    return s
# improved method (saving memory)
def inplace_quick_sort(s):
if len(s) < 2:
return s
return inplace_quick_sort_recur(s, 0, len(s)-1)
def inplace_quick_sort_recur(s, l, r):
if l >= r:
return
p = s[r]
left = l
right = r-1
while left <= right:
while left <= right and s[left] < p:
left += 1
while left <= right and s[right] > p:
right -= 1
if left <= right:
s[left], s[right] = s[right], s[left]
left, right = left + 1, right - 1
s[left], s[r] = s[r], s[left]
inplace_quick_sort_recur(s, l, left-1)
inplace_quick_sort_recur(s, left+1, r)
return s
if __name__ == '__main__':
    unsorted_values = [85, 24, 63, 45, 17, 31, 96, 50]
    # quick_sort empties and refills the list in place, so the same list
    # object can be handed to inplace_quick_sort afterwards.
    print(quick_sort(unsorted_values))
    print(inplace_quick_sort(unsorted_values))
|
{"/task1/linkedlist_test.py": ["/task1/linkedlist.py"], "/task2/array_test.py": ["/task2/array.py"], "/task5/priority_queue.py": ["/task5/heap.py"], "/task2/trie_test.py": ["/task2/trie.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.