repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.cleanup_event_loop
|
python
|
def cleanup_event_loop(self):
for task in asyncio.Task.all_tasks(loop=self.loop):
if self.debug:
warnings.warn('Cancelling task: %s' % task)
task._log_destroy_pending = False
task.cancel()
self.loop.close()
self.loop.set_exception_handler(self.loop_exception_handler_save)
self.loop_exception_handler_save = None
self.loop_policy = None
self.loop = None
|
Cleanup an event loop and close it down forever.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L99-L110
| null |
class IOCell(object):
""" A consolidated multi-level bundle of IO operations. This is a useful
facility when doing tiered IO calls such as http requests to get a list
of things and then a fanout of http requests on each of those things and
so forth. The aim of this code is to provide simplified inputs and
outputs to what is otherwise a complex arrangement of IO interactions
and dependencies. Namely the users of this code will use the generator
output to they can iterate over the stream of finalized results as they
are made available for export.
Mechanically this produces a classic generator to the outside world that
internally uses an async event loop to coordinate concurrent tasks.
The tasks may be used to to cause further activity on the output stream.
That is, the initial work orders may be tasks used to seed more work.
Think of this as a portable IO loop that's wrapped up and managed for the
context of a single generator to the outside world. The calling code
will work in normal blocking style. """
Tier = tier.Tier
def __init__(self, coord='noop', debug=DEBUG):
if isinstance(coord, coordination.AbstractCellCoordinator):
self.coord = coord
else:
self.coord = self.make_coord(coord)
self.debug = debug
self.output_buffer = collections.deque()
self.pending_exception = None
self.closed = False
self.tiers = []
self.tiers_coro_map = {}
self.cleaners = []
self.finalized = False
self.init_event_loop()
def init_event_loop(self):
""" Every cell should have its own event loop for proper containment.
The type of event loop is not so important however. """
self.loop = asyncio.new_event_loop()
self.loop.set_debug(self.debug)
if hasattr(self.loop, '_set_coroutine_wrapper'):
self.loop._set_coroutine_wrapper(self.debug)
elif self.debug:
warnings.warn("Cannot set debug on loop: %s" % self.loop)
self.loop_policy = IOCellEventLoopPolicy(self.loop)
if not hasattr(self.loop, '_exception_handler'):
warnings.warn("Cannot save exception handler for: %s" % self.loop)
self.loop_exception_handler_save = None
else:
self.loop_exception_handler_save = self.loop._exception_handler
self.loop.set_exception_handler(self.loop_exception_handler)
def make_coord(self, name):
return coordination.coordinators[name]()
def done(self):
return all(x.done() for x in asyncio.Task.all_tasks(loop=self.loop))
def assertNotFinalized(self):
""" Ensure the cell is not used more than once. """
if self.finalized:
raise RuntimeError('Already finalized: %s' % self)
def add_tier(self, coro, **kwargs):
""" Add a coroutine to the cell as a task tier. The source can be a
single value or a list of either `Tier` types or coroutine functions
already added to a `Tier` via `add_tier`. """
self.assertNotFinalized()
assert asyncio.iscoroutinefunction(coro)
tier = self.Tier(self, coro, **kwargs)
self.tiers.append(tier)
self.tiers_coro_map[coro] = tier
return tier
def append_tier(self, coro, **kwargs):
""" Implicitly source from the tail tier like a pipe. """
source = self.tiers[-1] if self.tiers else None
return self.add_tier(coro, source=source, **kwargs)
def add_cleaner(self, coro):
""" Add a coroutine to run after the cell is done. This is for the
user to perform any cleanup such as closing sockets. """
self.assertNotFinalized()
self.cleaners.append(coro)
def tier(self, *args, append=True, source=None, **kwargs):
""" Function decorator for a tier coroutine. If the function being
decorated is not already a coroutine function it will be wrapped. """
if len(args) == 1 and not kwargs and callable(args[0]):
raise TypeError('Uncalled decorator syntax is invalid')
def decorator(coro):
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
if append and source is None:
self.append_tier(coro, *args, **kwargs)
else:
self.add_tier(coro, *args, source=source, **kwargs)
return coro
return decorator
def cleaner(self, coro):
""" Function decorator for a cleanup coroutine. """
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
self.add_cleaner(coro)
return coro
def finalize(self):
""" Look at our tiers and setup the final data flow. Once this is run
a cell can not be modified again. """
self.assertNotFinalized()
starters = []
finishers = []
for x in self.tiers:
if not x.sources:
starters.append(x)
if not x.dests:
finishers.append(x)
self.add_tier(self.output_feed, source=finishers)
self.coord.setup_wrap(self)
self.finalized = True
return starters
@asyncio.coroutine
def output_feed(self, route, *args):
""" Simplify arguments and store them in the `output` buffer for
yielding to the user. """
self.output_buffer.extend(args)
def loop_exception_handler(self, loop, context):
exc = context.get('exception')
if exc:
if not self.pending_exception:
self.pending_exception = exc
elif self.loop_exception_handler_save:
return self.loop_exception_handler_save(loop, context)
else:
return self.loop.default_exception_handler(context)
def output(self):
""" Produce a classic generator for this cell's final results. """
starters = self.finalize()
try:
yield from self._output(starters)
finally:
self.close()
def event_loop(self):
""" Run the event loop once. """
if hasattr(self.loop, '._run_once'):
self.loop._thread_id = threading.get_ident()
try:
self.loop._run_once()
finally:
self.loop._thread_id = None
else:
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
def _output(self, starters):
for x in starters:
self.loop.create_task(x.enqueue_task(None))
while True:
while self.output_buffer:
yield self.output_buffer.popleft()
if not self.done():
with self.loop_policy:
self.event_loop()
if self.pending_exception:
exc = self.pending_exception
self.pending_exception = None
try:
raise exc
finally:
del exc
else:
flushed = False
for t in self.tiers:
if t.buffer:
self.loop.create_task(t.flush())
flushed = True
if not flushed and not self.output_buffer:
break
with self.loop_policy:
self.loop.run_until_complete(self.clean())
@asyncio.coroutine
def clean(self):
""" Run all of the cleaners added by the user. """
if self.cleaners:
yield from asyncio.wait([x() for x in self.cleaners],
loop=self.loop)
def close(self):
if self.closed:
return
self.closed = True
if self.finalized:
self.coord.close_wrap()
self.cleanup_event_loop()
for x in self.tiers:
x.close()
self.tiers = None
self.tiers_coro_map = None
self.cleaners = None
self.coord = None
def __iter__(self):
return self.output()
def __del__(self):
self.close()
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.add_tier
|
python
|
def add_tier(self, coro, **kwargs):
self.assertNotFinalized()
assert asyncio.iscoroutinefunction(coro)
tier = self.Tier(self, coro, **kwargs)
self.tiers.append(tier)
self.tiers_coro_map[coro] = tier
return tier
|
Add a coroutine to the cell as a task tier. The source can be a
single value or a list of either `Tier` types or coroutine functions
already added to a `Tier` via `add_tier`.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L123-L132
|
[
"def assertNotFinalized(self):\n \"\"\" Ensure the cell is not used more than once. \"\"\"\n if self.finalized:\n raise RuntimeError('Already finalized: %s' % self)\n"
] |
class IOCell(object):
""" A consolidated multi-level bundle of IO operations. This is a useful
facility when doing tiered IO calls such as http requests to get a list
of things and then a fanout of http requests on each of those things and
so forth. The aim of this code is to provide simplified inputs and
outputs to what is otherwise a complex arrangement of IO interactions
and dependencies. Namely the users of this code will use the generator
output to they can iterate over the stream of finalized results as they
are made available for export.
Mechanically this produces a classic generator to the outside world that
internally uses an async event loop to coordinate concurrent tasks.
The tasks may be used to to cause further activity on the output stream.
That is, the initial work orders may be tasks used to seed more work.
Think of this as a portable IO loop that's wrapped up and managed for the
context of a single generator to the outside world. The calling code
will work in normal blocking style. """
Tier = tier.Tier
def __init__(self, coord='noop', debug=DEBUG):
if isinstance(coord, coordination.AbstractCellCoordinator):
self.coord = coord
else:
self.coord = self.make_coord(coord)
self.debug = debug
self.output_buffer = collections.deque()
self.pending_exception = None
self.closed = False
self.tiers = []
self.tiers_coro_map = {}
self.cleaners = []
self.finalized = False
self.init_event_loop()
def init_event_loop(self):
""" Every cell should have its own event loop for proper containment.
The type of event loop is not so important however. """
self.loop = asyncio.new_event_loop()
self.loop.set_debug(self.debug)
if hasattr(self.loop, '_set_coroutine_wrapper'):
self.loop._set_coroutine_wrapper(self.debug)
elif self.debug:
warnings.warn("Cannot set debug on loop: %s" % self.loop)
self.loop_policy = IOCellEventLoopPolicy(self.loop)
if not hasattr(self.loop, '_exception_handler'):
warnings.warn("Cannot save exception handler for: %s" % self.loop)
self.loop_exception_handler_save = None
else:
self.loop_exception_handler_save = self.loop._exception_handler
self.loop.set_exception_handler(self.loop_exception_handler)
def cleanup_event_loop(self):
""" Cleanup an event loop and close it down forever. """
for task in asyncio.Task.all_tasks(loop=self.loop):
if self.debug:
warnings.warn('Cancelling task: %s' % task)
task._log_destroy_pending = False
task.cancel()
self.loop.close()
self.loop.set_exception_handler(self.loop_exception_handler_save)
self.loop_exception_handler_save = None
self.loop_policy = None
self.loop = None
def make_coord(self, name):
return coordination.coordinators[name]()
def done(self):
return all(x.done() for x in asyncio.Task.all_tasks(loop=self.loop))
def assertNotFinalized(self):
""" Ensure the cell is not used more than once. """
if self.finalized:
raise RuntimeError('Already finalized: %s' % self)
def append_tier(self, coro, **kwargs):
""" Implicitly source from the tail tier like a pipe. """
source = self.tiers[-1] if self.tiers else None
return self.add_tier(coro, source=source, **kwargs)
def add_cleaner(self, coro):
""" Add a coroutine to run after the cell is done. This is for the
user to perform any cleanup such as closing sockets. """
self.assertNotFinalized()
self.cleaners.append(coro)
def tier(self, *args, append=True, source=None, **kwargs):
""" Function decorator for a tier coroutine. If the function being
decorated is not already a coroutine function it will be wrapped. """
if len(args) == 1 and not kwargs and callable(args[0]):
raise TypeError('Uncalled decorator syntax is invalid')
def decorator(coro):
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
if append and source is None:
self.append_tier(coro, *args, **kwargs)
else:
self.add_tier(coro, *args, source=source, **kwargs)
return coro
return decorator
def cleaner(self, coro):
""" Function decorator for a cleanup coroutine. """
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
self.add_cleaner(coro)
return coro
def finalize(self):
""" Look at our tiers and setup the final data flow. Once this is run
a cell can not be modified again. """
self.assertNotFinalized()
starters = []
finishers = []
for x in self.tiers:
if not x.sources:
starters.append(x)
if not x.dests:
finishers.append(x)
self.add_tier(self.output_feed, source=finishers)
self.coord.setup_wrap(self)
self.finalized = True
return starters
@asyncio.coroutine
def output_feed(self, route, *args):
""" Simplify arguments and store them in the `output` buffer for
yielding to the user. """
self.output_buffer.extend(args)
def loop_exception_handler(self, loop, context):
exc = context.get('exception')
if exc:
if not self.pending_exception:
self.pending_exception = exc
elif self.loop_exception_handler_save:
return self.loop_exception_handler_save(loop, context)
else:
return self.loop.default_exception_handler(context)
def output(self):
""" Produce a classic generator for this cell's final results. """
starters = self.finalize()
try:
yield from self._output(starters)
finally:
self.close()
def event_loop(self):
""" Run the event loop once. """
if hasattr(self.loop, '._run_once'):
self.loop._thread_id = threading.get_ident()
try:
self.loop._run_once()
finally:
self.loop._thread_id = None
else:
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
def _output(self, starters):
for x in starters:
self.loop.create_task(x.enqueue_task(None))
while True:
while self.output_buffer:
yield self.output_buffer.popleft()
if not self.done():
with self.loop_policy:
self.event_loop()
if self.pending_exception:
exc = self.pending_exception
self.pending_exception = None
try:
raise exc
finally:
del exc
else:
flushed = False
for t in self.tiers:
if t.buffer:
self.loop.create_task(t.flush())
flushed = True
if not flushed and not self.output_buffer:
break
with self.loop_policy:
self.loop.run_until_complete(self.clean())
@asyncio.coroutine
def clean(self):
""" Run all of the cleaners added by the user. """
if self.cleaners:
yield from asyncio.wait([x() for x in self.cleaners],
loop=self.loop)
def close(self):
if self.closed:
return
self.closed = True
if self.finalized:
self.coord.close_wrap()
self.cleanup_event_loop()
for x in self.tiers:
x.close()
self.tiers = None
self.tiers_coro_map = None
self.cleaners = None
self.coord = None
def __iter__(self):
return self.output()
def __del__(self):
self.close()
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.append_tier
|
python
|
def append_tier(self, coro, **kwargs):
source = self.tiers[-1] if self.tiers else None
return self.add_tier(coro, source=source, **kwargs)
|
Implicitly source from the tail tier like a pipe.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L134-L137
|
[
"def add_tier(self, coro, **kwargs):\n \"\"\" Add a coroutine to the cell as a task tier. The source can be a\n single value or a list of either `Tier` types or coroutine functions\n already added to a `Tier` via `add_tier`. \"\"\"\n self.assertNotFinalized()\n assert asyncio.iscoroutinefunction(coro)\n tier = self.Tier(self, coro, **kwargs)\n self.tiers.append(tier)\n self.tiers_coro_map[coro] = tier\n return tier\n"
] |
class IOCell(object):
""" A consolidated multi-level bundle of IO operations. This is a useful
facility when doing tiered IO calls such as http requests to get a list
of things and then a fanout of http requests on each of those things and
so forth. The aim of this code is to provide simplified inputs and
outputs to what is otherwise a complex arrangement of IO interactions
and dependencies. Namely the users of this code will use the generator
output to they can iterate over the stream of finalized results as they
are made available for export.
Mechanically this produces a classic generator to the outside world that
internally uses an async event loop to coordinate concurrent tasks.
The tasks may be used to to cause further activity on the output stream.
That is, the initial work orders may be tasks used to seed more work.
Think of this as a portable IO loop that's wrapped up and managed for the
context of a single generator to the outside world. The calling code
will work in normal blocking style. """
Tier = tier.Tier
def __init__(self, coord='noop', debug=DEBUG):
if isinstance(coord, coordination.AbstractCellCoordinator):
self.coord = coord
else:
self.coord = self.make_coord(coord)
self.debug = debug
self.output_buffer = collections.deque()
self.pending_exception = None
self.closed = False
self.tiers = []
self.tiers_coro_map = {}
self.cleaners = []
self.finalized = False
self.init_event_loop()
def init_event_loop(self):
""" Every cell should have its own event loop for proper containment.
The type of event loop is not so important however. """
self.loop = asyncio.new_event_loop()
self.loop.set_debug(self.debug)
if hasattr(self.loop, '_set_coroutine_wrapper'):
self.loop._set_coroutine_wrapper(self.debug)
elif self.debug:
warnings.warn("Cannot set debug on loop: %s" % self.loop)
self.loop_policy = IOCellEventLoopPolicy(self.loop)
if not hasattr(self.loop, '_exception_handler'):
warnings.warn("Cannot save exception handler for: %s" % self.loop)
self.loop_exception_handler_save = None
else:
self.loop_exception_handler_save = self.loop._exception_handler
self.loop.set_exception_handler(self.loop_exception_handler)
def cleanup_event_loop(self):
""" Cleanup an event loop and close it down forever. """
for task in asyncio.Task.all_tasks(loop=self.loop):
if self.debug:
warnings.warn('Cancelling task: %s' % task)
task._log_destroy_pending = False
task.cancel()
self.loop.close()
self.loop.set_exception_handler(self.loop_exception_handler_save)
self.loop_exception_handler_save = None
self.loop_policy = None
self.loop = None
def make_coord(self, name):
return coordination.coordinators[name]()
def done(self):
return all(x.done() for x in asyncio.Task.all_tasks(loop=self.loop))
def assertNotFinalized(self):
""" Ensure the cell is not used more than once. """
if self.finalized:
raise RuntimeError('Already finalized: %s' % self)
def add_tier(self, coro, **kwargs):
""" Add a coroutine to the cell as a task tier. The source can be a
single value or a list of either `Tier` types or coroutine functions
already added to a `Tier` via `add_tier`. """
self.assertNotFinalized()
assert asyncio.iscoroutinefunction(coro)
tier = self.Tier(self, coro, **kwargs)
self.tiers.append(tier)
self.tiers_coro_map[coro] = tier
return tier
def add_cleaner(self, coro):
""" Add a coroutine to run after the cell is done. This is for the
user to perform any cleanup such as closing sockets. """
self.assertNotFinalized()
self.cleaners.append(coro)
def tier(self, *args, append=True, source=None, **kwargs):
""" Function decorator for a tier coroutine. If the function being
decorated is not already a coroutine function it will be wrapped. """
if len(args) == 1 and not kwargs and callable(args[0]):
raise TypeError('Uncalled decorator syntax is invalid')
def decorator(coro):
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
if append and source is None:
self.append_tier(coro, *args, **kwargs)
else:
self.add_tier(coro, *args, source=source, **kwargs)
return coro
return decorator
def cleaner(self, coro):
""" Function decorator for a cleanup coroutine. """
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
self.add_cleaner(coro)
return coro
def finalize(self):
""" Look at our tiers and setup the final data flow. Once this is run
a cell can not be modified again. """
self.assertNotFinalized()
starters = []
finishers = []
for x in self.tiers:
if not x.sources:
starters.append(x)
if not x.dests:
finishers.append(x)
self.add_tier(self.output_feed, source=finishers)
self.coord.setup_wrap(self)
self.finalized = True
return starters
@asyncio.coroutine
def output_feed(self, route, *args):
""" Simplify arguments and store them in the `output` buffer for
yielding to the user. """
self.output_buffer.extend(args)
def loop_exception_handler(self, loop, context):
exc = context.get('exception')
if exc:
if not self.pending_exception:
self.pending_exception = exc
elif self.loop_exception_handler_save:
return self.loop_exception_handler_save(loop, context)
else:
return self.loop.default_exception_handler(context)
def output(self):
""" Produce a classic generator for this cell's final results. """
starters = self.finalize()
try:
yield from self._output(starters)
finally:
self.close()
def event_loop(self):
""" Run the event loop once. """
if hasattr(self.loop, '._run_once'):
self.loop._thread_id = threading.get_ident()
try:
self.loop._run_once()
finally:
self.loop._thread_id = None
else:
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
def _output(self, starters):
for x in starters:
self.loop.create_task(x.enqueue_task(None))
while True:
while self.output_buffer:
yield self.output_buffer.popleft()
if not self.done():
with self.loop_policy:
self.event_loop()
if self.pending_exception:
exc = self.pending_exception
self.pending_exception = None
try:
raise exc
finally:
del exc
else:
flushed = False
for t in self.tiers:
if t.buffer:
self.loop.create_task(t.flush())
flushed = True
if not flushed and not self.output_buffer:
break
with self.loop_policy:
self.loop.run_until_complete(self.clean())
@asyncio.coroutine
def clean(self):
""" Run all of the cleaners added by the user. """
if self.cleaners:
yield from asyncio.wait([x() for x in self.cleaners],
loop=self.loop)
def close(self):
if self.closed:
return
self.closed = True
if self.finalized:
self.coord.close_wrap()
self.cleanup_event_loop()
for x in self.tiers:
x.close()
self.tiers = None
self.tiers_coro_map = None
self.cleaners = None
self.coord = None
def __iter__(self):
return self.output()
def __del__(self):
self.close()
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.tier
|
python
|
def tier(self, *args, append=True, source=None, **kwargs):
if len(args) == 1 and not kwargs and callable(args[0]):
raise TypeError('Uncalled decorator syntax is invalid')
def decorator(coro):
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
if append and source is None:
self.append_tier(coro, *args, **kwargs)
else:
self.add_tier(coro, *args, source=source, **kwargs)
return coro
return decorator
|
Function decorator for a tier coroutine. If the function being
decorated is not already a coroutine function it will be wrapped.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L145-L159
| null |
class IOCell(object):
""" A consolidated multi-level bundle of IO operations. This is a useful
facility when doing tiered IO calls such as http requests to get a list
of things and then a fanout of http requests on each of those things and
so forth. The aim of this code is to provide simplified inputs and
outputs to what is otherwise a complex arrangement of IO interactions
and dependencies. Namely the users of this code will use the generator
output to they can iterate over the stream of finalized results as they
are made available for export.
Mechanically this produces a classic generator to the outside world that
internally uses an async event loop to coordinate concurrent tasks.
The tasks may be used to to cause further activity on the output stream.
That is, the initial work orders may be tasks used to seed more work.
Think of this as a portable IO loop that's wrapped up and managed for the
context of a single generator to the outside world. The calling code
will work in normal blocking style. """
Tier = tier.Tier
def __init__(self, coord='noop', debug=DEBUG):
if isinstance(coord, coordination.AbstractCellCoordinator):
self.coord = coord
else:
self.coord = self.make_coord(coord)
self.debug = debug
self.output_buffer = collections.deque()
self.pending_exception = None
self.closed = False
self.tiers = []
self.tiers_coro_map = {}
self.cleaners = []
self.finalized = False
self.init_event_loop()
def init_event_loop(self):
""" Every cell should have its own event loop for proper containment.
The type of event loop is not so important however. """
self.loop = asyncio.new_event_loop()
self.loop.set_debug(self.debug)
if hasattr(self.loop, '_set_coroutine_wrapper'):
self.loop._set_coroutine_wrapper(self.debug)
elif self.debug:
warnings.warn("Cannot set debug on loop: %s" % self.loop)
self.loop_policy = IOCellEventLoopPolicy(self.loop)
if not hasattr(self.loop, '_exception_handler'):
warnings.warn("Cannot save exception handler for: %s" % self.loop)
self.loop_exception_handler_save = None
else:
self.loop_exception_handler_save = self.loop._exception_handler
self.loop.set_exception_handler(self.loop_exception_handler)
def cleanup_event_loop(self):
""" Cleanup an event loop and close it down forever. """
for task in asyncio.Task.all_tasks(loop=self.loop):
if self.debug:
warnings.warn('Cancelling task: %s' % task)
task._log_destroy_pending = False
task.cancel()
self.loop.close()
self.loop.set_exception_handler(self.loop_exception_handler_save)
self.loop_exception_handler_save = None
self.loop_policy = None
self.loop = None
def make_coord(self, name):
return coordination.coordinators[name]()
def done(self):
return all(x.done() for x in asyncio.Task.all_tasks(loop=self.loop))
def assertNotFinalized(self):
""" Ensure the cell is not used more than once. """
if self.finalized:
raise RuntimeError('Already finalized: %s' % self)
def add_tier(self, coro, **kwargs):
""" Add a coroutine to the cell as a task tier. The source can be a
single value or a list of either `Tier` types or coroutine functions
already added to a `Tier` via `add_tier`. """
self.assertNotFinalized()
assert asyncio.iscoroutinefunction(coro)
tier = self.Tier(self, coro, **kwargs)
self.tiers.append(tier)
self.tiers_coro_map[coro] = tier
return tier
def append_tier(self, coro, **kwargs):
""" Implicitly source from the tail tier like a pipe. """
source = self.tiers[-1] if self.tiers else None
return self.add_tier(coro, source=source, **kwargs)
def add_cleaner(self, coro):
""" Add a coroutine to run after the cell is done. This is for the
user to perform any cleanup such as closing sockets. """
self.assertNotFinalized()
self.cleaners.append(coro)
def cleaner(self, coro):
""" Function decorator for a cleanup coroutine. """
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
self.add_cleaner(coro)
return coro
def finalize(self):
""" Look at our tiers and setup the final data flow. Once this is run
a cell can not be modified again. """
self.assertNotFinalized()
starters = []
finishers = []
for x in self.tiers:
if not x.sources:
starters.append(x)
if not x.dests:
finishers.append(x)
self.add_tier(self.output_feed, source=finishers)
self.coord.setup_wrap(self)
self.finalized = True
return starters
@asyncio.coroutine
def output_feed(self, route, *args):
""" Simplify arguments and store them in the `output` buffer for
yielding to the user. """
self.output_buffer.extend(args)
def loop_exception_handler(self, loop, context):
exc = context.get('exception')
if exc:
if not self.pending_exception:
self.pending_exception = exc
elif self.loop_exception_handler_save:
return self.loop_exception_handler_save(loop, context)
else:
return self.loop.default_exception_handler(context)
def output(self):
""" Produce a classic generator for this cell's final results. """
starters = self.finalize()
try:
yield from self._output(starters)
finally:
self.close()
def event_loop(self):
""" Run the event loop once. """
if hasattr(self.loop, '._run_once'):
self.loop._thread_id = threading.get_ident()
try:
self.loop._run_once()
finally:
self.loop._thread_id = None
else:
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
def _output(self, starters):
for x in starters:
self.loop.create_task(x.enqueue_task(None))
while True:
while self.output_buffer:
yield self.output_buffer.popleft()
if not self.done():
with self.loop_policy:
self.event_loop()
if self.pending_exception:
exc = self.pending_exception
self.pending_exception = None
try:
raise exc
finally:
del exc
else:
flushed = False
for t in self.tiers:
if t.buffer:
self.loop.create_task(t.flush())
flushed = True
if not flushed and not self.output_buffer:
break
with self.loop_policy:
self.loop.run_until_complete(self.clean())
@asyncio.coroutine
def clean(self):
""" Run all of the cleaners added by the user. """
if self.cleaners:
yield from asyncio.wait([x() for x in self.cleaners],
loop=self.loop)
def close(self):
if self.closed:
return
self.closed = True
if self.finalized:
self.coord.close_wrap()
self.cleanup_event_loop()
for x in self.tiers:
x.close()
self.tiers = None
self.tiers_coro_map = None
self.cleaners = None
self.coord = None
def __iter__(self):
return self.output()
def __del__(self):
self.close()
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.cleaner
|
python
|
def cleaner(self, coro):
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
self.add_cleaner(coro)
return coro
|
Function decorator for a cleanup coroutine.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L161-L166
|
[
"def add_cleaner(self, coro):\n \"\"\" Add a coroutine to run after the cell is done. This is for the\n user to perform any cleanup such as closing sockets. \"\"\"\n self.assertNotFinalized()\n self.cleaners.append(coro)\n"
] |
class IOCell(object):
""" A consolidated multi-level bundle of IO operations. This is a useful
facility when doing tiered IO calls such as http requests to get a list
of things and then a fanout of http requests on each of those things and
so forth. The aim of this code is to provide simplified inputs and
outputs to what is otherwise a complex arrangement of IO interactions
and dependencies. Namely the users of this code will use the generator
output to they can iterate over the stream of finalized results as they
are made available for export.
Mechanically this produces a classic generator to the outside world that
internally uses an async event loop to coordinate concurrent tasks.
The tasks may be used to to cause further activity on the output stream.
That is, the initial work orders may be tasks used to seed more work.
Think of this as a portable IO loop that's wrapped up and managed for the
context of a single generator to the outside world. The calling code
will work in normal blocking style. """
Tier = tier.Tier
def __init__(self, coord='noop', debug=DEBUG):
if isinstance(coord, coordination.AbstractCellCoordinator):
self.coord = coord
else:
self.coord = self.make_coord(coord)
self.debug = debug
self.output_buffer = collections.deque()
self.pending_exception = None
self.closed = False
self.tiers = []
self.tiers_coro_map = {}
self.cleaners = []
self.finalized = False
self.init_event_loop()
def init_event_loop(self):
""" Every cell should have its own event loop for proper containment.
The type of event loop is not so important however. """
self.loop = asyncio.new_event_loop()
self.loop.set_debug(self.debug)
if hasattr(self.loop, '_set_coroutine_wrapper'):
self.loop._set_coroutine_wrapper(self.debug)
elif self.debug:
warnings.warn("Cannot set debug on loop: %s" % self.loop)
self.loop_policy = IOCellEventLoopPolicy(self.loop)
if not hasattr(self.loop, '_exception_handler'):
warnings.warn("Cannot save exception handler for: %s" % self.loop)
self.loop_exception_handler_save = None
else:
self.loop_exception_handler_save = self.loop._exception_handler
self.loop.set_exception_handler(self.loop_exception_handler)
def cleanup_event_loop(self):
""" Cleanup an event loop and close it down forever. """
for task in asyncio.Task.all_tasks(loop=self.loop):
if self.debug:
warnings.warn('Cancelling task: %s' % task)
task._log_destroy_pending = False
task.cancel()
self.loop.close()
self.loop.set_exception_handler(self.loop_exception_handler_save)
self.loop_exception_handler_save = None
self.loop_policy = None
self.loop = None
def make_coord(self, name):
return coordination.coordinators[name]()
def done(self):
return all(x.done() for x in asyncio.Task.all_tasks(loop=self.loop))
def assertNotFinalized(self):
""" Ensure the cell is not used more than once. """
if self.finalized:
raise RuntimeError('Already finalized: %s' % self)
def add_tier(self, coro, **kwargs):
""" Add a coroutine to the cell as a task tier. The source can be a
single value or a list of either `Tier` types or coroutine functions
already added to a `Tier` via `add_tier`. """
self.assertNotFinalized()
assert asyncio.iscoroutinefunction(coro)
tier = self.Tier(self, coro, **kwargs)
self.tiers.append(tier)
self.tiers_coro_map[coro] = tier
return tier
def append_tier(self, coro, **kwargs):
""" Implicitly source from the tail tier like a pipe. """
source = self.tiers[-1] if self.tiers else None
return self.add_tier(coro, source=source, **kwargs)
def add_cleaner(self, coro):
""" Add a coroutine to run after the cell is done. This is for the
user to perform any cleanup such as closing sockets. """
self.assertNotFinalized()
self.cleaners.append(coro)
def tier(self, *args, append=True, source=None, **kwargs):
""" Function decorator for a tier coroutine. If the function being
decorated is not already a coroutine function it will be wrapped. """
if len(args) == 1 and not kwargs and callable(args[0]):
raise TypeError('Uncalled decorator syntax is invalid')
def decorator(coro):
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
if append and source is None:
self.append_tier(coro, *args, **kwargs)
else:
self.add_tier(coro, *args, source=source, **kwargs)
return coro
return decorator
def finalize(self):
""" Look at our tiers and setup the final data flow. Once this is run
a cell can not be modified again. """
self.assertNotFinalized()
starters = []
finishers = []
for x in self.tiers:
if not x.sources:
starters.append(x)
if not x.dests:
finishers.append(x)
self.add_tier(self.output_feed, source=finishers)
self.coord.setup_wrap(self)
self.finalized = True
return starters
@asyncio.coroutine
def output_feed(self, route, *args):
""" Simplify arguments and store them in the `output` buffer for
yielding to the user. """
self.output_buffer.extend(args)
def loop_exception_handler(self, loop, context):
exc = context.get('exception')
if exc:
if not self.pending_exception:
self.pending_exception = exc
elif self.loop_exception_handler_save:
return self.loop_exception_handler_save(loop, context)
else:
return self.loop.default_exception_handler(context)
def output(self):
""" Produce a classic generator for this cell's final results. """
starters = self.finalize()
try:
yield from self._output(starters)
finally:
self.close()
def event_loop(self):
""" Run the event loop once. """
if hasattr(self.loop, '._run_once'):
self.loop._thread_id = threading.get_ident()
try:
self.loop._run_once()
finally:
self.loop._thread_id = None
else:
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
def _output(self, starters):
for x in starters:
self.loop.create_task(x.enqueue_task(None))
while True:
while self.output_buffer:
yield self.output_buffer.popleft()
if not self.done():
with self.loop_policy:
self.event_loop()
if self.pending_exception:
exc = self.pending_exception
self.pending_exception = None
try:
raise exc
finally:
del exc
else:
flushed = False
for t in self.tiers:
if t.buffer:
self.loop.create_task(t.flush())
flushed = True
if not flushed and not self.output_buffer:
break
with self.loop_policy:
self.loop.run_until_complete(self.clean())
@asyncio.coroutine
def clean(self):
""" Run all of the cleaners added by the user. """
if self.cleaners:
yield from asyncio.wait([x() for x in self.cleaners],
loop=self.loop)
def close(self):
if self.closed:
return
self.closed = True
if self.finalized:
self.coord.close_wrap()
self.cleanup_event_loop()
for x in self.tiers:
x.close()
self.tiers = None
self.tiers_coro_map = None
self.cleaners = None
self.coord = None
def __iter__(self):
return self.output()
def __del__(self):
self.close()
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.finalize
|
python
|
def finalize(self):
self.assertNotFinalized()
starters = []
finishers = []
for x in self.tiers:
if not x.sources:
starters.append(x)
if not x.dests:
finishers.append(x)
self.add_tier(self.output_feed, source=finishers)
self.coord.setup_wrap(self)
self.finalized = True
return starters
|
Look at our tiers and setup the final data flow. Once this is run
a cell can not be modified again.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L168-L182
|
[
"def assertNotFinalized(self):\n \"\"\" Ensure the cell is not used more than once. \"\"\"\n if self.finalized:\n raise RuntimeError('Already finalized: %s' % self)\n",
"def add_tier(self, coro, **kwargs):\n \"\"\" Add a coroutine to the cell as a task tier. The source can be a\n single value or a list of either `Tier` types or coroutine functions\n already added to a `Tier` via `add_tier`. \"\"\"\n self.assertNotFinalized()\n assert asyncio.iscoroutinefunction(coro)\n tier = self.Tier(self, coro, **kwargs)\n self.tiers.append(tier)\n self.tiers_coro_map[coro] = tier\n return tier\n"
] |
class IOCell(object):
""" A consolidated multi-level bundle of IO operations. This is a useful
facility when doing tiered IO calls such as http requests to get a list
of things and then a fanout of http requests on each of those things and
so forth. The aim of this code is to provide simplified inputs and
outputs to what is otherwise a complex arrangement of IO interactions
and dependencies. Namely the users of this code will use the generator
output to they can iterate over the stream of finalized results as they
are made available for export.
Mechanically this produces a classic generator to the outside world that
internally uses an async event loop to coordinate concurrent tasks.
The tasks may be used to to cause further activity on the output stream.
That is, the initial work orders may be tasks used to seed more work.
Think of this as a portable IO loop that's wrapped up and managed for the
context of a single generator to the outside world. The calling code
will work in normal blocking style. """
Tier = tier.Tier
def __init__(self, coord='noop', debug=DEBUG):
if isinstance(coord, coordination.AbstractCellCoordinator):
self.coord = coord
else:
self.coord = self.make_coord(coord)
self.debug = debug
self.output_buffer = collections.deque()
self.pending_exception = None
self.closed = False
self.tiers = []
self.tiers_coro_map = {}
self.cleaners = []
self.finalized = False
self.init_event_loop()
def init_event_loop(self):
""" Every cell should have its own event loop for proper containment.
The type of event loop is not so important however. """
self.loop = asyncio.new_event_loop()
self.loop.set_debug(self.debug)
if hasattr(self.loop, '_set_coroutine_wrapper'):
self.loop._set_coroutine_wrapper(self.debug)
elif self.debug:
warnings.warn("Cannot set debug on loop: %s" % self.loop)
self.loop_policy = IOCellEventLoopPolicy(self.loop)
if not hasattr(self.loop, '_exception_handler'):
warnings.warn("Cannot save exception handler for: %s" % self.loop)
self.loop_exception_handler_save = None
else:
self.loop_exception_handler_save = self.loop._exception_handler
self.loop.set_exception_handler(self.loop_exception_handler)
def cleanup_event_loop(self):
""" Cleanup an event loop and close it down forever. """
for task in asyncio.Task.all_tasks(loop=self.loop):
if self.debug:
warnings.warn('Cancelling task: %s' % task)
task._log_destroy_pending = False
task.cancel()
self.loop.close()
self.loop.set_exception_handler(self.loop_exception_handler_save)
self.loop_exception_handler_save = None
self.loop_policy = None
self.loop = None
def make_coord(self, name):
return coordination.coordinators[name]()
def done(self):
return all(x.done() for x in asyncio.Task.all_tasks(loop=self.loop))
def assertNotFinalized(self):
""" Ensure the cell is not used more than once. """
if self.finalized:
raise RuntimeError('Already finalized: %s' % self)
def add_tier(self, coro, **kwargs):
""" Add a coroutine to the cell as a task tier. The source can be a
single value or a list of either `Tier` types or coroutine functions
already added to a `Tier` via `add_tier`. """
self.assertNotFinalized()
assert asyncio.iscoroutinefunction(coro)
tier = self.Tier(self, coro, **kwargs)
self.tiers.append(tier)
self.tiers_coro_map[coro] = tier
return tier
def append_tier(self, coro, **kwargs):
""" Implicitly source from the tail tier like a pipe. """
source = self.tiers[-1] if self.tiers else None
return self.add_tier(coro, source=source, **kwargs)
def add_cleaner(self, coro):
""" Add a coroutine to run after the cell is done. This is for the
user to perform any cleanup such as closing sockets. """
self.assertNotFinalized()
self.cleaners.append(coro)
def tier(self, *args, append=True, source=None, **kwargs):
""" Function decorator for a tier coroutine. If the function being
decorated is not already a coroutine function it will be wrapped. """
if len(args) == 1 and not kwargs and callable(args[0]):
raise TypeError('Uncalled decorator syntax is invalid')
def decorator(coro):
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
if append and source is None:
self.append_tier(coro, *args, **kwargs)
else:
self.add_tier(coro, *args, source=source, **kwargs)
return coro
return decorator
def cleaner(self, coro):
""" Function decorator for a cleanup coroutine. """
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
self.add_cleaner(coro)
return coro
@asyncio.coroutine
def output_feed(self, route, *args):
""" Simplify arguments and store them in the `output` buffer for
yielding to the user. """
self.output_buffer.extend(args)
def loop_exception_handler(self, loop, context):
exc = context.get('exception')
if exc:
if not self.pending_exception:
self.pending_exception = exc
elif self.loop_exception_handler_save:
return self.loop_exception_handler_save(loop, context)
else:
return self.loop.default_exception_handler(context)
def output(self):
""" Produce a classic generator for this cell's final results. """
starters = self.finalize()
try:
yield from self._output(starters)
finally:
self.close()
def event_loop(self):
""" Run the event loop once. """
if hasattr(self.loop, '._run_once'):
self.loop._thread_id = threading.get_ident()
try:
self.loop._run_once()
finally:
self.loop._thread_id = None
else:
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
def _output(self, starters):
for x in starters:
self.loop.create_task(x.enqueue_task(None))
while True:
while self.output_buffer:
yield self.output_buffer.popleft()
if not self.done():
with self.loop_policy:
self.event_loop()
if self.pending_exception:
exc = self.pending_exception
self.pending_exception = None
try:
raise exc
finally:
del exc
else:
flushed = False
for t in self.tiers:
if t.buffer:
self.loop.create_task(t.flush())
flushed = True
if not flushed and not self.output_buffer:
break
with self.loop_policy:
self.loop.run_until_complete(self.clean())
@asyncio.coroutine
def clean(self):
""" Run all of the cleaners added by the user. """
if self.cleaners:
yield from asyncio.wait([x() for x in self.cleaners],
loop=self.loop)
def close(self):
if self.closed:
return
self.closed = True
if self.finalized:
self.coord.close_wrap()
self.cleanup_event_loop()
for x in self.tiers:
x.close()
self.tiers = None
self.tiers_coro_map = None
self.cleaners = None
self.coord = None
def __iter__(self):
return self.output()
def __del__(self):
self.close()
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.output
|
python
|
def output(self):
starters = self.finalize()
try:
yield from self._output(starters)
finally:
self.close()
|
Produce a classic generator for this cell's final results.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L200-L206
|
[
"def finalize(self):\n \"\"\" Look at our tiers and setup the final data flow. Once this is run\n a cell can not be modified again. \"\"\"\n self.assertNotFinalized()\n starters = []\n finishers = []\n for x in self.tiers:\n if not x.sources:\n starters.append(x)\n if not x.dests:\n finishers.append(x)\n self.add_tier(self.output_feed, source=finishers)\n self.coord.setup_wrap(self)\n self.finalized = True\n return starters\n",
"def _output(self, starters):\n for x in starters:\n self.loop.create_task(x.enqueue_task(None))\n while True:\n while self.output_buffer:\n yield self.output_buffer.popleft()\n if not self.done():\n with self.loop_policy:\n self.event_loop()\n if self.pending_exception:\n exc = self.pending_exception\n self.pending_exception = None\n try:\n raise exc\n finally:\n del exc\n else:\n flushed = False\n for t in self.tiers:\n if t.buffer:\n self.loop.create_task(t.flush())\n flushed = True\n if not flushed and not self.output_buffer:\n break\n with self.loop_policy:\n self.loop.run_until_complete(self.clean())\n",
"def close(self):\n if self.closed:\n return\n self.closed = True\n if self.finalized:\n self.coord.close_wrap()\n self.cleanup_event_loop()\n for x in self.tiers:\n x.close()\n self.tiers = None\n self.tiers_coro_map = None\n self.cleaners = None\n self.coord = None\n"
] |
class IOCell(object):
""" A consolidated multi-level bundle of IO operations. This is a useful
facility when doing tiered IO calls such as http requests to get a list
of things and then a fanout of http requests on each of those things and
so forth. The aim of this code is to provide simplified inputs and
outputs to what is otherwise a complex arrangement of IO interactions
and dependencies. Namely the users of this code will use the generator
output to they can iterate over the stream of finalized results as they
are made available for export.
Mechanically this produces a classic generator to the outside world that
internally uses an async event loop to coordinate concurrent tasks.
The tasks may be used to to cause further activity on the output stream.
That is, the initial work orders may be tasks used to seed more work.
Think of this as a portable IO loop that's wrapped up and managed for the
context of a single generator to the outside world. The calling code
will work in normal blocking style. """
Tier = tier.Tier
def __init__(self, coord='noop', debug=DEBUG):
if isinstance(coord, coordination.AbstractCellCoordinator):
self.coord = coord
else:
self.coord = self.make_coord(coord)
self.debug = debug
self.output_buffer = collections.deque()
self.pending_exception = None
self.closed = False
self.tiers = []
self.tiers_coro_map = {}
self.cleaners = []
self.finalized = False
self.init_event_loop()
def init_event_loop(self):
""" Every cell should have its own event loop for proper containment.
The type of event loop is not so important however. """
self.loop = asyncio.new_event_loop()
self.loop.set_debug(self.debug)
if hasattr(self.loop, '_set_coroutine_wrapper'):
self.loop._set_coroutine_wrapper(self.debug)
elif self.debug:
warnings.warn("Cannot set debug on loop: %s" % self.loop)
self.loop_policy = IOCellEventLoopPolicy(self.loop)
if not hasattr(self.loop, '_exception_handler'):
warnings.warn("Cannot save exception handler for: %s" % self.loop)
self.loop_exception_handler_save = None
else:
self.loop_exception_handler_save = self.loop._exception_handler
self.loop.set_exception_handler(self.loop_exception_handler)
def cleanup_event_loop(self):
""" Cleanup an event loop and close it down forever. """
for task in asyncio.Task.all_tasks(loop=self.loop):
if self.debug:
warnings.warn('Cancelling task: %s' % task)
task._log_destroy_pending = False
task.cancel()
self.loop.close()
self.loop.set_exception_handler(self.loop_exception_handler_save)
self.loop_exception_handler_save = None
self.loop_policy = None
self.loop = None
def make_coord(self, name):
return coordination.coordinators[name]()
def done(self):
return all(x.done() for x in asyncio.Task.all_tasks(loop=self.loop))
def assertNotFinalized(self):
""" Ensure the cell is not used more than once. """
if self.finalized:
raise RuntimeError('Already finalized: %s' % self)
def add_tier(self, coro, **kwargs):
""" Add a coroutine to the cell as a task tier. The source can be a
single value or a list of either `Tier` types or coroutine functions
already added to a `Tier` via `add_tier`. """
self.assertNotFinalized()
assert asyncio.iscoroutinefunction(coro)
tier = self.Tier(self, coro, **kwargs)
self.tiers.append(tier)
self.tiers_coro_map[coro] = tier
return tier
def append_tier(self, coro, **kwargs):
""" Implicitly source from the tail tier like a pipe. """
source = self.tiers[-1] if self.tiers else None
return self.add_tier(coro, source=source, **kwargs)
def add_cleaner(self, coro):
""" Add a coroutine to run after the cell is done. This is for the
user to perform any cleanup such as closing sockets. """
self.assertNotFinalized()
self.cleaners.append(coro)
def tier(self, *args, append=True, source=None, **kwargs):
""" Function decorator for a tier coroutine. If the function being
decorated is not already a coroutine function it will be wrapped. """
if len(args) == 1 and not kwargs and callable(args[0]):
raise TypeError('Uncalled decorator syntax is invalid')
def decorator(coro):
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
if append and source is None:
self.append_tier(coro, *args, **kwargs)
else:
self.add_tier(coro, *args, source=source, **kwargs)
return coro
return decorator
def cleaner(self, coro):
""" Function decorator for a cleanup coroutine. """
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
self.add_cleaner(coro)
return coro
def finalize(self):
""" Look at our tiers and setup the final data flow. Once this is run
a cell can not be modified again. """
self.assertNotFinalized()
starters = []
finishers = []
for x in self.tiers:
if not x.sources:
starters.append(x)
if not x.dests:
finishers.append(x)
self.add_tier(self.output_feed, source=finishers)
self.coord.setup_wrap(self)
self.finalized = True
return starters
@asyncio.coroutine
def output_feed(self, route, *args):
""" Simplify arguments and store them in the `output` buffer for
yielding to the user. """
self.output_buffer.extend(args)
def loop_exception_handler(self, loop, context):
exc = context.get('exception')
if exc:
if not self.pending_exception:
self.pending_exception = exc
elif self.loop_exception_handler_save:
return self.loop_exception_handler_save(loop, context)
else:
return self.loop.default_exception_handler(context)
def event_loop(self):
""" Run the event loop once. """
if hasattr(self.loop, '._run_once'):
self.loop._thread_id = threading.get_ident()
try:
self.loop._run_once()
finally:
self.loop._thread_id = None
else:
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
def _output(self, starters):
for x in starters:
self.loop.create_task(x.enqueue_task(None))
while True:
while self.output_buffer:
yield self.output_buffer.popleft()
if not self.done():
with self.loop_policy:
self.event_loop()
if self.pending_exception:
exc = self.pending_exception
self.pending_exception = None
try:
raise exc
finally:
del exc
else:
flushed = False
for t in self.tiers:
if t.buffer:
self.loop.create_task(t.flush())
flushed = True
if not flushed and not self.output_buffer:
break
with self.loop_policy:
self.loop.run_until_complete(self.clean())
@asyncio.coroutine
def clean(self):
""" Run all of the cleaners added by the user. """
if self.cleaners:
yield from asyncio.wait([x() for x in self.cleaners],
loop=self.loop)
def close(self):
if self.closed:
return
self.closed = True
if self.finalized:
self.coord.close_wrap()
self.cleanup_event_loop()
for x in self.tiers:
x.close()
self.tiers = None
self.tiers_coro_map = None
self.cleaners = None
self.coord = None
def __iter__(self):
return self.output()
def __del__(self):
self.close()
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.event_loop
|
python
|
def event_loop(self):
if hasattr(self.loop, '._run_once'):
self.loop._thread_id = threading.get_ident()
try:
self.loop._run_once()
finally:
self.loop._thread_id = None
else:
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
|
Run the event loop once.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L208-L218
| null |
class IOCell(object):
""" A consolidated multi-level bundle of IO operations. This is a useful
facility when doing tiered IO calls such as http requests to get a list
of things and then a fanout of http requests on each of those things and
so forth. The aim of this code is to provide simplified inputs and
outputs to what is otherwise a complex arrangement of IO interactions
and dependencies. Namely the users of this code will use the generator
output to they can iterate over the stream of finalized results as they
are made available for export.
Mechanically this produces a classic generator to the outside world that
internally uses an async event loop to coordinate concurrent tasks.
The tasks may be used to to cause further activity on the output stream.
That is, the initial work orders may be tasks used to seed more work.
Think of this as a portable IO loop that's wrapped up and managed for the
context of a single generator to the outside world. The calling code
will work in normal blocking style. """
Tier = tier.Tier
def __init__(self, coord='noop', debug=DEBUG):
if isinstance(coord, coordination.AbstractCellCoordinator):
self.coord = coord
else:
self.coord = self.make_coord(coord)
self.debug = debug
self.output_buffer = collections.deque()
self.pending_exception = None
self.closed = False
self.tiers = []
self.tiers_coro_map = {}
self.cleaners = []
self.finalized = False
self.init_event_loop()
def init_event_loop(self):
""" Every cell should have its own event loop for proper containment.
The type of event loop is not so important however. """
self.loop = asyncio.new_event_loop()
self.loop.set_debug(self.debug)
if hasattr(self.loop, '_set_coroutine_wrapper'):
self.loop._set_coroutine_wrapper(self.debug)
elif self.debug:
warnings.warn("Cannot set debug on loop: %s" % self.loop)
self.loop_policy = IOCellEventLoopPolicy(self.loop)
if not hasattr(self.loop, '_exception_handler'):
warnings.warn("Cannot save exception handler for: %s" % self.loop)
self.loop_exception_handler_save = None
else:
self.loop_exception_handler_save = self.loop._exception_handler
self.loop.set_exception_handler(self.loop_exception_handler)
def cleanup_event_loop(self):
""" Cleanup an event loop and close it down forever. """
for task in asyncio.Task.all_tasks(loop=self.loop):
if self.debug:
warnings.warn('Cancelling task: %s' % task)
task._log_destroy_pending = False
task.cancel()
self.loop.close()
self.loop.set_exception_handler(self.loop_exception_handler_save)
self.loop_exception_handler_save = None
self.loop_policy = None
self.loop = None
def make_coord(self, name):
return coordination.coordinators[name]()
def done(self):
return all(x.done() for x in asyncio.Task.all_tasks(loop=self.loop))
def assertNotFinalized(self):
""" Ensure the cell is not used more than once. """
if self.finalized:
raise RuntimeError('Already finalized: %s' % self)
def add_tier(self, coro, **kwargs):
""" Add a coroutine to the cell as a task tier. The source can be a
single value or a list of either `Tier` types or coroutine functions
already added to a `Tier` via `add_tier`. """
self.assertNotFinalized()
assert asyncio.iscoroutinefunction(coro)
tier = self.Tier(self, coro, **kwargs)
self.tiers.append(tier)
self.tiers_coro_map[coro] = tier
return tier
def append_tier(self, coro, **kwargs):
""" Implicitly source from the tail tier like a pipe. """
source = self.tiers[-1] if self.tiers else None
return self.add_tier(coro, source=source, **kwargs)
def add_cleaner(self, coro):
""" Add a coroutine to run after the cell is done. This is for the
user to perform any cleanup such as closing sockets. """
self.assertNotFinalized()
self.cleaners.append(coro)
def tier(self, *args, append=True, source=None, **kwargs):
""" Function decorator for a tier coroutine. If the function being
decorated is not already a coroutine function it will be wrapped. """
if len(args) == 1 and not kwargs and callable(args[0]):
raise TypeError('Uncalled decorator syntax is invalid')
def decorator(coro):
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
if append and source is None:
self.append_tier(coro, *args, **kwargs)
else:
self.add_tier(coro, *args, source=source, **kwargs)
return coro
return decorator
def cleaner(self, coro):
""" Function decorator for a cleanup coroutine. """
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
self.add_cleaner(coro)
return coro
def finalize(self):
""" Look at our tiers and setup the final data flow. Once this is run
a cell can not be modified again. """
self.assertNotFinalized()
starters = []
finishers = []
for x in self.tiers:
if not x.sources:
starters.append(x)
if not x.dests:
finishers.append(x)
self.add_tier(self.output_feed, source=finishers)
self.coord.setup_wrap(self)
self.finalized = True
return starters
@asyncio.coroutine
def output_feed(self, route, *args):
""" Simplify arguments and store them in the `output` buffer for
yielding to the user. """
self.output_buffer.extend(args)
def loop_exception_handler(self, loop, context):
exc = context.get('exception')
if exc:
if not self.pending_exception:
self.pending_exception = exc
elif self.loop_exception_handler_save:
return self.loop_exception_handler_save(loop, context)
else:
return self.loop.default_exception_handler(context)
def output(self):
""" Produce a classic generator for this cell's final results. """
starters = self.finalize()
try:
yield from self._output(starters)
finally:
self.close()
def _output(self, starters):
for x in starters:
self.loop.create_task(x.enqueue_task(None))
while True:
while self.output_buffer:
yield self.output_buffer.popleft()
if not self.done():
with self.loop_policy:
self.event_loop()
if self.pending_exception:
exc = self.pending_exception
self.pending_exception = None
try:
raise exc
finally:
del exc
else:
flushed = False
for t in self.tiers:
if t.buffer:
self.loop.create_task(t.flush())
flushed = True
if not flushed and not self.output_buffer:
break
with self.loop_policy:
self.loop.run_until_complete(self.clean())
@asyncio.coroutine
def clean(self):
""" Run all of the cleaners added by the user. """
if self.cleaners:
yield from asyncio.wait([x() for x in self.cleaners],
loop=self.loop)
def close(self):
if self.closed:
return
self.closed = True
if self.finalized:
self.coord.close_wrap()
self.cleanup_event_loop()
for x in self.tiers:
x.close()
self.tiers = None
self.tiers_coro_map = None
self.cleaners = None
self.coord = None
def __iter__(self):
return self.output()
def __del__(self):
self.close()
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.clean
|
python
|
def clean(self):
if self.cleaners:
yield from asyncio.wait([x() for x in self.cleaners],
loop=self.loop)
|
Run all of the cleaners added by the user.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L248-L252
| null |
class IOCell(object):
""" A consolidated multi-level bundle of IO operations. This is a useful
facility when doing tiered IO calls such as http requests to get a list
of things and then a fanout of http requests on each of those things and
so forth. The aim of this code is to provide simplified inputs and
outputs to what is otherwise a complex arrangement of IO interactions
and dependencies. Namely the users of this code will use the generator
output to they can iterate over the stream of finalized results as they
are made available for export.
Mechanically this produces a classic generator to the outside world that
internally uses an async event loop to coordinate concurrent tasks.
The tasks may be used to to cause further activity on the output stream.
That is, the initial work orders may be tasks used to seed more work.
Think of this as a portable IO loop that's wrapped up and managed for the
context of a single generator to the outside world. The calling code
will work in normal blocking style. """
Tier = tier.Tier
def __init__(self, coord='noop', debug=DEBUG):
if isinstance(coord, coordination.AbstractCellCoordinator):
self.coord = coord
else:
self.coord = self.make_coord(coord)
self.debug = debug
self.output_buffer = collections.deque()
self.pending_exception = None
self.closed = False
self.tiers = []
self.tiers_coro_map = {}
self.cleaners = []
self.finalized = False
self.init_event_loop()
def init_event_loop(self):
""" Every cell should have its own event loop for proper containment.
The type of event loop is not so important however. """
self.loop = asyncio.new_event_loop()
self.loop.set_debug(self.debug)
if hasattr(self.loop, '_set_coroutine_wrapper'):
self.loop._set_coroutine_wrapper(self.debug)
elif self.debug:
warnings.warn("Cannot set debug on loop: %s" % self.loop)
self.loop_policy = IOCellEventLoopPolicy(self.loop)
if not hasattr(self.loop, '_exception_handler'):
warnings.warn("Cannot save exception handler for: %s" % self.loop)
self.loop_exception_handler_save = None
else:
self.loop_exception_handler_save = self.loop._exception_handler
self.loop.set_exception_handler(self.loop_exception_handler)
def cleanup_event_loop(self):
""" Cleanup an event loop and close it down forever. """
for task in asyncio.Task.all_tasks(loop=self.loop):
if self.debug:
warnings.warn('Cancelling task: %s' % task)
task._log_destroy_pending = False
task.cancel()
self.loop.close()
self.loop.set_exception_handler(self.loop_exception_handler_save)
self.loop_exception_handler_save = None
self.loop_policy = None
self.loop = None
def make_coord(self, name):
return coordination.coordinators[name]()
def done(self):
return all(x.done() for x in asyncio.Task.all_tasks(loop=self.loop))
def assertNotFinalized(self):
""" Ensure the cell is not used more than once. """
if self.finalized:
raise RuntimeError('Already finalized: %s' % self)
def add_tier(self, coro, **kwargs):
""" Add a coroutine to the cell as a task tier. The source can be a
single value or a list of either `Tier` types or coroutine functions
already added to a `Tier` via `add_tier`. """
self.assertNotFinalized()
assert asyncio.iscoroutinefunction(coro)
tier = self.Tier(self, coro, **kwargs)
self.tiers.append(tier)
self.tiers_coro_map[coro] = tier
return tier
def append_tier(self, coro, **kwargs):
""" Implicitly source from the tail tier like a pipe. """
source = self.tiers[-1] if self.tiers else None
return self.add_tier(coro, source=source, **kwargs)
def add_cleaner(self, coro):
""" Add a coroutine to run after the cell is done. This is for the
user to perform any cleanup such as closing sockets. """
self.assertNotFinalized()
self.cleaners.append(coro)
def tier(self, *args, append=True, source=None, **kwargs):
""" Function decorator for a tier coroutine. If the function being
decorated is not already a coroutine function it will be wrapped. """
if len(args) == 1 and not kwargs and callable(args[0]):
raise TypeError('Uncalled decorator syntax is invalid')
def decorator(coro):
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
if append and source is None:
self.append_tier(coro, *args, **kwargs)
else:
self.add_tier(coro, *args, source=source, **kwargs)
return coro
return decorator
def cleaner(self, coro):
""" Function decorator for a cleanup coroutine. """
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
self.add_cleaner(coro)
return coro
def finalize(self):
""" Look at our tiers and setup the final data flow. Once this is run
a cell can not be modified again. """
self.assertNotFinalized()
starters = []
finishers = []
for x in self.tiers:
if not x.sources:
starters.append(x)
if not x.dests:
finishers.append(x)
self.add_tier(self.output_feed, source=finishers)
self.coord.setup_wrap(self)
self.finalized = True
return starters
@asyncio.coroutine
def output_feed(self, route, *args):
""" Simplify arguments and store them in the `output` buffer for
yielding to the user. """
self.output_buffer.extend(args)
def loop_exception_handler(self, loop, context):
exc = context.get('exception')
if exc:
if not self.pending_exception:
self.pending_exception = exc
elif self.loop_exception_handler_save:
return self.loop_exception_handler_save(loop, context)
else:
return self.loop.default_exception_handler(context)
def output(self):
""" Produce a classic generator for this cell's final results. """
starters = self.finalize()
try:
yield from self._output(starters)
finally:
self.close()
def event_loop(self):
""" Run the event loop once. """
if hasattr(self.loop, '._run_once'):
self.loop._thread_id = threading.get_ident()
try:
self.loop._run_once()
finally:
self.loop._thread_id = None
else:
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
def _output(self, starters):
for x in starters:
self.loop.create_task(x.enqueue_task(None))
while True:
while self.output_buffer:
yield self.output_buffer.popleft()
if not self.done():
with self.loop_policy:
self.event_loop()
if self.pending_exception:
exc = self.pending_exception
self.pending_exception = None
try:
raise exc
finally:
del exc
else:
flushed = False
for t in self.tiers:
if t.buffer:
self.loop.create_task(t.flush())
flushed = True
if not flushed and not self.output_buffer:
break
with self.loop_policy:
self.loop.run_until_complete(self.clean())
@asyncio.coroutine
def close(self):
if self.closed:
return
self.closed = True
if self.finalized:
self.coord.close_wrap()
self.cleanup_event_loop()
for x in self.tiers:
x.close()
self.tiers = None
self.tiers_coro_map = None
self.cleaners = None
self.coord = None
def __iter__(self):
return self.output()
def __del__(self):
self.close()
|
jashort/SmartFileSorter
|
smartfilesorter/actionplugins/renameto.py
|
RenameTo.do_action
|
python
|
def do_action(self, target, dry_run=False):
original_path = os.path.dirname(target)
original_filename, original_extension = os.path.splitext(os.path.basename(target))
new_filename = re.sub(self.match, self.replace_with, original_filename) + original_extension
destination = os.path.join(original_path, new_filename)
if dry_run is True:
self.logger.debug("Dry run: Skipping rename {0} to {1}".format(target, new_filename))
return target
else:
self.logger.debug("Renaming {0} to {1}".format(original_filename + original_extension,
new_filename + original_extension))
if not os.path.exists(destination):
try:
shutil.move(target, destination)
except IOError:
self.logger.error("Error renaming file {0} to {1}".format(target, new_filename))
raise IOError
else:
self.logger.error("Destination file already exists: {0}".format(new_filename))
raise IOError
return destination
|
:param target: Full path and filename
:param dry_run: True - don't actually perform action. False: perform action. No effect for this rule.
:return: filename: Full path and filename after action completes
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/actionplugins/renameto.py#L28-L57
| null |
class RenameTo(object):
"""
Renames a given file. Performs a case sensitive search and replace on the filename, then renames it.
Also supports regular expressions.
"""
config_name = 'rename-to'
def __init__(self, parameters):
self.logger = logging.getLogger(__name__)
if 'match' in parameters:
self.match = parameters['match']
else:
raise ValueError('rename-to rule must have parameter "match"')
if 'replace-with' in parameters:
if parameters['replace-with'] is None:
self.replace_with = ''
else:
self.replace_with = parameters['replace-with']
else:
raise ValueError('rename-to rule must have "replace-with" parameter')
|
jashort/SmartFileSorter
|
smartfilesorter/ruleset.py
|
RuleSet.add_match_rule
|
python
|
def add_match_rule(self, rule_config_name):
self.logger.debug('Adding match rule {0}'.format(rule_config_name))
# Handle rules that are just a string, like 'stop-processing'
if type(rule_config_name) == str:
self.add_rule(rule_config_name, None, self.available_match_plugins, self.match_rules)
# Handle rules built from key-value pairs
elif type(rule_config_name) == dict:
for r in rule_config_name:
self.add_rule(r, rule_config_name[r], self.available_match_plugins, self.match_rules)
|
Adds the given match rule to this ruleset's match rules
:param rule_config_name: Plugin's config name
:return: None
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/ruleset.py#L37-L50
|
[
"def add_rule(self, config_name, value, plugins, destination):\n \"\"\"\n Adds a rule. Use add_action_rule or add_match_rule instead\n :param rule_wrapper: Rule wrapper class (ActionRule or MatchRule)\n :param config_name: config_name of the plugin to add\n :param value: configuration information for the rule\n :param plugins: list of all available plugins\n :param destination: list to append plugin to (self.action_rules or self.match_rules)\n :return:\n \"\"\"\n if config_name in plugins:\n rule = plugins[config_name](value)\n destination.append(rule)\n else:\n self.logger.error(\"Plugin with config_name {0} not found\".format(config_name))\n raise IndexError(\"Plugin with config_name {0} not found\".format(config_name))\n"
] |
class RuleSet(object):
"""
A ruleset is a collection of associated match rules and actions. For example:
- file-extension-is: .log
- filename-starts-with: ex
- move-to: /archive/logs/
Match rules are a boolean AND operation -- all must match. Actions are all applied in order.
"""
def __init__(self, yaml_section, match_plugins={}, action_plugins={}):
self.logger = logging.getLogger('SmartFileSorter.RuleSet')
self.name = yaml_section['name']
self.logger.debug("Creating ruleset: {0}".format(self.name))
self.match_rules = []
self.action_rules = []
self.available_match_plugins = match_plugins
self.available_action_plugins = action_plugins
self.add_action_rules(yaml_section['action'])
self.add_match_rules(yaml_section['match'])
def __str__(self):
return self.__repr__()
def __repr__(self):
return 'Ruleset: {0}'.format(self.name)
def __lt__(self, other):
return self.name < other.name
def add_action_rule(self, rule_config_name):
"""
Adds the given action rule to this ruleset's action rules
:param rule_config_name:
:return:
"""
self.logger.debug('Adding action rule {0}'.format(rule_config_name))
# Handle rules that are just a string, like 'stop-processing'
if type(rule_config_name) == str:
self.add_rule(rule_config_name, None, self.available_action_plugins, self.action_rules)
# Handle rules built from key-value pairs
elif type(rule_config_name) == dict:
for r in rule_config_name:
self.add_rule(r, rule_config_name[r], self.available_action_plugins, self.action_rules)
def add_rule(self, config_name, value, plugins, destination):
"""
Adds a rule. Use add_action_rule or add_match_rule instead
:param rule_wrapper: Rule wrapper class (ActionRule or MatchRule)
:param config_name: config_name of the plugin to add
:param value: configuration information for the rule
:param plugins: list of all available plugins
:param destination: list to append plugin to (self.action_rules or self.match_rules)
:return:
"""
if config_name in plugins:
rule = plugins[config_name](value)
destination.append(rule)
else:
self.logger.error("Plugin with config_name {0} not found".format(config_name))
raise IndexError("Plugin with config_name {0} not found".format(config_name))
def matches_all_rules(self, target_filename):
"""
Returns true if the given file matches all the rules in this ruleset.
:param target_filename:
:return: boolean
"""
for rule in self.match_rules:
if rule.test(target_filename) is False:
return False
self.logger.debug('{0}: {1} - {2}'.format(self.name,
os.path.basename(target_filename),
'Match'))
return True
def do_actions(self, target_filename, dry_run=False):
"""
Runs all the given action rules in this ruleset on target_filename
:param target_filename:
:retrn: filename Filename and path after any actions have been completed
\ """
for rule in self.action_rules:
target_filename = rule.do_action(target_filename, dry_run)
raise StopProcessingException
def add_action_rules(self, action_rules):
"""
Add the given action rules to the ruleset. Handles single rules or a list of rules.
:param action_rules: Object representing YAML section from config file
:return:
Example action_rules object:
['print-file-info', {'move-to': '/tmp'}, 'stop-processing']
"""
if type(action_rules) == list:
for r in action_rules:
self.add_action_rule(r)
else:
# Handle a single rule being passed in that's not in a list
self.add_action_rule(action_rules)
def add_match_rules(self, match_rules):
"""
Add the given match rules to the ruleset. Handles single rules or a list of rules.
:param match_rules: Object representing YAML section from config file
:return:
Example match_rules object:
[{'filename-starts-with': 'abc'}, {'filename-ends-with': 'xyz']
"""
if type(match_rules) == list:
for r in match_rules:
self.add_match_rule(r)
else:
# Handle a single rule being passed in that's not in a list
self.add_match_rule(match_rules)
|
jashort/SmartFileSorter
|
smartfilesorter/ruleset.py
|
RuleSet.add_action_rule
|
python
|
def add_action_rule(self, rule_config_name):
self.logger.debug('Adding action rule {0}'.format(rule_config_name))
# Handle rules that are just a string, like 'stop-processing'
if type(rule_config_name) == str:
self.add_rule(rule_config_name, None, self.available_action_plugins, self.action_rules)
# Handle rules built from key-value pairs
elif type(rule_config_name) == dict:
for r in rule_config_name:
self.add_rule(r, rule_config_name[r], self.available_action_plugins, self.action_rules)
|
Adds the given action rule to this ruleset's action rules
:param rule_config_name:
:return:
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/ruleset.py#L52-L65
|
[
"def add_rule(self, config_name, value, plugins, destination):\n \"\"\"\n Adds a rule. Use add_action_rule or add_match_rule instead\n :param rule_wrapper: Rule wrapper class (ActionRule or MatchRule)\n :param config_name: config_name of the plugin to add\n :param value: configuration information for the rule\n :param plugins: list of all available plugins\n :param destination: list to append plugin to (self.action_rules or self.match_rules)\n :return:\n \"\"\"\n if config_name in plugins:\n rule = plugins[config_name](value)\n destination.append(rule)\n else:\n self.logger.error(\"Plugin with config_name {0} not found\".format(config_name))\n raise IndexError(\"Plugin with config_name {0} not found\".format(config_name))\n"
] |
class RuleSet(object):
"""
A ruleset is a collection of associated match rules and actions. For example:
- file-extension-is: .log
- filename-starts-with: ex
- move-to: /archive/logs/
Match rules are a boolean AND operation -- all must match. Actions are all applied in order.
"""
def __init__(self, yaml_section, match_plugins={}, action_plugins={}):
self.logger = logging.getLogger('SmartFileSorter.RuleSet')
self.name = yaml_section['name']
self.logger.debug("Creating ruleset: {0}".format(self.name))
self.match_rules = []
self.action_rules = []
self.available_match_plugins = match_plugins
self.available_action_plugins = action_plugins
self.add_action_rules(yaml_section['action'])
self.add_match_rules(yaml_section['match'])
def __str__(self):
return self.__repr__()
def __repr__(self):
return 'Ruleset: {0}'.format(self.name)
def __lt__(self, other):
return self.name < other.name
def add_match_rule(self, rule_config_name):
"""
Adds the given match rule to this ruleset's match rules
:param rule_config_name: Plugin's config name
:return: None
"""
self.logger.debug('Adding match rule {0}'.format(rule_config_name))
# Handle rules that are just a string, like 'stop-processing'
if type(rule_config_name) == str:
self.add_rule(rule_config_name, None, self.available_match_plugins, self.match_rules)
# Handle rules built from key-value pairs
elif type(rule_config_name) == dict:
for r in rule_config_name:
self.add_rule(r, rule_config_name[r], self.available_match_plugins, self.match_rules)
def add_rule(self, config_name, value, plugins, destination):
"""
Adds a rule. Use add_action_rule or add_match_rule instead
:param rule_wrapper: Rule wrapper class (ActionRule or MatchRule)
:param config_name: config_name of the plugin to add
:param value: configuration information for the rule
:param plugins: list of all available plugins
:param destination: list to append plugin to (self.action_rules or self.match_rules)
:return:
"""
if config_name in plugins:
rule = plugins[config_name](value)
destination.append(rule)
else:
self.logger.error("Plugin with config_name {0} not found".format(config_name))
raise IndexError("Plugin with config_name {0} not found".format(config_name))
def matches_all_rules(self, target_filename):
"""
Returns true if the given file matches all the rules in this ruleset.
:param target_filename:
:return: boolean
"""
for rule in self.match_rules:
if rule.test(target_filename) is False:
return False
self.logger.debug('{0}: {1} - {2}'.format(self.name,
os.path.basename(target_filename),
'Match'))
return True
def do_actions(self, target_filename, dry_run=False):
"""
Runs all the given action rules in this ruleset on target_filename
:param target_filename:
:retrn: filename Filename and path after any actions have been completed
\ """
for rule in self.action_rules:
target_filename = rule.do_action(target_filename, dry_run)
raise StopProcessingException
def add_action_rules(self, action_rules):
"""
Add the given action rules to the ruleset. Handles single rules or a list of rules.
:param action_rules: Object representing YAML section from config file
:return:
Example action_rules object:
['print-file-info', {'move-to': '/tmp'}, 'stop-processing']
"""
if type(action_rules) == list:
for r in action_rules:
self.add_action_rule(r)
else:
# Handle a single rule being passed in that's not in a list
self.add_action_rule(action_rules)
def add_match_rules(self, match_rules):
"""
Add the given match rules to the ruleset. Handles single rules or a list of rules.
:param match_rules: Object representing YAML section from config file
:return:
Example match_rules object:
[{'filename-starts-with': 'abc'}, {'filename-ends-with': 'xyz']
"""
if type(match_rules) == list:
for r in match_rules:
self.add_match_rule(r)
else:
# Handle a single rule being passed in that's not in a list
self.add_match_rule(match_rules)
|
jashort/SmartFileSorter
|
smartfilesorter/ruleset.py
|
RuleSet.add_rule
|
python
|
def add_rule(self, config_name, value, plugins, destination):
if config_name in plugins:
rule = plugins[config_name](value)
destination.append(rule)
else:
self.logger.error("Plugin with config_name {0} not found".format(config_name))
raise IndexError("Plugin with config_name {0} not found".format(config_name))
|
Adds a rule. Use add_action_rule or add_match_rule instead
:param rule_wrapper: Rule wrapper class (ActionRule or MatchRule)
:param config_name: config_name of the plugin to add
:param value: configuration information for the rule
:param plugins: list of all available plugins
:param destination: list to append plugin to (self.action_rules or self.match_rules)
:return:
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/ruleset.py#L67-L82
| null |
class RuleSet(object):
"""
A ruleset is a collection of associated match rules and actions. For example:
- file-extension-is: .log
- filename-starts-with: ex
- move-to: /archive/logs/
Match rules are a boolean AND operation -- all must match. Actions are all applied in order.
"""
def __init__(self, yaml_section, match_plugins={}, action_plugins={}):
self.logger = logging.getLogger('SmartFileSorter.RuleSet')
self.name = yaml_section['name']
self.logger.debug("Creating ruleset: {0}".format(self.name))
self.match_rules = []
self.action_rules = []
self.available_match_plugins = match_plugins
self.available_action_plugins = action_plugins
self.add_action_rules(yaml_section['action'])
self.add_match_rules(yaml_section['match'])
def __str__(self):
return self.__repr__()
def __repr__(self):
return 'Ruleset: {0}'.format(self.name)
def __lt__(self, other):
return self.name < other.name
def add_match_rule(self, rule_config_name):
"""
Adds the given match rule to this ruleset's match rules
:param rule_config_name: Plugin's config name
:return: None
"""
self.logger.debug('Adding match rule {0}'.format(rule_config_name))
# Handle rules that are just a string, like 'stop-processing'
if type(rule_config_name) == str:
self.add_rule(rule_config_name, None, self.available_match_plugins, self.match_rules)
# Handle rules built from key-value pairs
elif type(rule_config_name) == dict:
for r in rule_config_name:
self.add_rule(r, rule_config_name[r], self.available_match_plugins, self.match_rules)
def add_action_rule(self, rule_config_name):
"""
Adds the given action rule to this ruleset's action rules
:param rule_config_name:
:return:
"""
self.logger.debug('Adding action rule {0}'.format(rule_config_name))
# Handle rules that are just a string, like 'stop-processing'
if type(rule_config_name) == str:
self.add_rule(rule_config_name, None, self.available_action_plugins, self.action_rules)
# Handle rules built from key-value pairs
elif type(rule_config_name) == dict:
for r in rule_config_name:
self.add_rule(r, rule_config_name[r], self.available_action_plugins, self.action_rules)
def matches_all_rules(self, target_filename):
"""
Returns true if the given file matches all the rules in this ruleset.
:param target_filename:
:return: boolean
"""
for rule in self.match_rules:
if rule.test(target_filename) is False:
return False
self.logger.debug('{0}: {1} - {2}'.format(self.name,
os.path.basename(target_filename),
'Match'))
return True
def do_actions(self, target_filename, dry_run=False):
"""
Runs all the given action rules in this ruleset on target_filename
:param target_filename:
:retrn: filename Filename and path after any actions have been completed
\ """
for rule in self.action_rules:
target_filename = rule.do_action(target_filename, dry_run)
raise StopProcessingException
def add_action_rules(self, action_rules):
"""
Add the given action rules to the ruleset. Handles single rules or a list of rules.
:param action_rules: Object representing YAML section from config file
:return:
Example action_rules object:
['print-file-info', {'move-to': '/tmp'}, 'stop-processing']
"""
if type(action_rules) == list:
for r in action_rules:
self.add_action_rule(r)
else:
# Handle a single rule being passed in that's not in a list
self.add_action_rule(action_rules)
def add_match_rules(self, match_rules):
"""
Add the given match rules to the ruleset. Handles single rules or a list of rules.
:param match_rules: Object representing YAML section from config file
:return:
Example match_rules object:
[{'filename-starts-with': 'abc'}, {'filename-ends-with': 'xyz']
"""
if type(match_rules) == list:
for r in match_rules:
self.add_match_rule(r)
else:
# Handle a single rule being passed in that's not in a list
self.add_match_rule(match_rules)
|
jashort/SmartFileSorter
|
smartfilesorter/ruleset.py
|
RuleSet.matches_all_rules
|
python
|
def matches_all_rules(self, target_filename):
for rule in self.match_rules:
if rule.test(target_filename) is False:
return False
self.logger.debug('{0}: {1} - {2}'.format(self.name,
os.path.basename(target_filename),
'Match'))
return True
|
Returns true if the given file matches all the rules in this ruleset.
:param target_filename:
:return: boolean
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/ruleset.py#L84-L98
| null |
class RuleSet(object):
"""
A ruleset is a collection of associated match rules and actions. For example:
- file-extension-is: .log
- filename-starts-with: ex
- move-to: /archive/logs/
Match rules are a boolean AND operation -- all must match. Actions are all applied in order.
"""
def __init__(self, yaml_section, match_plugins={}, action_plugins={}):
self.logger = logging.getLogger('SmartFileSorter.RuleSet')
self.name = yaml_section['name']
self.logger.debug("Creating ruleset: {0}".format(self.name))
self.match_rules = []
self.action_rules = []
self.available_match_plugins = match_plugins
self.available_action_plugins = action_plugins
self.add_action_rules(yaml_section['action'])
self.add_match_rules(yaml_section['match'])
def __str__(self):
return self.__repr__()
def __repr__(self):
return 'Ruleset: {0}'.format(self.name)
def __lt__(self, other):
return self.name < other.name
def add_match_rule(self, rule_config_name):
"""
Adds the given match rule to this ruleset's match rules
:param rule_config_name: Plugin's config name
:return: None
"""
self.logger.debug('Adding match rule {0}'.format(rule_config_name))
# Handle rules that are just a string, like 'stop-processing'
if type(rule_config_name) == str:
self.add_rule(rule_config_name, None, self.available_match_plugins, self.match_rules)
# Handle rules built from key-value pairs
elif type(rule_config_name) == dict:
for r in rule_config_name:
self.add_rule(r, rule_config_name[r], self.available_match_plugins, self.match_rules)
def add_action_rule(self, rule_config_name):
"""
Adds the given action rule to this ruleset's action rules
:param rule_config_name:
:return:
"""
self.logger.debug('Adding action rule {0}'.format(rule_config_name))
# Handle rules that are just a string, like 'stop-processing'
if type(rule_config_name) == str:
self.add_rule(rule_config_name, None, self.available_action_plugins, self.action_rules)
# Handle rules built from key-value pairs
elif type(rule_config_name) == dict:
for r in rule_config_name:
self.add_rule(r, rule_config_name[r], self.available_action_plugins, self.action_rules)
def add_rule(self, config_name, value, plugins, destination):
"""
Adds a rule. Use add_action_rule or add_match_rule instead
:param rule_wrapper: Rule wrapper class (ActionRule or MatchRule)
:param config_name: config_name of the plugin to add
:param value: configuration information for the rule
:param plugins: list of all available plugins
:param destination: list to append plugin to (self.action_rules or self.match_rules)
:return:
"""
if config_name in plugins:
rule = plugins[config_name](value)
destination.append(rule)
else:
self.logger.error("Plugin with config_name {0} not found".format(config_name))
raise IndexError("Plugin with config_name {0} not found".format(config_name))
def do_actions(self, target_filename, dry_run=False):
"""
Runs all the given action rules in this ruleset on target_filename
:param target_filename:
:retrn: filename Filename and path after any actions have been completed
\ """
for rule in self.action_rules:
target_filename = rule.do_action(target_filename, dry_run)
raise StopProcessingException
def add_action_rules(self, action_rules):
"""
Add the given action rules to the ruleset. Handles single rules or a list of rules.
:param action_rules: Object representing YAML section from config file
:return:
Example action_rules object:
['print-file-info', {'move-to': '/tmp'}, 'stop-processing']
"""
if type(action_rules) == list:
for r in action_rules:
self.add_action_rule(r)
else:
# Handle a single rule being passed in that's not in a list
self.add_action_rule(action_rules)
def add_match_rules(self, match_rules):
"""
Add the given match rules to the ruleset. Handles single rules or a list of rules.
:param match_rules: Object representing YAML section from config file
:return:
Example match_rules object:
[{'filename-starts-with': 'abc'}, {'filename-ends-with': 'xyz']
"""
if type(match_rules) == list:
for r in match_rules:
self.add_match_rule(r)
else:
# Handle a single rule being passed in that's not in a list
self.add_match_rule(match_rules)
|
jashort/SmartFileSorter
|
smartfilesorter/ruleset.py
|
RuleSet.do_actions
|
python
|
def do_actions(self, target_filename, dry_run=False):
for rule in self.action_rules:
target_filename = rule.do_action(target_filename, dry_run)
raise StopProcessingException
|
Runs all the given action rules in this ruleset on target_filename
:param target_filename:
:retrn: filename Filename and path after any actions have been completed
\
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/ruleset.py#L100-L109
| null |
class RuleSet(object):
"""
A ruleset is a collection of associated match rules and actions. For example:
- file-extension-is: .log
- filename-starts-with: ex
- move-to: /archive/logs/
Match rules are a boolean AND operation -- all must match. Actions are all applied in order.
"""
def __init__(self, yaml_section, match_plugins={}, action_plugins={}):
self.logger = logging.getLogger('SmartFileSorter.RuleSet')
self.name = yaml_section['name']
self.logger.debug("Creating ruleset: {0}".format(self.name))
self.match_rules = []
self.action_rules = []
self.available_match_plugins = match_plugins
self.available_action_plugins = action_plugins
self.add_action_rules(yaml_section['action'])
self.add_match_rules(yaml_section['match'])
def __str__(self):
return self.__repr__()
def __repr__(self):
return 'Ruleset: {0}'.format(self.name)
def __lt__(self, other):
return self.name < other.name
def add_match_rule(self, rule_config_name):
"""
Adds the given match rule to this ruleset's match rules
:param rule_config_name: Plugin's config name
:return: None
"""
self.logger.debug('Adding match rule {0}'.format(rule_config_name))
# Handle rules that are just a string, like 'stop-processing'
if type(rule_config_name) == str:
self.add_rule(rule_config_name, None, self.available_match_plugins, self.match_rules)
# Handle rules built from key-value pairs
elif type(rule_config_name) == dict:
for r in rule_config_name:
self.add_rule(r, rule_config_name[r], self.available_match_plugins, self.match_rules)
def add_action_rule(self, rule_config_name):
"""
Adds the given action rule to this ruleset's action rules
:param rule_config_name:
:return:
"""
self.logger.debug('Adding action rule {0}'.format(rule_config_name))
# Handle rules that are just a string, like 'stop-processing'
if type(rule_config_name) == str:
self.add_rule(rule_config_name, None, self.available_action_plugins, self.action_rules)
# Handle rules built from key-value pairs
elif type(rule_config_name) == dict:
for r in rule_config_name:
self.add_rule(r, rule_config_name[r], self.available_action_plugins, self.action_rules)
def add_rule(self, config_name, value, plugins, destination):
"""
Adds a rule. Use add_action_rule or add_match_rule instead
:param rule_wrapper: Rule wrapper class (ActionRule or MatchRule)
:param config_name: config_name of the plugin to add
:param value: configuration information for the rule
:param plugins: list of all available plugins
:param destination: list to append plugin to (self.action_rules or self.match_rules)
:return:
"""
if config_name in plugins:
rule = plugins[config_name](value)
destination.append(rule)
else:
self.logger.error("Plugin with config_name {0} not found".format(config_name))
raise IndexError("Plugin with config_name {0} not found".format(config_name))
def matches_all_rules(self, target_filename):
"""
Returns true if the given file matches all the rules in this ruleset.
:param target_filename:
:return: boolean
"""
for rule in self.match_rules:
if rule.test(target_filename) is False:
return False
self.logger.debug('{0}: {1} - {2}'.format(self.name,
os.path.basename(target_filename),
'Match'))
return True
def add_action_rules(self, action_rules):
"""
Add the given action rules to the ruleset. Handles single rules or a list of rules.
:param action_rules: Object representing YAML section from config file
:return:
Example action_rules object:
['print-file-info', {'move-to': '/tmp'}, 'stop-processing']
"""
if type(action_rules) == list:
for r in action_rules:
self.add_action_rule(r)
else:
# Handle a single rule being passed in that's not in a list
self.add_action_rule(action_rules)
def add_match_rules(self, match_rules):
"""
Add the given match rules to the ruleset. Handles single rules or a list of rules.
:param match_rules: Object representing YAML section from config file
:return:
Example match_rules object:
[{'filename-starts-with': 'abc'}, {'filename-ends-with': 'xyz']
"""
if type(match_rules) == list:
for r in match_rules:
self.add_match_rule(r)
else:
# Handle a single rule being passed in that's not in a list
self.add_match_rule(match_rules)
|
jashort/SmartFileSorter
|
smartfilesorter/ruleset.py
|
RuleSet.add_action_rules
|
python
|
def add_action_rules(self, action_rules):
if type(action_rules) == list:
for r in action_rules:
self.add_action_rule(r)
else:
# Handle a single rule being passed in that's not in a list
self.add_action_rule(action_rules)
|
Add the given action rules to the ruleset. Handles single rules or a list of rules.
:param action_rules: Object representing YAML section from config file
:return:
Example action_rules object:
['print-file-info', {'move-to': '/tmp'}, 'stop-processing']
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/ruleset.py#L111-L125
|
[
"def add_action_rule(self, rule_config_name):\n \"\"\"\n Adds the given action rule to this ruleset's action rules\n :param rule_config_name:\n :return:\n \"\"\"\n self.logger.debug('Adding action rule {0}'.format(rule_config_name))\n # Handle rules that are just a string, like 'stop-processing'\n if type(rule_config_name) == str:\n self.add_rule(rule_config_name, None, self.available_action_plugins, self.action_rules)\n # Handle rules built from key-value pairs\n elif type(rule_config_name) == dict:\n for r in rule_config_name:\n self.add_rule(r, rule_config_name[r], self.available_action_plugins, self.action_rules)\n"
] |
class RuleSet(object):
"""
A ruleset is a collection of associated match rules and actions. For example:
- file-extension-is: .log
- filename-starts-with: ex
- move-to: /archive/logs/
Match rules are a boolean AND operation -- all must match. Actions are all applied in order.
"""
def __init__(self, yaml_section, match_plugins={}, action_plugins={}):
self.logger = logging.getLogger('SmartFileSorter.RuleSet')
self.name = yaml_section['name']
self.logger.debug("Creating ruleset: {0}".format(self.name))
self.match_rules = []
self.action_rules = []
self.available_match_plugins = match_plugins
self.available_action_plugins = action_plugins
self.add_action_rules(yaml_section['action'])
self.add_match_rules(yaml_section['match'])
def __str__(self):
return self.__repr__()
def __repr__(self):
return 'Ruleset: {0}'.format(self.name)
def __lt__(self, other):
return self.name < other.name
def add_match_rule(self, rule_config_name):
"""
Adds the given match rule to this ruleset's match rules
:param rule_config_name: Plugin's config name
:return: None
"""
self.logger.debug('Adding match rule {0}'.format(rule_config_name))
# Handle rules that are just a string, like 'stop-processing'
if type(rule_config_name) == str:
self.add_rule(rule_config_name, None, self.available_match_plugins, self.match_rules)
# Handle rules built from key-value pairs
elif type(rule_config_name) == dict:
for r in rule_config_name:
self.add_rule(r, rule_config_name[r], self.available_match_plugins, self.match_rules)
def add_action_rule(self, rule_config_name):
"""
Adds the given action rule to this ruleset's action rules
:param rule_config_name:
:return:
"""
self.logger.debug('Adding action rule {0}'.format(rule_config_name))
# Handle rules that are just a string, like 'stop-processing'
if type(rule_config_name) == str:
self.add_rule(rule_config_name, None, self.available_action_plugins, self.action_rules)
# Handle rules built from key-value pairs
elif type(rule_config_name) == dict:
for r in rule_config_name:
self.add_rule(r, rule_config_name[r], self.available_action_plugins, self.action_rules)
def add_rule(self, config_name, value, plugins, destination):
"""
Adds a rule. Use add_action_rule or add_match_rule instead
:param rule_wrapper: Rule wrapper class (ActionRule or MatchRule)
:param config_name: config_name of the plugin to add
:param value: configuration information for the rule
:param plugins: list of all available plugins
:param destination: list to append plugin to (self.action_rules or self.match_rules)
:return:
"""
if config_name in plugins:
rule = plugins[config_name](value)
destination.append(rule)
else:
self.logger.error("Plugin with config_name {0} not found".format(config_name))
raise IndexError("Plugin with config_name {0} not found".format(config_name))
def matches_all_rules(self, target_filename):
"""
Returns true if the given file matches all the rules in this ruleset.
:param target_filename:
:return: boolean
"""
for rule in self.match_rules:
if rule.test(target_filename) is False:
return False
self.logger.debug('{0}: {1} - {2}'.format(self.name,
os.path.basename(target_filename),
'Match'))
return True
def do_actions(self, target_filename, dry_run=False):
"""
Runs all the given action rules in this ruleset on target_filename
:param target_filename:
:retrn: filename Filename and path after any actions have been completed
\ """
for rule in self.action_rules:
target_filename = rule.do_action(target_filename, dry_run)
raise StopProcessingException
def add_match_rules(self, match_rules):
"""
Add the given match rules to the ruleset. Handles single rules or a list of rules.
:param match_rules: Object representing YAML section from config file
:return:
Example match_rules object:
[{'filename-starts-with': 'abc'}, {'filename-ends-with': 'xyz']
"""
if type(match_rules) == list:
for r in match_rules:
self.add_match_rule(r)
else:
# Handle a single rule being passed in that's not in a list
self.add_match_rule(match_rules)
|
jashort/SmartFileSorter
|
smartfilesorter/ruleset.py
|
RuleSet.add_match_rules
|
python
|
def add_match_rules(self, match_rules):
if type(match_rules) == list:
for r in match_rules:
self.add_match_rule(r)
else:
# Handle a single rule being passed in that's not in a list
self.add_match_rule(match_rules)
|
Add the given match rules to the ruleset. Handles single rules or a list of rules.
:param match_rules: Object representing YAML section from config file
:return:
Example match_rules object:
[{'filename-starts-with': 'abc'}, {'filename-ends-with': 'xyz']
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/ruleset.py#L127-L141
|
[
"def add_match_rule(self, rule_config_name):\n \"\"\"\n Adds the given match rule to this ruleset's match rules\n :param rule_config_name: Plugin's config name\n :return: None\n \"\"\"\n self.logger.debug('Adding match rule {0}'.format(rule_config_name))\n # Handle rules that are just a string, like 'stop-processing'\n if type(rule_config_name) == str:\n self.add_rule(rule_config_name, None, self.available_match_plugins, self.match_rules)\n # Handle rules built from key-value pairs\n elif type(rule_config_name) == dict:\n for r in rule_config_name:\n self.add_rule(r, rule_config_name[r], self.available_match_plugins, self.match_rules)\n"
] |
class RuleSet(object):
"""
A ruleset is a collection of associated match rules and actions. For example:
- file-extension-is: .log
- filename-starts-with: ex
- move-to: /archive/logs/
Match rules are a boolean AND operation -- all must match. Actions are all applied in order.
"""
def __init__(self, yaml_section, match_plugins={}, action_plugins={}):
self.logger = logging.getLogger('SmartFileSorter.RuleSet')
self.name = yaml_section['name']
self.logger.debug("Creating ruleset: {0}".format(self.name))
self.match_rules = []
self.action_rules = []
self.available_match_plugins = match_plugins
self.available_action_plugins = action_plugins
self.add_action_rules(yaml_section['action'])
self.add_match_rules(yaml_section['match'])
def __str__(self):
return self.__repr__()
def __repr__(self):
return 'Ruleset: {0}'.format(self.name)
def __lt__(self, other):
return self.name < other.name
def add_match_rule(self, rule_config_name):
"""
Adds the given match rule to this ruleset's match rules
:param rule_config_name: Plugin's config name
:return: None
"""
self.logger.debug('Adding match rule {0}'.format(rule_config_name))
# Handle rules that are just a string, like 'stop-processing'
if type(rule_config_name) == str:
self.add_rule(rule_config_name, None, self.available_match_plugins, self.match_rules)
# Handle rules built from key-value pairs
elif type(rule_config_name) == dict:
for r in rule_config_name:
self.add_rule(r, rule_config_name[r], self.available_match_plugins, self.match_rules)
def add_action_rule(self, rule_config_name):
"""
Adds the given action rule to this ruleset's action rules
:param rule_config_name:
:return:
"""
self.logger.debug('Adding action rule {0}'.format(rule_config_name))
# Handle rules that are just a string, like 'stop-processing'
if type(rule_config_name) == str:
self.add_rule(rule_config_name, None, self.available_action_plugins, self.action_rules)
# Handle rules built from key-value pairs
elif type(rule_config_name) == dict:
for r in rule_config_name:
self.add_rule(r, rule_config_name[r], self.available_action_plugins, self.action_rules)
def add_rule(self, config_name, value, plugins, destination):
"""
Adds a rule. Use add_action_rule or add_match_rule instead
:param rule_wrapper: Rule wrapper class (ActionRule or MatchRule)
:param config_name: config_name of the plugin to add
:param value: configuration information for the rule
:param plugins: list of all available plugins
:param destination: list to append plugin to (self.action_rules or self.match_rules)
:return:
"""
if config_name in plugins:
rule = plugins[config_name](value)
destination.append(rule)
else:
self.logger.error("Plugin with config_name {0} not found".format(config_name))
raise IndexError("Plugin with config_name {0} not found".format(config_name))
def matches_all_rules(self, target_filename):
"""
Returns true if the given file matches all the rules in this ruleset.
:param target_filename:
:return: boolean
"""
for rule in self.match_rules:
if rule.test(target_filename) is False:
return False
self.logger.debug('{0}: {1} - {2}'.format(self.name,
os.path.basename(target_filename),
'Match'))
return True
def do_actions(self, target_filename, dry_run=False):
"""
Runs all the given action rules in this ruleset on target_filename
:param target_filename:
:retrn: filename Filename and path after any actions have been completed
\ """
for rule in self.action_rules:
target_filename = rule.do_action(target_filename, dry_run)
raise StopProcessingException
def add_action_rules(self, action_rules):
"""
Add the given action rules to the ruleset. Handles single rules or a list of rules.
:param action_rules: Object representing YAML section from config file
:return:
Example action_rules object:
['print-file-info', {'move-to': '/tmp'}, 'stop-processing']
"""
if type(action_rules) == list:
for r in action_rules:
self.add_action_rule(r)
else:
# Handle a single rule being passed in that's not in a list
self.add_action_rule(action_rules)
|
jashort/SmartFileSorter
|
smartfilesorter/smartfilesorter.py
|
SmartFileSorter.parse_arguments
|
python
|
def parse_arguments(arguments=sys.argv[1:]):
args = docopt.docopt(doc="""
Smart File Sorter
Usage:
sfp RULEFILE DIRECTORY [--debug] [--dry-run] [--log LOGFILE]
sfp [--debug] --list-plugins
Options:
RULEFILE Rule configuration file to execute
DIRECTORY Directory of files to process
--debug Log extra information during processing
--dry-run Log actions but do not make any changes
--log LOGFILE Specify log output file
--list-plugins Print match and action plugin information
""", argv=arguments)
return args
|
Process command line arguments
:param: List of strings containing command line arguments, defaults to sys.argv[1:]
:return: docopt args object
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/smartfilesorter.py#L22-L43
| null |
class SmartFileSorter(object):
def __init__(self):
self.args = None
self.logger = None
self.match_plugins = []
self.action_plugins = []
@staticmethod
def create_logger(self, args={}):
"""
Create and configure the program's logger object.
Log levels:
DEBUG - Log everything. Hidden unless --debug is used.
INFO - information only
ERROR - Critical error
:param args: Object containing program's parsed command line arguments
:return: None
"""
# Set up logging
logger = logging.getLogger("SmartFileSorter")
logger.level = logging.INFO
if '--debug' in args and args['--debug'] is True:
logger.setLevel(logging.DEBUG)
file_log_formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s',
'%Y-%m-%d %H:%M:%S')
console_log_formatter = logging.Formatter('%(message)s')
# Log to stdout
stdout_stream = logging.StreamHandler(stream=sys.stdout)
stdout_stream.setFormatter(console_log_formatter)
logger.addHandler(stdout_stream)
# Log to file if the option is chosen
if '--log' in args and args['--log'] is not None:
logfile = open(args['--log'], 'w')
logfile_stream = logging.StreamHandler(stream=logfile)
logfile_stream.setFormatter(file_log_formatter)
logger.addHandler(logfile_stream)
if '--dry-run' in args and args['--dry-run'] is True:
logger.info('Running with --dry-run parameter. Actions will not be performed.')
self.logger = logger
def get_files(self, path):
"""
Yields full path and filename for each file that exists in the given directory. Will
ignore hidden files (that start with a ".") and directories
:param path: Directory or filename
:return:
"""
if os.path.isfile(path):
self.logger.debug('Called with single file as target: {0}'.format(path))
yield path
return
self.logger.debug('Getting list of files in {0}'.format(path))
try:
for f in os.listdir(path):
cur_file = os.path.join(path, f)
if f[0] != '.' and os.path.isfile(cur_file):
yield cur_file
except OSError:
self.logger.error('Could not read files from {0}'.format(path))
raise
def load_rules(self, filename):
"""
Load rules from YAML configuration in the given stream object
:param filename: Filename of rule YAML file
:return: rules object
"""
self.logger.debug('Reading rules from %s', filename)
try:
in_file = open(filename)
except IOError:
self.logger.error('Error opening {0}'.format(filename))
raise
y = None
try:
y = yaml.load(in_file)
except yaml.YAMLError as exc:
if hasattr(exc, 'problem_mark'):
self.logger.error('Error parsing rules{0}'.format(exc.problem_mark))
else:
self.logger.error('Error parsing rules in {0}'.format(in_file.name))
raise
return y
def load_plugins(self, plugin_path):
"""
Loads plugins from modules in plugin_path. Looks for the config_name property
in each object that's found. If so, adds that to the dictionary with the
config_name as the key. config_name should be unique between different plugins.
:param plugin_path: Path to load plugins from
:return: dictionary of plugins by config_name
"""
self.logger.debug('Loading plugins from {0}'.format(plugin_path))
plugins = {}
plugin_dir = os.path.realpath(plugin_path)
sys.path.append(plugin_dir)
for f in os.listdir(plugin_dir):
if f.endswith(".py"):
name = f[:-3]
elif f.endswith(".pyc"):
name = f[:-4]
# Possible support for plugins inside directories - worth doing?
# elif os.path.isdir(os.path.join(plugin_dir, f)):
# name = f
else:
continue
try:
self.logger.debug('Adding plugin from: {0}'.format(f))
mod = __import__(name, globals(), locals(), [], 0)
for plugin_class in inspect.getmembers(mod):
if plugin_class[0][0:2] == '__': # Skip dunder members - builtins, etc
continue
if hasattr(plugin_class[1], 'config_name'):
if plugin_class[1].config_name is not None:
# Skip plugins where config_name is None, like the base classes
plugins[plugin_class[1].config_name] = plugin_class[1]
self.logger.debug('Added plugin: {0}'.format(plugin_class[1].config_name))
# Todo: Add error checking here. If a plugin with that name already exists,
# log an error. Quit or continue?
except ImportError as e:
self.logger.error(e)
pass # problem importing
self.logger.debug('Done loading plugins')
return plugins
@staticmethod
def build_rules(rule_yaml, match_plugins, action_plugins):
"""
Convert parsed rule YAML in to a list of ruleset objects
:param rule_yaml: Dictionary parsed from YAML rule file
:param match_plugins: Dictionary of match plugins (key=config_name, value=plugin object)
:param action_plugins: Dictionary of action plugins (key=config_name, value=plugin object)
:return: list of rules
"""
rule_sets = []
for yaml_section in rule_yaml:
rule_sets.append(RuleSet(yaml_section, match_plugins=match_plugins, action_plugins=action_plugins))
return rule_sets
def run(self, args):
"""
Load plugins and run with the configuration given in args
:param args: Object containing program's parsed command line arguments
:return: None
"""
module_dir = os.path.dirname(__file__)
self.match_plugins = self.load_plugins(os.path.join(module_dir, 'matchplugins/'))
self.action_plugins = self.load_plugins(os.path.join(module_dir, 'actionplugins/'))
if args['--list-plugins'] is True:
print("\nAvailable Match Plugins:")
for m in sorted(self.match_plugins):
print(m)
print("\nAvailable Action Plugins:")
for a in sorted(self.action_plugins):
print(a)
sys.exit()
rule_yaml = self.load_rules(args['RULEFILE'])
rules = self.build_rules(rule_yaml, self.match_plugins, self.action_plugins)
files_analyzed = 0
files_matched = 0
result_count = collections.defaultdict(int)
for cur_file in self.get_files(args['DIRECTORY']):
self.logger.debug("Processing {0}".format(cur_file))
files_analyzed += 1
for ruleset in rules:
if ruleset.matches_all_rules(cur_file):
files_matched += 1
result_count[ruleset] += 1
# If the file matches all rules in the ruleset, do whatever
# actions the ruleset specifies. Stop processing if the
# ruleset says stop.
try:
ruleset.do_actions(cur_file, args['--dry-run'])
except StopProcessingException:
break
self.logger.info("Files matched: {0}/{1}".format(files_matched, files_analyzed))
for ruleset in sorted(result_count):
self.logger.info("{0}: {1} files".format(ruleset.name, result_count[ruleset]))
|
jashort/SmartFileSorter
|
smartfilesorter/smartfilesorter.py
|
SmartFileSorter.create_logger
|
python
|
def create_logger(self, args={}):
# Set up logging
logger = logging.getLogger("SmartFileSorter")
logger.level = logging.INFO
if '--debug' in args and args['--debug'] is True:
logger.setLevel(logging.DEBUG)
file_log_formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s',
'%Y-%m-%d %H:%M:%S')
console_log_formatter = logging.Formatter('%(message)s')
# Log to stdout
stdout_stream = logging.StreamHandler(stream=sys.stdout)
stdout_stream.setFormatter(console_log_formatter)
logger.addHandler(stdout_stream)
# Log to file if the option is chosen
if '--log' in args and args['--log'] is not None:
logfile = open(args['--log'], 'w')
logfile_stream = logging.StreamHandler(stream=logfile)
logfile_stream.setFormatter(file_log_formatter)
logger.addHandler(logfile_stream)
if '--dry-run' in args and args['--dry-run'] is True:
logger.info('Running with --dry-run parameter. Actions will not be performed.')
self.logger = logger
|
Create and configure the program's logger object.
Log levels:
DEBUG - Log everything. Hidden unless --debug is used.
INFO - information only
ERROR - Critical error
:param args: Object containing program's parsed command line arguments
:return: None
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/smartfilesorter.py#L45-L81
| null |
class SmartFileSorter(object):
def __init__(self):
self.args = None
self.logger = None
self.match_plugins = []
self.action_plugins = []
@staticmethod
def parse_arguments(arguments=sys.argv[1:]):
"""
Process command line arguments
:param: List of strings containing command line arguments, defaults to sys.argv[1:]
:return: docopt args object
"""
args = docopt.docopt(doc="""
Smart File Sorter
Usage:
sfp RULEFILE DIRECTORY [--debug] [--dry-run] [--log LOGFILE]
sfp [--debug] --list-plugins
Options:
RULEFILE Rule configuration file to execute
DIRECTORY Directory of files to process
--debug Log extra information during processing
--dry-run Log actions but do not make any changes
--log LOGFILE Specify log output file
--list-plugins Print match and action plugin information
""", argv=arguments)
return args
def get_files(self, path):
"""
Yields full path and filename for each file that exists in the given directory. Will
ignore hidden files (that start with a ".") and directories
:param path: Directory or filename
:return:
"""
if os.path.isfile(path):
self.logger.debug('Called with single file as target: {0}'.format(path))
yield path
return
self.logger.debug('Getting list of files in {0}'.format(path))
try:
for f in os.listdir(path):
cur_file = os.path.join(path, f)
if f[0] != '.' and os.path.isfile(cur_file):
yield cur_file
except OSError:
self.logger.error('Could not read files from {0}'.format(path))
raise
def load_rules(self, filename):
"""
Load rules from YAML configuration in the given stream object
:param filename: Filename of rule YAML file
:return: rules object
"""
self.logger.debug('Reading rules from %s', filename)
try:
in_file = open(filename)
except IOError:
self.logger.error('Error opening {0}'.format(filename))
raise
y = None
try:
y = yaml.load(in_file)
except yaml.YAMLError as exc:
if hasattr(exc, 'problem_mark'):
self.logger.error('Error parsing rules{0}'.format(exc.problem_mark))
else:
self.logger.error('Error parsing rules in {0}'.format(in_file.name))
raise
return y
def load_plugins(self, plugin_path):
"""
Loads plugins from modules in plugin_path. Looks for the config_name property
in each object that's found. If so, adds that to the dictionary with the
config_name as the key. config_name should be unique between different plugins.
:param plugin_path: Path to load plugins from
:return: dictionary of plugins by config_name
"""
self.logger.debug('Loading plugins from {0}'.format(plugin_path))
plugins = {}
plugin_dir = os.path.realpath(plugin_path)
sys.path.append(plugin_dir)
for f in os.listdir(plugin_dir):
if f.endswith(".py"):
name = f[:-3]
elif f.endswith(".pyc"):
name = f[:-4]
# Possible support for plugins inside directories - worth doing?
# elif os.path.isdir(os.path.join(plugin_dir, f)):
# name = f
else:
continue
try:
self.logger.debug('Adding plugin from: {0}'.format(f))
mod = __import__(name, globals(), locals(), [], 0)
for plugin_class in inspect.getmembers(mod):
if plugin_class[0][0:2] == '__': # Skip dunder members - builtins, etc
continue
if hasattr(plugin_class[1], 'config_name'):
if plugin_class[1].config_name is not None:
# Skip plugins where config_name is None, like the base classes
plugins[plugin_class[1].config_name] = plugin_class[1]
self.logger.debug('Added plugin: {0}'.format(plugin_class[1].config_name))
# Todo: Add error checking here. If a plugin with that name already exists,
# log an error. Quit or continue?
except ImportError as e:
self.logger.error(e)
pass # problem importing
self.logger.debug('Done loading plugins')
return plugins
@staticmethod
def build_rules(rule_yaml, match_plugins, action_plugins):
"""
Convert parsed rule YAML in to a list of ruleset objects
:param rule_yaml: Dictionary parsed from YAML rule file
:param match_plugins: Dictionary of match plugins (key=config_name, value=plugin object)
:param action_plugins: Dictionary of action plugins (key=config_name, value=plugin object)
:return: list of rules
"""
rule_sets = []
for yaml_section in rule_yaml:
rule_sets.append(RuleSet(yaml_section, match_plugins=match_plugins, action_plugins=action_plugins))
return rule_sets
def run(self, args):
"""
Load plugins and run with the configuration given in args
:param args: Object containing program's parsed command line arguments
:return: None
"""
module_dir = os.path.dirname(__file__)
self.match_plugins = self.load_plugins(os.path.join(module_dir, 'matchplugins/'))
self.action_plugins = self.load_plugins(os.path.join(module_dir, 'actionplugins/'))
if args['--list-plugins'] is True:
print("\nAvailable Match Plugins:")
for m in sorted(self.match_plugins):
print(m)
print("\nAvailable Action Plugins:")
for a in sorted(self.action_plugins):
print(a)
sys.exit()
rule_yaml = self.load_rules(args['RULEFILE'])
rules = self.build_rules(rule_yaml, self.match_plugins, self.action_plugins)
files_analyzed = 0
files_matched = 0
result_count = collections.defaultdict(int)
for cur_file in self.get_files(args['DIRECTORY']):
self.logger.debug("Processing {0}".format(cur_file))
files_analyzed += 1
for ruleset in rules:
if ruleset.matches_all_rules(cur_file):
files_matched += 1
result_count[ruleset] += 1
# If the file matches all rules in the ruleset, do whatever
# actions the ruleset specifies. Stop processing if the
# ruleset says stop.
try:
ruleset.do_actions(cur_file, args['--dry-run'])
except StopProcessingException:
break
self.logger.info("Files matched: {0}/{1}".format(files_matched, files_analyzed))
for ruleset in sorted(result_count):
self.logger.info("{0}: {1} files".format(ruleset.name, result_count[ruleset]))
|
jashort/SmartFileSorter
|
smartfilesorter/smartfilesorter.py
|
SmartFileSorter.get_files
|
python
|
def get_files(self, path):
if os.path.isfile(path):
self.logger.debug('Called with single file as target: {0}'.format(path))
yield path
return
self.logger.debug('Getting list of files in {0}'.format(path))
try:
for f in os.listdir(path):
cur_file = os.path.join(path, f)
if f[0] != '.' and os.path.isfile(cur_file):
yield cur_file
except OSError:
self.logger.error('Could not read files from {0}'.format(path))
raise
|
Yields full path and filename for each file that exists in the given directory. Will
ignore hidden files (that start with a ".") and directories
:param path: Directory or filename
:return:
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/smartfilesorter.py#L83-L105
| null |
class SmartFileSorter(object):
def __init__(self):
self.args = None
self.logger = None
self.match_plugins = []
self.action_plugins = []
@staticmethod
def parse_arguments(arguments=sys.argv[1:]):
"""
Process command line arguments
:param: List of strings containing command line arguments, defaults to sys.argv[1:]
:return: docopt args object
"""
args = docopt.docopt(doc="""
Smart File Sorter
Usage:
sfp RULEFILE DIRECTORY [--debug] [--dry-run] [--log LOGFILE]
sfp [--debug] --list-plugins
Options:
RULEFILE Rule configuration file to execute
DIRECTORY Directory of files to process
--debug Log extra information during processing
--dry-run Log actions but do not make any changes
--log LOGFILE Specify log output file
--list-plugins Print match and action plugin information
""", argv=arguments)
return args
def create_logger(self, args={}):
"""
Create and configure the program's logger object.
Log levels:
DEBUG - Log everything. Hidden unless --debug is used.
INFO - information only
ERROR - Critical error
:param args: Object containing program's parsed command line arguments
:return: None
"""
# Set up logging
logger = logging.getLogger("SmartFileSorter")
logger.level = logging.INFO
if '--debug' in args and args['--debug'] is True:
logger.setLevel(logging.DEBUG)
file_log_formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s',
'%Y-%m-%d %H:%M:%S')
console_log_formatter = logging.Formatter('%(message)s')
# Log to stdout
stdout_stream = logging.StreamHandler(stream=sys.stdout)
stdout_stream.setFormatter(console_log_formatter)
logger.addHandler(stdout_stream)
# Log to file if the option is chosen
if '--log' in args and args['--log'] is not None:
logfile = open(args['--log'], 'w')
logfile_stream = logging.StreamHandler(stream=logfile)
logfile_stream.setFormatter(file_log_formatter)
logger.addHandler(logfile_stream)
if '--dry-run' in args and args['--dry-run'] is True:
logger.info('Running with --dry-run parameter. Actions will not be performed.')
self.logger = logger
def load_rules(self, filename):
"""
Load rules from YAML configuration in the given stream object
:param filename: Filename of rule YAML file
:return: rules object
"""
self.logger.debug('Reading rules from %s', filename)
try:
in_file = open(filename)
except IOError:
self.logger.error('Error opening {0}'.format(filename))
raise
y = None
try:
y = yaml.load(in_file)
except yaml.YAMLError as exc:
if hasattr(exc, 'problem_mark'):
self.logger.error('Error parsing rules{0}'.format(exc.problem_mark))
else:
self.logger.error('Error parsing rules in {0}'.format(in_file.name))
raise
return y
def load_plugins(self, plugin_path):
"""
Loads plugins from modules in plugin_path. Looks for the config_name property
in each object that's found. If so, adds that to the dictionary with the
config_name as the key. config_name should be unique between different plugins.
:param plugin_path: Path to load plugins from
:return: dictionary of plugins by config_name
"""
self.logger.debug('Loading plugins from {0}'.format(plugin_path))
plugins = {}
plugin_dir = os.path.realpath(plugin_path)
sys.path.append(plugin_dir)
for f in os.listdir(plugin_dir):
if f.endswith(".py"):
name = f[:-3]
elif f.endswith(".pyc"):
name = f[:-4]
# Possible support for plugins inside directories - worth doing?
# elif os.path.isdir(os.path.join(plugin_dir, f)):
# name = f
else:
continue
try:
self.logger.debug('Adding plugin from: {0}'.format(f))
mod = __import__(name, globals(), locals(), [], 0)
for plugin_class in inspect.getmembers(mod):
if plugin_class[0][0:2] == '__': # Skip dunder members - builtins, etc
continue
if hasattr(plugin_class[1], 'config_name'):
if plugin_class[1].config_name is not None:
# Skip plugins where config_name is None, like the base classes
plugins[plugin_class[1].config_name] = plugin_class[1]
self.logger.debug('Added plugin: {0}'.format(plugin_class[1].config_name))
# Todo: Add error checking here. If a plugin with that name already exists,
# log an error. Quit or continue?
except ImportError as e:
self.logger.error(e)
pass # problem importing
self.logger.debug('Done loading plugins')
return plugins
@staticmethod
def build_rules(rule_yaml, match_plugins, action_plugins):
"""
Convert parsed rule YAML in to a list of ruleset objects
:param rule_yaml: Dictionary parsed from YAML rule file
:param match_plugins: Dictionary of match plugins (key=config_name, value=plugin object)
:param action_plugins: Dictionary of action plugins (key=config_name, value=plugin object)
:return: list of rules
"""
rule_sets = []
for yaml_section in rule_yaml:
rule_sets.append(RuleSet(yaml_section, match_plugins=match_plugins, action_plugins=action_plugins))
return rule_sets
def run(self, args):
"""
Load plugins and run with the configuration given in args
:param args: Object containing program's parsed command line arguments
:return: None
"""
module_dir = os.path.dirname(__file__)
self.match_plugins = self.load_plugins(os.path.join(module_dir, 'matchplugins/'))
self.action_plugins = self.load_plugins(os.path.join(module_dir, 'actionplugins/'))
if args['--list-plugins'] is True:
print("\nAvailable Match Plugins:")
for m in sorted(self.match_plugins):
print(m)
print("\nAvailable Action Plugins:")
for a in sorted(self.action_plugins):
print(a)
sys.exit()
rule_yaml = self.load_rules(args['RULEFILE'])
rules = self.build_rules(rule_yaml, self.match_plugins, self.action_plugins)
files_analyzed = 0
files_matched = 0
result_count = collections.defaultdict(int)
for cur_file in self.get_files(args['DIRECTORY']):
self.logger.debug("Processing {0}".format(cur_file))
files_analyzed += 1
for ruleset in rules:
if ruleset.matches_all_rules(cur_file):
files_matched += 1
result_count[ruleset] += 1
# If the file matches all rules in the ruleset, do whatever
# actions the ruleset specifies. Stop processing if the
# ruleset says stop.
try:
ruleset.do_actions(cur_file, args['--dry-run'])
except StopProcessingException:
break
self.logger.info("Files matched: {0}/{1}".format(files_matched, files_analyzed))
for ruleset in sorted(result_count):
self.logger.info("{0}: {1} files".format(ruleset.name, result_count[ruleset]))
|
jashort/SmartFileSorter
|
smartfilesorter/smartfilesorter.py
|
SmartFileSorter.load_rules
|
python
|
def load_rules(self, filename):
self.logger.debug('Reading rules from %s', filename)
try:
in_file = open(filename)
except IOError:
self.logger.error('Error opening {0}'.format(filename))
raise
y = None
try:
y = yaml.load(in_file)
except yaml.YAMLError as exc:
if hasattr(exc, 'problem_mark'):
self.logger.error('Error parsing rules{0}'.format(exc.problem_mark))
else:
self.logger.error('Error parsing rules in {0}'.format(in_file.name))
raise
return y
|
Load rules from YAML configuration in the given stream object
:param filename: Filename of rule YAML file
:return: rules object
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/smartfilesorter.py#L107-L130
| null |
class SmartFileSorter(object):
def __init__(self):
self.args = None
self.logger = None
self.match_plugins = []
self.action_plugins = []
@staticmethod
def parse_arguments(arguments=sys.argv[1:]):
"""
Process command line arguments
:param: List of strings containing command line arguments, defaults to sys.argv[1:]
:return: docopt args object
"""
args = docopt.docopt(doc="""
Smart File Sorter
Usage:
sfp RULEFILE DIRECTORY [--debug] [--dry-run] [--log LOGFILE]
sfp [--debug] --list-plugins
Options:
RULEFILE Rule configuration file to execute
DIRECTORY Directory of files to process
--debug Log extra information during processing
--dry-run Log actions but do not make any changes
--log LOGFILE Specify log output file
--list-plugins Print match and action plugin information
""", argv=arguments)
return args
def create_logger(self, args={}):
"""
Create and configure the program's logger object.
Log levels:
DEBUG - Log everything. Hidden unless --debug is used.
INFO - information only
ERROR - Critical error
:param args: Object containing program's parsed command line arguments
:return: None
"""
# Set up logging
logger = logging.getLogger("SmartFileSorter")
logger.level = logging.INFO
if '--debug' in args and args['--debug'] is True:
logger.setLevel(logging.DEBUG)
file_log_formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s',
'%Y-%m-%d %H:%M:%S')
console_log_formatter = logging.Formatter('%(message)s')
# Log to stdout
stdout_stream = logging.StreamHandler(stream=sys.stdout)
stdout_stream.setFormatter(console_log_formatter)
logger.addHandler(stdout_stream)
# Log to file if the option is chosen
if '--log' in args and args['--log'] is not None:
logfile = open(args['--log'], 'w')
logfile_stream = logging.StreamHandler(stream=logfile)
logfile_stream.setFormatter(file_log_formatter)
logger.addHandler(logfile_stream)
if '--dry-run' in args and args['--dry-run'] is True:
logger.info('Running with --dry-run parameter. Actions will not be performed.')
self.logger = logger
def get_files(self, path):
"""
Yields full path and filename for each file that exists in the given directory. Will
ignore hidden files (that start with a ".") and directories
:param path: Directory or filename
:return:
"""
if os.path.isfile(path):
self.logger.debug('Called with single file as target: {0}'.format(path))
yield path
return
self.logger.debug('Getting list of files in {0}'.format(path))
try:
for f in os.listdir(path):
cur_file = os.path.join(path, f)
if f[0] != '.' and os.path.isfile(cur_file):
yield cur_file
except OSError:
self.logger.error('Could not read files from {0}'.format(path))
raise
def load_plugins(self, plugin_path):
"""
Loads plugins from modules in plugin_path. Looks for the config_name property
in each object that's found. If so, adds that to the dictionary with the
config_name as the key. config_name should be unique between different plugins.
:param plugin_path: Path to load plugins from
:return: dictionary of plugins by config_name
"""
self.logger.debug('Loading plugins from {0}'.format(plugin_path))
plugins = {}
plugin_dir = os.path.realpath(plugin_path)
sys.path.append(plugin_dir)
for f in os.listdir(plugin_dir):
if f.endswith(".py"):
name = f[:-3]
elif f.endswith(".pyc"):
name = f[:-4]
# Possible support for plugins inside directories - worth doing?
# elif os.path.isdir(os.path.join(plugin_dir, f)):
# name = f
else:
continue
try:
self.logger.debug('Adding plugin from: {0}'.format(f))
mod = __import__(name, globals(), locals(), [], 0)
for plugin_class in inspect.getmembers(mod):
if plugin_class[0][0:2] == '__': # Skip dunder members - builtins, etc
continue
if hasattr(plugin_class[1], 'config_name'):
if plugin_class[1].config_name is not None:
# Skip plugins where config_name is None, like the base classes
plugins[plugin_class[1].config_name] = plugin_class[1]
self.logger.debug('Added plugin: {0}'.format(plugin_class[1].config_name))
# Todo: Add error checking here. If a plugin with that name already exists,
# log an error. Quit or continue?
except ImportError as e:
self.logger.error(e)
pass # problem importing
self.logger.debug('Done loading plugins')
return plugins
@staticmethod
def build_rules(rule_yaml, match_plugins, action_plugins):
"""
Convert parsed rule YAML in to a list of ruleset objects
:param rule_yaml: Dictionary parsed from YAML rule file
:param match_plugins: Dictionary of match plugins (key=config_name, value=plugin object)
:param action_plugins: Dictionary of action plugins (key=config_name, value=plugin object)
:return: list of rules
"""
rule_sets = []
for yaml_section in rule_yaml:
rule_sets.append(RuleSet(yaml_section, match_plugins=match_plugins, action_plugins=action_plugins))
return rule_sets
def run(self, args):
"""
Load plugins and run with the configuration given in args
:param args: Object containing program's parsed command line arguments
:return: None
"""
module_dir = os.path.dirname(__file__)
self.match_plugins = self.load_plugins(os.path.join(module_dir, 'matchplugins/'))
self.action_plugins = self.load_plugins(os.path.join(module_dir, 'actionplugins/'))
if args['--list-plugins'] is True:
print("\nAvailable Match Plugins:")
for m in sorted(self.match_plugins):
print(m)
print("\nAvailable Action Plugins:")
for a in sorted(self.action_plugins):
print(a)
sys.exit()
rule_yaml = self.load_rules(args['RULEFILE'])
rules = self.build_rules(rule_yaml, self.match_plugins, self.action_plugins)
files_analyzed = 0
files_matched = 0
result_count = collections.defaultdict(int)
for cur_file in self.get_files(args['DIRECTORY']):
self.logger.debug("Processing {0}".format(cur_file))
files_analyzed += 1
for ruleset in rules:
if ruleset.matches_all_rules(cur_file):
files_matched += 1
result_count[ruleset] += 1
# If the file matches all rules in the ruleset, do whatever
# actions the ruleset specifies. Stop processing if the
# ruleset says stop.
try:
ruleset.do_actions(cur_file, args['--dry-run'])
except StopProcessingException:
break
self.logger.info("Files matched: {0}/{1}".format(files_matched, files_analyzed))
for ruleset in sorted(result_count):
self.logger.info("{0}: {1} files".format(ruleset.name, result_count[ruleset]))
|
jashort/SmartFileSorter
|
smartfilesorter/smartfilesorter.py
|
SmartFileSorter.load_plugins
|
python
|
def load_plugins(self, plugin_path):
self.logger.debug('Loading plugins from {0}'.format(plugin_path))
plugins = {}
plugin_dir = os.path.realpath(plugin_path)
sys.path.append(plugin_dir)
for f in os.listdir(plugin_dir):
if f.endswith(".py"):
name = f[:-3]
elif f.endswith(".pyc"):
name = f[:-4]
# Possible support for plugins inside directories - worth doing?
# elif os.path.isdir(os.path.join(plugin_dir, f)):
# name = f
else:
continue
try:
self.logger.debug('Adding plugin from: {0}'.format(f))
mod = __import__(name, globals(), locals(), [], 0)
for plugin_class in inspect.getmembers(mod):
if plugin_class[0][0:2] == '__': # Skip dunder members - builtins, etc
continue
if hasattr(plugin_class[1], 'config_name'):
if plugin_class[1].config_name is not None:
# Skip plugins where config_name is None, like the base classes
plugins[plugin_class[1].config_name] = plugin_class[1]
self.logger.debug('Added plugin: {0}'.format(plugin_class[1].config_name))
# Todo: Add error checking here. If a plugin with that name already exists,
# log an error. Quit or continue?
except ImportError as e:
self.logger.error(e)
pass # problem importing
self.logger.debug('Done loading plugins')
return plugins
|
Loads plugins from modules in plugin_path. Looks for the config_name property
in each object that's found. If so, adds that to the dictionary with the
config_name as the key. config_name should be unique between different plugins.
:param plugin_path: Path to load plugins from
:return: dictionary of plugins by config_name
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/smartfilesorter.py#L132-L178
| null |
class SmartFileSorter(object):
def __init__(self):
self.args = None
self.logger = None
self.match_plugins = []
self.action_plugins = []
@staticmethod
def parse_arguments(arguments=sys.argv[1:]):
"""
Process command line arguments
:param: List of strings containing command line arguments, defaults to sys.argv[1:]
:return: docopt args object
"""
args = docopt.docopt(doc="""
Smart File Sorter
Usage:
sfp RULEFILE DIRECTORY [--debug] [--dry-run] [--log LOGFILE]
sfp [--debug] --list-plugins
Options:
RULEFILE Rule configuration file to execute
DIRECTORY Directory of files to process
--debug Log extra information during processing
--dry-run Log actions but do not make any changes
--log LOGFILE Specify log output file
--list-plugins Print match and action plugin information
""", argv=arguments)
return args
def create_logger(self, args={}):
"""
Create and configure the program's logger object.
Log levels:
DEBUG - Log everything. Hidden unless --debug is used.
INFO - information only
ERROR - Critical error
:param args: Object containing program's parsed command line arguments
:return: None
"""
# Set up logging
logger = logging.getLogger("SmartFileSorter")
logger.level = logging.INFO
if '--debug' in args and args['--debug'] is True:
logger.setLevel(logging.DEBUG)
file_log_formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s',
'%Y-%m-%d %H:%M:%S')
console_log_formatter = logging.Formatter('%(message)s')
# Log to stdout
stdout_stream = logging.StreamHandler(stream=sys.stdout)
stdout_stream.setFormatter(console_log_formatter)
logger.addHandler(stdout_stream)
# Log to file if the option is chosen
if '--log' in args and args['--log'] is not None:
logfile = open(args['--log'], 'w')
logfile_stream = logging.StreamHandler(stream=logfile)
logfile_stream.setFormatter(file_log_formatter)
logger.addHandler(logfile_stream)
if '--dry-run' in args and args['--dry-run'] is True:
logger.info('Running with --dry-run parameter. Actions will not be performed.')
self.logger = logger
def get_files(self, path):
"""
Yields full path and filename for each file that exists in the given directory. Will
ignore hidden files (that start with a ".") and directories
:param path: Directory or filename
:return:
"""
if os.path.isfile(path):
self.logger.debug('Called with single file as target: {0}'.format(path))
yield path
return
self.logger.debug('Getting list of files in {0}'.format(path))
try:
for f in os.listdir(path):
cur_file = os.path.join(path, f)
if f[0] != '.' and os.path.isfile(cur_file):
yield cur_file
except OSError:
self.logger.error('Could not read files from {0}'.format(path))
raise
def load_rules(self, filename):
"""
Load rules from YAML configuration in the given stream object
:param filename: Filename of rule YAML file
:return: rules object
"""
self.logger.debug('Reading rules from %s', filename)
try:
in_file = open(filename)
except IOError:
self.logger.error('Error opening {0}'.format(filename))
raise
y = None
try:
y = yaml.load(in_file)
except yaml.YAMLError as exc:
if hasattr(exc, 'problem_mark'):
self.logger.error('Error parsing rules{0}'.format(exc.problem_mark))
else:
self.logger.error('Error parsing rules in {0}'.format(in_file.name))
raise
return y
@staticmethod
def build_rules(rule_yaml, match_plugins, action_plugins):
"""
Convert parsed rule YAML in to a list of ruleset objects
:param rule_yaml: Dictionary parsed from YAML rule file
:param match_plugins: Dictionary of match plugins (key=config_name, value=plugin object)
:param action_plugins: Dictionary of action plugins (key=config_name, value=plugin object)
:return: list of rules
"""
rule_sets = []
for yaml_section in rule_yaml:
rule_sets.append(RuleSet(yaml_section, match_plugins=match_plugins, action_plugins=action_plugins))
return rule_sets
def run(self, args):
"""
Load plugins and run with the configuration given in args
:param args: Object containing program's parsed command line arguments
:return: None
"""
module_dir = os.path.dirname(__file__)
self.match_plugins = self.load_plugins(os.path.join(module_dir, 'matchplugins/'))
self.action_plugins = self.load_plugins(os.path.join(module_dir, 'actionplugins/'))
if args['--list-plugins'] is True:
print("\nAvailable Match Plugins:")
for m in sorted(self.match_plugins):
print(m)
print("\nAvailable Action Plugins:")
for a in sorted(self.action_plugins):
print(a)
sys.exit()
rule_yaml = self.load_rules(args['RULEFILE'])
rules = self.build_rules(rule_yaml, self.match_plugins, self.action_plugins)
files_analyzed = 0
files_matched = 0
result_count = collections.defaultdict(int)
for cur_file in self.get_files(args['DIRECTORY']):
self.logger.debug("Processing {0}".format(cur_file))
files_analyzed += 1
for ruleset in rules:
if ruleset.matches_all_rules(cur_file):
files_matched += 1
result_count[ruleset] += 1
# If the file matches all rules in the ruleset, do whatever
# actions the ruleset specifies. Stop processing if the
# ruleset says stop.
try:
ruleset.do_actions(cur_file, args['--dry-run'])
except StopProcessingException:
break
self.logger.info("Files matched: {0}/{1}".format(files_matched, files_analyzed))
for ruleset in sorted(result_count):
self.logger.info("{0}: {1} files".format(ruleset.name, result_count[ruleset]))
|
jashort/SmartFileSorter
|
smartfilesorter/smartfilesorter.py
|
SmartFileSorter.build_rules
|
python
|
def build_rules(rule_yaml, match_plugins, action_plugins):
rule_sets = []
for yaml_section in rule_yaml:
rule_sets.append(RuleSet(yaml_section, match_plugins=match_plugins, action_plugins=action_plugins))
return rule_sets
|
Convert parsed rule YAML in to a list of ruleset objects
:param rule_yaml: Dictionary parsed from YAML rule file
:param match_plugins: Dictionary of match plugins (key=config_name, value=plugin object)
:param action_plugins: Dictionary of action plugins (key=config_name, value=plugin object)
:return: list of rules
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/smartfilesorter.py#L181-L193
| null |
class SmartFileSorter(object):
def __init__(self):
self.args = None
self.logger = None
self.match_plugins = []
self.action_plugins = []
@staticmethod
def parse_arguments(arguments=sys.argv[1:]):
"""
Process command line arguments
:param: List of strings containing command line arguments, defaults to sys.argv[1:]
:return: docopt args object
"""
args = docopt.docopt(doc="""
Smart File Sorter
Usage:
sfp RULEFILE DIRECTORY [--debug] [--dry-run] [--log LOGFILE]
sfp [--debug] --list-plugins
Options:
RULEFILE Rule configuration file to execute
DIRECTORY Directory of files to process
--debug Log extra information during processing
--dry-run Log actions but do not make any changes
--log LOGFILE Specify log output file
--list-plugins Print match and action plugin information
""", argv=arguments)
return args
def create_logger(self, args={}):
"""
Create and configure the program's logger object.
Log levels:
DEBUG - Log everything. Hidden unless --debug is used.
INFO - information only
ERROR - Critical error
:param args: Object containing program's parsed command line arguments
:return: None
"""
# Set up logging
logger = logging.getLogger("SmartFileSorter")
logger.level = logging.INFO
if '--debug' in args and args['--debug'] is True:
logger.setLevel(logging.DEBUG)
file_log_formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s',
'%Y-%m-%d %H:%M:%S')
console_log_formatter = logging.Formatter('%(message)s')
# Log to stdout
stdout_stream = logging.StreamHandler(stream=sys.stdout)
stdout_stream.setFormatter(console_log_formatter)
logger.addHandler(stdout_stream)
# Log to file if the option is chosen
if '--log' in args and args['--log'] is not None:
logfile = open(args['--log'], 'w')
logfile_stream = logging.StreamHandler(stream=logfile)
logfile_stream.setFormatter(file_log_formatter)
logger.addHandler(logfile_stream)
if '--dry-run' in args and args['--dry-run'] is True:
logger.info('Running with --dry-run parameter. Actions will not be performed.')
self.logger = logger
def get_files(self, path):
"""
Yields full path and filename for each file that exists in the given directory. Will
ignore hidden files (that start with a ".") and directories
:param path: Directory or filename
:return:
"""
if os.path.isfile(path):
self.logger.debug('Called with single file as target: {0}'.format(path))
yield path
return
self.logger.debug('Getting list of files in {0}'.format(path))
try:
for f in os.listdir(path):
cur_file = os.path.join(path, f)
if f[0] != '.' and os.path.isfile(cur_file):
yield cur_file
except OSError:
self.logger.error('Could not read files from {0}'.format(path))
raise
def load_rules(self, filename):
"""
Load rules from YAML configuration in the given stream object
:param filename: Filename of rule YAML file
:return: rules object
"""
self.logger.debug('Reading rules from %s', filename)
try:
in_file = open(filename)
except IOError:
self.logger.error('Error opening {0}'.format(filename))
raise
y = None
try:
y = yaml.load(in_file)
except yaml.YAMLError as exc:
if hasattr(exc, 'problem_mark'):
self.logger.error('Error parsing rules{0}'.format(exc.problem_mark))
else:
self.logger.error('Error parsing rules in {0}'.format(in_file.name))
raise
return y
def load_plugins(self, plugin_path):
"""
Loads plugins from modules in plugin_path. Looks for the config_name property
in each object that's found. If so, adds that to the dictionary with the
config_name as the key. config_name should be unique between different plugins.
:param plugin_path: Path to load plugins from
:return: dictionary of plugins by config_name
"""
self.logger.debug('Loading plugins from {0}'.format(plugin_path))
plugins = {}
plugin_dir = os.path.realpath(plugin_path)
sys.path.append(plugin_dir)
for f in os.listdir(plugin_dir):
if f.endswith(".py"):
name = f[:-3]
elif f.endswith(".pyc"):
name = f[:-4]
# Possible support for plugins inside directories - worth doing?
# elif os.path.isdir(os.path.join(plugin_dir, f)):
# name = f
else:
continue
try:
self.logger.debug('Adding plugin from: {0}'.format(f))
mod = __import__(name, globals(), locals(), [], 0)
for plugin_class in inspect.getmembers(mod):
if plugin_class[0][0:2] == '__': # Skip dunder members - builtins, etc
continue
if hasattr(plugin_class[1], 'config_name'):
if plugin_class[1].config_name is not None:
# Skip plugins where config_name is None, like the base classes
plugins[plugin_class[1].config_name] = plugin_class[1]
self.logger.debug('Added plugin: {0}'.format(plugin_class[1].config_name))
# Todo: Add error checking here. If a plugin with that name already exists,
# log an error. Quit or continue?
except ImportError as e:
self.logger.error(e)
pass # problem importing
self.logger.debug('Done loading plugins')
return plugins
@staticmethod
def run(self, args):
"""
Load plugins and run with the configuration given in args
:param args: Object containing program's parsed command line arguments
:return: None
"""
module_dir = os.path.dirname(__file__)
self.match_plugins = self.load_plugins(os.path.join(module_dir, 'matchplugins/'))
self.action_plugins = self.load_plugins(os.path.join(module_dir, 'actionplugins/'))
if args['--list-plugins'] is True:
print("\nAvailable Match Plugins:")
for m in sorted(self.match_plugins):
print(m)
print("\nAvailable Action Plugins:")
for a in sorted(self.action_plugins):
print(a)
sys.exit()
rule_yaml = self.load_rules(args['RULEFILE'])
rules = self.build_rules(rule_yaml, self.match_plugins, self.action_plugins)
files_analyzed = 0
files_matched = 0
result_count = collections.defaultdict(int)
for cur_file in self.get_files(args['DIRECTORY']):
self.logger.debug("Processing {0}".format(cur_file))
files_analyzed += 1
for ruleset in rules:
if ruleset.matches_all_rules(cur_file):
files_matched += 1
result_count[ruleset] += 1
# If the file matches all rules in the ruleset, do whatever
# actions the ruleset specifies. Stop processing if the
# ruleset says stop.
try:
ruleset.do_actions(cur_file, args['--dry-run'])
except StopProcessingException:
break
self.logger.info("Files matched: {0}/{1}".format(files_matched, files_analyzed))
for ruleset in sorted(result_count):
self.logger.info("{0}: {1} files".format(ruleset.name, result_count[ruleset]))
|
jashort/SmartFileSorter
|
smartfilesorter/smartfilesorter.py
|
SmartFileSorter.run
|
python
|
def run(self, args):
module_dir = os.path.dirname(__file__)
self.match_plugins = self.load_plugins(os.path.join(module_dir, 'matchplugins/'))
self.action_plugins = self.load_plugins(os.path.join(module_dir, 'actionplugins/'))
if args['--list-plugins'] is True:
print("\nAvailable Match Plugins:")
for m in sorted(self.match_plugins):
print(m)
print("\nAvailable Action Plugins:")
for a in sorted(self.action_plugins):
print(a)
sys.exit()
rule_yaml = self.load_rules(args['RULEFILE'])
rules = self.build_rules(rule_yaml, self.match_plugins, self.action_plugins)
files_analyzed = 0
files_matched = 0
result_count = collections.defaultdict(int)
for cur_file in self.get_files(args['DIRECTORY']):
self.logger.debug("Processing {0}".format(cur_file))
files_analyzed += 1
for ruleset in rules:
if ruleset.matches_all_rules(cur_file):
files_matched += 1
result_count[ruleset] += 1
# If the file matches all rules in the ruleset, do whatever
# actions the ruleset specifies. Stop processing if the
# ruleset says stop.
try:
ruleset.do_actions(cur_file, args['--dry-run'])
except StopProcessingException:
break
self.logger.info("Files matched: {0}/{1}".format(files_matched, files_analyzed))
for ruleset in sorted(result_count):
self.logger.info("{0}: {1} files".format(ruleset.name, result_count[ruleset]))
|
Load plugins and run with the configuration given in args
:param args: Object containing program's parsed command line arguments
:return: None
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/smartfilesorter.py#L195-L239
|
[
"def get_files(self, path):\n \"\"\"\n Yields full path and filename for each file that exists in the given directory. Will\n ignore hidden files (that start with a \".\") and directories\n :param path: Directory or filename\n :return:\n \"\"\"\n\n if os.path.isfile(path):\n self.logger.debug('Called with single file as target: {0}'.format(path))\n yield path\n return\n\n self.logger.debug('Getting list of files in {0}'.format(path))\n\n try:\n for f in os.listdir(path):\n cur_file = os.path.join(path, f)\n if f[0] != '.' and os.path.isfile(cur_file):\n yield cur_file\n except OSError:\n self.logger.error('Could not read files from {0}'.format(path))\n raise\n",
"def load_rules(self, filename):\n \"\"\"\n Load rules from YAML configuration in the given stream object\n :param filename: Filename of rule YAML file\n :return: rules object\n \"\"\"\n self.logger.debug('Reading rules from %s', filename)\n try:\n in_file = open(filename)\n except IOError:\n self.logger.error('Error opening {0}'.format(filename))\n raise\n\n y = None\n try:\n y = yaml.load(in_file)\n except yaml.YAMLError as exc:\n if hasattr(exc, 'problem_mark'):\n self.logger.error('Error parsing rules{0}'.format(exc.problem_mark))\n\n else:\n self.logger.error('Error parsing rules in {0}'.format(in_file.name))\n raise\n return y\n",
"def load_plugins(self, plugin_path):\n \"\"\"\n Loads plugins from modules in plugin_path. Looks for the config_name property\n in each object that's found. If so, adds that to the dictionary with the\n config_name as the key. config_name should be unique between different plugins.\n\n :param plugin_path: Path to load plugins from\n :return: dictionary of plugins by config_name\n \"\"\"\n self.logger.debug('Loading plugins from {0}'.format(plugin_path))\n plugins = {}\n plugin_dir = os.path.realpath(plugin_path)\n sys.path.append(plugin_dir)\n\n for f in os.listdir(plugin_dir):\n if f.endswith(\".py\"):\n name = f[:-3]\n elif f.endswith(\".pyc\"):\n name = f[:-4]\n # Possible support for plugins inside directories - worth doing?\n # elif os.path.isdir(os.path.join(plugin_dir, f)):\n # name = f\n else:\n continue\n\n try:\n self.logger.debug('Adding plugin from: {0}'.format(f))\n mod = __import__(name, globals(), locals(), [], 0)\n\n for plugin_class in inspect.getmembers(mod):\n if plugin_class[0][0:2] == '__': # Skip dunder members - builtins, etc\n continue\n if hasattr(plugin_class[1], 'config_name'):\n if plugin_class[1].config_name is not None:\n # Skip plugins where config_name is None, like the base classes\n plugins[plugin_class[1].config_name] = plugin_class[1]\n self.logger.debug('Added plugin: {0}'.format(plugin_class[1].config_name))\n\n # Todo: Add error checking here. If a plugin with that name already exists,\n # log an error. Quit or continue?\n\n except ImportError as e:\n self.logger.error(e)\n pass # problem importing\n\n self.logger.debug('Done loading plugins')\n return plugins\n",
"def build_rules(rule_yaml, match_plugins, action_plugins):\n \"\"\"\n Convert parsed rule YAML in to a list of ruleset objects\n :param rule_yaml: Dictionary parsed from YAML rule file\n :param match_plugins: Dictionary of match plugins (key=config_name, value=plugin object)\n :param action_plugins: Dictionary of action plugins (key=config_name, value=plugin object)\n :return: list of rules\n \"\"\"\n rule_sets = []\n\n for yaml_section in rule_yaml:\n rule_sets.append(RuleSet(yaml_section, match_plugins=match_plugins, action_plugins=action_plugins))\n return rule_sets\n"
] |
class SmartFileSorter(object):
def __init__(self):
self.args = None
self.logger = None
self.match_plugins = []
self.action_plugins = []
@staticmethod
def parse_arguments(arguments=sys.argv[1:]):
"""
Process command line arguments
:param: List of strings containing command line arguments, defaults to sys.argv[1:]
:return: docopt args object
"""
args = docopt.docopt(doc="""
Smart File Sorter
Usage:
sfp RULEFILE DIRECTORY [--debug] [--dry-run] [--log LOGFILE]
sfp [--debug] --list-plugins
Options:
RULEFILE Rule configuration file to execute
DIRECTORY Directory of files to process
--debug Log extra information during processing
--dry-run Log actions but do not make any changes
--log LOGFILE Specify log output file
--list-plugins Print match and action plugin information
""", argv=arguments)
return args
def create_logger(self, args={}):
"""
Create and configure the program's logger object.
Log levels:
DEBUG - Log everything. Hidden unless --debug is used.
INFO - information only
ERROR - Critical error
:param args: Object containing program's parsed command line arguments
:return: None
"""
# Set up logging
logger = logging.getLogger("SmartFileSorter")
logger.level = logging.INFO
if '--debug' in args and args['--debug'] is True:
logger.setLevel(logging.DEBUG)
file_log_formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s',
'%Y-%m-%d %H:%M:%S')
console_log_formatter = logging.Formatter('%(message)s')
# Log to stdout
stdout_stream = logging.StreamHandler(stream=sys.stdout)
stdout_stream.setFormatter(console_log_formatter)
logger.addHandler(stdout_stream)
# Log to file if the option is chosen
if '--log' in args and args['--log'] is not None:
logfile = open(args['--log'], 'w')
logfile_stream = logging.StreamHandler(stream=logfile)
logfile_stream.setFormatter(file_log_formatter)
logger.addHandler(logfile_stream)
if '--dry-run' in args and args['--dry-run'] is True:
logger.info('Running with --dry-run parameter. Actions will not be performed.')
self.logger = logger
def get_files(self, path):
"""
Yields full path and filename for each file that exists in the given directory. Will
ignore hidden files (that start with a ".") and directories
:param path: Directory or filename
:return:
"""
if os.path.isfile(path):
self.logger.debug('Called with single file as target: {0}'.format(path))
yield path
return
self.logger.debug('Getting list of files in {0}'.format(path))
try:
for f in os.listdir(path):
cur_file = os.path.join(path, f)
if f[0] != '.' and os.path.isfile(cur_file):
yield cur_file
except OSError:
self.logger.error('Could not read files from {0}'.format(path))
raise
def load_rules(self, filename):
"""
Load rules from YAML configuration in the given stream object
:param filename: Filename of rule YAML file
:return: rules object
"""
self.logger.debug('Reading rules from %s', filename)
try:
in_file = open(filename)
except IOError:
self.logger.error('Error opening {0}'.format(filename))
raise
y = None
try:
y = yaml.load(in_file)
except yaml.YAMLError as exc:
if hasattr(exc, 'problem_mark'):
self.logger.error('Error parsing rules{0}'.format(exc.problem_mark))
else:
self.logger.error('Error parsing rules in {0}'.format(in_file.name))
raise
return y
def load_plugins(self, plugin_path):
"""
Loads plugins from modules in plugin_path. Looks for the config_name property
in each object that's found. If so, adds that to the dictionary with the
config_name as the key. config_name should be unique between different plugins.
:param plugin_path: Path to load plugins from
:return: dictionary of plugins by config_name
"""
self.logger.debug('Loading plugins from {0}'.format(plugin_path))
plugins = {}
plugin_dir = os.path.realpath(plugin_path)
sys.path.append(plugin_dir)
for f in os.listdir(plugin_dir):
if f.endswith(".py"):
name = f[:-3]
elif f.endswith(".pyc"):
name = f[:-4]
# Possible support for plugins inside directories - worth doing?
# elif os.path.isdir(os.path.join(plugin_dir, f)):
# name = f
else:
continue
try:
self.logger.debug('Adding plugin from: {0}'.format(f))
mod = __import__(name, globals(), locals(), [], 0)
for plugin_class in inspect.getmembers(mod):
if plugin_class[0][0:2] == '__': # Skip dunder members - builtins, etc
continue
if hasattr(plugin_class[1], 'config_name'):
if plugin_class[1].config_name is not None:
# Skip plugins where config_name is None, like the base classes
plugins[plugin_class[1].config_name] = plugin_class[1]
self.logger.debug('Added plugin: {0}'.format(plugin_class[1].config_name))
# Todo: Add error checking here. If a plugin with that name already exists,
# log an error. Quit or continue?
except ImportError as e:
self.logger.error(e)
pass # problem importing
self.logger.debug('Done loading plugins')
return plugins
@staticmethod
def build_rules(rule_yaml, match_plugins, action_plugins):
"""
Convert parsed rule YAML in to a list of ruleset objects
:param rule_yaml: Dictionary parsed from YAML rule file
:param match_plugins: Dictionary of match plugins (key=config_name, value=plugin object)
:param action_plugins: Dictionary of action plugins (key=config_name, value=plugin object)
:return: list of rules
"""
rule_sets = []
for yaml_section in rule_yaml:
rule_sets.append(RuleSet(yaml_section, match_plugins=match_plugins, action_plugins=action_plugins))
return rule_sets
|
jashort/SmartFileSorter
|
smartfilesorter/actionplugins/printfileinfo.py
|
PrintFileInfo.do_action
|
python
|
def do_action(self, target, dry_run=False):
if dry_run is False:
try:
filename = os.path.basename(target)
size = os.path.getsize(target)
print("{0}\t{1}".format(filename, size))
except OSError:
self.logger.error("Error getting size for file: {0}".format(target))
return target
|
:param target: Full path and filename
:param dry_run: True - don't actually perform action. False: perform action. No effect for this rule.
:return: filename: Full path and filename after action completes
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/actionplugins/printfileinfo.py#L15-L29
| null |
class PrintFileInfo(object):
"""
Prints the filename and size to stdout. Mostly used for testing.
"""
config_name = 'print-file-info'
def __init__(self, value=None):
self.value = value
self.logger = logging.getLogger(__name__)
|
jashort/SmartFileSorter
|
smartfilesorter/actionplugins/moveto.py
|
MoveTo.do_action
|
python
|
def do_action(self, target, dry_run=False):
# Get full path to the file in the destination directory
new_filename = os.path.join(self.destination, os.path.basename(target))
# If the file exists in the destination directory, append _NNN to the name where NNN is
# a zero padded number. Starts at _001
while os.path.exists(new_filename):
# if filename ends in _NNN, start at that number
filename, extension = os.path.splitext(os.path.basename(new_filename))
if filename[-4] == '_' and filename[-3:].isdigit():
# Split the number off the filename and increment it, handle suffixes longer than 3 numbers
current = filename.split('_')[-1]
filename_root = filename[0:-len(current)-1]
current = int(current) + 1
new_filename = os.path.join(self.destination,
(filename_root +
"_{0:03d}".format(current) +
extension))
else:
# No number suffix found in the filename, just start at _001
new_filename = os.path.join(self.destination,
(filename + "_001" + extension))
if dry_run is False:
self.logger.debug("Moving {0} to {1}".format(target, new_filename))
if not os.path.exists(new_filename):
try:
shutil.move(target, new_filename)
except IOError:
self.logger.error("Error moving file {0} to {1}".format(target, self.destination))
raise IOError
else:
self.logger.error("Destination file already exists: {0}".format(new_filename))
raise IOError
return new_filename
else:
self.logger.debug("Dry run. Skipping move {0} to {1}".format(target, new_filename))
return target
|
:param target: Full path and filename
:param dry_run: True - don't actually perform action. False: perform action.
:return: filename: Full path and filename after action completes
|
train
|
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/actionplugins/moveto.py#L17-L62
| null |
class MoveTo(object):
"""
Moves a given file to a new directory
"""
config_name = 'move-to'
def __init__(self, destination):
self.destination = os.path.expanduser(destination)
self.continue_processing = False
self.logger = logging.getLogger(__name__)
|
litters/shrew
|
shrew/utils/auth.py
|
unlock_keychain
|
python
|
def unlock_keychain(username):
if 'SSH_TTY' not in os.environ:
return
# Don't unlock if we've already seen this user.
if username in _unlocked:
return
_unlocked.add(username)
if sys.platform == 'darwin':
sys.stderr.write("You are running under SSH. Please unlock your local OS X KeyChain:\n")
subprocess.call(['security', 'unlock-keychain'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
If the user is running via SSH, their Keychain must be unlocked first.
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/utils/auth.py#L133-L147
| null |
#
# Copyright 2014 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Deal with password loading & saving.
To opt-in to the various key chains, please create a file: ~/.shew/config.ini with the contents:
[keychain]
gnome-keychain.enable = True
kde-keychain.enable = True
crypted-keychain.enable = True
Note that the OS X keychain is always available on OS X.
"""
import getpass
from logging import getLogger
import os
import subprocess
import sys
from brownie.caching import memoize
import keyring
from keyring.backend import OSXKeychain, GnomeKeyring, KDEKWallet, CryptedFileKeyring
from keyring.errors import PasswordSetError
from shrew.utils.config import load_config
AUTH_SECTION = 'org.asf.auth'
KEYCHAIN_SECTION = 'keychain'
log = getLogger(__name__)
_unlocked = set()
AUTH_CONFIG_DEFAULTS = {'gnome-keychain.enable': False,
'kde-keychain.enable': False,
'crypted-keychain.enable': False}
AUTH_SECTIONS = [AUTH_SECTION, KEYCHAIN_SECTION]
class FixedOSXKeychain(OSXKeychain):
""" OSXKeychain does not implement delete_password() yet """
@staticmethod
def delete_password(service, username):
"""Delete the password for the username of the service.
"""
try:
# set up the call for security.
call = subprocess.Popen(['security',
'delete-generic-password',
'-a',
username,
'-s',
service],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
_, _ = call.communicate()
code = call.returncode
# check return code.
if code is not 0:
raise PasswordSetError('Can\'t delete password in keychain')
except:
raise PasswordSetError("Can't delete password in keychain")
@memoize
def initialize_keychain():
# NB: keyring has a config file, but it only allows a single keyring to be
# selected, instead of reusing it's supported() method check against a list
# of backend implementations to try.
keyring_backends = []
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
if config.get(KEYCHAIN_SECTION, 'crypted-keychain.enable'):
keyring_backends.insert(0, CryptedFileKeyring())
if config.get(KEYCHAIN_SECTION, 'kde-keychain.enable'):
keyring_backends.insert(0, KDEKWallet())
if config.get(KEYCHAIN_SECTION, 'gnome-keychain.enable'):
keyring_backends.insert(0, GnomeKeyring())
keyring_backends.insert(0, FixedOSXKeychain())
keyring_backends.sort(key=lambda x: -x.supported())
keyring.set_keyring(keyring_backends[0])
# Return True if there are any supported keychains.
return not all(i.supported() == -1 for i in keyring_backends)
def clear_username_from_store():
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
config.remove(AUTH_SECTION, 'username')
def get_username(use_store=False):
if use_store:
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
if config.has_option(AUTH_SECTION, 'username'):
username = config.get(AUTH_SECTION, 'username')
else:
username = raw_input("Username [%s]: " % getpass.getuser())
if not username:
username = getpass.getuser()
config.set(AUTH_SECTION, 'username', username)
else:
username = raw_input("Username [%s]: " % getpass.getuser())
if not username:
username = getpass.getuser()
return username
def save_password(entry, password, username=None):
"""
Saves the given password in the user's keychain.
:param entry: The entry in the keychain. This is a caller specific key.
:param password: The password to save in the keychain.
:param username: The username to get the password for. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
if has_keychain:
try:
keyring.set_password(entry, username, password)
except Exception as e:
log.warn("Unable to set password in keyring. Continuing..")
log.debug(e)
def remove_password(entry, username=None):
"""
Removes the password for the specific user in the user's keychain.
:param entry: The entry in the keychain. This is a caller specific key.
:param username: The username whose password is to be removed. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
if has_keychain:
try:
keyring.delete_password(entry, username)
except Exception as e:
print e
log.warn("Unable to delete password in keyring. Continuing..")
log.debug(e)
def get_password(entry=None, username=None, prompt=None, always_ask=False):
"""
Prompt the user for a password on stdin.
:param username: The username to get the password for. Default is the current user.
:param entry: The entry in the keychain. This is a caller specific key.
:param prompt: The entry in the keychain. This is a caller specific key.
:param always_ask: Force the user to enter the password every time.
"""
password = None
if username is None:
username = get_username()
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if prompt is None:
prompt = "Enter %s's password: " % username
if has_keychain and entry is not None and always_ask is False:
password = get_password_from_keyring(entry, username)
if password is None:
password = getpass.getpass(prompt=prompt)
return password
def get_password_from_keyring(entry=None, username=None):
"""
:param entry: The entry in the keychain. This is a caller specific key.
:param username: The username to get the password for. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if has_keychain and entry is not None:
try:
return keyring.get_password(entry, username)
except Exception as e:
log.warn("Unable to get password from keyring. Continuing..")
log.debug(e)
return None
def validate_password(entry, username, check_function, password=None, retries=1, save_on_success=True, prompt=None, **check_args):
"""
Validate a password with a check function & retry if the password is incorrect.
Useful for after a user has changed their password in LDAP, but their local keychain entry is then out of sync.
:param str entry: The keychain entry to fetch a password from.
:param str username: The username to authenticate
:param func check_function: Check function to use. Should take (username, password, **check_args)
:param str password: The password to validate. If `None`, the user will be prompted.
:param int retries: Number of retries to prompt the user for.
:param bool save_on_success: Save the password if the validation was successful.
:param str prompt: Alternate prompt to use when asking for the user's password.
:returns: `True` on successful authentication. `False` otherwise.
:rtype: bool
"""
if password is None:
password = get_password(entry, username, prompt)
for _ in xrange(retries + 1):
if check_function(username, password, **check_args):
if save_on_success:
save_password(entry, password, username)
return True
log.error("Couldn't successfully authenticate your username & password..")
password = get_password(entry, username, prompt, always_ask=True)
return False
def get_stored_credentials():
"""
Gets the credentials, username and password, that have been stored in
~/.shrew/config.ini and the secure keychain respectively without bothering
to prompt the user if either credential cannot be found.
:returns: username and password
:rtype: tuple of str
"""
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
username = config.get(AUTH_SECTION, 'username')
if not username:
# if we don't have a username then we cannot lookup the password
return None, None
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if has_keychain:
try:
password = keyring.get_password(AUTH_SECTION, username)
return username, password
except Exception as e:
log.warn("Unable to get password from keyring. Continuing..")
log.debug(e)
return username, None
|
litters/shrew
|
shrew/utils/auth.py
|
save_password
|
python
|
def save_password(entry, password, username=None):
if username is None:
username = get_username()
has_keychain = initialize_keychain()
if has_keychain:
try:
keyring.set_password(entry, username, password)
except Exception as e:
log.warn("Unable to set password in keyring. Continuing..")
log.debug(e)
|
Saves the given password in the user's keychain.
:param entry: The entry in the keychain. This is a caller specific key.
:param password: The password to save in the keychain.
:param username: The username to get the password for. Default is the current user.
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/utils/auth.py#L150-L169
|
[
"def get_username(use_store=False):\n if use_store:\n with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:\n if config.has_option(AUTH_SECTION, 'username'):\n username = config.get(AUTH_SECTION, 'username')\n else:\n username = raw_input(\"Username [%s]: \" % getpass.getuser())\n if not username:\n username = getpass.getuser()\n config.set(AUTH_SECTION, 'username', username)\n else:\n username = raw_input(\"Username [%s]: \" % getpass.getuser())\n if not username:\n username = getpass.getuser()\n\n return username\n"
] |
#
# Copyright 2014 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Deal with password loading & saving.
To opt-in to the various key chains, please create a file: ~/.shew/config.ini with the contents:
[keychain]
gnome-keychain.enable = True
kde-keychain.enable = True
crypted-keychain.enable = True
Note that the OS X keychain is always available on OS X.
"""
import getpass
from logging import getLogger
import os
import subprocess
import sys
from brownie.caching import memoize
import keyring
from keyring.backend import OSXKeychain, GnomeKeyring, KDEKWallet, CryptedFileKeyring
from keyring.errors import PasswordSetError
from shrew.utils.config import load_config
AUTH_SECTION = 'org.asf.auth'
KEYCHAIN_SECTION = 'keychain'
log = getLogger(__name__)
_unlocked = set()
AUTH_CONFIG_DEFAULTS = {'gnome-keychain.enable': False,
'kde-keychain.enable': False,
'crypted-keychain.enable': False}
AUTH_SECTIONS = [AUTH_SECTION, KEYCHAIN_SECTION]
class FixedOSXKeychain(OSXKeychain):
""" OSXKeychain does not implement delete_password() yet """
@staticmethod
def delete_password(service, username):
"""Delete the password for the username of the service.
"""
try:
# set up the call for security.
call = subprocess.Popen(['security',
'delete-generic-password',
'-a',
username,
'-s',
service],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
_, _ = call.communicate()
code = call.returncode
# check return code.
if code is not 0:
raise PasswordSetError('Can\'t delete password in keychain')
except:
raise PasswordSetError("Can't delete password in keychain")
@memoize
def initialize_keychain():
# NB: keyring has a config file, but it only allows a single keyring to be
# selected, instead of reusing it's supported() method check against a list
# of backend implementations to try.
keyring_backends = []
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
if config.get(KEYCHAIN_SECTION, 'crypted-keychain.enable'):
keyring_backends.insert(0, CryptedFileKeyring())
if config.get(KEYCHAIN_SECTION, 'kde-keychain.enable'):
keyring_backends.insert(0, KDEKWallet())
if config.get(KEYCHAIN_SECTION, 'gnome-keychain.enable'):
keyring_backends.insert(0, GnomeKeyring())
keyring_backends.insert(0, FixedOSXKeychain())
keyring_backends.sort(key=lambda x: -x.supported())
keyring.set_keyring(keyring_backends[0])
# Return True if there are any supported keychains.
return not all(i.supported() == -1 for i in keyring_backends)
def clear_username_from_store():
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
config.remove(AUTH_SECTION, 'username')
def get_username(use_store=False):
if use_store:
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
if config.has_option(AUTH_SECTION, 'username'):
username = config.get(AUTH_SECTION, 'username')
else:
username = raw_input("Username [%s]: " % getpass.getuser())
if not username:
username = getpass.getuser()
config.set(AUTH_SECTION, 'username', username)
else:
username = raw_input("Username [%s]: " % getpass.getuser())
if not username:
username = getpass.getuser()
return username
def unlock_keychain(username):
""" If the user is running via SSH, their Keychain must be unlocked first. """
if 'SSH_TTY' not in os.environ:
return
# Don't unlock if we've already seen this user.
if username in _unlocked:
return
_unlocked.add(username)
if sys.platform == 'darwin':
sys.stderr.write("You are running under SSH. Please unlock your local OS X KeyChain:\n")
subprocess.call(['security', 'unlock-keychain'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def remove_password(entry, username=None):
"""
Removes the password for the specific user in the user's keychain.
:param entry: The entry in the keychain. This is a caller specific key.
:param username: The username whose password is to be removed. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
if has_keychain:
try:
keyring.delete_password(entry, username)
except Exception as e:
print e
log.warn("Unable to delete password in keyring. Continuing..")
log.debug(e)
def get_password(entry=None, username=None, prompt=None, always_ask=False):
"""
Prompt the user for a password on stdin.
:param username: The username to get the password for. Default is the current user.
:param entry: The entry in the keychain. This is a caller specific key.
:param prompt: The entry in the keychain. This is a caller specific key.
:param always_ask: Force the user to enter the password every time.
"""
password = None
if username is None:
username = get_username()
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if prompt is None:
prompt = "Enter %s's password: " % username
if has_keychain and entry is not None and always_ask is False:
password = get_password_from_keyring(entry, username)
if password is None:
password = getpass.getpass(prompt=prompt)
return password
def get_password_from_keyring(entry=None, username=None):
"""
:param entry: The entry in the keychain. This is a caller specific key.
:param username: The username to get the password for. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if has_keychain and entry is not None:
try:
return keyring.get_password(entry, username)
except Exception as e:
log.warn("Unable to get password from keyring. Continuing..")
log.debug(e)
return None
def validate_password(entry, username, check_function, password=None, retries=1, save_on_success=True, prompt=None, **check_args):
"""
Validate a password with a check function & retry if the password is incorrect.
Useful for after a user has changed their password in LDAP, but their local keychain entry is then out of sync.
:param str entry: The keychain entry to fetch a password from.
:param str username: The username to authenticate
:param func check_function: Check function to use. Should take (username, password, **check_args)
:param str password: The password to validate. If `None`, the user will be prompted.
:param int retries: Number of retries to prompt the user for.
:param bool save_on_success: Save the password if the validation was successful.
:param str prompt: Alternate prompt to use when asking for the user's password.
:returns: `True` on successful authentication. `False` otherwise.
:rtype: bool
"""
if password is None:
password = get_password(entry, username, prompt)
for _ in xrange(retries + 1):
if check_function(username, password, **check_args):
if save_on_success:
save_password(entry, password, username)
return True
log.error("Couldn't successfully authenticate your username & password..")
password = get_password(entry, username, prompt, always_ask=True)
return False
def get_stored_credentials():
"""
Gets the credentials, username and password, that have been stored in
~/.shrew/config.ini and the secure keychain respectively without bothering
to prompt the user if either credential cannot be found.
:returns: username and password
:rtype: tuple of str
"""
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
username = config.get(AUTH_SECTION, 'username')
if not username:
# if we don't have a username then we cannot lookup the password
return None, None
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if has_keychain:
try:
password = keyring.get_password(AUTH_SECTION, username)
return username, password
except Exception as e:
log.warn("Unable to get password from keyring. Continuing..")
log.debug(e)
return username, None
|
litters/shrew
|
shrew/utils/auth.py
|
remove_password
|
python
|
def remove_password(entry, username=None):
if username is None:
username = get_username()
has_keychain = initialize_keychain()
if has_keychain:
try:
keyring.delete_password(entry, username)
except Exception as e:
print e
log.warn("Unable to delete password in keyring. Continuing..")
log.debug(e)
|
Removes the password for the specific user in the user's keychain.
:param entry: The entry in the keychain. This is a caller specific key.
:param username: The username whose password is to be removed. Default is the current user.
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/utils/auth.py#L172-L191
|
[
"def get_username(use_store=False):\n if use_store:\n with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:\n if config.has_option(AUTH_SECTION, 'username'):\n username = config.get(AUTH_SECTION, 'username')\n else:\n username = raw_input(\"Username [%s]: \" % getpass.getuser())\n if not username:\n username = getpass.getuser()\n config.set(AUTH_SECTION, 'username', username)\n else:\n username = raw_input(\"Username [%s]: \" % getpass.getuser())\n if not username:\n username = getpass.getuser()\n\n return username\n"
] |
#
# Copyright 2014 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Deal with password loading & saving.
To opt-in to the various key chains, please create a file: ~/.shew/config.ini with the contents:
[keychain]
gnome-keychain.enable = True
kde-keychain.enable = True
crypted-keychain.enable = True
Note that the OS X keychain is always available on OS X.
"""
import getpass
from logging import getLogger
import os
import subprocess
import sys
from brownie.caching import memoize
import keyring
from keyring.backend import OSXKeychain, GnomeKeyring, KDEKWallet, CryptedFileKeyring
from keyring.errors import PasswordSetError
from shrew.utils.config import load_config
AUTH_SECTION = 'org.asf.auth'
KEYCHAIN_SECTION = 'keychain'
log = getLogger(__name__)
_unlocked = set()
AUTH_CONFIG_DEFAULTS = {'gnome-keychain.enable': False,
'kde-keychain.enable': False,
'crypted-keychain.enable': False}
AUTH_SECTIONS = [AUTH_SECTION, KEYCHAIN_SECTION]
class FixedOSXKeychain(OSXKeychain):
""" OSXKeychain does not implement delete_password() yet """
@staticmethod
def delete_password(service, username):
"""Delete the password for the username of the service.
"""
try:
# set up the call for security.
call = subprocess.Popen(['security',
'delete-generic-password',
'-a',
username,
'-s',
service],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
_, _ = call.communicate()
code = call.returncode
# check return code.
if code is not 0:
raise PasswordSetError('Can\'t delete password in keychain')
except:
raise PasswordSetError("Can't delete password in keychain")
@memoize
def initialize_keychain():
# NB: keyring has a config file, but it only allows a single keyring to be
# selected, instead of reusing it's supported() method check against a list
# of backend implementations to try.
keyring_backends = []
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
if config.get(KEYCHAIN_SECTION, 'crypted-keychain.enable'):
keyring_backends.insert(0, CryptedFileKeyring())
if config.get(KEYCHAIN_SECTION, 'kde-keychain.enable'):
keyring_backends.insert(0, KDEKWallet())
if config.get(KEYCHAIN_SECTION, 'gnome-keychain.enable'):
keyring_backends.insert(0, GnomeKeyring())
keyring_backends.insert(0, FixedOSXKeychain())
keyring_backends.sort(key=lambda x: -x.supported())
keyring.set_keyring(keyring_backends[0])
# Return True if there are any supported keychains.
return not all(i.supported() == -1 for i in keyring_backends)
def clear_username_from_store():
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
config.remove(AUTH_SECTION, 'username')
def get_username(use_store=False):
if use_store:
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
if config.has_option(AUTH_SECTION, 'username'):
username = config.get(AUTH_SECTION, 'username')
else:
username = raw_input("Username [%s]: " % getpass.getuser())
if not username:
username = getpass.getuser()
config.set(AUTH_SECTION, 'username', username)
else:
username = raw_input("Username [%s]: " % getpass.getuser())
if not username:
username = getpass.getuser()
return username
def unlock_keychain(username):
""" If the user is running via SSH, their Keychain must be unlocked first. """
if 'SSH_TTY' not in os.environ:
return
# Don't unlock if we've already seen this user.
if username in _unlocked:
return
_unlocked.add(username)
if sys.platform == 'darwin':
sys.stderr.write("You are running under SSH. Please unlock your local OS X KeyChain:\n")
subprocess.call(['security', 'unlock-keychain'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def save_password(entry, password, username=None):
"""
Saves the given password in the user's keychain.
:param entry: The entry in the keychain. This is a caller specific key.
:param password: The password to save in the keychain.
:param username: The username to get the password for. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
if has_keychain:
try:
keyring.set_password(entry, username, password)
except Exception as e:
log.warn("Unable to set password in keyring. Continuing..")
log.debug(e)
def get_password(entry=None, username=None, prompt=None, always_ask=False):
"""
Prompt the user for a password on stdin.
:param username: The username to get the password for. Default is the current user.
:param entry: The entry in the keychain. This is a caller specific key.
:param prompt: The entry in the keychain. This is a caller specific key.
:param always_ask: Force the user to enter the password every time.
"""
password = None
if username is None:
username = get_username()
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if prompt is None:
prompt = "Enter %s's password: " % username
if has_keychain and entry is not None and always_ask is False:
password = get_password_from_keyring(entry, username)
if password is None:
password = getpass.getpass(prompt=prompt)
return password
def get_password_from_keyring(entry=None, username=None):
"""
:param entry: The entry in the keychain. This is a caller specific key.
:param username: The username to get the password for. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if has_keychain and entry is not None:
try:
return keyring.get_password(entry, username)
except Exception as e:
log.warn("Unable to get password from keyring. Continuing..")
log.debug(e)
return None
def validate_password(entry, username, check_function, password=None, retries=1, save_on_success=True, prompt=None, **check_args):
"""
Validate a password with a check function & retry if the password is incorrect.
Useful for after a user has changed their password in LDAP, but their local keychain entry is then out of sync.
:param str entry: The keychain entry to fetch a password from.
:param str username: The username to authenticate
:param func check_function: Check function to use. Should take (username, password, **check_args)
:param str password: The password to validate. If `None`, the user will be prompted.
:param int retries: Number of retries to prompt the user for.
:param bool save_on_success: Save the password if the validation was successful.
:param str prompt: Alternate prompt to use when asking for the user's password.
:returns: `True` on successful authentication. `False` otherwise.
:rtype: bool
"""
if password is None:
password = get_password(entry, username, prompt)
for _ in xrange(retries + 1):
if check_function(username, password, **check_args):
if save_on_success:
save_password(entry, password, username)
return True
log.error("Couldn't successfully authenticate your username & password..")
password = get_password(entry, username, prompt, always_ask=True)
return False
def get_stored_credentials():
"""
Gets the credentials, username and password, that have been stored in
~/.shrew/config.ini and the secure keychain respectively without bothering
to prompt the user if either credential cannot be found.
:returns: username and password
:rtype: tuple of str
"""
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
username = config.get(AUTH_SECTION, 'username')
if not username:
# if we don't have a username then we cannot lookup the password
return None, None
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if has_keychain:
try:
password = keyring.get_password(AUTH_SECTION, username)
return username, password
except Exception as e:
log.warn("Unable to get password from keyring. Continuing..")
log.debug(e)
return username, None
|
litters/shrew
|
shrew/utils/auth.py
|
get_password
|
python
|
def get_password(entry=None, username=None, prompt=None, always_ask=False):
password = None
if username is None:
username = get_username()
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if prompt is None:
prompt = "Enter %s's password: " % username
if has_keychain and entry is not None and always_ask is False:
password = get_password_from_keyring(entry, username)
if password is None:
password = getpass.getpass(prompt=prompt)
return password
|
Prompt the user for a password on stdin.
:param username: The username to get the password for. Default is the current user.
:param entry: The entry in the keychain. This is a caller specific key.
:param prompt: The entry in the keychain. This is a caller specific key.
:param always_ask: Force the user to enter the password every time.
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/utils/auth.py#L194-L223
|
[
"def get_username(use_store=False):\n if use_store:\n with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:\n if config.has_option(AUTH_SECTION, 'username'):\n username = config.get(AUTH_SECTION, 'username')\n else:\n username = raw_input(\"Username [%s]: \" % getpass.getuser())\n if not username:\n username = getpass.getuser()\n config.set(AUTH_SECTION, 'username', username)\n else:\n username = raw_input(\"Username [%s]: \" % getpass.getuser())\n if not username:\n username = getpass.getuser()\n\n return username\n",
"def unlock_keychain(username):\n \"\"\" If the user is running via SSH, their Keychain must be unlocked first. \"\"\"\n\n if 'SSH_TTY' not in os.environ:\n return\n\n # Don't unlock if we've already seen this user.\n if username in _unlocked:\n return\n\n _unlocked.add(username)\n\n if sys.platform == 'darwin':\n sys.stderr.write(\"You are running under SSH. Please unlock your local OS X KeyChain:\\n\")\n subprocess.call(['security', 'unlock-keychain'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n",
"def get_password_from_keyring(entry=None, username=None):\n \"\"\"\n :param entry: The entry in the keychain. This is a caller specific key.\n :param username: The username to get the password for. Default is the current user.\n \"\"\"\n\n if username is None:\n username = get_username()\n\n has_keychain = initialize_keychain()\n\n # Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.\n unlock_keychain(username)\n\n if has_keychain and entry is not None:\n try:\n return keyring.get_password(entry, username)\n except Exception as e:\n log.warn(\"Unable to get password from keyring. Continuing..\")\n log.debug(e)\n\n return None\n"
] |
#
# Copyright 2014 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Deal with password loading & saving.
To opt-in to the various key chains, please create a file: ~/.shew/config.ini with the contents:
[keychain]
gnome-keychain.enable = True
kde-keychain.enable = True
crypted-keychain.enable = True
Note that the OS X keychain is always available on OS X.
"""
import getpass
from logging import getLogger
import os
import subprocess
import sys
from brownie.caching import memoize
import keyring
from keyring.backend import OSXKeychain, GnomeKeyring, KDEKWallet, CryptedFileKeyring
from keyring.errors import PasswordSetError
from shrew.utils.config import load_config
AUTH_SECTION = 'org.asf.auth'
KEYCHAIN_SECTION = 'keychain'
log = getLogger(__name__)
_unlocked = set()
AUTH_CONFIG_DEFAULTS = {'gnome-keychain.enable': False,
'kde-keychain.enable': False,
'crypted-keychain.enable': False}
AUTH_SECTIONS = [AUTH_SECTION, KEYCHAIN_SECTION]
class FixedOSXKeychain(OSXKeychain):
""" OSXKeychain does not implement delete_password() yet """
@staticmethod
def delete_password(service, username):
"""Delete the password for the username of the service.
"""
try:
# set up the call for security.
call = subprocess.Popen(['security',
'delete-generic-password',
'-a',
username,
'-s',
service],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
_, _ = call.communicate()
code = call.returncode
# check return code.
if code is not 0:
raise PasswordSetError('Can\'t delete password in keychain')
except:
raise PasswordSetError("Can't delete password in keychain")
@memoize
def initialize_keychain():
    """Pick the best available keyring backend and install it globally.

    keyring's own config file only lets a single backend be selected, so
    instead we assemble a candidate list (OS X always; GNOME/KDE/crypted
    when opted-in via ~/.shrew/config.ini), rank it with each backend's
    supported() score and install the winner.

    :returns: True when at least one candidate backend is supported.
    """
    candidates = [FixedOSXKeychain()]
    with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
        if config.get(KEYCHAIN_SECTION, 'gnome-keychain.enable'):
            candidates.append(GnomeKeyring())
        if config.get(KEYCHAIN_SECTION, 'kde-keychain.enable'):
            candidates.append(KDEKWallet())
        if config.get(KEYCHAIN_SECTION, 'crypted-keychain.enable'):
            candidates.append(CryptedFileKeyring())
    # Stable sort: best supported() score first, ties keep list order.
    candidates.sort(key=lambda backend: -backend.supported())
    keyring.set_keyring(candidates[0])
    # Report whether any candidate is actually usable on this platform.
    return any(backend.supported() != -1 for backend in candidates)
def clear_username_from_store():
    """Forget the username cached in the persistent ~/.shrew config file."""
    with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
        config.remove(AUTH_SECTION, 'username')
def _prompt_username():
    """Ask for a username on stdin, defaulting to the current OS user."""
    username = raw_input("Username [%s]: " % getpass.getuser())
    return username or getpass.getuser()


def get_username(use_store=False):
    """Return the username to authenticate as.

    :param use_store: when True, reuse (or persist) the username cached in
                      the ~/.shrew config file instead of always prompting.
    :returns: the username entered, cached, or the current OS user.
    """
    if not use_store:
        return _prompt_username()
    with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
        if config.has_option(AUTH_SECTION, 'username'):
            username = config.get(AUTH_SECTION, 'username')
        else:
            # First run with the store enabled: ask once, then cache it.
            username = _prompt_username()
            config.set(AUTH_SECTION, 'username', username)
    return username
def unlock_keychain(username):
    """Unlock the local OS X keychain when running over SSH.

    ``security(1)`` fails while the keychain is locked, which is the usual
    state for SSH sessions.  Each username is only unlocked once per process.
    """
    # Nothing to do outside SSH, or if this user was already handled.
    if 'SSH_TTY' not in os.environ or username in _unlocked:
        return
    _unlocked.add(username)
    if sys.platform == 'darwin':
        sys.stderr.write("You are running under SSH. Please unlock your local OS X KeyChain:\n")
        subprocess.call(['security', 'unlock-keychain'],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def save_password(entry, password, username=None):
    """
    Saves the given password in the user's keychain.

    :param entry: The entry in the keychain. This is a caller specific key.
    :param password: The password to save in the keychain.
    :param username: The username to get the password for. Default is the current user.
    """
    if username is None:
        username = get_username()
    # Best effort: without a usable keychain there is nowhere to save to.
    if not initialize_keychain():
        return
    try:
        keyring.set_password(entry, username, password)
    except Exception as e:
        log.warn("Unable to set password in keyring. Continuing..")
        log.debug(e)
def remove_password(entry, username=None):
    """
    Removes the password for the specific user in the user's keychain.

    :param entry: The entry in the keychain. This is a caller specific key.
    :param username: The username whose password is to be removed. Default is the current user.
    """
    if username is None:
        username = get_username()
    has_keychain = initialize_keychain()
    if has_keychain:
        try:
            keyring.delete_password(entry, username)
        except Exception as e:
            # Dropped a leftover debug `print e` (Python 2 print statement);
            # the logger calls below already record the failure.
            log.warn("Unable to delete password in keyring. Continuing..")
            log.debug(e)
def get_password_from_keyring(entry=None, username=None):
    """Fetch a stored password from the keychain, or None when unavailable.

    :param entry: The entry in the keychain. This is a caller specific key.
    :param username: The username to get the password for. Default is the current user.
    :returns: the stored password, or None on any failure.
    """
    if username is None:
        username = get_username()
    has_keychain = initialize_keychain()
    # 'security(1)' errors out under SSH unless the keychain is unlocked.
    unlock_keychain(username)
    if not has_keychain or entry is None:
        return None
    try:
        return keyring.get_password(entry, username)
    except Exception as e:
        log.warn("Unable to get password from keyring. Continuing..")
        log.debug(e)
        return None
def validate_password(entry, username, check_function, password=None, retries=1, save_on_success=True, prompt=None, **check_args):
    """
    Validate a password with a check function & retry if the password is incorrect.

    Useful for after a user has changed their password in LDAP, but their local
    keychain entry is then out of sync.

    :param str entry: The keychain entry to fetch a password from.
    :param str username: The username to authenticate
    :param func check_function: Check function to use. Should take (username, password, **check_args)
    :param str password: The password to validate. If `None`, the user will be prompted.
    :param int retries: Number of retries to prompt the user for.
    :param bool save_on_success: Save the password if the validation was successful.
    :param str prompt: Alternate prompt to use when asking for the user's password.
    :returns: `True` on successful authentication. `False` otherwise.
    :rtype: bool
    """
    if password is None:
        password = get_password(entry, username, prompt)
    attempts_left = retries + 1
    while attempts_left > 0:
        if check_function(username, password, **check_args):
            if save_on_success:
                save_password(entry, password, username)
            return True
        log.error("Couldn't successfully authenticate your username & password..")
        # Re-prompt, bypassing the (stale) keychain entry.
        password = get_password(entry, username, prompt, always_ask=True)
        attempts_left -= 1
    return False
def get_stored_credentials():
    """
    Gets the credentials, username and password, that have been stored in
    ~/.shrew/config.ini and the secure keychain respectively without bothering
    to prompt the user if either credential cannot be found.

    :returns: username and password
    :rtype: tuple of str
    """
    with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
        username = config.get(AUTH_SECTION, 'username')
    if not username:
        # if we don't have a username then we cannot lookup the password
        return None, None
    has_keychain = initialize_keychain()
    # Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
    unlock_keychain(username)
    if has_keychain:
        try:
            password = keyring.get_password(AUTH_SECTION, username)
            return username, password
        except Exception as e:
            log.warn("Unable to get password from keyring. Continuing..")
            log.debug(e)
    # Keychain missing or lookup failed: the caller gets the username only.
    return username, None
|
litters/shrew
|
shrew/utils/auth.py
|
get_password_from_keyring
|
python
|
def get_password_from_keyring(entry=None, username=None):
if username is None:
username = get_username()
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if has_keychain and entry is not None:
try:
return keyring.get_password(entry, username)
except Exception as e:
log.warn("Unable to get password from keyring. Continuing..")
log.debug(e)
return None
|
:param entry: The entry in the keychain. This is a caller specific key.
:param username: The username to get the password for. Default is the current user.
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/utils/auth.py#L226-L247
|
[
"def get_username(use_store=False):\n if use_store:\n with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:\n if config.has_option(AUTH_SECTION, 'username'):\n username = config.get(AUTH_SECTION, 'username')\n else:\n username = raw_input(\"Username [%s]: \" % getpass.getuser())\n if not username:\n username = getpass.getuser()\n config.set(AUTH_SECTION, 'username', username)\n else:\n username = raw_input(\"Username [%s]: \" % getpass.getuser())\n if not username:\n username = getpass.getuser()\n\n return username\n",
"def unlock_keychain(username):\n \"\"\" If the user is running via SSH, their Keychain must be unlocked first. \"\"\"\n\n if 'SSH_TTY' not in os.environ:\n return\n\n # Don't unlock if we've already seen this user.\n if username in _unlocked:\n return\n\n _unlocked.add(username)\n\n if sys.platform == 'darwin':\n sys.stderr.write(\"You are running under SSH. Please unlock your local OS X KeyChain:\\n\")\n subprocess.call(['security', 'unlock-keychain'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n"
] |
#
# Copyright 2014 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Deal with password loading & saving.
To opt-in to the various key chains, please create a file: ~/.shew/config.ini with the contents:
[keychain]
gnome-keychain.enable = True
kde-keychain.enable = True
crypted-keychain.enable = True
Note that the OS X keychain is always available on OS X.
"""
import getpass
from logging import getLogger
import os
import subprocess
import sys
from brownie.caching import memoize
import keyring
from keyring.backend import OSXKeychain, GnomeKeyring, KDEKWallet, CryptedFileKeyring
from keyring.errors import PasswordSetError
from shrew.utils.config import load_config
AUTH_SECTION = 'org.asf.auth'
KEYCHAIN_SECTION = 'keychain'
log = getLogger(__name__)
_unlocked = set()
AUTH_CONFIG_DEFAULTS = {'gnome-keychain.enable': False,
'kde-keychain.enable': False,
'crypted-keychain.enable': False}
AUTH_SECTIONS = [AUTH_SECTION, KEYCHAIN_SECTION]
class FixedOSXKeychain(OSXKeychain):
    """OSXKeychain backend with a working delete_password() implementation.

    The upstream OSXKeychain backend does not implement delete_password()
    yet, so this subclass shells out to the OS X ``security(1)`` tool.
    """

    @staticmethod
    def delete_password(service, username):
        """Delete the password for `username` of `service` from the keychain.

        :param service: the keychain service (entry) name.
        :param username: the account whose password should be removed.
        :raises PasswordSetError: if the password could not be deleted.
        """
        try:
            # set up the call for security.
            call = subprocess.Popen(['security',
                                     'delete-generic-password',
                                     '-a',
                                     username,
                                     '-s',
                                     service],
                                    stderr=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
            call.communicate()
            # BUG FIX: `code is not 0` tested object identity, which only
            # worked by accident via CPython's small-int caching; use a
            # value comparison instead.
            if call.returncode != 0:
                raise PasswordSetError("Can't delete password in keychain")
        except PasswordSetError:
            raise
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise PasswordSetError("Can't delete password in keychain")
@memoize
def initialize_keychain():
# NB: keyring has a config file, but it only allows a single keyring to be
# selected, instead of reusing it's supported() method check against a list
# of backend implementations to try.
keyring_backends = []
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
if config.get(KEYCHAIN_SECTION, 'crypted-keychain.enable'):
keyring_backends.insert(0, CryptedFileKeyring())
if config.get(KEYCHAIN_SECTION, 'kde-keychain.enable'):
keyring_backends.insert(0, KDEKWallet())
if config.get(KEYCHAIN_SECTION, 'gnome-keychain.enable'):
keyring_backends.insert(0, GnomeKeyring())
keyring_backends.insert(0, FixedOSXKeychain())
keyring_backends.sort(key=lambda x: -x.supported())
keyring.set_keyring(keyring_backends[0])
# Return True if there are any supported keychains.
return not all(i.supported() == -1 for i in keyring_backends)
def clear_username_from_store():
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
config.remove(AUTH_SECTION, 'username')
def get_username(use_store=False):
if use_store:
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
if config.has_option(AUTH_SECTION, 'username'):
username = config.get(AUTH_SECTION, 'username')
else:
username = raw_input("Username [%s]: " % getpass.getuser())
if not username:
username = getpass.getuser()
config.set(AUTH_SECTION, 'username', username)
else:
username = raw_input("Username [%s]: " % getpass.getuser())
if not username:
username = getpass.getuser()
return username
def unlock_keychain(username):
""" If the user is running via SSH, their Keychain must be unlocked first. """
if 'SSH_TTY' not in os.environ:
return
# Don't unlock if we've already seen this user.
if username in _unlocked:
return
_unlocked.add(username)
if sys.platform == 'darwin':
sys.stderr.write("You are running under SSH. Please unlock your local OS X KeyChain:\n")
subprocess.call(['security', 'unlock-keychain'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def save_password(entry, password, username=None):
"""
Saves the given password in the user's keychain.
:param entry: The entry in the keychain. This is a caller specific key.
:param password: The password to save in the keychain.
:param username: The username to get the password for. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
if has_keychain:
try:
keyring.set_password(entry, username, password)
except Exception as e:
log.warn("Unable to set password in keyring. Continuing..")
log.debug(e)
def remove_password(entry, username=None):
    """
    Removes the password for the specific user in the user's keychain.

    :param entry: The entry in the keychain. This is a caller specific key.
    :param username: The username whose password is to be removed. Default is the current user.
    """
    if username is None:
        username = get_username()
    has_keychain = initialize_keychain()
    if has_keychain:
        try:
            keyring.delete_password(entry, username)
        except Exception as e:
            # Dropped a leftover debug `print e` (Python 2 print statement);
            # the logger calls below already record the failure.
            log.warn("Unable to delete password in keyring. Continuing..")
            log.debug(e)
def get_password(entry=None, username=None, prompt=None, always_ask=False):
    """Return a password, preferring the keychain over prompting on stdin.

    :param entry: The entry in the keychain. This is a caller specific key.
    :param username: The username to get the password for. Default is the current user.
    :param prompt: Custom prompt text for the interactive fallback.
    :param always_ask: Force the user to enter the password every time.
    """
    if username is None:
        username = get_username()
    has_keychain = initialize_keychain()
    # 'security(1)' errors out under SSH unless the keychain is unlocked.
    unlock_keychain(username)
    password = None
    if has_keychain and entry is not None and always_ask is False:
        password = get_password_from_keyring(entry, username)
    if password is not None:
        return password
    # Keychain miss (or bypass): fall back to an interactive prompt.
    if prompt is None:
        prompt = "Enter %s's password: " % username
    return getpass.getpass(prompt=prompt)
def validate_password(entry, username, check_function, password=None, retries=1, save_on_success=True, prompt=None, **check_args):
"""
Validate a password with a check function & retry if the password is incorrect.
Useful for after a user has changed their password in LDAP, but their local keychain entry is then out of sync.
:param str entry: The keychain entry to fetch a password from.
:param str username: The username to authenticate
:param func check_function: Check function to use. Should take (username, password, **check_args)
:param str password: The password to validate. If `None`, the user will be prompted.
:param int retries: Number of retries to prompt the user for.
:param bool save_on_success: Save the password if the validation was successful.
:param str prompt: Alternate prompt to use when asking for the user's password.
:returns: `True` on successful authentication. `False` otherwise.
:rtype: bool
"""
if password is None:
password = get_password(entry, username, prompt)
for _ in xrange(retries + 1):
if check_function(username, password, **check_args):
if save_on_success:
save_password(entry, password, username)
return True
log.error("Couldn't successfully authenticate your username & password..")
password = get_password(entry, username, prompt, always_ask=True)
return False
def get_stored_credentials():
"""
Gets the credentials, username and password, that have been stored in
~/.shrew/config.ini and the secure keychain respectively without bothering
to prompt the user if either credential cannot be found.
:returns: username and password
:rtype: tuple of str
"""
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
username = config.get(AUTH_SECTION, 'username')
if not username:
# if we don't have a username then we cannot lookup the password
return None, None
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if has_keychain:
try:
password = keyring.get_password(AUTH_SECTION, username)
return username, password
except Exception as e:
log.warn("Unable to get password from keyring. Continuing..")
log.debug(e)
return username, None
|
litters/shrew
|
shrew/utils/auth.py
|
validate_password
|
python
|
def validate_password(entry, username, check_function, password=None, retries=1, save_on_success=True, prompt=None, **check_args):
if password is None:
password = get_password(entry, username, prompt)
for _ in xrange(retries + 1):
if check_function(username, password, **check_args):
if save_on_success:
save_password(entry, password, username)
return True
log.error("Couldn't successfully authenticate your username & password..")
password = get_password(entry, username, prompt, always_ask=True)
return False
|
Validate a password with a check function & retry if the password is incorrect.
Useful for after a user has changed their password in LDAP, but their local keychain entry is then out of sync.
:param str entry: The keychain entry to fetch a password from.
:param str username: The username to authenticate
:param func check_function: Check function to use. Should take (username, password, **check_args)
:param str password: The password to validate. If `None`, the user will be prompted.
:param int retries: Number of retries to prompt the user for.
:param bool save_on_success: Save the password if the validation was successful.
:param str prompt: Alternate prompt to use when asking for the user's password.
:returns: `True` on successful authentication. `False` otherwise.
:rtype: bool
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/utils/auth.py#L250-L283
|
[
"def save_password(entry, password, username=None):\n \"\"\"\n Saves the given password in the user's keychain.\n\n :param entry: The entry in the keychain. This is a caller specific key.\n :param password: The password to save in the keychain.\n :param username: The username to get the password for. Default is the current user.\n \"\"\"\n\n if username is None:\n username = get_username()\n\n has_keychain = initialize_keychain()\n\n if has_keychain:\n try:\n keyring.set_password(entry, username, password)\n except Exception as e:\n log.warn(\"Unable to set password in keyring. Continuing..\")\n log.debug(e)\n",
"def get_password(entry=None, username=None, prompt=None, always_ask=False):\n \"\"\"\n Prompt the user for a password on stdin.\n\n :param username: The username to get the password for. Default is the current user.\n :param entry: The entry in the keychain. This is a caller specific key.\n :param prompt: The entry in the keychain. This is a caller specific key.\n :param always_ask: Force the user to enter the password every time.\n \"\"\"\n\n password = None\n\n if username is None:\n username = get_username()\n\n has_keychain = initialize_keychain()\n\n # Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.\n unlock_keychain(username)\n\n if prompt is None:\n prompt = \"Enter %s's password: \" % username\n\n if has_keychain and entry is not None and always_ask is False:\n password = get_password_from_keyring(entry, username)\n\n if password is None:\n password = getpass.getpass(prompt=prompt)\n\n return password\n"
] |
#
# Copyright 2014 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Deal with password loading & saving.
To opt-in to the various key chains, please create a file: ~/.shew/config.ini with the contents:
[keychain]
gnome-keychain.enable = True
kde-keychain.enable = True
crypted-keychain.enable = True
Note that the OS X keychain is always available on OS X.
"""
import getpass
from logging import getLogger
import os
import subprocess
import sys
from brownie.caching import memoize
import keyring
from keyring.backend import OSXKeychain, GnomeKeyring, KDEKWallet, CryptedFileKeyring
from keyring.errors import PasswordSetError
from shrew.utils.config import load_config
AUTH_SECTION = 'org.asf.auth'
KEYCHAIN_SECTION = 'keychain'
log = getLogger(__name__)
_unlocked = set()
AUTH_CONFIG_DEFAULTS = {'gnome-keychain.enable': False,
'kde-keychain.enable': False,
'crypted-keychain.enable': False}
AUTH_SECTIONS = [AUTH_SECTION, KEYCHAIN_SECTION]
class FixedOSXKeychain(OSXKeychain):
    """OSXKeychain backend with a working delete_password() implementation.

    The upstream OSXKeychain backend does not implement delete_password()
    yet, so this subclass shells out to the OS X ``security(1)`` tool.
    """

    @staticmethod
    def delete_password(service, username):
        """Delete the password for `username` of `service` from the keychain.

        :param service: the keychain service (entry) name.
        :param username: the account whose password should be removed.
        :raises PasswordSetError: if the password could not be deleted.
        """
        try:
            # set up the call for security.
            call = subprocess.Popen(['security',
                                     'delete-generic-password',
                                     '-a',
                                     username,
                                     '-s',
                                     service],
                                    stderr=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
            call.communicate()
            # BUG FIX: `code is not 0` tested object identity, which only
            # worked by accident via CPython's small-int caching; use a
            # value comparison instead.
            if call.returncode != 0:
                raise PasswordSetError("Can't delete password in keychain")
        except PasswordSetError:
            raise
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise PasswordSetError("Can't delete password in keychain")
@memoize
def initialize_keychain():
# NB: keyring has a config file, but it only allows a single keyring to be
# selected, instead of reusing it's supported() method check against a list
# of backend implementations to try.
keyring_backends = []
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
if config.get(KEYCHAIN_SECTION, 'crypted-keychain.enable'):
keyring_backends.insert(0, CryptedFileKeyring())
if config.get(KEYCHAIN_SECTION, 'kde-keychain.enable'):
keyring_backends.insert(0, KDEKWallet())
if config.get(KEYCHAIN_SECTION, 'gnome-keychain.enable'):
keyring_backends.insert(0, GnomeKeyring())
keyring_backends.insert(0, FixedOSXKeychain())
keyring_backends.sort(key=lambda x: -x.supported())
keyring.set_keyring(keyring_backends[0])
# Return True if there are any supported keychains.
return not all(i.supported() == -1 for i in keyring_backends)
def clear_username_from_store():
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
config.remove(AUTH_SECTION, 'username')
def get_username(use_store=False):
if use_store:
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
if config.has_option(AUTH_SECTION, 'username'):
username = config.get(AUTH_SECTION, 'username')
else:
username = raw_input("Username [%s]: " % getpass.getuser())
if not username:
username = getpass.getuser()
config.set(AUTH_SECTION, 'username', username)
else:
username = raw_input("Username [%s]: " % getpass.getuser())
if not username:
username = getpass.getuser()
return username
def unlock_keychain(username):
""" If the user is running via SSH, their Keychain must be unlocked first. """
if 'SSH_TTY' not in os.environ:
return
# Don't unlock if we've already seen this user.
if username in _unlocked:
return
_unlocked.add(username)
if sys.platform == 'darwin':
sys.stderr.write("You are running under SSH. Please unlock your local OS X KeyChain:\n")
subprocess.call(['security', 'unlock-keychain'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def save_password(entry, password, username=None):
"""
Saves the given password in the user's keychain.
:param entry: The entry in the keychain. This is a caller specific key.
:param password: The password to save in the keychain.
:param username: The username to get the password for. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
if has_keychain:
try:
keyring.set_password(entry, username, password)
except Exception as e:
log.warn("Unable to set password in keyring. Continuing..")
log.debug(e)
def remove_password(entry, username=None):
    """
    Removes the password for the specific user in the user's keychain.

    :param entry: The entry in the keychain. This is a caller specific key.
    :param username: The username whose password is to be removed. Default is the current user.
    """
    if username is None:
        username = get_username()
    has_keychain = initialize_keychain()
    if has_keychain:
        try:
            keyring.delete_password(entry, username)
        except Exception as e:
            # Dropped a leftover debug `print e` (Python 2 print statement);
            # the logger calls below already record the failure.
            log.warn("Unable to delete password in keyring. Continuing..")
            log.debug(e)
def get_password(entry=None, username=None, prompt=None, always_ask=False):
"""
Prompt the user for a password on stdin.
:param username: The username to get the password for. Default is the current user.
:param entry: The entry in the keychain. This is a caller specific key.
:param prompt: The entry in the keychain. This is a caller specific key.
:param always_ask: Force the user to enter the password every time.
"""
password = None
if username is None:
username = get_username()
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if prompt is None:
prompt = "Enter %s's password: " % username
if has_keychain and entry is not None and always_ask is False:
password = get_password_from_keyring(entry, username)
if password is None:
password = getpass.getpass(prompt=prompt)
return password
def get_password_from_keyring(entry=None, username=None):
"""
:param entry: The entry in the keychain. This is a caller specific key.
:param username: The username to get the password for. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if has_keychain and entry is not None:
try:
return keyring.get_password(entry, username)
except Exception as e:
log.warn("Unable to get password from keyring. Continuing..")
log.debug(e)
return None
def get_stored_credentials():
"""
Gets the credentials, username and password, that have been stored in
~/.shrew/config.ini and the secure keychain respectively without bothering
to prompt the user if either credential cannot be found.
:returns: username and password
:rtype: tuple of str
"""
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
username = config.get(AUTH_SECTION, 'username')
if not username:
# if we don't have a username then we cannot lookup the password
return None, None
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if has_keychain:
try:
password = keyring.get_password(AUTH_SECTION, username)
return username, password
except Exception as e:
log.warn("Unable to get password from keyring. Continuing..")
log.debug(e)
return username, None
|
litters/shrew
|
shrew/utils/auth.py
|
get_stored_credentials
|
python
|
def get_stored_credentials():
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
username = config.get(AUTH_SECTION, 'username')
if not username:
# if we don't have a username then we cannot lookup the password
return None, None
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if has_keychain:
try:
password = keyring.get_password(AUTH_SECTION, username)
return username, password
except Exception as e:
log.warn("Unable to get password from keyring. Continuing..")
log.debug(e)
return username, None
|
Gets the credentials, username and password, that have been stored in
~/.shrew/config.ini and the secure keychain respectively without bothering
to prompt the user if either credential cannot be found.
:returns: username and password
:rtype: tuple of str
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/utils/auth.py#L286-L313
| null |
#
# Copyright 2014 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Deal with password loading & saving.
To opt-in to the various key chains, please create a file: ~/.shew/config.ini with the contents:
[keychain]
gnome-keychain.enable = True
kde-keychain.enable = True
crypted-keychain.enable = True
Note that the OS X keychain is always available on OS X.
"""
import getpass
from logging import getLogger
import os
import subprocess
import sys
from brownie.caching import memoize
import keyring
from keyring.backend import OSXKeychain, GnomeKeyring, KDEKWallet, CryptedFileKeyring
from keyring.errors import PasswordSetError
from shrew.utils.config import load_config
AUTH_SECTION = 'org.asf.auth'
KEYCHAIN_SECTION = 'keychain'
log = getLogger(__name__)
_unlocked = set()
AUTH_CONFIG_DEFAULTS = {'gnome-keychain.enable': False,
'kde-keychain.enable': False,
'crypted-keychain.enable': False}
AUTH_SECTIONS = [AUTH_SECTION, KEYCHAIN_SECTION]
class FixedOSXKeychain(OSXKeychain):
    """OSXKeychain backend with a working delete_password() implementation.

    The upstream OSXKeychain backend does not implement delete_password()
    yet, so this subclass shells out to the OS X ``security(1)`` tool.
    """

    @staticmethod
    def delete_password(service, username):
        """Delete the password for `username` of `service` from the keychain.

        :param service: the keychain service (entry) name.
        :param username: the account whose password should be removed.
        :raises PasswordSetError: if the password could not be deleted.
        """
        try:
            # set up the call for security.
            call = subprocess.Popen(['security',
                                     'delete-generic-password',
                                     '-a',
                                     username,
                                     '-s',
                                     service],
                                    stderr=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
            call.communicate()
            # BUG FIX: `code is not 0` tested object identity, which only
            # worked by accident via CPython's small-int caching; use a
            # value comparison instead.
            if call.returncode != 0:
                raise PasswordSetError("Can't delete password in keychain")
        except PasswordSetError:
            raise
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise PasswordSetError("Can't delete password in keychain")
@memoize
def initialize_keychain():
# NB: keyring has a config file, but it only allows a single keyring to be
# selected, instead of reusing it's supported() method check against a list
# of backend implementations to try.
keyring_backends = []
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
if config.get(KEYCHAIN_SECTION, 'crypted-keychain.enable'):
keyring_backends.insert(0, CryptedFileKeyring())
if config.get(KEYCHAIN_SECTION, 'kde-keychain.enable'):
keyring_backends.insert(0, KDEKWallet())
if config.get(KEYCHAIN_SECTION, 'gnome-keychain.enable'):
keyring_backends.insert(0, GnomeKeyring())
keyring_backends.insert(0, FixedOSXKeychain())
keyring_backends.sort(key=lambda x: -x.supported())
keyring.set_keyring(keyring_backends[0])
# Return True if there are any supported keychains.
return not all(i.supported() == -1 for i in keyring_backends)
def clear_username_from_store():
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
config.remove(AUTH_SECTION, 'username')
def get_username(use_store=False):
if use_store:
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
if config.has_option(AUTH_SECTION, 'username'):
username = config.get(AUTH_SECTION, 'username')
else:
username = raw_input("Username [%s]: " % getpass.getuser())
if not username:
username = getpass.getuser()
config.set(AUTH_SECTION, 'username', username)
else:
username = raw_input("Username [%s]: " % getpass.getuser())
if not username:
username = getpass.getuser()
return username
def unlock_keychain(username):
""" If the user is running via SSH, their Keychain must be unlocked first. """
if 'SSH_TTY' not in os.environ:
return
# Don't unlock if we've already seen this user.
if username in _unlocked:
return
_unlocked.add(username)
if sys.platform == 'darwin':
sys.stderr.write("You are running under SSH. Please unlock your local OS X KeyChain:\n")
subprocess.call(['security', 'unlock-keychain'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def save_password(entry, password, username=None):
"""
Saves the given password in the user's keychain.
:param entry: The entry in the keychain. This is a caller specific key.
:param password: The password to save in the keychain.
:param username: The username to get the password for. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
if has_keychain:
try:
keyring.set_password(entry, username, password)
except Exception as e:
log.warn("Unable to set password in keyring. Continuing..")
log.debug(e)
def remove_password(entry, username=None):
"""
Removes the password for the specific user in the user's keychain.
:param entry: The entry in the keychain. This is a caller specific key.
:param username: The username whose password is to be removed. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
if has_keychain:
try:
keyring.delete_password(entry, username)
except Exception as e:
print e
log.warn("Unable to delete password in keyring. Continuing..")
log.debug(e)
def get_password(entry=None, username=None, prompt=None, always_ask=False):
"""
Prompt the user for a password on stdin.
:param username: The username to get the password for. Default is the current user.
:param entry: The entry in the keychain. This is a caller specific key.
:param prompt: The entry in the keychain. This is a caller specific key.
:param always_ask: Force the user to enter the password every time.
"""
password = None
if username is None:
username = get_username()
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if prompt is None:
prompt = "Enter %s's password: " % username
if has_keychain and entry is not None and always_ask is False:
password = get_password_from_keyring(entry, username)
if password is None:
password = getpass.getpass(prompt=prompt)
return password
def get_password_from_keyring(entry=None, username=None):
"""
:param entry: The entry in the keychain. This is a caller specific key.
:param username: The username to get the password for. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
# Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.
unlock_keychain(username)
if has_keychain and entry is not None:
try:
return keyring.get_password(entry, username)
except Exception as e:
log.warn("Unable to get password from keyring. Continuing..")
log.debug(e)
return None
def validate_password(entry, username, check_function, password=None, retries=1, save_on_success=True, prompt=None, **check_args):
"""
Validate a password with a check function & retry if the password is incorrect.
Useful for after a user has changed their password in LDAP, but their local keychain entry is then out of sync.
:param str entry: The keychain entry to fetch a password from.
:param str username: The username to authenticate
:param func check_function: Check function to use. Should take (username, password, **check_args)
:param str password: The password to validate. If `None`, the user will be prompted.
:param int retries: Number of retries to prompt the user for.
:param bool save_on_success: Save the password if the validation was successful.
:param str prompt: Alternate prompt to use when asking for the user's password.
:returns: `True` on successful authentication. `False` otherwise.
:rtype: bool
"""
if password is None:
password = get_password(entry, username, prompt)
for _ in xrange(retries + 1):
if check_function(username, password, **check_args):
if save_on_success:
save_password(entry, password, username)
return True
log.error("Couldn't successfully authenticate your username & password..")
password = get_password(entry, username, prompt, always_ask=True)
return False
|
litters/shrew
|
shrew/utils/auth.py
|
FixedOSXKeychain.delete_password
|
python
|
def delete_password(service, username):
try:
# set up the call for security.
call = subprocess.Popen(['security',
'delete-generic-password',
'-a',
username,
'-s',
service],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
_, _ = call.communicate()
code = call.returncode
# check return code.
if code is not 0:
raise PasswordSetError('Can\'t delete password in keychain')
except:
raise PasswordSetError("Can't delete password in keychain")
|
Delete the password for the username of the service.
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/utils/auth.py#L60-L79
| null |
class FixedOSXKeychain(OSXKeychain):
""" OSXKeychain does not implement delete_password() yet """
@staticmethod
|
litters/shrew
|
shrew/cli.py
|
entrypoint
|
python
|
def entrypoint(method, depth=1, cls=None):
current_frame = inspect.currentframe(depth).f_locals
if '__name__' in current_frame and current_frame['__name__'] == '__main__':
if cls is None:
cls = CLI
method(cls())
return method
|
Run a method as your __main__ via decorator.
Example::
@shrew.cli.entrypoint
def main(cli):
...
Shorthand for::
def main():
cli = shrew.cli.CLI()
...
if __name__ == '__main__':
method()
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/cli.py#L30-L61
| null |
#
# Copyright 2014 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ConfigParser
import contextlib
import inspect
from logging import getLogger
import logging
import os
import sys
import argparse
from shrew.utils.auth import get_username, get_password, AUTH_SECTION, save_password, AUTH_SECTIONS, remove_password
class CLI(object):
"""
Initialize a command line helper instance.
Example::
import shrew.cli
# Adding a version variable will automatically let you use --version on the command line.
# VERSION is also acceptable.
version = "1.0"
@shrew.cli.entrypoint
def main(cli):
cli.add_argument("-p", "--podling", required=True, default="yoko", help="Podling to operate on.")
cli.add_argument("-q", dest="quiet", action="store_true", help="An example flag")
with cli.run():
if not cli.args.quiet:
cli.log.info("Operating in Incubator podling: %s", cli.args.podling)
.. note::
When you use shrew.cli the following are available in __main__: cli, args & log.
.. note::
If using --log or --log-file, you can override the default FileHandler by supplying
a log_file_handler() function that returns a valid logging.handler.
Example::
def log_file_handler(filename):
return logging.handlers.TimedRotatingFileHandler(filename, when='midnight', backupCount=14)
.. note::
Example::
@shrew.cli.entrypoint
def main(cli):
...
cli.influx_logger.enable_reporting()
with cli.run():
...
"""
exceptions = {}
def __init__(self, name=None):
if name is None:
name = os.path.basename(sys.argv[0])
#: The name of this cli instance.
self.name = name
#: :mod:`argparse` replaces optparse in Python 2.7, it is installable as a stand-alone module.
self.argparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
self.argparser.add_argument(
'--debug', action='store_true', default=False, help='Turn on debug mode / logging.'
)
self.argparser.add_argument(
'--trace', action='store_true', default=False, help='Turn on trace logging. Implies --debug.'
)
self.argparser.add_argument(
'--log', help='Log file destination. Defaults to stdout only.'
)
#: Call into a :mod:`logging` instance.
self.log = getLogger(self.name)
# If the user has version defined, display it.
for name in ('version', 'VERSION'):
if hasattr(sys.modules['__main__'], name):
self.argparser.add_argument('--version', action='version', version='%(prog)s ' + getattr(sys.modules['__main__'], name))
#: :class:`ConfigParser` configuration object if --config was passed on the command line.
#: Default is None.
self.config = None
#: Configuration file to load. Default is None.
self.config_file = None
#: Description for the argument parser.
self.description = None
#: Epilog for the argument parser.
self.epilog = None
#: Parsed arguments.
self.args = None
#: Unrecognized arguments.
self.unrecognized_args = None
# Disable logging by default, programs can enable if it is wanted
if self.log:
# Keep the handler around, so we can change it's level later.
self.console = logging.StreamHandler()
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
self.console.setLevel(logging.INFO)
self.log.addHandler(self.console)
# This controls the global level for loggers. Filtering will occur in handlers for levels.
self.log.setLevel(logging.INFO)
#: Caller can always write DEBUG level logs to a filename.
#: Using --log on the command line will override this variable.
self.log_file = None
#: :class:`logging` FileHandler instance if --log was passed, or `log_file` is set.
self.log_file_handler = None
# Defaults to use only when there are no other arguments.
self.argument_defaults = None
# Flag to determine if we should parse the --config file.
self.should_parse_config = False
# Should we process the username and passwords
self.use_username_password_store = None
def add_argument(self, *args, **kwargs):
"""
Add a command line argument.
The current underlying implementation uses :mod:`argparse`.
"""
self.argparser.add_argument(*args, **kwargs)
def add_argument_defaults(self, **kwargs):
"""
Set defaults to be passed to the argument parser ONLY when there are
no other arguments on the command line. If you want regular defaults,
use the default= setting on :meth:`add_argument`.
Example::
cli.add_argument_defaults(start=True, debug=True)
"""
self.argument_defaults = kwargs
def add_config_option(self, default=None):
""" Add a --config option to the argument parser. """
self.argparser.add_argument('--config', default=default, help='Config file to read. Defaults to: %(default)s')
self.should_parse_config = True
def add_username_password(self, use_store=False):
""" Add --username and --password options
:param bool use_store: Name of the section (concept, command line options, API reference)
"""
self.argparser.add_argument('--username', default=None, help='Username')
self.argparser.add_argument('--password', default=None, help='Password')
self.argparser.add_argument('--clear-store', action='store_true', default=False, help='Clear password keystore')
self.use_username_password_store = use_store
def _add_documentation_link(self, links, section, variable_name, default=None):
"""
:param list links: List of links to append link of the form "Section: <link>", if link available
:param str section: Name of the section (concept, command line options, API reference)
:param str variable_name: Variable name in main module that should hold URL to documentation
:param str default: Default URL to documentation
"""
url = getattr(sys.modules['__main__'], variable_name, default)
if url:
links.append('%s: %s' % (section, url))
def __parse_args(self, accept_unrecognized_args=False):
""" Invoke the argument parser. """
# If the user provided a description, use it. Otherwise grab the doc string.
if self.description:
self.argparser.description = self.description
elif getattr(sys.modules['__main__'], '__doc__', None):
self.argparser.description = getattr(sys.modules['__main__'], '__doc__')
else:
self.argparser.description = 'No documentation defined. Please add a doc string to %s' % sys.modules['__main__'].__file__
self.argparser.epilog = self.epilog
# Only if there aren't any other command line arguments.
if len(sys.argv) == 1 and self.argument_defaults:
self.argparser.set_defaults(**self.argument_defaults)
if accept_unrecognized_args:
self.args, self.unrecognized_args = self.argparser.parse_known_args()
else:
self.args = self.argparser.parse_args()
def __parse_config(self):
""" Invoke the config file parser. """
if self.should_parse_config and (self.args.config or self.config_file):
self.config = ConfigParser.SafeConfigParser()
self.config.read(self.args.config or self.config_file)
def __process_username_password(self):
""" If indicated, process the username and password """
if self.use_username_password_store is not None:
if self.args.clear_store:
with load_config(sections=AUTH_SECTIONS) as config:
config.remove_option(AUTH_SECTION, 'username')
if not self.args.username:
self.args.username = get_username(use_store=self.use_username_password_store)
if self.args.clear_store:
remove_password(AUTH_SECTION, username=self.args.username)
if not self.args.password:
self.args.password = get_password(AUTH_SECTION, username=self.args.username)
if self.use_username_password_store:
save_password(AUTH_SECTION, self.args.password, self.args.username)
def __finish_initializing(self):
""" Handle any initialization after arguments & config has been parsed. """
if self.args.debug or self.args.trace:
# Set the console (StreamHandler) to allow debug statements.
if self.args.debug:
self.console.setLevel(logging.DEBUG)
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
# Set the global level to debug.
if self.args.debug:
self.log.setLevel(logging.DEBUG)
if self.args.log or self.log_file:
# Allow the user to override the default log file handler.
try:
self.log_file_handler = sys.modules['__main__'].log_file_handler(self.args.log or self.log_file)
except Exception:
self.log_file_handler = logging.FileHandler(self.args.log or self.log_file)
self.log_file_handler.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
self.log_file_handler.setLevel(logging.DEBUG)
self.log.addHandler(self.log_file_handler)
# Allow cli.log, args & self to be accessed from __main__
if not hasattr(sys.modules['__main__'], 'log'):
sys.modules['__main__'].log = self.log
if not hasattr(sys.modules['__main__'], 'cli'):
sys.modules['__main__'].cli = self
if not hasattr(sys.modules['__main__'], 'args'):
sys.modules['__main__'].args = self.args
@classmethod
def register_exception(cls, exception, func):
"""
Allow callers to register a function to be run when the given
exception is raised while inside a cli.run() context manager.
"""
cls.exceptions[exception] = func
@contextlib.contextmanager
def run(self, accept_unrecognized_args=False):
"""
Called via the `with` statement to invoke the :func:`contextlib.contextmanager`.
Control is then yielded back to the caller.
All exceptions are caught & a stack trace emitted, except in the case of `shrew.cli.ExitedCleanly`.
"""
self.__parse_args(accept_unrecognized_args)
self.__parse_config()
self.__process_username_password()
self.__finish_initializing()
exit_status = 0
try:
yield self
except ExitedCleanly:
pass
except (Exception, KeyboardInterrupt) as e:
# Run any method a library or caller might have registered.
for base in type(e).mro():
if base in self.exceptions:
self.exceptions[base](e)
break
else:
self.log.exception(e)
exit_status = os.EX_SOFTWARE
finally:
logging.shutdown()
sys.exit(exit_status)
class ExitedCleanly(StandardError):
""" Use instead of sys.exit() to throw an exception but not log an error. """
pass
def prompt_options(text, options):
for idx, value in enumerate(options):
print '%s: %s' % (idx + 1, value)
text += ' [1 to %s, enter to skip] ' % len(options)
while True:
answer = raw_input(text)
if not answer:
return None
if answer.isdigit():
answer = int(answer)
if 1 <= answer <= len(options):
return answer - 1
def prompt_yes_no(text, default=None):
if default is None:
text += ' (y/n) '
elif default:
text += ' (Y/n) '
else:
text += ' (y/N) '
while True:
answer = raw_input(text)
if not answer and default is not None:
return default
if answer.lower() == 'y':
return True
if answer.lower() == 'n':
return False
|
litters/shrew
|
shrew/cli.py
|
CLI.add_config_option
|
python
|
def add_config_option(self, default=None):
self.argparser.add_argument('--config', default=default, help='Config file to read. Defaults to: %(default)s')
self.should_parse_config = True
|
Add a --config option to the argument parser.
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/cli.py#L216-L220
| null |
class CLI(object):
"""
Initialize a command line helper instance.
Example::
import shrew.cli
# Adding a version variable will automatically let you use --version on the command line.
# VERSION is also acceptable.
version = "1.0"
@shrew.cli.entrypoint
def main(cli):
cli.add_argument("-p", "--podling", required=True, default="yoko", help="Podling to operate on.")
cli.add_argument("-q", dest="quiet", action="store_true", help="An example flag")
with cli.run():
if not cli.args.quiet:
cli.log.info("Operating in Incubator podling: %s", cli.args.podling)
.. note::
When you use shrew.cli the following are available in __main__: cli, args & log.
.. note::
If using --log or --log-file, you can override the default FileHandler by supplying
a log_file_handler() function that returns a valid logging.handler.
Example::
def log_file_handler(filename):
return logging.handlers.TimedRotatingFileHandler(filename, when='midnight', backupCount=14)
.. note::
Example::
@shrew.cli.entrypoint
def main(cli):
...
cli.influx_logger.enable_reporting()
with cli.run():
...
"""
exceptions = {}
def __init__(self, name=None):
if name is None:
name = os.path.basename(sys.argv[0])
#: The name of this cli instance.
self.name = name
#: :mod:`argparse` replaces optparse in Python 2.7, it is installable as a stand-alone module.
self.argparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
self.argparser.add_argument(
'--debug', action='store_true', default=False, help='Turn on debug mode / logging.'
)
self.argparser.add_argument(
'--trace', action='store_true', default=False, help='Turn on trace logging. Implies --debug.'
)
self.argparser.add_argument(
'--log', help='Log file destination. Defaults to stdout only.'
)
#: Call into a :mod:`logging` instance.
self.log = getLogger(self.name)
# If the user has version defined, display it.
for name in ('version', 'VERSION'):
if hasattr(sys.modules['__main__'], name):
self.argparser.add_argument('--version', action='version', version='%(prog)s ' + getattr(sys.modules['__main__'], name))
#: :class:`ConfigParser` configuration object if --config was passed on the command line.
#: Default is None.
self.config = None
#: Configuration file to load. Default is None.
self.config_file = None
#: Description for the argument parser.
self.description = None
#: Epilog for the argument parser.
self.epilog = None
#: Parsed arguments.
self.args = None
#: Unrecognized arguments.
self.unrecognized_args = None
# Disable logging by default, programs can enable if it is wanted
if self.log:
# Keep the handler around, so we can change it's level later.
self.console = logging.StreamHandler()
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
self.console.setLevel(logging.INFO)
self.log.addHandler(self.console)
# This controls the global level for loggers. Filtering will occur in handlers for levels.
self.log.setLevel(logging.INFO)
#: Caller can always write DEBUG level logs to a filename.
#: Using --log on the command line will override this variable.
self.log_file = None
#: :class:`logging` FileHandler instance if --log was passed, or `log_file` is set.
self.log_file_handler = None
# Defaults to use only when there are no other arguments.
self.argument_defaults = None
# Flag to determine if we should parse the --config file.
self.should_parse_config = False
# Should we process the username and passwords
self.use_username_password_store = None
def add_argument(self, *args, **kwargs):
"""
Add a command line argument.
The current underlying implementation uses :mod:`argparse`.
"""
self.argparser.add_argument(*args, **kwargs)
def add_argument_defaults(self, **kwargs):
"""
Set defaults to be passed to the argument parser ONLY when there are
no other arguments on the command line. If you want regular defaults,
use the default= setting on :meth:`add_argument`.
Example::
cli.add_argument_defaults(start=True, debug=True)
"""
self.argument_defaults = kwargs
def add_username_password(self, use_store=False):
""" Add --username and --password options
:param bool use_store: Name of the section (concept, command line options, API reference)
"""
self.argparser.add_argument('--username', default=None, help='Username')
self.argparser.add_argument('--password', default=None, help='Password')
self.argparser.add_argument('--clear-store', action='store_true', default=False, help='Clear password keystore')
self.use_username_password_store = use_store
def _add_documentation_link(self, links, section, variable_name, default=None):
"""
:param list links: List of links to append link of the form "Section: <link>", if link available
:param str section: Name of the section (concept, command line options, API reference)
:param str variable_name: Variable name in main module that should hold URL to documentation
:param str default: Default URL to documentation
"""
url = getattr(sys.modules['__main__'], variable_name, default)
if url:
links.append('%s: %s' % (section, url))
def __parse_args(self, accept_unrecognized_args=False):
""" Invoke the argument parser. """
# If the user provided a description, use it. Otherwise grab the doc string.
if self.description:
self.argparser.description = self.description
elif getattr(sys.modules['__main__'], '__doc__', None):
self.argparser.description = getattr(sys.modules['__main__'], '__doc__')
else:
self.argparser.description = 'No documentation defined. Please add a doc string to %s' % sys.modules['__main__'].__file__
self.argparser.epilog = self.epilog
# Only if there aren't any other command line arguments.
if len(sys.argv) == 1 and self.argument_defaults:
self.argparser.set_defaults(**self.argument_defaults)
if accept_unrecognized_args:
self.args, self.unrecognized_args = self.argparser.parse_known_args()
else:
self.args = self.argparser.parse_args()
def __parse_config(self):
""" Invoke the config file parser. """
if self.should_parse_config and (self.args.config or self.config_file):
self.config = ConfigParser.SafeConfigParser()
self.config.read(self.args.config or self.config_file)
def __process_username_password(self):
""" If indicated, process the username and password """
if self.use_username_password_store is not None:
if self.args.clear_store:
with load_config(sections=AUTH_SECTIONS) as config:
config.remove_option(AUTH_SECTION, 'username')
if not self.args.username:
self.args.username = get_username(use_store=self.use_username_password_store)
if self.args.clear_store:
remove_password(AUTH_SECTION, username=self.args.username)
if not self.args.password:
self.args.password = get_password(AUTH_SECTION, username=self.args.username)
if self.use_username_password_store:
save_password(AUTH_SECTION, self.args.password, self.args.username)
def __finish_initializing(self):
""" Handle any initialization after arguments & config has been parsed. """
if self.args.debug or self.args.trace:
# Set the console (StreamHandler) to allow debug statements.
if self.args.debug:
self.console.setLevel(logging.DEBUG)
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
# Set the global level to debug.
if self.args.debug:
self.log.setLevel(logging.DEBUG)
if self.args.log or self.log_file:
# Allow the user to override the default log file handler.
try:
self.log_file_handler = sys.modules['__main__'].log_file_handler(self.args.log or self.log_file)
except Exception:
self.log_file_handler = logging.FileHandler(self.args.log or self.log_file)
self.log_file_handler.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
self.log_file_handler.setLevel(logging.DEBUG)
self.log.addHandler(self.log_file_handler)
# Allow cli.log, args & self to be accessed from __main__
if not hasattr(sys.modules['__main__'], 'log'):
sys.modules['__main__'].log = self.log
if not hasattr(sys.modules['__main__'], 'cli'):
sys.modules['__main__'].cli = self
if not hasattr(sys.modules['__main__'], 'args'):
sys.modules['__main__'].args = self.args
@classmethod
def register_exception(cls, exception, func):
"""
Allow callers to register a function to be run when the given
exception is raised while inside a cli.run() context manager.
"""
cls.exceptions[exception] = func
@contextlib.contextmanager
def run(self, accept_unrecognized_args=False):
"""
Called via the `with` statement to invoke the :func:`contextlib.contextmanager`.
Control is then yielded back to the caller.
All exceptions are caught & a stack trace emitted, except in the case of `shrew.cli.ExitedCleanly`.
"""
self.__parse_args(accept_unrecognized_args)
self.__parse_config()
self.__process_username_password()
self.__finish_initializing()
exit_status = 0
try:
yield self
except ExitedCleanly:
pass
except (Exception, KeyboardInterrupt) as e:
# Run any method a library or caller might have registered.
for base in type(e).mro():
if base in self.exceptions:
self.exceptions[base](e)
break
else:
self.log.exception(e)
exit_status = os.EX_SOFTWARE
finally:
logging.shutdown()
sys.exit(exit_status)
|
litters/shrew
|
shrew/cli.py
|
CLI.add_username_password
|
python
|
def add_username_password(self, use_store=False):
self.argparser.add_argument('--username', default=None, help='Username')
self.argparser.add_argument('--password', default=None, help='Password')
self.argparser.add_argument('--clear-store', action='store_true', default=False, help='Clear password keystore')
self.use_username_password_store = use_store
|
Add --username and --password options
:param bool use_store: Name of the section (concept, command line options, API reference)
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/cli.py#L222-L230
| null |
class CLI(object):
"""
Initialize a command line helper instance.
Example::
import shrew.cli
# Adding a version variable will automatically let you use --version on the command line.
# VERSION is also acceptable.
version = "1.0"
@shrew.cli.entrypoint
def main(cli):
cli.add_argument("-p", "--podling", required=True, default="yoko", help="Podling to operate on.")
cli.add_argument("-q", dest="quiet", action="store_true", help="An example flag")
with cli.run():
if not cli.args.quiet:
cli.log.info("Operating in Incubator podling: %s", cli.args.podling)
.. note::
When you use shrew.cli the following are available in __main__: cli, args & log.
.. note::
If using --log or --log-file, you can override the default FileHandler by supplying
a log_file_handler() function that returns a valid logging.handler.
Example::
def log_file_handler(filename):
return logging.handlers.TimedRotatingFileHandler(filename, when='midnight', backupCount=14)
.. note::
Example::
@shrew.cli.entrypoint
def main(cli):
...
cli.influx_logger.enable_reporting()
with cli.run():
...
"""
exceptions = {}
def __init__(self, name=None):
if name is None:
name = os.path.basename(sys.argv[0])
#: The name of this cli instance.
self.name = name
#: :mod:`argparse` replaces optparse in Python 2.7, it is installable as a stand-alone module.
self.argparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
self.argparser.add_argument(
'--debug', action='store_true', default=False, help='Turn on debug mode / logging.'
)
self.argparser.add_argument(
'--trace', action='store_true', default=False, help='Turn on trace logging. Implies --debug.'
)
self.argparser.add_argument(
'--log', help='Log file destination. Defaults to stdout only.'
)
#: Call into a :mod:`logging` instance.
self.log = getLogger(self.name)
# If the user has version defined, display it.
for name in ('version', 'VERSION'):
if hasattr(sys.modules['__main__'], name):
self.argparser.add_argument('--version', action='version', version='%(prog)s ' + getattr(sys.modules['__main__'], name))
#: :class:`ConfigParser` configuration object if --config was passed on the command line.
#: Default is None.
self.config = None
#: Configuration file to load. Default is None.
self.config_file = None
#: Description for the argument parser.
self.description = None
#: Epilog for the argument parser.
self.epilog = None
#: Parsed arguments.
self.args = None
#: Unrecognized arguments.
self.unrecognized_args = None
# Disable logging by default, programs can enable if it is wanted
if self.log:
# Keep the handler around, so we can change it's level later.
self.console = logging.StreamHandler()
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
self.console.setLevel(logging.INFO)
self.log.addHandler(self.console)
# This controls the global level for loggers. Filtering will occur in handlers for levels.
self.log.setLevel(logging.INFO)
#: Caller can always write DEBUG level logs to a filename.
#: Using --log on the command line will override this variable.
self.log_file = None
#: :class:`logging` FileHandler instance if --log was passed, or `log_file` is set.
self.log_file_handler = None
# Defaults to use only when there are no other arguments.
self.argument_defaults = None
# Flag to determine if we should parse the --config file.
self.should_parse_config = False
# Should we process the username and passwords
self.use_username_password_store = None
def add_argument(self, *args, **kwargs):
"""
Add a command line argument.
The current underlying implementation uses :mod:`argparse`.
"""
self.argparser.add_argument(*args, **kwargs)
def add_argument_defaults(self, **kwargs):
"""
Set defaults to be passed to the argument parser ONLY when there are
no other arguments on the command line. If you want regular defaults,
use the default= setting on :meth:`add_argument`.
Example::
cli.add_argument_defaults(start=True, debug=True)
"""
self.argument_defaults = kwargs
def add_config_option(self, default=None):
""" Add a --config option to the argument parser. """
self.argparser.add_argument('--config', default=default, help='Config file to read. Defaults to: %(default)s')
self.should_parse_config = True
def _add_documentation_link(self, links, section, variable_name, default=None):
"""
:param list links: List of links to append link of the form "Section: <link>", if link available
:param str section: Name of the section (concept, command line options, API reference)
:param str variable_name: Variable name in main module that should hold URL to documentation
:param str default: Default URL to documentation
"""
url = getattr(sys.modules['__main__'], variable_name, default)
if url:
links.append('%s: %s' % (section, url))
def __parse_args(self, accept_unrecognized_args=False):
""" Invoke the argument parser. """
# If the user provided a description, use it. Otherwise grab the doc string.
if self.description:
self.argparser.description = self.description
elif getattr(sys.modules['__main__'], '__doc__', None):
self.argparser.description = getattr(sys.modules['__main__'], '__doc__')
else:
self.argparser.description = 'No documentation defined. Please add a doc string to %s' % sys.modules['__main__'].__file__
self.argparser.epilog = self.epilog
# Only if there aren't any other command line arguments.
if len(sys.argv) == 1 and self.argument_defaults:
self.argparser.set_defaults(**self.argument_defaults)
if accept_unrecognized_args:
self.args, self.unrecognized_args = self.argparser.parse_known_args()
else:
self.args = self.argparser.parse_args()
def __parse_config(self):
""" Invoke the config file parser. """
if self.should_parse_config and (self.args.config or self.config_file):
self.config = ConfigParser.SafeConfigParser()
self.config.read(self.args.config or self.config_file)
def __process_username_password(self):
    """ If indicated, process the username and password """
    # Only runs when add_username_password() was called; the store flag may
    # legitimately be False, hence the explicit "is not None" test.
    if self.use_username_password_store is not None:
        # --clear-store: first drop the remembered username from the config.
        if self.args.clear_store:
            with load_config(sections=AUTH_SECTIONS) as config:
                config.remove_option(AUTH_SECTION, 'username')
        if not self.args.username:
            # Not supplied on the command line: resolve via the store/prompt.
            self.args.username = get_username(use_store=self.use_username_password_store)
        if self.args.clear_store:
            # Now that the username is known, forget its stored password too.
            remove_password(AUTH_SECTION, username=self.args.username)
        if not self.args.password:
            self.args.password = get_password(AUTH_SECTION, username=self.args.username)
        if self.use_username_password_store:
            # Store enabled: remember the password for subsequent runs.
            save_password(AUTH_SECTION, self.args.password, self.args.username)
def __finish_initializing(self):
    """ Handle any initialization after arguments & config has been parsed. """
    if self.args.debug or self.args.trace:
        # Set the console (StreamHandler) to allow debug statements.
        # NOTE(review): only --debug actually lowers the log levels below;
        # --trace alone just swaps the formatter. The --trace help text says
        # it implies --debug -- confirm whether that implication is handled
        # elsewhere or is missing here.
        if self.args.debug:
            self.console.setLevel(logging.DEBUG)
        self.console.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
        # Set the global level to debug.
        if self.args.debug:
            self.log.setLevel(logging.DEBUG)
    if self.args.log or self.log_file:
        # Allow the user to override the default log file handler.
        try:
            self.log_file_handler = sys.modules['__main__'].log_file_handler(self.args.log or self.log_file)
        except Exception:
            # __main__ has no log_file_handler() factory (or it failed):
            # fall back to a plain FileHandler on the same path.
            self.log_file_handler = logging.FileHandler(self.args.log or self.log_file)
        self.log_file_handler.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
        self.log_file_handler.setLevel(logging.DEBUG)
        self.log.addHandler(self.log_file_handler)
    # Allow cli.log, args & self to be accessed from __main__
    # (only when the caller has not already defined those names).
    if not hasattr(sys.modules['__main__'], 'log'):
        sys.modules['__main__'].log = self.log
    if not hasattr(sys.modules['__main__'], 'cli'):
        sys.modules['__main__'].cli = self
    if not hasattr(sys.modules['__main__'], 'args'):
        sys.modules['__main__'].args = self.args
@classmethod
def register_exception(cls, exception, func):
    """Register *func* to run when *exception* is raised inside cli.run().

    :param exception: Exception class to hook (matched against the MRO of
        whatever is actually raised).
    :param func: Callable invoked with the exception instance.
    """
    cls.exceptions[exception] = func
@contextlib.contextmanager
def run(self, accept_unrecognized_args=False):
    """
    Called via the `with` statement to invoke the :func:`contextlib.contextmanager`.
    Control is then yielded back to the caller.
    All exceptions are caught & a stack trace emitted, except in the case of `shrew.cli.ExitedCleanly`.
    """
    # Finish setup in dependency order: args -> config -> auth -> logging.
    self.__parse_args(accept_unrecognized_args)
    self.__parse_config()
    self.__process_username_password()
    self.__finish_initializing()
    exit_status = 0
    try:
        # Hand control back to the caller's `with` body.
        yield self
    except ExitedCleanly:
        # Deliberate early exit; not an error, keep exit status 0.
        pass
    except (Exception, KeyboardInterrupt) as e:
        # Run any method a library or caller might have registered.
        # Walk the MRO so a handler registered for a base class also fires
        # for derived exceptions; the for/else logs when no handler matched.
        for base in type(e).mro():
            if base in self.exceptions:
                self.exceptions[base](e)
                break
        else:
            self.log.exception(e)
        exit_status = os.EX_SOFTWARE
    finally:
        # Always flush/close logging, then terminate the process with the
        # computed status (this raises SystemExit on every path).
        logging.shutdown()
        sys.exit(exit_status)
|
litters/shrew
|
shrew/cli.py
|
CLI._add_documentation_link
|
python
|
def _add_documentation_link(self, links, section, variable_name, default=None):
url = getattr(sys.modules['__main__'], variable_name, default)
if url:
links.append('%s: %s' % (section, url))
|
:param list links: List of links to append link of the form "Section: <link>", if link available
:param str section: Name of the section (concept, command line options, API reference)
:param str variable_name: Variable name in main module that should hold URL to documentation
:param str default: Default URL to documentation
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/cli.py#L232-L243
| null |
class CLI(object):
"""
Initialize a command line helper instance.
Example::
import shrew.cli
# Adding a version variable will automatically let you use --version on the command line.
# VERSION is also acceptable.
version = "1.0"
@shrew.cli.entrypoint
def main(cli):
cli.add_argument("-p", "--podling", required=True, default="yoko", help="Podling to operate on.")
cli.add_argument("-q", dest="quiet", action="store_true", help="An example flag")
with cli.run():
if not cli.args.quiet:
cli.log.info("Operating in Incubator podling: %s", cli.args.podling)
.. note::
When you use shrew.cli the following are available in __main__: cli, args & log.
.. note::
If using --log or --log-file, you can override the default FileHandler by supplying
a log_file_handler() function that returns a valid logging.handler.
Example::
def log_file_handler(filename):
return logging.handlers.TimedRotatingFileHandler(filename, when='midnight', backupCount=14)
.. note::
Example::
@shrew.cli.entrypoint
def main(cli):
...
cli.influx_logger.enable_reporting()
with cli.run():
...
"""
exceptions = {}
def __init__(self, name=None):
if name is None:
name = os.path.basename(sys.argv[0])
#: The name of this cli instance.
self.name = name
#: :mod:`argparse` replaces optparse in Python 2.7, it is installable as a stand-alone module.
self.argparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
self.argparser.add_argument(
'--debug', action='store_true', default=False, help='Turn on debug mode / logging.'
)
self.argparser.add_argument(
'--trace', action='store_true', default=False, help='Turn on trace logging. Implies --debug.'
)
self.argparser.add_argument(
'--log', help='Log file destination. Defaults to stdout only.'
)
#: Call into a :mod:`logging` instance.
self.log = getLogger(self.name)
# If the user has version defined, display it.
for name in ('version', 'VERSION'):
if hasattr(sys.modules['__main__'], name):
self.argparser.add_argument('--version', action='version', version='%(prog)s ' + getattr(sys.modules['__main__'], name))
#: :class:`ConfigParser` configuration object if --config was passed on the command line.
#: Default is None.
self.config = None
#: Configuration file to load. Default is None.
self.config_file = None
#: Description for the argument parser.
self.description = None
#: Epilog for the argument parser.
self.epilog = None
#: Parsed arguments.
self.args = None
#: Unrecognized arguments.
self.unrecognized_args = None
# Disable logging by default, programs can enable if it is wanted
if self.log:
# Keep the handler around, so we can change it's level later.
self.console = logging.StreamHandler()
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
self.console.setLevel(logging.INFO)
self.log.addHandler(self.console)
# This controls the global level for loggers. Filtering will occur in handlers for levels.
self.log.setLevel(logging.INFO)
#: Caller can always write DEBUG level logs to a filename.
#: Using --log on the command line will override this variable.
self.log_file = None
#: :class:`logging` FileHandler instance if --log was passed, or `log_file` is set.
self.log_file_handler = None
# Defaults to use only when there are no other arguments.
self.argument_defaults = None
# Flag to determine if we should parse the --config file.
self.should_parse_config = False
# Should we process the username and passwords
self.use_username_password_store = None
def add_argument(self, *args, **kwargs):
"""
Add a command line argument.
The current underlying implementation uses :mod:`argparse`.
"""
self.argparser.add_argument(*args, **kwargs)
def add_argument_defaults(self, **kwargs):
"""
Set defaults to be passed to the argument parser ONLY when there are
no other arguments on the command line. If you want regular defaults,
use the default= setting on :meth:`add_argument`.
Example::
cli.add_argument_defaults(start=True, debug=True)
"""
self.argument_defaults = kwargs
def add_config_option(self, default=None):
""" Add a --config option to the argument parser. """
self.argparser.add_argument('--config', default=default, help='Config file to read. Defaults to: %(default)s')
self.should_parse_config = True
def add_username_password(self, use_store=False):
""" Add --username and --password options
:param bool use_store: Name of the section (concept, command line options, API reference)
"""
self.argparser.add_argument('--username', default=None, help='Username')
self.argparser.add_argument('--password', default=None, help='Password')
self.argparser.add_argument('--clear-store', action='store_true', default=False, help='Clear password keystore')
self.use_username_password_store = use_store
def __parse_args(self, accept_unrecognized_args=False):
""" Invoke the argument parser. """
# If the user provided a description, use it. Otherwise grab the doc string.
if self.description:
self.argparser.description = self.description
elif getattr(sys.modules['__main__'], '__doc__', None):
self.argparser.description = getattr(sys.modules['__main__'], '__doc__')
else:
self.argparser.description = 'No documentation defined. Please add a doc string to %s' % sys.modules['__main__'].__file__
self.argparser.epilog = self.epilog
# Only if there aren't any other command line arguments.
if len(sys.argv) == 1 and self.argument_defaults:
self.argparser.set_defaults(**self.argument_defaults)
if accept_unrecognized_args:
self.args, self.unrecognized_args = self.argparser.parse_known_args()
else:
self.args = self.argparser.parse_args()
def __parse_config(self):
""" Invoke the config file parser. """
if self.should_parse_config and (self.args.config or self.config_file):
self.config = ConfigParser.SafeConfigParser()
self.config.read(self.args.config or self.config_file)
def __process_username_password(self):
""" If indicated, process the username and password """
if self.use_username_password_store is not None:
if self.args.clear_store:
with load_config(sections=AUTH_SECTIONS) as config:
config.remove_option(AUTH_SECTION, 'username')
if not self.args.username:
self.args.username = get_username(use_store=self.use_username_password_store)
if self.args.clear_store:
remove_password(AUTH_SECTION, username=self.args.username)
if not self.args.password:
self.args.password = get_password(AUTH_SECTION, username=self.args.username)
if self.use_username_password_store:
save_password(AUTH_SECTION, self.args.password, self.args.username)
def __finish_initializing(self):
""" Handle any initialization after arguments & config has been parsed. """
if self.args.debug or self.args.trace:
# Set the console (StreamHandler) to allow debug statements.
if self.args.debug:
self.console.setLevel(logging.DEBUG)
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
# Set the global level to debug.
if self.args.debug:
self.log.setLevel(logging.DEBUG)
if self.args.log or self.log_file:
# Allow the user to override the default log file handler.
try:
self.log_file_handler = sys.modules['__main__'].log_file_handler(self.args.log or self.log_file)
except Exception:
self.log_file_handler = logging.FileHandler(self.args.log or self.log_file)
self.log_file_handler.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
self.log_file_handler.setLevel(logging.DEBUG)
self.log.addHandler(self.log_file_handler)
# Allow cli.log, args & self to be accessed from __main__
if not hasattr(sys.modules['__main__'], 'log'):
sys.modules['__main__'].log = self.log
if not hasattr(sys.modules['__main__'], 'cli'):
sys.modules['__main__'].cli = self
if not hasattr(sys.modules['__main__'], 'args'):
sys.modules['__main__'].args = self.args
@classmethod
def register_exception(cls, exception, func):
"""
Allow callers to register a function to be run when the given
exception is raised while inside a cli.run() context manager.
"""
cls.exceptions[exception] = func
@contextlib.contextmanager
def run(self, accept_unrecognized_args=False):
"""
Called via the `with` statement to invoke the :func:`contextlib.contextmanager`.
Control is then yielded back to the caller.
All exceptions are caught & a stack trace emitted, except in the case of `shrew.cli.ExitedCleanly`.
"""
self.__parse_args(accept_unrecognized_args)
self.__parse_config()
self.__process_username_password()
self.__finish_initializing()
exit_status = 0
try:
yield self
except ExitedCleanly:
pass
except (Exception, KeyboardInterrupt) as e:
# Run any method a library or caller might have registered.
for base in type(e).mro():
if base in self.exceptions:
self.exceptions[base](e)
break
else:
self.log.exception(e)
exit_status = os.EX_SOFTWARE
finally:
logging.shutdown()
sys.exit(exit_status)
|
litters/shrew
|
shrew/cli.py
|
CLI.__parse_args
|
python
|
def __parse_args(self, accept_unrecognized_args=False):
# If the user provided a description, use it. Otherwise grab the doc string.
if self.description:
self.argparser.description = self.description
elif getattr(sys.modules['__main__'], '__doc__', None):
self.argparser.description = getattr(sys.modules['__main__'], '__doc__')
else:
self.argparser.description = 'No documentation defined. Please add a doc string to %s' % sys.modules['__main__'].__file__
self.argparser.epilog = self.epilog
# Only if there aren't any other command line arguments.
if len(sys.argv) == 1 and self.argument_defaults:
self.argparser.set_defaults(**self.argument_defaults)
if accept_unrecognized_args:
self.args, self.unrecognized_args = self.argparser.parse_known_args()
else:
self.args = self.argparser.parse_args()
|
Invoke the argument parser.
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/cli.py#L245-L265
| null |
class CLI(object):
"""
Initialize a command line helper instance.
Example::
import shrew.cli
# Adding a version variable will automatically let you use --version on the command line.
# VERSION is also acceptable.
version = "1.0"
@shrew.cli.entrypoint
def main(cli):
cli.add_argument("-p", "--podling", required=True, default="yoko", help="Podling to operate on.")
cli.add_argument("-q", dest="quiet", action="store_true", help="An example flag")
with cli.run():
if not cli.args.quiet:
cli.log.info("Operating in Incubator podling: %s", cli.args.podling)
.. note::
When you use shrew.cli the following are available in __main__: cli, args & log.
.. note::
If using --log or --log-file, you can override the default FileHandler by supplying
a log_file_handler() function that returns a valid logging.handler.
Example::
def log_file_handler(filename):
return logging.handlers.TimedRotatingFileHandler(filename, when='midnight', backupCount=14)
.. note::
Example::
@shrew.cli.entrypoint
def main(cli):
...
cli.influx_logger.enable_reporting()
with cli.run():
...
"""
exceptions = {}
def __init__(self, name=None):
if name is None:
name = os.path.basename(sys.argv[0])
#: The name of this cli instance.
self.name = name
#: :mod:`argparse` replaces optparse in Python 2.7, it is installable as a stand-alone module.
self.argparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
self.argparser.add_argument(
'--debug', action='store_true', default=False, help='Turn on debug mode / logging.'
)
self.argparser.add_argument(
'--trace', action='store_true', default=False, help='Turn on trace logging. Implies --debug.'
)
self.argparser.add_argument(
'--log', help='Log file destination. Defaults to stdout only.'
)
#: Call into a :mod:`logging` instance.
self.log = getLogger(self.name)
# If the user has version defined, display it.
for name in ('version', 'VERSION'):
if hasattr(sys.modules['__main__'], name):
self.argparser.add_argument('--version', action='version', version='%(prog)s ' + getattr(sys.modules['__main__'], name))
#: :class:`ConfigParser` configuration object if --config was passed on the command line.
#: Default is None.
self.config = None
#: Configuration file to load. Default is None.
self.config_file = None
#: Description for the argument parser.
self.description = None
#: Epilog for the argument parser.
self.epilog = None
#: Parsed arguments.
self.args = None
#: Unrecognized arguments.
self.unrecognized_args = None
# Disable logging by default, programs can enable if it is wanted
if self.log:
# Keep the handler around, so we can change it's level later.
self.console = logging.StreamHandler()
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
self.console.setLevel(logging.INFO)
self.log.addHandler(self.console)
# This controls the global level for loggers. Filtering will occur in handlers for levels.
self.log.setLevel(logging.INFO)
#: Caller can always write DEBUG level logs to a filename.
#: Using --log on the command line will override this variable.
self.log_file = None
#: :class:`logging` FileHandler instance if --log was passed, or `log_file` is set.
self.log_file_handler = None
# Defaults to use only when there are no other arguments.
self.argument_defaults = None
# Flag to determine if we should parse the --config file.
self.should_parse_config = False
# Should we process the username and passwords
self.use_username_password_store = None
def add_argument(self, *args, **kwargs):
"""
Add a command line argument.
The current underlying implementation uses :mod:`argparse`.
"""
self.argparser.add_argument(*args, **kwargs)
def add_argument_defaults(self, **kwargs):
"""
Set defaults to be passed to the argument parser ONLY when there are
no other arguments on the command line. If you want regular defaults,
use the default= setting on :meth:`add_argument`.
Example::
cli.add_argument_defaults(start=True, debug=True)
"""
self.argument_defaults = kwargs
def add_config_option(self, default=None):
""" Add a --config option to the argument parser. """
self.argparser.add_argument('--config', default=default, help='Config file to read. Defaults to: %(default)s')
self.should_parse_config = True
def add_username_password(self, use_store=False):
""" Add --username and --password options
:param bool use_store: Name of the section (concept, command line options, API reference)
"""
self.argparser.add_argument('--username', default=None, help='Username')
self.argparser.add_argument('--password', default=None, help='Password')
self.argparser.add_argument('--clear-store', action='store_true', default=False, help='Clear password keystore')
self.use_username_password_store = use_store
def _add_documentation_link(self, links, section, variable_name, default=None):
"""
:param list links: List of links to append link of the form "Section: <link>", if link available
:param str section: Name of the section (concept, command line options, API reference)
:param str variable_name: Variable name in main module that should hold URL to documentation
:param str default: Default URL to documentation
"""
url = getattr(sys.modules['__main__'], variable_name, default)
if url:
links.append('%s: %s' % (section, url))
def __parse_config(self):
""" Invoke the config file parser. """
if self.should_parse_config and (self.args.config or self.config_file):
self.config = ConfigParser.SafeConfigParser()
self.config.read(self.args.config or self.config_file)
def __process_username_password(self):
""" If indicated, process the username and password """
if self.use_username_password_store is not None:
if self.args.clear_store:
with load_config(sections=AUTH_SECTIONS) as config:
config.remove_option(AUTH_SECTION, 'username')
if not self.args.username:
self.args.username = get_username(use_store=self.use_username_password_store)
if self.args.clear_store:
remove_password(AUTH_SECTION, username=self.args.username)
if not self.args.password:
self.args.password = get_password(AUTH_SECTION, username=self.args.username)
if self.use_username_password_store:
save_password(AUTH_SECTION, self.args.password, self.args.username)
def __finish_initializing(self):
""" Handle any initialization after arguments & config has been parsed. """
if self.args.debug or self.args.trace:
# Set the console (StreamHandler) to allow debug statements.
if self.args.debug:
self.console.setLevel(logging.DEBUG)
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
# Set the global level to debug.
if self.args.debug:
self.log.setLevel(logging.DEBUG)
if self.args.log or self.log_file:
# Allow the user to override the default log file handler.
try:
self.log_file_handler = sys.modules['__main__'].log_file_handler(self.args.log or self.log_file)
except Exception:
self.log_file_handler = logging.FileHandler(self.args.log or self.log_file)
self.log_file_handler.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
self.log_file_handler.setLevel(logging.DEBUG)
self.log.addHandler(self.log_file_handler)
# Allow cli.log, args & self to be accessed from __main__
if not hasattr(sys.modules['__main__'], 'log'):
sys.modules['__main__'].log = self.log
if not hasattr(sys.modules['__main__'], 'cli'):
sys.modules['__main__'].cli = self
if not hasattr(sys.modules['__main__'], 'args'):
sys.modules['__main__'].args = self.args
@classmethod
def register_exception(cls, exception, func):
"""
Allow callers to register a function to be run when the given
exception is raised while inside a cli.run() context manager.
"""
cls.exceptions[exception] = func
@contextlib.contextmanager
def run(self, accept_unrecognized_args=False):
"""
Called via the `with` statement to invoke the :func:`contextlib.contextmanager`.
Control is then yielded back to the caller.
All exceptions are caught & a stack trace emitted, except in the case of `shrew.cli.ExitedCleanly`.
"""
self.__parse_args(accept_unrecognized_args)
self.__parse_config()
self.__process_username_password()
self.__finish_initializing()
exit_status = 0
try:
yield self
except ExitedCleanly:
pass
except (Exception, KeyboardInterrupt) as e:
# Run any method a library or caller might have registered.
for base in type(e).mro():
if base in self.exceptions:
self.exceptions[base](e)
break
else:
self.log.exception(e)
exit_status = os.EX_SOFTWARE
finally:
logging.shutdown()
sys.exit(exit_status)
|
litters/shrew
|
shrew/cli.py
|
CLI.__parse_config
|
python
|
def __parse_config(self):
if self.should_parse_config and (self.args.config or self.config_file):
self.config = ConfigParser.SafeConfigParser()
self.config.read(self.args.config or self.config_file)
|
Invoke the config file parser.
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/cli.py#L267-L272
| null |
class CLI(object):
"""
Initialize a command line helper instance.
Example::
import shrew.cli
# Adding a version variable will automatically let you use --version on the command line.
# VERSION is also acceptable.
version = "1.0"
@shrew.cli.entrypoint
def main(cli):
cli.add_argument("-p", "--podling", required=True, default="yoko", help="Podling to operate on.")
cli.add_argument("-q", dest="quiet", action="store_true", help="An example flag")
with cli.run():
if not cli.args.quiet:
cli.log.info("Operating in Incubator podling: %s", cli.args.podling)
.. note::
When you use shrew.cli the following are available in __main__: cli, args & log.
.. note::
If using --log or --log-file, you can override the default FileHandler by supplying
a log_file_handler() function that returns a valid logging.handler.
Example::
def log_file_handler(filename):
return logging.handlers.TimedRotatingFileHandler(filename, when='midnight', backupCount=14)
.. note::
Example::
@shrew.cli.entrypoint
def main(cli):
...
cli.influx_logger.enable_reporting()
with cli.run():
...
"""
exceptions = {}
def __init__(self, name=None):
if name is None:
name = os.path.basename(sys.argv[0])
#: The name of this cli instance.
self.name = name
#: :mod:`argparse` replaces optparse in Python 2.7, it is installable as a stand-alone module.
self.argparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
self.argparser.add_argument(
'--debug', action='store_true', default=False, help='Turn on debug mode / logging.'
)
self.argparser.add_argument(
'--trace', action='store_true', default=False, help='Turn on trace logging. Implies --debug.'
)
self.argparser.add_argument(
'--log', help='Log file destination. Defaults to stdout only.'
)
#: Call into a :mod:`logging` instance.
self.log = getLogger(self.name)
# If the user has version defined, display it.
for name in ('version', 'VERSION'):
if hasattr(sys.modules['__main__'], name):
self.argparser.add_argument('--version', action='version', version='%(prog)s ' + getattr(sys.modules['__main__'], name))
#: :class:`ConfigParser` configuration object if --config was passed on the command line.
#: Default is None.
self.config = None
#: Configuration file to load. Default is None.
self.config_file = None
#: Description for the argument parser.
self.description = None
#: Epilog for the argument parser.
self.epilog = None
#: Parsed arguments.
self.args = None
#: Unrecognized arguments.
self.unrecognized_args = None
# Disable logging by default, programs can enable if it is wanted
if self.log:
# Keep the handler around, so we can change it's level later.
self.console = logging.StreamHandler()
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
self.console.setLevel(logging.INFO)
self.log.addHandler(self.console)
# This controls the global level for loggers. Filtering will occur in handlers for levels.
self.log.setLevel(logging.INFO)
#: Caller can always write DEBUG level logs to a filename.
#: Using --log on the command line will override this variable.
self.log_file = None
#: :class:`logging` FileHandler instance if --log was passed, or `log_file` is set.
self.log_file_handler = None
# Defaults to use only when there are no other arguments.
self.argument_defaults = None
# Flag to determine if we should parse the --config file.
self.should_parse_config = False
# Should we process the username and passwords
self.use_username_password_store = None
def add_argument(self, *args, **kwargs):
"""
Add a command line argument.
The current underlying implementation uses :mod:`argparse`.
"""
self.argparser.add_argument(*args, **kwargs)
def add_argument_defaults(self, **kwargs):
"""
Set defaults to be passed to the argument parser ONLY when there are
no other arguments on the command line. If you want regular defaults,
use the default= setting on :meth:`add_argument`.
Example::
cli.add_argument_defaults(start=True, debug=True)
"""
self.argument_defaults = kwargs
def add_config_option(self, default=None):
""" Add a --config option to the argument parser. """
self.argparser.add_argument('--config', default=default, help='Config file to read. Defaults to: %(default)s')
self.should_parse_config = True
def add_username_password(self, use_store=False):
""" Add --username and --password options
:param bool use_store: Name of the section (concept, command line options, API reference)
"""
self.argparser.add_argument('--username', default=None, help='Username')
self.argparser.add_argument('--password', default=None, help='Password')
self.argparser.add_argument('--clear-store', action='store_true', default=False, help='Clear password keystore')
self.use_username_password_store = use_store
def _add_documentation_link(self, links, section, variable_name, default=None):
"""
:param list links: List of links to append link of the form "Section: <link>", if link available
:param str section: Name of the section (concept, command line options, API reference)
:param str variable_name: Variable name in main module that should hold URL to documentation
:param str default: Default URL to documentation
"""
url = getattr(sys.modules['__main__'], variable_name, default)
if url:
links.append('%s: %s' % (section, url))
def __parse_args(self, accept_unrecognized_args=False):
""" Invoke the argument parser. """
# If the user provided a description, use it. Otherwise grab the doc string.
if self.description:
self.argparser.description = self.description
elif getattr(sys.modules['__main__'], '__doc__', None):
self.argparser.description = getattr(sys.modules['__main__'], '__doc__')
else:
self.argparser.description = 'No documentation defined. Please add a doc string to %s' % sys.modules['__main__'].__file__
self.argparser.epilog = self.epilog
# Only if there aren't any other command line arguments.
if len(sys.argv) == 1 and self.argument_defaults:
self.argparser.set_defaults(**self.argument_defaults)
if accept_unrecognized_args:
self.args, self.unrecognized_args = self.argparser.parse_known_args()
else:
self.args = self.argparser.parse_args()
def __process_username_password(self):
""" If indicated, process the username and password """
if self.use_username_password_store is not None:
if self.args.clear_store:
with load_config(sections=AUTH_SECTIONS) as config:
config.remove_option(AUTH_SECTION, 'username')
if not self.args.username:
self.args.username = get_username(use_store=self.use_username_password_store)
if self.args.clear_store:
remove_password(AUTH_SECTION, username=self.args.username)
if not self.args.password:
self.args.password = get_password(AUTH_SECTION, username=self.args.username)
if self.use_username_password_store:
save_password(AUTH_SECTION, self.args.password, self.args.username)
def __finish_initializing(self):
""" Handle any initialization after arguments & config has been parsed. """
if self.args.debug or self.args.trace:
# Set the console (StreamHandler) to allow debug statements.
if self.args.debug:
self.console.setLevel(logging.DEBUG)
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
# Set the global level to debug.
if self.args.debug:
self.log.setLevel(logging.DEBUG)
if self.args.log or self.log_file:
# Allow the user to override the default log file handler.
try:
self.log_file_handler = sys.modules['__main__'].log_file_handler(self.args.log or self.log_file)
except Exception:
self.log_file_handler = logging.FileHandler(self.args.log or self.log_file)
self.log_file_handler.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
self.log_file_handler.setLevel(logging.DEBUG)
self.log.addHandler(self.log_file_handler)
# Allow cli.log, args & self to be accessed from __main__
if not hasattr(sys.modules['__main__'], 'log'):
sys.modules['__main__'].log = self.log
if not hasattr(sys.modules['__main__'], 'cli'):
sys.modules['__main__'].cli = self
if not hasattr(sys.modules['__main__'], 'args'):
sys.modules['__main__'].args = self.args
@classmethod
def register_exception(cls, exception, func):
"""
Allow callers to register a function to be run when the given
exception is raised while inside a cli.run() context manager.
"""
cls.exceptions[exception] = func
@contextlib.contextmanager
def run(self, accept_unrecognized_args=False):
"""
Called via the `with` statement to invoke the :func:`contextlib.contextmanager`.
Control is then yielded back to the caller.
All exceptions are caught & a stack trace emitted, except in the case of `shrew.cli.ExitedCleanly`.
"""
self.__parse_args(accept_unrecognized_args)
self.__parse_config()
self.__process_username_password()
self.__finish_initializing()
exit_status = 0
try:
yield self
except ExitedCleanly:
pass
except (Exception, KeyboardInterrupt) as e:
# Run any method a library or caller might have registered.
for base in type(e).mro():
if base in self.exceptions:
self.exceptions[base](e)
break
else:
self.log.exception(e)
exit_status = os.EX_SOFTWARE
finally:
logging.shutdown()
sys.exit(exit_status)
|
litters/shrew
|
shrew/cli.py
|
CLI.__process_username_password
|
python
|
def __process_username_password(self):
if self.use_username_password_store is not None:
if self.args.clear_store:
with load_config(sections=AUTH_SECTIONS) as config:
config.remove_option(AUTH_SECTION, 'username')
if not self.args.username:
self.args.username = get_username(use_store=self.use_username_password_store)
if self.args.clear_store:
remove_password(AUTH_SECTION, username=self.args.username)
if not self.args.password:
self.args.password = get_password(AUTH_SECTION, username=self.args.username)
if self.use_username_password_store:
save_password(AUTH_SECTION, self.args.password, self.args.username)
|
If indicated, process the username and password
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/cli.py#L274-L289
|
[
"def get_username(use_store=False):\n if use_store:\n with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:\n if config.has_option(AUTH_SECTION, 'username'):\n username = config.get(AUTH_SECTION, 'username')\n else:\n username = raw_input(\"Username [%s]: \" % getpass.getuser())\n if not username:\n username = getpass.getuser()\n config.set(AUTH_SECTION, 'username', username)\n else:\n username = raw_input(\"Username [%s]: \" % getpass.getuser())\n if not username:\n username = getpass.getuser()\n\n return username\n",
"def save_password(entry, password, username=None):\n \"\"\"\n Saves the given password in the user's keychain.\n\n :param entry: The entry in the keychain. This is a caller specific key.\n :param password: The password to save in the keychain.\n :param username: The username to get the password for. Default is the current user.\n \"\"\"\n\n if username is None:\n username = get_username()\n\n has_keychain = initialize_keychain()\n\n if has_keychain:\n try:\n keyring.set_password(entry, username, password)\n except Exception as e:\n log.warn(\"Unable to set password in keyring. Continuing..\")\n log.debug(e)\n",
"def remove_password(entry, username=None):\n \"\"\"\n Removes the password for the specific user in the user's keychain.\n\n :param entry: The entry in the keychain. This is a caller specific key.\n :param username: The username whose password is to be removed. Default is the current user.\n \"\"\"\n\n if username is None:\n username = get_username()\n\n has_keychain = initialize_keychain()\n\n if has_keychain:\n try:\n keyring.delete_password(entry, username)\n except Exception as e:\n print e\n log.warn(\"Unable to delete password in keyring. Continuing..\")\n log.debug(e)\n",
"def get_password(entry=None, username=None, prompt=None, always_ask=False):\n \"\"\"\n Prompt the user for a password on stdin.\n\n :param username: The username to get the password for. Default is the current user.\n :param entry: The entry in the keychain. This is a caller specific key.\n :param prompt: The entry in the keychain. This is a caller specific key.\n :param always_ask: Force the user to enter the password every time.\n \"\"\"\n\n password = None\n\n if username is None:\n username = get_username()\n\n has_keychain = initialize_keychain()\n\n # Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will thrown an error.\n unlock_keychain(username)\n\n if prompt is None:\n prompt = \"Enter %s's password: \" % username\n\n if has_keychain and entry is not None and always_ask is False:\n password = get_password_from_keyring(entry, username)\n\n if password is None:\n password = getpass.getpass(prompt=prompt)\n\n return password\n"
] |
class CLI(object):
"""
Initialize a command line helper instance.
Example::
import shrew.cli
# Adding a version variable will automatically let you use --version on the command line.
# VERSION is also acceptable.
version = "1.0"
@shrew.cli.entrypoint
def main(cli):
cli.add_argument("-p", "--podling", required=True, default="yoko", help="Podling to operate on.")
cli.add_argument("-q", dest="quiet", action="store_true", help="An example flag")
with cli.run():
if not cli.args.quiet:
cli.log.info("Operating in Incubator podling: %s", cli.args.podling)
.. note::
When you use shrew.cli the following are available in __main__: cli, args & log.
.. note::
If using --log or --log-file, you can override the default FileHandler by supplying
a log_file_handler() function that returns a valid logging.handler.
Example::
def log_file_handler(filename):
return logging.handlers.TimedRotatingFileHandler(filename, when='midnight', backupCount=14)
.. note::
Example::
@shrew.cli.entrypoint
def main(cli):
...
cli.influx_logger.enable_reporting()
with cli.run():
...
"""
exceptions = {}
def __init__(self, name=None):
if name is None:
name = os.path.basename(sys.argv[0])
#: The name of this cli instance.
self.name = name
#: :mod:`argparse` replaces optparse in Python 2.7, it is installable as a stand-alone module.
self.argparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
self.argparser.add_argument(
'--debug', action='store_true', default=False, help='Turn on debug mode / logging.'
)
self.argparser.add_argument(
'--trace', action='store_true', default=False, help='Turn on trace logging. Implies --debug.'
)
self.argparser.add_argument(
'--log', help='Log file destination. Defaults to stdout only.'
)
#: Call into a :mod:`logging` instance.
self.log = getLogger(self.name)
# If the user has version defined, display it.
for name in ('version', 'VERSION'):
if hasattr(sys.modules['__main__'], name):
self.argparser.add_argument('--version', action='version', version='%(prog)s ' + getattr(sys.modules['__main__'], name))
#: :class:`ConfigParser` configuration object if --config was passed on the command line.
#: Default is None.
self.config = None
#: Configuration file to load. Default is None.
self.config_file = None
#: Description for the argument parser.
self.description = None
#: Epilog for the argument parser.
self.epilog = None
#: Parsed arguments.
self.args = None
#: Unrecognized arguments.
self.unrecognized_args = None
# Disable logging by default, programs can enable if it is wanted
if self.log:
# Keep the handler around, so we can change it's level later.
self.console = logging.StreamHandler()
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
self.console.setLevel(logging.INFO)
self.log.addHandler(self.console)
# This controls the global level for loggers. Filtering will occur in handlers for levels.
self.log.setLevel(logging.INFO)
#: Caller can always write DEBUG level logs to a filename.
#: Using --log on the command line will override this variable.
self.log_file = None
#: :class:`logging` FileHandler instance if --log was passed, or `log_file` is set.
self.log_file_handler = None
# Defaults to use only when there are no other arguments.
self.argument_defaults = None
# Flag to determine if we should parse the --config file.
self.should_parse_config = False
# Should we process the username and passwords
self.use_username_password_store = None
def add_argument(self, *args, **kwargs):
"""
Add a command line argument.
The current underlying implementation uses :mod:`argparse`.
"""
self.argparser.add_argument(*args, **kwargs)
def add_argument_defaults(self, **kwargs):
"""
Set defaults to be passed to the argument parser ONLY when there are
no other arguments on the command line. If you want regular defaults,
use the default= setting on :meth:`add_argument`.
Example::
cli.add_argument_defaults(start=True, debug=True)
"""
self.argument_defaults = kwargs
def add_config_option(self, default=None):
""" Add a --config option to the argument parser. """
self.argparser.add_argument('--config', default=default, help='Config file to read. Defaults to: %(default)s')
self.should_parse_config = True
def add_username_password(self, use_store=False):
""" Add --username and --password options
:param bool use_store: Name of the section (concept, command line options, API reference)
"""
self.argparser.add_argument('--username', default=None, help='Username')
self.argparser.add_argument('--password', default=None, help='Password')
self.argparser.add_argument('--clear-store', action='store_true', default=False, help='Clear password keystore')
self.use_username_password_store = use_store
def _add_documentation_link(self, links, section, variable_name, default=None):
"""
:param list links: List of links to append link of the form "Section: <link>", if link available
:param str section: Name of the section (concept, command line options, API reference)
:param str variable_name: Variable name in main module that should hold URL to documentation
:param str default: Default URL to documentation
"""
url = getattr(sys.modules['__main__'], variable_name, default)
if url:
links.append('%s: %s' % (section, url))
def __parse_args(self, accept_unrecognized_args=False):
""" Invoke the argument parser. """
# If the user provided a description, use it. Otherwise grab the doc string.
if self.description:
self.argparser.description = self.description
elif getattr(sys.modules['__main__'], '__doc__', None):
self.argparser.description = getattr(sys.modules['__main__'], '__doc__')
else:
self.argparser.description = 'No documentation defined. Please add a doc string to %s' % sys.modules['__main__'].__file__
self.argparser.epilog = self.epilog
# Only if there aren't any other command line arguments.
if len(sys.argv) == 1 and self.argument_defaults:
self.argparser.set_defaults(**self.argument_defaults)
if accept_unrecognized_args:
self.args, self.unrecognized_args = self.argparser.parse_known_args()
else:
self.args = self.argparser.parse_args()
def __parse_config(self):
""" Invoke the config file parser. """
if self.should_parse_config and (self.args.config or self.config_file):
self.config = ConfigParser.SafeConfigParser()
self.config.read(self.args.config or self.config_file)
def __finish_initializing(self):
""" Handle any initialization after arguments & config has been parsed. """
if self.args.debug or self.args.trace:
# Set the console (StreamHandler) to allow debug statements.
if self.args.debug:
self.console.setLevel(logging.DEBUG)
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
# Set the global level to debug.
if self.args.debug:
self.log.setLevel(logging.DEBUG)
if self.args.log or self.log_file:
# Allow the user to override the default log file handler.
try:
self.log_file_handler = sys.modules['__main__'].log_file_handler(self.args.log or self.log_file)
except Exception:
self.log_file_handler = logging.FileHandler(self.args.log or self.log_file)
self.log_file_handler.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
self.log_file_handler.setLevel(logging.DEBUG)
self.log.addHandler(self.log_file_handler)
# Allow cli.log, args & self to be accessed from __main__
if not hasattr(sys.modules['__main__'], 'log'):
sys.modules['__main__'].log = self.log
if not hasattr(sys.modules['__main__'], 'cli'):
sys.modules['__main__'].cli = self
if not hasattr(sys.modules['__main__'], 'args'):
sys.modules['__main__'].args = self.args
@classmethod
def register_exception(cls, exception, func):
"""
Allow callers to register a function to be run when the given
exception is raised while inside a cli.run() context manager.
"""
cls.exceptions[exception] = func
@contextlib.contextmanager
def run(self, accept_unrecognized_args=False):
"""
Called via the `with` statement to invoke the :func:`contextlib.contextmanager`.
Control is then yielded back to the caller.
All exceptions are caught & a stack trace emitted, except in the case of `shrew.cli.ExitedCleanly`.
"""
self.__parse_args(accept_unrecognized_args)
self.__parse_config()
self.__process_username_password()
self.__finish_initializing()
exit_status = 0
try:
yield self
except ExitedCleanly:
pass
except (Exception, KeyboardInterrupt) as e:
# Run any method a library or caller might have registered.
for base in type(e).mro():
if base in self.exceptions:
self.exceptions[base](e)
break
else:
self.log.exception(e)
exit_status = os.EX_SOFTWARE
finally:
logging.shutdown()
sys.exit(exit_status)
|
litters/shrew
|
shrew/cli.py
|
CLI.__finish_initializing
|
python
|
def __finish_initializing(self):
if self.args.debug or self.args.trace:
# Set the console (StreamHandler) to allow debug statements.
if self.args.debug:
self.console.setLevel(logging.DEBUG)
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
# Set the global level to debug.
if self.args.debug:
self.log.setLevel(logging.DEBUG)
if self.args.log or self.log_file:
# Allow the user to override the default log file handler.
try:
self.log_file_handler = sys.modules['__main__'].log_file_handler(self.args.log or self.log_file)
except Exception:
self.log_file_handler = logging.FileHandler(self.args.log or self.log_file)
self.log_file_handler.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
self.log_file_handler.setLevel(logging.DEBUG)
self.log.addHandler(self.log_file_handler)
# Allow cli.log, args & self to be accessed from __main__
if not hasattr(sys.modules['__main__'], 'log'):
sys.modules['__main__'].log = self.log
if not hasattr(sys.modules['__main__'], 'cli'):
sys.modules['__main__'].cli = self
if not hasattr(sys.modules['__main__'], 'args'):
sys.modules['__main__'].args = self.args
|
Handle any initialization after arguments & config has been parsed.
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/cli.py#L291-L327
| null |
class CLI(object):
"""
Initialize a command line helper instance.
Example::
import shrew.cli
# Adding a version variable will automatically let you use --version on the command line.
# VERSION is also acceptable.
version = "1.0"
@shrew.cli.entrypoint
def main(cli):
cli.add_argument("-p", "--podling", required=True, default="yoko", help="Podling to operate on.")
cli.add_argument("-q", dest="quiet", action="store_true", help="An example flag")
with cli.run():
if not cli.args.quiet:
cli.log.info("Operating in Incubator podling: %s", cli.args.podling)
.. note::
When you use shrew.cli the following are available in __main__: cli, args & log.
.. note::
If using --log or --log-file, you can override the default FileHandler by supplying
a log_file_handler() function that returns a valid logging.handler.
Example::
def log_file_handler(filename):
return logging.handlers.TimedRotatingFileHandler(filename, when='midnight', backupCount=14)
.. note::
Example::
@shrew.cli.entrypoint
def main(cli):
...
cli.influx_logger.enable_reporting()
with cli.run():
...
"""
exceptions = {}
def __init__(self, name=None):
if name is None:
name = os.path.basename(sys.argv[0])
#: The name of this cli instance.
self.name = name
#: :mod:`argparse` replaces optparse in Python 2.7, it is installable as a stand-alone module.
self.argparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
self.argparser.add_argument(
'--debug', action='store_true', default=False, help='Turn on debug mode / logging.'
)
self.argparser.add_argument(
'--trace', action='store_true', default=False, help='Turn on trace logging. Implies --debug.'
)
self.argparser.add_argument(
'--log', help='Log file destination. Defaults to stdout only.'
)
#: Call into a :mod:`logging` instance.
self.log = getLogger(self.name)
# If the user has version defined, display it.
for name in ('version', 'VERSION'):
if hasattr(sys.modules['__main__'], name):
self.argparser.add_argument('--version', action='version', version='%(prog)s ' + getattr(sys.modules['__main__'], name))
#: :class:`ConfigParser` configuration object if --config was passed on the command line.
#: Default is None.
self.config = None
#: Configuration file to load. Default is None.
self.config_file = None
#: Description for the argument parser.
self.description = None
#: Epilog for the argument parser.
self.epilog = None
#: Parsed arguments.
self.args = None
#: Unrecognized arguments.
self.unrecognized_args = None
# Disable logging by default, programs can enable if it is wanted
if self.log:
# Keep the handler around, so we can change it's level later.
self.console = logging.StreamHandler()
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
self.console.setLevel(logging.INFO)
self.log.addHandler(self.console)
# This controls the global level for loggers. Filtering will occur in handlers for levels.
self.log.setLevel(logging.INFO)
#: Caller can always write DEBUG level logs to a filename.
#: Using --log on the command line will override this variable.
self.log_file = None
#: :class:`logging` FileHandler instance if --log was passed, or `log_file` is set.
self.log_file_handler = None
# Defaults to use only when there are no other arguments.
self.argument_defaults = None
# Flag to determine if we should parse the --config file.
self.should_parse_config = False
# Should we process the username and passwords
self.use_username_password_store = None
def add_argument(self, *args, **kwargs):
"""
Add a command line argument.
The current underlying implementation uses :mod:`argparse`.
"""
self.argparser.add_argument(*args, **kwargs)
def add_argument_defaults(self, **kwargs):
"""
Set defaults to be passed to the argument parser ONLY when there are
no other arguments on the command line. If you want regular defaults,
use the default= setting on :meth:`add_argument`.
Example::
cli.add_argument_defaults(start=True, debug=True)
"""
self.argument_defaults = kwargs
def add_config_option(self, default=None):
""" Add a --config option to the argument parser. """
self.argparser.add_argument('--config', default=default, help='Config file to read. Defaults to: %(default)s')
self.should_parse_config = True
def add_username_password(self, use_store=False):
""" Add --username and --password options
:param bool use_store: Name of the section (concept, command line options, API reference)
"""
self.argparser.add_argument('--username', default=None, help='Username')
self.argparser.add_argument('--password', default=None, help='Password')
self.argparser.add_argument('--clear-store', action='store_true', default=False, help='Clear password keystore')
self.use_username_password_store = use_store
def _add_documentation_link(self, links, section, variable_name, default=None):
"""
:param list links: List of links to append link of the form "Section: <link>", if link available
:param str section: Name of the section (concept, command line options, API reference)
:param str variable_name: Variable name in main module that should hold URL to documentation
:param str default: Default URL to documentation
"""
url = getattr(sys.modules['__main__'], variable_name, default)
if url:
links.append('%s: %s' % (section, url))
def __parse_args(self, accept_unrecognized_args=False):
""" Invoke the argument parser. """
# If the user provided a description, use it. Otherwise grab the doc string.
if self.description:
self.argparser.description = self.description
elif getattr(sys.modules['__main__'], '__doc__', None):
self.argparser.description = getattr(sys.modules['__main__'], '__doc__')
else:
self.argparser.description = 'No documentation defined. Please add a doc string to %s' % sys.modules['__main__'].__file__
self.argparser.epilog = self.epilog
# Only if there aren't any other command line arguments.
if len(sys.argv) == 1 and self.argument_defaults:
self.argparser.set_defaults(**self.argument_defaults)
if accept_unrecognized_args:
self.args, self.unrecognized_args = self.argparser.parse_known_args()
else:
self.args = self.argparser.parse_args()
def __parse_config(self):
""" Invoke the config file parser. """
if self.should_parse_config and (self.args.config or self.config_file):
self.config = ConfigParser.SafeConfigParser()
self.config.read(self.args.config or self.config_file)
def __process_username_password(self):
""" If indicated, process the username and password """
if self.use_username_password_store is not None:
if self.args.clear_store:
with load_config(sections=AUTH_SECTIONS) as config:
config.remove_option(AUTH_SECTION, 'username')
if not self.args.username:
self.args.username = get_username(use_store=self.use_username_password_store)
if self.args.clear_store:
remove_password(AUTH_SECTION, username=self.args.username)
if not self.args.password:
self.args.password = get_password(AUTH_SECTION, username=self.args.username)
if self.use_username_password_store:
save_password(AUTH_SECTION, self.args.password, self.args.username)
@classmethod
def register_exception(cls, exception, func):
"""
Allow callers to register a function to be run when the given
exception is raised while inside a cli.run() context manager.
"""
cls.exceptions[exception] = func
@contextlib.contextmanager
def run(self, accept_unrecognized_args=False):
"""
Called via the `with` statement to invoke the :func:`contextlib.contextmanager`.
Control is then yielded back to the caller.
All exceptions are caught & a stack trace emitted, except in the case of `shrew.cli.ExitedCleanly`.
"""
self.__parse_args(accept_unrecognized_args)
self.__parse_config()
self.__process_username_password()
self.__finish_initializing()
exit_status = 0
try:
yield self
except ExitedCleanly:
pass
except (Exception, KeyboardInterrupt) as e:
# Run any method a library or caller might have registered.
for base in type(e).mro():
if base in self.exceptions:
self.exceptions[base](e)
break
else:
self.log.exception(e)
exit_status = os.EX_SOFTWARE
finally:
logging.shutdown()
sys.exit(exit_status)
|
litters/shrew
|
shrew/cli.py
|
CLI.run
|
python
|
def run(self, accept_unrecognized_args=False):
self.__parse_args(accept_unrecognized_args)
self.__parse_config()
self.__process_username_password()
self.__finish_initializing()
exit_status = 0
try:
yield self
except ExitedCleanly:
pass
except (Exception, KeyboardInterrupt) as e:
# Run any method a library or caller might have registered.
for base in type(e).mro():
if base in self.exceptions:
self.exceptions[base](e)
break
else:
self.log.exception(e)
exit_status = os.EX_SOFTWARE
finally:
logging.shutdown()
sys.exit(exit_status)
|
Called via the `with` statement to invoke the :func:`contextlib.contextmanager`.
Control is then yielded back to the caller.
All exceptions are caught & a stack trace emitted, except in the case of `shrew.cli.ExitedCleanly`.
|
train
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/cli.py#L339-L372
|
[
"def __parse_args(self, accept_unrecognized_args=False):\n \"\"\" Invoke the argument parser. \"\"\"\n\n # If the user provided a description, use it. Otherwise grab the doc string.\n if self.description:\n self.argparser.description = self.description\n elif getattr(sys.modules['__main__'], '__doc__', None):\n self.argparser.description = getattr(sys.modules['__main__'], '__doc__')\n else:\n self.argparser.description = 'No documentation defined. Please add a doc string to %s' % sys.modules['__main__'].__file__\n\n self.argparser.epilog = self.epilog\n\n # Only if there aren't any other command line arguments.\n if len(sys.argv) == 1 and self.argument_defaults:\n self.argparser.set_defaults(**self.argument_defaults)\n\n if accept_unrecognized_args:\n self.args, self.unrecognized_args = self.argparser.parse_known_args()\n else:\n self.args = self.argparser.parse_args()\n",
"def __parse_config(self):\n \"\"\" Invoke the config file parser. \"\"\"\n\n if self.should_parse_config and (self.args.config or self.config_file):\n self.config = ConfigParser.SafeConfigParser()\n self.config.read(self.args.config or self.config_file)\n",
"def __process_username_password(self):\n \"\"\" If indicated, process the username and password \"\"\"\n\n if self.use_username_password_store is not None:\n if self.args.clear_store:\n with load_config(sections=AUTH_SECTIONS) as config:\n config.remove_option(AUTH_SECTION, 'username')\n if not self.args.username:\n self.args.username = get_username(use_store=self.use_username_password_store)\n\n if self.args.clear_store:\n remove_password(AUTH_SECTION, username=self.args.username)\n if not self.args.password:\n self.args.password = get_password(AUTH_SECTION, username=self.args.username)\n if self.use_username_password_store:\n save_password(AUTH_SECTION, self.args.password, self.args.username)\n",
"def __finish_initializing(self):\n \"\"\" Handle any initialization after arguments & config has been parsed. \"\"\"\n\n if self.args.debug or self.args.trace:\n # Set the console (StreamHandler) to allow debug statements.\n\n if self.args.debug:\n self.console.setLevel(logging.DEBUG)\n\n self.console.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))\n\n # Set the global level to debug.\n if self.args.debug:\n self.log.setLevel(logging.DEBUG)\n\n if self.args.log or self.log_file:\n\n # Allow the user to override the default log file handler.\n try:\n self.log_file_handler = sys.modules['__main__'].log_file_handler(self.args.log or self.log_file)\n except Exception:\n self.log_file_handler = logging.FileHandler(self.args.log or self.log_file)\n\n self.log_file_handler.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))\n self.log_file_handler.setLevel(logging.DEBUG)\n\n self.log.addHandler(self.log_file_handler)\n\n # Allow cli.log, args & self to be accessed from __main__\n if not hasattr(sys.modules['__main__'], 'log'):\n sys.modules['__main__'].log = self.log\n\n if not hasattr(sys.modules['__main__'], 'cli'):\n sys.modules['__main__'].cli = self\n\n if not hasattr(sys.modules['__main__'], 'args'):\n sys.modules['__main__'].args = self.args\n"
] |
class CLI(object):
"""
Initialize a command line helper instance.
Example::
import shrew.cli
# Adding a version variable will automatically let you use --version on the command line.
# VERSION is also acceptable.
version = "1.0"
@shrew.cli.entrypoint
def main(cli):
cli.add_argument("-p", "--podling", required=True, default="yoko", help="Podling to operate on.")
cli.add_argument("-q", dest="quiet", action="store_true", help="An example flag")
with cli.run():
if not cli.args.quiet:
cli.log.info("Operating in Incubator podling: %s", cli.args.podling)
.. note::
When you use shrew.cli the following are available in __main__: cli, args & log.
.. note::
If using --log or --log-file, you can override the default FileHandler by supplying
a log_file_handler() function that returns a valid logging.handler.
Example::
def log_file_handler(filename):
return logging.handlers.TimedRotatingFileHandler(filename, when='midnight', backupCount=14)
.. note::
Example::
@shrew.cli.entrypoint
def main(cli):
...
cli.influx_logger.enable_reporting()
with cli.run():
...
"""
exceptions = {}
def __init__(self, name=None):
if name is None:
name = os.path.basename(sys.argv[0])
#: The name of this cli instance.
self.name = name
#: :mod:`argparse` replaces optparse in Python 2.7, it is installable as a stand-alone module.
self.argparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
self.argparser.add_argument(
'--debug', action='store_true', default=False, help='Turn on debug mode / logging.'
)
self.argparser.add_argument(
'--trace', action='store_true', default=False, help='Turn on trace logging. Implies --debug.'
)
self.argparser.add_argument(
'--log', help='Log file destination. Defaults to stdout only.'
)
#: Call into a :mod:`logging` instance.
self.log = getLogger(self.name)
# If the user has version defined, display it.
for name in ('version', 'VERSION'):
if hasattr(sys.modules['__main__'], name):
self.argparser.add_argument('--version', action='version', version='%(prog)s ' + getattr(sys.modules['__main__'], name))
#: :class:`ConfigParser` configuration object if --config was passed on the command line.
#: Default is None.
self.config = None
#: Configuration file to load. Default is None.
self.config_file = None
#: Description for the argument parser.
self.description = None
#: Epilog for the argument parser.
self.epilog = None
#: Parsed arguments.
self.args = None
#: Unrecognized arguments.
self.unrecognized_args = None
# Disable logging by default, programs can enable if it is wanted
if self.log:
# Keep the handler around, so we can change it's level later.
self.console = logging.StreamHandler()
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
self.console.setLevel(logging.INFO)
self.log.addHandler(self.console)
# This controls the global level for loggers. Filtering will occur in handlers for levels.
self.log.setLevel(logging.INFO)
#: Caller can always write DEBUG level logs to a filename.
#: Using --log on the command line will override this variable.
self.log_file = None
#: :class:`logging` FileHandler instance if --log was passed, or `log_file` is set.
self.log_file_handler = None
# Defaults to use only when there are no other arguments.
self.argument_defaults = None
# Flag to determine if we should parse the --config file.
self.should_parse_config = False
# Should we process the username and passwords
self.use_username_password_store = None
def add_argument(self, *args, **kwargs):
"""
Add a command line argument.
The current underlying implementation uses :mod:`argparse`.
"""
self.argparser.add_argument(*args, **kwargs)
def add_argument_defaults(self, **kwargs):
"""
Set defaults to be passed to the argument parser ONLY when there are
no other arguments on the command line. If you want regular defaults,
use the default= setting on :meth:`add_argument`.
Example::
cli.add_argument_defaults(start=True, debug=True)
"""
self.argument_defaults = kwargs
def add_config_option(self, default=None):
""" Add a --config option to the argument parser. """
self.argparser.add_argument('--config', default=default, help='Config file to read. Defaults to: %(default)s')
self.should_parse_config = True
def add_username_password(self, use_store=False):
""" Add --username and --password options
:param bool use_store: Name of the section (concept, command line options, API reference)
"""
self.argparser.add_argument('--username', default=None, help='Username')
self.argparser.add_argument('--password', default=None, help='Password')
self.argparser.add_argument('--clear-store', action='store_true', default=False, help='Clear password keystore')
self.use_username_password_store = use_store
def _add_documentation_link(self, links, section, variable_name, default=None):
"""
:param list links: List of links to append link of the form "Section: <link>", if link available
:param str section: Name of the section (concept, command line options, API reference)
:param str variable_name: Variable name in main module that should hold URL to documentation
:param str default: Default URL to documentation
"""
url = getattr(sys.modules['__main__'], variable_name, default)
if url:
links.append('%s: %s' % (section, url))
def __parse_args(self, accept_unrecognized_args=False):
""" Invoke the argument parser. """
# If the user provided a description, use it. Otherwise grab the doc string.
if self.description:
self.argparser.description = self.description
elif getattr(sys.modules['__main__'], '__doc__', None):
self.argparser.description = getattr(sys.modules['__main__'], '__doc__')
else:
self.argparser.description = 'No documentation defined. Please add a doc string to %s' % sys.modules['__main__'].__file__
self.argparser.epilog = self.epilog
# Only if there aren't any other command line arguments.
if len(sys.argv) == 1 and self.argument_defaults:
self.argparser.set_defaults(**self.argument_defaults)
if accept_unrecognized_args:
self.args, self.unrecognized_args = self.argparser.parse_known_args()
else:
self.args = self.argparser.parse_args()
def __parse_config(self):
""" Invoke the config file parser. """
if self.should_parse_config and (self.args.config or self.config_file):
self.config = ConfigParser.SafeConfigParser()
self.config.read(self.args.config or self.config_file)
def __process_username_password(self):
""" If indicated, process the username and password """
if self.use_username_password_store is not None:
if self.args.clear_store:
with load_config(sections=AUTH_SECTIONS) as config:
config.remove_option(AUTH_SECTION, 'username')
if not self.args.username:
self.args.username = get_username(use_store=self.use_username_password_store)
if self.args.clear_store:
remove_password(AUTH_SECTION, username=self.args.username)
if not self.args.password:
self.args.password = get_password(AUTH_SECTION, username=self.args.username)
if self.use_username_password_store:
save_password(AUTH_SECTION, self.args.password, self.args.username)
def __finish_initializing(self):
""" Handle any initialization after arguments & config has been parsed. """
if self.args.debug or self.args.trace:
# Set the console (StreamHandler) to allow debug statements.
if self.args.debug:
self.console.setLevel(logging.DEBUG)
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
# Set the global level to debug.
if self.args.debug:
self.log.setLevel(logging.DEBUG)
if self.args.log or self.log_file:
# Allow the user to override the default log file handler.
try:
self.log_file_handler = sys.modules['__main__'].log_file_handler(self.args.log or self.log_file)
except Exception:
self.log_file_handler = logging.FileHandler(self.args.log or self.log_file)
self.log_file_handler.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
self.log_file_handler.setLevel(logging.DEBUG)
self.log.addHandler(self.log_file_handler)
# Allow cli.log, args & self to be accessed from __main__
if not hasattr(sys.modules['__main__'], 'log'):
sys.modules['__main__'].log = self.log
if not hasattr(sys.modules['__main__'], 'cli'):
sys.modules['__main__'].cli = self
if not hasattr(sys.modules['__main__'], 'args'):
sys.modules['__main__'].args = self.args
@classmethod
def register_exception(cls, exception, func):
"""
Allow callers to register a function to be run when the given
exception is raised while inside a cli.run() context manager.
"""
cls.exceptions[exception] = func
@contextlib.contextmanager
|
veeti/decent
|
decent/error.py
|
Error.as_dict
|
python
|
def as_dict(self, join='.'):
if self.path:
path = [str(node) for node in self.path]
else:
path = ''
return { join.join(path): self.message }
|
Returns the error as a path to message dictionary. Paths are joined
with the ``join`` string.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/error.py#L28-L37
| null |
class Error(DecentError):
    """
    A single validation error.

    ``message`` explains the problem (for example, "this value must be at
    least 10 characters long"). ``path`` is the list of keys leading to the
    field the error applies to; it is normally filled in by the
    :class:`decent.schema.Schema` and/or validator callable in use.
    """

    def __init__(self, message, path=None):
        self.message = message
        # Copy the caller's path so later mutations on either side don't leak.
        self.path = list(path) if path else []

    @property
    def messages(self):
        # One-element list keeps the API uniform with Invalid collections.
        return [self.message]

    @property
    def paths(self):
        return [self.path]

    def __str__(self):
        return str(self.message)
|
veeti/decent
|
decent/error.py
|
Invalid.as_dict
|
python
|
def as_dict(self, join='.'):
result = {}
for e in self.errors:
result.update(e.as_dict(join))
return result
|
Returns all the errors in this collection as a path to message
dictionary. Paths are joined with the ``join`` string.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/error.py#L64-L72
| null |
class Invalid(Error):
    """
    A collection of one or more validation errors for a schema.
    """
    def __init__(self, errors=None):
        self.errors = []
        if errors:
            # Copy so the caller's list can't be mutated through us.
            self.errors.extend(errors[:])

    def append(self, error):
        # Add another Error (or nested Invalid) to the collection.
        self.errors.append(error)

    @property
    def message(self):
        """
        The first error message in this collection.
        """
        # Implicitly returns None when the collection is empty.
        if self.errors:
            return self.errors[0].message

    @property
    def path(self):
        """
        The first error path in this collection.
        """
        # Implicitly returns None when the collection is empty.
        if self.errors:
            return self.errors[0].path

    @property
    def messages(self):
        """
        The list of error messages in this collection.
        """
        return [e.message for e in self.errors]

    @property
    def paths(self):
        """
        The list of error paths in this collection.
        """
        return [e.path for e in self.errors]

    def __str__(self):
        return ', '.join(self.messages)

    def __getitem__(self, i):
        # Index straight into the underlying error list.
        return self.errors[i]

    def __len__(self):
        # Truthiness/len reflect the number of collected errors.
        return len(self.errors)
|
veeti/decent
|
decent/validators.py
|
All
|
python
|
def All(*validators):
    """
    Chain the given validator callables into one: each validator's output
    is fed to the next, and the final result is returned.
    """
    @wraps(All)
    def built(value):
        result = value
        for check in validators:
            result = check(result)
        return result
    return built
|
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L11-L21
| null |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def Any(*validators):
    """
    Combines all the given validator callables into one, running the given
    value through them in sequence until one succeeds.

    The first successful validator's result is returned. If every validator
    raises an ``Error``, the last such error is re-raised. With no
    validators at all the value is passed through unchanged (the previous
    implementation crashed with ``raise None`` in that case).
    """
    @wraps(Any)
    def built(value):
        error = None
        for validator in validators:
            try:
                return validator(value)
            except Error as e:
                error = e
        if error is None:
            # Nothing ran, so nothing failed: pass the value through.
            return value
        raise error
    return built
def Maybe(validator):
    """
    Wraps the given validator callable, running it only when the value is
    not ``None``; ``None`` itself passes through untouched.
    """
    @wraps(Maybe)
    def built(value):
        # `is not None` (identity) instead of `!= None`: objects that
        # override __eq__ to compare equal to None must still be validated.
        if value is not None:
            return validator(value)
        return None
    return built
def Msg(validator, message):
    """
    Wraps the given validator callable, replacing the message of any
    ``Error`` it raises. The original traceback is preserved.
    """
    @wraps(Msg)
    def built(value):
        try:
            return validator(value)
        except Error as e:
            e.message = message
            # Bare `raise` re-raises the active exception with its original
            # traceback intact (`raise e` would restart it here).
            raise
    return built
def Default(default):
    """
    Creates a validator callable that replaces ``None`` with the specified
    default value. Any other value — including falsy ones like ``0`` or
    ``''`` — is returned unchanged.
    """
    @wraps(Default)
    def built(value):
        # `is None` (identity) instead of `== None`: objects overriding
        # __eq__ must not be silently swapped for the default.
        if value is None:
            return default
        return value
    return built
## Basics
def Eq(value, message="Not equal to {!s}"):
    """
    Builds a validator that accepts only inputs equal to ``value``.

    On failure an ``Error`` is raised whose text is ``message`` formatted
    with ``value``.
    """
    @wraps(Eq)
    def built(candidate):
        if candidate != value:
            raise Error(message.format(value))
        return candidate
    return built
def Type(expected, message="Not of type {}"):
    """
    Builds a validator requiring the input's exact type to be ``expected``.

    This is a direct ``type()`` comparison, so subclasses are rejected —
    see ``Instance`` for an ``isinstance`` check. The failure message is
    formatted with the expected type's name.
    """
    @wraps(Type)
    def built(candidate):
        if type(candidate) != expected:
            raise Error(message.format(expected.__name__))
        return candidate
    return built
def Instance(expected, message="Not an instance of {}"):
    """
    Builds a validator that accepts any instance of ``expected``
    (an ``isinstance`` check, so subclasses pass). The failure message is
    formatted with the expected type's name.
    """
    @wraps(Instance)
    def built(candidate):
        if isinstance(candidate, expected):
            return candidate
        raise Error(message.format(expected.__name__))
    return built
def Coerce(type, message="Not a valid {} value"):
    """
    Creates a validator that attempts to coerce the given value to the
    specified ``type``. Raises an ``Error`` if the coercion fails.

    A custom message can be specified with ``message``; it is formatted
    with the target type's name.
    """
    @wraps(Coerce)
    def built(value):
        try:
            return type(value)
        except (TypeError, ValueError):
            # The original exception binding was unused; the cause is
            # deliberately replaced by a uniform validation Error.
            raise Error(message.format(type.__name__))
    return built
## Collections
def List(validator):
    """
    Creates a validator that runs the given validator on every item in a list
    or other collection. The validator can mutate the values.
    Any raised errors will be collected into a single ``Invalid`` error. Their
    paths will be replaced with the index of the item. Will raise an error if
    the input value is not iterable.
    """
    @wraps(List)
    def built(value):
        # Iterability check only; note the body below also assigns by index,
        # so an iterable without __setitem__ (e.g. a generator) would still
        # fail at value[i] = ... — TODO confirm intended input types.
        if not hasattr(value, '__iter__'):
            raise Error("Must be a list")
        invalid = Invalid()
        for i, item in enumerate(value):
            try:
                # Mutates the input in place with the validated item.
                value[i] = validator(item)
            except Invalid as e:
                # Nested collection: prefix each child's path with our index.
                for error in e:
                    error.path.insert(0, i)
                    invalid.append(error)
            except Error as e:
                # Single error: prefix its path with our index.
                e.path.insert(0, i)
                invalid.append(e)
        # Raise the aggregate only if any item failed.
        if len(invalid):
            raise invalid
        return value
    return built
## Booleans
def Boolean():
    """
    Builds a validator that coerces its input to a boolean:

    * booleans pass through unchanged
    * ``None`` becomes ``False``
    * integers are truthy except ``0``
    * strings ``y/yes/t/true`` and ``n/no/f/false`` (any case) map to
      ``True``/``False`` respectively

    Anything else raises an ``Error``.
    """
    @wraps(Boolean)
    def built(value):
        if isinstance(value, bool):
            return value
        if value == None:
            return False
        if isinstance(value, int):
            return value != 0
        if isinstance(value, str):
            lowered = value.lower()
            if lowered in {'y', 'yes', 't', 'true'}:
                return True
            if lowered in {'n', 'no', 'f', 'false'}:
                return False
        raise Error("Not a boolean value.")
    return built
## Numbers
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
    """
    Builds a validator checking that a number lies within [min, max],
    inclusive. Either bound may be ``None`` to leave that side open.

    Only ``numbers.Number`` instances are accepted, with booleans
    explicitly excluded. The failure messages are formatted with ``min``
    and ``max``.
    """
    @wraps(Range)
    def built(value):
        is_number = isinstance(value, numbers.Number) and not isinstance(value, bool)
        if not is_number:
            raise Error("Not a number")
        if min is not None and value < min:
            raise Error(min_message.format(min=min, max=max))
        if max is not None and value > max:
            raise Error(max_message.format(min=min, max=max))
        return value
    return built
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
    """
    Builds a validator checking that ``len(value)`` lies within
    [min, max], inclusive, and returns the original value unchanged.
    Values without a length raise an ``Error``. See :func:`.Range`.
    """
    range_check = Range(min, max, min_message, max_message)
    @wraps(Length)
    def built(value):
        if not hasattr(value, '__len__'):
            raise Error("Does not have a length")
        range_check(len(value))
        return value
    return built
## Strings
def _string_function(value, name):
    """Apply the no-argument string method ``name`` to ``value``, raising if it is not a string."""
    if not isinstance(value, six.string_types):
        raise Error("Must be a string")
    method = getattr(value, name)
    return method()
def Lower():
    """
    Builds a validator that lowercases a string input. Non-string inputs
    raise an ``Error``.
    """
    @wraps(Lower)
    def built(value):
        return _string_function(value, 'lower')
    return built
def Upper():
    """
    Builds a validator that uppercases a string input. Non-string inputs
    raise an ``Error``.
    """
    @wraps(Upper)
    def built(value):
        return _string_function(value, 'upper')
    return built
def Strip():
    """
    Builds a validator that strips surrounding whitespace from a string
    input. Non-string inputs raise an ``Error``.
    """
    @wraps(Strip)
    def built(value):
        return _string_function(value, 'strip')
    return built
def NotEmpty():
    """
    Builds a validator that accepts only non-empty strings; non-strings
    and empty strings raise an ``Error``.
    """
    @wraps(NotEmpty)
    def built(value):
        if isinstance(value, six.string_types) and value:
            return value
        raise Error("Must not be empty")
    return built
## String conversions
def Uuid(to_uuid=True):
    """
    Creates a UUID validator. Raises an ``Error`` for non-string types and
    for strings that do not parse as a UUID.

    The value is converted to a ``uuid.UUID`` instance unless ``to_uuid``
    is ``False``, in which case the original string is returned. Existing
    ``uuid.UUID`` instances pass through unchanged.
    """
    @wraps(Uuid)
    def built(value):
        if isinstance(value, uuid.UUID):
            return value
        if not isinstance(value, six.string_types):
            raise Error("Not a valid UUID")
        try:
            as_uuid = uuid.UUID(value)
        except (ValueError, AttributeError):
            # Unused exception binding removed; parse failures are reported
            # uniformly and the error is now built only when needed.
            raise Error("Not a valid UUID")
        return as_uuid if to_uuid else value
    return built
|
veeti/decent
|
decent/validators.py
|
Any
|
python
|
def Any(*validators):
@wraps(Any)
def built(value):
error = None
for validator in validators:
try:
return validator(value)
except Error as e:
error = e
raise error
return built
|
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L23-L37
| null |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built
def Maybe(validator):
"""
Wraps the given validator callable, only using it for the given value if it
is not ``None``.
"""
@wraps(Maybe)
def built(value):
if value != None:
return validator(value)
return built
def Msg(validator, message):
"""
Wraps the given validator callable, replacing any error messages raised.
"""
@wraps(Msg)
def built(value):
try:
return validator(value)
except Error as e:
e.message = message
raise e
return built
def Default(default):
"""
Creates a validator callable that replaces ``None`` with the specified
default value.
"""
@wraps(Default)
def built(value):
if value == None:
return default
return value
return built
## Basics
def Eq(value, message="Not equal to {!s}"):
"""
Creates a validator that compares the equality of the given value to
``value``.
A custom message can be specified with ``message``. It will be formatted
with ``value``.
"""
@wraps(Eq)
def built(_value):
if _value != value:
raise Error(message.format(value))
return _value
return built
def Type(expected, message="Not of type {}"):
"""
Creates a validator that compares the type of the given value to
``expected``. This is a direct type() equality check. Also see
``Instance``, which is an isinstance() check.
A custom message can be specified with ``message``.
"""
@wraps(Type)
def built(value):
if type(value) != expected:
raise Error(message.format(expected.__name__))
return value
return built
def Instance(expected, message="Not an instance of {}"):
"""
Creates a validator that checks if the given value is an instance of
``expected``.
A custom message can be specified with ``message``.
"""
@wraps(Instance)
def built(value):
if not isinstance(value, expected):
raise Error(message.format(expected.__name__))
return value
return built
def Coerce(type, message="Not a valid {} value"):
"""
Creates a validator that attempts to coerce the given value to the
specified ``type``. Will raise an error if the coercion fails.
A custom message can be specified with ``message``.
"""
@wraps(Coerce)
def built(value):
try:
return type(value)
except (TypeError, ValueError) as e:
raise Error(message.format(type.__name__))
return built
## Collections
def List(validator):
"""
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
"""
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
## Booleans
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built
## Numbers
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
"""
Creates a validator that checks if the given value's length is in the
specified range, inclusive. (Returns the original value.)
See :func:`.Range`.
"""
validator = Range(min, max, min_message, max_message)
@wraps(Length)
def built(value):
if not hasattr(value, '__len__'):
raise Error("Does not have a length")
validator(len(value))
return value
return built
## Strings
def _string_function(value, name):
if not isinstance(value, six.string_types):
raise Error("Must be a string")
return getattr(value, name)()
def Lower():
"""
Creates a validator that converts the input string to lowercase. Will raise
an error for non-string types.
"""
@wraps(Lower)
def built(value):
return _string_function(value, 'lower')
return built
def Upper():
"""
Creates a validator that converts the input string to UPPERCASE. Will raise
an error for non-string types.
"""
@wraps(Upper)
def built(value):
return _string_function(value, 'upper')
return built
def Strip():
"""
Creates a validator that strips the input string of whitespace. Will raise
an error for non-string types.
"""
@wraps(Strip)
def built(value):
return _string_function(value, 'strip')
return built
def NotEmpty():
"""
Creates a validator that validates the given string is not empty. Will
raise an error for non-string types.
"""
@wraps(NotEmpty)
def built(value):
if not isinstance(value, six.string_types) or not value:
raise Error("Must not be empty")
return value
return built
## String conversions
def Uuid(to_uuid=True):
"""
Creates a UUID validator. Will raise an error for non-string types and
non-UUID values.
The given value will be converted to an instance of ``uuid.UUID`` unless
``to_uuid`` is ``False``.
"""
@wraps(Uuid)
def built(value):
invalid = Error("Not a valid UUID")
if isinstance(value, uuid.UUID):
return value
elif not isinstance(value, six.string_types):
raise invalid
try:
as_uuid = uuid.UUID(value)
except (ValueError, AttributeError) as e:
raise invalid
if to_uuid:
return as_uuid
return value
return built
|
veeti/decent
|
decent/validators.py
|
Maybe
|
python
|
def Maybe(validator):
@wraps(Maybe)
def built(value):
if value != None:
return validator(value)
return built
|
Wraps the given validator callable, only using it for the given value if it
is not ``None``.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L39-L48
| null |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built
def Any(*validators):
"""
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
"""
@wraps(Any)
def built(value):
error = None
for validator in validators:
try:
return validator(value)
except Error as e:
error = e
raise error
return built
def Msg(validator, message):
"""
Wraps the given validator callable, replacing any error messages raised.
"""
@wraps(Msg)
def built(value):
try:
return validator(value)
except Error as e:
e.message = message
raise e
return built
def Default(default):
"""
Creates a validator callable that replaces ``None`` with the specified
default value.
"""
@wraps(Default)
def built(value):
if value == None:
return default
return value
return built
## Basics
def Eq(value, message="Not equal to {!s}"):
"""
Creates a validator that compares the equality of the given value to
``value``.
A custom message can be specified with ``message``. It will be formatted
with ``value``.
"""
@wraps(Eq)
def built(_value):
if _value != value:
raise Error(message.format(value))
return _value
return built
def Type(expected, message="Not of type {}"):
"""
Creates a validator that compares the type of the given value to
``expected``. This is a direct type() equality check. Also see
``Instance``, which is an isinstance() check.
A custom message can be specified with ``message``.
"""
@wraps(Type)
def built(value):
if type(value) != expected:
raise Error(message.format(expected.__name__))
return value
return built
def Instance(expected, message="Not an instance of {}"):
"""
Creates a validator that checks if the given value is an instance of
``expected``.
A custom message can be specified with ``message``.
"""
@wraps(Instance)
def built(value):
if not isinstance(value, expected):
raise Error(message.format(expected.__name__))
return value
return built
def Coerce(type, message="Not a valid {} value"):
"""
Creates a validator that attempts to coerce the given value to the
specified ``type``. Will raise an error if the coercion fails.
A custom message can be specified with ``message``.
"""
@wraps(Coerce)
def built(value):
try:
return type(value)
except (TypeError, ValueError) as e:
raise Error(message.format(type.__name__))
return built
## Collections
def List(validator):
"""
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
"""
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
## Booleans
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built
## Numbers
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
"""
Creates a validator that checks if the given value's length is in the
specified range, inclusive. (Returns the original value.)
See :func:`.Range`.
"""
validator = Range(min, max, min_message, max_message)
@wraps(Length)
def built(value):
if not hasattr(value, '__len__'):
raise Error("Does not have a length")
validator(len(value))
return value
return built
## Strings
def _string_function(value, name):
if not isinstance(value, six.string_types):
raise Error("Must be a string")
return getattr(value, name)()
def Lower():
"""
Creates a validator that converts the input string to lowercase. Will raise
an error for non-string types.
"""
@wraps(Lower)
def built(value):
return _string_function(value, 'lower')
return built
def Upper():
"""
Creates a validator that converts the input string to UPPERCASE. Will raise
an error for non-string types.
"""
@wraps(Upper)
def built(value):
return _string_function(value, 'upper')
return built
def Strip():
"""
Creates a validator that strips the input string of whitespace. Will raise
an error for non-string types.
"""
@wraps(Strip)
def built(value):
return _string_function(value, 'strip')
return built
def NotEmpty():
"""
Creates a validator that validates the given string is not empty. Will
raise an error for non-string types.
"""
@wraps(NotEmpty)
def built(value):
if not isinstance(value, six.string_types) or not value:
raise Error("Must not be empty")
return value
return built
## String conversions
def Uuid(to_uuid=True):
"""
Creates a UUID validator. Will raise an error for non-string types and
non-UUID values.
The given value will be converted to an instance of ``uuid.UUID`` unless
``to_uuid`` is ``False``.
"""
@wraps(Uuid)
def built(value):
invalid = Error("Not a valid UUID")
if isinstance(value, uuid.UUID):
return value
elif not isinstance(value, six.string_types):
raise invalid
try:
as_uuid = uuid.UUID(value)
except (ValueError, AttributeError) as e:
raise invalid
if to_uuid:
return as_uuid
return value
return built
|
veeti/decent
|
decent/validators.py
|
Msg
|
python
|
def Msg(validator, message):
@wraps(Msg)
def built(value):
try:
return validator(value)
except Error as e:
e.message = message
raise e
return built
|
Wraps the given validator callable, replacing any error messages raised.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L50-L61
| null |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built
def Any(*validators):
"""
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
"""
@wraps(Any)
def built(value):
error = None
for validator in validators:
try:
return validator(value)
except Error as e:
error = e
raise error
return built
def Maybe(validator):
"""
Wraps the given validator callable, only using it for the given value if it
is not ``None``.
"""
@wraps(Maybe)
def built(value):
if value != None:
return validator(value)
return built
def Default(default):
"""
Creates a validator callable that replaces ``None`` with the specified
default value.
"""
@wraps(Default)
def built(value):
if value == None:
return default
return value
return built
## Basics
def Eq(value, message="Not equal to {!s}"):
"""
Creates a validator that compares the equality of the given value to
``value``.
A custom message can be specified with ``message``. It will be formatted
with ``value``.
"""
@wraps(Eq)
def built(_value):
if _value != value:
raise Error(message.format(value))
return _value
return built
def Type(expected, message="Not of type {}"):
"""
Creates a validator that compares the type of the given value to
``expected``. This is a direct type() equality check. Also see
``Instance``, which is an isinstance() check.
A custom message can be specified with ``message``.
"""
@wraps(Type)
def built(value):
if type(value) != expected:
raise Error(message.format(expected.__name__))
return value
return built
def Instance(expected, message="Not an instance of {}"):
"""
Creates a validator that checks if the given value is an instance of
``expected``.
A custom message can be specified with ``message``.
"""
@wraps(Instance)
def built(value):
if not isinstance(value, expected):
raise Error(message.format(expected.__name__))
return value
return built
def Coerce(type, message="Not a valid {} value"):
"""
Creates a validator that attempts to coerce the given value to the
specified ``type``. Will raise an error if the coercion fails.
A custom message can be specified with ``message``.
"""
@wraps(Coerce)
def built(value):
try:
return type(value)
except (TypeError, ValueError) as e:
raise Error(message.format(type.__name__))
return built
## Collections
def List(validator):
"""
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
"""
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
## Booleans
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built
## Numbers
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
"""
Creates a validator that checks if the given value's length is in the
specified range, inclusive. (Returns the original value.)
See :func:`.Range`.
"""
validator = Range(min, max, min_message, max_message)
@wraps(Length)
def built(value):
if not hasattr(value, '__len__'):
raise Error("Does not have a length")
validator(len(value))
return value
return built
## Strings
def _string_function(value, name):
if not isinstance(value, six.string_types):
raise Error("Must be a string")
return getattr(value, name)()
def Lower():
"""
Creates a validator that converts the input string to lowercase. Will raise
an error for non-string types.
"""
@wraps(Lower)
def built(value):
return _string_function(value, 'lower')
return built
def Upper():
"""
Creates a validator that converts the input string to UPPERCASE. Will raise
an error for non-string types.
"""
@wraps(Upper)
def built(value):
return _string_function(value, 'upper')
return built
def Strip():
"""
Creates a validator that strips the input string of whitespace. Will raise
an error for non-string types.
"""
@wraps(Strip)
def built(value):
return _string_function(value, 'strip')
return built
def NotEmpty():
"""
Creates a validator that validates the given string is not empty. Will
raise an error for non-string types.
"""
@wraps(NotEmpty)
def built(value):
if not isinstance(value, six.string_types) or not value:
raise Error("Must not be empty")
return value
return built
## String conversions
def Uuid(to_uuid=True):
"""
Creates a UUID validator. Will raise an error for non-string types and
non-UUID values.
The given value will be converted to an instance of ``uuid.UUID`` unless
``to_uuid`` is ``False``.
"""
@wraps(Uuid)
def built(value):
invalid = Error("Not a valid UUID")
if isinstance(value, uuid.UUID):
return value
elif not isinstance(value, six.string_types):
raise invalid
try:
as_uuid = uuid.UUID(value)
except (ValueError, AttributeError) as e:
raise invalid
if to_uuid:
return as_uuid
return value
return built
|
veeti/decent
|
decent/validators.py
|
Default
|
python
|
def Default(default):
@wraps(Default)
def built(value):
if value == None:
return default
return value
return built
|
Creates a validator callable that replaces ``None`` with the specified
default value.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L63-L73
| null |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built
def Any(*validators):
"""
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
"""
@wraps(Any)
def built(value):
error = None
for validator in validators:
try:
return validator(value)
except Error as e:
error = e
raise error
return built
def Maybe(validator):
"""
Wraps the given validator callable, only using it for the given value if it
is not ``None``.
"""
@wraps(Maybe)
def built(value):
if value != None:
return validator(value)
return built
def Msg(validator, message):
"""
Wraps the given validator callable, replacing any error messages raised.
"""
@wraps(Msg)
def built(value):
try:
return validator(value)
except Error as e:
e.message = message
raise e
return built
## Basics
def Eq(value, message="Not equal to {!s}"):
"""
Creates a validator that compares the equality of the given value to
``value``.
A custom message can be specified with ``message``. It will be formatted
with ``value``.
"""
@wraps(Eq)
def built(_value):
if _value != value:
raise Error(message.format(value))
return _value
return built
def Type(expected, message="Not of type {}"):
"""
Creates a validator that compares the type of the given value to
``expected``. This is a direct type() equality check. Also see
``Instance``, which is an isinstance() check.
A custom message can be specified with ``message``.
"""
@wraps(Type)
def built(value):
if type(value) != expected:
raise Error(message.format(expected.__name__))
return value
return built
def Instance(expected, message="Not an instance of {}"):
"""
Creates a validator that checks if the given value is an instance of
``expected``.
A custom message can be specified with ``message``.
"""
@wraps(Instance)
def built(value):
if not isinstance(value, expected):
raise Error(message.format(expected.__name__))
return value
return built
def Coerce(type, message="Not a valid {} value"):
"""
Creates a validator that attempts to coerce the given value to the
specified ``type``. Will raise an error if the coercion fails.
A custom message can be specified with ``message``.
"""
@wraps(Coerce)
def built(value):
try:
return type(value)
except (TypeError, ValueError) as e:
raise Error(message.format(type.__name__))
return built
## Collections
def List(validator):
"""
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
"""
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
## Booleans
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built
## Numbers
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
"""
Creates a validator that checks if the given value's length is in the
specified range, inclusive. (Returns the original value.)
See :func:`.Range`.
"""
validator = Range(min, max, min_message, max_message)
@wraps(Length)
def built(value):
if not hasattr(value, '__len__'):
raise Error("Does not have a length")
validator(len(value))
return value
return built
## Strings
def _string_function(value, name):
if not isinstance(value, six.string_types):
raise Error("Must be a string")
return getattr(value, name)()
def Lower():
"""
Creates a validator that converts the input string to lowercase. Will raise
an error for non-string types.
"""
@wraps(Lower)
def built(value):
return _string_function(value, 'lower')
return built
def Upper():
"""
Creates a validator that converts the input string to UPPERCASE. Will raise
an error for non-string types.
"""
@wraps(Upper)
def built(value):
return _string_function(value, 'upper')
return built
def Strip():
"""
Creates a validator that strips the input string of whitespace. Will raise
an error for non-string types.
"""
@wraps(Strip)
def built(value):
return _string_function(value, 'strip')
return built
def NotEmpty():
"""
Creates a validator that validates the given string is not empty. Will
raise an error for non-string types.
"""
@wraps(NotEmpty)
def built(value):
if not isinstance(value, six.string_types) or not value:
raise Error("Must not be empty")
return value
return built
## String conversions
def Uuid(to_uuid=True):
"""
Creates a UUID validator. Will raise an error for non-string types and
non-UUID values.
The given value will be converted to an instance of ``uuid.UUID`` unless
``to_uuid`` is ``False``.
"""
@wraps(Uuid)
def built(value):
invalid = Error("Not a valid UUID")
if isinstance(value, uuid.UUID):
return value
elif not isinstance(value, six.string_types):
raise invalid
try:
as_uuid = uuid.UUID(value)
except (ValueError, AttributeError) as e:
raise invalid
if to_uuid:
return as_uuid
return value
return built
|
veeti/decent
|
decent/validators.py
|
Eq
|
python
|
def Eq(value, message="Not equal to {!s}"):
@wraps(Eq)
def built(_value):
if _value != value:
raise Error(message.format(value))
return _value
return built
|
Creates a validator that compares the equality of the given value to
``value``.
A custom message can be specified with ``message``. It will be formatted
with ``value``.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L77-L90
| null |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built
def Any(*validators):
"""
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
"""
@wraps(Any)
def built(value):
error = None
for validator in validators:
try:
return validator(value)
except Error as e:
error = e
raise error
return built
def Maybe(validator):
"""
Wraps the given validator callable, only using it for the given value if it
is not ``None``.
"""
@wraps(Maybe)
def built(value):
if value != None:
return validator(value)
return built
def Msg(validator, message):
"""
Wraps the given validator callable, replacing any error messages raised.
"""
@wraps(Msg)
def built(value):
try:
return validator(value)
except Error as e:
e.message = message
raise e
return built
def Default(default):
"""
Creates a validator callable that replaces ``None`` with the specified
default value.
"""
@wraps(Default)
def built(value):
if value == None:
return default
return value
return built
## Basics
def Type(expected, message="Not of type {}"):
"""
Creates a validator that compares the type of the given value to
``expected``. This is a direct type() equality check. Also see
``Instance``, which is an isinstance() check.
A custom message can be specified with ``message``.
"""
@wraps(Type)
def built(value):
if type(value) != expected:
raise Error(message.format(expected.__name__))
return value
return built
def Instance(expected, message="Not an instance of {}"):
"""
Creates a validator that checks if the given value is an instance of
``expected``.
A custom message can be specified with ``message``.
"""
@wraps(Instance)
def built(value):
if not isinstance(value, expected):
raise Error(message.format(expected.__name__))
return value
return built
def Coerce(type, message="Not a valid {} value"):
"""
Creates a validator that attempts to coerce the given value to the
specified ``type``. Will raise an error if the coercion fails.
A custom message can be specified with ``message``.
"""
@wraps(Coerce)
def built(value):
try:
return type(value)
except (TypeError, ValueError) as e:
raise Error(message.format(type.__name__))
return built
## Collections
def List(validator):
"""
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
"""
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
## Booleans
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built
## Numbers
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
"""
Creates a validator that checks if the given value's length is in the
specified range, inclusive. (Returns the original value.)
See :func:`.Range`.
"""
validator = Range(min, max, min_message, max_message)
@wraps(Length)
def built(value):
if not hasattr(value, '__len__'):
raise Error("Does not have a length")
validator(len(value))
return value
return built
## Strings
def _string_function(value, name):
if not isinstance(value, six.string_types):
raise Error("Must be a string")
return getattr(value, name)()
def Lower():
"""
Creates a validator that converts the input string to lowercase. Will raise
an error for non-string types.
"""
@wraps(Lower)
def built(value):
return _string_function(value, 'lower')
return built
def Upper():
"""
Creates a validator that converts the input string to UPPERCASE. Will raise
an error for non-string types.
"""
@wraps(Upper)
def built(value):
return _string_function(value, 'upper')
return built
def Strip():
"""
Creates a validator that strips the input string of whitespace. Will raise
an error for non-string types.
"""
@wraps(Strip)
def built(value):
return _string_function(value, 'strip')
return built
def NotEmpty():
"""
Creates a validator that validates the given string is not empty. Will
raise an error for non-string types.
"""
@wraps(NotEmpty)
def built(value):
if not isinstance(value, six.string_types) or not value:
raise Error("Must not be empty")
return value
return built
## String conversions
def Uuid(to_uuid=True):
"""
Creates a UUID validator. Will raise an error for non-string types and
non-UUID values.
The given value will be converted to an instance of ``uuid.UUID`` unless
``to_uuid`` is ``False``.
"""
@wraps(Uuid)
def built(value):
invalid = Error("Not a valid UUID")
if isinstance(value, uuid.UUID):
return value
elif not isinstance(value, six.string_types):
raise invalid
try:
as_uuid = uuid.UUID(value)
except (ValueError, AttributeError) as e:
raise invalid
if to_uuid:
return as_uuid
return value
return built
|
veeti/decent
|
decent/validators.py
|
Type
|
python
|
def Type(expected, message="Not of type {}"):
@wraps(Type)
def built(value):
if type(value) != expected:
raise Error(message.format(expected.__name__))
return value
return built
|
Creates a validator that compares the type of the given value to
``expected``. This is a direct type() equality check. Also see
``Instance``, which is an isinstance() check.
A custom message can be specified with ``message``.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L92-L105
| null |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built
def Any(*validators):
"""
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
"""
@wraps(Any)
def built(value):
error = None
for validator in validators:
try:
return validator(value)
except Error as e:
error = e
raise error
return built
def Maybe(validator):
"""
Wraps the given validator callable, only using it for the given value if it
is not ``None``.
"""
@wraps(Maybe)
def built(value):
if value != None:
return validator(value)
return built
def Msg(validator, message):
"""
Wraps the given validator callable, replacing any error messages raised.
"""
@wraps(Msg)
def built(value):
try:
return validator(value)
except Error as e:
e.message = message
raise e
return built
def Default(default):
"""
Creates a validator callable that replaces ``None`` with the specified
default value.
"""
@wraps(Default)
def built(value):
if value == None:
return default
return value
return built
## Basics
def Eq(value, message="Not equal to {!s}"):
"""
Creates a validator that compares the equality of the given value to
``value``.
A custom message can be specified with ``message``. It will be formatted
with ``value``.
"""
@wraps(Eq)
def built(_value):
if _value != value:
raise Error(message.format(value))
return _value
return built
def Instance(expected, message="Not an instance of {}"):
"""
Creates a validator that checks if the given value is an instance of
``expected``.
A custom message can be specified with ``message``.
"""
@wraps(Instance)
def built(value):
if not isinstance(value, expected):
raise Error(message.format(expected.__name__))
return value
return built
def Coerce(type, message="Not a valid {} value"):
"""
Creates a validator that attempts to coerce the given value to the
specified ``type``. Will raise an error if the coercion fails.
A custom message can be specified with ``message``.
"""
@wraps(Coerce)
def built(value):
try:
return type(value)
except (TypeError, ValueError) as e:
raise Error(message.format(type.__name__))
return built
## Collections
def List(validator):
"""
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
"""
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
## Booleans
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built
## Numbers
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
"""
Creates a validator that checks if the given value's length is in the
specified range, inclusive. (Returns the original value.)
See :func:`.Range`.
"""
validator = Range(min, max, min_message, max_message)
@wraps(Length)
def built(value):
if not hasattr(value, '__len__'):
raise Error("Does not have a length")
validator(len(value))
return value
return built
## Strings
def _string_function(value, name):
if not isinstance(value, six.string_types):
raise Error("Must be a string")
return getattr(value, name)()
def Lower():
"""
Creates a validator that converts the input string to lowercase. Will raise
an error for non-string types.
"""
@wraps(Lower)
def built(value):
return _string_function(value, 'lower')
return built
def Upper():
"""
Creates a validator that converts the input string to UPPERCASE. Will raise
an error for non-string types.
"""
@wraps(Upper)
def built(value):
return _string_function(value, 'upper')
return built
def Strip():
"""
Creates a validator that strips the input string of whitespace. Will raise
an error for non-string types.
"""
@wraps(Strip)
def built(value):
return _string_function(value, 'strip')
return built
def NotEmpty():
"""
Creates a validator that validates the given string is not empty. Will
raise an error for non-string types.
"""
@wraps(NotEmpty)
def built(value):
if not isinstance(value, six.string_types) or not value:
raise Error("Must not be empty")
return value
return built
## String conversions
def Uuid(to_uuid=True):
"""
Creates a UUID validator. Will raise an error for non-string types and
non-UUID values.
The given value will be converted to an instance of ``uuid.UUID`` unless
``to_uuid`` is ``False``.
"""
@wraps(Uuid)
def built(value):
invalid = Error("Not a valid UUID")
if isinstance(value, uuid.UUID):
return value
elif not isinstance(value, six.string_types):
raise invalid
try:
as_uuid = uuid.UUID(value)
except (ValueError, AttributeError) as e:
raise invalid
if to_uuid:
return as_uuid
return value
return built
|
veeti/decent
|
decent/validators.py
|
Instance
|
python
|
def Instance(expected, message="Not an instance of {}"):
@wraps(Instance)
def built(value):
if not isinstance(value, expected):
raise Error(message.format(expected.__name__))
return value
return built
|
Creates a validator that checks if the given value is an instance of
``expected``.
A custom message can be specified with ``message``.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L107-L119
| null |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built
def Any(*validators):
"""
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
"""
@wraps(Any)
def built(value):
error = None
for validator in validators:
try:
return validator(value)
except Error as e:
error = e
raise error
return built
def Maybe(validator):
"""
Wraps the given validator callable, only using it for the given value if it
is not ``None``.
"""
@wraps(Maybe)
def built(value):
if value != None:
return validator(value)
return built
def Msg(validator, message):
"""
Wraps the given validator callable, replacing any error messages raised.
"""
@wraps(Msg)
def built(value):
try:
return validator(value)
except Error as e:
e.message = message
raise e
return built
def Default(default):
"""
Creates a validator callable that replaces ``None`` with the specified
default value.
"""
@wraps(Default)
def built(value):
if value == None:
return default
return value
return built
## Basics
def Eq(value, message="Not equal to {!s}"):
"""
Creates a validator that compares the equality of the given value to
``value``.
A custom message can be specified with ``message``. It will be formatted
with ``value``.
"""
@wraps(Eq)
def built(_value):
if _value != value:
raise Error(message.format(value))
return _value
return built
def Type(expected, message="Not of type {}"):
"""
Creates a validator that compares the type of the given value to
``expected``. This is a direct type() equality check. Also see
``Instance``, which is an isinstance() check.
A custom message can be specified with ``message``.
"""
@wraps(Type)
def built(value):
if type(value) != expected:
raise Error(message.format(expected.__name__))
return value
return built
def Coerce(type, message="Not a valid {} value"):
"""
Creates a validator that attempts to coerce the given value to the
specified ``type``. Will raise an error if the coercion fails.
A custom message can be specified with ``message``.
"""
@wraps(Coerce)
def built(value):
try:
return type(value)
except (TypeError, ValueError) as e:
raise Error(message.format(type.__name__))
return built
## Collections
def List(validator):
"""
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
"""
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
## Booleans
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built
## Numbers
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
"""
Creates a validator that checks if the given value's length is in the
specified range, inclusive. (Returns the original value.)
See :func:`.Range`.
"""
validator = Range(min, max, min_message, max_message)
@wraps(Length)
def built(value):
if not hasattr(value, '__len__'):
raise Error("Does not have a length")
validator(len(value))
return value
return built
## Strings
def _string_function(value, name):
if not isinstance(value, six.string_types):
raise Error("Must be a string")
return getattr(value, name)()
def Lower():
"""
Creates a validator that converts the input string to lowercase. Will raise
an error for non-string types.
"""
@wraps(Lower)
def built(value):
return _string_function(value, 'lower')
return built
def Upper():
"""
Creates a validator that converts the input string to UPPERCASE. Will raise
an error for non-string types.
"""
@wraps(Upper)
def built(value):
return _string_function(value, 'upper')
return built
def Strip():
"""
Creates a validator that strips the input string of whitespace. Will raise
an error for non-string types.
"""
@wraps(Strip)
def built(value):
return _string_function(value, 'strip')
return built
def NotEmpty():
"""
Creates a validator that validates the given string is not empty. Will
raise an error for non-string types.
"""
@wraps(NotEmpty)
def built(value):
if not isinstance(value, six.string_types) or not value:
raise Error("Must not be empty")
return value
return built
## String conversions
def Uuid(to_uuid=True):
"""
Creates a UUID validator. Will raise an error for non-string types and
non-UUID values.
The given value will be converted to an instance of ``uuid.UUID`` unless
``to_uuid`` is ``False``.
"""
@wraps(Uuid)
def built(value):
invalid = Error("Not a valid UUID")
if isinstance(value, uuid.UUID):
return value
elif not isinstance(value, six.string_types):
raise invalid
try:
as_uuid = uuid.UUID(value)
except (ValueError, AttributeError) as e:
raise invalid
if to_uuid:
return as_uuid
return value
return built
|
veeti/decent
|
decent/validators.py
|
Coerce
|
python
|
def Coerce(type, message="Not a valid {} value"):
@wraps(Coerce)
def built(value):
try:
return type(value)
except (TypeError, ValueError) as e:
raise Error(message.format(type.__name__))
return built
|
Creates a validator that attempts to coerce the given value to the
specified ``type``. Will raise an error if the coercion fails.
A custom message can be specified with ``message``.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L121-L134
| null |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built
def Any(*validators):
"""
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
"""
@wraps(Any)
def built(value):
error = None
for validator in validators:
try:
return validator(value)
except Error as e:
error = e
raise error
return built
def Maybe(validator):
"""
Wraps the given validator callable, only using it for the given value if it
is not ``None``.
"""
@wraps(Maybe)
def built(value):
if value != None:
return validator(value)
return built
def Msg(validator, message):
"""
Wraps the given validator callable, replacing any error messages raised.
"""
@wraps(Msg)
def built(value):
try:
return validator(value)
except Error as e:
e.message = message
raise e
return built
def Default(default):
"""
Creates a validator callable that replaces ``None`` with the specified
default value.
"""
@wraps(Default)
def built(value):
if value == None:
return default
return value
return built
## Basics
def Eq(value, message="Not equal to {!s}"):
"""
Creates a validator that compares the equality of the given value to
``value``.
A custom message can be specified with ``message``. It will be formatted
with ``value``.
"""
@wraps(Eq)
def built(_value):
if _value != value:
raise Error(message.format(value))
return _value
return built
def Type(expected, message="Not of type {}"):
"""
Creates a validator that compares the type of the given value to
``expected``. This is a direct type() equality check. Also see
``Instance``, which is an isinstance() check.
A custom message can be specified with ``message``.
"""
@wraps(Type)
def built(value):
if type(value) != expected:
raise Error(message.format(expected.__name__))
return value
return built
def Instance(expected, message="Not an instance of {}"):
"""
Creates a validator that checks if the given value is an instance of
``expected``.
A custom message can be specified with ``message``.
"""
@wraps(Instance)
def built(value):
if not isinstance(value, expected):
raise Error(message.format(expected.__name__))
return value
return built
## Collections
def List(validator):
"""
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
"""
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
## Booleans
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built
## Numbers
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
"""
Creates a validator that checks if the given value's length is in the
specified range, inclusive. (Returns the original value.)
See :func:`.Range`.
"""
validator = Range(min, max, min_message, max_message)
@wraps(Length)
def built(value):
if not hasattr(value, '__len__'):
raise Error("Does not have a length")
validator(len(value))
return value
return built
## Strings
def _string_function(value, name):
if not isinstance(value, six.string_types):
raise Error("Must be a string")
return getattr(value, name)()
def Lower():
"""
Creates a validator that converts the input string to lowercase. Will raise
an error for non-string types.
"""
@wraps(Lower)
def built(value):
return _string_function(value, 'lower')
return built
def Upper():
"""
Creates a validator that converts the input string to UPPERCASE. Will raise
an error for non-string types.
"""
@wraps(Upper)
def built(value):
return _string_function(value, 'upper')
return built
def Strip():
"""
Creates a validator that strips the input string of whitespace. Will raise
an error for non-string types.
"""
@wraps(Strip)
def built(value):
return _string_function(value, 'strip')
return built
def NotEmpty():
"""
Creates a validator that validates the given string is not empty. Will
raise an error for non-string types.
"""
@wraps(NotEmpty)
def built(value):
if not isinstance(value, six.string_types) or not value:
raise Error("Must not be empty")
return value
return built
## String conversions
def Uuid(to_uuid=True):
"""
Creates a UUID validator. Will raise an error for non-string types and
non-UUID values.
The given value will be converted to an instance of ``uuid.UUID`` unless
``to_uuid`` is ``False``.
"""
@wraps(Uuid)
def built(value):
invalid = Error("Not a valid UUID")
if isinstance(value, uuid.UUID):
return value
elif not isinstance(value, six.string_types):
raise invalid
try:
as_uuid = uuid.UUID(value)
except (ValueError, AttributeError) as e:
raise invalid
if to_uuid:
return as_uuid
return value
return built
|
veeti/decent
|
decent/validators.py
|
List
|
python
|
def List(validator):
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
|
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L138-L167
| null |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built
def Any(*validators):
"""
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
"""
@wraps(Any)
def built(value):
error = None
for validator in validators:
try:
return validator(value)
except Error as e:
error = e
raise error
return built
def Maybe(validator):
"""
Wraps the given validator callable, only using it for the given value if it
is not ``None``.
"""
@wraps(Maybe)
def built(value):
if value != None:
return validator(value)
return built
def Msg(validator, message):
"""
Wraps the given validator callable, replacing any error messages raised.
"""
@wraps(Msg)
def built(value):
try:
return validator(value)
except Error as e:
e.message = message
raise e
return built
def Default(default):
"""
Creates a validator callable that replaces ``None`` with the specified
default value.
"""
@wraps(Default)
def built(value):
if value == None:
return default
return value
return built
## Basics
def Eq(value, message="Not equal to {!s}"):
"""
Creates a validator that compares the equality of the given value to
``value``.
A custom message can be specified with ``message``. It will be formatted
with ``value``.
"""
@wraps(Eq)
def built(_value):
if _value != value:
raise Error(message.format(value))
return _value
return built
def Type(expected, message="Not of type {}"):
"""
Creates a validator that compares the type of the given value to
``expected``. This is a direct type() equality check. Also see
``Instance``, which is an isinstance() check.
A custom message can be specified with ``message``.
"""
@wraps(Type)
def built(value):
if type(value) != expected:
raise Error(message.format(expected.__name__))
return value
return built
def Instance(expected, message="Not an instance of {}"):
"""
Creates a validator that checks if the given value is an instance of
``expected``.
A custom message can be specified with ``message``.
"""
@wraps(Instance)
def built(value):
if not isinstance(value, expected):
raise Error(message.format(expected.__name__))
return value
return built
def Coerce(type, message="Not a valid {} value"):
"""
Creates a validator that attempts to coerce the given value to the
specified ``type``. Will raise an error if the coercion fails.
A custom message can be specified with ``message``.
"""
@wraps(Coerce)
def built(value):
try:
return type(value)
except (TypeError, ValueError) as e:
raise Error(message.format(type.__name__))
return built
## Collections
## Booleans
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built
## Numbers
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
"""
Creates a validator that checks if the given value's length is in the
specified range, inclusive. (Returns the original value.)
See :func:`.Range`.
"""
validator = Range(min, max, min_message, max_message)
@wraps(Length)
def built(value):
if not hasattr(value, '__len__'):
raise Error("Does not have a length")
validator(len(value))
return value
return built
## Strings
def _string_function(value, name):
if not isinstance(value, six.string_types):
raise Error("Must be a string")
return getattr(value, name)()
def Lower():
"""
Creates a validator that converts the input string to lowercase. Will raise
an error for non-string types.
"""
@wraps(Lower)
def built(value):
return _string_function(value, 'lower')
return built
def Upper():
"""
Creates a validator that converts the input string to UPPERCASE. Will raise
an error for non-string types.
"""
@wraps(Upper)
def built(value):
return _string_function(value, 'upper')
return built
def Strip():
"""
Creates a validator that strips the input string of whitespace. Will raise
an error for non-string types.
"""
@wraps(Strip)
def built(value):
return _string_function(value, 'strip')
return built
def NotEmpty():
"""
Creates a validator that validates the given string is not empty. Will
raise an error for non-string types.
"""
@wraps(NotEmpty)
def built(value):
if not isinstance(value, six.string_types) or not value:
raise Error("Must not be empty")
return value
return built
## String conversions
def Uuid(to_uuid=True):
"""
Creates a UUID validator. Will raise an error for non-string types and
non-UUID values.
The given value will be converted to an instance of ``uuid.UUID`` unless
``to_uuid`` is ``False``.
"""
@wraps(Uuid)
def built(value):
invalid = Error("Not a valid UUID")
if isinstance(value, uuid.UUID):
return value
elif not isinstance(value, six.string_types):
raise invalid
try:
as_uuid = uuid.UUID(value)
except (ValueError, AttributeError) as e:
raise invalid
if to_uuid:
return as_uuid
return value
return built
|
veeti/decent
|
decent/validators.py
|
Boolean
|
python
|
def Boolean():
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built
|
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L171-L208
| null |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built
def Any(*validators):
"""
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
"""
@wraps(Any)
def built(value):
error = None
for validator in validators:
try:
return validator(value)
except Error as e:
error = e
raise error
return built
def Maybe(validator):
"""
Wraps the given validator callable, only using it for the given value if it
is not ``None``.
"""
@wraps(Maybe)
def built(value):
if value != None:
return validator(value)
return built
def Msg(validator, message):
"""
Wraps the given validator callable, replacing any error messages raised.
"""
@wraps(Msg)
def built(value):
try:
return validator(value)
except Error as e:
e.message = message
raise e
return built
def Default(default):
"""
Creates a validator callable that replaces ``None`` with the specified
default value.
"""
@wraps(Default)
def built(value):
if value == None:
return default
return value
return built
## Basics
def Eq(value, message="Not equal to {!s}"):
"""
Creates a validator that compares the equality of the given value to
``value``.
A custom message can be specified with ``message``. It will be formatted
with ``value``.
"""
@wraps(Eq)
def built(_value):
if _value != value:
raise Error(message.format(value))
return _value
return built
def Type(expected, message="Not of type {}"):
"""
Creates a validator that compares the type of the given value to
``expected``. This is a direct type() equality check. Also see
``Instance``, which is an isinstance() check.
A custom message can be specified with ``message``.
"""
@wraps(Type)
def built(value):
if type(value) != expected:
raise Error(message.format(expected.__name__))
return value
return built
def Instance(expected, message="Not an instance of {}"):
"""
Creates a validator that checks if the given value is an instance of
``expected``.
A custom message can be specified with ``message``.
"""
@wraps(Instance)
def built(value):
if not isinstance(value, expected):
raise Error(message.format(expected.__name__))
return value
return built
def Coerce(type, message="Not a valid {} value"):
"""
Creates a validator that attempts to coerce the given value to the
specified ``type``. Will raise an error if the coercion fails.
A custom message can be specified with ``message``.
"""
@wraps(Coerce)
def built(value):
try:
return type(value)
except (TypeError, ValueError) as e:
raise Error(message.format(type.__name__))
return built
## Collections
def List(validator):
"""
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
"""
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
## Booleans
## Numbers
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
"""
Creates a validator that checks if the given value's length is in the
specified range, inclusive. (Returns the original value.)
See :func:`.Range`.
"""
validator = Range(min, max, min_message, max_message)
@wraps(Length)
def built(value):
if not hasattr(value, '__len__'):
raise Error("Does not have a length")
validator(len(value))
return value
return built
## Strings
def _string_function(value, name):
if not isinstance(value, six.string_types):
raise Error("Must be a string")
return getattr(value, name)()
def Lower():
"""
Creates a validator that converts the input string to lowercase. Will raise
an error for non-string types.
"""
@wraps(Lower)
def built(value):
return _string_function(value, 'lower')
return built
def Upper():
"""
Creates a validator that converts the input string to UPPERCASE. Will raise
an error for non-string types.
"""
@wraps(Upper)
def built(value):
return _string_function(value, 'upper')
return built
def Strip():
"""
Creates a validator that strips the input string of whitespace. Will raise
an error for non-string types.
"""
@wraps(Strip)
def built(value):
return _string_function(value, 'strip')
return built
def NotEmpty():
"""
Creates a validator that validates the given string is not empty. Will
raise an error for non-string types.
"""
@wraps(NotEmpty)
def built(value):
if not isinstance(value, six.string_types) or not value:
raise Error("Must not be empty")
return value
return built
## String conversions
def Uuid(to_uuid=True):
"""
Creates a UUID validator. Will raise an error for non-string types and
non-UUID values.
The given value will be converted to an instance of ``uuid.UUID`` unless
``to_uuid`` is ``False``.
"""
@wraps(Uuid)
def built(value):
invalid = Error("Not a valid UUID")
if isinstance(value, uuid.UUID):
return value
elif not isinstance(value, six.string_types):
raise invalid
try:
as_uuid = uuid.UUID(value)
except (ValueError, AttributeError) as e:
raise invalid
if to_uuid:
return as_uuid
return value
return built
|
veeti/decent
|
decent/validators.py
|
Range
|
python
|
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built
|
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L212-L231
| null |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built
def Any(*validators):
"""
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
"""
@wraps(Any)
def built(value):
error = None
for validator in validators:
try:
return validator(value)
except Error as e:
error = e
raise error
return built
def Maybe(validator):
"""
Wraps the given validator callable, only using it for the given value if it
is not ``None``.
"""
@wraps(Maybe)
def built(value):
if value != None:
return validator(value)
return built
def Msg(validator, message):
"""
Wraps the given validator callable, replacing any error messages raised.
"""
@wraps(Msg)
def built(value):
try:
return validator(value)
except Error as e:
e.message = message
raise e
return built
def Default(default):
"""
Creates a validator callable that replaces ``None`` with the specified
default value.
"""
@wraps(Default)
def built(value):
if value == None:
return default
return value
return built
## Basics
def Eq(value, message="Not equal to {!s}"):
"""
Creates a validator that compares the equality of the given value to
``value``.
A custom message can be specified with ``message``. It will be formatted
with ``value``.
"""
@wraps(Eq)
def built(_value):
if _value != value:
raise Error(message.format(value))
return _value
return built
def Type(expected, message="Not of type {}"):
"""
Creates a validator that compares the type of the given value to
``expected``. This is a direct type() equality check. Also see
``Instance``, which is an isinstance() check.
A custom message can be specified with ``message``.
"""
@wraps(Type)
def built(value):
if type(value) != expected:
raise Error(message.format(expected.__name__))
return value
return built
def Instance(expected, message="Not an instance of {}"):
"""
Creates a validator that checks if the given value is an instance of
``expected``.
A custom message can be specified with ``message``.
"""
@wraps(Instance)
def built(value):
if not isinstance(value, expected):
raise Error(message.format(expected.__name__))
return value
return built
def Coerce(type, message="Not a valid {} value"):
"""
Creates a validator that attempts to coerce the given value to the
specified ``type``. Will raise an error if the coercion fails.
A custom message can be specified with ``message``.
"""
@wraps(Coerce)
def built(value):
try:
return type(value)
except (TypeError, ValueError) as e:
raise Error(message.format(type.__name__))
return built
## Collections
def List(validator):
"""
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
"""
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
## Booleans
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built
## Numbers
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
"""
Creates a validator that checks if the given value's length is in the
specified range, inclusive. (Returns the original value.)
See :func:`.Range`.
"""
validator = Range(min, max, min_message, max_message)
@wraps(Length)
def built(value):
if not hasattr(value, '__len__'):
raise Error("Does not have a length")
validator(len(value))
return value
return built
## Strings
def _string_function(value, name):
if not isinstance(value, six.string_types):
raise Error("Must be a string")
return getattr(value, name)()
def Lower():
"""
Creates a validator that converts the input string to lowercase. Will raise
an error for non-string types.
"""
@wraps(Lower)
def built(value):
return _string_function(value, 'lower')
return built
def Upper():
"""
Creates a validator that converts the input string to UPPERCASE. Will raise
an error for non-string types.
"""
@wraps(Upper)
def built(value):
return _string_function(value, 'upper')
return built
def Strip():
"""
Creates a validator that strips the input string of whitespace. Will raise
an error for non-string types.
"""
@wraps(Strip)
def built(value):
return _string_function(value, 'strip')
return built
def NotEmpty():
"""
Creates a validator that validates the given string is not empty. Will
raise an error for non-string types.
"""
@wraps(NotEmpty)
def built(value):
if not isinstance(value, six.string_types) or not value:
raise Error("Must not be empty")
return value
return built
## String conversions
def Uuid(to_uuid=True):
"""
Creates a UUID validator. Will raise an error for non-string types and
non-UUID values.
The given value will be converted to an instance of ``uuid.UUID`` unless
``to_uuid`` is ``False``.
"""
@wraps(Uuid)
def built(value):
invalid = Error("Not a valid UUID")
if isinstance(value, uuid.UUID):
return value
elif not isinstance(value, six.string_types):
raise invalid
try:
as_uuid = uuid.UUID(value)
except (ValueError, AttributeError) as e:
raise invalid
if to_uuid:
return as_uuid
return value
return built
|
veeti/decent
|
decent/validators.py
|
Length
|
python
|
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
validator = Range(min, max, min_message, max_message)
@wraps(Length)
def built(value):
if not hasattr(value, '__len__'):
raise Error("Does not have a length")
validator(len(value))
return value
return built
|
Creates a validator that checks if the given value's length is in the
specified range, inclusive. (Returns the original value.)
See :func:`.Range`.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L233-L247
|
[
"def Range(min=None, max=None, min_message=\"Must be at least {min}\", max_message=\"Must be at most {max}\"):\n \"\"\"\n Creates a validator that checks if the given numeric value is in the\n specified range, inclusive.\n\n Accepts values specified by ``numbers.Number`` only, excluding booleans.\n\n The error messages raised can be customized with ``min_message`` and\n ``max_message``. The ``min`` and ``max`` arguments are formatted.\n \"\"\"\n @wraps(Range)\n def built(value):\n if not isinstance(value, numbers.Number) or isinstance(value, bool):\n raise Error(\"Not a number\")\n if min is not None and min > value:\n raise Error(min_message.format(min=min, max=max))\n if max is not None and value > max:\n raise Error(max_message.format(min=min, max=max))\n return value\n return built\n"
] |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built
def Any(*validators):
"""
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
"""
@wraps(Any)
def built(value):
error = None
for validator in validators:
try:
return validator(value)
except Error as e:
error = e
raise error
return built
def Maybe(validator):
"""
Wraps the given validator callable, only using it for the given value if it
is not ``None``.
"""
@wraps(Maybe)
def built(value):
if value != None:
return validator(value)
return built
def Msg(validator, message):
"""
Wraps the given validator callable, replacing any error messages raised.
"""
@wraps(Msg)
def built(value):
try:
return validator(value)
except Error as e:
e.message = message
raise e
return built
def Default(default):
"""
Creates a validator callable that replaces ``None`` with the specified
default value.
"""
@wraps(Default)
def built(value):
if value == None:
return default
return value
return built
## Basics
def Eq(value, message="Not equal to {!s}"):
"""
Creates a validator that compares the equality of the given value to
``value``.
A custom message can be specified with ``message``. It will be formatted
with ``value``.
"""
@wraps(Eq)
def built(_value):
if _value != value:
raise Error(message.format(value))
return _value
return built
def Type(expected, message="Not of type {}"):
"""
Creates a validator that compares the type of the given value to
``expected``. This is a direct type() equality check. Also see
``Instance``, which is an isinstance() check.
A custom message can be specified with ``message``.
"""
@wraps(Type)
def built(value):
if type(value) != expected:
raise Error(message.format(expected.__name__))
return value
return built
def Instance(expected, message="Not an instance of {}"):
"""
Creates a validator that checks if the given value is an instance of
``expected``.
A custom message can be specified with ``message``.
"""
@wraps(Instance)
def built(value):
if not isinstance(value, expected):
raise Error(message.format(expected.__name__))
return value
return built
def Coerce(type, message="Not a valid {} value"):
"""
Creates a validator that attempts to coerce the given value to the
specified ``type``. Will raise an error if the coercion fails.
A custom message can be specified with ``message``.
"""
@wraps(Coerce)
def built(value):
try:
return type(value)
except (TypeError, ValueError) as e:
raise Error(message.format(type.__name__))
return built
## Collections
def List(validator):
"""
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
"""
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
## Booleans
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built
## Numbers
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built
## Strings
def _string_function(value, name):
if not isinstance(value, six.string_types):
raise Error("Must be a string")
return getattr(value, name)()
def Lower():
"""
Creates a validator that converts the input string to lowercase. Will raise
an error for non-string types.
"""
@wraps(Lower)
def built(value):
return _string_function(value, 'lower')
return built
def Upper():
    """Create a validator converting an input string to UPPERCASE.

    Raises an error for non-string types.
    """
    @wraps(Upper)
    def built(value):
        # Delegate the type check and method dispatch to the shared helper.
        return _string_function(value, 'upper')
    return built
def Strip():
    """Create a validator stripping surrounding whitespace from a string.

    Raises an error for non-string types.
    """
    @wraps(Strip)
    def built(value):
        # Delegate the type check and method dispatch to the shared helper.
        return _string_function(value, 'strip')
    return built
def NotEmpty():
    """Create a validator rejecting empty strings and non-string values."""
    @wraps(NotEmpty)
    def built(value):
        is_nonempty_string = isinstance(value, six.string_types) and bool(value)
        if not is_nonempty_string:
            raise Error("Must not be empty")
        return value
    return built
## String conversions
def Uuid(to_uuid=True):
    """
    Create a UUID validator. Raises an error for non-string types and for
    strings that do not parse as UUIDs.

    The value is converted to a ``uuid.UUID`` instance unless ``to_uuid``
    is ``False``, in which case the original string is returned.
    """
    @wraps(Uuid)
    def built(value):
        # Existing UUID instances pass through untouched.
        if isinstance(value, uuid.UUID):
            return value
        if not isinstance(value, six.string_types):
            raise Error("Not a valid UUID")
        try:
            parsed = uuid.UUID(value)
        except (ValueError, AttributeError):
            raise Error("Not a valid UUID")
        return parsed if to_uuid else value
    return built
|
veeti/decent
|
decent/validators.py
|
NotEmpty
|
python
|
def NotEmpty():
@wraps(NotEmpty)
def built(value):
if not isinstance(value, six.string_types) or not value:
raise Error("Must not be empty")
return value
return built
|
Creates a validator that validates the given string is not empty. Will
raise an error for non-string types.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L286-L296
| null |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def All(*validators):
    """
    Compose *validators* into a single callable that applies each one in
    sequence, threading the (possibly transformed) value through all of them.
    """
    @wraps(All)
    def built(value):
        result = value
        for check in validators:
            result = check(result)
        return result
    return built
def Any(*validators):
    """
    Try each validator in turn on the given value, returning the first
    successful result. Re-raises the last ``Error`` if every one fails.
    """
    @wraps(Any)
    def built(value):
        last_error = None
        for check in validators:
            try:
                return check(value)
            except Error as e:
                last_error = e
        # No validator succeeded; surface the most recent failure.
        raise last_error
    return built
def Maybe(validator):
    """
    Wrap *validator* so it is applied only when the value is not ``None``;
    ``None`` itself passes through (the wrapper returns ``None``).
    """
    @wraps(Maybe)
    def built(value):
        # `is not None` instead of `!= None`: identity comparison is the
        # idiomatic None test and avoids custom __eq__ surprises (PEP 8).
        if value is not None:
            return validator(value)
        return None
    return built
def Msg(validator, message):
    """Wrap *validator*, replacing the message of any ``Error`` it raises."""
    @wraps(Msg)
    def built(value):
        try:
            result = validator(value)
        except Error as e:
            e.message = message
            raise e
        return result
    return built
def Default(default):
    """Create a validator that substitutes *default* for ``None`` inputs;
    every other value (including falsy ones like ``0`` and ``""``) passes
    through unchanged."""
    @wraps(Default)
    def built(value):
        # Identity check: `value == None` would call __eq__ and could wrongly
        # match objects that merely compare equal to None (PEP 8).
        if value is None:
            return default
        return value
    return built
## Basics
def Eq(value, message="Not equal to {!s}"):
    """
    Create a validator requiring the input to compare equal to *value*.

    *message* is formatted with *value* when the check fails.
    """
    @wraps(Eq)
    def built(candidate):
        if candidate != value:
            raise Error(message.format(value))
        return candidate
    return built
def Type(expected, message="Not of type {}"):
    """
    Create a validator requiring ``type(value)`` to equal *expected* exactly
    — subclasses are rejected. See ``Instance`` for ``isinstance`` checks.

    A custom message can be supplied via *message*.
    """
    @wraps(Type)
    def built(value):
        actual = type(value)
        if actual != expected:
            raise Error(message.format(expected.__name__))
        return value
    return built
def Instance(expected, message="Not an instance of {}"):
    """
    Create a validator requiring the value to be an instance of *expected*
    (subclasses accepted, unlike ``Type``).

    A custom message can be supplied via *message*.
    """
    @wraps(Instance)
    def built(value):
        if isinstance(value, expected):
            return value
        raise Error(message.format(expected.__name__))
    return built
def Coerce(type, message="Not a valid {} value"):
    """
    Create a validator coercing the value via ``type(value)``; raises an
    ``Error`` when the conversion fails with ``TypeError`` or ``ValueError``.
    """
    @wraps(Coerce)
    def built(value):
        try:
            coerced = type(value)
        except (TypeError, ValueError):
            raise Error(message.format(type.__name__))
        return coerced
    return built
## Collections
def List(validator):
"""
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
"""
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
## Booleans
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built
## Numbers
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
    """
    Create a validator checking ``len(value)`` against an inclusive range;
    the original value (not its length) is returned.

    See :func:`.Range` for the underlying numeric check.
    """
    # Build the numeric range check once, at construction time.
    range_check = Range(min, max, min_message, max_message)
    @wraps(Length)
    def built(value):
        if not hasattr(value, '__len__'):
            raise Error("Does not have a length")
        range_check(len(value))
        return value
    return built
## Strings
def _string_function(value, name):
if not isinstance(value, six.string_types):
raise Error("Must be a string")
return getattr(value, name)()
def Lower():
"""
Creates a validator that converts the input string to lowercase. Will raise
an error for non-string types.
"""
@wraps(Lower)
def built(value):
return _string_function(value, 'lower')
return built
def Upper():
"""
Creates a validator that converts the input string to UPPERCASE. Will raise
an error for non-string types.
"""
@wraps(Upper)
def built(value):
return _string_function(value, 'upper')
return built
def Strip():
"""
Creates a validator that strips the input string of whitespace. Will raise
an error for non-string types.
"""
@wraps(Strip)
def built(value):
return _string_function(value, 'strip')
return built
## String conversions
def Uuid(to_uuid=True):
"""
Creates a UUID validator. Will raise an error for non-string types and
non-UUID values.
The given value will be converted to an instance of ``uuid.UUID`` unless
``to_uuid`` is ``False``.
"""
@wraps(Uuid)
def built(value):
invalid = Error("Not a valid UUID")
if isinstance(value, uuid.UUID):
return value
elif not isinstance(value, six.string_types):
raise invalid
try:
as_uuid = uuid.UUID(value)
except (ValueError, AttributeError) as e:
raise invalid
if to_uuid:
return as_uuid
return value
return built
|
veeti/decent
|
decent/validators.py
|
Uuid
|
python
|
def Uuid(to_uuid=True):
@wraps(Uuid)
def built(value):
invalid = Error("Not a valid UUID")
if isinstance(value, uuid.UUID):
return value
elif not isinstance(value, six.string_types):
raise invalid
try:
as_uuid = uuid.UUID(value)
except (ValueError, AttributeError) as e:
raise invalid
if to_uuid:
return as_uuid
return value
return built
|
Creates a UUID validator. Will raise an error for non-string types and
non-UUID values.
The given value will be converted to an instance of ``uuid.UUID`` unless
``to_uuid`` is ``False``.
|
train
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L300-L325
| null |
from functools import wraps
import numbers
import uuid
import six
from decent.error import Error, Invalid
## Helpers
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built
def Any(*validators):
"""
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
"""
@wraps(Any)
def built(value):
error = None
for validator in validators:
try:
return validator(value)
except Error as e:
error = e
raise error
return built
def Maybe(validator):
"""
Wraps the given validator callable, only using it for the given value if it
is not ``None``.
"""
@wraps(Maybe)
def built(value):
if value != None:
return validator(value)
return built
def Msg(validator, message):
"""
Wraps the given validator callable, replacing any error messages raised.
"""
@wraps(Msg)
def built(value):
try:
return validator(value)
except Error as e:
e.message = message
raise e
return built
def Default(default):
"""
Creates a validator callable that replaces ``None`` with the specified
default value.
"""
@wraps(Default)
def built(value):
if value == None:
return default
return value
return built
## Basics
def Eq(value, message="Not equal to {!s}"):
"""
Creates a validator that compares the equality of the given value to
``value``.
A custom message can be specified with ``message``. It will be formatted
with ``value``.
"""
@wraps(Eq)
def built(_value):
if _value != value:
raise Error(message.format(value))
return _value
return built
def Type(expected, message="Not of type {}"):
"""
Creates a validator that compares the type of the given value to
``expected``. This is a direct type() equality check. Also see
``Instance``, which is an isinstance() check.
A custom message can be specified with ``message``.
"""
@wraps(Type)
def built(value):
if type(value) != expected:
raise Error(message.format(expected.__name__))
return value
return built
def Instance(expected, message="Not an instance of {}"):
"""
Creates a validator that checks if the given value is an instance of
``expected``.
A custom message can be specified with ``message``.
"""
@wraps(Instance)
def built(value):
if not isinstance(value, expected):
raise Error(message.format(expected.__name__))
return value
return built
def Coerce(type, message="Not a valid {} value"):
"""
Creates a validator that attempts to coerce the given value to the
specified ``type``. Will raise an error if the coercion fails.
A custom message can be specified with ``message``.
"""
@wraps(Coerce)
def built(value):
try:
return type(value)
except (TypeError, ValueError) as e:
raise Error(message.format(type.__name__))
return built
## Collections
def List(validator):
"""
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
"""
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
## Booleans
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built
## Numbers
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built
def Length(min=None, max=None, min_message="Must have a length of at least {min}", max_message="Must have a length of at most {max}"):
"""
Creates a validator that checks if the given value's length is in the
specified range, inclusive. (Returns the original value.)
See :func:`.Range`.
"""
validator = Range(min, max, min_message, max_message)
@wraps(Length)
def built(value):
if not hasattr(value, '__len__'):
raise Error("Does not have a length")
validator(len(value))
return value
return built
## Strings
def _string_function(value, name):
if not isinstance(value, six.string_types):
raise Error("Must be a string")
return getattr(value, name)()
def Lower():
"""
Creates a validator that converts the input string to lowercase. Will raise
an error for non-string types.
"""
@wraps(Lower)
def built(value):
return _string_function(value, 'lower')
return built
def Upper():
"""
Creates a validator that converts the input string to UPPERCASE. Will raise
an error for non-string types.
"""
@wraps(Upper)
def built(value):
return _string_function(value, 'upper')
return built
def Strip():
"""
Creates a validator that strips the input string of whitespace. Will raise
an error for non-string types.
"""
@wraps(Strip)
def built(value):
return _string_function(value, 'strip')
return built
def NotEmpty():
"""
Creates a validator that validates the given string is not empty. Will
raise an error for non-string types.
"""
@wraps(NotEmpty)
def built(value):
if not isinstance(value, six.string_types) or not value:
raise Error("Must not be empty")
return value
return built
## String conversions
|
cmutel/constructive_geometries
|
constructive_geometries/cg.py
|
has_gis
|
python
|
def has_gis(wrapped, instance, args, kwargs):
if gis:
return wrapped(*args, **kwargs)
else:
warn(MISSING_GIS)
|
Skip function execution if there are no presamples
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/cg.py#L22-L27
| null |
from multiprocessing import Pool, cpu_count
from warnings import warn
import hashlib
import itertools
import json
import os
import wrapt
try:
from shapely.geometry import shape, mapping
from shapely.ops import cascaded_union
import fiona
gis = True
except ImportError:
gis = False
MISSING_GIS = """Function not available: GIS libraries (fiona and shapely) not installed"""
@wrapt.decorator
DATA_FILEPATH = os.path.join(os.path.dirname(__file__), "data")
def sha256(filepath, blocksize=65536):
    """Return the SHA-256 hex digest of the file at *filepath*.

    The file is read in chunks of *blocksize* bytes so arbitrarily large
    files can be hashed without loading them fully into memory.
    """
    hasher = hashlib.sha256()
    # `with` guarantees the handle is closed; the original opened the file
    # and never closed it (resource leak).
    with open(filepath, 'rb') as fo:
        buf = fo.read(blocksize)
        while len(buf) > 0:
            hasher.update(buf)
            buf = fo.read(blocksize)
    return hasher.hexdigest()
@has_gis
def _to_shapely(data):
    # Convert a fiona/GeoJSON-style feature dict (with a 'geometry' key)
    # into a shapely geometry object. No-op (returns None) when GIS
    # libraries are missing, via the has_gis guard.
    return shape(data['geometry'])
@has_gis
def _to_fiona(data):
    # Convert a shapely geometry back into a GeoJSON-style mapping suitable
    # for writing with fiona. Guarded by has_gis like its counterpart above.
    return mapping(data)
@has_gis
def _union(args):
    """Union all faces from the geopackage whose ``id`` is in ``face_ids``.

    ``args`` is a single ``(label, fp, face_ids)`` tuple so this function
    can be dispatched with ``multiprocessing.Pool.map``. Returns a
    ``(label, unioned_geometry)`` pair.
    """
    label, fp, face_ids = args
    shapes = []
    with fiona.drivers():
        with fiona.open(fp) as src:
            for feat in src:
                # Keep only the faces selected by id.
                if int(feat['properties']['id']) in face_ids:
                    shapes.append(_to_shapely(feat))
    return label, cascaded_union(shapes)
class ConstructiveGeometries(object):
    """Build aggregate geometries (e.g. rest-of-world regions) from the
    bundled topological face definitions (``faces.json`` describing which
    face ids belong to each location, ``faces.gpkg`` holding the actual
    face polygons)."""
    def __init__(self):
        self.data_fp = os.path.join(DATA_FILEPATH, "faces.json")
        self.faces_fp = os.path.join(DATA_FILEPATH, "faces.gpkg")
        self.check_data()
        self.load_definitions()
    def check_data(self):
        """Check that definitions file is present, and that faces file is readable."""
        assert os.path.exists(self.data_fp)
        if gis:
            with fiona.drivers():
                with fiona.open(self.faces_fp) as src:
                    assert src.meta
            # Verify the geopackage matches the SHA-256 recorded in faces.json.
            # NOTE(review): json.load(open(...)) never closes the file handle.
            gpkg_hash = json.load(open(self.data_fp))['metadata']['sha256']
            assert gpkg_hash == sha256(self.faces_fp)
    def load_definitions(self):
        """Load mapping of country names to face ids"""
        self.data = dict(json.load(open(self.data_fp))['data'])
        # "__all__" holds the complete face-id set; remaining keys are
        # location labels.
        self.all_faces = set(self.data.pop("__all__"))
        self.locations = set(self.data.keys())
    def construct_rest_of_world(self, excluded, name=None, fp=None, geom=True):
        """Construct rest-of-world geometry and optionally write to filepath ``fp``.
        Excludes faces in location list ``excluded``. ``excluded`` must be an iterable of location strings (not face ids)."""
        for location in excluded:
            assert location in self.locations, "Can't find location {}".format(location)
        # Faces belonging to none of the excluded locations.
        included = self.all_faces.difference(
            set().union(*[set(self.data[loc]) for loc in excluded])
        )
        if not geom:
            return included
        elif not gis:
            warn(MISSING_GIS)
            return
        # NOTE(review): _union unpacks a (label, fp, face_ids) tuple, but is
        # given a bare face-id set here — confirm against upstream history.
        geom = _union(included)[1]
        if fp:
            self.write_geoms_to_file(fp, [geom], [name] if name else None)
            return fp
        else:
            return geom
    @has_gis
    def construct_rest_of_worlds(self, excluded, fp=None, use_mp=True, simplify=True):
        """Construct many rest-of-world geometries and optionally write to filepath ``fp``.
        ``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``."""
        geoms = {}
        raw_data = []
        # Build one (label, gpkg path, included face ids) work item per key.
        for key in sorted(excluded):
            locations = excluded[key]
            for location in locations:
                assert location in self.locations, "Can't find location {}".format(location)
            included = self.all_faces.difference(
                {face for loc in locations for face in self.data[loc]}
            )
            raw_data.append((key, self.faces_fp, included))
        if use_mp:
            # Fan the unions out across all-but-one CPU core.
            with Pool(cpu_count() - 1) as pool:
                results = pool.map(_union, raw_data)
            geoms = dict(results)
        else:
            geoms = dict([_union(row) for row in raw_data])
        if simplify:
            # Douglas-Peucker simplification with a fixed 0.05-degree tolerance.
            geoms = {k: v.simplify(0.05) for k, v in geoms.items()}
        if fp:
            labels = sorted(geoms)
            self.write_geoms_to_file(fp, [geoms[key] for key in labels], labels)
            return fp
        else:
            return geoms
    def construct_rest_of_worlds_mapping(self, excluded, fp=None):
        """Construct topo mapping file for ``excluded``.
        ``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``.
        Topo mapping has the data format:
        .. code-block:: python
            {
                'data': [
                    ['location label', ['topo face integer ids']],
                ],
                'metadata': {
                    'filename': 'name of face definitions file',
                    'field': 'field with uniquely identifies the fields in ``filename``',
                    'sha256': 'SHA 256 hash of ``filename``'
                }
            }
        """
        metadata = {
            'filename': 'faces.gpkg',
            'field': 'id',
            'sha256': sha256(self.faces_fp)
        }
        data = []
        for key, locations in excluded.items():
            for location in locations:
                assert location in self.locations, "Can't find location {}".format(location)
            included = self.all_faces.difference(
                {face for loc in locations for face in self.data[loc]}
            )
            data.append((key, sorted(included)))
        obj = {'data': data, 'metadata': metadata}
        if fp:
            with open(fp, "w") as f:
                json.dump(obj, f, indent=2)
        else:
            return obj
    @has_gis
    def construct_difference(self, parent, excluded, name=None, fp=None):
        """Construct geometry from ``parent`` without the regions in ``excluded`` and optionally write to filepath ``fp``.
        ``excluded`` must be an iterable of location strings (not face ids)."""
        assert parent in self.locations, "Can't find location {}".format(parent)
        for location in excluded:
            assert location in self.locations, "Can't find location {}".format(location)
        # NOTE(review): `reduce` is not imported anywhere in this file
        # (Python 3 requires `from functools import reduce`), and `_union`
        # is called with a bare face-id set although it unpacks a
        # (label, fp, face_ids) tuple — this method would fail at runtime.
        included = set(self.data[parent]).difference(
            reduce(set.union, [set(self.data[loc]) for loc in excluded])
        )
        geom = _union(included)
        if fp:
            self.write_geoms_to_file(fp, [geom], [name] if name else None)
            return fp
        else:
            return geom
    @has_gis
    def write_geoms_to_file(self, fp, geoms, names=None):
        """Write unioned geometries ``geoms`` to filepath ``fp``. Optionally use ``names`` in name field."""
        # Normalize the output path to a .gpkg extension.
        if fp[-5:] != '.gpkg':
            fp = fp + '.gpkg'
        if names is not None:
            assert len(geoms) == len(names), "Inconsistent length of geometries and names"
        else:
            # Generate placeholder names lazily when none are supplied.
            names = ("Merged geometry {}".format(count) for count in itertools.count())
        meta = {
            'crs': {'no_defs': True, 'ellps': 'WGS84', 'datum': 'WGS84', 'proj': 'longlat'},
            'driver': 'GPKG',
            'schema': {'geometry': 'MultiPolygon', 'properties': {'name': 'str', 'id': 'int'}}
        }
        with fiona.drivers():
            with fiona.open(fp, 'w', **meta) as sink:
                # Features are numbered from 1 in the 'id' property.
                for geom, name, count in zip(geoms, names, itertools.count(1)):
                    sink.write({
                        'geometry': _to_fiona(geom),
                        'properties': {'name': name, 'id': count}
                    })
        return fp
|
cmutel/constructive_geometries
|
constructive_geometries/cg.py
|
sha256
|
python
|
def sha256(filepath, blocksize=65536):
hasher = hashlib.sha256()
fo = open(filepath, 'rb')
buf = fo.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = fo.read(blocksize)
return hasher.hexdigest()
|
Generate SHA 256 hash for file at `filepath`
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/cg.py#L33-L41
| null |
from multiprocessing import Pool, cpu_count
from warnings import warn
import hashlib
import itertools
import json
import os
import wrapt
try:
from shapely.geometry import shape, mapping
from shapely.ops import cascaded_union
import fiona
gis = True
except ImportError:
gis = False
MISSING_GIS = """Function not available: GIS libraries (fiona and shapely) not installed"""
@wrapt.decorator
def has_gis(wrapped, instance, args, kwargs):
    """Run *wrapped* only when the optional GIS stack (fiona/shapely)
    imported successfully; otherwise emit ``MISSING_GIS`` as a warning and
    return ``None``.

    (The previous docstring mentioned "presamples", which belongs to a
    different project — this guard is about GIS libraries.)
    """
    if gis:
        return wrapped(*args, **kwargs)
    else:
        warn(MISSING_GIS)
DATA_FILEPATH = os.path.join(os.path.dirname(__file__), "data")
@has_gis
def _to_shapely(data):
return shape(data['geometry'])
@has_gis
def _to_fiona(data):
return mapping(data)
@has_gis
def _union(args):
label, fp, face_ids = args
shapes = []
with fiona.drivers():
with fiona.open(fp) as src:
for feat in src:
if int(feat['properties']['id']) in face_ids:
shapes.append(_to_shapely(feat))
return label, cascaded_union(shapes)
class ConstructiveGeometries(object):
def __init__(self):
self.data_fp = os.path.join(DATA_FILEPATH, "faces.json")
self.faces_fp = os.path.join(DATA_FILEPATH, "faces.gpkg")
self.check_data()
self.load_definitions()
def check_data(self):
"""Check that definitions file is present, and that faces file is readable."""
assert os.path.exists(self.data_fp)
if gis:
with fiona.drivers():
with fiona.open(self.faces_fp) as src:
assert src.meta
gpkg_hash = json.load(open(self.data_fp))['metadata']['sha256']
assert gpkg_hash == sha256(self.faces_fp)
def load_definitions(self):
"""Load mapping of country names to face ids"""
self.data = dict(json.load(open(self.data_fp))['data'])
self.all_faces = set(self.data.pop("__all__"))
self.locations = set(self.data.keys())
def construct_rest_of_world(self, excluded, name=None, fp=None, geom=True):
"""Construct rest-of-world geometry and optionally write to filepath ``fp``.
Excludes faces in location list ``excluded``. ``excluded`` must be an iterable of location strings (not face ids)."""
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
set().union(*[set(self.data[loc]) for loc in excluded])
)
if not geom:
return included
elif not gis:
warn(MISSING_GIS)
return
geom = _union(included)[1]
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
@has_gis
def construct_rest_of_worlds(self, excluded, fp=None, use_mp=True, simplify=True):
"""Construct many rest-of-world geometries and optionally write to filepath ``fp``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``."""
geoms = {}
raw_data = []
for key in sorted(excluded):
locations = excluded[key]
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
raw_data.append((key, self.faces_fp, included))
if use_mp:
with Pool(cpu_count() - 1) as pool:
results = pool.map(_union, raw_data)
geoms = dict(results)
else:
geoms = dict([_union(row) for row in raw_data])
if simplify:
geoms = {k: v.simplify(0.05) for k, v in geoms.items()}
if fp:
labels = sorted(geoms)
self.write_geoms_to_file(fp, [geoms[key] for key in labels], labels)
return fp
else:
return geoms
def construct_rest_of_worlds_mapping(self, excluded, fp=None):
"""Construct topo mapping file for ``excluded``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``.
Topo mapping has the data format:
.. code-block:: python
{
'data': [
['location label', ['topo face integer ids']],
],
'metadata': {
'filename': 'name of face definitions file',
'field': 'field with uniquely identifies the fields in ``filename``',
'sha256': 'SHA 256 hash of ``filename``'
}
}
"""
metadata = {
'filename': 'faces.gpkg',
'field': 'id',
'sha256': sha256(self.faces_fp)
}
data = []
for key, locations in excluded.items():
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
data.append((key, sorted(included)))
obj = {'data': data, 'metadata': metadata}
if fp:
with open(fp, "w") as f:
json.dump(obj, f, indent=2)
else:
return obj
@has_gis
def construct_difference(self, parent, excluded, name=None, fp=None):
"""Construct geometry from ``parent`` without the regions in ``excluded`` and optionally write to filepath ``fp``.
``excluded`` must be an iterable of location strings (not face ids)."""
assert parent in self.locations, "Can't find location {}".format(parent)
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = set(self.data[parent]).difference(
reduce(set.union, [set(self.data[loc]) for loc in excluded])
)
geom = _union(included)
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
@has_gis
def write_geoms_to_file(self, fp, geoms, names=None):
"""Write unioned geometries ``geoms`` to filepath ``fp``. Optionally use ``names`` in name field."""
if fp[-5:] != '.gpkg':
fp = fp + '.gpkg'
if names is not None:
assert len(geoms) == len(names), "Inconsistent length of geometries and names"
else:
names = ("Merged geometry {}".format(count) for count in itertools.count())
meta = {
'crs': {'no_defs': True, 'ellps': 'WGS84', 'datum': 'WGS84', 'proj': 'longlat'},
'driver': 'GPKG',
'schema': {'geometry': 'MultiPolygon', 'properties': {'name': 'str', 'id': 'int'}}
}
with fiona.drivers():
with fiona.open(fp, 'w', **meta) as sink:
for geom, name, count in zip(geoms, names, itertools.count(1)):
sink.write({
'geometry': _to_fiona(geom),
'properties': {'name': name, 'id': count}
})
return fp
|
cmutel/constructive_geometries
|
constructive_geometries/cg.py
|
ConstructiveGeometries.check_data
|
python
|
def check_data(self):
assert os.path.exists(self.data_fp)
if gis:
with fiona.drivers():
with fiona.open(self.faces_fp) as src:
assert src.meta
gpkg_hash = json.load(open(self.data_fp))['metadata']['sha256']
assert gpkg_hash == sha256(self.faces_fp)
|
Check that definitions file is present, and that faces file is readable.
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/cg.py#L73-L82
|
[
"def sha256(filepath, blocksize=65536):\n \"\"\"Generate SHA 256 hash for file at `filepath`\"\"\"\n hasher = hashlib.sha256()\n fo = open(filepath, 'rb')\n buf = fo.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = fo.read(blocksize)\n return hasher.hexdigest()\n"
] |
class ConstructiveGeometries(object):
def __init__(self):
self.data_fp = os.path.join(DATA_FILEPATH, "faces.json")
self.faces_fp = os.path.join(DATA_FILEPATH, "faces.gpkg")
self.check_data()
self.load_definitions()
def load_definitions(self):
"""Load mapping of country names to face ids"""
self.data = dict(json.load(open(self.data_fp))['data'])
self.all_faces = set(self.data.pop("__all__"))
self.locations = set(self.data.keys())
def construct_rest_of_world(self, excluded, name=None, fp=None, geom=True):
"""Construct rest-of-world geometry and optionally write to filepath ``fp``.
Excludes faces in location list ``excluded``. ``excluded`` must be an iterable of location strings (not face ids)."""
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
set().union(*[set(self.data[loc]) for loc in excluded])
)
if not geom:
return included
elif not gis:
warn(MISSING_GIS)
return
geom = _union(included)[1]
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
@has_gis
def construct_rest_of_worlds(self, excluded, fp=None, use_mp=True, simplify=True):
"""Construct many rest-of-world geometries and optionally write to filepath ``fp``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``."""
geoms = {}
raw_data = []
for key in sorted(excluded):
locations = excluded[key]
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
raw_data.append((key, self.faces_fp, included))
if use_mp:
with Pool(cpu_count() - 1) as pool:
results = pool.map(_union, raw_data)
geoms = dict(results)
else:
geoms = dict([_union(row) for row in raw_data])
if simplify:
geoms = {k: v.simplify(0.05) for k, v in geoms.items()}
if fp:
labels = sorted(geoms)
self.write_geoms_to_file(fp, [geoms[key] for key in labels], labels)
return fp
else:
return geoms
def construct_rest_of_worlds_mapping(self, excluded, fp=None):
"""Construct topo mapping file for ``excluded``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``.
Topo mapping has the data format:
.. code-block:: python
{
'data': [
['location label', ['topo face integer ids']],
],
'metadata': {
'filename': 'name of face definitions file',
'field': 'field with uniquely identifies the fields in ``filename``',
'sha256': 'SHA 256 hash of ``filename``'
}
}
"""
metadata = {
'filename': 'faces.gpkg',
'field': 'id',
'sha256': sha256(self.faces_fp)
}
data = []
for key, locations in excluded.items():
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
data.append((key, sorted(included)))
obj = {'data': data, 'metadata': metadata}
if fp:
with open(fp, "w") as f:
json.dump(obj, f, indent=2)
else:
return obj
@has_gis
def construct_difference(self, parent, excluded, name=None, fp=None):
"""Construct geometry from ``parent`` without the regions in ``excluded`` and optionally write to filepath ``fp``.
``excluded`` must be an iterable of location strings (not face ids)."""
assert parent in self.locations, "Can't find location {}".format(parent)
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = set(self.data[parent]).difference(
reduce(set.union, [set(self.data[loc]) for loc in excluded])
)
geom = _union(included)
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
@has_gis
def write_geoms_to_file(self, fp, geoms, names=None):
"""Write unioned geometries ``geoms`` to filepath ``fp``. Optionally use ``names`` in name field."""
if fp[-5:] != '.gpkg':
fp = fp + '.gpkg'
if names is not None:
assert len(geoms) == len(names), "Inconsistent length of geometries and names"
else:
names = ("Merged geometry {}".format(count) for count in itertools.count())
meta = {
'crs': {'no_defs': True, 'ellps': 'WGS84', 'datum': 'WGS84', 'proj': 'longlat'},
'driver': 'GPKG',
'schema': {'geometry': 'MultiPolygon', 'properties': {'name': 'str', 'id': 'int'}}
}
with fiona.drivers():
with fiona.open(fp, 'w', **meta) as sink:
for geom, name, count in zip(geoms, names, itertools.count(1)):
sink.write({
'geometry': _to_fiona(geom),
'properties': {'name': name, 'id': count}
})
return fp
|
cmutel/constructive_geometries
|
constructive_geometries/cg.py
|
ConstructiveGeometries.load_definitions
|
python
|
def load_definitions(self):
self.data = dict(json.load(open(self.data_fp))['data'])
self.all_faces = set(self.data.pop("__all__"))
self.locations = set(self.data.keys())
|
Load mapping of country names to face ids
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/cg.py#L84-L88
| null |
class ConstructiveGeometries(object):
def __init__(self):
self.data_fp = os.path.join(DATA_FILEPATH, "faces.json")
self.faces_fp = os.path.join(DATA_FILEPATH, "faces.gpkg")
self.check_data()
self.load_definitions()
def check_data(self):
"""Check that definitions file is present, and that faces file is readable."""
assert os.path.exists(self.data_fp)
if gis:
with fiona.drivers():
with fiona.open(self.faces_fp) as src:
assert src.meta
gpkg_hash = json.load(open(self.data_fp))['metadata']['sha256']
assert gpkg_hash == sha256(self.faces_fp)
def construct_rest_of_world(self, excluded, name=None, fp=None, geom=True):
"""Construct rest-of-world geometry and optionally write to filepath ``fp``.
Excludes faces in location list ``excluded``. ``excluded`` must be an iterable of location strings (not face ids)."""
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
set().union(*[set(self.data[loc]) for loc in excluded])
)
if not geom:
return included
elif not gis:
warn(MISSING_GIS)
return
geom = _union(included)[1]
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
@has_gis
def construct_rest_of_worlds(self, excluded, fp=None, use_mp=True, simplify=True):
"""Construct many rest-of-world geometries and optionally write to filepath ``fp``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``."""
geoms = {}
raw_data = []
for key in sorted(excluded):
locations = excluded[key]
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
raw_data.append((key, self.faces_fp, included))
if use_mp:
with Pool(cpu_count() - 1) as pool:
results = pool.map(_union, raw_data)
geoms = dict(results)
else:
geoms = dict([_union(row) for row in raw_data])
if simplify:
geoms = {k: v.simplify(0.05) for k, v in geoms.items()}
if fp:
labels = sorted(geoms)
self.write_geoms_to_file(fp, [geoms[key] for key in labels], labels)
return fp
else:
return geoms
def construct_rest_of_worlds_mapping(self, excluded, fp=None):
"""Construct topo mapping file for ``excluded``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``.
Topo mapping has the data format:
.. code-block:: python
{
'data': [
['location label', ['topo face integer ids']],
],
'metadata': {
'filename': 'name of face definitions file',
'field': 'field with uniquely identifies the fields in ``filename``',
'sha256': 'SHA 256 hash of ``filename``'
}
}
"""
metadata = {
'filename': 'faces.gpkg',
'field': 'id',
'sha256': sha256(self.faces_fp)
}
data = []
for key, locations in excluded.items():
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
data.append((key, sorted(included)))
obj = {'data': data, 'metadata': metadata}
if fp:
with open(fp, "w") as f:
json.dump(obj, f, indent=2)
else:
return obj
@has_gis
def construct_difference(self, parent, excluded, name=None, fp=None):
"""Construct geometry from ``parent`` without the regions in ``excluded`` and optionally write to filepath ``fp``.
``excluded`` must be an iterable of location strings (not face ids)."""
assert parent in self.locations, "Can't find location {}".format(parent)
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = set(self.data[parent]).difference(
reduce(set.union, [set(self.data[loc]) for loc in excluded])
)
geom = _union(included)
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
@has_gis
def write_geoms_to_file(self, fp, geoms, names=None):
"""Write unioned geometries ``geoms`` to filepath ``fp``. Optionally use ``names`` in name field."""
if fp[-5:] != '.gpkg':
fp = fp + '.gpkg'
if names is not None:
assert len(geoms) == len(names), "Inconsistent length of geometries and names"
else:
names = ("Merged geometry {}".format(count) for count in itertools.count())
meta = {
'crs': {'no_defs': True, 'ellps': 'WGS84', 'datum': 'WGS84', 'proj': 'longlat'},
'driver': 'GPKG',
'schema': {'geometry': 'MultiPolygon', 'properties': {'name': 'str', 'id': 'int'}}
}
with fiona.drivers():
with fiona.open(fp, 'w', **meta) as sink:
for geom, name, count in zip(geoms, names, itertools.count(1)):
sink.write({
'geometry': _to_fiona(geom),
'properties': {'name': name, 'id': count}
})
return fp
|
cmutel/constructive_geometries
|
constructive_geometries/cg.py
|
ConstructiveGeometries.construct_rest_of_world
|
python
|
def construct_rest_of_world(self, excluded, name=None, fp=None, geom=True):
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
set().union(*[set(self.data[loc]) for loc in excluded])
)
if not geom:
return included
elif not gis:
warn(MISSING_GIS)
return
geom = _union(included)[1]
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
|
Construct rest-of-world geometry and optionally write to filepath ``fp``.
Excludes faces in location list ``excluded``. ``excluded`` must be an iterable of location strings (not face ids).
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/cg.py#L90-L111
| null |
class ConstructiveGeometries(object):
def __init__(self):
self.data_fp = os.path.join(DATA_FILEPATH, "faces.json")
self.faces_fp = os.path.join(DATA_FILEPATH, "faces.gpkg")
self.check_data()
self.load_definitions()
def check_data(self):
"""Check that definitions file is present, and that faces file is readable."""
assert os.path.exists(self.data_fp)
if gis:
with fiona.drivers():
with fiona.open(self.faces_fp) as src:
assert src.meta
gpkg_hash = json.load(open(self.data_fp))['metadata']['sha256']
assert gpkg_hash == sha256(self.faces_fp)
def load_definitions(self):
"""Load mapping of country names to face ids"""
self.data = dict(json.load(open(self.data_fp))['data'])
self.all_faces = set(self.data.pop("__all__"))
self.locations = set(self.data.keys())
@has_gis
def construct_rest_of_worlds(self, excluded, fp=None, use_mp=True, simplify=True):
"""Construct many rest-of-world geometries and optionally write to filepath ``fp``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``."""
geoms = {}
raw_data = []
for key in sorted(excluded):
locations = excluded[key]
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
raw_data.append((key, self.faces_fp, included))
if use_mp:
with Pool(cpu_count() - 1) as pool:
results = pool.map(_union, raw_data)
geoms = dict(results)
else:
geoms = dict([_union(row) for row in raw_data])
if simplify:
geoms = {k: v.simplify(0.05) for k, v in geoms.items()}
if fp:
labels = sorted(geoms)
self.write_geoms_to_file(fp, [geoms[key] for key in labels], labels)
return fp
else:
return geoms
def construct_rest_of_worlds_mapping(self, excluded, fp=None):
"""Construct topo mapping file for ``excluded``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``.
Topo mapping has the data format:
.. code-block:: python
{
'data': [
['location label', ['topo face integer ids']],
],
'metadata': {
'filename': 'name of face definitions file',
'field': 'field with uniquely identifies the fields in ``filename``',
'sha256': 'SHA 256 hash of ``filename``'
}
}
"""
metadata = {
'filename': 'faces.gpkg',
'field': 'id',
'sha256': sha256(self.faces_fp)
}
data = []
for key, locations in excluded.items():
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
data.append((key, sorted(included)))
obj = {'data': data, 'metadata': metadata}
if fp:
with open(fp, "w") as f:
json.dump(obj, f, indent=2)
else:
return obj
@has_gis
def construct_difference(self, parent, excluded, name=None, fp=None):
"""Construct geometry from ``parent`` without the regions in ``excluded`` and optionally write to filepath ``fp``.
``excluded`` must be an iterable of location strings (not face ids)."""
assert parent in self.locations, "Can't find location {}".format(parent)
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = set(self.data[parent]).difference(
reduce(set.union, [set(self.data[loc]) for loc in excluded])
)
geom = _union(included)
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
@has_gis
def write_geoms_to_file(self, fp, geoms, names=None):
"""Write unioned geometries ``geoms`` to filepath ``fp``. Optionally use ``names`` in name field."""
if fp[-5:] != '.gpkg':
fp = fp + '.gpkg'
if names is not None:
assert len(geoms) == len(names), "Inconsistent length of geometries and names"
else:
names = ("Merged geometry {}".format(count) for count in itertools.count())
meta = {
'crs': {'no_defs': True, 'ellps': 'WGS84', 'datum': 'WGS84', 'proj': 'longlat'},
'driver': 'GPKG',
'schema': {'geometry': 'MultiPolygon', 'properties': {'name': 'str', 'id': 'int'}}
}
with fiona.drivers():
with fiona.open(fp, 'w', **meta) as sink:
for geom, name, count in zip(geoms, names, itertools.count(1)):
sink.write({
'geometry': _to_fiona(geom),
'properties': {'name': name, 'id': count}
})
return fp
|
cmutel/constructive_geometries
|
constructive_geometries/cg.py
|
ConstructiveGeometries.construct_rest_of_worlds
|
python
|
def construct_rest_of_worlds(self, excluded, fp=None, use_mp=True, simplify=True):
geoms = {}
raw_data = []
for key in sorted(excluded):
locations = excluded[key]
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
raw_data.append((key, self.faces_fp, included))
if use_mp:
with Pool(cpu_count() - 1) as pool:
results = pool.map(_union, raw_data)
geoms = dict(results)
else:
geoms = dict([_union(row) for row in raw_data])
if simplify:
geoms = {k: v.simplify(0.05) for k, v in geoms.items()}
if fp:
labels = sorted(geoms)
self.write_geoms_to_file(fp, [geoms[key] for key in labels], labels)
return fp
else:
return geoms
|
Construct many rest-of-world geometries and optionally write to filepath ``fp``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``.
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/cg.py#L114-L141
| null |
class ConstructiveGeometries(object):
def __init__(self):
self.data_fp = os.path.join(DATA_FILEPATH, "faces.json")
self.faces_fp = os.path.join(DATA_FILEPATH, "faces.gpkg")
self.check_data()
self.load_definitions()
def check_data(self):
"""Check that definitions file is present, and that faces file is readable."""
assert os.path.exists(self.data_fp)
if gis:
with fiona.drivers():
with fiona.open(self.faces_fp) as src:
assert src.meta
gpkg_hash = json.load(open(self.data_fp))['metadata']['sha256']
assert gpkg_hash == sha256(self.faces_fp)
def load_definitions(self):
"""Load mapping of country names to face ids"""
self.data = dict(json.load(open(self.data_fp))['data'])
self.all_faces = set(self.data.pop("__all__"))
self.locations = set(self.data.keys())
def construct_rest_of_world(self, excluded, name=None, fp=None, geom=True):
"""Construct rest-of-world geometry and optionally write to filepath ``fp``.
Excludes faces in location list ``excluded``. ``excluded`` must be an iterable of location strings (not face ids)."""
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
set().union(*[set(self.data[loc]) for loc in excluded])
)
if not geom:
return included
elif not gis:
warn(MISSING_GIS)
return
geom = _union(included)[1]
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
@has_gis
def construct_rest_of_worlds_mapping(self, excluded, fp=None):
"""Construct topo mapping file for ``excluded``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``.
Topo mapping has the data format:
.. code-block:: python
{
'data': [
['location label', ['topo face integer ids']],
],
'metadata': {
'filename': 'name of face definitions file',
'field': 'field with uniquely identifies the fields in ``filename``',
'sha256': 'SHA 256 hash of ``filename``'
}
}
"""
metadata = {
'filename': 'faces.gpkg',
'field': 'id',
'sha256': sha256(self.faces_fp)
}
data = []
for key, locations in excluded.items():
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
data.append((key, sorted(included)))
obj = {'data': data, 'metadata': metadata}
if fp:
with open(fp, "w") as f:
json.dump(obj, f, indent=2)
else:
return obj
@has_gis
def construct_difference(self, parent, excluded, name=None, fp=None):
"""Construct geometry from ``parent`` without the regions in ``excluded`` and optionally write to filepath ``fp``.
``excluded`` must be an iterable of location strings (not face ids)."""
assert parent in self.locations, "Can't find location {}".format(parent)
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = set(self.data[parent]).difference(
reduce(set.union, [set(self.data[loc]) for loc in excluded])
)
geom = _union(included)
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
@has_gis
def write_geoms_to_file(self, fp, geoms, names=None):
"""Write unioned geometries ``geoms`` to filepath ``fp``. Optionally use ``names`` in name field."""
if fp[-5:] != '.gpkg':
fp = fp + '.gpkg'
if names is not None:
assert len(geoms) == len(names), "Inconsistent length of geometries and names"
else:
names = ("Merged geometry {}".format(count) for count in itertools.count())
meta = {
'crs': {'no_defs': True, 'ellps': 'WGS84', 'datum': 'WGS84', 'proj': 'longlat'},
'driver': 'GPKG',
'schema': {'geometry': 'MultiPolygon', 'properties': {'name': 'str', 'id': 'int'}}
}
with fiona.drivers():
with fiona.open(fp, 'w', **meta) as sink:
for geom, name, count in zip(geoms, names, itertools.count(1)):
sink.write({
'geometry': _to_fiona(geom),
'properties': {'name': name, 'id': count}
})
return fp
|
cmutel/constructive_geometries
|
constructive_geometries/cg.py
|
ConstructiveGeometries.construct_rest_of_worlds_mapping
|
python
|
def construct_rest_of_worlds_mapping(self, excluded, fp=None):
metadata = {
'filename': 'faces.gpkg',
'field': 'id',
'sha256': sha256(self.faces_fp)
}
data = []
for key, locations in excluded.items():
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
data.append((key, sorted(included)))
obj = {'data': data, 'metadata': metadata}
if fp:
with open(fp, "w") as f:
json.dump(obj, f, indent=2)
else:
return obj
|
Construct topo mapping file for ``excluded``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``.
Topo mapping has the data format:
.. code-block:: python
{
'data': [
['location label', ['topo face integer ids']],
],
'metadata': {
'filename': 'name of face definitions file',
'field': 'field with uniquely identifies the fields in ``filename``',
'sha256': 'SHA 256 hash of ``filename``'
}
}
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/cg.py#L143-L182
|
[
"def sha256(filepath, blocksize=65536):\n \"\"\"Generate SHA 256 hash for file at `filepath`\"\"\"\n hasher = hashlib.sha256()\n fo = open(filepath, 'rb')\n buf = fo.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = fo.read(blocksize)\n return hasher.hexdigest()\n"
] |
class ConstructiveGeometries(object):
def __init__(self):
self.data_fp = os.path.join(DATA_FILEPATH, "faces.json")
self.faces_fp = os.path.join(DATA_FILEPATH, "faces.gpkg")
self.check_data()
self.load_definitions()
def check_data(self):
"""Check that definitions file is present, and that faces file is readable."""
assert os.path.exists(self.data_fp)
if gis:
with fiona.drivers():
with fiona.open(self.faces_fp) as src:
assert src.meta
gpkg_hash = json.load(open(self.data_fp))['metadata']['sha256']
assert gpkg_hash == sha256(self.faces_fp)
def load_definitions(self):
"""Load mapping of country names to face ids"""
self.data = dict(json.load(open(self.data_fp))['data'])
self.all_faces = set(self.data.pop("__all__"))
self.locations = set(self.data.keys())
def construct_rest_of_world(self, excluded, name=None, fp=None, geom=True):
"""Construct rest-of-world geometry and optionally write to filepath ``fp``.
Excludes faces in location list ``excluded``. ``excluded`` must be an iterable of location strings (not face ids)."""
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
set().union(*[set(self.data[loc]) for loc in excluded])
)
if not geom:
return included
elif not gis:
warn(MISSING_GIS)
return
geom = _union(included)[1]
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
@has_gis
def construct_rest_of_worlds(self, excluded, fp=None, use_mp=True, simplify=True):
"""Construct many rest-of-world geometries and optionally write to filepath ``fp``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``."""
geoms = {}
raw_data = []
for key in sorted(excluded):
locations = excluded[key]
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
raw_data.append((key, self.faces_fp, included))
if use_mp:
with Pool(cpu_count() - 1) as pool:
results = pool.map(_union, raw_data)
geoms = dict(results)
else:
geoms = dict([_union(row) for row in raw_data])
if simplify:
geoms = {k: v.simplify(0.05) for k, v in geoms.items()}
if fp:
labels = sorted(geoms)
self.write_geoms_to_file(fp, [geoms[key] for key in labels], labels)
return fp
else:
return geoms
@has_gis
def construct_difference(self, parent, excluded, name=None, fp=None):
"""Construct geometry from ``parent`` without the regions in ``excluded`` and optionally write to filepath ``fp``.
``excluded`` must be an iterable of location strings (not face ids)."""
assert parent in self.locations, "Can't find location {}".format(parent)
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = set(self.data[parent]).difference(
reduce(set.union, [set(self.data[loc]) for loc in excluded])
)
geom = _union(included)
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
@has_gis
def write_geoms_to_file(self, fp, geoms, names=None):
"""Write unioned geometries ``geoms`` to filepath ``fp``. Optionally use ``names`` in name field."""
if fp[-5:] != '.gpkg':
fp = fp + '.gpkg'
if names is not None:
assert len(geoms) == len(names), "Inconsistent length of geometries and names"
else:
names = ("Merged geometry {}".format(count) for count in itertools.count())
meta = {
'crs': {'no_defs': True, 'ellps': 'WGS84', 'datum': 'WGS84', 'proj': 'longlat'},
'driver': 'GPKG',
'schema': {'geometry': 'MultiPolygon', 'properties': {'name': 'str', 'id': 'int'}}
}
with fiona.drivers():
with fiona.open(fp, 'w', **meta) as sink:
for geom, name, count in zip(geoms, names, itertools.count(1)):
sink.write({
'geometry': _to_fiona(geom),
'properties': {'name': name, 'id': count}
})
return fp
|
cmutel/constructive_geometries
|
constructive_geometries/cg.py
|
ConstructiveGeometries.construct_difference
|
python
|
def construct_difference(self, parent, excluded, name=None, fp=None):
assert parent in self.locations, "Can't find location {}".format(parent)
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = set(self.data[parent]).difference(
reduce(set.union, [set(self.data[loc]) for loc in excluded])
)
geom = _union(included)
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
|
Construct geometry from ``parent`` without the regions in ``excluded`` and optionally write to filepath ``fp``.
``excluded`` must be an iterable of location strings (not face ids).
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/cg.py#L185-L200
| null |
class ConstructiveGeometries(object):
def __init__(self):
self.data_fp = os.path.join(DATA_FILEPATH, "faces.json")
self.faces_fp = os.path.join(DATA_FILEPATH, "faces.gpkg")
self.check_data()
self.load_definitions()
def check_data(self):
"""Check that definitions file is present, and that faces file is readable."""
assert os.path.exists(self.data_fp)
if gis:
with fiona.drivers():
with fiona.open(self.faces_fp) as src:
assert src.meta
gpkg_hash = json.load(open(self.data_fp))['metadata']['sha256']
assert gpkg_hash == sha256(self.faces_fp)
def load_definitions(self):
"""Load mapping of country names to face ids"""
self.data = dict(json.load(open(self.data_fp))['data'])
self.all_faces = set(self.data.pop("__all__"))
self.locations = set(self.data.keys())
def construct_rest_of_world(self, excluded, name=None, fp=None, geom=True):
"""Construct rest-of-world geometry and optionally write to filepath ``fp``.
Excludes faces in location list ``excluded``. ``excluded`` must be an iterable of location strings (not face ids)."""
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
set().union(*[set(self.data[loc]) for loc in excluded])
)
if not geom:
return included
elif not gis:
warn(MISSING_GIS)
return
geom = _union(included)[1]
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
@has_gis
def construct_rest_of_worlds(self, excluded, fp=None, use_mp=True, simplify=True):
"""Construct many rest-of-world geometries and optionally write to filepath ``fp``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``."""
geoms = {}
raw_data = []
for key in sorted(excluded):
locations = excluded[key]
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
raw_data.append((key, self.faces_fp, included))
if use_mp:
with Pool(cpu_count() - 1) as pool:
results = pool.map(_union, raw_data)
geoms = dict(results)
else:
geoms = dict([_union(row) for row in raw_data])
if simplify:
geoms = {k: v.simplify(0.05) for k, v in geoms.items()}
if fp:
labels = sorted(geoms)
self.write_geoms_to_file(fp, [geoms[key] for key in labels], labels)
return fp
else:
return geoms
def construct_rest_of_worlds_mapping(self, excluded, fp=None):
"""Construct topo mapping file for ``excluded``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``.
Topo mapping has the data format:
.. code-block:: python
{
'data': [
['location label', ['topo face integer ids']],
],
'metadata': {
'filename': 'name of face definitions file',
'field': 'field with uniquely identifies the fields in ``filename``',
'sha256': 'SHA 256 hash of ``filename``'
}
}
"""
metadata = {
'filename': 'faces.gpkg',
'field': 'id',
'sha256': sha256(self.faces_fp)
}
data = []
for key, locations in excluded.items():
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
data.append((key, sorted(included)))
obj = {'data': data, 'metadata': metadata}
if fp:
with open(fp, "w") as f:
json.dump(obj, f, indent=2)
else:
return obj
@has_gis
@has_gis
def write_geoms_to_file(self, fp, geoms, names=None):
"""Write unioned geometries ``geoms`` to filepath ``fp``. Optionally use ``names`` in name field."""
if fp[-5:] != '.gpkg':
fp = fp + '.gpkg'
if names is not None:
assert len(geoms) == len(names), "Inconsistent length of geometries and names"
else:
names = ("Merged geometry {}".format(count) for count in itertools.count())
meta = {
'crs': {'no_defs': True, 'ellps': 'WGS84', 'datum': 'WGS84', 'proj': 'longlat'},
'driver': 'GPKG',
'schema': {'geometry': 'MultiPolygon', 'properties': {'name': 'str', 'id': 'int'}}
}
with fiona.drivers():
with fiona.open(fp, 'w', **meta) as sink:
for geom, name, count in zip(geoms, names, itertools.count(1)):
sink.write({
'geometry': _to_fiona(geom),
'properties': {'name': name, 'id': count}
})
return fp
|
cmutel/constructive_geometries
|
constructive_geometries/cg.py
|
ConstructiveGeometries.write_geoms_to_file
|
python
|
def write_geoms_to_file(self, fp, geoms, names=None):
if fp[-5:] != '.gpkg':
fp = fp + '.gpkg'
if names is not None:
assert len(geoms) == len(names), "Inconsistent length of geometries and names"
else:
names = ("Merged geometry {}".format(count) for count in itertools.count())
meta = {
'crs': {'no_defs': True, 'ellps': 'WGS84', 'datum': 'WGS84', 'proj': 'longlat'},
'driver': 'GPKG',
'schema': {'geometry': 'MultiPolygon', 'properties': {'name': 'str', 'id': 'int'}}
}
with fiona.drivers():
with fiona.open(fp, 'w', **meta) as sink:
for geom, name, count in zip(geoms, names, itertools.count(1)):
sink.write({
'geometry': _to_fiona(geom),
'properties': {'name': name, 'id': count}
})
return fp
|
Write unioned geometries ``geoms`` to filepath ``fp``. Optionally use ``names`` in name field.
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/cg.py#L203-L223
| null |
class ConstructiveGeometries(object):
def __init__(self):
self.data_fp = os.path.join(DATA_FILEPATH, "faces.json")
self.faces_fp = os.path.join(DATA_FILEPATH, "faces.gpkg")
self.check_data()
self.load_definitions()
def check_data(self):
"""Check that definitions file is present, and that faces file is readable."""
assert os.path.exists(self.data_fp)
if gis:
with fiona.drivers():
with fiona.open(self.faces_fp) as src:
assert src.meta
gpkg_hash = json.load(open(self.data_fp))['metadata']['sha256']
assert gpkg_hash == sha256(self.faces_fp)
def load_definitions(self):
"""Load mapping of country names to face ids"""
self.data = dict(json.load(open(self.data_fp))['data'])
self.all_faces = set(self.data.pop("__all__"))
self.locations = set(self.data.keys())
def construct_rest_of_world(self, excluded, name=None, fp=None, geom=True):
"""Construct rest-of-world geometry and optionally write to filepath ``fp``.
Excludes faces in location list ``excluded``. ``excluded`` must be an iterable of location strings (not face ids)."""
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
set().union(*[set(self.data[loc]) for loc in excluded])
)
if not geom:
return included
elif not gis:
warn(MISSING_GIS)
return
geom = _union(included)[1]
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
@has_gis
def construct_rest_of_worlds(self, excluded, fp=None, use_mp=True, simplify=True):
"""Construct many rest-of-world geometries and optionally write to filepath ``fp``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``."""
geoms = {}
raw_data = []
for key in sorted(excluded):
locations = excluded[key]
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
raw_data.append((key, self.faces_fp, included))
if use_mp:
with Pool(cpu_count() - 1) as pool:
results = pool.map(_union, raw_data)
geoms = dict(results)
else:
geoms = dict([_union(row) for row in raw_data])
if simplify:
geoms = {k: v.simplify(0.05) for k, v in geoms.items()}
if fp:
labels = sorted(geoms)
self.write_geoms_to_file(fp, [geoms[key] for key in labels], labels)
return fp
else:
return geoms
def construct_rest_of_worlds_mapping(self, excluded, fp=None):
"""Construct topo mapping file for ``excluded``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``.
Topo mapping has the data format:
.. code-block:: python
{
'data': [
['location label', ['topo face integer ids']],
],
'metadata': {
'filename': 'name of face definitions file',
'field': 'field with uniquely identifies the fields in ``filename``',
'sha256': 'SHA 256 hash of ``filename``'
}
}
"""
metadata = {
'filename': 'faces.gpkg',
'field': 'id',
'sha256': sha256(self.faces_fp)
}
data = []
for key, locations in excluded.items():
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
data.append((key, sorted(included)))
obj = {'data': data, 'metadata': metadata}
if fp:
with open(fp, "w") as f:
json.dump(obj, f, indent=2)
else:
return obj
@has_gis
def construct_difference(self, parent, excluded, name=None, fp=None):
"""Construct geometry from ``parent`` without the regions in ``excluded`` and optionally write to filepath ``fp``.
``excluded`` must be an iterable of location strings (not face ids)."""
assert parent in self.locations, "Can't find location {}".format(parent)
for location in excluded:
assert location in self.locations, "Can't find location {}".format(location)
included = set(self.data[parent]).difference(
reduce(set.union, [set(self.data[loc]) for loc in excluded])
)
geom = _union(included)
if fp:
self.write_geoms_to_file(fp, [geom], [name] if name else None)
return fp
else:
return geom
@has_gis
|
cmutel/constructive_geometries
|
constructive_geometries/geomatcher.py
|
resolved_row
|
python
|
def resolved_row(objs, geomatcher):
def get_locations(lst):
for elem in lst:
try:
yield elem['location']
except TypeError:
yield elem
geomatcher['RoW'] = geomatcher.faces.difference(
reduce(
set.union,
[geomatcher[obj] for obj in get_locations(objs)]
)
)
yield geomatcher
del geomatcher['RoW']
|
Temporarily insert ``RoW`` into ``geomatcher.topology``, defined by the topo faces not used in ``objs``.
Will overwrite any existing ``RoW``.
On exiting the context manager, ``RoW`` is deleted.
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L260-L280
|
[
"def get_locations(lst):\n for elem in lst:\n try:\n yield elem['location']\n except TypeError:\n yield elem\n"
] |
from . import ConstructiveGeometries
from collections.abc import MutableMapping
from contextlib import contextmanager
from functools import reduce
import country_converter as coco
class Geomatcher(MutableMapping):
"""Object managing spatial relationships using the a world topology.
``Geomatcher`` takes as its base data a definition of the world split into topological faces. This definition is provided by the `constructive_geometries <>`__ library. A toplogical face is a polygon which does not overlap any other topological face. In ``constructive_geometries``, these faces are defined by integer ids, so e.g. Ireland is:
.. code-block:: python
>>> from constructive_geometries import ConstructiveGeometries
>>> cg = ConstructiveGeometries()
>>> cg.data['IE']
[325, 327, 328, 334, 336, 337, 338, 345, 346, 347, 348, 350, 374, 2045]
By default, Geomatcher is populated with all world countries, and all ecoinvent regions. The instance of Geomatcher created in this file also includes the IMAGE world regions.
Geospatial definitions are namespaced, except for countries. Countries are therefore defined by their ISO two-letter codes, but other data should be referenced by a tuple of its namespace and identifier, e.g. ``('ecoinvent', 'NAFTA')``. You can also set a default namespace, either in instantiation (``Geomatcher(default_namespace="foo")``) or afterwards (``geomatcher_instance.default_namespace = 'foo'``). The default namespace is ``'ecoinvent'``.
Geomatcher supports the following operations:
* Retrieving face ids for a given location, acting as a dictionary (``geomatcher['foo']``)
* Adding new geospatial definitions, either directly with face ids or relative to existing definitions
* Splitting faces to allow for finer-scale regionalization
* Intersection, contained, and within calculations with several configuration options.
Initialization arguments:
* ``topology``: A dictionary of ``{str: set}`` labels to faces ids. Default is ``ecoinvent``, which loads the world and ecoinvent definitions from ``constructive_geometries``.
* ``default_namespace``: String defining the default search namespace. Default is ``'ecoinvent'``.
* ``use_coco``: Boolean, default ``True``. Use the `country_converter <https://github.com/konstantinstadler/country_converter>`__ library to fuzzy match country identifiers, e.g. "Austria" instead of "AT".
"""
__seen = set()
def __init__(self, topology='ecoinvent', default_namespace=None, use_coco=True):
self.coco = use_coco
if topology == 'ecoinvent':
self.default_namespace = 'ecoinvent'
def ns(x):
if len(x) == 2 or x == 'RoW':
return x
else:
return ('ecoinvent', x)
cg = ConstructiveGeometries()
self.topology = {ns(x): set(y) for x, y in cg.data.items()
if x != "__all__"}
self['GLO'] = reduce(set.union, self.topology.values())
else:
self.default_namespace = default_namespace
self.topology = topology
if not self.topology:
self.topology = {}
self.faces = set()
else:
self.faces = reduce(set.union, self.topology.values())
def __contains__(self, key):
return key in self.topology
def __getitem__(self, key):
if key == 'RoW' and 'RoW' not in self.topology:
return set()
return self.topology[self._actual_key(key)]
def __setitem__(self, key, value):
try:
key = self._actual_key(key)
except KeyError:
pass
self.topology[key] = value
def __delitem__(self, key):
del self.topology[self._actual_key(key)]
def __len__(self):
return len(self.topology)
def __iter__(self):
return iter(self.topology)
def _actual_key(self, key):
"""Translate provided key into the key used in the topology. Tries the unmodified key, the key with the default namespace, and the country converter. Raises a ``KeyError`` if none of these finds a suitable definition in ``self.topology``."""
if key in self or key in ("RoW", "GLO"):
return key
elif (self.default_namespace, key) in self:
return (self.default_namespace, key)
if isinstance(key, str) and self.coco:
new = coco.convert(names=[key], to='ISO2', not_found=None)
if new in self:
if new not in self.__seen:
self.__seen.add(key)
print("Geomatcher: Used '{}' for '{}'".format(new, key))
return new
raise KeyError("Can't find this location")
def _finish_filter(self, lst, key, include_self, exclusive, biggest_first):
"""Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly."""
key = self._actual_key(key)
locations = [x[0] for x in lst]
if not include_self and key in locations:
lst.pop(locations.index(key))
lst.sort(key=lambda x: x[1], reverse=biggest_first)
lst = [x for x, y in lst]
# RoW in both key and lst, but not defined; only RoW remains if exclusive
if key == 'RoW' and 'RoW' not in self and exclusive:
return ['RoW'] if 'RoW' in lst else []
elif exclusive:
removed, remaining = set(), []
while lst:
current = lst.pop(0)
faces = self[current]
if not faces.intersection(removed):
removed.update(faces)
remaining.append(current)
lst = remaining
# If RoW not resolved, make it the smallest
if 'RoW' not in self and 'RoW' in lst:
lst[-1 if biggest_first else 0] = lst.pop(lst.index('RoW'))
return lst
def intersects(self, key, include_self=False, exclusive=False, biggest_first=True, only=None):
"""Get all locations that intersect this location.
Note that sorting is done by first by number of faces intersecting ``key``; the total number of faces in the intersected region is only used to break sorting ties.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition, and therefore nothing intersects it. ``.intersects("RoW")`` returns a list with with ``RoW`` or nothing.
"""
possibles = self.topology if only is None else {k: self[k] for k in only}
if key == 'RoW' and 'RoW' not in self:
return ['RoW'] if 'RoW' in possibles else []
faces = self[key]
lst = [
(k, (len(v.intersection(faces)), len(v)))
for k, v in possibles.items()
if (faces.intersection(v))
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def contained(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
"""Get all locations that are completely within this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``.contained("RoW")`` returns a list with either ``RoW`` or nothing.
"""
if 'RoW' not in self:
if key == 'RoW':
return ['RoW'] if 'RoW' in (only or []) else []
elif only and 'RoW' in only:
only.pop(only.index('RoW'))
possibles = self.topology if only is None else {k: self[k] for k in only}
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if v and faces.issuperset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def within(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
"""Get all locations that completely contain this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``RoW`` can only be contained by ``GLO`` and ``RoW``.
"""
possibles = self.topology if only is None else {k: self[k] for k in only}
_ = lambda key: [key] if key in possibles else []
if 'RoW' not in self and key == 'RoW':
answer = [] + _('RoW') + _('GLO')
return list(reversed(answer)) if biggest_first else answer
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if faces.issubset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def split_face(self, face, number=None, ids=None):
"""Split a topological face into a number of small faces.
* ``face``: The face to split. Must be in the topology.
* ``number``: Number of new faces to create. Optional, can be inferred from ``ids``. Default is 2 new faces.
* ``ids``: Iterable of new face ids. Optional, default is the maximum integer in the existing topology plus one. ``ids`` don't have to be integers. If ``ids`` is specified, ``number`` is ignored.
Returns the new face ids.
"""
assert face in self.faces
if ids:
ids = set(ids)
else:
max_int = max(x for x in self.faces if isinstance(x, int))
ids = set(range(max_int + 1, max_int + 1 + (number or 2)))
for obj in self.topology.values():
if face in obj:
obj.discard(face)
obj.update(ids)
self.faces.discard(face)
self.faces.update(ids)
return ids
def add_definitions(self, data, namespace, relative=True):
"""Add new topological definitions to ``self.topology``.
If ``relative`` is true, then ``data`` is defined relative to the existing locations already in ``self.topology``, e.g. IMAGE:
.. code-block:: python
{"Russia Region": [
"AM",
"AZ",
"GE",
"RU"
]}
Otherwise, ``data`` is a dictionary with string keys and values of integer topology face id sets:
.. code-block:: python
{
'A': {1, 2, 3},
'B': {2, 3, 4},
}
"""
if not relative:
self.topology.update({(namespace, k): v for k, v in data.items()})
self.faces.update(set.union(*data.values()))
else:
self.topology.update({
(namespace, k): set.union(*[self[o] for o in v])
for k, v in data.items()
})
@contextmanager
|
cmutel/constructive_geometries
|
constructive_geometries/geomatcher.py
|
Geomatcher._actual_key
|
python
|
def _actual_key(self, key):
if key in self or key in ("RoW", "GLO"):
return key
elif (self.default_namespace, key) in self:
return (self.default_namespace, key)
if isinstance(key, str) and self.coco:
new = coco.convert(names=[key], to='ISO2', not_found=None)
if new in self:
if new not in self.__seen:
self.__seen.add(key)
print("Geomatcher: Used '{}' for '{}'".format(new, key))
return new
raise KeyError("Can't find this location")
|
Translate provided key into the key used in the topology. Tries the unmodified key, the key with the default namespace, and the country converter. Raises a ``KeyError`` if none of these finds a suitable definition in ``self.topology``.
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L87-L102
| null |
class Geomatcher(MutableMapping):
"""Object managing spatial relationships using the a world topology.
``Geomatcher`` takes as its base data a definition of the world split into topological faces. This definition is provided by the `constructive_geometries <>`__ library. A toplogical face is a polygon which does not overlap any other topological face. In ``constructive_geometries``, these faces are defined by integer ids, so e.g. Ireland is:
.. code-block:: python
>>> from constructive_geometries import ConstructiveGeometries
>>> cg = ConstructiveGeometries()
>>> cg.data['IE']
[325, 327, 328, 334, 336, 337, 338, 345, 346, 347, 348, 350, 374, 2045]
By default, Geomatcher is populated with all world countries, and all ecoinvent regions. The instance of Geomatcher created in this file also includes the IMAGE world regions.
Geospatial definitions are namespaced, except for countries. Countries are therefore defined by their ISO two-letter codes, but other data should be referenced by a tuple of its namespace and identifier, e.g. ``('ecoinvent', 'NAFTA')``. You can also set a default namespace, either in instantiation (``Geomatcher(default_namespace="foo")``) or afterwards (``geomatcher_instance.default_namespace = 'foo'``). The default namespace is ``'ecoinvent'``.
Geomatcher supports the following operations:
* Retrieving face ids for a given location, acting as a dictionary (``geomatcher['foo']``)
* Adding new geospatial definitions, either directly with face ids or relative to existing definitions
* Splitting faces to allow for finer-scale regionalization
* Intersection, contained, and within calculations with several configuration options.
Initialization arguments:
* ``topology``: A dictionary of ``{str: set}`` labels to faces ids. Default is ``ecoinvent``, which loads the world and ecoinvent definitions from ``constructive_geometries``.
* ``default_namespace``: String defining the default search namespace. Default is ``'ecoinvent'``.
* ``use_coco``: Boolean, default ``True``. Use the `country_converter <https://github.com/konstantinstadler/country_converter>`__ library to fuzzy match country identifiers, e.g. "Austria" instead of "AT".
"""
__seen = set()
def __init__(self, topology='ecoinvent', default_namespace=None, use_coco=True):
self.coco = use_coco
if topology == 'ecoinvent':
self.default_namespace = 'ecoinvent'
def ns(x):
if len(x) == 2 or x == 'RoW':
return x
else:
return ('ecoinvent', x)
cg = ConstructiveGeometries()
self.topology = {ns(x): set(y) for x, y in cg.data.items()
if x != "__all__"}
self['GLO'] = reduce(set.union, self.topology.values())
else:
self.default_namespace = default_namespace
self.topology = topology
if not self.topology:
self.topology = {}
self.faces = set()
else:
self.faces = reduce(set.union, self.topology.values())
def __contains__(self, key):
return key in self.topology
def __getitem__(self, key):
if key == 'RoW' and 'RoW' not in self.topology:
return set()
return self.topology[self._actual_key(key)]
def __setitem__(self, key, value):
try:
key = self._actual_key(key)
except KeyError:
pass
self.topology[key] = value
def __delitem__(self, key):
del self.topology[self._actual_key(key)]
def __len__(self):
return len(self.topology)
def __iter__(self):
return iter(self.topology)
def _finish_filter(self, lst, key, include_self, exclusive, biggest_first):
"""Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly."""
key = self._actual_key(key)
locations = [x[0] for x in lst]
if not include_self and key in locations:
lst.pop(locations.index(key))
lst.sort(key=lambda x: x[1], reverse=biggest_first)
lst = [x for x, y in lst]
# RoW in both key and lst, but not defined; only RoW remains if exclusive
if key == 'RoW' and 'RoW' not in self and exclusive:
return ['RoW'] if 'RoW' in lst else []
elif exclusive:
removed, remaining = set(), []
while lst:
current = lst.pop(0)
faces = self[current]
if not faces.intersection(removed):
removed.update(faces)
remaining.append(current)
lst = remaining
# If RoW not resolved, make it the smallest
if 'RoW' not in self and 'RoW' in lst:
lst[-1 if biggest_first else 0] = lst.pop(lst.index('RoW'))
return lst
def intersects(self, key, include_self=False, exclusive=False, biggest_first=True, only=None):
"""Get all locations that intersect this location.
Note that sorting is done by first by number of faces intersecting ``key``; the total number of faces in the intersected region is only used to break sorting ties.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition, and therefore nothing intersects it. ``.intersects("RoW")`` returns a list with with ``RoW`` or nothing.
"""
possibles = self.topology if only is None else {k: self[k] for k in only}
if key == 'RoW' and 'RoW' not in self:
return ['RoW'] if 'RoW' in possibles else []
faces = self[key]
lst = [
(k, (len(v.intersection(faces)), len(v)))
for k, v in possibles.items()
if (faces.intersection(v))
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def contained(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
"""Get all locations that are completely within this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``.contained("RoW")`` returns a list with either ``RoW`` or nothing.
"""
if 'RoW' not in self:
if key == 'RoW':
return ['RoW'] if 'RoW' in (only or []) else []
elif only and 'RoW' in only:
only.pop(only.index('RoW'))
possibles = self.topology if only is None else {k: self[k] for k in only}
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if v and faces.issuperset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def within(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
"""Get all locations that completely contain this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``RoW`` can only be contained by ``GLO`` and ``RoW``.
"""
possibles = self.topology if only is None else {k: self[k] for k in only}
_ = lambda key: [key] if key in possibles else []
if 'RoW' not in self and key == 'RoW':
answer = [] + _('RoW') + _('GLO')
return list(reversed(answer)) if biggest_first else answer
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if faces.issubset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def split_face(self, face, number=None, ids=None):
"""Split a topological face into a number of small faces.
* ``face``: The face to split. Must be in the topology.
* ``number``: Number of new faces to create. Optional, can be inferred from ``ids``. Default is 2 new faces.
* ``ids``: Iterable of new face ids. Optional, default is the maximum integer in the existing topology plus one. ``ids`` don't have to be integers. If ``ids`` is specified, ``number`` is ignored.
Returns the new face ids.
"""
assert face in self.faces
if ids:
ids = set(ids)
else:
max_int = max(x for x in self.faces if isinstance(x, int))
ids = set(range(max_int + 1, max_int + 1 + (number or 2)))
for obj in self.topology.values():
if face in obj:
obj.discard(face)
obj.update(ids)
self.faces.discard(face)
self.faces.update(ids)
return ids
def add_definitions(self, data, namespace, relative=True):
"""Add new topological definitions to ``self.topology``.
If ``relative`` is true, then ``data`` is defined relative to the existing locations already in ``self.topology``, e.g. IMAGE:
.. code-block:: python
{"Russia Region": [
"AM",
"AZ",
"GE",
"RU"
]}
Otherwise, ``data`` is a dictionary with string keys and values of integer topology face id sets:
.. code-block:: python
{
'A': {1, 2, 3},
'B': {2, 3, 4},
}
"""
if not relative:
self.topology.update({(namespace, k): v for k, v in data.items()})
self.faces.update(set.union(*data.values()))
else:
self.topology.update({
(namespace, k): set.union(*[self[o] for o in v])
for k, v in data.items()
})
|
cmutel/constructive_geometries
|
constructive_geometries/geomatcher.py
|
Geomatcher._finish_filter
|
python
|
def _finish_filter(self, lst, key, include_self, exclusive, biggest_first):
key = self._actual_key(key)
locations = [x[0] for x in lst]
if not include_self and key in locations:
lst.pop(locations.index(key))
lst.sort(key=lambda x: x[1], reverse=biggest_first)
lst = [x for x, y in lst]
# RoW in both key and lst, but not defined; only RoW remains if exclusive
if key == 'RoW' and 'RoW' not in self and exclusive:
return ['RoW'] if 'RoW' in lst else []
elif exclusive:
removed, remaining = set(), []
while lst:
current = lst.pop(0)
faces = self[current]
if not faces.intersection(removed):
removed.update(faces)
remaining.append(current)
lst = remaining
# If RoW not resolved, make it the smallest
if 'RoW' not in self and 'RoW' in lst:
lst[-1 if biggest_first else 0] = lst.pop(lst.index('RoW'))
return lst
|
Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly.
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L104-L132
|
[
"def _actual_key(self, key):\n \"\"\"Translate provided key into the key used in the topology. Tries the unmodified key, the key with the default namespace, and the country converter. Raises a ``KeyError`` if none of these finds a suitable definition in ``self.topology``.\"\"\"\n if key in self or key in (\"RoW\", \"GLO\"):\n return key\n elif (self.default_namespace, key) in self:\n return (self.default_namespace, key)\n\n if isinstance(key, str) and self.coco:\n new = coco.convert(names=[key], to='ISO2', not_found=None)\n if new in self:\n if new not in self.__seen:\n self.__seen.add(key)\n print(\"Geomatcher: Used '{}' for '{}'\".format(new, key))\n return new\n\n raise KeyError(\"Can't find this location\")\n"
] |
class Geomatcher(MutableMapping):
"""Object managing spatial relationships using the a world topology.
``Geomatcher`` takes as its base data a definition of the world split into topological faces. This definition is provided by the `constructive_geometries <>`__ library. A toplogical face is a polygon which does not overlap any other topological face. In ``constructive_geometries``, these faces are defined by integer ids, so e.g. Ireland is:
.. code-block:: python
>>> from constructive_geometries import ConstructiveGeometries
>>> cg = ConstructiveGeometries()
>>> cg.data['IE']
[325, 327, 328, 334, 336, 337, 338, 345, 346, 347, 348, 350, 374, 2045]
By default, Geomatcher is populated with all world countries, and all ecoinvent regions. The instance of Geomatcher created in this file also includes the IMAGE world regions.
Geospatial definitions are namespaced, except for countries. Countries are therefore defined by their ISO two-letter codes, but other data should be referenced by a tuple of its namespace and identifier, e.g. ``('ecoinvent', 'NAFTA')``. You can also set a default namespace, either in instantiation (``Geomatcher(default_namespace="foo")``) or afterwards (``geomatcher_instance.default_namespace = 'foo'``). The default namespace is ``'ecoinvent'``.
Geomatcher supports the following operations:
* Retrieving face ids for a given location, acting as a dictionary (``geomatcher['foo']``)
* Adding new geospatial definitions, either directly with face ids or relative to existing definitions
* Splitting faces to allow for finer-scale regionalization
* Intersection, contained, and within calculations with several configuration options.
Initialization arguments:
* ``topology``: A dictionary of ``{str: set}`` labels to faces ids. Default is ``ecoinvent``, which loads the world and ecoinvent definitions from ``constructive_geometries``.
* ``default_namespace``: String defining the default search namespace. Default is ``'ecoinvent'``.
* ``use_coco``: Boolean, default ``True``. Use the `country_converter <https://github.com/konstantinstadler/country_converter>`__ library to fuzzy match country identifiers, e.g. "Austria" instead of "AT".
"""
__seen = set()
def __init__(self, topology='ecoinvent', default_namespace=None, use_coco=True):
self.coco = use_coco
if topology == 'ecoinvent':
self.default_namespace = 'ecoinvent'
def ns(x):
if len(x) == 2 or x == 'RoW':
return x
else:
return ('ecoinvent', x)
cg = ConstructiveGeometries()
self.topology = {ns(x): set(y) for x, y in cg.data.items()
if x != "__all__"}
self['GLO'] = reduce(set.union, self.topology.values())
else:
self.default_namespace = default_namespace
self.topology = topology
if not self.topology:
self.topology = {}
self.faces = set()
else:
self.faces = reduce(set.union, self.topology.values())
def __contains__(self, key):
return key in self.topology
def __getitem__(self, key):
if key == 'RoW' and 'RoW' not in self.topology:
return set()
return self.topology[self._actual_key(key)]
def __setitem__(self, key, value):
try:
key = self._actual_key(key)
except KeyError:
pass
self.topology[key] = value
def __delitem__(self, key):
del self.topology[self._actual_key(key)]
def __len__(self):
return len(self.topology)
def __iter__(self):
return iter(self.topology)
def _actual_key(self, key):
"""Translate provided key into the key used in the topology. Tries the unmodified key, the key with the default namespace, and the country converter. Raises a ``KeyError`` if none of these finds a suitable definition in ``self.topology``."""
if key in self or key in ("RoW", "GLO"):
return key
elif (self.default_namespace, key) in self:
return (self.default_namespace, key)
if isinstance(key, str) and self.coco:
new = coco.convert(names=[key], to='ISO2', not_found=None)
if new in self:
if new not in self.__seen:
self.__seen.add(key)
print("Geomatcher: Used '{}' for '{}'".format(new, key))
return new
raise KeyError("Can't find this location")
def intersects(self, key, include_self=False, exclusive=False, biggest_first=True, only=None):
"""Get all locations that intersect this location.
Note that sorting is done by first by number of faces intersecting ``key``; the total number of faces in the intersected region is only used to break sorting ties.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition, and therefore nothing intersects it. ``.intersects("RoW")`` returns a list with with ``RoW`` or nothing.
"""
possibles = self.topology if only is None else {k: self[k] for k in only}
if key == 'RoW' and 'RoW' not in self:
return ['RoW'] if 'RoW' in possibles else []
faces = self[key]
lst = [
(k, (len(v.intersection(faces)), len(v)))
for k, v in possibles.items()
if (faces.intersection(v))
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def contained(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
"""Get all locations that are completely within this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``.contained("RoW")`` returns a list with either ``RoW`` or nothing.
"""
if 'RoW' not in self:
if key == 'RoW':
return ['RoW'] if 'RoW' in (only or []) else []
elif only and 'RoW' in only:
only.pop(only.index('RoW'))
possibles = self.topology if only is None else {k: self[k] for k in only}
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if v and faces.issuperset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def within(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
"""Get all locations that completely contain this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``RoW`` can only be contained by ``GLO`` and ``RoW``.
"""
possibles = self.topology if only is None else {k: self[k] for k in only}
_ = lambda key: [key] if key in possibles else []
if 'RoW' not in self and key == 'RoW':
answer = [] + _('RoW') + _('GLO')
return list(reversed(answer)) if biggest_first else answer
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if faces.issubset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def split_face(self, face, number=None, ids=None):
"""Split a topological face into a number of small faces.
* ``face``: The face to split. Must be in the topology.
* ``number``: Number of new faces to create. Optional, can be inferred from ``ids``. Default is 2 new faces.
* ``ids``: Iterable of new face ids. Optional, default is the maximum integer in the existing topology plus one. ``ids`` don't have to be integers. If ``ids`` is specified, ``number`` is ignored.
Returns the new face ids.
"""
assert face in self.faces
if ids:
ids = set(ids)
else:
max_int = max(x for x in self.faces if isinstance(x, int))
ids = set(range(max_int + 1, max_int + 1 + (number or 2)))
for obj in self.topology.values():
if face in obj:
obj.discard(face)
obj.update(ids)
self.faces.discard(face)
self.faces.update(ids)
return ids
def add_definitions(self, data, namespace, relative=True):
"""Add new topological definitions to ``self.topology``.
If ``relative`` is true, then ``data`` is defined relative to the existing locations already in ``self.topology``, e.g. IMAGE:
.. code-block:: python
{"Russia Region": [
"AM",
"AZ",
"GE",
"RU"
]}
Otherwise, ``data`` is a dictionary with string keys and values of integer topology face id sets:
.. code-block:: python
{
'A': {1, 2, 3},
'B': {2, 3, 4},
}
"""
if not relative:
self.topology.update({(namespace, k): v for k, v in data.items()})
self.faces.update(set.union(*data.values()))
else:
self.topology.update({
(namespace, k): set.union(*[self[o] for o in v])
for k, v in data.items()
})
|
cmutel/constructive_geometries
|
constructive_geometries/geomatcher.py
|
Geomatcher.intersects
|
python
|
def intersects(self, key, include_self=False, exclusive=False, biggest_first=True, only=None):
possibles = self.topology if only is None else {k: self[k] for k in only}
if key == 'RoW' and 'RoW' not in self:
return ['RoW'] if 'RoW' in possibles else []
faces = self[key]
lst = [
(k, (len(v.intersection(faces)), len(v)))
for k, v in possibles.items()
if (faces.intersection(v))
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
|
Get all locations that intersect this location.
Note that sorting is done by first by number of faces intersecting ``key``; the total number of faces in the intersected region is only used to break sorting ties.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition, and therefore nothing intersects it. ``.intersects("RoW")`` returns a list with with ``RoW`` or nothing.
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L134-L153
|
[
"def _finish_filter(self, lst, key, include_self, exclusive, biggest_first):\n \"\"\"Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly.\"\"\"\n key = self._actual_key(key)\n locations = [x[0] for x in lst]\n\n if not include_self and key in locations:\n lst.pop(locations.index(key))\n\n lst.sort(key=lambda x: x[1], reverse=biggest_first)\n lst = [x for x, y in lst]\n\n # RoW in both key and lst, but not defined; only RoW remains if exclusive\n if key == 'RoW' and 'RoW' not in self and exclusive:\n return ['RoW'] if 'RoW' in lst else []\n elif exclusive:\n removed, remaining = set(), []\n while lst:\n current = lst.pop(0)\n faces = self[current]\n if not faces.intersection(removed):\n removed.update(faces)\n remaining.append(current)\n lst = remaining\n\n # If RoW not resolved, make it the smallest\n if 'RoW' not in self and 'RoW' in lst:\n lst[-1 if biggest_first else 0] = lst.pop(lst.index('RoW'))\n\n return lst\n"
] |
class Geomatcher(MutableMapping):
"""Object managing spatial relationships using the a world topology.
``Geomatcher`` takes as its base data a definition of the world split into topological faces. This definition is provided by the `constructive_geometries <>`__ library. A toplogical face is a polygon which does not overlap any other topological face. In ``constructive_geometries``, these faces are defined by integer ids, so e.g. Ireland is:
.. code-block:: python
>>> from constructive_geometries import ConstructiveGeometries
>>> cg = ConstructiveGeometries()
>>> cg.data['IE']
[325, 327, 328, 334, 336, 337, 338, 345, 346, 347, 348, 350, 374, 2045]
By default, Geomatcher is populated with all world countries, and all ecoinvent regions. The instance of Geomatcher created in this file also includes the IMAGE world regions.
Geospatial definitions are namespaced, except for countries. Countries are therefore defined by their ISO two-letter codes, but other data should be referenced by a tuple of its namespace and identifier, e.g. ``('ecoinvent', 'NAFTA')``. You can also set a default namespace, either in instantiation (``Geomatcher(default_namespace="foo")``) or afterwards (``geomatcher_instance.default_namespace = 'foo'``). The default namespace is ``'ecoinvent'``.
Geomatcher supports the following operations:
* Retrieving face ids for a given location, acting as a dictionary (``geomatcher['foo']``)
* Adding new geospatial definitions, either directly with face ids or relative to existing definitions
* Splitting faces to allow for finer-scale regionalization
* Intersection, contained, and within calculations with several configuration options.
Initialization arguments:
* ``topology``: A dictionary of ``{str: set}`` labels to faces ids. Default is ``ecoinvent``, which loads the world and ecoinvent definitions from ``constructive_geometries``.
* ``default_namespace``: String defining the default search namespace. Default is ``'ecoinvent'``.
* ``use_coco``: Boolean, default ``True``. Use the `country_converter <https://github.com/konstantinstadler/country_converter>`__ library to fuzzy match country identifiers, e.g. "Austria" instead of "AT".
"""
__seen = set()
def __init__(self, topology='ecoinvent', default_namespace=None, use_coco=True):
self.coco = use_coco
if topology == 'ecoinvent':
self.default_namespace = 'ecoinvent'
def ns(x):
if len(x) == 2 or x == 'RoW':
return x
else:
return ('ecoinvent', x)
cg = ConstructiveGeometries()
self.topology = {ns(x): set(y) for x, y in cg.data.items()
if x != "__all__"}
self['GLO'] = reduce(set.union, self.topology.values())
else:
self.default_namespace = default_namespace
self.topology = topology
if not self.topology:
self.topology = {}
self.faces = set()
else:
self.faces = reduce(set.union, self.topology.values())
def __contains__(self, key):
return key in self.topology
def __getitem__(self, key):
if key == 'RoW' and 'RoW' not in self.topology:
return set()
return self.topology[self._actual_key(key)]
def __setitem__(self, key, value):
try:
key = self._actual_key(key)
except KeyError:
pass
self.topology[key] = value
def __delitem__(self, key):
del self.topology[self._actual_key(key)]
def __len__(self):
return len(self.topology)
def __iter__(self):
return iter(self.topology)
def _actual_key(self, key):
"""Translate provided key into the key used in the topology. Tries the unmodified key, the key with the default namespace, and the country converter. Raises a ``KeyError`` if none of these finds a suitable definition in ``self.topology``."""
if key in self or key in ("RoW", "GLO"):
return key
elif (self.default_namespace, key) in self:
return (self.default_namespace, key)
if isinstance(key, str) and self.coco:
new = coco.convert(names=[key], to='ISO2', not_found=None)
if new in self:
if new not in self.__seen:
self.__seen.add(key)
print("Geomatcher: Used '{}' for '{}'".format(new, key))
return new
raise KeyError("Can't find this location")
def _finish_filter(self, lst, key, include_self, exclusive, biggest_first):
"""Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly."""
key = self._actual_key(key)
locations = [x[0] for x in lst]
if not include_self and key in locations:
lst.pop(locations.index(key))
lst.sort(key=lambda x: x[1], reverse=biggest_first)
lst = [x for x, y in lst]
# RoW in both key and lst, but not defined; only RoW remains if exclusive
if key == 'RoW' and 'RoW' not in self and exclusive:
return ['RoW'] if 'RoW' in lst else []
elif exclusive:
removed, remaining = set(), []
while lst:
current = lst.pop(0)
faces = self[current]
if not faces.intersection(removed):
removed.update(faces)
remaining.append(current)
lst = remaining
# If RoW not resolved, make it the smallest
if 'RoW' not in self and 'RoW' in lst:
lst[-1 if biggest_first else 0] = lst.pop(lst.index('RoW'))
return lst
def contained(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
"""Get all locations that are completely within this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``.contained("RoW")`` returns a list with either ``RoW`` or nothing.
"""
if 'RoW' not in self:
if key == 'RoW':
return ['RoW'] if 'RoW' in (only or []) else []
elif only and 'RoW' in only:
only.pop(only.index('RoW'))
possibles = self.topology if only is None else {k: self[k] for k in only}
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if v and faces.issuperset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def within(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
"""Get all locations that completely contain this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``RoW`` can only be contained by ``GLO`` and ``RoW``.
"""
possibles = self.topology if only is None else {k: self[k] for k in only}
_ = lambda key: [key] if key in possibles else []
if 'RoW' not in self and key == 'RoW':
answer = [] + _('RoW') + _('GLO')
return list(reversed(answer)) if biggest_first else answer
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if faces.issubset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def split_face(self, face, number=None, ids=None):
"""Split a topological face into a number of small faces.
* ``face``: The face to split. Must be in the topology.
* ``number``: Number of new faces to create. Optional, can be inferred from ``ids``. Default is 2 new faces.
* ``ids``: Iterable of new face ids. Optional, default is the maximum integer in the existing topology plus one. ``ids`` don't have to be integers. If ``ids`` is specified, ``number`` is ignored.
Returns the new face ids.
"""
assert face in self.faces
if ids:
ids = set(ids)
else:
max_int = max(x for x in self.faces if isinstance(x, int))
ids = set(range(max_int + 1, max_int + 1 + (number or 2)))
for obj in self.topology.values():
if face in obj:
obj.discard(face)
obj.update(ids)
self.faces.discard(face)
self.faces.update(ids)
return ids
def add_definitions(self, data, namespace, relative=True):
"""Add new topological definitions to ``self.topology``.
If ``relative`` is true, then ``data`` is defined relative to the existing locations already in ``self.topology``, e.g. IMAGE:
.. code-block:: python
{"Russia Region": [
"AM",
"AZ",
"GE",
"RU"
]}
Otherwise, ``data`` is a dictionary with string keys and values of integer topology face id sets:
.. code-block:: python
{
'A': {1, 2, 3},
'B': {2, 3, 4},
}
"""
if not relative:
self.topology.update({(namespace, k): v for k, v in data.items()})
self.faces.update(set.union(*data.values()))
else:
self.topology.update({
(namespace, k): set.union(*[self[o] for o in v])
for k, v in data.items()
})
|
cmutel/constructive_geometries
|
constructive_geometries/geomatcher.py
|
Geomatcher.contained
|
python
|
def contained(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
if 'RoW' not in self:
if key == 'RoW':
return ['RoW'] if 'RoW' in (only or []) else []
elif only and 'RoW' in only:
only.pop(only.index('RoW'))
possibles = self.topology if only is None else {k: self[k] for k in only}
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if v and faces.issuperset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
|
Get all locations that are completely within this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``.contained("RoW")`` returns a list with either ``RoW`` or nothing.
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L155-L175
|
[
"def _finish_filter(self, lst, key, include_self, exclusive, biggest_first):\n \"\"\"Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly.\"\"\"\n key = self._actual_key(key)\n locations = [x[0] for x in lst]\n\n if not include_self and key in locations:\n lst.pop(locations.index(key))\n\n lst.sort(key=lambda x: x[1], reverse=biggest_first)\n lst = [x for x, y in lst]\n\n # RoW in both key and lst, but not defined; only RoW remains if exclusive\n if key == 'RoW' and 'RoW' not in self and exclusive:\n return ['RoW'] if 'RoW' in lst else []\n elif exclusive:\n removed, remaining = set(), []\n while lst:\n current = lst.pop(0)\n faces = self[current]\n if not faces.intersection(removed):\n removed.update(faces)\n remaining.append(current)\n lst = remaining\n\n # If RoW not resolved, make it the smallest\n if 'RoW' not in self and 'RoW' in lst:\n lst[-1 if biggest_first else 0] = lst.pop(lst.index('RoW'))\n\n return lst\n"
] |
class Geomatcher(MutableMapping):
"""Object managing spatial relationships using the a world topology.
``Geomatcher`` takes as its base data a definition of the world split into topological faces. This definition is provided by the `constructive_geometries <>`__ library. A toplogical face is a polygon which does not overlap any other topological face. In ``constructive_geometries``, these faces are defined by integer ids, so e.g. Ireland is:
.. code-block:: python
>>> from constructive_geometries import ConstructiveGeometries
>>> cg = ConstructiveGeometries()
>>> cg.data['IE']
[325, 327, 328, 334, 336, 337, 338, 345, 346, 347, 348, 350, 374, 2045]
By default, Geomatcher is populated with all world countries, and all ecoinvent regions. The instance of Geomatcher created in this file also includes the IMAGE world regions.
Geospatial definitions are namespaced, except for countries. Countries are therefore defined by their ISO two-letter codes, but other data should be referenced by a tuple of its namespace and identifier, e.g. ``('ecoinvent', 'NAFTA')``. You can also set a default namespace, either in instantiation (``Geomatcher(default_namespace="foo")``) or afterwards (``geomatcher_instance.default_namespace = 'foo'``). The default namespace is ``'ecoinvent'``.
Geomatcher supports the following operations:
* Retrieving face ids for a given location, acting as a dictionary (``geomatcher['foo']``)
* Adding new geospatial definitions, either directly with face ids or relative to existing definitions
* Splitting faces to allow for finer-scale regionalization
* Intersection, contained, and within calculations with several configuration options.
Initialization arguments:
* ``topology``: A dictionary of ``{str: set}`` labels to faces ids. Default is ``ecoinvent``, which loads the world and ecoinvent definitions from ``constructive_geometries``.
* ``default_namespace``: String defining the default search namespace. Default is ``'ecoinvent'``.
* ``use_coco``: Boolean, default ``True``. Use the `country_converter <https://github.com/konstantinstadler/country_converter>`__ library to fuzzy match country identifiers, e.g. "Austria" instead of "AT".
"""
__seen = set()
def __init__(self, topology='ecoinvent', default_namespace=None, use_coco=True):
self.coco = use_coco
if topology == 'ecoinvent':
self.default_namespace = 'ecoinvent'
def ns(x):
if len(x) == 2 or x == 'RoW':
return x
else:
return ('ecoinvent', x)
cg = ConstructiveGeometries()
self.topology = {ns(x): set(y) for x, y in cg.data.items()
if x != "__all__"}
self['GLO'] = reduce(set.union, self.topology.values())
else:
self.default_namespace = default_namespace
self.topology = topology
if not self.topology:
self.topology = {}
self.faces = set()
else:
self.faces = reduce(set.union, self.topology.values())
def __contains__(self, key):
return key in self.topology
def __getitem__(self, key):
if key == 'RoW' and 'RoW' not in self.topology:
return set()
return self.topology[self._actual_key(key)]
def __setitem__(self, key, value):
try:
key = self._actual_key(key)
except KeyError:
pass
self.topology[key] = value
def __delitem__(self, key):
del self.topology[self._actual_key(key)]
def __len__(self):
return len(self.topology)
def __iter__(self):
return iter(self.topology)
def _actual_key(self, key):
"""Translate provided key into the key used in the topology. Tries the unmodified key, the key with the default namespace, and the country converter. Raises a ``KeyError`` if none of these finds a suitable definition in ``self.topology``."""
if key in self or key in ("RoW", "GLO"):
return key
elif (self.default_namespace, key) in self:
return (self.default_namespace, key)
if isinstance(key, str) and self.coco:
new = coco.convert(names=[key], to='ISO2', not_found=None)
if new in self:
if new not in self.__seen:
self.__seen.add(key)
print("Geomatcher: Used '{}' for '{}'".format(new, key))
return new
raise KeyError("Can't find this location")
def _finish_filter(self, lst, key, include_self, exclusive, biggest_first):
"""Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly."""
key = self._actual_key(key)
locations = [x[0] for x in lst]
if not include_self and key in locations:
lst.pop(locations.index(key))
lst.sort(key=lambda x: x[1], reverse=biggest_first)
lst = [x for x, y in lst]
# RoW in both key and lst, but not defined; only RoW remains if exclusive
if key == 'RoW' and 'RoW' not in self and exclusive:
return ['RoW'] if 'RoW' in lst else []
elif exclusive:
removed, remaining = set(), []
while lst:
current = lst.pop(0)
faces = self[current]
if not faces.intersection(removed):
removed.update(faces)
remaining.append(current)
lst = remaining
# If RoW not resolved, make it the smallest
if 'RoW' not in self and 'RoW' in lst:
lst[-1 if biggest_first else 0] = lst.pop(lst.index('RoW'))
return lst
def intersects(self, key, include_self=False, exclusive=False, biggest_first=True, only=None):
"""Get all locations that intersect this location.
Note that sorting is done by first by number of faces intersecting ``key``; the total number of faces in the intersected region is only used to break sorting ties.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition, and therefore nothing intersects it. ``.intersects("RoW")`` returns a list with with ``RoW`` or nothing.
"""
possibles = self.topology if only is None else {k: self[k] for k in only}
if key == 'RoW' and 'RoW' not in self:
return ['RoW'] if 'RoW' in possibles else []
faces = self[key]
lst = [
(k, (len(v.intersection(faces)), len(v)))
for k, v in possibles.items()
if (faces.intersection(v))
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def within(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
"""Get all locations that completely contain this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``RoW`` can only be contained by ``GLO`` and ``RoW``.
"""
possibles = self.topology if only is None else {k: self[k] for k in only}
_ = lambda key: [key] if key in possibles else []
if 'RoW' not in self and key == 'RoW':
answer = [] + _('RoW') + _('GLO')
return list(reversed(answer)) if biggest_first else answer
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if faces.issubset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def split_face(self, face, number=None, ids=None):
"""Split a topological face into a number of small faces.
* ``face``: The face to split. Must be in the topology.
* ``number``: Number of new faces to create. Optional, can be inferred from ``ids``. Default is 2 new faces.
* ``ids``: Iterable of new face ids. Optional, default is the maximum integer in the existing topology plus one. ``ids`` don't have to be integers. If ``ids`` is specified, ``number`` is ignored.
Returns the new face ids.
"""
assert face in self.faces
if ids:
ids = set(ids)
else:
max_int = max(x for x in self.faces if isinstance(x, int))
ids = set(range(max_int + 1, max_int + 1 + (number or 2)))
for obj in self.topology.values():
if face in obj:
obj.discard(face)
obj.update(ids)
self.faces.discard(face)
self.faces.update(ids)
return ids
def add_definitions(self, data, namespace, relative=True):
"""Add new topological definitions to ``self.topology``.
If ``relative`` is true, then ``data`` is defined relative to the existing locations already in ``self.topology``, e.g. IMAGE:
.. code-block:: python
{"Russia Region": [
"AM",
"AZ",
"GE",
"RU"
]}
Otherwise, ``data`` is a dictionary with string keys and values of integer topology face id sets:
.. code-block:: python
{
'A': {1, 2, 3},
'B': {2, 3, 4},
}
"""
if not relative:
self.topology.update({(namespace, k): v for k, v in data.items()})
self.faces.update(set.union(*data.values()))
else:
self.topology.update({
(namespace, k): set.union(*[self[o] for o in v])
for k, v in data.items()
})
|
cmutel/constructive_geometries
|
constructive_geometries/geomatcher.py
|
Geomatcher.within
|
python
|
def within(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
possibles = self.topology if only is None else {k: self[k] for k in only}
_ = lambda key: [key] if key in possibles else []
if 'RoW' not in self and key == 'RoW':
answer = [] + _('RoW') + _('GLO')
return list(reversed(answer)) if biggest_first else answer
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if faces.issubset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
|
Get all locations that completely contain this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``RoW`` can only be contained by ``GLO`` and ``RoW``.
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L177-L195
|
[
"def _finish_filter(self, lst, key, include_self, exclusive, biggest_first):\n \"\"\"Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly.\"\"\"\n key = self._actual_key(key)\n locations = [x[0] for x in lst]\n\n if not include_self and key in locations:\n lst.pop(locations.index(key))\n\n lst.sort(key=lambda x: x[1], reverse=biggest_first)\n lst = [x for x, y in lst]\n\n # RoW in both key and lst, but not defined; only RoW remains if exclusive\n if key == 'RoW' and 'RoW' not in self and exclusive:\n return ['RoW'] if 'RoW' in lst else []\n elif exclusive:\n removed, remaining = set(), []\n while lst:\n current = lst.pop(0)\n faces = self[current]\n if not faces.intersection(removed):\n removed.update(faces)\n remaining.append(current)\n lst = remaining\n\n # If RoW not resolved, make it the smallest\n if 'RoW' not in self and 'RoW' in lst:\n lst[-1 if biggest_first else 0] = lst.pop(lst.index('RoW'))\n\n return lst\n",
"_ = lambda key: [key] if key in possibles else []\n"
] |
class Geomatcher(MutableMapping):
"""Object managing spatial relationships using the a world topology.
``Geomatcher`` takes as its base data a definition of the world split into topological faces. This definition is provided by the `constructive_geometries <>`__ library. A toplogical face is a polygon which does not overlap any other topological face. In ``constructive_geometries``, these faces are defined by integer ids, so e.g. Ireland is:
.. code-block:: python
>>> from constructive_geometries import ConstructiveGeometries
>>> cg = ConstructiveGeometries()
>>> cg.data['IE']
[325, 327, 328, 334, 336, 337, 338, 345, 346, 347, 348, 350, 374, 2045]
By default, Geomatcher is populated with all world countries, and all ecoinvent regions. The instance of Geomatcher created in this file also includes the IMAGE world regions.
Geospatial definitions are namespaced, except for countries. Countries are therefore defined by their ISO two-letter codes, but other data should be referenced by a tuple of its namespace and identifier, e.g. ``('ecoinvent', 'NAFTA')``. You can also set a default namespace, either in instantiation (``Geomatcher(default_namespace="foo")``) or afterwards (``geomatcher_instance.default_namespace = 'foo'``). The default namespace is ``'ecoinvent'``.
Geomatcher supports the following operations:
* Retrieving face ids for a given location, acting as a dictionary (``geomatcher['foo']``)
* Adding new geospatial definitions, either directly with face ids or relative to existing definitions
* Splitting faces to allow for finer-scale regionalization
* Intersection, contained, and within calculations with several configuration options.
Initialization arguments:
* ``topology``: A dictionary of ``{str: set}`` labels to faces ids. Default is ``ecoinvent``, which loads the world and ecoinvent definitions from ``constructive_geometries``.
* ``default_namespace``: String defining the default search namespace. Default is ``'ecoinvent'``.
* ``use_coco``: Boolean, default ``True``. Use the `country_converter <https://github.com/konstantinstadler/country_converter>`__ library to fuzzy match country identifiers, e.g. "Austria" instead of "AT".
"""
__seen = set()
def __init__(self, topology='ecoinvent', default_namespace=None, use_coco=True):
self.coco = use_coco
if topology == 'ecoinvent':
self.default_namespace = 'ecoinvent'
def ns(x):
if len(x) == 2 or x == 'RoW':
return x
else:
return ('ecoinvent', x)
cg = ConstructiveGeometries()
self.topology = {ns(x): set(y) for x, y in cg.data.items()
if x != "__all__"}
self['GLO'] = reduce(set.union, self.topology.values())
else:
self.default_namespace = default_namespace
self.topology = topology
if not self.topology:
self.topology = {}
self.faces = set()
else:
self.faces = reduce(set.union, self.topology.values())
def __contains__(self, key):
return key in self.topology
def __getitem__(self, key):
if key == 'RoW' and 'RoW' not in self.topology:
return set()
return self.topology[self._actual_key(key)]
def __setitem__(self, key, value):
try:
key = self._actual_key(key)
except KeyError:
pass
self.topology[key] = value
def __delitem__(self, key):
del self.topology[self._actual_key(key)]
def __len__(self):
return len(self.topology)
def __iter__(self):
return iter(self.topology)
def _actual_key(self, key):
"""Translate provided key into the key used in the topology. Tries the unmodified key, the key with the default namespace, and the country converter. Raises a ``KeyError`` if none of these finds a suitable definition in ``self.topology``."""
if key in self or key in ("RoW", "GLO"):
return key
elif (self.default_namespace, key) in self:
return (self.default_namespace, key)
if isinstance(key, str) and self.coco:
new = coco.convert(names=[key], to='ISO2', not_found=None)
if new in self:
if new not in self.__seen:
self.__seen.add(key)
print("Geomatcher: Used '{}' for '{}'".format(new, key))
return new
raise KeyError("Can't find this location")
def _finish_filter(self, lst, key, include_self, exclusive, biggest_first):
"""Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly."""
key = self._actual_key(key)
locations = [x[0] for x in lst]
if not include_self and key in locations:
lst.pop(locations.index(key))
lst.sort(key=lambda x: x[1], reverse=biggest_first)
lst = [x for x, y in lst]
# RoW in both key and lst, but not defined; only RoW remains if exclusive
if key == 'RoW' and 'RoW' not in self and exclusive:
return ['RoW'] if 'RoW' in lst else []
elif exclusive:
removed, remaining = set(), []
while lst:
current = lst.pop(0)
faces = self[current]
if not faces.intersection(removed):
removed.update(faces)
remaining.append(current)
lst = remaining
# If RoW not resolved, make it the smallest
if 'RoW' not in self and 'RoW' in lst:
lst[-1 if biggest_first else 0] = lst.pop(lst.index('RoW'))
return lst
def intersects(self, key, include_self=False, exclusive=False, biggest_first=True, only=None):
"""Get all locations that intersect this location.
Note that sorting is done by first by number of faces intersecting ``key``; the total number of faces in the intersected region is only used to break sorting ties.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition, and therefore nothing intersects it. ``.intersects("RoW")`` returns a list with with ``RoW`` or nothing.
"""
possibles = self.topology if only is None else {k: self[k] for k in only}
if key == 'RoW' and 'RoW' not in self:
return ['RoW'] if 'RoW' in possibles else []
faces = self[key]
lst = [
(k, (len(v.intersection(faces)), len(v)))
for k, v in possibles.items()
if (faces.intersection(v))
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def contained(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
"""Get all locations that are completely within this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``.contained("RoW")`` returns a list with either ``RoW`` or nothing.
"""
if 'RoW' not in self:
if key == 'RoW':
return ['RoW'] if 'RoW' in (only or []) else []
elif only and 'RoW' in only:
only.pop(only.index('RoW'))
possibles = self.topology if only is None else {k: self[k] for k in only}
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if v and faces.issuperset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def split_face(self, face, number=None, ids=None):
"""Split a topological face into a number of small faces.
* ``face``: The face to split. Must be in the topology.
* ``number``: Number of new faces to create. Optional, can be inferred from ``ids``. Default is 2 new faces.
* ``ids``: Iterable of new face ids. Optional, default is the maximum integer in the existing topology plus one. ``ids`` don't have to be integers. If ``ids`` is specified, ``number`` is ignored.
Returns the new face ids.
"""
assert face in self.faces
if ids:
ids = set(ids)
else:
max_int = max(x for x in self.faces if isinstance(x, int))
ids = set(range(max_int + 1, max_int + 1 + (number or 2)))
for obj in self.topology.values():
if face in obj:
obj.discard(face)
obj.update(ids)
self.faces.discard(face)
self.faces.update(ids)
return ids
def add_definitions(self, data, namespace, relative=True):
"""Add new topological definitions to ``self.topology``.
If ``relative`` is true, then ``data`` is defined relative to the existing locations already in ``self.topology``, e.g. IMAGE:
.. code-block:: python
{"Russia Region": [
"AM",
"AZ",
"GE",
"RU"
]}
Otherwise, ``data`` is a dictionary with string keys and values of integer topology face id sets:
.. code-block:: python
{
'A': {1, 2, 3},
'B': {2, 3, 4},
}
"""
if not relative:
self.topology.update({(namespace, k): v for k, v in data.items()})
self.faces.update(set.union(*data.values()))
else:
self.topology.update({
(namespace, k): set.union(*[self[o] for o in v])
for k, v in data.items()
})
|
cmutel/constructive_geometries
|
constructive_geometries/geomatcher.py
|
Geomatcher.split_face
|
python
|
def split_face(self, face, number=None, ids=None):
assert face in self.faces
if ids:
ids = set(ids)
else:
max_int = max(x for x in self.faces if isinstance(x, int))
ids = set(range(max_int + 1, max_int + 1 + (number or 2)))
for obj in self.topology.values():
if face in obj:
obj.discard(face)
obj.update(ids)
self.faces.discard(face)
self.faces.update(ids)
return ids
|
Split a topological face into a number of small faces.
* ``face``: The face to split. Must be in the topology.
* ``number``: Number of new faces to create. Optional, can be inferred from ``ids``. Default is 2 new faces.
* ``ids``: Iterable of new face ids. Optional, default is the maximum integer in the existing topology plus one. ``ids`` don't have to be integers. If ``ids`` is specified, ``number`` is ignored.
Returns the new face ids.
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L197-L223
| null |
class Geomatcher(MutableMapping):
"""Object managing spatial relationships using the a world topology.
``Geomatcher`` takes as its base data a definition of the world split into topological faces. This definition is provided by the `constructive_geometries <>`__ library. A toplogical face is a polygon which does not overlap any other topological face. In ``constructive_geometries``, these faces are defined by integer ids, so e.g. Ireland is:
.. code-block:: python
>>> from constructive_geometries import ConstructiveGeometries
>>> cg = ConstructiveGeometries()
>>> cg.data['IE']
[325, 327, 328, 334, 336, 337, 338, 345, 346, 347, 348, 350, 374, 2045]
By default, Geomatcher is populated with all world countries, and all ecoinvent regions. The instance of Geomatcher created in this file also includes the IMAGE world regions.
Geospatial definitions are namespaced, except for countries. Countries are therefore defined by their ISO two-letter codes, but other data should be referenced by a tuple of its namespace and identifier, e.g. ``('ecoinvent', 'NAFTA')``. You can also set a default namespace, either in instantiation (``Geomatcher(default_namespace="foo")``) or afterwards (``geomatcher_instance.default_namespace = 'foo'``). The default namespace is ``'ecoinvent'``.
Geomatcher supports the following operations:
* Retrieving face ids for a given location, acting as a dictionary (``geomatcher['foo']``)
* Adding new geospatial definitions, either directly with face ids or relative to existing definitions
* Splitting faces to allow for finer-scale regionalization
* Intersection, contained, and within calculations with several configuration options.
Initialization arguments:
* ``topology``: A dictionary of ``{str: set}`` labels to faces ids. Default is ``ecoinvent``, which loads the world and ecoinvent definitions from ``constructive_geometries``.
* ``default_namespace``: String defining the default search namespace. Default is ``'ecoinvent'``.
* ``use_coco``: Boolean, default ``True``. Use the `country_converter <https://github.com/konstantinstadler/country_converter>`__ library to fuzzy match country identifiers, e.g. "Austria" instead of "AT".
"""
__seen = set()
def __init__(self, topology='ecoinvent', default_namespace=None, use_coco=True):
self.coco = use_coco
if topology == 'ecoinvent':
self.default_namespace = 'ecoinvent'
def ns(x):
if len(x) == 2 or x == 'RoW':
return x
else:
return ('ecoinvent', x)
cg = ConstructiveGeometries()
self.topology = {ns(x): set(y) for x, y in cg.data.items()
if x != "__all__"}
self['GLO'] = reduce(set.union, self.topology.values())
else:
self.default_namespace = default_namespace
self.topology = topology
if not self.topology:
self.topology = {}
self.faces = set()
else:
self.faces = reduce(set.union, self.topology.values())
def __contains__(self, key):
return key in self.topology
def __getitem__(self, key):
if key == 'RoW' and 'RoW' not in self.topology:
return set()
return self.topology[self._actual_key(key)]
def __setitem__(self, key, value):
try:
key = self._actual_key(key)
except KeyError:
pass
self.topology[key] = value
def __delitem__(self, key):
del self.topology[self._actual_key(key)]
def __len__(self):
return len(self.topology)
def __iter__(self):
return iter(self.topology)
def _actual_key(self, key):
"""Translate provided key into the key used in the topology. Tries the unmodified key, the key with the default namespace, and the country converter. Raises a ``KeyError`` if none of these finds a suitable definition in ``self.topology``."""
if key in self or key in ("RoW", "GLO"):
return key
elif (self.default_namespace, key) in self:
return (self.default_namespace, key)
if isinstance(key, str) and self.coco:
new = coco.convert(names=[key], to='ISO2', not_found=None)
if new in self:
if new not in self.__seen:
self.__seen.add(key)
print("Geomatcher: Used '{}' for '{}'".format(new, key))
return new
raise KeyError("Can't find this location")
def _finish_filter(self, lst, key, include_self, exclusive, biggest_first):
"""Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly."""
key = self._actual_key(key)
locations = [x[0] for x in lst]
if not include_self and key in locations:
lst.pop(locations.index(key))
lst.sort(key=lambda x: x[1], reverse=biggest_first)
lst = [x for x, y in lst]
# RoW in both key and lst, but not defined; only RoW remains if exclusive
if key == 'RoW' and 'RoW' not in self and exclusive:
return ['RoW'] if 'RoW' in lst else []
elif exclusive:
removed, remaining = set(), []
while lst:
current = lst.pop(0)
faces = self[current]
if not faces.intersection(removed):
removed.update(faces)
remaining.append(current)
lst = remaining
# If RoW not resolved, make it the smallest
if 'RoW' not in self and 'RoW' in lst:
lst[-1 if biggest_first else 0] = lst.pop(lst.index('RoW'))
return lst
def intersects(self, key, include_self=False, exclusive=False, biggest_first=True, only=None):
"""Get all locations that intersect this location.
Note that sorting is done by first by number of faces intersecting ``key``; the total number of faces in the intersected region is only used to break sorting ties.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition, and therefore nothing intersects it. ``.intersects("RoW")`` returns a list with ``RoW`` or nothing.
"""
possibles = self.topology if only is None else {k: self[k] for k in only}
if key == 'RoW' and 'RoW' not in self:
return ['RoW'] if 'RoW' in possibles else []
faces = self[key]
lst = [
(k, (len(v.intersection(faces)), len(v)))
for k, v in possibles.items()
if (faces.intersection(v))
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def contained(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
"""Get all locations that are completely within this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``.contained("RoW")`` returns a list with either ``RoW`` or nothing.
"""
if 'RoW' not in self:
if key == 'RoW':
return ['RoW'] if 'RoW' in (only or []) else []
elif only and 'RoW' in only:
only.pop(only.index('RoW'))
possibles = self.topology if only is None else {k: self[k] for k in only}
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if v and faces.issuperset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def within(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
"""Get all locations that completely contain this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``RoW`` can only be contained by ``GLO`` and ``RoW``.
"""
possibles = self.topology if only is None else {k: self[k] for k in only}
_ = lambda key: [key] if key in possibles else []
if 'RoW' not in self and key == 'RoW':
answer = [] + _('RoW') + _('GLO')
return list(reversed(answer)) if biggest_first else answer
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if faces.issubset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
def add_definitions(self, data, namespace, relative=True):
"""Add new topological definitions to ``self.topology``.
If ``relative`` is true, then ``data`` is defined relative to the existing locations already in ``self.topology``, e.g. IMAGE:
.. code-block:: python
{"Russia Region": [
"AM",
"AZ",
"GE",
"RU"
]}
Otherwise, ``data`` is a dictionary with string keys and values of integer topology face id sets:
.. code-block:: python
{
'A': {1, 2, 3},
'B': {2, 3, 4},
}
"""
if not relative:
self.topology.update({(namespace, k): v for k, v in data.items()})
self.faces.update(set.union(*data.values()))
else:
self.topology.update({
(namespace, k): set.union(*[self[o] for o in v])
for k, v in data.items()
})
|
cmutel/constructive_geometries
|
constructive_geometries/geomatcher.py
|
Geomatcher.add_definitions
|
python
|
def add_definitions(self, data, namespace, relative=True):
    """Register additional region definitions under ``namespace``.

    ``data`` maps labels either to lists of already-known locations whose
    face ids are merged (``relative=True``), or directly to sets of face
    ids (``relative=False``). Entries are stored under ``(namespace, label)``
    keys. Absolute definitions also extend ``self.faces``.
    """
    if relative:
        resolved = {
            (namespace, label): set.union(*[self[loc] for loc in members])
            for label, members in data.items()
        }
        self.topology.update(resolved)
    else:
        self.topology.update(
            {(namespace, label): face_ids for label, face_ids in data.items()}
        )
        self.faces.update(set.union(*data.values()))
|
Add new topological definitions to ``self.topology``.
If ``relative`` is true, then ``data`` is defined relative to the existing locations already in ``self.topology``, e.g. IMAGE:
.. code-block:: python
{"Russia Region": [
"AM",
"AZ",
"GE",
"RU"
]}
Otherwise, ``data`` is a dictionary with string keys and values of integer topology face id sets:
.. code-block:: python
{
'A': {1, 2, 3},
'B': {2, 3, 4},
}
|
train
|
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L225-L256
| null |
class Geomatcher(MutableMapping):
    """Object managing spatial relationships using a world topology.

    ``Geomatcher`` takes as its base data a definition of the world split into
    topological faces. This definition is provided by the
    ``constructive_geometries`` library. A topological face is a polygon which
    does not overlap any other topological face. In ``constructive_geometries``,
    these faces are defined by integer ids, so e.g. Ireland is:

    .. code-block:: python

        >>> from constructive_geometries import ConstructiveGeometries
        >>> cg = ConstructiveGeometries()
        >>> cg.data['IE']
        [325, 327, 328, 334, 336, 337, 338, 345, 346, 347, 348, 350, 374, 2045]

    By default, Geomatcher is populated with all world countries, and all
    ecoinvent regions.

    Geospatial definitions are namespaced, except for countries. Countries are
    therefore defined by their ISO two-letter codes, but other data should be
    referenced by a tuple of its namespace and identifier, e.g.
    ``('ecoinvent', 'NAFTA')``. You can also set a default namespace, either in
    instantiation (``Geomatcher(default_namespace="foo")``) or afterwards
    (``geomatcher_instance.default_namespace = 'foo'``). The default namespace
    is ``'ecoinvent'``.

    Geomatcher supports the following operations:

    * Retrieving face ids for a given location, acting as a dictionary
      (``geomatcher['foo']``)
    * Splitting faces to allow for finer-scale regionalization
    * Intersection, contained, and within calculations with several
      configuration options.

    Initialization arguments:

    * ``topology``: A dictionary of ``{str: set}`` labels to face ids. Default
      is ``ecoinvent``, which loads the world and ecoinvent definitions from
      ``constructive_geometries``.
    * ``default_namespace``: String defining the default search namespace.
      Default is ``'ecoinvent'``.
    * ``use_coco``: Boolean, default ``True``. Use the `country_converter
      <https://github.com/konstantinstadler/country_converter>`__ library to
      fuzzy match country identifiers, e.g. "Austria" instead of "AT".
    """
    # Keys already reported by the coco fuzzy matcher (see ``_actual_key``);
    # class-level so each translation is announced at most once per process.
    __seen = set()

    def __init__(self, topology='ecoinvent', default_namespace=None, use_coco=True):
        self.coco = use_coco
        if topology == 'ecoinvent':
            self.default_namespace = 'ecoinvent'

            def ns(x):
                # Two-letter country codes and 'RoW' are global keys; all other
                # ecoinvent labels live in the 'ecoinvent' namespace.
                if len(x) == 2 or x == 'RoW':
                    return x
                else:
                    return ('ecoinvent', x)

            cg = ConstructiveGeometries()
            self.topology = {ns(x): set(y) for x, y in cg.data.items()
                             if x != "__all__"}
            self['GLO'] = reduce(set.union, self.topology.values())
        else:
            self.default_namespace = default_namespace
            self.topology = topology
        if not self.topology:
            self.topology = {}
            self.faces = set()
        else:
            # Union of all face ids across every defined location.
            self.faces = reduce(set.union, self.topology.values())

    def __contains__(self, key):
        return key in self.topology

    def __getitem__(self, key):
        # An unresolved 'RoW' has no spatial definition: empty face set.
        if key == 'RoW' and 'RoW' not in self.topology:
            return set()
        return self.topology[self._actual_key(key)]

    def __setitem__(self, key, value):
        try:
            key = self._actual_key(key)
        except KeyError:
            # New location; store under the key exactly as given.
            pass
        self.topology[key] = value

    def __delitem__(self, key):
        del self.topology[self._actual_key(key)]

    def __len__(self):
        return len(self.topology)

    def __iter__(self):
        return iter(self.topology)

    def _actual_key(self, key):
        """Translate provided key into the key used in the topology.

        Tries the unmodified key, the key with the default namespace, and the
        country converter. Raises a ``KeyError`` if none of these finds a
        suitable definition in ``self.topology``.
        """
        if key in self or key in ("RoW", "GLO"):
            return key
        elif (self.default_namespace, key) in self:
            return (self.default_namespace, key)
        if isinstance(key, str) and self.coco:
            new = coco.convert(names=[key], to='ISO2', not_found=None)
            if new in self:
                # Announce each fuzzy translation only once. Fix: check and
                # record the same value (previously ``new`` was checked but
                # ``key`` recorded, so the message repeated on every call).
                if key not in self.__seen:
                    self.__seen.add(key)
                    print("Geomatcher: Used '{}' for '{}'".format(new, key))
                return new
        raise KeyError("Can't find this location")

    def _finish_filter(self, lst, key, include_self, exclusive, biggest_first):
        """Finish filtering a GIS operation.

        Can optionally exclude the input key, sort results, and exclude
        overlapping results. Internal function, not normally called directly.
        """
        key = self._actual_key(key)
        locations = [x[0] for x in lst]
        if not include_self and key in locations:
            lst.pop(locations.index(key))
        # Sort by the size metric computed upstream (int or tuple).
        lst.sort(key=lambda x: x[1], reverse=biggest_first)
        lst = [x for x, y in lst]
        # RoW in both key and lst, but not defined; only RoW remains if exclusive
        if key == 'RoW' and 'RoW' not in self and exclusive:
            return ['RoW'] if 'RoW' in lst else []
        elif exclusive:
            # Greedily keep locations whose faces do not overlap anything
            # already kept, in the order established by the sort above.
            removed, remaining = set(), []
            while lst:
                current = lst.pop(0)
                faces = self[current]
                if not faces.intersection(removed):
                    removed.update(faces)
                    remaining.append(current)
            lst = remaining
        # If RoW not resolved, move it to the "smallest" end of the ordering.
        if 'RoW' not in self and 'RoW' in lst:
            row = lst.pop(lst.index('RoW'))
            # Fix: insert instead of assigning over the end element, which
            # silently dropped one result whenever this branch triggered.
            lst.insert(len(lst) if biggest_first else 0, row)
        return lst

    def intersects(self, key, include_self=False, exclusive=False, biggest_first=True, only=None):
        """Get all locations that intersect this location.

        Note that sorting is done first by the number of faces intersecting
        ``key``; the total number of faces in the intersected region is only
        used to break sorting ties.

        If the ``resolved_row`` context manager is not used, ``RoW`` doesn't
        have a spatial definition, and therefore nothing intersects it.
        ``.intersects("RoW")`` returns a list with ``RoW`` or nothing.
        """
        possibles = self.topology if only is None else {k: self[k] for k in only}
        if key == 'RoW' and 'RoW' not in self:
            return ['RoW'] if 'RoW' in possibles else []
        faces = self[key]
        lst = [
            (k, (len(v.intersection(faces)), len(v)))
            for k, v in possibles.items()
            if (faces.intersection(v))
        ]
        return self._finish_filter(lst, key, include_self, exclusive, biggest_first)

    def contained(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
        """Get all locations that are completely within this location.

        If the ``resolved_row`` context manager is not used, ``RoW`` doesn't
        have a spatial definition. Therefore, ``.contained("RoW")`` returns a
        list with either ``RoW`` or nothing.
        """
        if 'RoW' not in self:
            if key == 'RoW':
                return ['RoW'] if 'RoW' in (only or []) else []
            elif only and 'RoW' in only:
                # Unresolved RoW has no faces; drop it from the candidates.
                only.pop(only.index('RoW'))
        possibles = self.topology if only is None else {k: self[k] for k in only}
        faces = self[key]
        lst = [
            (k, len(v))
            for k, v in possibles.items()
            if v and faces.issuperset(v)
        ]
        return self._finish_filter(lst, key, include_self, exclusive, biggest_first)

    def within(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
        """Get all locations that completely contain this location.

        If the ``resolved_row`` context manager is not used, ``RoW`` doesn't
        have a spatial definition. Therefore, ``RoW`` can only be contained by
        ``GLO`` and ``RoW``.
        """
        possibles = self.topology if only is None else {k: self[k] for k in only}
        _ = lambda key: [key] if key in possibles else []
        if 'RoW' not in self and key == 'RoW':
            answer = [] + _('RoW') + _('GLO')
            return list(reversed(answer)) if biggest_first else answer
        faces = self[key]
        lst = [
            (k, len(v))
            for k, v in possibles.items()
            if faces.issubset(v)
        ]
        return self._finish_filter(lst, key, include_self, exclusive, biggest_first)

    def split_face(self, face, number=None, ids=None):
        """Split a topological face into a number of smaller faces.

        * ``face``: The face to split. Must be in the topology.
        * ``number``: Number of new faces to create. Optional, can be inferred
          from ``ids``. Default is 2 new faces.
        * ``ids``: Iterable of new face ids. Optional, default is the maximum
          integer in the existing topology plus one. ``ids`` don't have to be
          integers. If ``ids`` is specified, ``number`` is ignored.

        Returns the new face ids.
        """
        assert face in self.faces
        if ids:
            ids = set(ids)
        else:
            max_int = max(x for x in self.faces if isinstance(x, int))
            ids = set(range(max_int + 1, max_int + 1 + (number or 2)))
        # Replace the split face with the new ids everywhere it occurs.
        for obj in self.topology.values():
            if face in obj:
                obj.discard(face)
                obj.update(ids)
        self.faces.discard(face)
        self.faces.update(ids)
        return ids
|
maxfischer2781/include
|
include/inhibit.py
|
DisabledIncludeTypes.disable
|
python
|
def disable(self, identifier, children_only=False):
import_path = self._identifier2import_path(identifier=identifier)
if not children_only and import_path not in self._disabled:
self._disable_path(import_path)
self._disabled.add(import_path)
if import_path not in self._children_disabled:
self._children_disabled.add(import_path)
self._write_child_disabled()
|
Disable an include type
:param identifier: module or name of the include type
:param children_only: disable the include type only for child processes, not the current process
The ``identifier`` can be specified in multiple ways to disable an include type:
**module** (``include.files`` or ``include.mount.files``)
The base module implementing the include type.
These modules have a ``module.IMPORT_PATH`` attribute.
**implementation path** (``"include.files"``)
Import path of the module implementing the include type.
**mount path** (``"include.mount.files"``)
Mount path of the module implementing the include type.
**short path** (``"files"``)
Relative path of the module implementing the include type.
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/inhibit.py#L66-L94
|
[
"def _write_child_disabled(self):\n os.environ[self.environment_key] = ','.join(self._children_disabled)\n",
"def _disable_path(import_path):\n from . import _IMPORT_HOOKS\n if import_path in _IMPORT_HOOKS:\n sys.meta_path.remove(_IMPORT_HOOKS[import_path])\n _IMPORT_HOOKS[import_path] = _disabled_loader = DisabledTypeLoader(import_path)\n sys.meta_path.insert(sys.meta_path.index(mount.DEFAULT_MOUNT_LOADER), _disabled_loader)\n",
"def _identifier2import_path(self, identifier):\n if isinstance(identifier, types.ModuleType):\n return self._module_identifier2import_path(identifier)\n elif isinstance(identifier, _string_types):\n return self._string_identifier2import_path(identifier)\n else:\n raise TypeError('a name, mount name, short name or module of an import hook is required')\n"
] |
class DisabledIncludeTypes(object):
    """
    Interface for disabling include types

    Meta-container to control disabled include types.
    The methods :py:meth:`disable` and :py:meth:`enable` allow to control which include types can be used.
    Disabled types cannot be used to import code, be it explicitly or implicitly via bootstrapping.
    Once a type is disabled, attempts to import code with it raise :py:exc:`DisabledIncludeError`.

    This is provided as a two-level filter:
    each type can be disabled either for both the current and any child process, or only for child processes.
    It is not possible to enable an include type for child processes but not the current process.
    Note that child processes inherit disabled types only on startup.

    :note: This is a singleton, as it controls interpreter state.
    """
    # Singleton instance cache; populated by the first ``__new__`` call.
    _singleton_instance = None
    #: Key of the environment storing include types disabled for child processes
    environment_key = 'PY_INCLUDE_DISABLE'

    def __new__(cls):
        if cls._singleton_instance is not None:
            return cls._singleton_instance
        # NOTE(review): allocated via ``collections.MutableSet.__new__`` even
        # though the class statement inherits from ``object`` -- confirm the
        # intended base class.
        self = cls._singleton_instance = collections.MutableSet.__new__(cls)
        # Seed the in-process deny list from the environment variable so a
        # child process starts with the restrictions its parent exported.
        self._disabled = set(
            self._identifier2import_path(identifier.strip())
            for identifier in os.environ.get(self.environment_key, '').split(',')
            if identifier
        )
        for import_path in self._disabled:
            self._disable_path(import_path)
        self._children_disabled = self._disabled.copy()
        self._write_child_disabled()
        return self

    # Read-only set protocol over the types disabled in the current process.
    def __contains__(self, item):
        return item in self._disabled

    def __iter__(self):
        return iter(self._disabled)

    def __len__(self):
        return len(self._disabled)

    def __repr__(self):
        return '<%s.%s for %r, children %r>' % (
            self.__class__.__module__, self.__class__.__name__,
            ','.join(self._disabled),
            ','.join(self._children_disabled - self._disabled)
        )

    def enable(self, identifier, exclude_children=False):
        """
        Enable a previously disabled include type

        :param identifier: module or name of the include type
        :param exclude_children: disable the include type only for child processes, not the current process

        The ``identifier`` can be specified in multiple ways to disable an include type.
        See :py:meth:`~.DisabledIncludeTypes.disable` for details.
        """
        import_path = self._identifier2import_path(identifier=identifier)
        if import_path in self._disabled:
            self._enable_path(import_path)
            self._disabled.remove(import_path)
        # Unless the restriction should persist for children, also update the
        # environment variable consumed by child processes.
        if not exclude_children and import_path in self._children_disabled:
            self._children_disabled.remove(import_path)
            self._write_child_disabled()

    def _write_child_disabled(self):
        # Export the child-process deny list via the environment.
        os.environ[self.environment_key] = ','.join(self._children_disabled)

    @staticmethod
    def _disable_path(import_path):
        from . import _IMPORT_HOOKS
        # Replace any live hook with a DisabledTypeLoader that rejects imports,
        # inserted ahead of the default mount loader so it wins resolution.
        if import_path in _IMPORT_HOOKS:
            sys.meta_path.remove(_IMPORT_HOOKS[import_path])
        _IMPORT_HOOKS[import_path] = _disabled_loader = DisabledTypeLoader(import_path)
        sys.meta_path.insert(sys.meta_path.index(mount.DEFAULT_MOUNT_LOADER), _disabled_loader)

    @staticmethod
    def _enable_path(import_path):
        from . import _IMPORT_HOOKS
        # Drop the blocking loader; a real hook is re-installed lazily on use.
        _disabled_loader = _IMPORT_HOOKS.pop(import_path)
        sys.meta_path.remove(_disabled_loader)

    # Translation of short identifiers to import module paths
    def _identifier2import_path(self, identifier):
        if isinstance(identifier, types.ModuleType):
            return self._module_identifier2import_path(identifier)
        elif isinstance(identifier, _string_types):
            return self._string_identifier2import_path(identifier)
        else:
            raise TypeError('a name, mount name, short name or module of an import hook is required')

    @staticmethod
    def _module_identifier2import_path(module):
        try:
            return module.IMPORT_PATH
        except AttributeError:
            raise ValueError('module %r is not an import hook module' % module.__name__)  # no module.IMPORT_PATH

    def _string_identifier2import_path(self, identifier):
        try:
            # identifier is an import hook module name?
            identifier = mount.DEFAULT_MOUNT_LOADER.name2mount(identifier)
        except ValueError:
            try:
                # identifier is already a mount module name?
                mount.DEFAULT_MOUNT_LOADER.mount2name(identifier)
            except ValueError:
                # identifier is an import hook module *short* name?
                _module_name = '%s.%s' % (__name__.split('.', 1)[0], identifier)
                try:
                    __import__(_module_name)
                except ImportError:
                    raise ValueError(
                        'identifier %r cannot be resolved to an import hook module' %\
                        identifier  # expect 'include.<type>', 'include.mount.<type>' or '<type>'
                    )
                else:
                    return self._module_identifier2import_path(sys.modules[_module_name])
        return identifier
|
maxfischer2781/include
|
include/inhibit.py
|
DisabledIncludeTypes.enable
|
python
|
def enable(self, identifier, exclude_children=False):
    """Re-enable a previously disabled include type.

    :param identifier: module or name of the include type
    :param exclude_children: lift the restriction for the current process
        only, leaving it in place for child processes

    See :py:meth:`~.DisabledIncludeTypes.disable` for the accepted
    ``identifier`` forms.
    """
    target = self._identifier2import_path(identifier=identifier)
    # Lift the in-process restriction, if one is active.
    if target in self._disabled:
        self._enable_path(target)
        self._disabled.discard(target)
    if exclude_children:
        return
    # Also lift it for future child processes and re-export the environment.
    if target in self._children_disabled:
        self._children_disabled.discard(target)
        self._write_child_disabled()
|
Enable a previously disabled include type
:param identifier: module or name of the include type
:param exclude_children: disable the include type only for child processes, not the current process
The ``identifier`` can be specified in multiple ways to disable an include type.
See :py:meth:`~.DisabledIncludeTypes.disable` for details.
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/inhibit.py#L96-L112
|
[
"def _write_child_disabled(self):\n os.environ[self.environment_key] = ','.join(self._children_disabled)\n",
"def _enable_path(import_path):\n from . import _IMPORT_HOOKS\n _disabled_loader = _IMPORT_HOOKS.pop(import_path)\n sys.meta_path.remove(_disabled_loader)\n",
"def _identifier2import_path(self, identifier):\n if isinstance(identifier, types.ModuleType):\n return self._module_identifier2import_path(identifier)\n elif isinstance(identifier, _string_types):\n return self._string_identifier2import_path(identifier)\n else:\n raise TypeError('a name, mount name, short name or module of an import hook is required')\n"
] |
class DisabledIncludeTypes(object):
    """
    Interface for disabling include types

    Meta-container to control disabled include types.
    The methods :py:meth:`disable` and :py:meth:`enable` allow to control which include types can be used.
    Disabled types cannot be used to import code, be it explicitly or implicitly via bootstrapping.
    Once a type is disabled, attempts to import code with it raise :py:exc:`DisabledIncludeError`.

    This is provided as a two-level filter:
    each type can be disabled either for both the current and any child process, or only for child processes.
    It is not possible to enable an include type for child processes but not the current process.
    Note that child processes inherit disabled types only on startup.

    :note: This is a singleton, as it controls interpreter state.
    """
    # Singleton instance cache; populated by the first ``__new__`` call.
    _singleton_instance = None
    #: Key of the environment storing include types disabled for child processes
    environment_key = 'PY_INCLUDE_DISABLE'

    def __new__(cls):
        if cls._singleton_instance is not None:
            return cls._singleton_instance
        # NOTE(review): allocated via ``collections.MutableSet.__new__`` even
        # though the class statement inherits from ``object`` -- confirm the
        # intended base class.
        self = cls._singleton_instance = collections.MutableSet.__new__(cls)
        # Seed the in-process deny list from the inherited environment.
        self._disabled = set(
            self._identifier2import_path(identifier.strip())
            for identifier in os.environ.get(self.environment_key, '').split(',')
            if identifier
        )
        for import_path in self._disabled:
            self._disable_path(import_path)
        self._children_disabled = self._disabled.copy()
        self._write_child_disabled()
        return self

    # Read-only set protocol over the types disabled in the current process.
    def __contains__(self, item):
        return item in self._disabled

    def __iter__(self):
        return iter(self._disabled)

    def __len__(self):
        return len(self._disabled)

    def __repr__(self):
        return '<%s.%s for %r, children %r>' % (
            self.__class__.__module__, self.__class__.__name__,
            ','.join(self._disabled),
            ','.join(self._children_disabled - self._disabled)
        )

    def disable(self, identifier, children_only=False):
        """
        Disable an include type

        :param identifier: module or name of the include type
        :param children_only: disable the include type only for child processes, not the current process

        The ``identifier`` can be specified in multiple ways to disable an include type:

        **module** (``include.files`` or ``include.mount.files``)
            The base module implementing the include type.
            These modules have a ``module.IMPORT_PATH`` attribute.

        **implementation path** (``"include.files"``)
            Import path of the module implementing the include type.

        **mount path** (``"include.mount.files"``)
            Mount path of the module implementing the include type.

        **short path** (``"files"``)
            Relative path of the module implementing the include type.
        """
        import_path = self._identifier2import_path(identifier=identifier)
        if not children_only and import_path not in self._disabled:
            self._disable_path(import_path)
            self._disabled.add(import_path)
        # Children are always restricted; export the updated deny list.
        if import_path not in self._children_disabled:
            self._children_disabled.add(import_path)
            self._write_child_disabled()

    def _write_child_disabled(self):
        # Export the child-process deny list via the environment.
        os.environ[self.environment_key] = ','.join(self._children_disabled)

    @staticmethod
    def _disable_path(import_path):
        from . import _IMPORT_HOOKS
        # Replace any live hook with a DisabledTypeLoader that rejects imports,
        # inserted ahead of the default mount loader so it wins resolution.
        if import_path in _IMPORT_HOOKS:
            sys.meta_path.remove(_IMPORT_HOOKS[import_path])
        _IMPORT_HOOKS[import_path] = _disabled_loader = DisabledTypeLoader(import_path)
        sys.meta_path.insert(sys.meta_path.index(mount.DEFAULT_MOUNT_LOADER), _disabled_loader)

    @staticmethod
    def _enable_path(import_path):
        from . import _IMPORT_HOOKS
        # Drop the blocking loader; a real hook is re-installed lazily on use.
        _disabled_loader = _IMPORT_HOOKS.pop(import_path)
        sys.meta_path.remove(_disabled_loader)

    # Translation of short identifiers to import module paths
    def _identifier2import_path(self, identifier):
        if isinstance(identifier, types.ModuleType):
            return self._module_identifier2import_path(identifier)
        elif isinstance(identifier, _string_types):
            return self._string_identifier2import_path(identifier)
        else:
            raise TypeError('a name, mount name, short name or module of an import hook is required')

    @staticmethod
    def _module_identifier2import_path(module):
        try:
            return module.IMPORT_PATH
        except AttributeError:
            raise ValueError('module %r is not an import hook module' % module.__name__)  # no module.IMPORT_PATH

    def _string_identifier2import_path(self, identifier):
        try:
            # identifier is an import hook module name?
            identifier = mount.DEFAULT_MOUNT_LOADER.name2mount(identifier)
        except ValueError:
            try:
                # identifier is already a mount module name?
                mount.DEFAULT_MOUNT_LOADER.mount2name(identifier)
            except ValueError:
                # identifier is an import hook module *short* name?
                _module_name = '%s.%s' % (__name__.split('.', 1)[0], identifier)
                try:
                    __import__(_module_name)
                except ImportError:
                    raise ValueError(
                        'identifier %r cannot be resolved to an import hook module' %\
                        identifier  # expect 'include.<type>', 'include.mount.<type>' or '<type>'
                    )
                else:
                    return self._module_identifier2import_path(sys.modules[_module_name])
        return identifier
|
maxfischer2781/include
|
include/inhibit.py
|
DisabledTypeLoader.load_module
|
python
|
def load_module(self, name):
    """Return the already-imported module, or refuse to load a new one.

    Reloading a module that is already cached is a no-op and returns the
    cached module. Any attempt to import a *new* module raises
    :py:exc:`DisabledIncludeError`, because this include type is disabled.
    """
    cache = sys.modules
    if name in cache:
        # reload of a cached module: return it unchanged
        return cache[name]
    raise DisabledIncludeError(
        'Include type %r disabled, cannot import module %r'
        % (self._module_prefix, name)
    )
|
Load and return a module
If the module is already loaded, the existing module is returned.
Otherwise, raises :py:exc:`DisabledIncludeError`.
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/inhibit.py#L184-L194
| null |
class DisabledTypeLoader(import_hook.BaseIncludeLoader):
    # Placeholder loader installed on sys.meta_path while an include type is
    # disabled; it rejects every attempt to resolve a uri for that type.
    def uri2module(self, uri):
        """
        Convert an unencoded source uri to an encoded module name

        Always raises :py:exc:`DisabledIncludeError`.
        """
        raise DisabledIncludeError('Include type %r disabled' % self._module_prefix)
|
maxfischer2781/include
|
include/__init__.py
|
disable
|
python
|
def disable(identifier, children_only=False):
    """
    Disable an include type

    :param identifier: module or name of the include type
    :param children_only: disable the include type only for child processes, not the current process

    Thin module-level wrapper delegating to the ``DISABLED_TYPES`` singleton.
    """
    DISABLED_TYPES.disable(identifier=identifier, children_only=children_only)
|
Disable an include type
:param identifier: module or name of the include type
:param children_only: disable the include type only for child processes, not the current process
The ``identifier`` can be specified in multiple ways to disable an include type.
See :py:meth:`~.DisabledIncludeTypes.disable` for details.
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/__init__.py#L68-L78
| null |
from __future__ import absolute_import
import sys
import weakref

# weak reference to installed hooks
# Maps an include type's IMPORT_PATH to its installed import hook; weak values
# let a hook disappear once nothing else references it.
_IMPORT_HOOKS = weakref.WeakValueDictionary()
# must have _IMPORT_HOOKS to bootstrap hook disabling
from .inhibit import DISABLED_TYPES


def path(file_path):
    """
    Include module code from a file identified by its path

    :param file_path: path to a file containing module code
    :type file_path: str
    :return: the imported module
    :rtype: module

    Comparable to ``execfile``, but respects the rules and constraints of modules.
    If invoked again with the same ``file_path``, the same module is returned.

    .. code:: python

        import include

        my_config = include.path('/etc/sysconfig/app_conf.py')
    """
    from . import files
    return _import_url(module_url=file_path, include_type=files)


def source(source_code):
    """
    Include module code directly from a string

    :param source_code: source code of the module
    :type source_code: str
    :return: the imported module
    :rtype: module

    Comparable to ``exec`` in a separate ``globals`` namespace, but respects the rules and constraints of modules.
    If invoked again with the same ``source_code``, the same module is returned.

    .. code:: python

        >>> import include
        >>> my_module = include.source(
        >>> \"\"\"
        ... def foo():
        ...     return {constant}
        ... \"\"\".format(constant=3))
        >>> my_module.foo() == 3
        True
    """
    from . import encoded
    return _import_url(module_url=source_code, include_type=encoded)


def _import_url(module_url, include_type):
    # Install the include type's import hook on first use, translate the raw
    # uri into an importable (encoded) module name, then import and return it.
    if include_type.IMPORT_PATH not in _IMPORT_HOOKS:
        include_type.install()
    import_hook = _IMPORT_HOOKS[include_type.IMPORT_PATH]
    module_path = import_hook.uri2module(module_url)
    __import__(module_path)
    return sys.modules[module_path]


def enable(identifier, exclude_children=False):
    """
    Enable a previously disabled include type

    :param identifier: module or name of the include type
    :param exclude_children: disable the include type only for child processes, not the current process

    The ``identifier`` can be specified in multiple ways to disable an include type.
    See :py:meth:`~.DisabledIncludeTypes.disable` for details.
    """
    DISABLED_TYPES.enable(identifier=identifier, exclude_children=exclude_children)
|
maxfischer2781/include
|
include/__init__.py
|
enable
|
python
|
def enable(identifier, exclude_children=False):
    """
    Enable a previously disabled include type

    :param identifier: module or name of the include type
    :param exclude_children: disable the include type only for child processes, not the current process

    Thin module-level wrapper delegating to the ``DISABLED_TYPES`` singleton.
    """
    DISABLED_TYPES.enable(identifier=identifier, exclude_children=exclude_children)
|
Enable a previously disabled include type
:param identifier: module or name of the include type
:param exclude_children: disable the include type only for child processes, not the current process
The ``identifier`` can be specified in multiple ways to disable an include type.
See :py:meth:`~.DisabledIncludeTypes.disable` for details.
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/__init__.py#L81-L91
| null |
from __future__ import absolute_import
import sys
import weakref

# weak reference to installed hooks
# Maps an include type's IMPORT_PATH to its installed import hook; weak values
# let a hook disappear once nothing else references it.
_IMPORT_HOOKS = weakref.WeakValueDictionary()
# must have _IMPORT_HOOKS to bootstrap hook disabling
from .inhibit import DISABLED_TYPES


def path(file_path):
    """
    Include module code from a file identified by its path

    :param file_path: path to a file containing module code
    :type file_path: str
    :return: the imported module
    :rtype: module

    Comparable to ``execfile``, but respects the rules and constraints of modules.
    If invoked again with the same ``file_path``, the same module is returned.

    .. code:: python

        import include

        my_config = include.path('/etc/sysconfig/app_conf.py')
    """
    from . import files
    return _import_url(module_url=file_path, include_type=files)


def source(source_code):
    """
    Include module code directly from a string

    :param source_code: source code of the module
    :type source_code: str
    :return: the imported module
    :rtype: module

    Comparable to ``exec`` in a separate ``globals`` namespace, but respects the rules and constraints of modules.
    If invoked again with the same ``source_code``, the same module is returned.

    .. code:: python

        >>> import include
        >>> my_module = include.source(
        >>> \"\"\"
        ... def foo():
        ...     return {constant}
        ... \"\"\".format(constant=3))
        >>> my_module.foo() == 3
        True
    """
    from . import encoded
    return _import_url(module_url=source_code, include_type=encoded)


def _import_url(module_url, include_type):
    # Install the include type's import hook on first use, translate the raw
    # uri into an importable (encoded) module name, then import and return it.
    if include_type.IMPORT_PATH not in _IMPORT_HOOKS:
        include_type.install()
    import_hook = _IMPORT_HOOKS[include_type.IMPORT_PATH]
    module_path = import_hook.uri2module(module_url)
    __import__(module_path)
    return sys.modules[module_path]


def disable(identifier, children_only=False):
    """
    Disable an include type

    :param identifier: module or name of the include type
    :param children_only: disable the include type only for child processes, not the current process

    The ``identifier`` can be specified in multiple ways to disable an include type.
    See :py:meth:`~.DisabledIncludeTypes.disable` for details.
    """
    DISABLED_TYPES.disable(identifier=identifier, children_only=children_only)
|
maxfischer2781/include
|
include/encoded/import_hook.py
|
EncodedModuleLoader.module2uri
|
python
|
def module2uri(self, module_name):
encoded_str = super(EncodedModuleLoader, self).module2uri(module_name)
encoded = encoded_str.encode('ASCII')
compressed = base64.b64decode(encoded, b'+&')
return zlib.decompress(compressed)
|
Convert an encoded module name to an unencoded source uri
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/encoded/import_hook.py#L17-L22
|
[
"def module2uri(self, module_name):\n \"\"\"Convert an encoded module name to an unencoded source uri\"\"\"\n assert module_name.startswith(self.module_prefix), 'incompatible module name'\n path = module_name[len(self.module_prefix):]\n path = path.replace('&#DOT', '.')\n return path.replace('&#SEP', os.sep)\n"
] |
class EncodedModuleLoader(import_hook.BaseIncludeLoader):
"""
Load python modules from their encoded content
This import hook allows storing and using module content as a compressed
data blob.
"""
def uri2module(self, uri):
"""Convert an unencoded source uri to an encoded module name"""
# uri is the source code of the module
compressed = zlib.compress(uri)
encoded = base64.b64encode(compressed, b'+&')
encoded_str = encoded.decode('ASCII')
return super(EncodedModuleLoader, self).uri2module(encoded_str)
def load_module(self, name):
"""
Load and return a module
Always returns the corresponding module. If the module is already
loaded, the existing module is returned.
"""
if name in sys.modules:
return sys.modules[name]
module_source = self.module2uri(name)
module_container = tempfile.NamedTemporaryFile(suffix='.py', delete=False)
with module_container:
module_container.write(module_source)
module_container.file.seek(0)
module = imp.load_module(name, module_container.file, module_container.name, ('.py', 'U', imp.PY_SOURCE))
module.__source__ = module_container
module.__loader__ = self
sys.modules[name] = module
return module
|
maxfischer2781/include
|
include/encoded/import_hook.py
|
EncodedModuleLoader.uri2module
|
python
|
def uri2module(self, uri):
# uri is the source code of the module
compressed = zlib.compress(uri)
encoded = base64.b64encode(compressed, b'+&')
encoded_str = encoded.decode('ASCII')
return super(EncodedModuleLoader, self).uri2module(encoded_str)
|
Convert an unencoded source uri to an encoded module name
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/encoded/import_hook.py#L24-L30
|
[
"def uri2module(self, uri):\n \"\"\"Convert an unencoded source uri to an encoded module name\"\"\"\n module_name = uri.replace('.', '&#DOT')\n module_name = module_name.replace(os.sep, '&#SEP')\n return self.module_prefix + module_name\n"
] |
class EncodedModuleLoader(import_hook.BaseIncludeLoader):
"""
Load python modules from their encoded content
This import hook allows storing and using module content as a compressed
data blob.
"""
def module2uri(self, module_name):
"""Convert an encoded module name to an unencoded source uri"""
encoded_str = super(EncodedModuleLoader, self).module2uri(module_name)
encoded = encoded_str.encode('ASCII')
compressed = base64.b64decode(encoded, b'+&')
return zlib.decompress(compressed)
def load_module(self, name):
"""
Load and return a module
Always returns the corresponding module. If the module is already
loaded, the existing module is returned.
"""
if name in sys.modules:
return sys.modules[name]
module_source = self.module2uri(name)
module_container = tempfile.NamedTemporaryFile(suffix='.py', delete=False)
with module_container:
module_container.write(module_source)
module_container.file.seek(0)
module = imp.load_module(name, module_container.file, module_container.name, ('.py', 'U', imp.PY_SOURCE))
module.__source__ = module_container
module.__loader__ = self
sys.modules[name] = module
return module
|
maxfischer2781/include
|
include/encoded/import_hook.py
|
EncodedModuleLoader.load_module
|
python
|
def load_module(self, name):
if name in sys.modules:
return sys.modules[name]
module_source = self.module2uri(name)
module_container = tempfile.NamedTemporaryFile(suffix='.py', delete=False)
with module_container:
module_container.write(module_source)
module_container.file.seek(0)
module = imp.load_module(name, module_container.file, module_container.name, ('.py', 'U', imp.PY_SOURCE))
module.__source__ = module_container
module.__loader__ = self
sys.modules[name] = module
return module
|
Load and return a module
Always returns the corresponding module. If the module is already
loaded, the existing module is returned.
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/encoded/import_hook.py#L32-L50
|
[
"def module2uri(self, module_name):\n \"\"\"Convert an encoded module name to an unencoded source uri\"\"\"\n encoded_str = super(EncodedModuleLoader, self).module2uri(module_name)\n encoded = encoded_str.encode('ASCII')\n compressed = base64.b64decode(encoded, b'+&')\n return zlib.decompress(compressed)\n"
] |
class EncodedModuleLoader(import_hook.BaseIncludeLoader):
"""
Load python modules from their encoded content
This import hook allows storing and using module content as a compressed
data blob.
"""
def module2uri(self, module_name):
"""Convert an encoded module name to an unencoded source uri"""
encoded_str = super(EncodedModuleLoader, self).module2uri(module_name)
encoded = encoded_str.encode('ASCII')
compressed = base64.b64decode(encoded, b'+&')
return zlib.decompress(compressed)
def uri2module(self, uri):
"""Convert an unencoded source uri to an encoded module name"""
# uri is the source code of the module
compressed = zlib.compress(uri)
encoded = base64.b64encode(compressed, b'+&')
encoded_str = encoded.decode('ASCII')
return super(EncodedModuleLoader, self).uri2module(encoded_str)
|
maxfischer2781/include
|
include/mount/__init__.py
|
MountLoader.load_module
|
python
|
def load_module(self, name):
if name in sys.modules:
return sys.modules[name]
# load the actual import hook module
module_name = self.mount2name(name)
__import__(module_name)
# alias the import hook module to the mount, so both can be used interchangeably
module = sys.modules[name] = sys.modules[module_name]
module.install()
return module
|
Load and return a module
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/mount/__init__.py#L9-L19
|
[
"def mount2name(self, mount):\n \"\"\"Convert a mount name to a module name\"\"\"\n if not self.is_mount(mount):\n raise ValueError('%r is not a supported mount name' % (mount,))\n return mount.replace(self.mount_prefix, self.module_prefix)\n"
] |
class MountLoader(object):
def __init__(self, mount_prefix, module_prefix):
self.mount_prefix = mount_prefix
self.module_prefix = module_prefix
def find_module(self, name, path=None):
if name.startswith(self.mount_prefix) and name.count('.') - self.mount_prefix.count('.') == 1:
return self
return None
def is_module(self, name):
"""Test that `name` is a module name"""
if self.module_prefix.startswith(self.mount_prefix):
return name.startswith(self.module_prefix)
return name.startswith(self.module_prefix) and not name.startswith(self.mount_prefix)
def is_mount(self, name):
"""Test that `name` is a mount name"""
if self.mount_prefix.startswith(self.module_prefix):
return name.startswith(self.mount_prefix)
return name.startswith(self.mount_prefix) and not name.startswith(self.module_prefix)
def name2mount(self, name):
"""Convert a module name to a mount name"""
if not self.is_module(name):
raise ValueError('%r is not a supported module name' % (name, ))
return name.replace(self.module_prefix, self.mount_prefix)
def mount2name(self, mount):
"""Convert a mount name to a module name"""
if not self.is_mount(mount):
raise ValueError('%r is not a supported mount name' % (mount,))
return mount.replace(self.mount_prefix, self.module_prefix)
|
maxfischer2781/include
|
include/mount/__init__.py
|
MountLoader.is_module
|
python
|
def is_module(self, name):
if self.module_prefix.startswith(self.mount_prefix):
return name.startswith(self.module_prefix)
return name.startswith(self.module_prefix) and not name.startswith(self.mount_prefix)
|
Test that `name` is a module name
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/mount/__init__.py#L26-L30
| null |
class MountLoader(object):
def __init__(self, mount_prefix, module_prefix):
self.mount_prefix = mount_prefix
self.module_prefix = module_prefix
def load_module(self, name):
"""Load and return a module"""
if name in sys.modules:
return sys.modules[name]
# load the actual import hook module
module_name = self.mount2name(name)
__import__(module_name)
# alias the import hook module to the mount, so both can be used interchangeably
module = sys.modules[name] = sys.modules[module_name]
module.install()
return module
def find_module(self, name, path=None):
if name.startswith(self.mount_prefix) and name.count('.') - self.mount_prefix.count('.') == 1:
return self
return None
def is_mount(self, name):
"""Test that `name` is a mount name"""
if self.mount_prefix.startswith(self.module_prefix):
return name.startswith(self.mount_prefix)
return name.startswith(self.mount_prefix) and not name.startswith(self.module_prefix)
def name2mount(self, name):
"""Convert a module name to a mount name"""
if not self.is_module(name):
raise ValueError('%r is not a supported module name' % (name, ))
return name.replace(self.module_prefix, self.mount_prefix)
def mount2name(self, mount):
"""Convert a mount name to a module name"""
if not self.is_mount(mount):
raise ValueError('%r is not a supported mount name' % (mount,))
return mount.replace(self.mount_prefix, self.module_prefix)
|
maxfischer2781/include
|
include/mount/__init__.py
|
MountLoader.is_mount
|
python
|
def is_mount(self, name):
if self.mount_prefix.startswith(self.module_prefix):
return name.startswith(self.mount_prefix)
return name.startswith(self.mount_prefix) and not name.startswith(self.module_prefix)
|
Test that `name` is a mount name
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/mount/__init__.py#L32-L36
| null |
class MountLoader(object):
def __init__(self, mount_prefix, module_prefix):
self.mount_prefix = mount_prefix
self.module_prefix = module_prefix
def load_module(self, name):
"""Load and return a module"""
if name in sys.modules:
return sys.modules[name]
# load the actual import hook module
module_name = self.mount2name(name)
__import__(module_name)
# alias the import hook module to the mount, so both can be used interchangeably
module = sys.modules[name] = sys.modules[module_name]
module.install()
return module
def find_module(self, name, path=None):
if name.startswith(self.mount_prefix) and name.count('.') - self.mount_prefix.count('.') == 1:
return self
return None
def is_module(self, name):
"""Test that `name` is a module name"""
if self.module_prefix.startswith(self.mount_prefix):
return name.startswith(self.module_prefix)
return name.startswith(self.module_prefix) and not name.startswith(self.mount_prefix)
def name2mount(self, name):
"""Convert a module name to a mount name"""
if not self.is_module(name):
raise ValueError('%r is not a supported module name' % (name, ))
return name.replace(self.module_prefix, self.mount_prefix)
def mount2name(self, mount):
"""Convert a mount name to a module name"""
if not self.is_mount(mount):
raise ValueError('%r is not a supported mount name' % (mount,))
return mount.replace(self.mount_prefix, self.module_prefix)
|
maxfischer2781/include
|
include/mount/__init__.py
|
MountLoader.name2mount
|
python
|
def name2mount(self, name):
if not self.is_module(name):
raise ValueError('%r is not a supported module name' % (name, ))
return name.replace(self.module_prefix, self.mount_prefix)
|
Convert a module name to a mount name
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/mount/__init__.py#L38-L42
|
[
"def is_module(self, name):\n \"\"\"Test that `name` is a module name\"\"\"\n if self.module_prefix.startswith(self.mount_prefix):\n return name.startswith(self.module_prefix)\n return name.startswith(self.module_prefix) and not name.startswith(self.mount_prefix)\n"
] |
class MountLoader(object):
def __init__(self, mount_prefix, module_prefix):
self.mount_prefix = mount_prefix
self.module_prefix = module_prefix
def load_module(self, name):
"""Load and return a module"""
if name in sys.modules:
return sys.modules[name]
# load the actual import hook module
module_name = self.mount2name(name)
__import__(module_name)
# alias the import hook module to the mount, so both can be used interchangeably
module = sys.modules[name] = sys.modules[module_name]
module.install()
return module
def find_module(self, name, path=None):
if name.startswith(self.mount_prefix) and name.count('.') - self.mount_prefix.count('.') == 1:
return self
return None
def is_module(self, name):
"""Test that `name` is a module name"""
if self.module_prefix.startswith(self.mount_prefix):
return name.startswith(self.module_prefix)
return name.startswith(self.module_prefix) and not name.startswith(self.mount_prefix)
def is_mount(self, name):
"""Test that `name` is a mount name"""
if self.mount_prefix.startswith(self.module_prefix):
return name.startswith(self.mount_prefix)
return name.startswith(self.mount_prefix) and not name.startswith(self.module_prefix)
def mount2name(self, mount):
"""Convert a mount name to a module name"""
if not self.is_mount(mount):
raise ValueError('%r is not a supported mount name' % (mount,))
return mount.replace(self.mount_prefix, self.module_prefix)
|
maxfischer2781/include
|
include/mount/__init__.py
|
MountLoader.mount2name
|
python
|
def mount2name(self, mount):
if not self.is_mount(mount):
raise ValueError('%r is not a supported mount name' % (mount,))
return mount.replace(self.mount_prefix, self.module_prefix)
|
Convert a mount name to a module name
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/mount/__init__.py#L44-L48
|
[
"def is_mount(self, name):\n \"\"\"Test that `name` is a mount name\"\"\"\n if self.mount_prefix.startswith(self.module_prefix):\n return name.startswith(self.mount_prefix)\n return name.startswith(self.mount_prefix) and not name.startswith(self.module_prefix)\n"
] |
class MountLoader(object):
def __init__(self, mount_prefix, module_prefix):
self.mount_prefix = mount_prefix
self.module_prefix = module_prefix
def load_module(self, name):
"""Load and return a module"""
if name in sys.modules:
return sys.modules[name]
# load the actual import hook module
module_name = self.mount2name(name)
__import__(module_name)
# alias the import hook module to the mount, so both can be used interchangeably
module = sys.modules[name] = sys.modules[module_name]
module.install()
return module
def find_module(self, name, path=None):
if name.startswith(self.mount_prefix) and name.count('.') - self.mount_prefix.count('.') == 1:
return self
return None
def is_module(self, name):
"""Test that `name` is a module name"""
if self.module_prefix.startswith(self.mount_prefix):
return name.startswith(self.module_prefix)
return name.startswith(self.module_prefix) and not name.startswith(self.mount_prefix)
def is_mount(self, name):
"""Test that `name` is a mount name"""
if self.mount_prefix.startswith(self.module_prefix):
return name.startswith(self.mount_prefix)
return name.startswith(self.mount_prefix) and not name.startswith(self.module_prefix)
def name2mount(self, name):
"""Convert a module name to a mount name"""
if not self.is_module(name):
raise ValueError('%r is not a supported module name' % (name, ))
return name.replace(self.module_prefix, self.mount_prefix)
|
maxfischer2781/include
|
include/base/import_hook.py
|
BaseIncludeLoader.module2uri
|
python
|
def module2uri(self, module_name):
assert module_name.startswith(self.module_prefix), 'incompatible module name'
path = module_name[len(self.module_prefix):]
path = path.replace('&#DOT', '.')
return path.replace('&#SEP', os.sep)
|
Convert an encoded module name to an unencoded source uri
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/base/import_hook.py#L47-L52
| null |
class BaseIncludeLoader(object):
"""
Import hook to load Python modules from an arbitrary location
:param module_prefix: prefix for modules to import
:type module_prefix: str
Base class for import hooks to non-standard code sources. Implements the
general structure for encoded sources: a module source translates to an
artificial module path of the form ``<module_prefix>.<encoded_name>``. The
``module_prefix`` identifies the code source type (and import hook) while the
``encoded_name`` contains all required information to retrieve the code.
For example, a ``module_prefix`` of ``include.type.files`` could identify
a source file type, and an ``encoded_name`` of ``SLASHtmpSLASHfooDOTpy`` point
to the path ``/tmp/foo.py``. The resulting module would appear as
``include.type.files.SLASHtmpSLASHfooDOTpy``.
Note that ``module_prefix`` must point to a valid package, not a module.
It will be actually imported by the regular import machinery, and can be
used to bootstrap hooks.
The ``encoded_name`` is a free form field. The base class provides means to
escape invalid and reserved symbols (``/`` and ``.``), but subclasses are
free to use them if it is suitable for them. Hooks should use ``encoded_name``
to store a URI (or similar) to retrieve source code. As per Python rules,
including a dot (``.``) in the ``encoded_name`` requires the hook to import
each portion separately.
"""
def __init__(self, module_prefix):
self._module_prefix = ''
self.module_prefix = module_prefix
@property
def module_prefix(self):
raw_prefix = self._module_prefix.rstrip('.')
include_type = raw_prefix.split('.')[-1]
return raw_prefix + '.' + include_type.upper() + '::'
@module_prefix.setter
def module_prefix(self, value):
self._module_prefix = value.rstrip('.')
def uri2module(self, uri):
"""Convert an unencoded source uri to an encoded module name"""
module_name = uri.replace('.', '&#DOT')
module_name = module_name.replace(os.sep, '&#SEP')
return self.module_prefix + module_name
def load_module(self, name):
"""
Load and return a module
Always returns the corresponding module. If the module is already
loaded, the existing module is returned.
"""
raise NotImplementedError
def find_module(self, fullname, path=None):
"""
Find the appropriate loader for module ``name``
:param fullname: ``__name__`` of the module to import
:type fullname: str
:param path: ``__path__`` of the *parent* package already imported
:type path: str or None
"""
# path points to the top-level package path if any
# and we can only import sub-modules/-packages
if path is None:
return
if fullname.startswith(self.module_prefix):
return self
else:
return None
def __repr__(self):
return '<%s.%s for path %r at 0x%x>' % (
self.__class__.__module__, self.__class__.__name__, self._module_prefix, id(self)
)
|
maxfischer2781/include
|
include/base/import_hook.py
|
BaseIncludeLoader.uri2module
|
python
|
def uri2module(self, uri):
module_name = uri.replace('.', '&#DOT')
module_name = module_name.replace(os.sep, '&#SEP')
return self.module_prefix + module_name
|
Convert an unencoded source uri to an encoded module name
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/base/import_hook.py#L54-L58
| null |
class BaseIncludeLoader(object):
"""
Import hook to load Python modules from an arbitrary location
:param module_prefix: prefix for modules to import
:type module_prefix: str
Base class for import hooks to non-standard code sources. Implements the
general structure for encoded sources: a module source translates to an
artificial module path of the form ``<module_prefix>.<encoded_name>``. The
``module_prefix`` identifies the code source type (and import hook) while the
``encoded_name`` contains all required information to retrieve the code.
For example, a ``module_prefix`` of ``include.type.files`` could identify
a source file type, and an ``encoded_name`` of ``SLASHtmpSLASHfooDOTpy`` point
to the path ``/tmp/foo.py``. The resulting module would appear as
``include.type.files.SLASHtmpSLASHfooDOTpy``.
Note that ``module_prefix`` must point to a valid package, not a module.
It will be actually imported by the regular import machinery, and can be
used to bootstrap hooks.
The ``encoded_name`` is a free form field. The base class provides means to
escape invalid and reserved symbols (``/`` and ``.``), but subclasses are
free to use them if it is suitable for them. Hooks should use ``encoded_name``
to store a URI (or similar) to retrieve source code. As per Python rules,
including a dot (``.``) in the ``encoded_name`` requires the hook to import
each portion separately.
"""
def __init__(self, module_prefix):
self._module_prefix = ''
self.module_prefix = module_prefix
@property
def module_prefix(self):
raw_prefix = self._module_prefix.rstrip('.')
include_type = raw_prefix.split('.')[-1]
return raw_prefix + '.' + include_type.upper() + '::'
@module_prefix.setter
def module_prefix(self, value):
self._module_prefix = value.rstrip('.')
def module2uri(self, module_name):
"""Convert an encoded module name to an unencoded source uri"""
assert module_name.startswith(self.module_prefix), 'incompatible module name'
path = module_name[len(self.module_prefix):]
path = path.replace('&#DOT', '.')
return path.replace('&#SEP', os.sep)
def load_module(self, name):
"""
Load and return a module
Always returns the corresponding module. If the module is already
loaded, the existing module is returned.
"""
raise NotImplementedError
def find_module(self, fullname, path=None):
"""
Find the appropriate loader for module ``name``
:param fullname: ``__name__`` of the module to import
:type fullname: str
:param path: ``__path__`` of the *parent* package already imported
:type path: str or None
"""
# path points to the top-level package path if any
# and we can only import sub-modules/-packages
if path is None:
return
if fullname.startswith(self.module_prefix):
return self
else:
return None
def __repr__(self):
return '<%s.%s for path %r at 0x%x>' % (
self.__class__.__module__, self.__class__.__name__, self._module_prefix, id(self)
)
|
maxfischer2781/include
|
include/base/import_hook.py
|
BaseIncludeLoader.find_module
|
python
|
def find_module(self, fullname, path=None):
# path points to the top-level package path if any
# and we can only import sub-modules/-packages
if path is None:
return
if fullname.startswith(self.module_prefix):
return self
else:
return None
|
Find the appropriate loader for module ``name``
:param fullname: ``__name__`` of the module to import
:type fullname: str
:param path: ``__path__`` of the *parent* package already imported
:type path: str or None
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/base/import_hook.py#L69-L85
| null |
class BaseIncludeLoader(object):
"""
Import hook to load Python modules from an arbitrary location
:param module_prefix: prefix for modules to import
:type module_prefix: str
Base class for import hooks to non-standard code sources. Implements the
general structure for encoded sources: a module source translates to an
artificial module path of the form ``<module_prefix>.<encoded_name>``. The
``module_prefix`` identifies the code source type (and import hook) while the
``encoded_name`` contains all required information to retrieve the code.
For example, a ``module_prefix`` of ``include.type.files`` could identify
a source file type, and an ``encoded_name`` of ``SLASHtmpSLASHfooDOTpy`` point
to the path ``/tmp/foo.py``. The resulting module would appear as
``include.type.files.SLASHtmpSLASHfooDOTpy``.
Note that ``module_prefix`` must point to a valid package, not a module.
It will be actually imported by the regular import machinery, and can be
used to bootstrap hooks.
The ``encoded_name`` is a free form field. The base class provides means to
escape invalid and reserved symbols (``/`` and ``.``), but subclasses are
free to use them if it is suitable for them. Hooks should use ``encoded_name``
to store a URI (or similar) to retrieve source code. As per Python rules,
including a dot (``.``) in the ``encoded_name`` requires the hook to import
each portion separately.
"""
def __init__(self, module_prefix):
self._module_prefix = ''
self.module_prefix = module_prefix
@property
def module_prefix(self):
raw_prefix = self._module_prefix.rstrip('.')
include_type = raw_prefix.split('.')[-1]
return raw_prefix + '.' + include_type.upper() + '::'
@module_prefix.setter
def module_prefix(self, value):
self._module_prefix = value.rstrip('.')
def module2uri(self, module_name):
"""Convert an encoded module name to an unencoded source uri"""
assert module_name.startswith(self.module_prefix), 'incompatible module name'
path = module_name[len(self.module_prefix):]
path = path.replace('&#DOT', '.')
return path.replace('&#SEP', os.sep)
def uri2module(self, uri):
"""Convert an unencoded source uri to an encoded module name"""
module_name = uri.replace('.', '&#DOT')
module_name = module_name.replace(os.sep, '&#SEP')
return self.module_prefix + module_name
def load_module(self, name):
"""
Load and return a module
Always returns the corresponding module. If the module is already
loaded, the existing module is returned.
"""
raise NotImplementedError
def __repr__(self):
return '<%s.%s for path %r at 0x%x>' % (
self.__class__.__module__, self.__class__.__name__, self._module_prefix, id(self)
)
|
maxfischer2781/include
|
include/files/import_hook.py
|
FilePathLoader.load_module
|
python
|
def load_module(self, name):
if name in sys.modules:
return sys.modules[name]
path = self.module2uri(name)
if os.path.isfile(path):
return self._load_module(name, path)
elif os.path.isdir(path):
return self._load_package(name, path)
else:
raise ImportError("Missing module source file %r" % path)
|
Load and return a module
Always returns the corresponding module. If the module is already
loaded, the existing module is returned.
|
train
|
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/files/import_hook.py#L15-L30
|
[
"def module2uri(self, module_name):\n \"\"\"Convert an encoded module name to an unencoded source uri\"\"\"\n assert module_name.startswith(self.module_prefix), 'incompatible module name'\n path = module_name[len(self.module_prefix):]\n path = path.replace('&#DOT', '.')\n return path.replace('&#SEP', os.sep)\n",
"def _load_module(self, name, path):\n module = imp.load_source(name, path)\n module.__loader__ = self\n sys.modules[name] = module\n return module\n"
] |
class FilePathLoader(import_hook.BaseIncludeLoader):
"""
Load python file from their path
This import hook allows using encoded paths as module names to load modules
directly from their file.
"""
def _load_module(self, name, path):
module = imp.load_source(name, path)
module.__loader__ = self
sys.modules[name] = module
return module
def _load_package(self, name, path):
# regular package with content
init_path = os.path.join(path, '__init__.py')
if os.path.exists(init_path):
module = imp.load_source(name, init_path)
# Py3 namespace package
elif os.path.isdir(path):
module = imp.new_module(name)
else:
raise ImportError("Missing package source directory %r" % path)
module.__loader__ = self
sys.modules[name] = module
return module
|
jspricke/python-abook
|
abook.py
|
abook2vcf
|
python
|
def abook2vcf():
from argparse import ArgumentParser, FileType
from os.path import expanduser
from sys import stdout
parser = ArgumentParser(description='Converter from Abook to vCard syntax.')
parser.add_argument('infile', nargs='?', default=expanduser('~/.abook/addressbook'),
help='The Abook file to process (default: ~/.abook/addressbook)')
parser.add_argument('outfile', nargs='?', type=FileType('w'), default=stdout,
help='Output vCard file (default: stdout)')
args = parser.parse_args()
args.outfile.write(Abook(args.infile).to_vcf())
|
Command line tool to convert from Abook to vCard
|
train
|
https://github.com/jspricke/python-abook/blob/cc58ad998303ce9a8b347a3317158c8f7cd0529f/abook.py#L328-L341
|
[
"def to_vcf(self):\n \"\"\" Converts to vCard string\"\"\"\n return '\\r\\n'.join([v.serialize() for v in self.to_vcards()])\n"
] |
# Python library to convert between Abook and vCard
#
# Copyright (C) 2013-2018 Jochen Sprickerhof
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Python library to convert between Abook and vCard"""
from configparser import ConfigParser
from hashlib import sha1
from os.path import getmtime, dirname, expanduser, join
from socket import getfqdn
from threading import Lock
from vobject import readOne, readComponents, vCard
from vobject.vcard import Name, Address
class Abook(object):
"""Represents a Abook addressbook"""
def __init__(self, filename=expanduser('~/.abook/addressbook')):
"""Constructor
filename -- the filename to load (default: ~/.abook/addressbook)
"""
self._filename = filename
self._last_modified = 0
self._book = []
self._lock = Lock()
self._update()
def _update(self):
""" Update internal state."""
with self._lock:
if getmtime(self._filename) > self._last_modified:
self._last_modified = getmtime(self._filename)
self._book = ConfigParser(default_section='format')
self._book.read(self._filename)
def to_vcf(self):
""" Converts to vCard string"""
return '\r\n'.join([v.serialize() for v in self.to_vcards()])
def append(self, text):
"""Appends an address to the Abook addressbook"""
return self.append_vobject(readOne(text))
def append_vobject(self, vcard, filename=None):
"""Appends an address to the Abook addressbook
vcard -- vCard to append
filename -- unused
return the new UID of the appended vcard
"""
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
section = str(max([-1] + [int(k) for k in book.sections()]) + 1)
Abook.to_abook(vcard, section, book, self._filename)
with open(self._filename, 'w') as fp:
book.write(fp, False)
return Abook._gen_uid(book[section])
def remove(self, uid, filename=None):
"""Removes an address to the Abook addressbook
uid -- UID of the entry to remove
"""
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
del book[uid.split('@')[0]]
with open(self._filename, 'w') as fp:
book.write(fp, False)
def replace(self, uid, text):
"""Updates an address to the Abook addressbook"""
return self.replace_vobject(uid, readOne(text))
def replace_vobject(self, uid, vcard, filename=None):
"""Updates an address to the Abook addressbook
uid -- uid of the entry to replace
vcard -- vCard of the new content
filename -- unused
"""
entry = uid.split('@')[0]
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
Abook.to_abook(vcard, entry, book, self._filename)
with open(self._filename, 'w') as fp:
book.write(fp, False)
return Abook._gen_uid(self._book[entry])
def move_vobject(self, uuid, from_filename, to_filename):
"""Updates the addressbook of an address
Not implemented
"""
pass
@staticmethod
def _gen_uid(entry):
"""Generates a UID based on the index in the Abook file
Not that the index is just a number and abook tends to regenerate it upon sorting.
"""
return '%s@%s' % (entry.name, getfqdn())
@staticmethod
def _gen_name(name):
"""Splits the name into family and given name"""
return Name(family=name.split(' ')[-1], given=name.split(' ')[:-1])
@staticmethod
def _gen_addr(entry):
"""Generates a vCard Address object"""
return Address(street=entry.get('address', ''),
extended=entry.get('address2', ''),
city=entry.get('city', ''),
region=entry.get('state', ''),
code=entry.get('zip', ''),
country=entry.get('country', ''))
def _add_photo(self, card, name):
"""Tries to load a photo and add it to the vCard"""
try:
photo_file = join(dirname(self._filename), 'photo/%s.jpeg' % name)
jpeg = open(photo_file, 'rb').read()
photo = card.add('photo')
photo.type_param = 'jpeg'
photo.encoding_param = 'b'
photo.value = jpeg
except IOError:
pass
def _to_vcard(self, entry):
"""Return a vCard of the Abook entry"""
card = vCard()
card.add('uid').value = Abook._gen_uid(entry)
card.add('fn').value = entry['name']
card.add('n').value = Abook._gen_name(entry['name'])
if 'email' in entry:
for email in entry['email'].split(','):
card.add('email').value = email
addr_comps = ['address', 'address2', 'city', 'country', 'zip', 'country']
if any(comp in entry for comp in addr_comps):
card.add('adr').value = Abook._gen_addr(entry)
if 'other' in entry:
tel = card.add('tel')
tel.value = entry['other']
if 'phone' in entry:
tel = card.add('tel')
tel.type_param = 'home'
tel.value = entry['phone']
if 'workphone' in entry:
tel = card.add('tel')
tel.type_param = 'work'
tel.value = entry['workphone']
if 'mobile' in entry:
tel = card.add('tel')
tel.type_param = 'cell'
tel.value = entry['mobile']
if 'nick' in entry:
card.add('nickname').value = entry['nick']
if 'url' in entry:
card.add('url').value = entry['url']
if 'notes' in entry:
card.add('note').value = entry['notes']
self._add_photo(card, entry['name'])
return card
def get_uids(self, filename=None):
"""Return a list of UIDs
filename -- unused, for API compatibility only
"""
self._update()
return [Abook._gen_uid(self._book[entry]) for entry in self._book.sections()]
def get_filesnames(self):
"""All filenames"""
return [self._filename]
def get_meta(self):
"""Meta tags of the vCard collection"""
return {'tag': 'VADDRESSBOOK'}
def last_modified(self):
"""Last time the Abook file was parsed"""
self._update()
return self._last_modified
def to_vcards(self):
"""Return a list of vCards"""
self._update()
return [self._to_vcard(self._book[entry]) for entry in self._book.sections()]
def to_vobject_etag(self, filename, uid):
"""Return vCard and etag of one Abook entry
filename -- unused, for API compatibility only
uid -- the UID of the Abook entry
"""
return self.to_vobjects(filename, [uid])[0][1:3]
def to_vobjects(self, filename, uids=None):
"""Return vCards and etags of all Abook entries in uids
filename -- unused, for API compatibility only
uids -- the UIDs of the Abook entries (all if None)
"""
self._update()
if not uids:
uids = self.get_uids(filename)
items = []
for uid in uids:
entry = self._book[uid.split('@')[0]]
# TODO add getmtime of photo
etag = sha1(str(dict(entry)).encode('utf-8'))
items.append((uid, self._to_vcard(entry), '"%s"' % etag.hexdigest()))
return items
def to_vobject(self, filename=None, uid=None):
"""Return the vCard corresponding to the uid
filename -- unused, for API compatibility only
uid -- the UID to get (required)
"""
self._update()
return self._to_vcard(self._book[uid.split('@')[0]])
@staticmethod
def _conv_adr(adr, entry):
"""Converts to Abook address format"""
if adr.value.street:
entry['address'] = adr.value.street
if adr.value.extended:
entry['address2'] = adr.value.extended
if adr.value.city:
entry['city'] = adr.value.city
if adr.value.region:
entry['state'] = adr.value.region
if adr.value.code and adr.value.code != '0':
entry['zip'] = adr.value.code
if adr.value.country:
entry['country'] = adr.value.country
@staticmethod
def _conv_tel_list(tel_list, entry):
"""Converts to Abook phone types"""
for tel in tel_list:
if not hasattr(tel, 'TYPE_param'):
entry['other'] = tel.value
elif tel.TYPE_param.lower() == 'home':
entry['phone'] = tel.value
elif tel.TYPE_param.lower() == 'work':
entry['workphone'] = tel.value
elif tel.TYPE_param.lower() == 'cell':
entry['mobile'] = tel.value
@staticmethod
def to_abook(card, section, book, bookfile=None):
"""Converts a vCard to Abook"""
book[section] = {}
book[section]['name'] = card.fn.value
if hasattr(card, 'email'):
book[section]['email'] = ','.join([e.value for e in card.email_list])
if hasattr(card, 'adr'):
Abook._conv_adr(card.adr, book[section])
if hasattr(card, 'tel_list'):
Abook._conv_tel_list(card.tel_list, book[section])
if hasattr(card, 'nickname') and card.nickname.value:
book[section]['nick'] = card.nickname.value
if hasattr(card, 'url') and card.url.value:
book[section]['url'] = card.url.value
if hasattr(card, 'note') and card.note.value:
book[section]['notes'] = card.note.value
if hasattr(card, 'photo') and bookfile:
try:
photo_file = join(dirname(bookfile), 'photo/%s.%s' % (card.fn.value, card.photo.TYPE_param))
open(photo_file, 'wb').write(card.photo.value)
except IOError:
pass
@staticmethod
def abook_file(vcard, bookfile):
"""Write a new Abook file with the given vcards"""
book = ConfigParser(default_section='format')
book['format'] = {}
book['format']['program'] = 'abook'
book['format']['version'] = '0.6.1'
for (i, card) in enumerate(readComponents(vcard.read())):
Abook.to_abook(card, str(i), book, bookfile)
with open(bookfile, 'w') as fp:
book.write(fp, False)
def vcf2abook():
"""Command line tool to convert from vCard to Abook"""
from argparse import ArgumentParser, FileType
from sys import stdin
parser = ArgumentParser(description='Converter from vCard to Abook syntax.')
parser.add_argument('infile', nargs='?', type=FileType('r'), default=stdin,
help='Input vCard file (default: stdin)')
parser.add_argument('outfile', nargs='?', default=expanduser('~/.abook/addressbook'),
help='Output Abook file (default: ~/.abook/addressbook)')
args = parser.parse_args()
Abook.abook_file(args.infile, args.outfile)
|
jspricke/python-abook
|
abook.py
|
vcf2abook
|
python
|
def vcf2abook():
from argparse import ArgumentParser, FileType
from sys import stdin
parser = ArgumentParser(description='Converter from vCard to Abook syntax.')
parser.add_argument('infile', nargs='?', type=FileType('r'), default=stdin,
help='Input vCard file (default: stdin)')
parser.add_argument('outfile', nargs='?', default=expanduser('~/.abook/addressbook'),
help='Output Abook file (default: ~/.abook/addressbook)')
args = parser.parse_args()
Abook.abook_file(args.infile, args.outfile)
|
Command line tool to convert from vCard to Abook
|
train
|
https://github.com/jspricke/python-abook/blob/cc58ad998303ce9a8b347a3317158c8f7cd0529f/abook.py#L344-L356
|
[
"def abook_file(vcard, bookfile):\n \"\"\"Write a new Abook file with the given vcards\"\"\"\n book = ConfigParser(default_section='format')\n\n book['format'] = {}\n book['format']['program'] = 'abook'\n book['format']['version'] = '0.6.1'\n\n for (i, card) in enumerate(readComponents(vcard.read())):\n Abook.to_abook(card, str(i), book, bookfile)\n with open(bookfile, 'w') as fp:\n book.write(fp, False)\n"
] |
# Python library to convert between Abook and vCard
#
# Copyright (C) 2013-2018 Jochen Sprickerhof
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Python library to convert between Abook and vCard"""
from configparser import ConfigParser
from hashlib import sha1
from os.path import getmtime, dirname, expanduser, join
from socket import getfqdn
from threading import Lock
from vobject import readOne, readComponents, vCard
from vobject.vcard import Name, Address
class Abook(object):
"""Represents a Abook addressbook"""
def __init__(self, filename=expanduser('~/.abook/addressbook')):
"""Constructor
filename -- the filename to load (default: ~/.abook/addressbook)
"""
self._filename = filename
self._last_modified = 0
self._book = []
self._lock = Lock()
self._update()
def _update(self):
""" Update internal state."""
with self._lock:
if getmtime(self._filename) > self._last_modified:
self._last_modified = getmtime(self._filename)
self._book = ConfigParser(default_section='format')
self._book.read(self._filename)
def to_vcf(self):
""" Converts to vCard string"""
return '\r\n'.join([v.serialize() for v in self.to_vcards()])
def append(self, text):
"""Appends an address to the Abook addressbook"""
return self.append_vobject(readOne(text))
def append_vobject(self, vcard, filename=None):
"""Appends an address to the Abook addressbook
vcard -- vCard to append
filename -- unused
return the new UID of the appended vcard
"""
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
section = str(max([-1] + [int(k) for k in book.sections()]) + 1)
Abook.to_abook(vcard, section, book, self._filename)
with open(self._filename, 'w') as fp:
book.write(fp, False)
return Abook._gen_uid(book[section])
def remove(self, uid, filename=None):
"""Removes an address to the Abook addressbook
uid -- UID of the entry to remove
"""
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
del book[uid.split('@')[0]]
with open(self._filename, 'w') as fp:
book.write(fp, False)
def replace(self, uid, text):
"""Updates an address to the Abook addressbook"""
return self.replace_vobject(uid, readOne(text))
def replace_vobject(self, uid, vcard, filename=None):
"""Updates an address to the Abook addressbook
uid -- uid of the entry to replace
vcard -- vCard of the new content
filename -- unused
"""
entry = uid.split('@')[0]
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
Abook.to_abook(vcard, entry, book, self._filename)
with open(self._filename, 'w') as fp:
book.write(fp, False)
return Abook._gen_uid(self._book[entry])
def move_vobject(self, uuid, from_filename, to_filename):
"""Updates the addressbook of an address
Not implemented
"""
pass
@staticmethod
def _gen_uid(entry):
"""Generates a UID based on the index in the Abook file
Not that the index is just a number and abook tends to regenerate it upon sorting.
"""
return '%s@%s' % (entry.name, getfqdn())
@staticmethod
def _gen_name(name):
"""Splits the name into family and given name"""
return Name(family=name.split(' ')[-1], given=name.split(' ')[:-1])
@staticmethod
def _gen_addr(entry):
"""Generates a vCard Address object"""
return Address(street=entry.get('address', ''),
extended=entry.get('address2', ''),
city=entry.get('city', ''),
region=entry.get('state', ''),
code=entry.get('zip', ''),
country=entry.get('country', ''))
def _add_photo(self, card, name):
"""Tries to load a photo and add it to the vCard"""
try:
photo_file = join(dirname(self._filename), 'photo/%s.jpeg' % name)
jpeg = open(photo_file, 'rb').read()
photo = card.add('photo')
photo.type_param = 'jpeg'
photo.encoding_param = 'b'
photo.value = jpeg
except IOError:
pass
def _to_vcard(self, entry):
"""Return a vCard of the Abook entry"""
card = vCard()
card.add('uid').value = Abook._gen_uid(entry)
card.add('fn').value = entry['name']
card.add('n').value = Abook._gen_name(entry['name'])
if 'email' in entry:
for email in entry['email'].split(','):
card.add('email').value = email
addr_comps = ['address', 'address2', 'city', 'country', 'zip', 'country']
if any(comp in entry for comp in addr_comps):
card.add('adr').value = Abook._gen_addr(entry)
if 'other' in entry:
tel = card.add('tel')
tel.value = entry['other']
if 'phone' in entry:
tel = card.add('tel')
tel.type_param = 'home'
tel.value = entry['phone']
if 'workphone' in entry:
tel = card.add('tel')
tel.type_param = 'work'
tel.value = entry['workphone']
if 'mobile' in entry:
tel = card.add('tel')
tel.type_param = 'cell'
tel.value = entry['mobile']
if 'nick' in entry:
card.add('nickname').value = entry['nick']
if 'url' in entry:
card.add('url').value = entry['url']
if 'notes' in entry:
card.add('note').value = entry['notes']
self._add_photo(card, entry['name'])
return card
def get_uids(self, filename=None):
"""Return a list of UIDs
filename -- unused, for API compatibility only
"""
self._update()
return [Abook._gen_uid(self._book[entry]) for entry in self._book.sections()]
def get_filesnames(self):
"""All filenames"""
return [self._filename]
def get_meta(self):
"""Meta tags of the vCard collection"""
return {'tag': 'VADDRESSBOOK'}
def last_modified(self):
"""Last time the Abook file was parsed"""
self._update()
return self._last_modified
def to_vcards(self):
"""Return a list of vCards"""
self._update()
return [self._to_vcard(self._book[entry]) for entry in self._book.sections()]
def to_vobject_etag(self, filename, uid):
"""Return vCard and etag of one Abook entry
filename -- unused, for API compatibility only
uid -- the UID of the Abook entry
"""
return self.to_vobjects(filename, [uid])[0][1:3]
def to_vobjects(self, filename, uids=None):
"""Return vCards and etags of all Abook entries in uids
filename -- unused, for API compatibility only
uids -- the UIDs of the Abook entries (all if None)
"""
self._update()
if not uids:
uids = self.get_uids(filename)
items = []
for uid in uids:
entry = self._book[uid.split('@')[0]]
# TODO add getmtime of photo
etag = sha1(str(dict(entry)).encode('utf-8'))
items.append((uid, self._to_vcard(entry), '"%s"' % etag.hexdigest()))
return items
def to_vobject(self, filename=None, uid=None):
"""Return the vCard corresponding to the uid
filename -- unused, for API compatibility only
uid -- the UID to get (required)
"""
self._update()
return self._to_vcard(self._book[uid.split('@')[0]])
@staticmethod
def _conv_adr(adr, entry):
"""Converts to Abook address format"""
if adr.value.street:
entry['address'] = adr.value.street
if adr.value.extended:
entry['address2'] = adr.value.extended
if adr.value.city:
entry['city'] = adr.value.city
if adr.value.region:
entry['state'] = adr.value.region
if adr.value.code and adr.value.code != '0':
entry['zip'] = adr.value.code
if adr.value.country:
entry['country'] = adr.value.country
@staticmethod
def _conv_tel_list(tel_list, entry):
"""Converts to Abook phone types"""
for tel in tel_list:
if not hasattr(tel, 'TYPE_param'):
entry['other'] = tel.value
elif tel.TYPE_param.lower() == 'home':
entry['phone'] = tel.value
elif tel.TYPE_param.lower() == 'work':
entry['workphone'] = tel.value
elif tel.TYPE_param.lower() == 'cell':
entry['mobile'] = tel.value
@staticmethod
def to_abook(card, section, book, bookfile=None):
"""Converts a vCard to Abook"""
book[section] = {}
book[section]['name'] = card.fn.value
if hasattr(card, 'email'):
book[section]['email'] = ','.join([e.value for e in card.email_list])
if hasattr(card, 'adr'):
Abook._conv_adr(card.adr, book[section])
if hasattr(card, 'tel_list'):
Abook._conv_tel_list(card.tel_list, book[section])
if hasattr(card, 'nickname') and card.nickname.value:
book[section]['nick'] = card.nickname.value
if hasattr(card, 'url') and card.url.value:
book[section]['url'] = card.url.value
if hasattr(card, 'note') and card.note.value:
book[section]['notes'] = card.note.value
if hasattr(card, 'photo') and bookfile:
try:
photo_file = join(dirname(bookfile), 'photo/%s.%s' % (card.fn.value, card.photo.TYPE_param))
open(photo_file, 'wb').write(card.photo.value)
except IOError:
pass
@staticmethod
def abook_file(vcard, bookfile):
"""Write a new Abook file with the given vcards"""
book = ConfigParser(default_section='format')
book['format'] = {}
book['format']['program'] = 'abook'
book['format']['version'] = '0.6.1'
for (i, card) in enumerate(readComponents(vcard.read())):
Abook.to_abook(card, str(i), book, bookfile)
with open(bookfile, 'w') as fp:
book.write(fp, False)
def abook2vcf():
"""Command line tool to convert from Abook to vCard"""
from argparse import ArgumentParser, FileType
from os.path import expanduser
from sys import stdout
parser = ArgumentParser(description='Converter from Abook to vCard syntax.')
parser.add_argument('infile', nargs='?', default=expanduser('~/.abook/addressbook'),
help='The Abook file to process (default: ~/.abook/addressbook)')
parser.add_argument('outfile', nargs='?', type=FileType('w'), default=stdout,
help='Output vCard file (default: stdout)')
args = parser.parse_args()
args.outfile.write(Abook(args.infile).to_vcf())
|
jspricke/python-abook
|
abook.py
|
Abook._update
|
python
|
def _update(self):
with self._lock:
if getmtime(self._filename) > self._last_modified:
self._last_modified = getmtime(self._filename)
self._book = ConfigParser(default_section='format')
self._book.read(self._filename)
|
Update internal state.
|
train
|
https://github.com/jspricke/python-abook/blob/cc58ad998303ce9a8b347a3317158c8f7cd0529f/abook.py#L42-L48
| null |
class Abook(object):
"""Represents a Abook addressbook"""
def __init__(self, filename=expanduser('~/.abook/addressbook')):
"""Constructor
filename -- the filename to load (default: ~/.abook/addressbook)
"""
self._filename = filename
self._last_modified = 0
self._book = []
self._lock = Lock()
self._update()
def to_vcf(self):
""" Converts to vCard string"""
return '\r\n'.join([v.serialize() for v in self.to_vcards()])
def append(self, text):
"""Appends an address to the Abook addressbook"""
return self.append_vobject(readOne(text))
def append_vobject(self, vcard, filename=None):
"""Appends an address to the Abook addressbook
vcard -- vCard to append
filename -- unused
return the new UID of the appended vcard
"""
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
section = str(max([-1] + [int(k) for k in book.sections()]) + 1)
Abook.to_abook(vcard, section, book, self._filename)
with open(self._filename, 'w') as fp:
book.write(fp, False)
return Abook._gen_uid(book[section])
def remove(self, uid, filename=None):
"""Removes an address to the Abook addressbook
uid -- UID of the entry to remove
"""
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
del book[uid.split('@')[0]]
with open(self._filename, 'w') as fp:
book.write(fp, False)
def replace(self, uid, text):
"""Updates an address to the Abook addressbook"""
return self.replace_vobject(uid, readOne(text))
def replace_vobject(self, uid, vcard, filename=None):
"""Updates an address to the Abook addressbook
uid -- uid of the entry to replace
vcard -- vCard of the new content
filename -- unused
"""
entry = uid.split('@')[0]
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
Abook.to_abook(vcard, entry, book, self._filename)
with open(self._filename, 'w') as fp:
book.write(fp, False)
return Abook._gen_uid(self._book[entry])
def move_vobject(self, uuid, from_filename, to_filename):
"""Updates the addressbook of an address
Not implemented
"""
pass
@staticmethod
def _gen_uid(entry):
"""Generates a UID based on the index in the Abook file
Not that the index is just a number and abook tends to regenerate it upon sorting.
"""
return '%s@%s' % (entry.name, getfqdn())
@staticmethod
def _gen_name(name):
"""Splits the name into family and given name"""
return Name(family=name.split(' ')[-1], given=name.split(' ')[:-1])
@staticmethod
def _gen_addr(entry):
"""Generates a vCard Address object"""
return Address(street=entry.get('address', ''),
extended=entry.get('address2', ''),
city=entry.get('city', ''),
region=entry.get('state', ''),
code=entry.get('zip', ''),
country=entry.get('country', ''))
def _add_photo(self, card, name):
"""Tries to load a photo and add it to the vCard"""
try:
photo_file = join(dirname(self._filename), 'photo/%s.jpeg' % name)
jpeg = open(photo_file, 'rb').read()
photo = card.add('photo')
photo.type_param = 'jpeg'
photo.encoding_param = 'b'
photo.value = jpeg
except IOError:
pass
def _to_vcard(self, entry):
"""Return a vCard of the Abook entry"""
card = vCard()
card.add('uid').value = Abook._gen_uid(entry)
card.add('fn').value = entry['name']
card.add('n').value = Abook._gen_name(entry['name'])
if 'email' in entry:
for email in entry['email'].split(','):
card.add('email').value = email
addr_comps = ['address', 'address2', 'city', 'country', 'zip', 'country']
if any(comp in entry for comp in addr_comps):
card.add('adr').value = Abook._gen_addr(entry)
if 'other' in entry:
tel = card.add('tel')
tel.value = entry['other']
if 'phone' in entry:
tel = card.add('tel')
tel.type_param = 'home'
tel.value = entry['phone']
if 'workphone' in entry:
tel = card.add('tel')
tel.type_param = 'work'
tel.value = entry['workphone']
if 'mobile' in entry:
tel = card.add('tel')
tel.type_param = 'cell'
tel.value = entry['mobile']
if 'nick' in entry:
card.add('nickname').value = entry['nick']
if 'url' in entry:
card.add('url').value = entry['url']
if 'notes' in entry:
card.add('note').value = entry['notes']
self._add_photo(card, entry['name'])
return card
def get_uids(self, filename=None):
"""Return a list of UIDs
filename -- unused, for API compatibility only
"""
self._update()
return [Abook._gen_uid(self._book[entry]) for entry in self._book.sections()]
def get_filesnames(self):
"""All filenames"""
return [self._filename]
def get_meta(self):
"""Meta tags of the vCard collection"""
return {'tag': 'VADDRESSBOOK'}
def last_modified(self):
"""Last time the Abook file was parsed"""
self._update()
return self._last_modified
def to_vcards(self):
"""Return a list of vCards"""
self._update()
return [self._to_vcard(self._book[entry]) for entry in self._book.sections()]
def to_vobject_etag(self, filename, uid):
"""Return vCard and etag of one Abook entry
filename -- unused, for API compatibility only
uid -- the UID of the Abook entry
"""
return self.to_vobjects(filename, [uid])[0][1:3]
def to_vobjects(self, filename, uids=None):
"""Return vCards and etags of all Abook entries in uids
filename -- unused, for API compatibility only
uids -- the UIDs of the Abook entries (all if None)
"""
self._update()
if not uids:
uids = self.get_uids(filename)
items = []
for uid in uids:
entry = self._book[uid.split('@')[0]]
# TODO add getmtime of photo
etag = sha1(str(dict(entry)).encode('utf-8'))
items.append((uid, self._to_vcard(entry), '"%s"' % etag.hexdigest()))
return items
def to_vobject(self, filename=None, uid=None):
"""Return the vCard corresponding to the uid
filename -- unused, for API compatibility only
uid -- the UID to get (required)
"""
self._update()
return self._to_vcard(self._book[uid.split('@')[0]])
@staticmethod
def _conv_adr(adr, entry):
"""Converts to Abook address format"""
if adr.value.street:
entry['address'] = adr.value.street
if adr.value.extended:
entry['address2'] = adr.value.extended
if adr.value.city:
entry['city'] = adr.value.city
if adr.value.region:
entry['state'] = adr.value.region
if adr.value.code and adr.value.code != '0':
entry['zip'] = adr.value.code
if adr.value.country:
entry['country'] = adr.value.country
@staticmethod
def _conv_tel_list(tel_list, entry):
"""Converts to Abook phone types"""
for tel in tel_list:
if not hasattr(tel, 'TYPE_param'):
entry['other'] = tel.value
elif tel.TYPE_param.lower() == 'home':
entry['phone'] = tel.value
elif tel.TYPE_param.lower() == 'work':
entry['workphone'] = tel.value
elif tel.TYPE_param.lower() == 'cell':
entry['mobile'] = tel.value
@staticmethod
def to_abook(card, section, book, bookfile=None):
"""Converts a vCard to Abook"""
book[section] = {}
book[section]['name'] = card.fn.value
if hasattr(card, 'email'):
book[section]['email'] = ','.join([e.value for e in card.email_list])
if hasattr(card, 'adr'):
Abook._conv_adr(card.adr, book[section])
if hasattr(card, 'tel_list'):
Abook._conv_tel_list(card.tel_list, book[section])
if hasattr(card, 'nickname') and card.nickname.value:
book[section]['nick'] = card.nickname.value
if hasattr(card, 'url') and card.url.value:
book[section]['url'] = card.url.value
if hasattr(card, 'note') and card.note.value:
book[section]['notes'] = card.note.value
if hasattr(card, 'photo') and bookfile:
try:
photo_file = join(dirname(bookfile), 'photo/%s.%s' % (card.fn.value, card.photo.TYPE_param))
open(photo_file, 'wb').write(card.photo.value)
except IOError:
pass
@staticmethod
def abook_file(vcard, bookfile):
"""Write a new Abook file with the given vcards"""
book = ConfigParser(default_section='format')
book['format'] = {}
book['format']['program'] = 'abook'
book['format']['version'] = '0.6.1'
for (i, card) in enumerate(readComponents(vcard.read())):
Abook.to_abook(card, str(i), book, bookfile)
with open(bookfile, 'w') as fp:
book.write(fp, False)
|
jspricke/python-abook
|
abook.py
|
Abook.append_vobject
|
python
|
def append_vobject(self, vcard, filename=None):
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
section = str(max([-1] + [int(k) for k in book.sections()]) + 1)
Abook.to_abook(vcard, section, book, self._filename)
with open(self._filename, 'w') as fp:
book.write(fp, False)
return Abook._gen_uid(book[section])
|
Appends an address to the Abook addressbook
vcard -- vCard to append
filename -- unused
return the new UID of the appended vcard
|
train
|
https://github.com/jspricke/python-abook/blob/cc58ad998303ce9a8b347a3317158c8f7cd0529f/abook.py#L58-L72
|
[
"def _gen_uid(entry):\n \"\"\"Generates a UID based on the index in the Abook file\n Not that the index is just a number and abook tends to regenerate it upon sorting.\n \"\"\"\n return '%s@%s' % (entry.name, getfqdn())\n",
"def to_abook(card, section, book, bookfile=None):\n \"\"\"Converts a vCard to Abook\"\"\"\n book[section] = {}\n book[section]['name'] = card.fn.value\n\n if hasattr(card, 'email'):\n book[section]['email'] = ','.join([e.value for e in card.email_list])\n\n if hasattr(card, 'adr'):\n Abook._conv_adr(card.adr, book[section])\n\n if hasattr(card, 'tel_list'):\n Abook._conv_tel_list(card.tel_list, book[section])\n\n if hasattr(card, 'nickname') and card.nickname.value:\n book[section]['nick'] = card.nickname.value\n\n if hasattr(card, 'url') and card.url.value:\n book[section]['url'] = card.url.value\n\n if hasattr(card, 'note') and card.note.value:\n book[section]['notes'] = card.note.value\n\n if hasattr(card, 'photo') and bookfile:\n try:\n photo_file = join(dirname(bookfile), 'photo/%s.%s' % (card.fn.value, card.photo.TYPE_param))\n open(photo_file, 'wb').write(card.photo.value)\n except IOError:\n pass\n"
] |
class Abook(object):
"""Represents a Abook addressbook"""
def __init__(self, filename=expanduser('~/.abook/addressbook')):
"""Constructor
filename -- the filename to load (default: ~/.abook/addressbook)
"""
self._filename = filename
self._last_modified = 0
self._book = []
self._lock = Lock()
self._update()
def _update(self):
""" Update internal state."""
with self._lock:
if getmtime(self._filename) > self._last_modified:
self._last_modified = getmtime(self._filename)
self._book = ConfigParser(default_section='format')
self._book.read(self._filename)
def to_vcf(self):
""" Converts to vCard string"""
return '\r\n'.join([v.serialize() for v in self.to_vcards()])
def append(self, text):
"""Appends an address to the Abook addressbook"""
return self.append_vobject(readOne(text))
def remove(self, uid, filename=None):
"""Removes an address to the Abook addressbook
uid -- UID of the entry to remove
"""
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
del book[uid.split('@')[0]]
with open(self._filename, 'w') as fp:
book.write(fp, False)
def replace(self, uid, text):
"""Updates an address to the Abook addressbook"""
return self.replace_vobject(uid, readOne(text))
def replace_vobject(self, uid, vcard, filename=None):
"""Updates an address to the Abook addressbook
uid -- uid of the entry to replace
vcard -- vCard of the new content
filename -- unused
"""
entry = uid.split('@')[0]
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
Abook.to_abook(vcard, entry, book, self._filename)
with open(self._filename, 'w') as fp:
book.write(fp, False)
return Abook._gen_uid(self._book[entry])
def move_vobject(self, uuid, from_filename, to_filename):
"""Updates the addressbook of an address
Not implemented
"""
pass
@staticmethod
def _gen_uid(entry):
"""Generates a UID based on the index in the Abook file
Not that the index is just a number and abook tends to regenerate it upon sorting.
"""
return '%s@%s' % (entry.name, getfqdn())
@staticmethod
def _gen_name(name):
"""Splits the name into family and given name"""
return Name(family=name.split(' ')[-1], given=name.split(' ')[:-1])
@staticmethod
def _gen_addr(entry):
"""Generates a vCard Address object"""
return Address(street=entry.get('address', ''),
extended=entry.get('address2', ''),
city=entry.get('city', ''),
region=entry.get('state', ''),
code=entry.get('zip', ''),
country=entry.get('country', ''))
def _add_photo(self, card, name):
"""Tries to load a photo and add it to the vCard"""
try:
photo_file = join(dirname(self._filename), 'photo/%s.jpeg' % name)
jpeg = open(photo_file, 'rb').read()
photo = card.add('photo')
photo.type_param = 'jpeg'
photo.encoding_param = 'b'
photo.value = jpeg
except IOError:
pass
def _to_vcard(self, entry):
"""Return a vCard of the Abook entry"""
card = vCard()
card.add('uid').value = Abook._gen_uid(entry)
card.add('fn').value = entry['name']
card.add('n').value = Abook._gen_name(entry['name'])
if 'email' in entry:
for email in entry['email'].split(','):
card.add('email').value = email
addr_comps = ['address', 'address2', 'city', 'country', 'zip', 'country']
if any(comp in entry for comp in addr_comps):
card.add('adr').value = Abook._gen_addr(entry)
if 'other' in entry:
tel = card.add('tel')
tel.value = entry['other']
if 'phone' in entry:
tel = card.add('tel')
tel.type_param = 'home'
tel.value = entry['phone']
if 'workphone' in entry:
tel = card.add('tel')
tel.type_param = 'work'
tel.value = entry['workphone']
if 'mobile' in entry:
tel = card.add('tel')
tel.type_param = 'cell'
tel.value = entry['mobile']
if 'nick' in entry:
card.add('nickname').value = entry['nick']
if 'url' in entry:
card.add('url').value = entry['url']
if 'notes' in entry:
card.add('note').value = entry['notes']
self._add_photo(card, entry['name'])
return card
def get_uids(self, filename=None):
"""Return a list of UIDs
filename -- unused, for API compatibility only
"""
self._update()
return [Abook._gen_uid(self._book[entry]) for entry in self._book.sections()]
def get_filesnames(self):
"""All filenames"""
return [self._filename]
def get_meta(self):
"""Meta tags of the vCard collection"""
return {'tag': 'VADDRESSBOOK'}
def last_modified(self):
"""Last time the Abook file was parsed"""
self._update()
return self._last_modified
def to_vcards(self):
"""Return a list of vCards"""
self._update()
return [self._to_vcard(self._book[entry]) for entry in self._book.sections()]
def to_vobject_etag(self, filename, uid):
"""Return vCard and etag of one Abook entry
filename -- unused, for API compatibility only
uid -- the UID of the Abook entry
"""
return self.to_vobjects(filename, [uid])[0][1:3]
def to_vobjects(self, filename, uids=None):
"""Return vCards and etags of all Abook entries in uids
filename -- unused, for API compatibility only
uids -- the UIDs of the Abook entries (all if None)
"""
self._update()
if not uids:
uids = self.get_uids(filename)
items = []
for uid in uids:
entry = self._book[uid.split('@')[0]]
# TODO add getmtime of photo
etag = sha1(str(dict(entry)).encode('utf-8'))
items.append((uid, self._to_vcard(entry), '"%s"' % etag.hexdigest()))
return items
def to_vobject(self, filename=None, uid=None):
"""Return the vCard corresponding to the uid
filename -- unused, for API compatibility only
uid -- the UID to get (required)
"""
self._update()
return self._to_vcard(self._book[uid.split('@')[0]])
@staticmethod
def _conv_adr(adr, entry):
"""Converts to Abook address format"""
if adr.value.street:
entry['address'] = adr.value.street
if adr.value.extended:
entry['address2'] = adr.value.extended
if adr.value.city:
entry['city'] = adr.value.city
if adr.value.region:
entry['state'] = adr.value.region
if adr.value.code and adr.value.code != '0':
entry['zip'] = adr.value.code
if adr.value.country:
entry['country'] = adr.value.country
@staticmethod
def _conv_tel_list(tel_list, entry):
"""Converts to Abook phone types"""
for tel in tel_list:
if not hasattr(tel, 'TYPE_param'):
entry['other'] = tel.value
elif tel.TYPE_param.lower() == 'home':
entry['phone'] = tel.value
elif tel.TYPE_param.lower() == 'work':
entry['workphone'] = tel.value
elif tel.TYPE_param.lower() == 'cell':
entry['mobile'] = tel.value
@staticmethod
def to_abook(card, section, book, bookfile=None):
"""Converts a vCard to Abook"""
book[section] = {}
book[section]['name'] = card.fn.value
if hasattr(card, 'email'):
book[section]['email'] = ','.join([e.value for e in card.email_list])
if hasattr(card, 'adr'):
Abook._conv_adr(card.adr, book[section])
if hasattr(card, 'tel_list'):
Abook._conv_tel_list(card.tel_list, book[section])
if hasattr(card, 'nickname') and card.nickname.value:
book[section]['nick'] = card.nickname.value
if hasattr(card, 'url') and card.url.value:
book[section]['url'] = card.url.value
if hasattr(card, 'note') and card.note.value:
book[section]['notes'] = card.note.value
if hasattr(card, 'photo') and bookfile:
try:
photo_file = join(dirname(bookfile), 'photo/%s.%s' % (card.fn.value, card.photo.TYPE_param))
open(photo_file, 'wb').write(card.photo.value)
except IOError:
pass
@staticmethod
def abook_file(vcard, bookfile):
"""Write a new Abook file with the given vcards"""
book = ConfigParser(default_section='format')
book['format'] = {}
book['format']['program'] = 'abook'
book['format']['version'] = '0.6.1'
for (i, card) in enumerate(readComponents(vcard.read())):
Abook.to_abook(card, str(i), book, bookfile)
with open(bookfile, 'w') as fp:
book.write(fp, False)
|
jspricke/python-abook
|
abook.py
|
Abook.remove
|
python
|
def remove(self, uid, filename=None):
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
del book[uid.split('@')[0]]
with open(self._filename, 'w') as fp:
book.write(fp, False)
|
Removes an address to the Abook addressbook
uid -- UID of the entry to remove
|
train
|
https://github.com/jspricke/python-abook/blob/cc58ad998303ce9a8b347a3317158c8f7cd0529f/abook.py#L74-L83
| null |
class Abook(object):
"""Represents a Abook addressbook"""
def __init__(self, filename=expanduser('~/.abook/addressbook')):
"""Constructor
filename -- the filename to load (default: ~/.abook/addressbook)
"""
self._filename = filename
self._last_modified = 0
self._book = []
self._lock = Lock()
self._update()
def _update(self):
""" Update internal state."""
with self._lock:
if getmtime(self._filename) > self._last_modified:
self._last_modified = getmtime(self._filename)
self._book = ConfigParser(default_section='format')
self._book.read(self._filename)
def to_vcf(self):
""" Converts to vCard string"""
return '\r\n'.join([v.serialize() for v in self.to_vcards()])
def append(self, text):
"""Appends an address to the Abook addressbook"""
return self.append_vobject(readOne(text))
def append_vobject(self, vcard, filename=None):
"""Appends an address to the Abook addressbook
vcard -- vCard to append
filename -- unused
return the new UID of the appended vcard
"""
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
section = str(max([-1] + [int(k) for k in book.sections()]) + 1)
Abook.to_abook(vcard, section, book, self._filename)
with open(self._filename, 'w') as fp:
book.write(fp, False)
return Abook._gen_uid(book[section])
def replace(self, uid, text):
"""Updates an address to the Abook addressbook"""
return self.replace_vobject(uid, readOne(text))
def replace_vobject(self, uid, vcard, filename=None):
"""Updates an address to the Abook addressbook
uid -- uid of the entry to replace
vcard -- vCard of the new content
filename -- unused
"""
entry = uid.split('@')[0]
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
Abook.to_abook(vcard, entry, book, self._filename)
with open(self._filename, 'w') as fp:
book.write(fp, False)
return Abook._gen_uid(self._book[entry])
def move_vobject(self, uuid, from_filename, to_filename):
"""Updates the addressbook of an address
Not implemented
"""
pass
@staticmethod
def _gen_uid(entry):
"""Generates a UID based on the index in the Abook file
Not that the index is just a number and abook tends to regenerate it upon sorting.
"""
return '%s@%s' % (entry.name, getfqdn())
@staticmethod
def _gen_name(name):
"""Splits the name into family and given name"""
return Name(family=name.split(' ')[-1], given=name.split(' ')[:-1])
@staticmethod
def _gen_addr(entry):
"""Generates a vCard Address object"""
return Address(street=entry.get('address', ''),
extended=entry.get('address2', ''),
city=entry.get('city', ''),
region=entry.get('state', ''),
code=entry.get('zip', ''),
country=entry.get('country', ''))
def _add_photo(self, card, name):
"""Tries to load a photo and add it to the vCard"""
try:
photo_file = join(dirname(self._filename), 'photo/%s.jpeg' % name)
jpeg = open(photo_file, 'rb').read()
photo = card.add('photo')
photo.type_param = 'jpeg'
photo.encoding_param = 'b'
photo.value = jpeg
except IOError:
pass
def _to_vcard(self, entry):
"""Return a vCard of the Abook entry"""
card = vCard()
card.add('uid').value = Abook._gen_uid(entry)
card.add('fn').value = entry['name']
card.add('n').value = Abook._gen_name(entry['name'])
if 'email' in entry:
for email in entry['email'].split(','):
card.add('email').value = email
addr_comps = ['address', 'address2', 'city', 'country', 'zip', 'country']
if any(comp in entry for comp in addr_comps):
card.add('adr').value = Abook._gen_addr(entry)
if 'other' in entry:
tel = card.add('tel')
tel.value = entry['other']
if 'phone' in entry:
tel = card.add('tel')
tel.type_param = 'home'
tel.value = entry['phone']
if 'workphone' in entry:
tel = card.add('tel')
tel.type_param = 'work'
tel.value = entry['workphone']
if 'mobile' in entry:
tel = card.add('tel')
tel.type_param = 'cell'
tel.value = entry['mobile']
if 'nick' in entry:
card.add('nickname').value = entry['nick']
if 'url' in entry:
card.add('url').value = entry['url']
if 'notes' in entry:
card.add('note').value = entry['notes']
self._add_photo(card, entry['name'])
return card
def get_uids(self, filename=None):
"""Return a list of UIDs
filename -- unused, for API compatibility only
"""
self._update()
return [Abook._gen_uid(self._book[entry]) for entry in self._book.sections()]
def get_filesnames(self):
"""All filenames"""
return [self._filename]
def get_meta(self):
"""Meta tags of the vCard collection"""
return {'tag': 'VADDRESSBOOK'}
def last_modified(self):
"""Last time the Abook file was parsed"""
self._update()
return self._last_modified
def to_vcards(self):
"""Return a list of vCards"""
self._update()
return [self._to_vcard(self._book[entry]) for entry in self._book.sections()]
def to_vobject_etag(self, filename, uid):
"""Return vCard and etag of one Abook entry
filename -- unused, for API compatibility only
uid -- the UID of the Abook entry
"""
return self.to_vobjects(filename, [uid])[0][1:3]
def to_vobjects(self, filename, uids=None):
"""Return vCards and etags of all Abook entries in uids
filename -- unused, for API compatibility only
uids -- the UIDs of the Abook entries (all if None)
"""
self._update()
if not uids:
uids = self.get_uids(filename)
items = []
for uid in uids:
entry = self._book[uid.split('@')[0]]
# TODO add getmtime of photo
etag = sha1(str(dict(entry)).encode('utf-8'))
items.append((uid, self._to_vcard(entry), '"%s"' % etag.hexdigest()))
return items
def to_vobject(self, filename=None, uid=None):
"""Return the vCard corresponding to the uid
filename -- unused, for API compatibility only
uid -- the UID to get (required)
"""
self._update()
return self._to_vcard(self._book[uid.split('@')[0]])
@staticmethod
def _conv_adr(adr, entry):
"""Converts to Abook address format"""
if adr.value.street:
entry['address'] = adr.value.street
if adr.value.extended:
entry['address2'] = adr.value.extended
if adr.value.city:
entry['city'] = adr.value.city
if adr.value.region:
entry['state'] = adr.value.region
if adr.value.code and adr.value.code != '0':
entry['zip'] = adr.value.code
if adr.value.country:
entry['country'] = adr.value.country
@staticmethod
def _conv_tel_list(tel_list, entry):
"""Converts to Abook phone types"""
for tel in tel_list:
if not hasattr(tel, 'TYPE_param'):
entry['other'] = tel.value
elif tel.TYPE_param.lower() == 'home':
entry['phone'] = tel.value
elif tel.TYPE_param.lower() == 'work':
entry['workphone'] = tel.value
elif tel.TYPE_param.lower() == 'cell':
entry['mobile'] = tel.value
@staticmethod
def to_abook(card, section, book, bookfile=None):
"""Converts a vCard to Abook"""
book[section] = {}
book[section]['name'] = card.fn.value
if hasattr(card, 'email'):
book[section]['email'] = ','.join([e.value for e in card.email_list])
if hasattr(card, 'adr'):
Abook._conv_adr(card.adr, book[section])
if hasattr(card, 'tel_list'):
Abook._conv_tel_list(card.tel_list, book[section])
if hasattr(card, 'nickname') and card.nickname.value:
book[section]['nick'] = card.nickname.value
if hasattr(card, 'url') and card.url.value:
book[section]['url'] = card.url.value
if hasattr(card, 'note') and card.note.value:
book[section]['notes'] = card.note.value
if hasattr(card, 'photo') and bookfile:
try:
photo_file = join(dirname(bookfile), 'photo/%s.%s' % (card.fn.value, card.photo.TYPE_param))
open(photo_file, 'wb').write(card.photo.value)
except IOError:
pass
@staticmethod
def abook_file(vcard, bookfile):
"""Write a new Abook file with the given vcards"""
book = ConfigParser(default_section='format')
book['format'] = {}
book['format']['program'] = 'abook'
book['format']['version'] = '0.6.1'
for (i, card) in enumerate(readComponents(vcard.read())):
Abook.to_abook(card, str(i), book, bookfile)
with open(bookfile, 'w') as fp:
book.write(fp, False)
|
jspricke/python-abook
|
abook.py
|
Abook.replace_vobject
|
python
|
def replace_vobject(self, uid, vcard, filename=None):
entry = uid.split('@')[0]
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
Abook.to_abook(vcard, entry, book, self._filename)
with open(self._filename, 'w') as fp:
book.write(fp, False)
return Abook._gen_uid(self._book[entry])
|
Updates an address to the Abook addressbook
uid -- uid of the entry to replace
vcard -- vCard of the new content
filename -- unused
|
train
|
https://github.com/jspricke/python-abook/blob/cc58ad998303ce9a8b347a3317158c8f7cd0529f/abook.py#L89-L104
|
[
"def _gen_uid(entry):\n \"\"\"Generates a UID based on the index in the Abook file\n Not that the index is just a number and abook tends to regenerate it upon sorting.\n \"\"\"\n return '%s@%s' % (entry.name, getfqdn())\n",
"def to_abook(card, section, book, bookfile=None):\n \"\"\"Converts a vCard to Abook\"\"\"\n book[section] = {}\n book[section]['name'] = card.fn.value\n\n if hasattr(card, 'email'):\n book[section]['email'] = ','.join([e.value for e in card.email_list])\n\n if hasattr(card, 'adr'):\n Abook._conv_adr(card.adr, book[section])\n\n if hasattr(card, 'tel_list'):\n Abook._conv_tel_list(card.tel_list, book[section])\n\n if hasattr(card, 'nickname') and card.nickname.value:\n book[section]['nick'] = card.nickname.value\n\n if hasattr(card, 'url') and card.url.value:\n book[section]['url'] = card.url.value\n\n if hasattr(card, 'note') and card.note.value:\n book[section]['notes'] = card.note.value\n\n if hasattr(card, 'photo') and bookfile:\n try:\n photo_file = join(dirname(bookfile), 'photo/%s.%s' % (card.fn.value, card.photo.TYPE_param))\n open(photo_file, 'wb').write(card.photo.value)\n except IOError:\n pass\n"
] |
class Abook(object):
"""Represents a Abook addressbook"""
def __init__(self, filename=expanduser('~/.abook/addressbook')):
"""Constructor
filename -- the filename to load (default: ~/.abook/addressbook)
"""
self._filename = filename
self._last_modified = 0
self._book = []
self._lock = Lock()
self._update()
def _update(self):
""" Update internal state."""
with self._lock:
if getmtime(self._filename) > self._last_modified:
self._last_modified = getmtime(self._filename)
self._book = ConfigParser(default_section='format')
self._book.read(self._filename)
def to_vcf(self):
""" Converts to vCard string"""
return '\r\n'.join([v.serialize() for v in self.to_vcards()])
def append(self, text):
"""Appends an address to the Abook addressbook"""
return self.append_vobject(readOne(text))
def append_vobject(self, vcard, filename=None):
"""Appends an address to the Abook addressbook
vcard -- vCard to append
filename -- unused
return the new UID of the appended vcard
"""
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
section = str(max([-1] + [int(k) for k in book.sections()]) + 1)
Abook.to_abook(vcard, section, book, self._filename)
with open(self._filename, 'w') as fp:
book.write(fp, False)
return Abook._gen_uid(book[section])
def remove(self, uid, filename=None):
"""Removes an address to the Abook addressbook
uid -- UID of the entry to remove
"""
book = ConfigParser(default_section='format')
with self._lock:
book.read(self._filename)
del book[uid.split('@')[0]]
with open(self._filename, 'w') as fp:
book.write(fp, False)
def replace(self, uid, text):
"""Updates an address to the Abook addressbook"""
return self.replace_vobject(uid, readOne(text))
def move_vobject(self, uuid, from_filename, to_filename):
"""Updates the addressbook of an address
Not implemented
"""
pass
@staticmethod
def _gen_uid(entry):
"""Generates a UID based on the index in the Abook file
Not that the index is just a number and abook tends to regenerate it upon sorting.
"""
return '%s@%s' % (entry.name, getfqdn())
@staticmethod
def _gen_name(name):
"""Splits the name into family and given name"""
return Name(family=name.split(' ')[-1], given=name.split(' ')[:-1])
@staticmethod
def _gen_addr(entry):
"""Generates a vCard Address object"""
return Address(street=entry.get('address', ''),
extended=entry.get('address2', ''),
city=entry.get('city', ''),
region=entry.get('state', ''),
code=entry.get('zip', ''),
country=entry.get('country', ''))
def _add_photo(self, card, name):
"""Tries to load a photo and add it to the vCard"""
try:
photo_file = join(dirname(self._filename), 'photo/%s.jpeg' % name)
jpeg = open(photo_file, 'rb').read()
photo = card.add('photo')
photo.type_param = 'jpeg'
photo.encoding_param = 'b'
photo.value = jpeg
except IOError:
pass
def _to_vcard(self, entry):
"""Return a vCard of the Abook entry"""
card = vCard()
card.add('uid').value = Abook._gen_uid(entry)
card.add('fn').value = entry['name']
card.add('n').value = Abook._gen_name(entry['name'])
if 'email' in entry:
for email in entry['email'].split(','):
card.add('email').value = email
addr_comps = ['address', 'address2', 'city', 'country', 'zip', 'country']
if any(comp in entry for comp in addr_comps):
card.add('adr').value = Abook._gen_addr(entry)
if 'other' in entry:
tel = card.add('tel')
tel.value = entry['other']
if 'phone' in entry:
tel = card.add('tel')
tel.type_param = 'home'
tel.value = entry['phone']
if 'workphone' in entry:
tel = card.add('tel')
tel.type_param = 'work'
tel.value = entry['workphone']
if 'mobile' in entry:
tel = card.add('tel')
tel.type_param = 'cell'
tel.value = entry['mobile']
if 'nick' in entry:
card.add('nickname').value = entry['nick']
if 'url' in entry:
card.add('url').value = entry['url']
if 'notes' in entry:
card.add('note').value = entry['notes']
self._add_photo(card, entry['name'])
return card
def get_uids(self, filename=None):
"""Return a list of UIDs
filename -- unused, for API compatibility only
"""
self._update()
return [Abook._gen_uid(self._book[entry]) for entry in self._book.sections()]
def get_filesnames(self):
"""All filenames"""
return [self._filename]
def get_meta(self):
"""Meta tags of the vCard collection"""
return {'tag': 'VADDRESSBOOK'}
def last_modified(self):
"""Last time the Abook file was parsed"""
self._update()
return self._last_modified
def to_vcards(self):
"""Return a list of vCards"""
self._update()
return [self._to_vcard(self._book[entry]) for entry in self._book.sections()]
def to_vobject_etag(self, filename, uid):
"""Return vCard and etag of one Abook entry
filename -- unused, for API compatibility only
uid -- the UID of the Abook entry
"""
return self.to_vobjects(filename, [uid])[0][1:3]
def to_vobjects(self, filename, uids=None):
"""Return vCards and etags of all Abook entries in uids
filename -- unused, for API compatibility only
uids -- the UIDs of the Abook entries (all if None)
"""
self._update()
if not uids:
uids = self.get_uids(filename)
items = []
for uid in uids:
entry = self._book[uid.split('@')[0]]
# TODO add getmtime of photo
etag = sha1(str(dict(entry)).encode('utf-8'))
items.append((uid, self._to_vcard(entry), '"%s"' % etag.hexdigest()))
return items
def to_vobject(self, filename=None, uid=None):
"""Return the vCard corresponding to the uid
filename -- unused, for API compatibility only
uid -- the UID to get (required)
"""
self._update()
return self._to_vcard(self._book[uid.split('@')[0]])
@staticmethod
def _conv_adr(adr, entry):
"""Converts to Abook address format"""
if adr.value.street:
entry['address'] = adr.value.street
if adr.value.extended:
entry['address2'] = adr.value.extended
if adr.value.city:
entry['city'] = adr.value.city
if adr.value.region:
entry['state'] = adr.value.region
if adr.value.code and adr.value.code != '0':
entry['zip'] = adr.value.code
if adr.value.country:
entry['country'] = adr.value.country
@staticmethod
def _conv_tel_list(tel_list, entry):
"""Converts to Abook phone types"""
for tel in tel_list:
if not hasattr(tel, 'TYPE_param'):
entry['other'] = tel.value
elif tel.TYPE_param.lower() == 'home':
entry['phone'] = tel.value
elif tel.TYPE_param.lower() == 'work':
entry['workphone'] = tel.value
elif tel.TYPE_param.lower() == 'cell':
entry['mobile'] = tel.value
@staticmethod
def to_abook(card, section, book, bookfile=None):
"""Converts a vCard to Abook"""
book[section] = {}
book[section]['name'] = card.fn.value
if hasattr(card, 'email'):
book[section]['email'] = ','.join([e.value for e in card.email_list])
if hasattr(card, 'adr'):
Abook._conv_adr(card.adr, book[section])
if hasattr(card, 'tel_list'):
Abook._conv_tel_list(card.tel_list, book[section])
if hasattr(card, 'nickname') and card.nickname.value:
book[section]['nick'] = card.nickname.value
if hasattr(card, 'url') and card.url.value:
book[section]['url'] = card.url.value
if hasattr(card, 'note') and card.note.value:
book[section]['notes'] = card.note.value
if hasattr(card, 'photo') and bookfile:
try:
photo_file = join(dirname(bookfile), 'photo/%s.%s' % (card.fn.value, card.photo.TYPE_param))
open(photo_file, 'wb').write(card.photo.value)
except IOError:
pass
@staticmethod
def abook_file(vcard, bookfile):
"""Write a new Abook file with the given vcards"""
book = ConfigParser(default_section='format')
book['format'] = {}
book['format']['program'] = 'abook'
book['format']['version'] = '0.6.1'
for (i, card) in enumerate(readComponents(vcard.read())):
Abook.to_abook(card, str(i), book, bookfile)
with open(bookfile, 'w') as fp:
book.write(fp, False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.