| index (int64, 0-731k) | package (string, 2-98 chars) | name (string, 1-76 chars) | docstring (string, 0-281k chars) | code (string, 4-1.07M chars) | signature (string, 2-42.8k chars) |
|---|---|---|---|---|---|
722,632
|
paramz
|
load
|
Load a previously pickled model, using `m.pickle('path/to/file.pickle')`
:param file_or_path: path/to/file.pickle
|
def load(file_or_path):
"""
    Load a previously pickled model, using `m.pickle('path/to/file.pickle')`
    :param file_or_path: path/to/file.pickle
"""
from pickle import UnpicklingError
_python3 = True
try:
import cPickle as pickle
_python3 = False
except ImportError: #python3
import pickle
try:
if _python3:
strcl = str
p3kw = dict(encoding='latin1')
return _unpickle(file_or_path, pickle, strcl, p3kw)
else:
strcl = basestring
p3kw = {}
return _unpickle(file_or_path, pickle, strcl, p3kw)
except UnpicklingError: # pragma: no coverage
import pickle
return _unpickle(file_or_path, pickle, strcl, p3kw)
|
(file_or_path)
|
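A hedged round-trip sketch of the pickle/load pair described above. It assumes `Parameterized`, `Param`, the model-level `pickle()` method, and `load` are all reachable from the top-level `paramz` namespace, as the row suggests; the file name is a placeholder.
```python
# Round trip: persist a tiny paramz model and restore it.
from paramz import Parameterized, Param
import paramz

m = Parameterized('demo')
m.link_parameter(Param('weight', 1.0))

m.pickle('demo.pickle')                  # write the model to disk
restored = paramz.load('demo.pickle')    # accepts a path or an open file
print(restored)
```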
722,671
|
stackprinter.tracing
|
TracePrinter
|
Print a trace of all calls & returns in a piece of code as they are executed
Example:
```
with TracePrinter(style='color', depth_limit=5):
dosomething()
dosomethingelse()
```
Params
---
Accepts all keyword args accepted by stackprinter.format, and:
depth_limit: int (default: 20)
How many nested calls will be followed
print_function: callable (default: print)
some function of your choice that accepts a string
stop_on_exception: bool (default: True)
If False, plow through exceptions
|
class TracePrinter():
"""
Print a trace of all calls & returns in a piece of code as they are executed
Example:
```
    with TracePrinter(style='color', depth_limit=5):
dosomething()
dosomethingelse()
```
Params
---
    Accepts all keyword args accepted by stackprinter.format, and:
depth_limit: int (default: 20)
How many nested calls will be followed
print_function: callable (default: print)
some function of your choice that accepts a string
stop_on_exception: bool (default: True)
If False, plow through exceptions
"""
def __init__(self,
suppressed_paths=[],
depth_limit=20,
print_function=print,
stop_on_exception=True,
**formatter_kwargs):
self.fmt = get_formatter(**formatter_kwargs)
self.fmt_style = formatter_kwargs.get('style', 'plaintext')
assert isinstance(suppressed_paths, list)
self.suppressed_paths = suppressed_paths
self.emit = print_function
self.depth_limit = depth_limit
self.stop_on_exception = stop_on_exception
def __enter__(self):
depth = count_stack(sys._getframe(1))
self.enable(current_depth=depth)
return self
def __exit__(self, etype, evalue, tb):
self.disable()
if etype is None:
return True
def enable(self, force=False, current_depth=None):
if current_depth is None:
current_depth = count_stack(sys._getframe(1))
self.starting_depth = current_depth
self.previous_frame = None
self.trace_before = sys.gettrace()
if (self.trace_before is not None) and not force:
raise Exception("There is already a trace function registered: %r" % self.trace_before)
sys.settrace(self.trace)
def disable(self):
sys.settrace(self.trace_before)
try:
del self.previous_frame
except AttributeError:
pass
def trace(self, frame, event, arg):
depth = count_stack(frame) - self.starting_depth
if depth >= self.depth_limit:
return None
if 'call' in event:
callsite = frame.f_back
self.show(callsite)
self.show(frame)
elif 'return' in event:
val_str = ppr.format_value(arg, indent=11, truncation=1000)
ret_str = ' Return %s\n' % val_str
self.show(frame, note=ret_str)
elif event == 'exception':
exc_str = format_exception_message(*arg, style=self.fmt_style)
self.show(frame, note=exc_str)
if self.stop_on_exception:
self.disable()
return None
return self.trace
def show(self, frame, note=''):
if frame is None:
return
filepath = inspect.getsourcefile(frame) or inspect.getfile(frame)
if filepath in __file__:
return
elif match(filepath, self.suppressed_paths):
line_info = (filepath, frame.f_lineno, frame.f_code.co_name)
frame_str = 'File %s, line %s, in %s\n' % line_info
if len(note) > 123:
                note = note[:120] + '...'
else:
frame_str = self.fmt(frame)
depth = count_stack(frame) - self.starting_depth
our_callsite = frame.f_back
callsite_of_previous_frame = getattr(self.previous_frame, 'f_back', -1)
if self.previous_frame is our_callsite and our_callsite is not None:
# we're a child frame
self.emit(add_indent(' ββββ\n', depth - 1))
if frame is callsite_of_previous_frame:
# we're a parent frame
self.emit(add_indent('ββββββββ\n', depth))
frame_str += note
self.emit(add_indent(frame_str, depth))
self.previous_frame = frame
|
(suppressed_paths=[], depth_limit=20, print_function=<built-in function print>, stop_on_exception=True, **formatter_kwargs)
|
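A short, self-contained usage sketch of the context-manager form documented above; `helper` and `work` are toy functions made up for illustration.
```python
from stackprinter.tracing import TracePrinter

def helper(x):
    return x * 2

def work():
    return helper(3) + helper(4)

# Print each call and return while `work` runs, following at most 5 nested frames.
with TracePrinter(style='plaintext', depth_limit=5):
    work()
```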
722,672
|
stackprinter.tracing
|
__enter__
| null |
def __enter__(self):
depth = count_stack(sys._getframe(1))
self.enable(current_depth=depth)
return self
|
(self)
|
722,673
|
stackprinter.tracing
|
__exit__
| null |
def __exit__(self, etype, evalue, tb):
self.disable()
if etype is None:
return True
|
(self, etype, evalue, tb)
|
722,674
|
stackprinter.tracing
|
__init__
| null |
def __init__(self,
suppressed_paths=[],
depth_limit=20,
print_function=print,
stop_on_exception=True,
**formatter_kwargs):
self.fmt = get_formatter(**formatter_kwargs)
self.fmt_style = formatter_kwargs.get('style', 'plaintext')
assert isinstance(suppressed_paths, list)
self.suppressed_paths = suppressed_paths
self.emit = print_function
self.depth_limit = depth_limit
self.stop_on_exception = stop_on_exception
|
(self, suppressed_paths=[], depth_limit=20, print_function=<built-in function print>, stop_on_exception=True, **formatter_kwargs)
|
722,675
|
stackprinter.tracing
|
disable
| null |
def disable(self):
sys.settrace(self.trace_before)
try:
del self.previous_frame
except AttributeError:
pass
|
(self)
|
722,676
|
stackprinter.tracing
|
enable
| null |
def enable(self, force=False, current_depth=None):
if current_depth is None:
current_depth = count_stack(sys._getframe(1))
self.starting_depth = current_depth
self.previous_frame = None
self.trace_before = sys.gettrace()
if (self.trace_before is not None) and not force:
raise Exception("There is already a trace function registered: %r" % self.trace_before)
sys.settrace(self.trace)
|
(self, force=False, current_depth=None)
|
722,677
|
stackprinter.tracing
|
show
| null |
def show(self, frame, note=''):
if frame is None:
return
filepath = inspect.getsourcefile(frame) or inspect.getfile(frame)
if filepath in __file__:
return
elif match(filepath, self.suppressed_paths):
line_info = (filepath, frame.f_lineno, frame.f_code.co_name)
frame_str = 'File %s, line %s, in %s\n' % line_info
if len(note) > 123:
            note = note[:120] + '...'
else:
frame_str = self.fmt(frame)
depth = count_stack(frame) - self.starting_depth
our_callsite = frame.f_back
callsite_of_previous_frame = getattr(self.previous_frame, 'f_back', -1)
if self.previous_frame is our_callsite and our_callsite is not None:
# we're a child frame
self.emit(add_indent(' ββββ\n', depth - 1))
if frame is callsite_of_previous_frame:
# we're a parent frame
self.emit(add_indent('ββββββββ\n', depth))
frame_str += note
self.emit(add_indent(frame_str, depth))
self.previous_frame = frame
|
(self, frame, note='')
|
722,678
|
stackprinter.tracing
|
trace
| null |
def trace(self, frame, event, arg):
depth = count_stack(frame) - self.starting_depth
if depth >= self.depth_limit:
return None
if 'call' in event:
callsite = frame.f_back
self.show(callsite)
self.show(frame)
elif 'return' in event:
val_str = ppr.format_value(arg, indent=11, truncation=1000)
ret_str = ' Return %s\n' % val_str
self.show(frame, note=ret_str)
elif event == 'exception':
exc_str = format_exception_message(*arg, style=self.fmt_style)
self.show(frame, note=exc_str)
if self.stop_on_exception:
self.disable()
return None
return self.trace
|
(self, frame, event, arg)
|
722,679
|
stackprinter
|
_guess_thing
|
default to the current exception or current stack frame
|
def _guess_thing(f):
""" default to the current exception or current stack frame"""
# the only reason this happens up here is to keep sys._getframe at the same
# call depth relative to an invocation of `show` or `format`, even when
# `format` is called _by_ `show`.
@wraps(f)
def show_or_format(thing=None, *args, **kwargs):
if thing is None:
thing = sys.exc_info()
if thing == (None, None, None):
thing = sys._getframe(1)
return f(thing, *args, **kwargs)
return show_or_format
|
(f)
|
722,680
|
stackprinter
|
_is_exc_info
| null |
def _is_exc_info(thing):
if not isinstance(thing, tuple) or len(thing) != 3:
return False
a, b, c = thing
return ((a is None or (isinstance(a, type) and BaseException in a.mro())) and
(b is None or (isinstance(b, BaseException))))
|
(thing)
|
722,681
|
stackprinter
|
_is_running_in_ipython
| null |
def _is_running_in_ipython():
try:
return __IPYTHON__
except NameError:
return False
|
()
|
722,682
|
stackprinter
|
_patch_ipython_excepthook
|
Replace ipython's built-in traceback printer, excellent though it is
|
def _patch_ipython_excepthook(**kwargs):
""" Replace ipython's built-in traceback printer, excellent though it is"""
global ipy_tb
blacklist = kwargs.get('suppressed_paths', [])
blacklist.append('site-packages/IPython/')
kwargs['suppressed_paths'] = blacklist
if 'file' in kwargs:
del kwargs['file']
def format_tb(*exc_tuple, **__):
unstructured_tb = format(exc_tuple, **kwargs)
structured_tb = [unstructured_tb] # \*coughs*
return structured_tb
import IPython
shell = IPython.get_ipython()
if ipy_tb is None:
ipy_tb = shell.InteractiveTB.structured_traceback
shell.InteractiveTB.structured_traceback = format_tb
|
(**kwargs)
|
722,683
|
stackprinter
|
_unpatch_ipython_excepthook
|
restore proper order in Ipython
|
def _unpatch_ipython_excepthook():
""" restore proper order in Ipython """
import IPython
shell = IPython.get_ipython()
if ipy_tb is not None:
shell.InteractiveTB.structured_traceback = ipy_tb
|
()
|
722,687
|
stackprinter
|
format
|
Render the traceback of an exception or a frame's call stack
Call this without arguments inside an `except` block to get a traceback for
the currently handled exception:
```
try:
something()
except:
logger.err(stackprinter.format(**kwargs))
```
Explicitly pass an exception (or a triple as returned by `sys.exc_info()`)
to handle that particular exception anywhere, also outside an except block.
```
try:
something()
except Exception as e:
last_exc = e
if last_exc:
logger.err(stackprinter.format(last_exc, **kwargs))
```
Pass a frame object to see the call stack leading up to that frame:
```
stack = stackprinter.format(sys._getframe(2), **kwargs)
```
Pass a thread object to see its current call stack:
```
thread = threading.Thread(target=something)
thread.start()
# (...)
stack = stackprinter.format(thread, **kwargs)
```
Note:
This displays variable values as they are _at the time of formatting_. In
multi-threaded programs, variables can change while we're busy walking
the stack & printing them. So, if nothing seems to make sense, consider that
your exception and the traceback messages are from slightly different times.
Sadly, there is no responsible way to freeze all other threads as soon
as we want to inspect some thread's call stack (...or is there?)
Params
---
thing: (optional) exception, sys.exc_info() tuple, frame or thread
What to format. Defaults to the currently handled exception or current
stack frame.
style: string
'plaintext' (default): Output just text
'darkbg', 'darkbg2', 'darkbg3', 'lightbg', 'lightbg2', 'lightbg3':
Enable colors, for use in terminals that support 256 ansi
colors or in jupyter notebooks (or even with `ansi2html`)
source_lines: int or 'all'
Select how much source code context will be shown.
int 0: Don't include a source listing.
int n > 0: Show n lines of code. (default: 5)
string 'all': Show the whole scope of the frame.
show_signature: bool (default True)
Always include the function header in the source code listing.
show_vals: str or None
Select which variable values will be shown.
'line': Show only the variables on the highlighted line.
'like_source' (default): Show only those visible in the source listing
'all': Show every variable in the scope of the frame.
None: Don't show any variable values.
truncate_vals: int
Maximum number of characters to be used for each variable value.
Default: 500
line_wrap: int (default 60)
Limit how many columns are available to print each variable
(excluding its name). Set to 0 or False to disable wrapping.
suppressed_paths: list of regex patterns
Set less verbose formatting for frames whose code lives in certain paths
(e.g. library code). Files whose path matches any of the given regex
patterns will be considered boring. The first call to boring code is
rendered with fewer code lines (but with argument values still visible),
while deeper calls within boring code get a single line and no variable
values.
Example: To hide numpy internals from the traceback, set
`suppressed_paths=[r"lib/python.*/site-packages/numpy"]`
or
`suppressed_paths=[re.compile(r"lib/python.*/site-packages/numpy")]`
suppressed_exceptions: list of exception classes
Show less verbose formatting for exceptions in this list.
By default, this list is `[KeyboardInterrupt]`. Set to `[]`
to force verbose formatting even on a keyboard interrupt.
suppressed_vars: list of regex patterns
Don't show the content of variables whose name matches any of the given
patterns.
Internally, this doesn't just filter the output, but stackprinter won't
even try to access these values at all. So this can also be used as a
workaround for rare issues around dynamic attribute lookups.
Example:
`suppressed_vars=[r".*password.*", r"certainobject\.certainproperty"]`
reverse: bool
List the innermost frame first.
add_summary: True, False, 'auto'
Append a compact list of involved files and source lines, similar
to the built-in traceback message.
'auto' (default): do that if the main traceback is longer than 50 lines.
|
def _guess_thing(f):
""" default to the current exception or current stack frame"""
# the only reason this happens up here is to keep sys._getframe at the same
# call depth relative to an invocation of `show` or `format`, even when
# `format` is called _by_ `show`.
@wraps(f)
def show_or_format(thing=None, *args, **kwargs):
if thing is None:
thing = sys.exc_info()
if thing == (None, None, None):
thing = sys._getframe(1)
return f(thing, *args, **kwargs)
return show_or_format
|
(thing=None, **kwargs)
|
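A compact sketch combining two of the call patterns documented above: no-argument use inside an `except` block, and passing a frame object. The failing function and the chosen keyword arguments are illustrative.
```python
import sys
import stackprinter

def fail():
    return {}['missing']   # raises KeyError

# Inside an except block, format() with no arguments picks up the
# currently handled exception.
try:
    fail()
except Exception:
    print(stackprinter.format(source_lines=5, show_vals='like_source'))

# Passing a frame object renders the call stack leading up to that frame.
print(stackprinter.format(sys._getframe(), reverse=False))
```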
722,688
|
stackprinter
|
format_current_exception
|
Render a traceback for the currently handled exception.
Params
--
**kwargs:
See `format`
|
def format_current_exception(**kwargs):
"""
Render a traceback for the currently handled exception.
Params
--
**kwargs:
See `format`
"""
return format(sys.exc_info(), **kwargs)
|
(**kwargs)
|
722,689
|
stackprinter
|
format_current_stack
|
Render the current thread's call stack.
Params
--
**kwargs:
See `format`
|
def format_current_stack(**kwargs):
""" Render the current thread's call stack.
Params
--
**kwargs:
See `format`
"""
return format(sys._getframe(1), **kwargs)
|
(**kwargs)
|
722,690
|
stackprinter
|
format_thread
| null |
def format_thread(thread, add_summary=False, **kwargs):
try:
fr = sys._current_frames()[thread.ident]
except KeyError:
return "%r: no frames found" % thread
else:
if 'suppressed_paths' not in kwargs:
kwargs['suppressed_paths'] = []
kwargs['suppressed_paths'] += [r"lib/python.*/threading\.py"]
msg = fmt.format_stack_from_frame(fr, **kwargs)
msg_indented = ' ' + '\n '.join(msg.split('\n')).strip()
return "%r\n\n%s" % (thread, msg_indented)
|
(thread, add_summary=False, **kwargs)
|
722,694
|
stackprinter
|
remove_excepthook
|
Reinstate the default excepthook
|
def remove_excepthook():
""" Reinstate the default excepthook """
if _is_running_in_ipython():
_unpatch_ipython_excepthook()
sys.excepthook = sys.__excepthook__
|
()
|
722,695
|
stackprinter
|
set_excepthook
|
Set sys.excepthook to print a detailed traceback for any uncaught exception.
See `format()` for available kwargs.
Examples:
----
Print to stdout instead of stderr:
```
set_excepthook(file='stdout')
```
Enable color output:
```
set_excepthook(style='darkbg') # or e.g. 'lightbg' (for more options see `format`)
```
If running under IPython, this will, with a heavy heart, attempt to monkey
patch IPython's traceback printer (which handles all exceptions internally,
thus bypassing the system excepthook). You can decide whether this sounds
like a sane idea.
To undo, call `remove_excepthook`.
Params
--
**kwargs:
See `show` and `format`
|
def set_excepthook(**kwargs):
"""
Set sys.excepthook to print a detailed traceback for any uncaught exception.
See `format()` for available kwargs.
Examples:
----
Print to stdout instead of stderr:
```
set_excepthook(file='stdout')
```
Enable color output:
```
set_excepthook(style='darkbg') # or e.g. 'lightbg' (for more options see `format`)
```
    If running under IPython, this will, with a heavy heart, attempt to monkey
    patch IPython's traceback printer (which handles all exceptions internally,
thus bypassing the system excepthook). You can decide whether this sounds
like a sane idea.
To undo, call `remove_excepthook`.
Params
--
**kwargs:
See `show` and `format`
"""
if _is_running_in_ipython():
_patch_ipython_excepthook(**kwargs)
else:
def hook(*args):
show(args, **kwargs)
sys.excepthook = hook
|
(**kwargs)
|
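A minimal install/remove sketch using options named in the docstring; the `boom` function is a placeholder and is left commented out so the script exits cleanly.
```python
import stackprinter

# Route uncaught-exception tracebacks through stackprinter, in color.
stackprinter.set_excepthook(style='darkbg2', source_lines=7)

def boom():
    raise ValueError("unhandled on purpose")

# Any uncaught exception from here on is rendered by stackprinter.
# boom()   # uncomment to see the formatted traceback

# Restore the default behaviour when done.
stackprinter.remove_excepthook()
```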
722,696
|
stackprinter
|
show
|
Print the traceback of an exception or a frame's call stack
Params
---
file: 'stderr', 'stdout' or file-like object
defaults to stderr
**kwargs:
See `format`
|
def _guess_thing(f):
""" default to the current exception or current stack frame"""
# the only reason this happens up here is to keep sys._getframe at the same
# call depth relative to an invocation of `show` or `format`, even when
# `format` is called _by_ `show`.
@wraps(f)
def show_or_format(thing=None, *args, **kwargs):
if thing is None:
thing = sys.exc_info()
if thing == (None, None, None):
thing = sys._getframe(1)
return f(thing, *args, **kwargs)
return show_or_format
|
(thing=None, file='stderr', **kwargs)
|
722,697
|
stackprinter
|
show_current_exception
|
Print a traceback for the currently handled exception.
Params
--
**kwargs:
See `show`
|
def show_current_exception(file=sys.stderr, **kwargs):
"""
Print a traceback for the currently handled exception.
Params
--
**kwargs:
See `show`
"""
if file is None:
return # see explanation in `show()`
print(format_current_exception(**kwargs), file=file)
|
(file=<_io.TextIOWrapper name='<stderr>' mode='w' encoding='utf-8'>, **kwargs)
|
722,698
|
stackprinter
|
show_current_stack
|
Print the current thread's call stack.
Params
--
**kwargs:
See `show`
|
def show_current_stack(**kwargs):
""" Print the current thread's call stack.
Params
--
**kwargs:
See `show`
"""
show(sys._getframe(1), **kwargs)
|
(**kwargs)
|
722,701
|
stackprinter.tracing
|
trace
|
Get a decorator to print all calls & returns in a function
Example:
```
@trace(style='color', depth_limit=5)
def dosomething():
(...)
```
Params
---
Accepts all keyword args accepted by stackprinter.format, and:
depth_limit: int (default: 20)
How many nested calls will be followed
print_function: callable (default: print)
some function of your choice that accepts a string
stop_on_exception: bool (default: True)
If False, plow through exceptions
|
def trace(*args, suppressed_paths=[], **formatter_kwargs):
"""
Get a decorator to print all calls & returns in a function
Example:
```
@trace(style='color', depth_limit=5)
    def dosomething():
(...)
```
Params
---
    Accepts all keyword args accepted by stackprinter.format, and:
depth_limit: int (default: 20)
How many nested calls will be followed
print_function: callable (default: print)
some function of your choice that accepts a string
stop_on_exception: bool (default: True)
If False, plow through exceptions
"""
traceprinter = TracePrinter(suppressed_paths=suppressed_paths,
**formatter_kwargs)
def deco(f):
def wrapper(*args, **formatter_kwargs):
traceprinter.enable(current_depth=count_stack(sys._getframe()) + 1)
result = f(*args, **formatter_kwargs)
traceprinter.disable()
return result
return wrapper
if args:
return deco(args[0])
else:
return deco
|
(*args, suppressed_paths=[], **formatter_kwargs)
|
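The decorator form in a toy sketch; `fib` is made up, and the keyword arguments mirror those listed above.
```python
from stackprinter.tracing import trace

@trace(style='plaintext', depth_limit=3)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(4)   # each nested call and return is printed as it happens
```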
722,707
|
brewer2mpl.brewer2mpl
|
BrewerMap
|
Representation of a colorbrewer2 color map with matplotlib compatible
views of the map.
Parameters
----------
name : str
map_type : str
colors : list
Colors as list of 0-255 RGB triplets.
Attributes
----------
name : str
map_type : str
number : int
Number of colors in color map.
colors : list
Colors as list of 0-255 RGB triplets.
colorbrewer2_url : str
hex_colors : list
mpl_colors : list
mpl_colormap : matplotlib LinearSegmentedColormap
|
class BrewerMap(_ColorMap):
"""
Representation of a colorbrewer2 color map with matplotlib compatible
views of the map.
Parameters
----------
name : str
map_type : str
colors : list
Colors as list of 0-255 RGB triplets.
Attributes
----------
name : str
map_type : str
number : int
Number of colors in color map.
colors : list
Colors as list of 0-255 RGB triplets.
colorbrewer2_url : str
hex_colors : list
mpl_colors : list
mpl_colormap : matplotlib LinearSegmentedColormap
"""
@property
def colorbrewer2_url(self):
"""
URL that can be used to view the color map at colorbrewer2.org.
"""
url = 'http://colorbrewer2.org/index.html?type={0}&scheme={1}&n={2}'
return url.format(self.type.lower(), self.name, self.number)
def colorbrewer2(self):
"""
View this color map at colorbrewer2.org. This will open
colorbrewer2.org in your default web browser.
"""
webbrowser.open_new_tab(self.colorbrewer2_url) # pragma: no cover
|
(name, map_type, colors)
|
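A brief sketch of the matplotlib-facing attributes listed above, obtaining an instance via `get_map` (documented further below); the chosen map name and size are just examples.
```python
import brewer2mpl

bmap = brewer2mpl.get_map('Set2', 'Qualitative', 5)
print(bmap.colors)            # 0-255 RGB triplets
print(bmap.hex_colors)        # '#rrggbb' strings
print(bmap.colorbrewer2_url)  # view the scheme at colorbrewer2.org
cmap = bmap.mpl_colormap      # LinearSegmentedColormap; requires matplotlib
```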
722,708
|
brewer2mpl.brewer2mpl
|
__init__
| null |
def __init__(self, name, map_type, colors):
self.name = name
self.type = map_type
self.number = len(colors)
self.colors = colors
|
(self, name, map_type, colors)
|
722,709
|
brewer2mpl.brewer2mpl
|
colorbrewer2
|
View this color map at colorbrewer2.org. This will open
colorbrewer2.org in your default web browser.
|
def colorbrewer2(self):
"""
View this color map at colorbrewer2.org. This will open
colorbrewer2.org in your default web browser.
"""
webbrowser.open_new_tab(self.colorbrewer2_url) # pragma: no cover
|
(self)
|
722,710
|
brewer2mpl.brewer2mpl
|
get_mpl_colormap
|
A color map that can be used in matplotlib plots. Requires matplotlib
to be importable. Keyword arguments are passed to
`matplotlib.colors.LinearSegmentedColormap.from_list`.
|
def get_mpl_colormap(self, **kwargs):
"""
A color map that can be used in matplotlib plots. Requires matplotlib
to be importable. Keyword arguments are passed to
`matplotlib.colors.LinearSegmentedColormap.from_list`.
"""
if not HAVE_MPL: # pragma: no cover
raise RuntimeError('matplotlib not available.')
cmap = LinearSegmentedColormap.from_list(self.name,
self.mpl_colors, **kwargs)
return cmap
|
(self, **kwargs)
|
722,711
|
brewer2mpl.brewer2mpl
|
show_as_blocks
|
Show colors in the IPython Notebook using ipythonblocks.
Parameters
----------
block_size : int, optional
Size of displayed blocks.
|
def show_as_blocks(self, block_size=100):
"""
Show colors in the IPython Notebook using ipythonblocks.
Parameters
----------
block_size : int, optional
Size of displayed blocks.
"""
from ipythonblocks import BlockGrid
grid = BlockGrid(self.number, 1, block_size=block_size)
for block, color in zip(grid, self.colors):
block.rgb = color
grid.show()
|
(self, block_size=100)
|
722,713
|
brewer2mpl.brewer2mpl
|
get_map
|
Return a `BrewerMap` representation of the specified color map.
Parameters
----------
name : str
Name of color map. Use `print_maps` to see available color maps.
map_type : {'Sequential', 'Diverging', 'Qualitative'}
Select color map type.
number : int
Number of defined colors in color map.
reverse : bool, optional
Set to True to get the reversed color map.
|
def get_map(name, map_type, number, reverse=False):
"""
Return a `BrewerMap` representation of the specified color map.
Parameters
----------
name : str
Name of color map. Use `print_maps` to see available color maps.
map_type : {'Sequential', 'Diverging', 'Qualitative'}
Select color map type.
number : int
Number of defined colors in color map.
reverse : bool, optional
Set to True to get the reversed color map.
"""
number = str(number)
map_type = map_type.lower().capitalize()
# check for valid type
if map_type not in MAP_TYPES:
s = 'Invalid map type, must be one of {0}'.format(MAP_TYPES)
raise ValueError(s)
# make a dict of lower case map name to map name so this can be
# insensitive to case.
# this would be a perfect spot for a dict comprehension but going to
# wait on that to preserve 2.6 compatibility.
# map_names = {k.lower(): k for k in COLOR_MAPS[map_type].iterkeys()}
map_names = dict((k.lower(), k) for k in COLOR_MAPS[map_type].keys())
# check for valid name
if name.lower() not in map_names:
s = 'Invalid color map name {0!r} for type {1!r}.\n'
s = s.format(name, map_type)
valid_names = [str(k) for k in COLOR_MAPS[map_type].keys()]
valid_names.sort()
s += 'Valid names are: {0}'.format(valid_names)
raise ValueError(s)
name = map_names[name.lower()]
# check for valid number
if number not in COLOR_MAPS[map_type][name]:
s = 'Invalid number for map type {0!r} and name {1!r}.\n'
s = s.format(map_type, str(name))
valid_numbers = [int(k) for k in COLOR_MAPS[map_type][name].keys()]
valid_numbers.sort()
s += 'Valid numbers are : {0}'.format(valid_numbers)
raise ValueError(s)
colors = COLOR_MAPS[map_type][name][number]['Colors']
if reverse:
name += '_r'
colors = [x for x in reversed(colors)]
return BrewerMap(name, map_type, colors)
|
(name, map_type, number, reverse=False)
|
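A small sketch of discovery plus the `reverse` flag described above; the map names and color counts are examples.
```python
import brewer2mpl

# List all Diverging maps that define exactly 9 colors.
brewer2mpl.print_maps('Diverging', 9)

# Fetch a map and its reversed counterpart (reverse appends '_r' to the name).
rdbu = brewer2mpl.get_map('RdBu', 'Diverging', 9)
rdbu_r = brewer2mpl.get_map('RdBu', 'Diverging', 9, reverse=True)
print(rdbu.name, rdbu_r.name)
```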
722,714
|
brewer2mpl.brewer2mpl
|
print_all_maps
|
Print the name and number of defined colors of all available color maps.
|
def print_all_maps():
"""
Print the name and number of defined colors of all available color maps.
"""
for t in MAP_TYPES:
print_maps_by_type(t)
|
()
|
722,715
|
brewer2mpl.brewer2mpl
|
print_maps
|
Print maps by type and/or number of defined colors.
Parameters
----------
map_type : {'Sequential', 'Diverging', 'Qualitative'}, optional
Filter output by map type. By default all maps are printed.
number : int, optional
Filter output by number of defined colors. By default there is
no numeric filtering.
|
def print_maps(map_type=None, number=None):
"""
Print maps by type and/or number of defined colors.
Parameters
----------
map_type : {'Sequential', 'Diverging', 'Qualitative'}, optional
Filter output by map type. By default all maps are printed.
number : int, optional
Filter output by number of defined colors. By default there is
no numeric filtering.
"""
if not map_type and not number:
print_all_maps()
elif map_type:
print_maps_by_type(map_type, number)
else:
s = ('Invalid parameter combination. '
'number without map_type is not supported.')
raise ValueError(s)
|
(map_type=None, number=None)
|
722,716
|
brewer2mpl.brewer2mpl
|
print_maps_by_type
|
Print all available maps of a given type.
Parameters
----------
map_type : {'Sequential', 'Diverging', 'Qualitative'}
Select map type to print.
number : int, optional
Filter output by number of defined colors. By default there is
no numeric filtering.
|
def print_maps_by_type(map_type, number=None):
"""
Print all available maps of a given type.
Parameters
----------
map_type : {'Sequential', 'Diverging', 'Qualitative'}
Select map type to print.
number : int, optional
Filter output by number of defined colors. By default there is
no numeric filtering.
"""
map_type = map_type.lower().capitalize()
if map_type not in MAP_TYPES:
s = 'Invalid map type, must be one of {0}'.format(MAP_TYPES)
raise ValueError(s)
print(map_type)
map_keys = sorted(COLOR_MAPS[map_type].keys())
format_str = '{0:8} : {1}'
for mk in map_keys:
num_keys = sorted(COLOR_MAPS[map_type][mk].keys(), key=int)
if not number or str(number) in num_keys:
num_str = '{' + ', '.join(num_keys) + '}'
print(format_str.format(mk, num_str))
|
(map_type, number=None)
|
722,720
|
trading212_rest
|
Trading212
|
Rest API client for Trading212
|
class Trading212:
"""Rest API client for Trading212"""
def __init__(self, api_key: str, demo: bool = True):
""" """
self._api_key = api_key
self.host = (
"https://live.trading212.com" if demo else "https://live.trading212.com"
)
def _get(self, endpoint: str, params=None, api_version: str = "v0"):
return self._process_response(
requests.get(
f"{self.host}/api/{api_version}/{endpoint}",
headers={"Authorization": self._api_key},
params=params,
)
)
def _post(self, endpoint: str, data: dict, api_version: str = "v0"):
return self._process_response(
requests.post(
f"{self.host}/api/{api_version}/{endpoint}",
headers={"Authorization": self._api_key},
data=data,
)
)
def _get_url(
self,
url,
):
return self._process_response(
requests.get(
f"{self.host}/{url}",
headers={"Authorization": self._api_key},
)
)
def _delete_url(
self,
url,
):
return self._process_response(
requests.delete(
f"{self.host}/{url}",
headers={"Authorization": self._api_key},
)
)
@staticmethod
def _process_response(resp):
try:
resp.raise_for_status()
except HTTPError as http_err:
logging.error(resp.text)
raise http_err
return resp.json()
def _process_items(self, response):
res = []
res += response["items"]
while next_page := response.get("nextPagePath"):
response = self._get_url(next_page)
res += response["items"]
return res
@staticmethod
def _validate_time_validity(time_validity: str):
if time_validity not in ["GTC", "DAY"]:
raise ValueError("time_validity must be one of GTC or DAY")
def orders(self, cursor: int = 0, ticker: str = None, limit: int = 50):
"""Historical order data"""
params = {"cursor": cursor, "limit": limit}
if ticker:
params["ticker"] = ticker
return self._process_items(self._get("equity/history/orders", params=params))
def dividends(self, cursor: int = 0, ticker: str = None, limit: int = 50):
"""Dividends paid out"""
params = {"cursor": cursor, "limit": limit}
if ticker:
params["ticker"] = ticker
return self._process_items(self._get("history/dividends", params=params))
def transactions(self, cursor: int = 0, limit: int = 50):
"""Transactions list"""
params = {"cursor": cursor, "limit": limit}
return self._process_items(self._get("history/transactions", params=params))
def instruments(self):
"""Tradeable instruments metadata"""
return self._get("equity/metadata/instruments")
def cash(self):
"""Account cash"""
return self._get("equity/account/cash")
def portfolio(self):
"""All open positions"""
return self._get("equity/portfolio")
def position(self, ticker: str):
"""Open position by ticker"""
return self._get(f"equity/portfolio/{ticker}")
def exchanges(self):
"""Exhange list"""
return self._get("equity/metadata/exchanges")
def account_info(self):
"""Account info"""
return self._get("equity/account/info")
def equity_orders(self):
"""All equity orders"""
return self._get("equity/orders")
def equity_order(self, id: int):
"""Equity order by ID"""
return self._get(f"equity/orders/{id}")
def equity_order_cancel(self, id: int):
"""Camcel equity order"""
return self._delete_url(f"equity/orders/{id}")
def equity_order_place_limit(
self, ticker: str, quantity: int, limit_price: float, time_validity: str
):
"""Place limit order"""
self._validate_time_validity(time_validity)
return self._post(
f"equity/orders/limit",
data={
"quantity": quantity,
"limitPrice": limit_price,
"ticker": ticker,
"timeValidity": time_validity,
},
)
def equity_order_place_market(self, ticker: str, quantity: int):
"""Place market order"""
return self._post(
f"equity/orders/market", data={"quantity": quantity, "ticker": ticker}
)
def equity_order_place_stop(
self, ticker: str, quantity: int, stop_price: float, time_validity: str
):
"""Place stop order"""
self._validate_time_validity(time_validity)
return self._post(
f"equity/orders/stop",
data={
"quantity": quantity,
"stopPrice": stop_price,
"ticker": ticker,
"timeValidity": time_validity,
},
)
def equity_order_place_stop_limit(
self,
ticker: str,
quantity: int,
stop_price: float,
limit_price: float,
time_validity: str,
):
"""Place stop-limit order"""
self._validate_time_validity(time_validity)
return self._post(
f"equity/orders/stop_limit",
data={
"quantity": quantity,
"stopPrice": stop_price,
"limitPrice": limit_price,
"ticker": ticker,
"timeValidity": time_validity,
},
)
def __repr__(self):
return "Trading212(api_key=****{}, demo={})".format(
self._api_key[-4:], self.host == "https://demo.trading212.com"
)
|
(api_key: str, demo: bool = True)
|
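A hedged usage sketch of the client above. The API key is a placeholder, `demo=True` targets the demo endpoint per the constructor, and each call performs a real HTTP request when executed.
```python
from trading212_rest import Trading212

client = Trading212(api_key="YOUR_API_KEY", demo=True)

print(client.account_info())   # GET /api/v0/equity/account/info
print(client.cash())           # GET /api/v0/equity/account/cash

# History helpers page through results by following nextPagePath.
for order in client.orders(limit=50):
    print(order)
```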
722,721
|
trading212_rest
|
__init__
|
def __init__(self, api_key: str, demo: bool = True):
""" """
self._api_key = api_key
self.host = (
"https://live.trading212.com" if demo else "https://live.trading212.com"
)
|
(self, api_key: str, demo: bool = True)
|
|
722,722
|
trading212_rest
|
__repr__
| null |
def __repr__(self):
return "Trading212(api_key=****{}, demo={})".format(
self._api_key[-4:], self.host == "https://demo.trading212.com"
)
|
(self)
|
722,723
|
trading212_rest
|
_delete_url
| null |
def _delete_url(
self,
url,
):
return self._process_response(
requests.delete(
f"{self.host}/{url}",
headers={"Authorization": self._api_key},
)
)
|
(self, url)
|
722,724
|
trading212_rest
|
_get
| null |
def _get(self, endpoint: str, params=None, api_version: str = "v0"):
return self._process_response(
requests.get(
f"{self.host}/api/{api_version}/{endpoint}",
headers={"Authorization": self._api_key},
params=params,
)
)
|
(self, endpoint: str, params=None, api_version: str = 'v0')
|
722,725
|
trading212_rest
|
_get_url
| null |
def _get_url(
self,
url,
):
return self._process_response(
requests.get(
f"{self.host}/{url}",
headers={"Authorization": self._api_key},
)
)
|
(self, url)
|
722,726
|
trading212_rest
|
_post
| null |
def _post(self, endpoint: str, data: dict, api_version: str = "v0"):
return self._process_response(
requests.post(
f"{self.host}/api/{api_version}/{endpoint}",
headers={"Authorization": self._api_key},
data=data,
)
)
|
(self, endpoint: str, data: dict, api_version: str = 'v0')
|
722,727
|
trading212_rest
|
_process_items
| null |
def _process_items(self, response):
res = []
res += response["items"]
while next_page := response.get("nextPagePath"):
response = self._get_url(next_page)
res += response["items"]
return res
|
(self, response)
|
722,728
|
trading212_rest
|
_process_response
| null |
@staticmethod
def _process_response(resp):
try:
resp.raise_for_status()
except HTTPError as http_err:
logging.error(resp.text)
raise http_err
return resp.json()
|
(resp)
|
722,729
|
trading212_rest
|
_validate_time_validity
| null |
@staticmethod
def _validate_time_validity(time_validity: str):
if time_validity not in ["GTC", "DAY"]:
raise ValueError("time_validity must be one of GTC or DAY")
|
(time_validity: str)
|
722,730
|
trading212_rest
|
account_info
|
Account info
|
def account_info(self):
"""Account info"""
return self._get("equity/account/info")
|
(self)
|
722,731
|
trading212_rest
|
cash
|
Account cash
|
def cash(self):
"""Account cash"""
return self._get("equity/account/cash")
|
(self)
|
722,732
|
trading212_rest
|
dividends
|
Dividends paid out
|
def dividends(self, cursor: int = 0, ticker: str = None, limit: int = 50):
"""Dividends paid out"""
params = {"cursor": cursor, "limit": limit}
if ticker:
params["ticker"] = ticker
return self._process_items(self._get("history/dividends", params=params))
|
(self, cursor: int = 0, ticker: Optional[str] = None, limit: int = 50)
|
722,733
|
trading212_rest
|
equity_order
|
Equity order by ID
|
def equity_order(self, id: int):
"""Equity order by ID"""
return self._get(f"equity/orders/{id}")
|
(self, id: int)
|
722,734
|
trading212_rest
|
equity_order_cancel
|
Cancel equity order
|
def equity_order_cancel(self, id: int):
"""Camcel equity order"""
return self._delete_url(f"equity/orders/{id}")
|
(self, id: int)
|
722,735
|
trading212_rest
|
equity_order_place_limit
|
Place limit order
|
def equity_order_place_limit(
self, ticker: str, quantity: int, limit_price: float, time_validity: str
):
"""Place limit order"""
self._validate_time_validity(time_validity)
return self._post(
f"equity/orders/limit",
data={
"quantity": quantity,
"limitPrice": limit_price,
"ticker": ticker,
"timeValidity": time_validity,
},
)
|
(self, ticker: str, quantity: int, limit_price: float, time_validity: str)
|
722,736
|
trading212_rest
|
equity_order_place_market
|
Place market order
|
def equity_order_place_market(self, ticker: str, quantity: int):
"""Place market order"""
return self._post(
f"equity/orders/market", data={"quantity": quantity, "ticker": ticker}
)
|
(self, ticker: str, quantity: int)
|
722,737
|
trading212_rest
|
equity_order_place_stop
|
Place stop order
|
def equity_order_place_stop(
self, ticker: str, quantity: int, stop_price: float, time_validity: str
):
"""Place stop order"""
self._validate_time_validity(time_validity)
return self._post(
f"equity/orders/stop",
data={
"quantity": quantity,
"stopPrice": stop_price,
"ticker": ticker,
"timeValidity": time_validity,
},
)
|
(self, ticker: str, quantity: int, stop_price: float, time_validity: str)
|
722,738
|
trading212_rest
|
equity_order_place_stop_limit
|
Place stop-limit order
|
def equity_order_place_stop_limit(
self,
ticker: str,
quantity: int,
stop_price: float,
limit_price: float,
time_validity: str,
):
"""Place stop-limit order"""
self._validate_time_validity(time_validity)
return self._post(
f"equity/orders/stop_limit",
data={
"quantity": quantity,
"stopPrice": stop_price,
"limitPrice": limit_price,
"ticker": ticker,
"timeValidity": time_validity,
},
)
|
(self, ticker: str, quantity: int, stop_price: float, limit_price: float, time_validity: str)
|
722,739
|
trading212_rest
|
equity_orders
|
All equity orders
|
def equity_orders(self):
"""All equity orders"""
return self._get("equity/orders")
|
(self)
|
722,740
|
trading212_rest
|
exchanges
|
Exchange list
|
def exchanges(self):
"""Exhange list"""
return self._get("equity/metadata/exchanges")
|
(self)
|
722,741
|
trading212_rest
|
instruments
|
Tradeable instruments metadata
|
def instruments(self):
"""Tradeable instruments metadata"""
return self._get("equity/metadata/instruments")
|
(self)
|
722,742
|
trading212_rest
|
orders
|
Historical order data
|
def orders(self, cursor: int = 0, ticker: str = None, limit: int = 50):
"""Historical order data"""
params = {"cursor": cursor, "limit": limit}
if ticker:
params["ticker"] = ticker
return self._process_items(self._get("equity/history/orders", params=params))
|
(self, cursor: int = 0, ticker: Optional[str] = None, limit: int = 50)
|
722,743
|
trading212_rest
|
portfolio
|
All open positions
|
def portfolio(self):
"""All open positions"""
return self._get("equity/portfolio")
|
(self)
|
722,744
|
trading212_rest
|
position
|
Open position by ticker
|
def position(self, ticker: str):
"""Open position by ticker"""
return self._get(f"equity/portfolio/{ticker}")
|
(self, ticker: str)
|
722,745
|
trading212_rest
|
transactions
|
Transactions list
|
def transactions(self, cursor: int = 0, limit: int = 50):
"""Transactions list"""
params = {"cursor": cursor, "limit": limit}
return self._process_items(self._get("history/transactions", params=params))
|
(self, cursor: int = 0, limit: int = 50)
|
722,752
|
beobench.utils
|
restart
|
Clean up remaining beobench processes and containers
before running new experiments.
This stops all docker containers still running. This
function is not called by other scheduler functions
to enable the parallel running of experiments.
|
def restart() -> None:
"""Clean up remaining beobench processes and containers
before running new experiments.
This stops all docker containers still running. This
function is not called by other scheduler functions
to enable the parallel running of experiments.
"""
shutdown()
|
() -> NoneType
|
722,753
|
beobench.experiment.scheduler
|
run
|
Run experiment.
This function allows the user to run experiments from the command line or Python
interface.
Args:
config (str, dict, pathlib.Path or list, optional): experiment configuration.
This can either be a dictionary, or a path (str or pathlib) to a yaml file,
or a json str, or a list combining any number of the prior config types. If
no config is given, a default config is used.
method (str, optional): RL method to use in experiment. This overwrites any
method that is set in experiment file. For example 'PPO'. Defaults to None.
env (str, optional): environment to apply method to in experiment. This
overwrites any env set in experiment file. Defaults to None.
local_dir (str, optional): Directory to write experiment files to. This argument
is equivalent to the `local_dir` argument in `tune.run()`. Defaults to
None.
wandb_project (str, optional): Name of wandb project. Defaults to
None.
wandb_entity (str, optional): Name of wandb entity. Defaults to None.
wandb_group (str, optional): name of wandb run group. Defaults to None.
wandb_api_key (str, optional): wandb API key. Defaults to None.
use_gpu (bool, optional): whether to use GPU from the host system. Defaults to
False.
mlflow_name (str, optional): name of MLflow experiment. Defaults to None.
docker_shm_size (str, optional): size of the shared memory available to the
container. Defaults to None.
use_no_cache (bool, optional): whether to build the experiment container
without using the Docker cache. Defaults to False. This has no effect if
force_build is disabled and the image already exists.
dev_path (str, optional): file or github path to beobench package. For
development purposes only. This will install a custom beobench version
inside the experiment container. By default the latest PyPI version is
installed.
no_additional_container (bool, optional): whether not to start another container
to run experiments in. Defaults to False, which means that another container
is started to run experiments in.
docker_flags (list[str], optional): list of docker flags to be added to
docker run command of Beobench experiment container.
beobench_extras (str, optional): extra dependencies to install with beobench.
Used during pip installation in experiment image, as in using the command:
`pip install beobench[<beobench_extras>]`
force_build (bool, optional): whether to force a re-build, even if
image already exists.
num_samples (int, optional): number of experiment samples to run. This defaults
to a single sample, i.e. just running the experiment once.
dry_run (bool, optional): whether to just dry run this function
without launching any docker containers or experiment processes.
Primarily intended for testing and debugging.
|
def run(
config: Union[str, dict, pathlib.Path, list] = None,
method: str = None,
gym: str = None,
env: str = None,
local_dir: str = None,
wandb_project: str = None,
wandb_entity: str = None,
wandb_group: str = None,
wandb_api_key: str = None,
mlflow_name: str = None,
use_gpu: bool = False,
docker_shm_size: str = None,
use_no_cache: bool = False,
dev_path: str = None,
no_additional_container: bool = False,
docker_flags: list[str] = None,
beobench_extras: str = None,
force_build: str = False,
num_samples: int = None,
dry_run: bool = False,
) -> None:
"""Run experiment.
    This function allows the user to run experiments from the command line or Python
interface.
Args:
config (str, dict, pathlib.Path or list, optional): experiment configuration.
This can either be a dictionary, or a path (str or pathlib) to a yaml file,
or a json str, or a list combining any number of the prior config types. If
no config is given, a default config is used.
method (str, optional): RL method to use in experiment. This overwrites any
method that is set in experiment file. For example 'PPO'. Defaults to None.
env (str, optional): environment to apply method to in experiment. This
overwrites any env set in experiment file. Defaults to None.
local_dir (str, optional): Directory to write experiment files to. This argument
is equivalent to the `local_dir` argument in `tune.run()`. Defaults to
None.
wandb_project (str, optional): Name of wandb project. Defaults to
None.
wandb_entity (str, optional): Name of wandb entity. Defaults to None.
wandb_group (str, optional): name of wandb run group. Defaults to None.
wandb_api_key (str, optional): wandb API key. Defaults to None.
use_gpu (bool, optional): whether to use GPU from the host system. Defaults to
False.
mlflow_name (str, optional): name of MLflow experiment. Defaults to None.
        docker_shm_size (str, optional): size of the shared memory available to the
            container. Defaults to None.
        use_no_cache (bool, optional): whether to build the experiment container
            without using the Docker cache. Defaults to False. This has no effect
            if force_build is disabled and the image already exists.
dev_path (str, optional): file or github path to beobench package. For
            development purposes only. This will install a custom beobench version
inside the experiment container. By default the latest PyPI version is
installed.
no_additional_container (bool, optional): whether not to start another container
to run experiments in. Defaults to False, which means that another container
is started to run experiments in.
docker_flags (list[str], optional): list of docker flags to be added to
docker run command of Beobench experiment container.
beobench_extras (str, optional): extra dependencies to install with beobench.
Used during pip installation in experiment image, as in using the command:
`pip install beobench[<beobench_extras>]`
force_build (bool, optional): whether to force a re-build, even if
image already exists.
num_samples (int, optional): number of experiment samples to run. This defaults
to a single sample, i.e. just running the experiment once.
dry_run (bool, optional): whether to just dry run this function
without launching any docker containers or experiment processes.
Primarily intended for testing and debugging.
"""
logger.info("Starting experiment run ...")
# parsing relevant kwargs and adding them to config
kwarg_config = _create_config_from_kwargs(
local_dir=local_dir,
wandb_project=wandb_project,
wandb_entity=wandb_entity,
wandb_api_key=wandb_api_key,
wandb_group=wandb_group,
mlflow_name=mlflow_name,
use_gpu=use_gpu,
docker_shm_size=docker_shm_size,
use_no_cache=use_no_cache,
dev_path=dev_path,
docker_flags=docker_flags,
beobench_extras=beobench_extras,
force_build=force_build,
num_samples=num_samples,
)
# parse combined config
config = beobench.experiment.config_parser.parse([config, kwarg_config])
high_level_config = beobench.experiment.config_parser.get_high_level_config(
method=method, gym=gym, env=env
)
config = beobench.utils.merge_dicts(
config, high_level_config, let_b_overrule_a=True
)
# adding any defaults that haven't been set by given config
# The configs can be conflicting:
# config overrules user_config which overrules default_config.
config = beobench.experiment.config_parser.add_default_and_user_configs(config)
beobench.experiment.config_parser.check_config(config)
# running experiment num_samples times
num_samples = config["general"]["num_samples"]
for i in range(1, num_samples + 1):
# TODO: enable checking whether something is run in container
# and do not print the statement below if inside experiment container.
if (
config["env"]["config"] is not None
and "name" in config["env"]["config"].keys()
):
env_name = config["env"]["config"]["name"]
else:
env_name = "default"
logger.info(
(
f"Running experiment in container with "
f"'{env_name}' environment from '{config['env']['gym']}' gym."
)
)
logger.info(
(
f"Using agent from {config['agent']['origin']}. Sample {i} of"
f" {num_samples}."
)
)
autogen_config = beobench.experiment.config_parser.get_autogen_config()
config = beobench.utils.merge_dicts(
a=config, b=autogen_config, let_b_overrule_a=True
)
if no_additional_container:
# Execute experiment
# (this is usually reached from inside an experiment container)
logger.info("Running agent script.")
container_ro_dir_abs = CONTAINER_RO_DIR.absolute()
args = [
"python",
str(container_ro_dir_abs / _get_agent_file(config)[0].name),
]
if not dry_run:
subprocess.check_call(args)
else:
# First build container image and then execute experiment inside container
# But only run one experiment per container.
config["general"]["num_samples"] = 1
_build_and_run_in_container(config, dry_run=dry_run)
|
(config: Union[str, dict, pathlib.Path, list, NoneType] = None, method: Optional[str] = None, gym: Optional[str] = None, env: Optional[str] = None, local_dir: Optional[str] = None, wandb_project: Optional[str] = None, wandb_entity: Optional[str] = None, wandb_group: Optional[str] = None, wandb_api_key: Optional[str] = None, mlflow_name: Optional[str] = None, use_gpu: bool = False, docker_shm_size: Optional[str] = None, use_no_cache: bool = False, dev_path: Optional[str] = None, no_additional_container: bool = False, docker_flags: Optional[list[str]] = None, beobench_extras: Optional[str] = None, force_build: str = False, num_samples: Optional[int] = None, dry_run: bool = False) -> NoneType
|
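A sketch of calling `run` from Python with a few of the keyword arguments documented above; the config path, method, and environment name are placeholders.
```python
from beobench.experiment.scheduler import run

run(
    config="experiment.yaml",       # path, dict, JSON string, or a list of these
    method="PPO",
    env="SomeEnv-v0",               # hypothetical environment name
    local_dir="./beobench_results",
    use_gpu=False,
    num_samples=1,                  # run the experiment once
)
```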
722,755
|
fuzzy
|
DMetaphone
| null |
from fuzzy import DMetaphone
| null |
722,756
|
fuzzy
|
Soundex
| null |
from fuzzy import Soundex
| null |
722,759
|
gitdb.db.base
|
CachingDB
|
A database which uses caches to speed-up access
|
class CachingDB:
"""A database which uses caches to speed-up access"""
#{ Interface
def update_cache(self, force=False):
"""
Call this method if the underlying data changed to trigger an update
of the internal caching structures.
:param force: if True, the update must be performed. Otherwise the implementation
may decide not to perform an update if it thinks nothing has changed.
        :return: True if an update was performed because something had indeed changed"""
# END interface
|
()
|
722,760
|
gitdb.db.base
|
update_cache
|
Call this method if the underlying data changed to trigger an update
of the internal caching structures.
:param force: if True, the update must be performed. Otherwise the implementation
may decide not to perform an update if it thinks nothing has changed.
:return: True if an update was performed because something had indeed changed
|
def update_cache(self, force=False):
"""
Call this method if the underlying data changed to trigger an update
of the internal caching structures.
:param force: if True, the update must be performed. Otherwise the implementation
may decide not to perform an update if it thinks nothing has changed.
    :return: True if an update was performed because something had indeed changed"""
|
(self, force=False)
|
722,761
|
gitdb.db.base
|
CompoundDB
|
A database which delegates calls to sub-databases.
Databases are stored in the lazy-loaded _dbs attribute.
Define _set_cache_ to update it with your databases
|
class CompoundDB(ObjectDBR, LazyMixin, CachingDB):
"""A database which delegates calls to sub-databases.
Databases are stored in the lazy-loaded _dbs attribute.
Define _set_cache_ to update it with your databases"""
def _set_cache_(self, attr):
if attr == '_dbs':
self._dbs = list()
elif attr == '_db_cache':
self._db_cache = dict()
else:
super()._set_cache_(attr)
def _db_query(self, sha):
""":return: database containing the given 20 byte sha
:raise BadObject:"""
# most databases use binary representations, prevent converting
# it every time a database is being queried
try:
return self._db_cache[sha]
except KeyError:
pass
# END first level cache
for db in self._dbs:
if db.has_object(sha):
self._db_cache[sha] = db
return db
# END for each database
raise BadObject(sha)
#{ ObjectDBR interface
def has_object(self, sha):
try:
self._db_query(sha)
return True
except BadObject:
return False
# END handle exceptions
def info(self, sha):
return self._db_query(sha).info(sha)
def stream(self, sha):
return self._db_query(sha).stream(sha)
def size(self):
""":return: total size of all contained databases"""
return reduce(lambda x, y: x + y, (db.size() for db in self._dbs), 0)
def sha_iter(self):
return chain(*(db.sha_iter() for db in self._dbs))
#} END object DBR Interface
#{ Interface
def databases(self):
""":return: tuple of database instances we use for lookups"""
return tuple(self._dbs)
def update_cache(self, force=False):
# something might have changed, clear everything
self._db_cache.clear()
stat = False
for db in self._dbs:
if isinstance(db, CachingDB):
stat |= db.update_cache(force)
# END if is caching db
# END for each database to update
return stat
def partial_to_complete_sha_hex(self, partial_hexsha):
"""
:return: 20 byte binary sha1 from the given less-than-40 byte hexsha (bytes or str)
        :param partial_hexsha: hexsha with fewer than 40 bytes
:raise AmbiguousObjectName: """
databases = list()
_databases_recursive(self, databases)
partial_hexsha = force_text(partial_hexsha)
len_partial_hexsha = len(partial_hexsha)
if len_partial_hexsha % 2 != 0:
partial_binsha = hex_to_bin(partial_hexsha + "0")
else:
partial_binsha = hex_to_bin(partial_hexsha)
# END assure successful binary conversion
candidate = None
for db in databases:
full_bin_sha = None
try:
if hasattr(db, 'partial_to_complete_sha_hex'):
full_bin_sha = db.partial_to_complete_sha_hex(partial_hexsha)
else:
full_bin_sha = db.partial_to_complete_sha(partial_binsha, len_partial_hexsha)
# END handle database type
except BadObject:
continue
# END ignore bad objects
if full_bin_sha:
if candidate and candidate != full_bin_sha:
raise AmbiguousObjectName(partial_hexsha)
candidate = full_bin_sha
# END handle candidate
# END for each db
if not candidate:
raise BadObject(partial_binsha)
return candidate
#} END interface
|
()
|
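A minimal subclass sketch of the pattern the docstring describes: `_set_cache_` lazily fills `_dbs` with the sub-databases to delegate to. The concrete `LooseObjectDB`/`PackedDB` sub-databases and the directory layout are assumptions for illustration.
```python
from gitdb.db.base import CompoundDB
from gitdb.db.loose import LooseObjectDB
from gitdb.db.pack import PackedDB

class RepoObjectDB(CompoundDB):
    """Delegate object lookups to a loose-object store and the pack files."""

    def __init__(self, objects_dir):
        super().__init__()
        self._objects_dir = objects_dir

    def _set_cache_(self, attr):
        if attr == '_dbs':
            # Lazily populate the sub-databases; list order is lookup priority.
            self._dbs = [
                LooseObjectDB(self._objects_dir),
                PackedDB(self._objects_dir + '/pack'),
            ]
        else:
            super()._set_cache_(attr)
```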
722,762
|
gitdb.db.base
|
__contains__
| null |
def __contains__(self, sha):
    return self.has_object(sha)
|
(self, sha)
|
722,763
|
gitdb.util
|
__getattr__
|
Whenever an attribute is requested that we do not know, we allow it
to be created and set. Next time the same attribute is requested, it is simply
returned from our dict/slots.
|
def __getattr__(self, attr):
"""
Whenever an attribute is requested that we do not know, we allow it
to be created and set. Next time the same attribute is requested, it is simply
returned from our dict/slots. """
self._set_cache_(attr)
# will raise in case the cache was not created
return object.__getattribute__(self, attr)
|
(self, attr)
|
722,764
|
gitdb.db.base
|
_db_query
|
:return: database containing the given 20 byte sha
:raise BadObject:
|
def _db_query(self, sha):
""":return: database containing the given 20 byte sha
:raise BadObject:"""
# most databases use binary representations, prevent converting
# it every time a database is being queried
try:
return self._db_cache[sha]
except KeyError:
pass
# END first level cache
for db in self._dbs:
if db.has_object(sha):
self._db_cache[sha] = db
return db
# END for each database
raise BadObject(sha)
|
(self, sha)
|
722,765
|
gitdb.db.base
|
_set_cache_
| null |
def _set_cache_(self, attr):
if attr == '_dbs':
self._dbs = list()
elif attr == '_db_cache':
self._db_cache = dict()
else:
super()._set_cache_(attr)
|
(self, attr)
|
722,766
|
gitdb.db.base
|
databases
|
:return: tuple of database instances we use for lookups
|
def databases(self):
""":return: tuple of database instances we use for lookups"""
return tuple(self._dbs)
|
(self)
|
722,767
|
gitdb.db.base
|
has_object
| null |
def has_object(self, sha):
try:
self._db_query(sha)
return True
except BadObject:
return False
# END handle exceptions
|
(self, sha)
|
722,768
|
gitdb.db.base
|
info
| null |
def info(self, sha):
return self._db_query(sha).info(sha)
|
(self, sha)
|
722,769
|
gitdb.db.base
|
partial_to_complete_sha_hex
|
:return: 20 byte binary sha1 from the given less-than-40 byte hexsha (bytes or str)
:param partial_hexsha: hexsha with fewer than 40 bytes
:raise AmbiguousObjectName:
|
def partial_to_complete_sha_hex(self, partial_hexsha):
"""
:return: 20 byte binary sha1 from the given less-than-40 byte hexsha (bytes or str)
    :param partial_hexsha: hexsha with fewer than 40 bytes
:raise AmbiguousObjectName: """
databases = list()
_databases_recursive(self, databases)
partial_hexsha = force_text(partial_hexsha)
len_partial_hexsha = len(partial_hexsha)
if len_partial_hexsha % 2 != 0:
partial_binsha = hex_to_bin(partial_hexsha + "0")
else:
partial_binsha = hex_to_bin(partial_hexsha)
# END assure successful binary conversion
candidate = None
for db in databases:
full_bin_sha = None
try:
if hasattr(db, 'partial_to_complete_sha_hex'):
full_bin_sha = db.partial_to_complete_sha_hex(partial_hexsha)
else:
full_bin_sha = db.partial_to_complete_sha(partial_binsha, len_partial_hexsha)
# END handle database type
except BadObject:
continue
# END ignore bad objects
if full_bin_sha:
if candidate and candidate != full_bin_sha:
raise AmbiguousObjectName(partial_hexsha)
candidate = full_bin_sha
# END handle candidate
# END for each db
if not candidate:
raise BadObject(partial_binsha)
return candidate
|
(self, partial_hexsha)
|
722,770
|
gitdb.db.base
|
sha_iter
| null |
def sha_iter(self):
return chain(*(db.sha_iter() for db in self._dbs))
|
(self)
|
722,771
|
gitdb.db.base
|
size
|
:return: total size of all contained databases
|
def size(self):
""":return: total size of all contained databases"""
return reduce(lambda x, y: x + y, (db.size() for db in self._dbs), 0)
|
(self)
|
722,772
|
gitdb.db.base
|
stream
| null |
def stream(self, sha):
return self._db_query(sha).stream(sha)
|
(self, sha)
|
722,773
|
gitdb.db.base
|
update_cache
| null |
def update_cache(self, force=False):
# something might have changed, clear everything
self._db_cache.clear()
stat = False
for db in self._dbs:
if isinstance(db, CachingDB):
stat |= db.update_cache(force)
# END if is caching db
# END for each database to update
return stat
|
(self, force=False)
|
722,774
|
gitdb.stream
|
DecompressMemMapReader
|
Reads data in chunks from a memory map and decompresses it. The client sees
only the uncompressed data; file-like read calls handle on-demand
buffered decompression accordingly.
A constraint on the total size of bytes is activated, simulating
a logical file within a possibly larger physical memory area.
To read efficiently, you clearly don't want to read individual bytes; instead,
read a few kilobytes at least.
**Note:** The chunk size should be carefully selected, as it will involve quite a bit
of string copying due to the way zlib is implemented. It's very wasteful,
hence we try to find a good tradeoff between allocation time and the number of
times we actually allocate. A custom zlib implementation would be good here
to better support streamed reading - it would only need to keep the mmap
and decompress it into chunks, that's all ...
|
class DecompressMemMapReader(LazyMixin):
"""Reads data in chunks from a memory map and decompresses it. The client sees
only the uncompressed data, respective file-like read calls are handling on-demand
buffered decompression accordingly
A constraint on the total size of bytes is activated, simulating
a logical file within a possibly larger physical memory area
To read efficiently, you clearly don't want to read individual bytes, instead,
read a few kilobytes at least.
**Note:** The chunk-size should be carefully selected as it will involve quite a bit
of string copying due to the way the zlib is implemented. Its very wasteful,
hence we try to find a good tradeoff between allocation time and number of
times we actually allocate. An own zlib implementation would be good here
to better support streamed reading - it would only need to keep the mmap
and decompress it into chunks, that's all ... """
__slots__ = ('_m', '_zip', '_buf', '_buflen', '_br', '_cws', '_cwe', '_s', '_close',
'_cbr', '_phi')
max_read_size = 512 * 1024 # currently unused
def __init__(self, m, close_on_deletion, size=None):
"""Initialize with mmap for stream reading
:param m: must be content data - use new if you have object data and no size"""
self._m = m
self._zip = zlib.decompressobj()
self._buf = None # buffer of decompressed bytes
self._buflen = 0 # length of bytes in buffer
if size is not None:
self._s = size # size of uncompressed data to read in total
self._br = 0 # num uncompressed bytes read
self._cws = 0 # start byte of compression window
self._cwe = 0 # end byte of compression window
self._cbr = 0 # number of compressed bytes read
self._phi = False # is True if we parsed the header info
self._close = close_on_deletion # close the memmap on deletion ?
def _set_cache_(self, attr):
assert attr == '_s'
# only happens for size, which is a marker to indicate we still
# have to parse the header from the stream
self._parse_header_info()
def __del__(self):
self.close()
def _parse_header_info(self):
"""If this stream contains object data, parse the header info and skip the
stream to a point where each read will yield object content
:return: parsed type_string, size"""
# read header
# should really be enough, cgit uses 8192 I believe
# And for good reason !! This needs to be that high for the header to be read correctly in all cases
maxb = 8192
self._s = maxb
hdr = self.read(maxb)
hdrend = hdr.find(NULL_BYTE)
typ, size = hdr[:hdrend].split(BYTE_SPACE)
size = int(size)
self._s = size
# adjust internal state to match actual header length that we ignore
# The buffer will be depleted first on future reads
self._br = 0
hdrend += 1
self._buf = BytesIO(hdr[hdrend:])
self._buflen = len(hdr) - hdrend
self._phi = True
return typ, size
#{ Interface
@classmethod
def new(self, m, close_on_deletion=False):
"""Create a new DecompressMemMapReader instance for acting as a read-only stream
This method parses the object header from m and returns the parsed
type and size, as well as the created stream instance.
:param m: memory map on which to operate. It must be object data ( header + contents )
:param close_on_deletion: if True, the memory map will be closed once we are
being deleted"""
inst = DecompressMemMapReader(m, close_on_deletion, 0)
typ, size = inst._parse_header_info()
return typ, size, inst
def data(self):
""":return: random access compatible data we are working on"""
return self._m
def close(self):
"""Close our underlying stream of compressed bytes if this was allowed during initialization
:return: True if we closed the underlying stream
:note: can be called safely
"""
if self._close:
if hasattr(self._m, 'close'):
self._m.close()
self._close = False
# END handle resource freeing
def compressed_bytes_read(self):
"""
:return: number of compressed bytes read. This includes the bytes it
took to decompress the header ( if there was one )"""
# ABSTRACT: When decompressing a byte stream, it can be that the first
# x bytes which were requested match the first x bytes in the loosely
# compressed datastream. This is the worst-case assumption that the reader
# does, it assumes that it will get at least X bytes from X compressed bytes
        # in all cases.
# The caveat is that the object, according to our known uncompressed size,
# is already complete, but there are still some bytes left in the compressed
# stream that contribute to the amount of compressed bytes.
# How can we know that we are truly done, and have read all bytes we need
# to read ?
# Without help, we cannot know, as we need to obtain the status of the
# decompression. If it is not finished, we need to decompress more data
# until it is finished, to yield the actual number of compressed bytes
# belonging to the decompressed object
# We are using a custom zlib module for this, if its not present,
# we try to put in additional bytes up for decompression if feasible
# and check for the unused_data.
# Only scrub the stream forward if we are officially done with the
# bytes we were to have.
if self._br == self._s and not self._zip.unused_data:
# manipulate the bytes-read to allow our own read method to continue
# but keep the window at its current position
self._br = 0
if hasattr(self._zip, 'status'):
while self._zip.status == zlib.Z_OK:
self.read(mmap.PAGESIZE)
# END scrub-loop custom zlib
else:
# pass in additional pages, until we have unused data
while not self._zip.unused_data and self._cbr != len(self._m):
self.read(mmap.PAGESIZE)
# END scrub-loop default zlib
# END handle stream scrubbing
# reset bytes read, just to be sure
self._br = self._s
# END handle stream scrubbing
# unused data ends up in the unconsumed tail, which was removed
# from the count already
return self._cbr
#} END interface
def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
"""Allows to reset the stream to restart reading
:raise ValueError: If offset and whence are not 0"""
if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
raise ValueError("Can only seek to position 0")
# END handle offset
self._zip = zlib.decompressobj()
self._br = self._cws = self._cwe = self._cbr = 0
if self._phi:
self._phi = False
del(self._s) # trigger header parsing on first access
# END skip header
def read(self, size=-1):
if size < 1:
size = self._s - self._br
else:
size = min(size, self._s - self._br)
# END clamp size
if size == 0:
return b''
# END handle depletion
# deplete the buffer, then just continue using the decompress object
        # which has its own buffer. We just need this to transparently parse the
# header from the zlib stream
dat = b''
if self._buf:
if self._buflen >= size:
# have enough data
dat = self._buf.read(size)
self._buflen -= size
self._br += size
return dat
else:
dat = self._buf.read() # ouch, duplicates data
size -= self._buflen
self._br += self._buflen
self._buflen = 0
self._buf = None
# END handle buffer len
# END handle buffer
# decompress some data
# Abstract: zlib needs to operate on chunks of our memory map ( which may
# be large ), as it will otherwise and always fill in the 'unconsumed_tail'
        # attribute which possibly reads our whole map to the end, forcing
# everything to be read from disk even though just a portion was requested.
# As this would be a nogo, we workaround it by passing only chunks of data,
# moving the window into the memory map along as we decompress, which keeps
# the tail smaller than our chunk-size. This causes 'only' the chunk to be
# copied once, and another copy of a part of it when it creates the unconsumed
# tail. We have to use it to hand in the appropriate amount of bytes during
# the next read.
tail = self._zip.unconsumed_tail
if tail:
# move the window, make it as large as size demands. For code-clarity,
# we just take the chunk from our map again instead of reusing the unconsumed
            # tail. The latter one would save some memory copying, but we could end up
# with not getting enough data uncompressed, so we had to sort that out as well.
# Now we just assume the worst case, hence the data is uncompressed and the window
# needs to be as large as the uncompressed bytes we want to read.
self._cws = self._cwe - len(tail)
self._cwe = self._cws + size
else:
cws = self._cws
self._cws = self._cwe
self._cwe = cws + size
# END handle tail
# if window is too small, make it larger so zip can decompress something
if self._cwe - self._cws < 8:
self._cwe = self._cws + 8
# END adjust winsize
# takes a slice, but doesn't copy the data, it says ...
indata = self._m[self._cws:self._cwe]
# get the actual window end to be sure we don't use it for computations
self._cwe = self._cws + len(indata)
dcompdat = self._zip.decompress(indata, size)
# update the amount of compressed bytes read
# We feed possibly overlapping chunks, which is why the unconsumed tail
# has to be taken into consideration, as well as the unused data
# if we hit the end of the stream
# NOTE: Behavior changed in PY2.7 onward, which requires special handling to make the tests work properly.
# They are thorough, and I assume it is truly working.
# Why is this logic as convoluted as it is ? Please look at the table in
# https://github.com/gitpython-developers/gitdb/issues/19 to learn about the test-results.
        # Basically, on py2.6, you want to use branch 1, whereas on all other python versions, the second branch
# will be the one that works.
# However, the zlib VERSIONs as well as the platform check is used to further match the entries in the
# table in the github issue. This is it ... it was the only way I could make this work everywhere.
# IT's CERTAINLY GOING TO BITE US IN THE FUTURE ... .
if getattr(zlib, 'ZLIB_RUNTIME_VERSION', zlib.ZLIB_VERSION) in ('1.2.7', '1.2.5') and not sys.platform == 'darwin':
unused_datalen = len(self._zip.unconsumed_tail)
else:
unused_datalen = len(self._zip.unconsumed_tail) + len(self._zip.unused_data)
# # end handle very special case ...
self._cbr += len(indata) - unused_datalen
self._br += len(dcompdat)
if dat:
dcompdat = dat + dcompdat
# END prepend our cached data
# it can happen, depending on the compression, that we get less bytes
# than ordered as it needs the final portion of the data as well.
# Recursively resolve that.
# Note: dcompdat can be empty even though we still appear to have bytes
# to read, if we are called by compressed_bytes_read - it manipulates
# us to empty the stream
if dcompdat and (len(dcompdat) - len(dat)) < size and self._br < self._s:
dcompdat += self.read(size - len(dcompdat))
# END handle special case
return dcompdat
|
(m, close_on_deletion, size=None)
|
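A hedged usage sketch for the class above: it fabricates a zlib-compressed loose-object-style buffer in memory and reads it back through `DecompressMemMapReader.new`. It assumes gitdb is importable; a plain bytes object stands in for a real memory map, since the reader only slices and measures its input.

```python
import zlib

from gitdb.stream import DecompressMemMapReader

# fabricate a loose-object-style payload: "<type> <size>\0<content>", zlib-compressed
content = b"hello gitdb"
raw = b"blob " + str(len(content)).encode() + b"\x00" + content
buf = zlib.compress(raw)

# new() parses the header and returns (type, size, reader)
typ, size, reader = DecompressMemMapReader.new(buf)
print(typ, size)      # b'blob' 11
print(reader.read())  # b'hello gitdb'
reader.close()
```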
722,777
|
gitdb.stream
|
__init__
|
Initialize with mmap for stream reading
:param m: must be content data - use new if you have object data and no size
|
def __init__(self, m, close_on_deletion, size=None):
"""Initialize with mmap for stream reading
:param m: must be content data - use new if you have object data and no size"""
self._m = m
self._zip = zlib.decompressobj()
self._buf = None # buffer of decompressed bytes
self._buflen = 0 # length of bytes in buffer
if size is not None:
self._s = size # size of uncompressed data to read in total
self._br = 0 # num uncompressed bytes read
self._cws = 0 # start byte of compression window
self._cwe = 0 # end byte of compression window
self._cbr = 0 # number of compressed bytes read
self._phi = False # is True if we parsed the header info
self._close = close_on_deletion # close the memmap on deletion ?
|
(self, m, close_on_deletion, size=None)
|
722,778
|
gitdb.stream
|
_parse_header_info
|
If this stream contains object data, parse the header info and skip the
stream to a point where each read will yield object content
:return: parsed type_string, size
|
def _parse_header_info(self):
"""If this stream contains object data, parse the header info and skip the
stream to a point where each read will yield object content
:return: parsed type_string, size"""
# read header
# should really be enough, cgit uses 8192 I believe
# And for good reason !! This needs to be that high for the header to be read correctly in all cases
maxb = 8192
self._s = maxb
hdr = self.read(maxb)
hdrend = hdr.find(NULL_BYTE)
typ, size = hdr[:hdrend].split(BYTE_SPACE)
size = int(size)
self._s = size
# adjust internal state to match actual header length that we ignore
# The buffer will be depleted first on future reads
self._br = 0
hdrend += 1
self._buf = BytesIO(hdr[hdrend:])
self._buflen = len(hdr) - hdrend
self._phi = True
return typ, size
|
(self)
|
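The header being parsed here is the usual loose-object prefix: an ASCII type name, a space, the decimal content size, and a NUL byte. A minimal standalone sketch of the same split (`b"\x00"` and `b" "` correspond to gitdb's NULL_BYTE and BYTE_SPACE):

```python
def parse_loose_header(hdr: bytes):
    """Return (type_name, size, header_length) for a decompressed loose-object prefix."""
    hdrend = hdr.find(b"\x00")            # NULL_BYTE in gitdb
    typ, size = hdr[:hdrend].split(b" ")  # BYTE_SPACE in gitdb
    return typ, int(size), hdrend + 1     # +1 skips the NUL itself


print(parse_loose_header(b"commit 241\x00tree ..."))  # (b'commit', 241, 11)
```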
722,779
|
gitdb.stream
|
_set_cache_
| null |
def _set_cache_(self, attr):
assert attr == '_s'
# only happens for size, which is a marker to indicate we still
# have to parse the header from the stream
self._parse_header_info()
|
(self, attr)
|
722,780
|
gitdb.stream
|
close
|
Close our underlying stream of compressed bytes if this was allowed during initialization
:return: True if we closed the underlying stream
:note: can be called safely
|
def close(self):
"""Close our underlying stream of compressed bytes if this was allowed during initialization
:return: True if we closed the underlying stream
:note: can be called safely
"""
if self._close:
if hasattr(self._m, 'close'):
self._m.close()
self._close = False
# END handle resource freeing
|
(self)
|
722,781
|
gitdb.stream
|
compressed_bytes_read
|
:return: number of compressed bytes read. This includes the bytes it
took to decompress the header ( if there was one )
|
def compressed_bytes_read(self):
"""
:return: number of compressed bytes read. This includes the bytes it
took to decompress the header ( if there was one )"""
# ABSTRACT: When decompressing a byte stream, it can be that the first
# x bytes which were requested match the first x bytes in the loosely
# compressed datastream. This is the worst-case assumption that the reader
# does, it assumes that it will get at least X bytes from X compressed bytes
    # in all cases.
# The caveat is that the object, according to our known uncompressed size,
# is already complete, but there are still some bytes left in the compressed
# stream that contribute to the amount of compressed bytes.
# How can we know that we are truly done, and have read all bytes we need
# to read ?
# Without help, we cannot know, as we need to obtain the status of the
# decompression. If it is not finished, we need to decompress more data
# until it is finished, to yield the actual number of compressed bytes
# belonging to the decompressed object
# We are using a custom zlib module for this, if its not present,
# we try to put in additional bytes up for decompression if feasible
# and check for the unused_data.
# Only scrub the stream forward if we are officially done with the
# bytes we were to have.
if self._br == self._s and not self._zip.unused_data:
# manipulate the bytes-read to allow our own read method to continue
# but keep the window at its current position
self._br = 0
if hasattr(self._zip, 'status'):
while self._zip.status == zlib.Z_OK:
self.read(mmap.PAGESIZE)
# END scrub-loop custom zlib
else:
# pass in additional pages, until we have unused data
while not self._zip.unused_data and self._cbr != len(self._m):
self.read(mmap.PAGESIZE)
# END scrub-loop default zlib
# END handle stream scrubbing
# reset bytes read, just to be sure
self._br = self._s
# END handle stream scrubbing
# unused data ends up in the unconsumed tail, which was removed
# from the count already
return self._cbr
|
(self)
|
722,782
|
gitdb.stream
|
data
|
:return: random access compatible data we are working on
|
def data(self):
""":return: random access compatible data we are working on"""
return self._m
|
(self)
|
722,783
|
gitdb.stream
|
read
| null |
def read(self, size=-1):
if size < 1:
size = self._s - self._br
else:
size = min(size, self._s - self._br)
# END clamp size
if size == 0:
return b''
# END handle depletion
# deplete the buffer, then just continue using the decompress object
    # which has its own buffer. We just need this to transparently parse the
# header from the zlib stream
dat = b''
if self._buf:
if self._buflen >= size:
# have enough data
dat = self._buf.read(size)
self._buflen -= size
self._br += size
return dat
else:
dat = self._buf.read() # ouch, duplicates data
size -= self._buflen
self._br += self._buflen
self._buflen = 0
self._buf = None
# END handle buffer len
# END handle buffer
# decompress some data
# Abstract: zlib needs to operate on chunks of our memory map ( which may
# be large ), as it will otherwise and always fill in the 'unconsumed_tail'
    # attribute which possibly reads our whole map to the end, forcing
# everything to be read from disk even though just a portion was requested.
# As this would be a nogo, we workaround it by passing only chunks of data,
# moving the window into the memory map along as we decompress, which keeps
# the tail smaller than our chunk-size. This causes 'only' the chunk to be
# copied once, and another copy of a part of it when it creates the unconsumed
# tail. We have to use it to hand in the appropriate amount of bytes during
# the next read.
tail = self._zip.unconsumed_tail
if tail:
# move the window, make it as large as size demands. For code-clarity,
# we just take the chunk from our map again instead of reusing the unconsumed
        # tail. The latter one would save some memory copying, but we could end up
# with not getting enough data uncompressed, so we had to sort that out as well.
# Now we just assume the worst case, hence the data is uncompressed and the window
# needs to be as large as the uncompressed bytes we want to read.
self._cws = self._cwe - len(tail)
self._cwe = self._cws + size
else:
cws = self._cws
self._cws = self._cwe
self._cwe = cws + size
# END handle tail
# if window is too small, make it larger so zip can decompress something
if self._cwe - self._cws < 8:
self._cwe = self._cws + 8
# END adjust winsize
# takes a slice, but doesn't copy the data, it says ...
indata = self._m[self._cws:self._cwe]
# get the actual window end to be sure we don't use it for computations
self._cwe = self._cws + len(indata)
dcompdat = self._zip.decompress(indata, size)
# update the amount of compressed bytes read
# We feed possibly overlapping chunks, which is why the unconsumed tail
# has to be taken into consideration, as well as the unused data
# if we hit the end of the stream
# NOTE: Behavior changed in PY2.7 onward, which requires special handling to make the tests work properly.
# They are thorough, and I assume it is truly working.
# Why is this logic as convoluted as it is ? Please look at the table in
# https://github.com/gitpython-developers/gitdb/issues/19 to learn about the test-results.
    # Basically, on py2.6, you want to use branch 1, whereas on all other python versions, the second branch
# will be the one that works.
# However, the zlib VERSIONs as well as the platform check is used to further match the entries in the
# table in the github issue. This is it ... it was the only way I could make this work everywhere.
# IT's CERTAINLY GOING TO BITE US IN THE FUTURE ... .
if getattr(zlib, 'ZLIB_RUNTIME_VERSION', zlib.ZLIB_VERSION) in ('1.2.7', '1.2.5') and not sys.platform == 'darwin':
unused_datalen = len(self._zip.unconsumed_tail)
else:
unused_datalen = len(self._zip.unconsumed_tail) + len(self._zip.unused_data)
# # end handle very special case ...
self._cbr += len(indata) - unused_datalen
self._br += len(dcompdat)
if dat:
dcompdat = dat + dcompdat
# END prepend our cached data
# it can happen, depending on the compression, that we get less bytes
# than ordered as it needs the final portion of the data as well.
# Recursively resolve that.
# Note: dcompdat can be empty even though we still appear to have bytes
# to read, if we are called by compressed_bytes_read - it manipulates
# us to empty the stream
if dcompdat and (len(dcompdat) - len(dat)) < size and self._br < self._s:
dcompdat += self.read(size - len(dcompdat))
# END handle special case
return dcompdat
|
(self, size=-1)
|
722,784
|
gitdb.stream
|
seek
|
Allows to reset the stream to restart reading
:raise ValueError: If offset and whence are not 0
|
def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
"""Allows to reset the stream to restart reading
:raise ValueError: If offset and whence are not 0"""
if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
raise ValueError("Can only seek to position 0")
# END handle offset
self._zip = zlib.decompressobj()
self._br = self._cws = self._cwe = self._cbr = 0
if self._phi:
self._phi = False
del(self._s) # trigger header parsing on first access
# END skip header
|
(self, offset, whence=0)
|
722,785
|
gitdb.stream
|
DeltaApplyReader
|
A reader which dynamically applies pack deltas to a base object, keeping the
memory demands to a minimum.
The size of the final object is only obtainable once all deltas have been
applied, unless it is retrieved from a pack index.
The uncompressed Delta has the following layout (MSB being a most significant
bit encoded dynamic size):
* MSB Source Size - the size of the base against which the delta was created
* MSB Target Size - the size of the resulting data after the delta was applied
* A list of one byte commands (cmd) which are followed by a specific protocol:
* cmd & 0x80 - copy delta_data[offset:offset+size]
* Followed by an encoded offset into the delta data
* Followed by an encoded size of the chunk to copy
* cmd & 0x7f - insert
* insert cmd bytes from the delta buffer into the output stream
* cmd == 0 - invalid operation ( or error in delta stream )
|
class DeltaApplyReader(LazyMixin):
"""A reader which dynamically applies pack deltas to a base object, keeping the
memory demands to a minimum.
The size of the final object is only obtainable once all deltas have been
applied, unless it is retrieved from a pack index.
The uncompressed Delta has the following layout (MSB being a most significant
bit encoded dynamic size):
* MSB Source Size - the size of the base against which the delta was created
* MSB Target Size - the size of the resulting data after the delta was applied
* A list of one byte commands (cmd) which are followed by a specific protocol:
* cmd & 0x80 - copy delta_data[offset:offset+size]
* Followed by an encoded offset into the delta data
* Followed by an encoded size of the chunk to copy
* cmd & 0x7f - insert
* insert cmd bytes from the delta buffer into the output stream
* cmd == 0 - invalid operation ( or error in delta stream )
"""
__slots__ = (
"_bstream", # base stream to which to apply the deltas
"_dstreams", # tuple of delta stream readers
"_mm_target", # memory map of the delta-applied data
"_size", # actual number of bytes in _mm_target
"_br" # number of bytes read
)
#{ Configuration
k_max_memory_move = 250 * 1000 * 1000
#} END configuration
def __init__(self, stream_list):
"""Initialize this instance with a list of streams, the first stream being
the delta to apply on top of all following deltas, the last stream being the
base object onto which to apply the deltas"""
assert len(stream_list) > 1, "Need at least one delta and one base stream"
self._bstream = stream_list[-1]
self._dstreams = tuple(stream_list[:-1])
self._br = 0
def _set_cache_too_slow_without_c(self, attr):
# the direct algorithm is fastest and most direct if there is only one
# delta. Also, the extra overhead might not be worth it for items smaller
# than X - definitely the case in python, every function call costs
# huge amounts of time
# if len(self._dstreams) * self._bstream.size < self.k_max_memory_move:
if len(self._dstreams) == 1:
return self._set_cache_brute_(attr)
# Aggregate all deltas into one delta in reverse order. Hence we take
# the last delta, and reverse-merge its ancestor delta, until we receive
# the final delta data stream.
dcl = connect_deltas(self._dstreams)
# call len directly, as the (optional) c version doesn't implement the sequence
# protocol
if dcl.rbound() == 0:
self._size = 0
self._mm_target = allocate_memory(0)
return
# END handle empty list
self._size = dcl.rbound()
self._mm_target = allocate_memory(self._size)
bbuf = allocate_memory(self._bstream.size)
stream_copy(self._bstream.read, bbuf.write, self._bstream.size, 256 * mmap.PAGESIZE)
# APPLY CHUNKS
write = self._mm_target.write
dcl.apply(bbuf, write)
self._mm_target.seek(0)
def _set_cache_brute_(self, attr):
"""If we are here, we apply the actual deltas"""
# TODO: There should be a special case if there is only one stream
# Then the default-git algorithm should perform a tad faster, as the
        # delta is not peeked into, causing less overhead.
buffer_info_list = list()
max_target_size = 0
for dstream in self._dstreams:
buf = dstream.read(512) # read the header information + X
offset, src_size = msb_size(buf)
offset, target_size = msb_size(buf, offset)
buffer_info_list.append((buf[offset:], offset, src_size, target_size))
max_target_size = max(max_target_size, target_size)
# END for each delta stream
# sanity check - the first delta to apply should have the same source
# size as our actual base stream
base_size = self._bstream.size
target_size = max_target_size
# if we have more than 1 delta to apply, we will swap buffers, hence we must
# assure that all buffers we use are large enough to hold all the results
if len(self._dstreams) > 1:
base_size = target_size = max(base_size, max_target_size)
# END adjust buffer sizes
# Allocate private memory map big enough to hold the first base buffer
# We need random access to it
bbuf = allocate_memory(base_size)
stream_copy(self._bstream.read, bbuf.write, base_size, 256 * mmap.PAGESIZE)
# allocate memory map large enough for the largest (intermediate) target
# We will use it as scratch space for all delta ops. If the final
# target buffer is smaller than our allocated space, we just use parts
# of it upon return.
tbuf = allocate_memory(target_size)
# for each delta to apply, memory map the decompressed delta and
# work on the op-codes to reconstruct everything.
# For the actual copying, we use a seek and write pattern of buffer
# slices.
final_target_size = None
for (dbuf, offset, src_size, target_size), dstream in zip(reversed(buffer_info_list), reversed(self._dstreams)):
# allocate a buffer to hold all delta data - fill in the data for
# fast access. We do this as we know that reading individual bytes
# from our stream would be slower than necessary ( although possible )
# The dbuf buffer contains commands after the first two MSB sizes, the
# offset specifies the amount of bytes read to get the sizes.
ddata = allocate_memory(dstream.size - offset)
ddata.write(dbuf)
# read the rest from the stream. The size we give is larger than necessary
stream_copy(dstream.read, ddata.write, dstream.size, 256 * mmap.PAGESIZE)
#######################################################################
if 'c_apply_delta' in globals():
c_apply_delta(bbuf, ddata, tbuf)
else:
apply_delta_data(bbuf, src_size, ddata, len(ddata), tbuf.write)
#######################################################################
# finally, swap out source and target buffers. The target is now the
# base for the next delta to apply
bbuf, tbuf = tbuf, bbuf
bbuf.seek(0)
tbuf.seek(0)
final_target_size = target_size
# END for each delta to apply
        # it's already seeked to 0, constrain it to the actual size
        # NOTE: at the end of the loop, it swaps buffers, hence our target buffer
# is not tbuf, but bbuf !
self._mm_target = bbuf
self._size = final_target_size
#{ Configuration
if not has_perf_mod:
_set_cache_ = _set_cache_brute_
else:
_set_cache_ = _set_cache_too_slow_without_c
#} END configuration
def read(self, count=0):
bl = self._size - self._br # bytes left
if count < 1 or count > bl:
count = bl
# NOTE: we could check for certain size limits, and possibly
# return buffers instead of strings to prevent byte copying
data = self._mm_target.read(count)
self._br += len(data)
return data
def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
"""Allows to reset the stream to restart reading
:raise ValueError: If offset and whence are not 0"""
if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
raise ValueError("Can only seek to position 0")
# END handle offset
self._br = 0
self._mm_target.seek(0)
#{ Interface
@classmethod
def new(cls, stream_list):
"""
Convert the given list of streams into a stream which resolves deltas
when reading from it.
:param stream_list: two or more stream objects, first stream is a Delta
to the object that you want to resolve, followed by N additional delta
streams. The list's last stream must be a non-delta stream.
:return: Non-Delta OPackStream object whose stream can be used to obtain
the decompressed resolved data
:raise ValueError: if the stream list cannot be handled"""
if len(stream_list) < 2:
raise ValueError("Need at least two streams")
# END single object special handling
if stream_list[-1].type_id in delta_types:
raise ValueError(
"Cannot resolve deltas if there is no base object stream, last one was type: %s" % stream_list[-1].type)
# END check stream
return cls(stream_list)
#} END interface
#{ OInfo like Interface
@property
def type(self):
return self._bstream.type
@property
def type_id(self):
return self._bstream.type_id
@property
def size(self):
""":return: number of uncompressed bytes in the stream"""
return self._size
#} END oinfo like interface
|
(stream_list)
|
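The opcode layout in the docstring above follows git's pack delta format. Below is a hedged pure-Python sketch of applying such a command stream to a base buffer; it illustrates the documented protocol rather than gitdb's optimized `apply_delta_data`, and its copy commands read from the base object (the source the delta was computed against). The two leading MSB-encoded sizes are assumed to have been stripped already, as `DeltaApplyReader` does via `msb_size`.

```python
def apply_delta(base: bytes, delta: bytes) -> bytes:
    """Apply a git-style binary delta command stream to `base`."""
    out = bytearray()
    i = 0
    while i < len(delta):
        cmd = delta[i]
        i += 1
        if cmd & 0x80:  # copy command
            offset = size = 0
            # the low four flag bits select offset bytes, the next three select size bytes
            for bit, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if cmd & bit:
                    offset |= delta[i] << shift
                    i += 1
            for bit, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if cmd & bit:
                    size |= delta[i] << shift
                    i += 1
            if size == 0:
                size = 0x10000  # an encoded size of 0 means 64 KiB in the pack format
            out += base[offset:offset + size]
        elif cmd:  # insert command: the next `cmd` bytes are literal data
            out += delta[i:i + cmd]
            i += cmd
        else:
            raise ValueError("cmd == 0 is an invalid delta opcode")
    return bytes(out)


# tiny round trip: copy 5 bytes from base offset 0, then insert b" world"
base = b"hello base"
delta = bytes([0x80 | 0x10, 5, 6]) + b" world"
print(apply_delta(base, delta))  # b'hello world'
```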
722,787
|
gitdb.stream
|
__init__
|
Initialize this instance with a list of streams, the first stream being
the delta to apply on top of all following deltas, the last stream being the
base object onto which to apply the deltas
|
def __init__(self, stream_list):
"""Initialize this instance with a list of streams, the first stream being
the delta to apply on top of all following deltas, the last stream being the
base object onto which to apply the deltas"""
assert len(stream_list) > 1, "Need at least one delta and one base stream"
self._bstream = stream_list[-1]
self._dstreams = tuple(stream_list[:-1])
self._br = 0
|
(self, stream_list)
|
722,788
|
gitdb.stream
|
_set_cache_brute_
|
If we are here, we apply the actual deltas
|
def _set_cache_brute_(self, attr):
"""If we are here, we apply the actual deltas"""
# TODO: There should be a special case if there is only one stream
# Then the default-git algorithm should perform a tad faster, as the
    # delta is not peeked into, causing less overhead.
buffer_info_list = list()
max_target_size = 0
for dstream in self._dstreams:
buf = dstream.read(512) # read the header information + X
offset, src_size = msb_size(buf)
offset, target_size = msb_size(buf, offset)
buffer_info_list.append((buf[offset:], offset, src_size, target_size))
max_target_size = max(max_target_size, target_size)
# END for each delta stream
# sanity check - the first delta to apply should have the same source
# size as our actual base stream
base_size = self._bstream.size
target_size = max_target_size
# if we have more than 1 delta to apply, we will swap buffers, hence we must
# assure that all buffers we use are large enough to hold all the results
if len(self._dstreams) > 1:
base_size = target_size = max(base_size, max_target_size)
# END adjust buffer sizes
# Allocate private memory map big enough to hold the first base buffer
# We need random access to it
bbuf = allocate_memory(base_size)
stream_copy(self._bstream.read, bbuf.write, base_size, 256 * mmap.PAGESIZE)
# allocate memory map large enough for the largest (intermediate) target
# We will use it as scratch space for all delta ops. If the final
# target buffer is smaller than our allocated space, we just use parts
# of it upon return.
tbuf = allocate_memory(target_size)
# for each delta to apply, memory map the decompressed delta and
# work on the op-codes to reconstruct everything.
# For the actual copying, we use a seek and write pattern of buffer
# slices.
final_target_size = None
for (dbuf, offset, src_size, target_size), dstream in zip(reversed(buffer_info_list), reversed(self._dstreams)):
# allocate a buffer to hold all delta data - fill in the data for
# fast access. We do this as we know that reading individual bytes
# from our stream would be slower than necessary ( although possible )
# The dbuf buffer contains commands after the first two MSB sizes, the
# offset specifies the amount of bytes read to get the sizes.
ddata = allocate_memory(dstream.size - offset)
ddata.write(dbuf)
# read the rest from the stream. The size we give is larger than necessary
stream_copy(dstream.read, ddata.write, dstream.size, 256 * mmap.PAGESIZE)
#######################################################################
if 'c_apply_delta' in globals():
c_apply_delta(bbuf, ddata, tbuf)
else:
apply_delta_data(bbuf, src_size, ddata, len(ddata), tbuf.write)
#######################################################################
# finally, swap out source and target buffers. The target is now the
# base for the next delta to apply
bbuf, tbuf = tbuf, bbuf
bbuf.seek(0)
tbuf.seek(0)
final_target_size = target_size
# END for each delta to apply
    # it's already seeked to 0, constrain it to the actual size
    # NOTE: at the end of the loop, it swaps buffers, hence our target buffer
# is not tbuf, but bbuf !
self._mm_target = bbuf
self._size = final_target_size
|
(self, attr)
|
722,790
|
gitdb.stream
|
_set_cache_too_slow_without_c
| null |
def _set_cache_too_slow_without_c(self, attr):
# the direct algorithm is fastest and most direct if there is only one
# delta. Also, the extra overhead might not be worth it for items smaller
# than X - definitely the case in python, every function call costs
# huge amounts of time
# if len(self._dstreams) * self._bstream.size < self.k_max_memory_move:
if len(self._dstreams) == 1:
return self._set_cache_brute_(attr)
# Aggregate all deltas into one delta in reverse order. Hence we take
# the last delta, and reverse-merge its ancestor delta, until we receive
# the final delta data stream.
dcl = connect_deltas(self._dstreams)
# call len directly, as the (optional) c version doesn't implement the sequence
# protocol
if dcl.rbound() == 0:
self._size = 0
self._mm_target = allocate_memory(0)
return
# END handle empty list
self._size = dcl.rbound()
self._mm_target = allocate_memory(self._size)
bbuf = allocate_memory(self._bstream.size)
stream_copy(self._bstream.read, bbuf.write, self._bstream.size, 256 * mmap.PAGESIZE)
# APPLY CHUNKS
write = self._mm_target.write
dcl.apply(bbuf, write)
self._mm_target.seek(0)
|
(self, attr)
|
722,791
|
gitdb.stream
|
read
| null |
def read(self, count=0):
bl = self._size - self._br # bytes left
if count < 1 or count > bl:
count = bl
# NOTE: we could check for certain size limits, and possibly
# return buffers instead of strings to prevent byte copying
data = self._mm_target.read(count)
self._br += len(data)
return data
|
(self, count=0)
|
722,792
|
gitdb.stream
|
seek
|
Allows to reset the stream to restart reading
:raise ValueError: If offset and whence are not 0
|
def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
"""Allows to reset the stream to restart reading
:raise ValueError: If offset and whence are not 0"""
if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
raise ValueError("Can only seek to position 0")
# END handle offset
self._br = 0
self._mm_target.seek(0)
|
(self, offset, whence=0)
|
722,793
|
gitdb.stream
|
FDCompressedSha1Writer
|
Digests data written to it, making the sha available, then compresses the
data and writes it to the file descriptor
**Note:** operates on raw file descriptors
**Note:** for this to work, you have to use the close-method of this instance
|
class FDCompressedSha1Writer(Sha1Writer):
"""Digests data written to it, making the sha available, then compress the
data and write it to the file descriptor
**Note:** operates on raw file descriptors
**Note:** for this to work, you have to use the close-method of this instance"""
__slots__ = ("fd", "sha1", "zip")
# default exception
exc = IOError("Failed to write all bytes to filedescriptor")
def __init__(self, fd):
super().__init__()
self.fd = fd
self.zip = zlib.compressobj(zlib.Z_BEST_SPEED)
#{ Stream Interface
def write(self, data):
""":raise IOError: If not all bytes could be written
:return: length of incoming data"""
self.sha1.update(data)
cdata = self.zip.compress(data)
bytes_written = write(self.fd, cdata)
if bytes_written != len(cdata):
raise self.exc
return len(data)
def close(self):
remainder = self.zip.flush()
if write(self.fd, remainder) != len(remainder):
raise self.exc
return close(self.fd)
#} END stream interface
|
(fd)
|
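A hedged usage sketch for the writer above, assuming gitdb is importable: data written to it is SHA-1 hashed in its uncompressed form while the zlib-compressed bytes go to the raw file descriptor, and `close()` must be called so the compressor's final flush reaches the file.

```python
import os
import tempfile
import zlib

from gitdb.stream import FDCompressedSha1Writer

fd, path = tempfile.mkstemp()  # mkstemp hands back a raw descriptor, as the class expects

writer = FDCompressedSha1Writer(fd)
writer.write(b"blob 11\x00hello gitdb")
writer.close()  # flushes the compressor and closes the descriptor

print(writer.sha(as_hex=True))  # sha over the *uncompressed* bytes
with open(path, "rb") as fh:
    print(zlib.decompress(fh.read()))  # b'blob 11\x00hello gitdb'
os.remove(path)
```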
722,794
|
gitdb.stream
|
__init__
| null |
def __init__(self, fd):
super().__init__()
self.fd = fd
self.zip = zlib.compressobj(zlib.Z_BEST_SPEED)
|
(self, fd)
|
722,795
|
gitdb.stream
|
close
| null |
def close(self):
remainder = self.zip.flush()
if write(self.fd, remainder) != len(remainder):
raise self.exc
return close(self.fd)
|
(self)
|
722,796
|
gitdb.stream
|
sha
|
:return: sha so far
:param as_hex: if True, sha will be hex-encoded, binary otherwise
|
def sha(self, as_hex=False):
""":return: sha so far
:param as_hex: if True, sha will be hex-encoded, binary otherwise"""
if as_hex:
return self.sha1.hexdigest()
return self.sha1.digest()
|
(self, as_hex=False)
|
722,797
|
gitdb.stream
|
write
|
:raise IOError: If not all bytes could be written
:return: length of incoming data
|
def write(self, data):
""":raise IOError: If not all bytes could be written
:return: length of incoming data"""
self.sha1.update(data)
cdata = self.zip.compress(data)
bytes_written = write(self.fd, cdata)
if bytes_written != len(cdata):
raise self.exc
return len(data)
|
(self, data)
|
722,798
|
gitdb.stream
|
FDStream
|
A simple wrapper providing the most basic functions on a file descriptor
with the fileobject interface. Cannot use os.fdopen as the resulting stream
takes ownership
|
class FDStream:
"""A simple wrapper providing the most basic functions on a file descriptor
with the fileobject interface. Cannot use os.fdopen as the resulting stream
takes ownership"""
__slots__ = ("_fd", '_pos')
def __init__(self, fd):
self._fd = fd
self._pos = 0
def write(self, data):
self._pos += len(data)
os.write(self._fd, data)
def read(self, count=0):
if count == 0:
count = os.path.getsize(self._filepath)
# END handle read everything
bytes = os.read(self._fd, count)
self._pos += len(bytes)
return bytes
def fileno(self):
return self._fd
def tell(self):
return self._pos
def close(self):
close(self._fd)
|
(fd)
|
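A hedged usage sketch for FDStream, here wrapping the two ends of an `os.pipe()`. Note from the code above that `read(0)` would consult a `_filepath` attribute that is never assigned, so the sketch always passes an explicit count.

```python
import os

from gitdb.stream import FDStream

r_fd, w_fd = os.pipe()

out = FDStream(w_fd)
out.write(b"ping")  # os.write on the raw descriptor; the internal position advances
out.close()

src = FDStream(r_fd)
print(src.read(4))  # b'ping' - always pass a count, read(0) hits the unset _filepath
print(src.tell())   # 4
src.close()
```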
722,799
|
gitdb.stream
|
__init__
| null |
def __init__(self, fd):
self._fd = fd
self._pos = 0
|
(self, fd)
|