| docstring | function | __index_level_0__ |
|---|---|---|
Extract all tagged items from text.
Args:
text (str):
Text to extract the tagged items from. Each tagged item is a
paragraph that starts with a tag. It can also be a text list item.
tags (list[dict]):
The tag definitions; each entry has a ``header`` and a ``tag`` key.
Returns:
dict[str, list[str]]:
A mapping from each tag header to the list of items extracted for
that tag from the given text.
The tagged items are usually features/changes/fixes, but this can be
configured through `pelconf.yaml`.
|
def extract_changelog_items(text, tags):
# type: (str, List[Dict[str, str]]) -> Dict[str, List[str]]
patterns = {x['header']: tag_re(x['tag']) for x in tags}
items = {x['header']: [] for x in tags}
curr_tag = None
curr_text = ''
for line in text.splitlines():
if not line.strip():
if curr_tag is not None:
items[curr_tag].append(curr_text)
curr_text = ''
curr_tag = None
for tag in tags:
m = patterns[tag['header']].match(line)
if m:
if curr_tag is not None:
items[curr_tag].append(curr_text)
curr_text = ''
curr_tag = tag['header']
line = m.group('text')
break
if curr_tag is not None:
curr_text = '{} {}'.format(curr_text.strip(), line.strip()).strip()
if curr_tag is not None:
items[curr_tag].append(curr_text)
return items
| 751,033
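A minimal usage sketch. It assumes a `tags` configuration shaped like the `pelconf.yaml` entries the docstring mentions, and that `tag_re` (not shown here) builds a pattern exposing a named `text` group:

tags = [
    {'header': 'Features', 'tag': 'feature'},
    {'header': 'Fixes', 'tag': 'fix'},
]
text = 'feature: Add docker registry support\n\nfix: Handle empty changelog sections\n'
items = extract_changelog_items(text, tags)
# items == {'Features': ['Add docker registry support'],
#           'Fixes': ['Handle empty changelog sections']}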
|
List docker images stored in the remote registry.
Args:
registry_pass (str):
Remote docker registry password.
|
def docker_list(registry_pass):
# type: (str) -> None
registry = conf.get('docker.registry', None)
if registry is None:
log.err("You must define docker.registry conf variable to list images")
sys.exit(-1)
registry_user = conf.get('docker.registry_user', None)
if registry_user is None:
registry_user = click.prompt("Username")
rc = client.RegistryClient(registry, registry_user, registry_pass)
images = {x: rc.list_tags(x) for x in rc.list_images()}
shell.cprint("<32>Images in <34>{} <32>registry:", registry)
for image, tags in images.items():
shell.cprint(' <92>{}', image)
for tag in tags:
shell.cprint(' <90>{}:<35>{}', image, tag)
| 751,126
|
Build docker image.
Args:
registry (str):
The name of the registry this image belongs to. If not given, the
resulting image will have a name without the registry.
image (dict[str, Any]):
The dict containing the information about the built image. This is
the same dictionary as defined in DOCKER_IMAGES variable.
|
def build_image(registry, image):
# type: (str, Dict[str, Any]) -> None
if ':' in image['name']:
_, tag = image['name'].split(':', 1)
else:
_, tag = image['name'], None
values = {
'registry': '' if registry is None else registry + '/',
'image': image['name'],
'tag': tag,
}
if tag is None:
args = [
'-t {registry}{image}'.format(**values),
'-t {registry}{image}:{version}'.format(
version=versioning.current(),
**values
),
]
else:
args = ['-t {registry}{image}'.format(**values)]
if 'file' in image:
args.append('-f {}'.format(conf.proj_path(image['file'])))
with conf.within_proj_dir(image.get('path', '.')):
log.info("Building <33>{registry}<35>/{image}", **values)
shell.run('docker build {args} .'.format(args=' '.join(args)))
| 751,127
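For context, a hypothetical `DOCKER_IMAGES`-style entry and call; the exact keys beyond `name`, `file`, and `path` are assumptions read off the code above:

image = {
    'name': 'myapp',        # or 'myapp:stable' to pin an explicit tag
    'file': 'Dockerfile',   # optional, resolved via conf.proj_path()
    'path': '.',            # optional build context, defaults to '.'
}
build_image('registry.example.com', image)
# Runs roughly:
#   docker build -t registry.example.com/myapp \
#                -t registry.example.com/myapp:<current version> .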
|
Push the given image to selected repository.
Args:
registry (str):
The name of the registry we're pushing to. This is the address of
the repository without the protocol specification (no http(s)://)
image (dict[str, Any]):
The dict containing the information about the image. This is the
same dictionary as defined in DOCKER_IMAGES variable.
|
def push_image(registry, image):
# type: (str, Dict[str, Any]) -> None
values = {
'registry': registry,
'image': image['name'],
}
log.info("Pushing <33>{registry}<35>/{image}".format(**values))
shell.run('docker push {registry}/{image}'.format(**values))
| 751,128
|
Start working on a new feature by branching off develop.
This will create a new branch off develop called feature/<name>.
Args:
name (str):
The name of the new feature.
|
def start(name):
# type: (str) -> None
feature_name = 'feature/' + common.to_branch_name(name)
develop = conf.get('git.devel_branch', 'develop')
common.assert_on_branch(develop)
common.git_checkout(feature_name, create=True)
| 751,198
|
Remove all unnecessary files.
The ``pretend`` flag is read from the runtime context: if set to
**True**, no files are deleted and the command only shows what would
be deleted.
Args:
exclude (list[str]):
A list of path patterns to exclude from deletion.
|
def clean(exclude):
# type: (List[str]) -> None
pretend = context.get('pretend', False)
exclude = list(exclude) + conf.get('clean.exclude', [])
clean_patterns = conf.get('clean.patterns', [
'*__pycache__*',
'*.py[cod]',
'*.swp',
])
num_files = 0
with util.timed_block() as t:
files = fs.filtered_walk(conf.proj_path(), clean_patterns, exclude)
for path in files:
try:
num_files += 1
if not isdir(path):
log.info(' <91>[file] <90>{}', path)
if not pretend: os.remove(path)
else:
log.info(' <91>[dir] <90>{}', path)
if not pretend: rmtree(path)
except OSError:
log.info("<33>Failed to remove <90>{}", path)
if pretend:
msg = "Would delete <33>{}<32> files. Took <33>{}<32>s"
else:
msg = "Deleted <33>{}<32> files in <33>{}<32>s"
log.info(msg.format(num_files, t.elapsed_s))
| 751,208
|
Render the given Markdown text to HTML.
This is a direct interface to ``cmark_markdown_to_html``.
Args:
text (str): The Markdown text to render.
options (int): The cmark options.
Returns:
str: The rendered HTML.
|
def markdown_to_html(text, options=0):
encoded_text = text.encode('utf-8')
raw_result = _cmark.lib.cmark_markdown_to_html(
encoded_text, len(encoded_text), options)
return _cmark.ffi.string(raw_result).decode('utf-8')
| 752,050
|
Render the given Markdown text to HTML, using extensions.
This is a high-level wrapper over the various functions needed to enable
extensions, attach them to a parser, and render HTML.
Args:
text (str): The Markdown text to render.
options (int): The cmark options.
extensions (Sequence[str]): The list of extension names to use.
Returns:
str: The rendered HTML.
|
def markdown_to_html_with_extensions(text, options=0, extensions=None):
if extensions is None:
extensions = []
core_extensions_ensure_registered()
cmark_extensions = []
for extension_name in extensions:
extension = find_syntax_extension(extension_name)
if extension is None:
raise ValueError('Unknown extension {}'.format(extension_name))
cmark_extensions.append(extension)
parser = parser_new(options=options)
try:
for extension in cmark_extensions:
parser_attach_syntax_extension(parser, extension)
parser_feed(parser, text)
root = parser_finish(parser)
if _cmark.lib.cmark_node_get_type(root) == _cmark.lib.CMARK_NODE_NONE:
raise ValueError('Error parsing markdown!')
extensions_ll = parser_get_syntax_extensions(parser)
output = render_html(root, options=options, extensions=extensions_ll)
finally:
parser_free(parser)
return output
| 752,051
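A usage sketch; 'strikethrough' is one of the core cmark-gfm extensions that core_extensions_ensure_registered() is assumed to register:

html = markdown_to_html_with_extensions(
    'Hello ~~world~~!',
    extensions=['strikethrough'],
)
# '<p>Hello <del>world</del>!</p>\n'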
|
Parse a document and return the root node.
Args:
text (str): The text to parse.
options (int): The cmark options.
Returns:
Any: Opaque reference to the root node of the parsed syntax tree.
|
def parse_document(text, options=0):
encoded_text = text.encode('utf-8')
return _cmark.lib.cmark_parse_document(
encoded_text, len(encoded_text), options)
| 752,052
|
Render a given syntax tree as HTML.
Args:
root (Any): The reference to the root node of the syntax tree.
options (int): The cmark options.
extensions (Any): The reference to the syntax extensions, generally
from :func:`parser_get_syntax_extensions`
Returns:
str: The rendered HTML.
|
def render_html(root, options=0, extensions=None):
if extensions is None:
extensions = _cmark.ffi.NULL
raw_result = _cmark.lib.cmark_render_html(
root, options, extensions)
return _cmark.ffi.string(raw_result).decode('utf-8')
| 752,054
|
Django view get function.
Add items of extra_context, crumbs and grid to the context.
Args:
request (HttpRequest): Django's request object.
*args: request args.
**kwargs: request kwargs.
Returns:
HttpResponse: the rendered response with the updated context.
|
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
context.update(self.extra_context)
context['crumbs'] = self.get_crumbs()
context['title'] = self.title
context['suit'] = 'suit' in settings.INSTALLED_APPS
if context.get('dashboard_grid', None) is None and self.grid:
context['dashboard_grid'] = self.grid
return self.render_to_response(context)
| 752,551
|
Return a widget as real-time.
Args:
widget (Widget): the widget to register and return as real-time.
url_name (str): the URL name to call to get updated content.
url_regex (regex): the URL regex to be matched.
time_interval (int): the interval of refreshment in milliseconds.
Returns:
Widget: the "real-timed" widget.
|
def realtime(widget, url_name=None, url_regex=None, time_interval=None):
if not hasattr(widget, 'get_updated_content'):
raise AttributeError('Widget %s must implement get_updated_content '
'method.' % widget)
elif not callable(widget.get_updated_content):
raise ValueError('get_updated_content in widget %s is not callable'
% widget)
if url_name is None:
if getattr(widget, 'url_name', None) is not None:
url_name = widget.url_name
else:
url_name = widget.__class__.__name__
if url_name in [w.url_name for w in REALTIME_WIDGETS]:
raise ValueError('URL name %s is already used by another '
'real time widget.' % url_name)
if url_regex is None:
if getattr(widget, 'url_regex', None) is not None:
url_regex = widget.url_regex
else:
url_regex = sha256(url_name.encode('utf-8'))
url_regex = url_regex.hexdigest()[:32]
url_regex = 'realtime/' + url_regex
if url_regex in [w.url_regex for w in REALTIME_WIDGETS]:
raise ValueError('URL regex %s is already used by another '
'real time widget.' % url_regex)
if time_interval is None:
if getattr(widget, 'time_interval', None) is not None:
time_interval = widget.time_interval
else:
time_interval = app_settings.default_time_interval
from django.views.generic import View
from braces.views import AjaxResponseMixin, JSONResponseMixin
# pylama:ignore=C0111,R0201
class PartialResponse(JSONResponseMixin, AjaxResponseMixin, View):
def get_data(self):
return widget.get_updated_content()
def get(self, request, *args, **kwargs):
return self.get_ajax(request, *args, **kwargs)
def get_ajax(self, request, *args, **kwargs):
return self.render_json_response(self.get_data())
PartialResponse.url_name = url_name
PartialResponse.url_regex = url_regex
PartialResponse.time_interval = time_interval
REALTIME_WIDGETS.append(PartialResponse)
if not hasattr(widget, 'url_name'):
widget.url_name = url_name
if not hasattr(widget, 'url_regex'):
widget.url_regex = url_regex
if not hasattr(widget, 'time_interval'):
widget.time_interval = time_interval
return widget
| 752,552
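A sketch of registering a widget as real-time; the Widget subclass and its helpers are illustrative, not part of the source:

class CpuWidget(Widget):
    template = 'dashboard/cpu.html'

    def get_updated_content(self):
        return {'load': read_cpu_load()}  # read_cpu_load is hypothetical

cpu_widget = realtime(CpuWidget(), url_name='cpu', time_interval=2000)
# The generated PartialResponse view is appended to REALTIME_WIDGETS and
# later exposed through get_realtime_urls().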
|
Init method.
Args:
html_id (str): an ID to set on the HTML item.
name (str): the name of the item, displayed in HTML.
content (): suitable content according to chosen display.
template (str): the template responsible for display.
classes (str): additional classes to pass to the HTML item.
|
def __init__(self,
html_id=None,
name=None,
content=None,
template=None,
classes=None,
**kwargs):
if html_id is not None:
try:
self.html_id = html_id
except AttributeError:
self._html_id = html_id
if name is not None:
try:
self.name = name
except AttributeError:
self._name = name
if content is not None:
try:
self.content = content
except AttributeError:
self._content = content
if template is not None:
try:
self.template = template
except AttributeError:
self._template = template
if classes is not None:
try:
self.classes = classes
except AttributeError:
self._classes = classes
if not hasattr(self, 'template'):
raise AttributeError('template is a required widget attribute')
for kw, arg in kwargs.items():
setattr(self, kw, arg)
| 752,553
|
Init method.
Args:
*rows (): the instances of Row.
**kwargs (): not used.
|
def __init__(self, *rows, **kwargs):
if not all([isinstance(r, Row) for r in rows]):
raise TypeError('All elements of Grid must be Row instances')
self.type = 'grid'
self.rows = rows
| 752,554
|
Init method.
Args:
*columns (): the instances of Column.
**kwargs (): not used.
|
def __init__(self, *columns, **kwargs):
if not all([isinstance(c, Column) for c in columns]):
raise TypeError('All elements of Row must be Column instances')
self.type = 'row'
self.columns = columns
| 752,555
|
Init method.
Args:
*elements (): the rows or boxes.
**kwargs: the width can be passed through the keyword args [1-12].
|
def __init__(self, *elements, **kwargs):
if not all([isinstance(e, Row) or issubclass(type(e), Box)
for e in elements]):
raise TypeError('All elements of Column must '
'be Row or Box instances')
width = kwargs.pop('width', 12)
if width not in range(1, 13):
raise ValueError('Column width must be between 1 and 12')
self.type = 'column'
self.elements = elements
self.width = width
| 752,556
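Putting the three containers together, a minimal layout sketch (cpu_widget and ram_widget stand in for Widget instances; Box is defined below):

grid = Grid(
    Row(
        Column(Box(title='CPU', widgets=[cpu_widget]), width=6),
        Column(Box(title='RAM', widgets=[ram_widget]), width=6),
    ),
)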
|
Init method.
Args:
html_id (str): an ID to set on the HTML box.
title (str): a title to display on the top of the box.
description (str): a description to display after the title box.
widgets (list): the box's list of widgets.
template (str): the path to a custom template to use for this box.
context (dict): additional context to pass to the box.
|
def __init__(self,
html_id=None,
title=None,
description=None,
widgets=None,
template=None,
context=None,
**kwargs):
if widgets is not None:
if not isinstance(widgets, (list, tuple)):
raise AttributeError('Box widgets attribute '
'must be a list or tuple')
if not all([isinstance(e, Widget) for e in widgets]):
raise ValueError('All elements of Box must be Widget instances') # noqa
try:
self.widgets = widgets
except AttributeError:
self._widgets = widgets
self.type = 'box'
if html_id is not None:
try:
self.html_id = html_id
except AttributeError:
self._html_id = html_id
if title is not None:
try:
self.title = title
except AttributeError:
self._title = title
if description is not None:
try:
self.description = description
except AttributeError:
self._description = description
if template is not None:
try:
self.template = template
except AttributeError:
self._template = template
if context is not None:
try:
self.context = context
except AttributeError:
self._context = context
for kw, arg in kwargs.items():
setattr(self, kw, arg)
| 752,557
|
Get the URLs for real-time widgets.
Args:
admin_view_func (callable): an admin_view method from an AdminSite
instance. By default: identity.
Returns:
list: the list of the real-time URLs as django's ``url()``.
|
def get_realtime_urls(admin_view_func=lambda x: x):
from .widgets import REALTIME_WIDGETS
return [url(w.url_regex, admin_view_func(w.as_view()), name=w.url_name)
for w in REALTIME_WIDGETS]
| 752,558
|
Convert response data to a task type model.
Args:
response_data (dict): The data from the request's response.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A model instance representing the task type from the
response data.
|
def response_data_to_model_instance(self, response_data):
# Coerce datetime strings into datetime objects
response_data["datetime_created"] = dateutil.parser.parse(
response_data["datetime_created"]
)
# Instantiate a model for the task instance
return super(
BaseTaskTypeManager, self
).response_data_to_model_instance(response_data)
| 753,670
|
Read the query result into a DataFrame via [pandas.read_sql].
Keyword Arguments:
**kwargs -- [passed through to pandas.read_sql]
Returns:
[pd.DataFrame or generator] -- [the query result; a generator of
DataFrames when chunksize is passed]
|
def to_df(self, **kwargs):
return pd.read_sql(sql=self.statement, con=self.session.bind, **kwargs)
| 753,718
|
Represents a for operator.
Parameters:
iterator: _PyteAugmentedValidator
A :class:`.PyteAugmentedValidator` representing the iterable. This
should be a saved value that is iterable, i.e. a saved list or similar.
body: list
A list of instructions to execute on each loop, similarly to IF.
|
def __init__(self, iterator: _PyteAugmentedValidator, body: list):
self.iterator = iterator
self._body = list(util.flatten(body))
| 753,997
|
Initialize a user.
Args:
username (str): The user's username.
email (str): The user's email.
manager (:class:`saltant.models.user.UserManager`):
The manager which spawned this user instance.
|
def __init__(self, username, email, manager):
# Call parent constructor
super(User, self).__init__(manager)
# Add in user stuff
self.username = username
self.email = email
| 754,226
|
Save the GADDAG to file.
Args:
path: path to save the GADDAG to.
compressed: compress the saved GADDAG using gzip.
exist_ok: overwrite existing file at `path`.
|
def save(self, path, compressed=True, exist_ok=False):
path = os.path.expandvars(os.path.expanduser(path))
if os.path.isfile(path) and not exist_ok:
raise OSError(17, os.strerror(17), path)
if os.path.isdir(path):
path = os.path.join(path, "out.gdg")
if compressed:
bytes_written = cgaddag.gdg_save_compressed(self.gdg, path.encode("ascii"))
else:
bytes_written = cgaddag.gdg_save(self.gdg, path.encode("ascii"))
if bytes_written == -1:
errno = ctypes.c_int.in_dll(ctypes.pythonapi, "errno").value
raise OSError(errno, os.strerror(errno), path)
return bytes_written
| 754,254
|
Load a GADDAG from file, replacing the words currently in this GADDAG.
Args:
path: path to saved GADDAG to be loaded.
|
def load(self, path):
path = os.path.expandvars(os.path.expanduser(path))
gdg = cgaddag.gdg_load(path.encode("ascii"))
if not gdg:
errno = ctypes.c_int.in_dll(ctypes.pythonapi, "errno").value
raise OSError(errno, os.strerror(errno), path)
self.__del__()
self.gdg = gdg.contents
| 754,255
|
Find all words starting with a prefix.
Args:
prefix: A prefix to be searched for.
Returns:
A list of all words found.
|
def starts_with(self, prefix):
prefix = prefix.lower()
found_words = []
res = cgaddag.gdg_starts_with(self.gdg, prefix.encode(encoding="ascii"))
tmp = res
while tmp:
word = tmp.contents.str.decode("ascii")
found_words.append(word)
tmp = tmp.contents.next
cgaddag.gdg_destroy_result(res)
return found_words
| 754,256
|
Find all words containing a substring.
Args:
sub: A substring to be searched for.
Returns:
A list of all words found.
|
def contains(self, sub):
sub = sub.lower()
found_words = set()
res = cgaddag.gdg_contains(self.gdg, sub.encode(encoding="ascii"))
tmp = res
while tmp:
word = tmp.contents.str.decode("ascii")
found_words.add(word)
tmp = tmp.contents.next
cgaddag.gdg_destroy_result(res)
return list(found_words)
| 754,257
|
Find all words ending with a suffix.
Args:
suffix: A suffix to be searched for.
Returns:
A list of all words found.
|
def ends_with(self, suffix):
suffix = suffix.lower()
found_words = []
res = cgaddag.gdg_ends_with(self.gdg, suffix.encode(encoding="ascii"))
tmp = res
while tmp:
word = tmp.contents.str.decode("ascii")
found_words.append(word)
tmp = tmp.contents.next
cgaddag.gdg_destroy_result(res)
return found_words
| 754,258
|
Add a word to the GADDAG.
Args:
word: A word to be added to the GADDAG.
|
def add_word(self, word):
word = word.lower()
if not (word.isascii() and word.isalpha()):
raise ValueError("Invalid character in word '{}'".format(word))
result = cgaddag.gdg_add_word(self.gdg, word.encode(encoding="ascii"))
if result == 1:
raise ValueError("Invalid character in word '{}'".format(word))
elif result == 2:
raise MemoryError("Out of memory, GADDAG is in an undefined state")
| 754,259
|
Write a raw env value.
A ``None`` value clears the environment variable.
Args:
name: The environment variable name
value: The value to write
|
def write(name, value):
if value is not None:
environ[name] = builtins.str(value)
elif name in environ:
del environ[name]
| 754,276
|
Read the raw env value.
Read the raw environment variable or use the default. If the value is not
found and no default is set, a KeyError is raised.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
fallback: A list of fallback env variables to try and read if the primary environment
variable is unavailable.
|
def read(name, default=None, allow_none=False, fallback=None):
raw_value = environ.get(name)
if raw_value is None and fallback is not None:
if not isinstance(fallback, builtins.list) and not isinstance(fallback, builtins.tuple):
fallback = [fallback]
for fall in fallback:
raw_value = environ.get(fall)
if raw_value is not None:
break
if raw_value is not None:
return raw_value
elif default is not None or allow_none:
return default
else:
raise KeyError('Set the "{0}" environment variable'.format(name))
| 754,278
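A few examples of how the lookup order plays out, given the precedence in the code above:

# assuming os.environ == {'DB_HOST': 'db1', 'LEGACY_HOST': 'db0'}
read('DB_HOST')                          # 'db1'
read('MISSING', default='fallback')      # 'fallback'
read('MISSING', fallback='LEGACY_HOST')  # 'db0'
read('MISSING', allow_none=True)         # None
read('MISSING')                          # raises KeyError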
|
Get a string based environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
fallback: A list of fallback env variables to try if the primary one is not set
|
def str(name, default=None, allow_none=False, fallback=None):
value = read(name, default, allow_none, fallback=fallback)
if value is None and allow_none:
return None
else:
return builtins.str(value).strip()
| 754,280
|
Get a boolean based environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
fallback: A list of fallback env variables to try if the primary one is not set
|
def bool(name, default=None, allow_none=False, fallback=None):
value = read(name, default, allow_none, fallback=fallback)
if isinstance(value, builtins.bool):
return value
elif isinstance(value, builtins.int):
return value > 0
elif value is None and allow_none:
return None
else:
value_str = builtins.str(value).lower().strip()
return _strtobool(value_str)
| 754,281
|
Get an integer based environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
fallback: A list of fallback env variables to try if the primary one is not set
|
def int(name, default=None, allow_none=False, fallback=None):
value = read(name, default, allow_none, fallback=fallback)
if isinstance(value, builtins.str):
value = value.strip()
if value is None and allow_none:
return None
else:
return builtins.int(value)
| 754,282
|
Get a list of strings or the default.
The individual list elements are whitespace-stripped.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
fallback: A list of fallback env variables to try if the primary one is not set
separator: The list item separator character or pattern
|
def list(name, default=None, allow_none=False, fallback=None, separator=','):
value = read(name, default, allow_none, fallback=fallback)
if isinstance(value, builtins.list):
return value
elif isinstance(value, builtins.str):
return _str_to_list(value, separator)
elif value is None and allow_none:
return None
else:
return [builtins.str(value)]
| 754,283
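The typed getters all build on read; a short sketch (the module name envitro is taken from the docker docstrings further below):

import envitro
# assuming os.environ == {'DEBUG': 'true', 'WORKERS': ' 4 ', 'HOSTS': 'a, b ,c'}
envitro.bool('DEBUG')   # True, via _strtobool
envitro.int('WORKERS')  # 4, whitespace stripped before conversion
envitro.list('HOSTS')   # ['a', 'b', 'c'], elements whitespace-stripped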
|
Traverse the GADDAG to the node at the end of the given characters.
Args:
chars: A string of characters to traverse in the GADDAG.
Returns:
The Node which is found by traversing the tree.
|
def follow(self, chars):
chars = chars.lower()
node = self.node
for char in chars:
node = cgaddag.gdg_follow_edge(self.gdg, node, char.encode("ascii"))
if not node:
raise KeyError(char)
return Node(self.gdg, node)
| 754,701
|
Get the raw docker link value.
Get the raw environment variable for the docker link
Args:
alias_name: The docker link alias name
allow_none: If the return value can be `None` (i.e. optional)
|
def read(alias_name, allow_none=False):
warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)
return core.read('{0}_PORT'.format(alias_name), default=None, allow_none=allow_none)
| 754,705
|
Return True if the docker link is set and looks like a valid docker link value.
Args:
alias_name: The link alias name
|
def isset(alias_name):
warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)
raw_value = read(alias_name, allow_none=True)
if raw_value:
if re.compile(r'.+://.+:\d+').match(raw_value):
return True
else:
warnings.warn('"{0}_PORT={1}" does not look like a docker link.'.format(alias_name, raw_value), stacklevel=2)
return False
return False
| 754,707
|
Get the protocol from the docker link alias or return the default.
Args:
alias_name: The docker link alias
default: The default value if the link isn't available
allow_none: If the return value can be `None` (i.e. optional)
Examples:
Assuming a Docker link was created with ``docker --link postgres:db``
and the resulting environment variable is ``DB_PORT=tcp://172.17.0.82:5432``.
>>> envitro.docker.protocol('DB')
'tcp'
|
def protocol(alias_name, default=None, allow_none=False):
warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)
try:
return _split_docker_link(alias_name)[0]
except KeyError as err:
if default or allow_none:
return default
else:
raise err
| 754,708
|
Get the port from the docker link alias or return the default.
Args:
alias_name: The docker link alias
default: The default value if the link isn't available
allow_none: If the return value can be `None` (i.e. optional)
Examples:
Assuming a Docker link was created with ``docker --link postgres:db``
and the resulting environment variable is ``DB_PORT=tcp://172.17.0.82:5432``.
>>> envitro.docker.port('DB')
5432
|
def port(alias_name, default=None, allow_none=False):
warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)
try:
return int(_split_docker_link(alias_name)[2])
except KeyError as err:
if default or allow_none:
return default
else:
raise err
| 754,709
|
Get the model instance with a given id.
Args:
id (int or str): The primary identifier (e.g., pk or UUID)
for the task instance to get.
Returns:
:class:`saltant.models.resource.Model`:
A :class:`saltant.models.resource.Model` subclass
instance representing the resource requested.
|
def get(self, id):
# Get the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
response = self._client.session.get(request_url)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance
return self.response_data_to_model_instance(response.json())
| 755,004
|
Validates that a request was successful.
Args:
response_text (str): The response body of the request.
request_url (str): The URL the request was made at.
status_code (int): The status code of the response.
expected_status_code (int): The expected status code of the
response.
Raises:
:class:`saltant.exceptions.BadHttpRequestError`: The HTTP
request failed.
|
def validate_request_success(
response_text, request_url, status_code, expected_status_code
):
if status_code != expected_status_code:
msg = (
"Request to {url} failed with status {status_code}:\n"
"The response from the request was as follows:\n\n"
"{content}"
).format(
url=request_url, status_code=status_code, content=response_text
)
raise BadHttpRequestError(msg)
| 755,005
|
Wait until a task instance with the given UUID is finished.
Args:
refresh_period (int, optional): How many seconds to wait
before checking the task's status. Defaults to 5
seconds.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
This task instance model after it finished.
|
def wait_until_finished(
self, refresh_period=DEFAULT_TASK_INSTANCE_WAIT_REFRESH_PERIOD
):
return self.manager.wait_until_finished(
uuid=self.uuid, refresh_period=refresh_period
)
| 755,008
|
Clone the task instance with given UUID.
Args:
uuid (str): The UUID of the task instance to clone.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
A task instance model instance representing the task
instance created due to the clone.
|
def clone(self, uuid):
# Clone the object
request_url = self._client.base_api_url + self.clone_url.format(
id=uuid
)
response = self._client.session.post(request_url)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_201_CREATED,
)
# Return a model instance
return self.response_data_to_model_instance(response.json())
| 755,010
|
Terminate the task instance with given UUID.
Args:
uuid (str): The UUID of the task instance to terminate.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
A task instance model instance representing the task
instance that was told to terminate.
|
def terminate(self, uuid):
# Terminate the object
request_url = self._client.base_api_url + self.terminate_url.format(
id=uuid
)
response = self._client.session.post(request_url)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_202_ACCEPTED,
)
# Return a model instance
return self.response_data_to_model_instance(response.json())
| 755,011
|
Convert response data to a task instance model.
Args:
response_data (dict): The data from the request's response.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
A task instance model instance representing the task
instance from the response data.
|
def response_data_to_model_instance(self, response_data):
# Coerce datetime strings into datetime objects
response_data["datetime_created"] = dateutil.parser.parse(
response_data["datetime_created"]
)
if response_data["datetime_finished"]:
response_data["datetime_finished"] = dateutil.parser.parse(
response_data["datetime_finished"]
)
# Instantiate a model for the task instance
return super(
BaseTaskInstanceManager, self
).response_data_to_model_instance(response_data)
| 755,013
|
Add arguments described in the usage string to the
parser. View more details at the <create_parser> docs
Args:
parser (ArgumentParser): parser to add arguments to
usage (str): Usage string in the format described above
ignore_existing (bool): Ignore any arguments that have already been defined
|
def add_to_parser(parser, usage, ignore_existing=False):
usage = '\n' + usage
first_line = [i for i in usage.split('\n') if i][0]
indent = ' ' * (len(first_line) - len(first_line.lstrip(' ')))
usage = usage.replace('\n' + indent, '\n')
usage = usage.replace('\n...', '')
defaults = {}
description, *descriptors = usage.split('\n:')
description = description.replace('\n', ' ').strip()
if description and (not parser.description or not ignore_existing):
parser.description = description
for descriptor in descriptors:
try:
options, *info = descriptor.split('\n')
info = ' '.join(i for i in info if i).replace('  ', '')
if options.count(' ') == 1:
if options[0] == '-':
short, long = options.split(' ')
var_name = long.strip('-').replace('-', '_')
parser.add_argument(short, long, dest=var_name, action='store_true', help=info)
defaults[var_name] = False
else:
short, typ = options.split(' ')
parser.add_argument(short, type=types[typ], help=info)
else:
short, long, typ, default = options.split(' ')
info = info.rstrip() + '. Default: ' + default
default = '' if default == '-' else default
parser.add_argument(short, long, type=types[typ], default=default, help=info)
except ArgumentError:
if not ignore_existing:
raise
except Exception as e:
print(e.__class__.__name__ + ': ' + str(e))
print('While parsing:')
print(descriptor)
raise ValueError('Failed to create parser from usage string')
| 755,175
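The exact usage-string format is not shown in this excerpt; the following example is reverse-engineered from the parsing logic above and assumes a module-level types mapping such as {'str': str, 'int': int}:

from argparse import ArgumentParser

usage = """
    Frobnicate an input file.
    :file str
    Path to the input file
    :-v --verbose
    Print extra output
    :-n --count int 1
    Number of passes
"""
parser = ArgumentParser()
add_to_parser(parser, usage)
args = parser.parse_args(['data.txt', '-v', '-n', '3'])
# args.file == 'data.txt', args.verbose is True, args.count == 3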
|
[update the table with the given object(s)]
Arguments:
t_obj {obj or iterable of objs of DeclarativeMeta} -- [the object(s) to add to the session]
|
def update(self, t_obj):
if isinstance(t_obj, Iterable):
self._session.add_all(t_obj)
else:
self._session.add(t_obj)
| 755,400
|
[insert bulk data]
Arguments:
table {[DeclarativeMeta cls]} -- [reflection of table]
insert_obj {[pd.DataFrame or list of dicts]} -- [the rows to insert]
Keyword Arguments:
ignore {bool} -- [whether to ignore duplicate-key errors via INSERT IGNORE] (default: {True})
Raises:
ValueError -- [if insert_obj is empty or not a DataFrame/list of dicts]
Returns:
[ResultProxy] -- [the result of executing the INSERT statement]
|
def insert(self, table, insert_obj, ignore=True):
if isinstance(insert_obj, pd.DataFrame):
if insert_obj.empty:
raise ValueError('The input DataFrame is empty, please check!')
insert_obj = insert_obj.to_dict(orient='records')
elif not isinstance(insert_obj, list):
raise ValueError(
f"The {reprlib.repr(insert_obj)} must be list of dicts type!")
ignore_str = 'IGNORE' if ignore else ''
return self._session.execute(
table.__table__.insert().prefix_with(ignore_str), insert_obj)
| 755,401
|
Temporarily change or set the environment variable during the execution of a function.
Args:
name: The name of the environment variable
value: A value to set for the environment variable
Returns:
The function return value.
|
def write(name, value):
def wrapped(func):
@functools.wraps(func)
def _decorator(*args, **kwargs):
existing_env = core.read(name, allow_none=True)
core.write(name, value)
func_val = func(*args, **kwargs)
core.write(name, existing_env)
return func_val
return _decorator
return wrapped
| 755,588
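A sketch of the decorator in use, e.g. to pin an environment variable for the duration of a test (module name envitro assumed, as elsewhere in this file):

import envitro

@write('API_URL', 'http://localhost:8000')
def probe():
    return envitro.str('API_URL')

probe()  # 'http://localhost:8000'; afterwards API_URL is restored
# (or removed again if it was unset before the call)

Note the previous value is only restored on a normal return; wrapping the call in try/finally would also cover the exception path.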
|
Only execute the function if the variable is set.
Args:
name: The name of the environment variable
Returns:
The function return value or `None` if the function was skipped.
|
def isset(name):
def wrapped(func):
@functools.wraps(func)
def _decorator(*args, **kwargs):
if core.isset(name):
return func(*args, **kwargs)
return _decorator
return wrapped
| 755,589
|
Only execute the function if the boolean variable is set.
Args:
name: The name of the environment variable
execute_bool: The boolean value to execute the function on
default: The default value if the environment variable is not set (respects `execute_bool`)
Returns:
The function return value or `None` if the function was skipped.
|
def bool(name, execute_bool=True, default=None):
def wrapped(func):
@functools.wraps(func)
def _decorator(*args, **kwargs):
if core.isset(name) and core.bool(name) == execute_bool:
return func(*args, **kwargs)
elif default is not None and default == execute_bool:
return func(*args, **kwargs)
return _decorator
return wrapped
| 755,590
|
Encodes text into `(samples, aux_indices..., token)` where each token is mapped to a unique index starting
from `i`. `i` is the number of special tokens.
Args:
lang: The spacy language to use. (Default value: 'en')
lower: Lower cases the tokens if True. (Default value: True)
special_token: The tokens that are reserved. Default: ['<PAD>', '<UNK>'], <PAD> for the padding token and <UNK> for unknown words.
|
def __init__(self,
lang='en',
lower=True,
special_token=['<PAD>', '<UNK>']): # 0 - Pad, 1 - Unknown
self.lang = lang
self.lower = lower
self.special_token = special_token
self._token2idx = dict()
self._idx2token = dict()
self._token_counts = defaultdict(int)
self._num_texts = 0
self._counts = None
| 757,017
|
Decodes the texts using internal vocabulary. The list structure is maintained.
Args:
encoded_texts: The list of texts to decode.
unknown_token: The placeholder value for unknown token. (Default value: "<UNK>")
inplace: True to make changes inplace. (Default value: True)
Returns:
The decoded texts.
|
def decode_texts(self, encoded_texts, unknown_token="<UNK>", inplace=True):
if len(self._token2idx) == 0:
raise ValueError(
"You need to build vocabulary using `build_vocab` before using `decode_texts`")
if not isinstance(encoded_texts, list):
# assume it's a numpy array
encoded_texts = encoded_texts.tolist()
if not inplace:
encoded_texts = deepcopy(encoded_texts)
utils._recursive_apply(encoded_texts,
lambda token_id: self._idx2token.get(token_id) or unknown_token)
return encoded_texts
| 757,021
|
Builds the internal vocabulary and computes various statistics.
Args:
texts: The list of text items to encode.
verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
**kwargs: The kwargs for `token_generator`.
|
def build_vocab(self, texts, verbose=1, **kwargs):
if self.has_vocab:
logger.warn(
"Tokenizer already has existing vocabulary. Overriding and building new vocabulary.")
progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
count_tracker = utils._CountTracker()
self._token_counts.clear()
self._num_texts = len(texts)
for token_data in self.token_generator(texts, **kwargs):
indices, token = token_data[:-1], token_data[-1]
count_tracker.update(indices)
self._token_counts[token] += 1
# Update progressbar per document level.
progbar.update(indices[0])
# Generate token2idx and idx2token.
self.create_token_indices(self._token_counts.keys())
# All done. Finalize progressbar update and count tracker.
count_tracker.finalize()
self._counts = count_tracker.counts
progbar.update(len(texts))
| 757,024
|
Retrieves embeddings index from embedding name or path. Will automatically download and cache as needed.
Args:
embedding_type: The embedding type to load.
embedding_dims: The number of dimensions per word vector, used when building the index.
embedding_path: Path to a local embedding to use instead of the embedding type. Ignores `embedding_type` if specified.
cache: Whether to keep the loaded index in the in-memory cache. (Default value: True)
The embeddings indexed by word.
|
def get_embeddings_index(embedding_type='glove.42B.300d', embedding_dims=None, embedding_path=None, cache=True):
if embedding_path is not None:
embedding_type = embedding_path # identify embedding by path
embeddings_index = _EMBEDDINGS_CACHE.get(embedding_type)
if embeddings_index is not None:
return embeddings_index
if embedding_path is None:
embedding_type_obj = get_embedding_type(embedding_type)
# some very rough wrangling of zip files with the keras util `get_file`
# a special problem: when multiple files are in one zip file
extract = embedding_type_obj.get('extract', True)
file_path = get_file(
embedding_type_obj['file'], origin=embedding_type_obj['url'], extract=extract, cache_subdir='embeddings', file_hash=embedding_type_obj.get('file_hash',))
if 'file_in_zip' in embedding_type_obj:
zip_folder = file_path.split('.zip')[0]
with ZipFile(file_path, 'r') as zf:
zf.extractall(zip_folder)
file_path = os.path.join(
zip_folder, embedding_type_obj['file_in_zip'])
else:
if extract:
if file_path.endswith('.zip'):
file_path = file_path.split('.zip')[0]
# if file_path.endswith('.gz'):
# file_path = file_path.split('.gz')[0]
else:
file_path = embedding_path
embeddings_index = _build_embeddings_index(file_path, embedding_dims)
if cache:
_EMBEDDINGS_CACHE[embedding_type] = embeddings_index
return embeddings_index
| 757,033
|
Encodes text into `(samples, characters)`
Args:
lang: The spacy language to use. (Default value: 'en')
lower: Lower cases the tokens if True. (Default value: True)
charset: The character set to use. For example `charset = 'abc123'`. If None, all characters will be used.
(Default value: None)
|
def __init__(self,
lang='en',
lower=True,
charset=None):
super(CharTokenizer, self).__init__(lang, lower)
self.charset = charset
| 757,034
|
Encodes text into `(samples, sentences, characters)`
Args:
lang: The spacy language to use. (Default value: 'en')
lower: Lower cases the tokens if True. (Default value: True)
charset: The character set to use. For example `charset = 'abc123'`. If None, all characters will be used.
(Default value: None)
|
def __init__(self,
lang='en',
lower=True,
charset=None):
super(SentenceCharTokenizer, self).__init__(lang, lower, charset)
| 757,036
|
Yields tokens from texts as `(text_idx, sent_idx, character)`
Args:
texts: The list of texts.
**kwargs: Supported args include:
n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
batch_size: The number of texts to accumulate into a common working set before processing.
(Default value: 1000)
|
def token_generator(self, texts, **kwargs):
# Perf optimization. Only process what is necessary.
n_threads, batch_size = utils._parse_spacy_kwargs(**kwargs)
nlp = spacy.load(self.lang)
kwargs = {
'batch_size': batch_size,
'n_threads': n_threads,
'disable': ['ner']
}
# Perf optimization: Lower the entire text instead of individual tokens.
texts_gen = utils._apply_generator(
texts, lambda x: x.lower()) if self.lower else texts
for text_idx, doc in enumerate(nlp.pipe(texts_gen, **kwargs)):
for sent_idx, sent in enumerate(doc.sents):
for word in sent:
for char in word:
yield text_idx, sent_idx, char
| 757,037
|
Creates `folds` number of index lists that have a roughly balanced multi-label distribution.
Args:
y: The multi-label outputs.
folds: The number of folds to create.
Returns:
`folds` number of indices that have roughly equal multi-label distributions.
|
def equal_distribution_folds(y, folds=2):
n, classes = y.shape
# Compute sample distribution over classes
dist = y.sum(axis=0).astype('float')
dist /= dist.sum()
index_list = []
fold_dist = np.zeros((folds, classes), dtype='float')
for _ in range(folds):
index_list.append([])
for i in range(n):
if i < folds:
target_fold = i
else:
normed_folds = fold_dist.T / fold_dist.sum(axis=1)
how_off = normed_folds.T - dist
target_fold = np.argmin(
np.dot((y[i] - .5).reshape(1, -1), how_off.T))
fold_dist[target_fold] += y[i]
index_list[target_fold].append(i)
logger.debug("Fold distributions:")
logger.debug(fold_dist)
return index_list
| 757,038
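A small worked example (numpy assumed; the function itself also relies on the module's logger):

import numpy as np

y = np.array([
    [1, 0],
    [0, 1],
    [1, 1],
    [1, 0],
    [0, 1],
    [1, 1],
])
folds = equal_distribution_folds(y, folds=2)
# Two lists of indices; each fold's per-class positive counts roughly
# match the overall distribution (here 50/50 across the two classes).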
|
Creates a test split with roughly the same multi-label distribution as `y`.
Args:
y: The multi-label outputs.
test_size: The test size in [0, 1]
Returns:
The train and test indices.
|
def multi_label_train_test_split(y, test_size=0.2):
if test_size <= 0 or test_size >= 1:
raise ValueError("`test_size` should be between 0 and 1")
# Find the smallest rational number.
frac = Fraction(test_size).limit_denominator()
test_folds, total_folds = frac.numerator, frac.denominator
logger.warn('Inferring test_size as {}/{}. Generating {} folds. The algorithm might fail if denominator is large.'
.format(test_folds, total_folds, total_folds))
folds = equal_distribution_folds(y, folds=total_folds)
test_indices = np.concatenate(folds[:test_folds])
train_indices = np.concatenate(folds[test_folds:])
return train_indices, test_indices
| 757,039
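The fold count is inferred by converting test_size into a rational number; for example:

from fractions import Fraction

frac = Fraction(0.2).limit_denominator()
frac.numerator, frac.denominator  # (1, 5): 1 test fold out of 5 total
# So test_size=0.2 builds 5 balanced folds, concatenating 1 of them into
# the test indices and the remaining 4 into the train indices.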
|
Setup data
Args:
X: text data,
y: data labels,
tokenizer: A Tokenizer instance
proc_data_path: Path for the processed data
|
def setup_data(X, y, tokenizer, proc_data_path, **kwargs):
# only build vocabulary once (e.g. training data)
train = not tokenizer.has_vocab
if train:
tokenizer.build_vocab(X)
process_save(X, y, tokenizer, proc_data_path,
train=train, **kwargs)
return tokenizer
| 757,047
|
Splits data into a training, validation, and test set.
Args:
X: text data
y: data labels
ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1)
Returns:
split data: X_train, X_val, X_test, y_train, y_val, y_test
|
def split_data(X, y, ratio=(0.8, 0.1, 0.1)):
assert(sum(ratio) == 1 and len(ratio) == 3)
X_train, X_rest, y_train, y_rest = train_test_split(
X, y, train_size=ratio[0])
X_val, X_test, y_val, y_test = train_test_split(
X_rest, y_rest, train_size=ratio[1] / (ratio[1] + ratio[2]))
return X_train, X_val, X_test, y_train, y_val, y_test
| 757,048
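With the default ratio, the sizes work out as follows (a quick sketch, assuming sklearn's train_test_split):

import numpy as np

X = np.arange(100)
y = np.arange(100)
X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y)
# len(X_train), len(X_val), len(X_test) == (80, 10, 10)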
|
Setup data while splitting into a training, validation, and test set.
Args:
X: text data,
y: data labels,
tokenizer: A Tokenizer instance
proc_data_dir: Directory for the split and processed data
|
def setup_data_split(X, y, tokenizer, proc_data_dir, **kwargs):
X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y)
# only build vocabulary on training data
tokenizer.build_vocab(X_train)
process_save(X_train, y_train, tokenizer, path.join(
proc_data_dir, 'train.bin'), train=True, **kwargs)
process_save(X_val, y_val, tokenizer, path.join(
proc_data_dir, 'val.bin'), **kwargs)
process_save(X_test, y_test, tokenizer, path.join(
proc_data_dir, 'test.bin'), **kwargs)
| 757,049
|
Loads a split dataset
Args:
proc_data_dir: Directory with the split and processed data
Returns:
(Training Data, Validation Data, Test Data)
|
def load_data_split(proc_data_dir):
ds_train = Dataset.load(path.join(proc_data_dir, 'train.bin'))
ds_val = Dataset.load(path.join(proc_data_dir, 'val.bin'))
ds_test = Dataset.load(path.join(proc_data_dir, 'test.bin'))
return ds_train, ds_val, ds_test
| 757,050
|
A `Sequence` implementation that can pre-process a mini-batch via `process_fn`
Args:
X: The numpy array of inputs.
y: The numpy array of targets.
batch_size: The generator mini-batch size.
process_fn: The preprocessing function to apply on `X`
|
def __init__(self, X, y, batch_size, process_fn=None):
self.X = X
self.y = y
self.batch_size = batch_size
self.process_fn = process_fn or (lambda x: x)
| 757,060
|
A `Sequence` implementation that returns balanced `y` by undersampling majority class.
Args:
X: The numpy array of inputs.
y: The numpy array of targets.
batch_size: The generator mini-batch size.
process_fn: The preprocessing function to apply on `X`
|
def __init__(self, X, y, batch_size, process_fn=None):
self.X = X
self.y = y
self.batch_size = batch_size
self.process_fn = process_fn or (lambda x: x)
self.pos_indices = np.where(y == 1)[0]
self.neg_indices = np.where(y == 0)[0]
self.n = min(len(self.pos_indices), len(self.neg_indices))
self._index_array = None
| 757,062
|
Yields tokens from texts as `(text_idx, word)`
Args:
texts: The list of texts.
**kwargs: Supported args include:
n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
batch_size: The number of texts to accumulate into a common working set before processing.
(Default value: 1000)
|
def token_generator(self, texts, **kwargs):
# Perf optimization. Only process what is necessary.
n_threads, batch_size = utils._parse_spacy_kwargs(**kwargs)
nlp = spacy.load(self.lang)
disabled = ['parser']
if len(self.exclude_entities) == 0:
# NER is only needed when there are entities to exclude.
disabled.append('ner')
kwargs = {
'batch_size': batch_size,
'n_threads': n_threads,
'disable': disabled
}
for text_idx, doc in enumerate(nlp.pipe(texts, **kwargs)):
for word in doc:
processed_word = self._apply_options(word)
if processed_word is not None:
yield text_idx, processed_word
| 757,067
|
Build the actual model here.
Args:
x: The encoded or embedded input sequence.
Returns:
The model output tensor.
|
def __call__(self, x):
x = self.build_model(x)
if self.dropout_rate > 0:
x = Dropout(self.dropout_rate)(x)
return x
| 757,074
|
Yoon Kim's shallow cnn model: https://arxiv.org/pdf/1408.5882.pdf
Args:
num_filters: The number of filters to use per `filter_size`. (Default value = 64)
filter_sizes: The filter sizes for each convolutional layer. (Default value = [3, 4, 5])
dropout_rate: The dropout rate applied after the convolutions. (Default value = 0.5)
**conv_kwargs: Additional args for building the `Conv1D` layer.
|
def __init__(self, num_filters=64, filter_sizes=[3, 4, 5], dropout_rate=0.5, **conv_kwargs):
super(YoonKimCNN, self).__init__(dropout_rate)
self.num_filters = num_filters
self.filter_sizes = filter_sizes
self.conv_kwargs = conv_kwargs
| 757,075
|
Alexander Rakhlin's CNN model: https://github.com/alexander-rakhlin/CNN-for-Sentence-Classification-in-Keras/
Args:
num_filters: The number of filters to use per `filter_size`. (Default value = 20)
filter_sizes: The filter sizes for each convolutional layer. (Default value = [3, 8])
dropout_rate: Array for one dropout layer after the embedding and one before the final dense layer. (Default value = [0.5, 0.8])
hidden_dims: The number of hidden units in the dense layer. (Default value = 20)
|
def __init__(self, num_filters=20, filter_sizes=[3, 8], dropout_rate=[0.5, 0.8], hidden_dims=20, **conv_kwargs):
super(AlexCNN, self).__init__(dropout_rate[0])
self.num_filters = num_filters
self.filter_sizes = filter_sizes
self.dropout_rate = dropout_rate[0]
self.dropout_rate2 = dropout_rate[1]
self.hidden_dims = hidden_dims
self.conv_kwargs = conv_kwargs
| 757,078
|
Creates a stacked RNN.
Args:
rnn_class: The type of RNN to use. (Default value = LSTM)
hidden_dims: The number of hidden units for each stacked RNN layer. (Default value = [50, 50])
bidirectional: Whether to use bidirectional encoding. (Default value = True)
dropout_rate: The dropout rate applied after the RNN layers. (Default value = 0.5)
**rnn_kwargs: Additional args for building the RNN.
|
def __init__(self, rnn_class=LSTM, hidden_dims=[50, 50], bidirectional=True, dropout_rate=0.5, **rnn_kwargs):
super(StackedRNN, self).__init__(dropout_rate)
self.rnn_class = rnn_class
self.hidden_dims = hidden_dims
self.bidirectional = bidirectional
self.rnn_kwargs = rnn_kwargs
| 757,080
|
Parse the spacy tokenization kwargs.
Args:
n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
batch_size: The number of texts to accumulate into a common working set before processing.
(Default value: 1000)
|
def _parse_spacy_kwargs(**kwargs):
n_threads = kwargs.get('n_threads') or kwargs.get('num_threads')
batch_size = kwargs.get('batch_size')
if n_threads is None or n_threads == -1:
n_threads = cpu_count() - 1
if batch_size is None or batch_size == -1:
batch_size = 1000
return n_threads, batch_size
| 757,089
|
Read the text files in a directory and return their contents as a list.
Args:
directory: where the text files are
Returns:
A list with the content of each file
|
def read_folder(directory):
res = []
for filename in os.listdir(directory):
with io.open(os.path.join(directory, filename), encoding="utf-8") as f:
content = f.read()
res.append(content)
return res
| 757,094
|
Downloads (and caches) the IMDB Movie Reviews dataset: 25k training items, 25k test items.
Args:
limit: get only the first N items for each class
shuffle: whether to shuffle the train and test sets (Default value: True)
Returns:
[X_train, X_test, y_train, y_test]
|
def imdb(limit=None, shuffle=True):
movie_review_url = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
# download and extract, thus remove the suffix '.tar.gz'
path = keras.utils.get_file(
'aclImdb.tar.gz', movie_review_url, extract=True)[:-7]
X_train, y_train = read_pos_neg_data(path, 'train', limit)
X_test, y_test = read_pos_neg_data(path, 'test', limit)
if shuffle:
X_train, y_train = sklearn.utils.shuffle(X_train, y_train)
X_test, y_test = sklearn.utils.shuffle(X_test, y_test)
return X_train, X_test, y_train, y_test
| 757,096
|
Creates an instance of the APIEndpoint class.
Args:
api - Gophish.client - The authenticated REST client
endpoint - str - The URL path to the resource endpoint
cls - gophish.models.Model - The Class to use when parsing results
|
def __init__(self, api, endpoint=None, cls=None):
self.api = api
self.endpoint = endpoint
self._cls = cls
| 757,533
|
Creates a new instance of the resource.
Args:
resource - gophish.models.Model - The resource instance
|
def post(self, resource):
response = self.api.execute(
"POST", self.endpoint, json=(resource.as_dict()))
if not response.ok:
raise Error.parse(response.json())
return self._cls.parse(response.json())
| 757,535
|
Edits an existing resource
Args:
resource - gophish.models.Model - The resource instance
|
def put(self, resource):
endpoint = self.endpoint
if resource.id:
endpoint = self._build_url(endpoint, resource.id)
response = self.api.execute("PUT", endpoint, json=resource.as_dict())
if not response.ok:
raise Error.parse(response.json())
return self._cls.parse(response.json())
| 757,536
|
Deletes an existing resource
Args:
resource_id - int - The resource ID to be deleted
|
def delete(self, resource_id):
endpoint = '{}/{}'.format(self.endpoint, resource_id)
response = self.api.execute("DELETE", endpoint)
if not response.ok:
raise Error.parse(response.json())
return self._cls.parse(response.json())
| 757,537
|
Check that this file exists and is not a directory.
This function will check whether the given filename
actually exists and is not a directory.
Arguments:
filename {string} -- filename
Return:
True -- if the file exists and is not a directory
Raises ValueError -- if the file does not exist or is a directory
|
def check_if_this_file_exist(filename):
#get the absolute path
filename = os.path.abspath(filename)
#Boolean
this_file_exist = os.path.exists(filename)
a_directory = os.path.isdir(filename)
result = this_file_exist and not a_directory
if not result:
raise ValueError('The filename given was either non existent or was a directory')
return result
| 757,692
|
Return a JSON representation of the exif.
Gets a filename and returns a JSON object.
Arguments:
filename {string} -- your filename
Returns:
[JSON] -- Return a JSON object
|
def get_json(filename):
check_if_this_file_exist(filename)
#Process this function
filename = os.path.abspath(filename)
s = command_line(['exiftool', '-G', '-j', '-sort', filename])
if s:
#convert bytes to string
s = s.decode('utf-8').rstrip('\r\n')
return json.loads(s)
else:
return s
| 757,695
|
Return a CSV representation of the exif.
Gets a filename and returns a unicode string in CSV format.
Arguments:
filename {string} -- your filename
Returns:
[unicode] -- unicode string
|
def get_csv(filename):
check_if_this_file_exist(filename)
#Process this function
filename = os.path.abspath(filename)
s = command_line(['exiftool', '-G', '-csv', '-sort', filename])
if s:
#convert bytes to string
s = s.decode('utf-8')
return s
else:
return 0
| 757,696
|
Compute the running average of `k` successive elements of `t`. Return the averaged array.
Parameters:
t:
Python list or rank-1 array
k:
int, >= 2, how many successive elements to average
Returns:
rank-1 array, averaged data. If k > len(t), returns a zero-length array.
Caveat:
This is slightly different from MATLAB's aveknt, which returns the running average
of `k`-1 successive elements of ``t[1:-1]`` (and the empty vector if ``len(t) - 2 < k - 1``).
|
def aveknt(t, k):
t = np.atleast_1d(t)
if t.ndim > 1:
raise ValueError("t must be a list or a rank-1 array")
n = t.shape[0]
u = max(0, n - (k-1)) # number of elements in the output array
out = np.empty( (u,), dtype=t.dtype )
for j in range(u):
out[j] = sum( t[j:(j+k)] ) / k
return out
| 758,236
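For example, with k = 3 every output entry is the mean of three consecutive knots (float knots used, since the output reuses t's dtype):

import numpy as np

t = [0., 0., 0., 1., 2., 3., 3., 3.]
aveknt(t, 3)
# array([0.        , 0.33333333, 1.        , 2.        ,
#        2.66666667, 3.        ])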
|
Count multiplicities of elements in a sorted list or rank-1 array.
Minimal emulation of MATLAB's ``knt2mlt``.
Parameters:
t:
Python list or rank-1 array. Must be sorted!
Returns:
out
rank-1 array such that
out[k] = #{ t[i] == t[k] for i < k }
Example:
If ``t = [1, 1, 2, 3, 3, 3]``, then ``out = [0, 1, 0, 0, 1, 2]``.
Caveat:
Requires input to be already sorted (this is not checked).
|
def knt2mlt(t):
t = np.atleast_1d(t)
if t.ndim > 1:
raise ValueError("t must be a list or a rank-1 array")
out = []
e = None
for k in range(t.shape[0]):
if t[k] != e:
e = t[k]
count = 0
else:
count += 1
out.append(count)
return np.array( out )
| 758,238
|
Return collocation matrix.
Minimal emulation of MATLAB's ``spcol``.
Parameters:
knots:
rank-1 array, knot vector (with appropriately repeated endpoints; see `augknt`, `aptknt`)
order:
int, >= 0, order of spline
tau:
rank-1 array, collocation sites
Returns:
rank-2 array A such that
A[i,j] = D**{m(i)} B_j(tau[i])
where
m(i) = multiplicity of site tau[i]
D**k = kth derivative (0 for function value itself)
|
def spcol(knots, order, tau):
m = knt2mlt(tau)
B = bspline.Bspline(knots, order)
dummy = B(0.)
nbasis = len(dummy) # perform dummy evaluation to get number of basis functions
A = np.empty( (tau.shape[0], nbasis), dtype=dummy.dtype )
for i,item in enumerate(zip(tau,m)):
taui,mi = item
f = B.diff(order=mi)
A[i,:] = f(taui)
return A
| 758,239
|
Create a Bspline object.
Parameters:
knot_vector: Python list or rank-1 Numpy array containing knot vector
entries
order: Order of interpolation, e.g. 0 -> piecewise constant between
knots, 1 -> piecewise linear between knots, etc.
Returns:
Bspline object, callable to evaluate basis functions at given
values of `x` inside the knot span.
|
def __init__(self, knot_vector, order):
kv = np.atleast_1d(knot_vector)
if kv.ndim > 1:
raise ValueError("knot_vector must be Python list or rank-1 array, but got rank = %d" % (kv.ndim))
self.knot_vector = kv
order = int(order)
if order < 0:
raise ValueError("order must be integer >= 0, but got %d" % (order))
self.p = order
#Dummy calls to the functions for memory storage
self.__call__(0.0)
self.d(0.0)
| 758,240
|
Differentiate a B-spline `order` number of times.
Parameters:
order:
int, >= 0
Returns:
**lambda** `x`: ... that evaluates the `order`-th derivative of `B` at the point `x`.
The returned function internally uses __call__, which is 'memoized' for speed.
|
def diff(self, order=1):
order = int(order)
if order < 0:
raise ValueError("order must be >= 0, got %d" % (order))
if order == 0:
return self.__call__
if order > self.p: # identically zero, but force the same output format as in the general case
dummy = self.__call__(0.) # get number of basis functions and output dtype
nbasis = dummy.shape[0]
return lambda x: np.zeros( (nbasis,), dtype=dummy.dtype ) # accept but ignore input x
# At each differentiation, each term maps into two new terms.
# The number of terms in the result will be 2**order.
#
# This will cause an exponential explosion in the number of terms for high derivative orders,
# but for the first few orders (practical usage; >3 is rarely needed) the approach works.
#
terms = [ (1.,self) ]
for k in range(order):
tmp = []
for Ci,Bi in terms:
tmp.extend( (Ci*cn, Bn) for cn,Bn in Bi.__diff_internal() ) # NOTE: also propagate Ci
terms = tmp
# perform final summation at call time
return lambda x: sum( ci*Bi(x) for ci,Bi in terms )
| 758,247
|
Validate that a given value is a valid option for a given model instance.
Args:
value (xworkflows.base.StateWrapper): The base.StateWrapper returned by to_python.
model_instance: A WorkflowEnabled instance
|
def validate(self, value, model_instance):
if not isinstance(value, base.StateWrapper):
raise exceptions.ValidationError(self.error_messages['wrong_type'] % value)
elif not value.workflow == self.workflow:
raise exceptions.ValidationError(self.error_messages['wrong_workflow'] % value.workflow)
elif value.state not in self.workflow.states:
raise exceptions.ValidationError(self.error_messages['invalid_state'] % value.state)
| 759,972
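A hedged sketch of what this method guards against, assuming a
django_xworkflows-style StateField on a WorkflowEnabled model (the model
and workflow names below are hypothetical):

    # class Task(xwf_models.WorkflowEnabled, models.Model):
    #     state = xwf_models.StateField(TaskWorkflow)
    task = Task()
    task.state = OtherWorkflow.states.done  # state from a different workflow
    try:
        task.full_clean()  # field validation runs validate() above
    except exceptions.ValidationError as err:
        print(err)  # the 'wrong_workflow' error message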
|
Pops a message for a subscribed client.
Args:
deadline (int): max number of seconds to wait (None => no timeout)
Returns:
        Future with the popped message as result, or None on timeout,
        or a ConnectionError object in case of connection errors,
        or a ClientError object if you are not subscribed.
|
def pubsub_pop_message(self, deadline=None):
if not self.subscribed:
excep = ClientError("you must subscribe before using "
"pubsub_pop_message")
raise tornado.gen.Return(excep)
reply = None
try:
reply = self._reply_list.pop(0)
raise tornado.gen.Return(reply)
except IndexError:
pass
if deadline is not None:
td = timedelta(seconds=deadline)
yield self._condition.wait(timeout=td)
else:
yield self._condition.wait()
try:
reply = self._reply_list.pop(0)
except IndexError:
pass
raise tornado.gen.Return(reply)
| 760,269
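A hedged coroutine sketch, assuming a tornadis-style pubsub client (the
pubsub_subscribe method name is taken from that library):

    import tornado.gen

    @tornado.gen.coroutine
    def listen(client):
        yield client.pubsub_subscribe("channel1")
        msg = yield client.pubsub_pop_message(deadline=5)
        if msg is None:
            print("no message within 5 seconds")
        else:
            print("got:", msg)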
|
Constructor.
Args:
use_memory_view_min_size (int): minimum size before using
memoryview objects (advanced option, the default is probably
good for you).
|
def __init__(self, use_memory_view_min_size=4096):
self.use_memory_view_min_size = use_memory_view_min_size
self._deque = collections.deque()
self.clear()
| 760,381
|
Pops a chunk of the given max size.
    Optimized to avoid excessive string copies.
Args:
chunk_max_size (int): max size of the returned chunk.
Returns:
string (bytes) with a size <= chunk_max_size.
|
def pop_chunk(self, chunk_max_size):
if self._total_length < chunk_max_size:
            # fast path (the whole queue fits in a single chunk)
res = self._tobytes()
self.clear()
return res
first_iteration = True
while True:
try:
data = self._deque.popleft()
data_length = len(data)
self._total_length -= data_length
if first_iteration:
# first iteration
if data_length == chunk_max_size:
# we are lucky !
return data
elif data_length > chunk_max_size:
# we have enough data at first iteration
# => fast path optimization
view = self._get_pointer_or_memoryview(data,
data_length)
self.appendleft(view[chunk_max_size:])
return view[:chunk_max_size]
else:
# no single iteration fast path optimization :-(
# let's use a WriteBuffer to build the result chunk
chunk_write_buffer = WriteBuffer()
else:
# not first iteration
if chunk_write_buffer._total_length + data_length \
> chunk_max_size:
                        view = self._get_pointer_or_memoryview(data,
                                                               data_length)
                        # note: limit is negative here, so view[limit:] is the
                        # excess tail (pushed back onto the queue) and
                        # view[:limit] is the part kept for this chunk
                        limit = chunk_max_size - \
                            chunk_write_buffer._total_length - data_length
                        self.appendleft(view[limit:])
                        data = view[:limit]
chunk_write_buffer.append(data)
if chunk_write_buffer._total_length >= chunk_max_size:
break
except IndexError:
# the buffer is empty (so no memoryview inside)
self._has_view = False
break
first_iteration = False
return chunk_write_buffer._tobytes()
| 760,386
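A small usage sketch (assumes the WriteBuffer append() method used elsewhere
in this class; small payloads stay plain bytes, large ones may come back as
memoryview objects, hence the bytes() calls):

    buf = WriteBuffer()
    buf.append(b"hello ")
    buf.append(b"world")
    print(bytes(buf.pop_chunk(8)))    # b'hello wo' (built from two segments)
    print(bytes(buf.pop_chunk(100)))  # b'rld' (whole queue fits: fast path)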
|
Constructor.
Args:
max_size (int): max size of the pool (-1 means "no limit").
client_timeout (int): timeout in seconds of a connection released
to the pool (-1 means "no timeout").
autoclose (boolean): automatically disconnect released connections
with lifetime > client_timeout (test made every
client_timeout/10 seconds).
client_kwargs (dict): Client constructor arguments.
|
def __init__(self, max_size=-1, client_timeout=-1, autoclose=False,
**client_kwargs):
self.max_size = max_size
self.client_timeout = client_timeout
self.client_kwargs = client_kwargs
self.__ioloop = client_kwargs.get('ioloop',
tornado.ioloop.IOLoop.instance())
self.autoclose = autoclose
self.__pool = deque()
if self.max_size != -1:
self.__sem = tornado.locks.Semaphore(self.max_size)
else:
self.__sem = None
self.__autoclose_periodic = None
if self.autoclose and self.client_timeout > 0:
every = int(self.client_timeout) * 100
if int(tornado.version[0]) >= 5:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every)
else:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every, self.__ioloop)
self.__autoclose_periodic = cb
self.__autoclose_periodic.start()
| 760,436
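A hedged construction sketch, assuming the tornadis-style ClientPool class
this constructor belongs to (the extra kwargs are forwarded to each Client):

    pool = ClientPool(max_size=10, client_timeout=60, autoclose=True,
                      host="localhost", port=6379)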
|
Releases a client object to the pool.
Args:
client: Client object.
|
def release_client(self, client):
if isinstance(client, Client):
if not self._is_expired_client(client):
LOG.debug('Client is not expired. Adding back to pool')
self.__pool.append(client)
elif client.is_connected():
LOG.debug('Client is expired and connected. Disconnecting')
client.disconnect()
if self.__sem is not None:
self.__sem.release()
| 760,443
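A typical acquire/release cycle (hedged; get_connected_client is assumed to
be the pool method that preconnect below also relies on):

    import tornado.gen

    @tornado.gen.coroutine
    def ping_once(pool):
        client = yield pool.get_connected_client()
        try:
            reply = yield client.call("PING")
            print(reply)
        finally:
            pool.release_client(client)  # always hand the client back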
|
(pre)Connects some or all redis clients inside the pool.
Args:
size (int): number of redis clients to build and to connect
(-1 means all clients if pool max_size > -1)
Raises:
ClientError: when size == -1 and pool max_size == -1
|
def preconnect(self, size=-1):
if size == -1 and self.max_size == -1:
raise ClientError("size=-1 not allowed with pool max_size=-1")
limit = min(size, self.max_size) if size != -1 else self.max_size
clients = yield [self.get_connected_client() for _ in range(0, limit)]
for client in clients:
self.release_client(client)
| 760,445
|
Stacks a redis command inside the object.
    The syntax is the same as the call() method of the Client class.
Args:
*args: full redis command as variable length argument list.
Examples:
>>> pipeline = Pipeline()
>>> pipeline.stack_call("HSET", "key", "field", "value")
>>> pipeline.stack_call("PING")
>>> pipeline.stack_call("INCR", "key2")
|
def stack_call(self, *args):
self.pipelined_args.append(args)
        self.number_of_stacked_calls += 1
| 760,482
|
    Buffers some data to be sent to the host:port in a non-blocking way.
    The data is always buffered and never sent on the socket synchronously.
You can give a WriteBuffer as parameter. The internal Connection
WriteBuffer will be extended with this one (without copying).
Args:
data (str or WriteBuffer): string (or WriteBuffer) to write to
the host:port.
|
def write(self, data):
if isinstance(data, WriteBuffer):
self._write_buffer.append(data)
else:
if len(data) > 0:
self._write_buffer.append(data)
if self.aggressive_write:
self._handle_write()
if self._write_buffer._total_length > 0:
self._register_or_update_event_handler(write=True)
| 760,493
|
Constructor.
Args:
        autoconnect (boolean): True if the client is in autoconnect (and
            autoreconnect) mode (default True).
password (string): the password to authenticate with.
db (int): database number.
**connection_kwargs: :class:`Connection` object kwargs.
|
def __init__(self, autoconnect=True, password=None, db=0,
**connection_kwargs):
if 'read_callback' in connection_kwargs or \
'close_callback' in connection_kwargs:
raise Exception("read_callback and close_callback are not allowed "
"to be used here.")
self.connection_kwargs = connection_kwargs
self.autoconnect = autoconnect
self.password = password
self.db = db
        self.__connection = None
        self.subscribed = False
self.__reader = None
# Used for normal clients
self.__callback_queue = None
# Used for subscribed clients
self._condition = tornado.locks.Condition()
self._reply_list = None
| 760,524
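A hedged end-to-end sketch using this constructor (tornadis-style call()
API assumed; host/port kwargs go into connection_kwargs):

    import tornado.gen
    import tornado.ioloop

    @tornado.gen.coroutine
    def main():
        client = Client(host="localhost", port=6379, autoconnect=True)
        reply = yield client.call("SET", "key", "value")
        print(reply)  # b'OK' on success, or a ConnectionError object

    tornado.ioloop.IOLoop.instance().run_sync(main)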
|
Callback called when some data are read on the socket.
    The buffer is given to the hiredis parser. If a reply is complete,
    we put the decoded reply on the reply queue.
Args:
data (str): string (buffer) read on the socket.
|
def _read_callback(self, data=None):
try:
if data is not None:
self.__reader.feed(data)
while True:
reply = self.__reader.gets()
if reply is not False:
try:
callback = self.__callback_queue.popleft()
# normal client (1 reply = 1 callback)
callback(reply)
except IndexError:
# pubsub clients
self._reply_list.append(reply)
self._condition.notify_all()
else:
break
except hiredis.ProtocolError:
            # something nasty occurred (corrupt stream => no way to recover)
LOG.warning("corrupted stream => disconnect")
self.disconnect()
| 760,527
|
Constructor.
Args:
wrapped (Future): the original Future object (to wrap)
exit_callback: the exit callback to call at the end of
the block
|
def __init__(self, wrapped, exit_callback):
Future.__init__(self)
wrapped.add_done_callback(self._done_callback)
self._exit_callback = exit_callback
self._wrapped = wrapped
| 760,537
|
Internal "done callback" to set the result of the object.
    The result of the object is forced by the wrapped future, so this
    internal callback must be called when the wrapped future is ready.
Args:
wrapped (Future): the wrapped Future object
|
def _done_callback(self, wrapped):
if wrapped.exception():
self.set_exception(wrapped.exception())
else:
self.set_result(wrapped.result())
| 760,538
|
Initialize the WebDriver
Args:
desired_capabilities(dict): The desired capabilities requested by
the local end.
        url(str): The URL of the remote server, default: http://127.0.0.1:3456/wd/hub.
|
def __init__(self, desired_capabilities, url='http://127.0.0.1:3456/wd/hub'):
self.session_id = None
self.capabilities = None
self.desired_capabilities = desired_capabilities
self.remote_invoker = RemoteInvoker(url)
| 760,665
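A hedged construction sketch (the capability keys follow the usual WebDriver
convention; the values are illustrative only):

    desired_caps = {
        'platformName': 'iOS',
        'deviceName': 'iPhone Simulator',
        'app': '/path/to/app.zip',
    }
    driver = WebDriver(desired_caps)  # talks to http://127.0.0.1:3456/wd/hub
    # session_id and capabilities remain None until a session is started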