code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def open(uri, mode, kerberos=False, user=None, password=None):
"""Implement streamed reader from a web site.
Supports Kerberos and Basic HTTP authentication.
Parameters
----------
url: str
The URL to open.
mode: str
The mode to open using.
kerberos: boolean, optional
If True, will attempt to use the local Kerberos credentials
user: str, optional
The username for authenticating over HTTP
password: str, optional
The password for authenticating over HTTP
Note
----
If neither kerberos or (user, password) are set, will connect unauthenticated.
"""
if mode == 'rb':
return BufferedInputBase(uri, mode, kerberos=kerberos, user=user, password=password)
else:
raise NotImplementedError('http support for mode %r not implemented' % mode) | Implement streamed reader from a web site.
Supports Kerberos and Basic HTTP authentication.
Parameters
----------
url: str
The URL to open.
mode: str
The mode to open using.
kerberos: boolean, optional
If True, will attempt to use the local Kerberos credentials
user: str, optional
The username for authenticating over HTTP
password: str, optional
The password for authenticating over HTTP
Note
----
If neither kerberos nor (user, password) is set, will connect unauthenticated. | Below is the instruction that describes the task:
### Input:
Implement streamed reader from a web site.
Supports Kerberos and Basic HTTP authentication.
Parameters
----------
url: str
The URL to open.
mode: str
The mode to open using.
kerberos: boolean, optional
If True, will attempt to use the local Kerberos credentials
user: str, optional
The username for authenticating over HTTP
password: str, optional
The password for authenticating over HTTP
Note
----
If neither kerberos or (user, password) are set, will connect unauthenticated.
### Response:
def open(uri, mode, kerberos=False, user=None, password=None):
    """Implement streamed reader from a web site.

    Supports Kerberos and Basic HTTP authentication.

    Parameters
    ----------
    uri: str
        The URL to open.
    mode: str
        The mode to open using.  Only 'rb' (streamed binary reading) is
        supported.
    kerberos: boolean, optional
        If True, will attempt to use the local Kerberos credentials
    user: str, optional
        The username for authenticating over HTTP
    password: str, optional
        The password for authenticating over HTTP

    Note
    ----
    If neither kerberos nor (user, password) is set, will connect
    unauthenticated.
    """
    if mode == 'rb':
        return BufferedInputBase(uri, mode, kerberos=kerberos, user=user, password=password)
    # Writing over HTTP is not supported by this transport.
    raise NotImplementedError('http support for mode %r not implemented' % mode)
def _api_get(path, server=None):
'''
Do a GET request to the API
'''
server = _get_server(server)
response = requests.get(
url=_get_url(server['ssl'], server['url'], server['port'], path),
auth=_get_auth(server['user'], server['password']),
headers=_get_headers(),
verify=False
)
    return _api_response(response) | Do a GET request to the API | Below is the instruction that describes the task:
### Input:
Do a GET request to the API
### Response:
def _api_get(path, server=None):
    '''
    Do a GET request to the API

    :param path: API path to request (appended to the server's base URL
        by ``_get_url``).
    :param server: optional server identifier; resolved through
        ``_get_server`` (presumably falls back to a default server when
        None -- confirm against that helper).
    :return: the parsed response produced by ``_api_response``.
    '''
    server = _get_server(server)
    response = requests.get(
        url=_get_url(server['ssl'], server['url'], server['port'], path),
        auth=_get_auth(server['user'], server['password']),
        headers=_get_headers(),
        # NOTE(review): verify=False disables TLS certificate verification,
        # leaving the request open to man-in-the-middle attacks -- confirm
        # this is intentional (e.g. self-signed certificates) before keeping.
        verify=False
    )
    return _api_response(response) |
def get_empty_results(self):
"""
Because the base result type is different depending on the return structure
(e.g. list for flat, dict for object), `get_result_type` initials the
`results` variable to the proper type
"""
assert self.result_type is not None, (
'{} must specify a `result_type` value or overwrite the '
'`get_empty_result` method.'.format(self.__class__.__name__)
)
return self.result_type() | Because the base result type is different depending on the return structure
(e.g. list for flat, dict for object), `get_result_type` initials the
`results` variable to the proper type | Below is the the instruction that describes the task:
### Input:
Because the base result type is different depending on the return structure
(e.g. list for flat, dict for object), `get_result_type` initials the
`results` variable to the proper type
### Response:
def get_empty_results(self):
    """Return a new, empty results container of the configured type.

    Because the base result type depends on the return structure
    (e.g. list for flat, dict for object), this initializes the
    ``results`` value to the proper type by calling ``result_type``.
    """
    result_factory = self.result_type
    assert result_factory is not None, (
        '{} must specify a `result_type` value or overwrite the '
        '`get_empty_result` method.'.format(self.__class__.__name__)
    )
    return result_factory()
def v_args(inline=False, meta=False, tree=False):
"A convenience decorator factory, for modifying the behavior of user-supplied visitor methods"
if [tree, meta, inline].count(True) > 1:
raise ValueError("Visitor functions can either accept tree, or meta, or be inlined. These cannot be combined.")
def _visitor_args_dec(obj):
return _apply_decorator(obj, _visitor_args_func_dec, inline=inline, meta=meta, whole_tree=tree)
    return _visitor_args_dec | A convenience decorator factory, for modifying the behavior of user-supplied visitor methods | Below is the instruction that describes the task:
### Input:
A convenience decorator factory, for modifying the behavior of user-supplied visitor methods
### Response:
def v_args(inline=False, meta=False, tree=False):
    """A convenience decorator factory, for modifying the behavior of user-supplied visitor methods"""
    selected = [tree, meta, inline]
    # At most one calling convention may be chosen.
    if selected.count(True) > 1:
        raise ValueError("Visitor functions can either accept tree, or meta, or be inlined. These cannot be combined.")
    def _decorate(obj):
        # Apply the visitor-argument wrapper with the chosen calling style.
        return _apply_decorator(obj, _visitor_args_func_dec, inline=inline, meta=meta, whole_tree=tree)
    return _decorate
def search_process_log(self, pid, filter={}, start=0, limit=1000):
'''
search_process_log(self, pid, filter={}, start=0, limit=1000)
Search in process logs
:Parameters:
* *pid* (`string`) -- Identifier of an existing process
* *start* (`int`) -- start index to retrieve from. Default is 0
* *limit* (`int`) -- maximum number of entities to retrieve. Default is 100
* *filter* (`object`) -- free text search pattern (checks in process log data)
:return: Count of records found and list of search results or empty list
:Example:
.. code-block:: python
filter = {'generic': 'my product param'}
search_result = opereto_client.search_globals(filter=filter)
if search_result['total'] > 0
print(search_result['list'])
'''
pid = self._get_pid(pid)
request_data = {'start': start, 'limit': limit, 'filter': filter}
return self._call_rest_api('post', '/processes/' + pid + '/log/search', data=request_data,
error='Failed to search in process log') | search_process_log(self, pid, filter={}, start=0, limit=1000)
Search in process logs
:Parameters:
* *pid* (`string`) -- Identifier of an existing process
* *start* (`int`) -- start index to retrieve from. Default is 0
* *limit* (`int`) -- maximum number of entities to retrieve. Default is 100
* *filter* (`object`) -- free text search pattern (checks in process log data)
:return: Count of records found and list of search results or empty list
:Example:
.. code-block:: python
filter = {'generic': 'my product param'}
search_result = opereto_client.search_globals(filter=filter)
if search_result['total'] > 0
print(search_result['list']) | Below is the the instruction that describes the task:
### Input:
search_process_log(self, pid, filter={}, start=0, limit=1000)
Search in process logs
:Parameters:
* *pid* (`string`) -- Identifier of an existing process
* *start* (`int`) -- start index to retrieve from. Default is 0
* *limit* (`int`) -- maximum number of entities to retrieve. Default is 100
* *filter* (`object`) -- free text search pattern (checks in process log data)
:return: Count of records found and list of search results or empty list
:Example:
.. code-block:: python
filter = {'generic': 'my product param'}
search_result = opereto_client.search_globals(filter=filter)
if search_result['total'] > 0
print(search_result['list'])
### Response:
def search_process_log(self, pid, filter=None, start=0, limit=1000):
    '''
    search_process_log(self, pid, filter=None, start=0, limit=1000)

    Search in process logs.

    :Parameters:
    * *pid* (`string`) -- Identifier of an existing process
    * *filter* (`object`) -- free text search pattern (checks in process log data)
    * *start* (`int`) -- start index to retrieve from. Default is 0
    * *limit* (`int`) -- maximum number of entities to retrieve. Default is 1000

    :return: Count of records found and list of search results or empty list

    :Example:
    .. code-block:: python

       filter = {'generic': 'my product param'}
       search_result = opereto_client.search_process_log(pid, filter=filter)
       if search_result['total'] > 0:
           print(search_result['list'])
    '''
    # A mutable default ({}) would be shared across all calls; create a
    # fresh dict per call instead.
    if filter is None:
        filter = {}
    pid = self._get_pid(pid)
    request_data = {'start': start, 'limit': limit, 'filter': filter}
    return self._call_rest_api('post', '/processes/' + pid + '/log/search', data=request_data,
                               error='Failed to search in process log')
def get_email_context(self,**kwargs):
''' Overrides EmailRecipientMixin '''
context = super(Event,self).get_email_context(**kwargs)
context.update({
'id': self.id,
'name': self.__str__(),
'title': self.name,
'start': self.firstOccurrenceTime,
'next': self.nextOccurrenceTime,
'last': self.lastOccurrenceTime,
'url': self.url,
})
return context | Overrides EmailRecipientMixin | Below is the the instruction that describes the task:
### Input:
Overrides EmailRecipientMixin
### Response:
def get_email_context(self, **kwargs):
    """Overrides EmailRecipientMixin: extend the base email context with
    this event's identifying details and occurrence times."""
    context = super(Event, self).get_email_context(**kwargs)
    event_details = {
        'id': self.id,
        'name': self.__str__(),
        'title': self.name,
        'start': self.firstOccurrenceTime,
        'next': self.nextOccurrenceTime,
        'last': self.lastOccurrenceTime,
        'url': self.url,
    }
    context.update(event_details)
    return context
def webui_data_stores_user_profile_saved_query_value(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
webui = ET.SubElement(config, "webui", xmlns="http://tail-f.com/ns/webui")
data_stores = ET.SubElement(webui, "data-stores")
user_profile = ET.SubElement(data_stores, "user-profile")
username_key = ET.SubElement(user_profile, "username")
username_key.text = kwargs.pop('username')
saved_query = ET.SubElement(user_profile, "saved-query")
key_key = ET.SubElement(saved_query, "key")
key_key.text = kwargs.pop('key')
value = ET.SubElement(saved_query, "value")
value.text = kwargs.pop('value')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def webui_data_stores_user_profile_saved_query_value(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
webui = ET.SubElement(config, "webui", xmlns="http://tail-f.com/ns/webui")
data_stores = ET.SubElement(webui, "data-stores")
user_profile = ET.SubElement(data_stores, "user-profile")
username_key = ET.SubElement(user_profile, "username")
username_key.text = kwargs.pop('username')
saved_query = ET.SubElement(user_profile, "saved-query")
key_key = ET.SubElement(saved_query, "key")
key_key.text = kwargs.pop('key')
value = ET.SubElement(saved_query, "value")
value.text = kwargs.pop('value')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def process_docstring(app, what, name, obj, options, lines):
"""React to a docstring event and append contracts to it."""
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
lines.extend(_format_contracts(what=what, obj=obj)) | React to a docstring event and append contracts to it. | Below is the the instruction that describes the task:
### Input:
React to a docstring event and append contracts to it.
### Response:
def process_docstring(app, what, name, obj, options, lines):
    """React to a docstring event and append contracts to it."""
    # pylint: disable=unused-argument
    # pylint: disable=too-many-arguments
    # In-place += extends `lines`, which Sphinx expects to be mutated.
    lines += _format_contracts(what=what, obj=obj)
def write_csv(self, path=None):
"""Write CSV file. Sorts the sections before calling the superclass write_csv"""
# Sort the Sections
self.sort_sections(['Root', 'Contacts', 'Documentation', 'References', 'Resources', 'Citations', 'Schema'])
# Sort Terms in the root section
# Re-wrap the description and abstract
if self.description:
self.description = self.description
if self.abstract:
self.description = self.abstract
t = self['Root'].get_or_new_term('Root.Modified')
t.value = datetime_now()
self.sort_by_term()
return super().write_csv(str(path)) | Write CSV file. Sorts the sections before calling the superclass write_csv | Below is the the instruction that describes the task:
### Input:
Write CSV file. Sorts the sections before calling the superclass write_csv
### Response:
def write_csv(self, path=None):
    """Write CSV file. Sorts the sections before calling the superclass write_csv

    :param path: destination path; converted to ``str`` before being passed
        to the superclass.
    :return: whatever the superclass ``write_csv`` returns.
    """
    # Sort the Sections
    self.sort_sections(['Root', 'Contacts', 'Documentation', 'References', 'Resources', 'Citations', 'Schema'])
    # Sort Terms in the root section
    # Re-wrap the description and abstract
    # NOTE(review): the self-assignment below looks like a no-op, but
    # presumably `description` is a property whose setter re-wraps the
    # text -- confirm before simplifying.
    if self.description:
        self.description = self.description
    # NOTE(review): this overwrites `description` with `abstract`; verify
    # it isn't meant to be `self.abstract = self.abstract`.
    if self.abstract:
        self.description = self.abstract
    # Stamp the modification time on the Root.Modified term.
    t = self['Root'].get_or_new_term('Root.Modified')
    t.value = datetime_now()
    self.sort_by_term()
    return super().write_csv(str(path)) |
def _create_state_data(self, context, resp_args, relay_state):
"""
Adds the frontend idp entity id to state
See super class satosa.frontends.saml2.SAMLFrontend#save_state
:type context: satosa.context.Context
:type resp_args: dict[str, str | saml2.samlp.NameIDPolicy]
:type relay_state: str
:rtype: dict[str, dict[str, str] | str]
"""
state = super()._create_state_data(context, resp_args, relay_state)
state["target_entity_id"] = context.target_entity_id_from_path()
return state | Adds the frontend idp entity id to state
See super class satosa.frontends.saml2.SAMLFrontend#save_state
:type context: satosa.context.Context
:type resp_args: dict[str, str | saml2.samlp.NameIDPolicy]
:type relay_state: str
:rtype: dict[str, dict[str, str] | str] | Below is the the instruction that describes the task:
### Input:
Adds the frontend idp entity id to state
See super class satosa.frontends.saml2.SAMLFrontend#save_state
:type context: satosa.context.Context
:type resp_args: dict[str, str | saml2.samlp.NameIDPolicy]
:type relay_state: str
:rtype: dict[str, dict[str, str] | str]
### Response:
def _create_state_data(self, context, resp_args, relay_state):
    """
    Adds the frontend idp entity id to state

    See super class satosa.frontends.saml2.SAMLFrontend#save_state

    :type context: satosa.context.Context
    :type resp_args: dict[str, str | saml2.samlp.NameIDPolicy]
    :type relay_state: str
    :rtype: dict[str, dict[str, str] | str]
    """
    state = super()._create_state_data(context, resp_args, relay_state)
    # Remember which frontend IdP entity the request was routed to.
    target = context.target_entity_id_from_path()
    state["target_entity_id"] = target
    return state
def activate_in_ec(self, ec_index):
'''Activate this component in an execution context.
@param ec_index The index of the execution context to activate in.
This index is into the total array of contexts, that
is both owned and participating contexts. If the value
of ec_index is greater than the length of
@ref owned_ecs, that length is subtracted from
ec_index and the result used as an index into
@ref participating_ecs.
'''
with self._mutex:
if ec_index >= len(self.owned_ecs):
ec_index -= len(self.owned_ecs)
if ec_index >= len(self.participating_ecs):
raise exceptions.BadECIndexError(ec_index)
ec = self.participating_ecs[ec_index]
else:
ec = self.owned_ecs[ec_index]
ec.activate_component(self._obj) | Activate this component in an execution context.
@param ec_index The index of the execution context to activate in.
This index is into the total array of contexts, that
is both owned and participating contexts. If the value
of ec_index is greater than the length of
@ref owned_ecs, that length is subtracted from
ec_index and the result used as an index into
@ref participating_ecs. | Below is the the instruction that describes the task:
### Input:
Activate this component in an execution context.
@param ec_index The index of the execution context to activate in.
This index is into the total array of contexts, that
is both owned and participating contexts. If the value
of ec_index is greater than the length of
@ref owned_ecs, that length is subtracted from
ec_index and the result used as an index into
@ref participating_ecs.
### Response:
def activate_in_ec(self, ec_index):
    '''Activate this component in an execution context.

    @param ec_index The index of the execution context to activate in.
                    This index is into the total array of contexts, that
                    is both owned and participating contexts. If the value
                    of ec_index is greater than the length of
                    @ref owned_ecs, that length is subtracted from
                    ec_index and the result used as an index into
                    @ref participating_ecs.
    '''
    with self._mutex:
        num_owned = len(self.owned_ecs)
        if ec_index < num_owned:
            ec = self.owned_ecs[ec_index]
        else:
            # Indices past the owned contexts address the participating
            # contexts, offset by the number of owned contexts.
            ec_index -= num_owned
            if ec_index >= len(self.participating_ecs):
                raise exceptions.BadECIndexError(ec_index)
            ec = self.participating_ecs[ec_index]
        ec.activate_component(self._obj)
def get_form(self, form_class=None):
'''
Set form groups to the groups specified in the view if defined
'''
formobj = super(GenModify, self).get_form(form_class)
# Set requested group to this form
selfgroups = getattr(self, "form_groups", None)
if selfgroups:
if type(selfgroups) == list:
formobj.__groups__ = lambda: selfgroups
else:
formobj.__groups__ = selfgroups
else:
selfgroups = getattr(self, "__groups__", None)
if selfgroups:
formobj.__groups__ = selfgroups
# Return the new updated form
return formobj | Set form groups to the groups specified in the view if defined | Below is the the instruction that describes the task:
### Input:
Set form groups to the groups specified in the view if defined
### Response:
def get_form(self, form_class=None):
    '''
    Set form groups to the groups specified in the view if defined

    Looks for a `form_groups` attribute on the view first and, failing
    that, a `__groups__` attribute; whichever is found is attached to the
    form object as its `__groups__` definition.
    '''
    formobj = super(GenModify, self).get_form(form_class)
    # Set requested group to this form
    selfgroups = getattr(self, "form_groups", None)
    if selfgroups:
        # isinstance (not `type(...) == list`) so list subclasses are
        # wrapped the same way as plain lists.
        if isinstance(selfgroups, list):
            # Presumably the form machinery expects __groups__ to be
            # callable, so plain lists are wrapped in a lambda.
            formobj.__groups__ = lambda: selfgroups
        else:
            formobj.__groups__ = selfgroups
    else:
        selfgroups = getattr(self, "__groups__", None)
        if selfgroups:
            formobj.__groups__ = selfgroups
    # Return the new updated form
    return formobj
def spop(self, key, count=None, *, encoding=_NOTSET):
"""Remove and return one or multiple random members from a set."""
args = [key]
if count is not None:
args.append(count)
return self.execute(b'SPOP', *args, encoding=encoding) | Remove and return one or multiple random members from a set. | Below is the the instruction that describes the task:
### Input:
Remove and return one or multiple random members from a set.
### Response:
def spop(self, key, count=None, *, encoding=_NOTSET):
    """Remove and return one or multiple random members from a set."""
    # COUNT is only sent when the caller asked for multiple members.
    if count is None:
        return self.execute(b'SPOP', key, encoding=encoding)
    return self.execute(b'SPOP', key, count, encoding=encoding)
def create_postgresql_pypostgresql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a postgresql database using pypostgresql.
"""
return create_engine(
_create_postgresql_pypostgresql(
username, password, host, port, database),
**kwargs
) | create an engine connected to a postgresql database using pypostgresql. | Below is the the instruction that describes the task:
### Input:
create an engine connected to a postgresql database using pypostgresql.
### Response:
def create_postgresql_pypostgresql(username, password, host, port, database, **kwargs):  # pragma: no cover
    """
    create an engine connected to a postgresql database using pypostgresql.
    """
    # Build the connection URL first, then hand it to the engine factory
    # along with any extra engine options.
    url = _create_postgresql_pypostgresql(username, password, host, port, database)
    return create_engine(url, **kwargs)
def _update_release_sha(repo_name, tag_name, new_release_sha, dry_run):
"""Update the commit associated with a given release tag.
Since updating a tag commit is not directly possible, this function
does the following steps:
* set the release tag to ``<tag_name>-tmp`` and associate it
with ``new_release_sha``.
* delete tag ``refs/tags/<tag_name>``.
* update the release tag to ``<tag_name>`` and associate it
with ``new_release_sha``.
"""
if new_release_sha is None:
return
refs = get_refs(repo_name, tags=True, pattern="refs/tags/%s" % tag_name)
if not refs:
return
assert len(refs) == 1
# If sha associated with "<tag_name>" is up-to-date, we are done.
previous_release_sha = refs[0]["object"]["sha"]
if previous_release_sha == new_release_sha:
return
tmp_tag_name = tag_name + "-tmp"
# If any, remove leftover temporary tag "<tag_name>-tmp"
refs = get_refs(repo_name, tags=True, pattern="refs/tags/%s" % tmp_tag_name)
if refs:
assert len(refs) == 1
time.sleep(0.1)
gh_ref_delete(repo_name,
"refs/tags/%s" % tmp_tag_name, dry_run=dry_run)
# Update "<tag_name>" release by associating it with the "<tag_name>-tmp"
# and "<new_release_sha>". It will create the temporary tag.
time.sleep(0.1)
patch_release(repo_name, tag_name,
tag_name=tmp_tag_name,
target_commitish=new_release_sha,
dry_run=dry_run)
# Now "<tag_name>-tmp" references "<new_release_sha>", remove "<tag_name>"
time.sleep(0.1)
gh_ref_delete(repo_name, "refs/tags/%s" % tag_name, dry_run=dry_run)
# Finally, update "<tag_name>-tmp" release by associating it with the
# "<tag_name>" and "<new_release_sha>".
time.sleep(0.1)
patch_release(repo_name, tmp_tag_name,
tag_name=tag_name,
target_commitish=new_release_sha,
dry_run=dry_run)
# ... and remove "<tag_name>-tmp"
time.sleep(0.1)
gh_ref_delete(repo_name,
"refs/tags/%s" % tmp_tag_name, dry_run=dry_run) | Update the commit associated with a given release tag.
Since updating a tag commit is not directly possible, this function
does the following steps:
* set the release tag to ``<tag_name>-tmp`` and associate it
with ``new_release_sha``.
* delete tag ``refs/tags/<tag_name>``.
* update the release tag to ``<tag_name>`` and associate it
with ``new_release_sha``. | Below is the the instruction that describes the task:
### Input:
Update the commit associated with a given release tag.
Since updating a tag commit is not directly possible, this function
does the following steps:
* set the release tag to ``<tag_name>-tmp`` and associate it
with ``new_release_sha``.
* delete tag ``refs/tags/<tag_name>``.
* update the release tag to ``<tag_name>`` and associate it
with ``new_release_sha``.
### Response:
def _update_release_sha(repo_name, tag_name, new_release_sha, dry_run):
    """Update the commit associated with a given release tag.

    Since updating a tag commit is not directly possible, this function
    does the following steps:

    * set the release tag to ``<tag_name>-tmp`` and associate it
      with ``new_release_sha``.
    * delete tag ``refs/tags/<tag_name>``.
    * update the release tag to ``<tag_name>`` and associate it
      with ``new_release_sha``.

    :param repo_name: repository the release lives in.
    :param tag_name: name of the release tag to repoint.
    :param new_release_sha: commit sha the tag should reference; ``None``
        makes the whole call a no-op.
    :param dry_run: forwarded to the GitHub helpers so no remote changes
        are performed when set.
    """
    if new_release_sha is None:
        return
    refs = get_refs(repo_name, tags=True, pattern="refs/tags/%s" % tag_name)
    if not refs:
        return
    assert len(refs) == 1
    # If sha associated with "<tag_name>" is up-to-date, we are done.
    previous_release_sha = refs[0]["object"]["sha"]
    if previous_release_sha == new_release_sha:
        return
    tmp_tag_name = tag_name + "-tmp"
    # If any, remove leftover temporary tag "<tag_name>-tmp"
    refs = get_refs(repo_name, tags=True, pattern="refs/tags/%s" % tmp_tag_name)
    if refs:
        assert len(refs) == 1
        # NOTE(review): the 0.1s sleeps below presumably pace the GitHub
        # API calls (rate limiting / eventual consistency) -- confirm
        # before removing them.
        time.sleep(0.1)
        gh_ref_delete(repo_name,
                      "refs/tags/%s" % tmp_tag_name, dry_run=dry_run)
    # Update "<tag_name>" release by associating it with the "<tag_name>-tmp"
    # and "<new_release_sha>". It will create the temporary tag.
    time.sleep(0.1)
    patch_release(repo_name, tag_name,
                  tag_name=tmp_tag_name,
                  target_commitish=new_release_sha,
                  dry_run=dry_run)
    # Now "<tag_name>-tmp" references "<new_release_sha>", remove "<tag_name>"
    time.sleep(0.1)
    gh_ref_delete(repo_name, "refs/tags/%s" % tag_name, dry_run=dry_run)
    # Finally, update "<tag_name>-tmp" release by associating it with the
    # "<tag_name>" and "<new_release_sha>".
    time.sleep(0.1)
    patch_release(repo_name, tmp_tag_name,
                  tag_name=tag_name,
                  target_commitish=new_release_sha,
                  dry_run=dry_run)
    # ... and remove "<tag_name>-tmp"
    time.sleep(0.1)
    gh_ref_delete(repo_name,
                  "refs/tags/%s" % tmp_tag_name, dry_run=dry_run) |
def start_inline(self,stylestack=None):
""" starts an inline entity with an optional style definition """
self.stack.append('inline')
if self.dirty:
self.escpos._raw(' ')
if stylestack:
self.style(stylestack) | starts an inline entity with an optional style definition | Below is the the instruction that describes the task:
### Input:
starts an inline entity with an optional style definition
### Response:
def start_inline(self, stylestack=None):
    """Open an inline entity; apply a style definition when one is given."""
    self.stack.append('inline')
    if self.dirty:
        # Separate the inline content from what was already emitted.
        self.escpos._raw(' ')
    if stylestack:
        self.style(stylestack)
def parse_transform(transform, direction='forward'):
"""
direction : 'forward' | 'inverse'
"""
if hasattr(transform, '__call__'):
tfun = transform
tname = None
elif hasattr(transform, 'lower'):
tname = _get_canonical_name(transform)
if tname is None:
raise ValueError('Unknown transform: %s' % transform)
else:
tfun = name_transforms[tname][direction]
else:
raise TypeError('Unsupported transform type: %s' % type(transform))
return tfun, tname | direction : 'forward' | 'inverse' | Below is the the instruction that describes the task:
### Input:
direction : 'forward' | 'inverse'
### Response:
def parse_transform(transform, direction='forward'):
    """
    direction : 'forward' | 'inverse'
    """
    if hasattr(transform, '__call__'):
        # Already a callable: use it directly; it has no canonical name.
        return transform, None
    if hasattr(transform, 'lower'):
        # String-like: resolve it to a registered named transform.
        tname = _get_canonical_name(transform)
        if tname is None:
            raise ValueError('Unknown transform: %s' % transform)
        return name_transforms[tname][direction], tname
    raise TypeError('Unsupported transform type: %s' % type(transform))
def adapt_meta(self, meta):
"""Convert meta from error response to href and surge_id attributes."""
surge = meta.get('surge_confirmation')
href = surge.get('href')
surge_id = surge.get('surge_confirmation_id')
return href, surge_id | Convert meta from error response to href and surge_id attributes. | Below is the the instruction that describes the task:
### Input:
Convert meta from error response to href and surge_id attributes.
### Response:
def adapt_meta(self, meta):
    """Convert meta from error response to href and surge_id attributes."""
    surge = meta.get('surge_confirmation')
    return surge.get('href'), surge.get('surge_confirmation_id')
def autoprops(include=None, # type: Union[str, Tuple[str]]
exclude=None, # type: Union[str, Tuple[str]]
cls=DECORATED):
"""
A decorator to automatically generate all properties getters and setters from the class constructor.
* if a @contract annotation exist on the __init__ method, mentioning a contract for a given parameter, the
parameter contract will be added on the generated setter method
* The user may override the generated getter and/or setter by creating them explicitly in the class and annotating
them with @getter_override or @setter_override. Note that the contract will still be dynamically added on the
setter, even if the setter already has one (in such case a `UserWarning` will be issued)
:param include: a tuple of explicit attribute names to include (None means all)
:param exclude: a tuple of explicit attribute names to exclude. In such case, include should be None.
:return:
"""
return autoprops_decorate(cls, include=include, exclude=exclude) | A decorator to automatically generate all properties getters and setters from the class constructor.
* if a @contract annotation exist on the __init__ method, mentioning a contract for a given parameter, the
parameter contract will be added on the generated setter method
* The user may override the generated getter and/or setter by creating them explicitly in the class and annotating
them with @getter_override or @setter_override. Note that the contract will still be dynamically added on the
setter, even if the setter already has one (in such case a `UserWarning` will be issued)
:param include: a tuple of explicit attribute names to include (None means all)
:param exclude: a tuple of explicit attribute names to exclude. In such case, include should be None.
:return: | Below is the the instruction that describes the task:
### Input:
A decorator to automatically generate all properties getters and setters from the class constructor.
* if a @contract annotation exist on the __init__ method, mentioning a contract for a given parameter, the
parameter contract will be added on the generated setter method
* The user may override the generated getter and/or setter by creating them explicitly in the class and annotating
them with @getter_override or @setter_override. Note that the contract will still be dynamically added on the
setter, even if the setter already has one (in such case a `UserWarning` will be issued)
:param include: a tuple of explicit attribute names to include (None means all)
:param exclude: a tuple of explicit attribute names to exclude. In such case, include should be None.
:return:
### Response:
def autoprops(include=None, # type: Union[str, Tuple[str]]
              exclude=None, # type: Union[str, Tuple[str]]
              cls=DECORATED):
    """
    A decorator to automatically generate all properties getters and setters from the class constructor.

    * if a @contract annotation exist on the __init__ method, mentioning a contract for a given parameter, the
      parameter contract will be added on the generated setter method
    * The user may override the generated getter and/or setter by creating them explicitly in the class and annotating
      them with @getter_override or @setter_override. Note that the contract will still be dynamically added on the
      setter, even if the setter already has one (in such case a `UserWarning` will be issued)

    :param include: a tuple of explicit attribute names to include (None means all)
    :param exclude: a tuple of explicit attribute names to exclude. In such case, include should be None.
    :param cls: the decorated class (filled in by the decorator machinery via `DECORATED`).
    :return: the class with generated properties applied.
    """
    # Delegate the actual property generation to the shared implementation.
    return autoprops_decorate(cls, include=include, exclude=exclude) |
def write_points(self, data, time_precision='s', *args, **kwargs):
"""Write to multiple time series names.
An example data blob is:
data = [
{
"points": [
[
12
]
],
"name": "cpu_load_short",
"columns": [
"value"
]
}
]
:param data: A list of dicts in InfluxDB 0.8.x data format.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
"""
def list_chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i + n]
batch_size = kwargs.get('batch_size')
if batch_size and batch_size > 0:
for item in data:
name = item.get('name')
columns = item.get('columns')
point_list = item.get('points', [])
for batch in list_chunks(point_list, batch_size):
item = [{
"points": batch,
"name": name,
"columns": columns
}]
self._write_points(
data=item,
time_precision=time_precision)
return True
return self._write_points(data=data,
time_precision=time_precision) | Write to multiple time series names.
An example data blob is:
data = [
{
"points": [
[
12
]
],
"name": "cpu_load_short",
"columns": [
"value"
]
}
]
:param data: A list of dicts in InfluxDB 0.8.x data format.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int | Below is the the instruction that describes the task:
### Input:
Write to multiple time series names.
An example data blob is:
data = [
{
"points": [
[
12
]
],
"name": "cpu_load_short",
"columns": [
"value"
]
}
]
:param data: A list of dicts in InfluxDB 0.8.x data format.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
### Response:
def write_points(self, data, time_precision='s', *args, **kwargs):
    """Write one or more time series to the database.

    ``data`` is a list of series dicts in InfluxDB 0.8.x format, e.g.::

        data = [
            {
                "points": [[12]],
                "name": "cpu_load_short",
                "columns": ["value"]
            }
        ]

    :param data: A list of dicts in InfluxDB 0.8.x data format.
    :param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
        or 'u'.
    :param batch_size: [Optional] Split the points of every series into
        chunks of this many points per request. Useful for large data
        dumps from one database to another.
    :type batch_size: int
    """
    batch_size = kwargs.get('batch_size')
    if batch_size and batch_size > 0:
        # Issue one request per chunk of points, for every series.
        for series in data:
            series_name = series.get('name')
            series_columns = series.get('columns')
            points = series.get('points', [])
            for offset in xrange(0, len(points), batch_size):
                chunk = [{
                    "points": points[offset:offset + batch_size],
                    "name": series_name,
                    "columns": series_columns
                }]
                self._write_points(
                    data=chunk,
                    time_precision=time_precision)
        return True
    return self._write_points(data=data,
                              time_precision=time_precision)
def get_contextualize_items_by_step(self, default=None):
    """Get a dictionary of the contextualize_items grouped by the step or the default value"""
    # Group every item under its step number (``elem.num``); when the
    # item mapping is empty/falsy, ``default`` is returned instead.
    if self.items:
        res = {}
        # res maps step number -> list of items, in iteration order of self.items
        for elem in self.items.values():
            if elem.num in res:
                res[elem.num].append(elem)
            else:
                res[elem.num] = [elem]
        return res
    else:
        return default | Get a dictionary of the contextualize_items grouped by the step or the default value | Below is the the instruction that describes the task:
### Input:
Get a dictionary of the contextualize_items grouped by the step or the default value
### Response:
def get_contextualize_items_by_step(self, default=None):
    """Get a dictionary of the contextualize_items grouped by the step or the default value"""
    if not self.items:
        return default
    grouped = {}
    # bucket each item under its step number, preserving encounter order
    for item in self.items.values():
        grouped.setdefault(item.num, []).append(item)
    return grouped
def setup_pilotpoints_grid(ml=None,sr=None,ibound=None,prefix_dict=None,
                           every_n_cell=4,
                           use_ibound_zones=False,
                           pp_dir='.',tpl_dir='.',
                           shapename="pp.shp"):
    """setup grid-based pilot points. Uses the ibound to determine
    where to set pilot points. pilot points are given generic ``pp_``
    names unless ``prefix_dict`` is passed. Write template files and a
    shapefile as well...hopefully this is useful to someone...
    Parameters
    ----------
    ml : flopy.mbase
        a flopy mbase dervied type. If None, sr must not be None.
    sr : flopy.utils.reference.SpatialReference
        a spatial reference use to locate the model grid in space. If None,
        ml must not be None. Default is None
    ibound : numpy.ndarray
        the modflow ibound integer array. Used to set pilot points only in active areas (!=0).
        If None and ml is None, then pilot points are set in all rows and columns according to
        every_n_cell. Default is None.
    prefix_dict : dict
        a dictionary of layer index, pilot point parameter prefixes.
        Example : ``{0:["hk_"],1:["vka_"]}`` would setup pilot points with
        the prefix ``"hk"`` for model layers 1 and ``"vka"`` for model layer 2 (zero based).
        If None, a generic set of pilot points with the "pp" prefix are setup
        for a generic nrowXncol grid. Default is None.
    use_ibound_zones : bool
        a flag to use the greater-than-zero values in the ibound as pilot point zones. If False,ibound
        values greater than zero are treated as a single zone. Default is False.
    pp_dir : str
        directory to write pilot point files to. Default is '.'
    tpl_dir : str
        directory to write pilot point template file to. Default is '.'
    shapename : str
        name of shapefile to write that contains pilot point information. Default is "pp.shp"
    Returns
    -------
    pp_df : pandas.DataFrame
        a dataframe summarizing pilot point information (same information
        written to shapename
    Example
    -------
    ``>>>import flopy``
    ``>>>from pyemu.utils import setup_pilotpoints_grid``
    ``>>>m = flopy.modflow.Modfow.load("mymodel.nam")``
    ``>>>setup_pilotpoints_grid(m,prefix_dict={0:['hk_'],1:['vka_']},``
    ``>>>                       every_n_cell=3,shapename='layer1_pp.shp')``
    """
    import flopy
    if ml is not None:
        assert isinstance(ml,flopy.modflow.Modflow)
        sr = ml.sr
        if ibound is None:
            ibound = ml.bas6.ibound.array
    else:
        assert sr is not None,"if 'ml' is not passed, 'sr' must be passed"
        if ibound is None:
            print("ibound not passed, using array of ones")
            ibound = {k:np.ones((sr.nrow,sr.ncol)) for k in prefix_dict.keys()}
    #assert ibound is not None,"if 'ml' is not pass, 'ibound' must be passed"
    # Normalize a raw ndarray ibound to a {layer_index: 2d array} mapping.
    if isinstance(ibound, np.ndarray):
        assert np.ndim(ibound) == 2 or np.ndim(ibound) == 3, \
            "ibound needs to be either 3d np.ndarry or k_dict of 2d arrays. " \
            "Array of {0} dimensions passed".format(np.ndim(ibound))
        if np.ndim(ibound) == 2:
            ibound = {0: ibound}
        else:
            ibound = {k: arr for k, arr in enumerate(ibound)}
    try:
        xcentergrid = sr.xcentergrid
        ycentergrid = sr.ycentergrid
    except Exception as e:
        raise Exception("error getting xcentergrid and/or ycentergrid from 'sr':{0}".format(str(e)))
    # half a sampling window: row/col index of the first candidate pilot point
    start = int(float(every_n_cell) / 2.0)
    #build a generic prefix_dict
    if prefix_dict is None:
        prefix_dict = {k:["pp_{0:02d}_".format(k)] for k in range(ml.nlay)}
    #check prefix_dict
    for k, prefix in prefix_dict.items():
        if not isinstance(prefix,list):
            prefix_dict[k] = [prefix]
        # if ibound was given as {zone_name: {layer: 2d array}}, check the
        # layer count of the zone whose name matches this prefix
        # (falling back to the catch-all 'general_zn' zone)
        if np.all([isinstance(v, dict) for v in ibound.values()]):
            for p in prefix_dict[k]:
                if np.any([p.startswith(key) for key in ibound.keys()]):
                    ib_sel = next(key for key in ibound.keys() if p.startswith(key))
                else:
                    ib_sel = 'general_zn'
                assert k < len(ibound[ib_sel]), "layer index {0} > nlay {1}".format(k, len(ibound[ib_sel]))
        else:
            assert k < len(ibound),"layer index {0} > nlay {1}".format(k,len(ibound))
    #try:
    #ibound = ml.bas6.ibound.array
    #except Exception as e:
    #    raise Exception("error getting model.bas6.ibound:{0}".format(str(e)))
    par_info = []
    pp_files,tpl_files = [],[]
    pp_names = copy.copy(PP_NAMES)
    pp_names.extend(["k","i","j"])
    # normalize: wrap a plain {layer: array} mapping under one catch-all zone
    if not np.all([isinstance(v, dict) for v in ibound.values()]):
        ibound = {"general_zn": ibound}
    for par in ibound.keys():
        for k in range(len(ibound[par])):
            pp_df = None
            ib = ibound[par][k]
            assert ib.shape == xcentergrid.shape,"ib.shape != xcentergrid.shape for k {0}".\
                format(k)
            pp_count = 0
            #skip this layer if not in prefix_dict
            if k not in prefix_dict.keys():
                continue
            #cycle through rows and cols
            for i in range(start,ib.shape[0]-start,every_n_cell):
                for j in range(start,ib.shape[1]-start,every_n_cell):
                    # skip if this is an inactive cell
                    if ib[i,j] == 0:
                        continue
                    # get the attributes we need
                    x = xcentergrid[i,j]
                    y = ycentergrid[i,j]
                    name = "pp_{0:04d}".format(pp_count)
                    parval1 = 1.0
                    #decide what to use as the zone
                    zone = 1
                    if use_ibound_zones:
                        zone = ib[i,j]
                    #stick this pilot point into a dataframe container
                    if pp_df is None:
                        data = {"name": name, "x": x, "y": y, "zone": zone,
                                "parval1": parval1, "k":k, "i":i, "j":j}
                        pp_df = pd.DataFrame(data=data,index=[0],columns=pp_names)
                    else:
                        data = [name, x, y, zone, parval1, k, i, j]
                        pp_df.loc[pp_count,:] = data
                    pp_count += 1
            #if we found some acceptable locs...
            if pp_df is not None:
                for prefix in prefix_dict[k]:
                    # if parameter prefix relates to current zone definition
                    # NOTE(review): '~' relies on np.any returning a numpy
                    # bool_ (bitwise NOT of a plain Python bool would always
                    # be truthy) — confirm if np.any is ever replaced.
                    if prefix.startswith(par) or (
                            ~np.any([prefix.startswith(p) for p in ibound.keys()]) and par == "general_zn"):
                        base_filename = prefix+"pp.dat"
                        pp_filename = os.path.join(pp_dir, base_filename)
                        # write the base pilot point file
                        write_pp_file(pp_filename, pp_df)
                        tpl_filename = os.path.join(tpl_dir, base_filename + ".tpl")
                        #write the tpl file
                        pilot_points_to_tpl(pp_df, tpl_filename,
                                            name_prefix=prefix)
                        pp_df.loc[:,"tpl_filename"] = tpl_filename
                        pp_df.loc[:,"pp_filename"] = pp_filename
                        pp_df.loc[:,"pargp"] = prefix
                        #save the parameter names and parval1s for later
                        par_info.append(pp_df.copy())
                        #save the pp_filename and tpl_filename for later
                        pp_files.append(pp_filename)
                        tpl_files.append(tpl_filename)
    par_info = pd.concat(par_info)
    # fill in any missing standard parameter columns with their defaults
    for key,default in pst_config["par_defaults"].items():
        if key in par_info.columns:
            continue
        par_info.loc[:,key] = default
    if shapename is not None:
        try:
            import shapefile
        except Exception as e:
            print("error importing shapefile, try pip install pyshp...{0}"\
                  .format(str(e)))
            return par_info
        # try the newer pyshp Writer signature first, fall back to the older one
        try:
            shp = shapefile.Writer(target=shapename,shapeType=shapefile.POINT)
        except:
            shp = shapefile.Writer(shapeType=shapefile.POINT)
        for name,dtype in par_info.dtypes.iteritems():
            if dtype == object:
                shp.field(name=name,fieldType='C',size=50)
            elif dtype in [int,np.int,np.int64,np.int32]:
                shp.field(name=name, fieldType='N', size=50, decimal=0)
            elif dtype in [float,np.float,np.float32,np.float64]:
                shp.field(name=name, fieldType='N', size=50, decimal=10)
            else:
                raise Exception("unrecognized field type in par_info:{0}:{1}".format(name,dtype))
        #some pandas awesomeness..
        par_info.apply(lambda x:shp.point(x.x,x.y), axis=1)
        par_info.apply(lambda x:shp.record(*x),axis=1)
        try:
            shp.save(shapename)
        except:
            shp.close()
        shp = shapefile.Reader(shapename)
        assert shp.numRecords == par_info.shape[0]
    return par_info | setup grid-based pilot points. Uses the ibound to determine
where to set pilot points. pilot points are given generic ``pp_``
names unless ``prefix_dict`` is passed. Write template files and a
shapefile as well...hopefully this is useful to someone...
Parameters
----------
ml : flopy.mbase
a flopy mbase dervied type. If None, sr must not be None.
sr : flopy.utils.reference.SpatialReference
a spatial reference use to locate the model grid in space. If None,
ml must not be None. Default is None
ibound : numpy.ndarray
the modflow ibound integer array. Used to set pilot points only in active areas (!=0).
If None and ml is None, then pilot points are set in all rows and columns according to
every_n_cell. Default is None.
prefix_dict : dict
a dictionary of layer index, pilot point parameter prefixes.
Example : ``{0:["hk_"],1:["vka_"]}`` would setup pilot points with
the prefix ``"hk"`` for model layers 1 and ``"vka"`` for model layer 2 (zero based).
If None, a generic set of pilot points with the "pp" prefix are setup
for a generic nrowXncol grid. Default is None.
use_ibound_zones : bool
a flag to use the greater-than-zero values in the ibound as pilot point zones. If False,ibound
values greater than zero are treated as a single zone. Default is False.
pp_dir : str
directory to write pilot point files to. Default is '.'
tpl_dir : str
directory to write pilot point template file to. Default is '.'
shapename : str
name of shapefile to write that contains pilot point information. Default is "pp.shp"
Returns
-------
pp_df : pandas.DataFrame
a dataframe summarizing pilot point information (same information
written to shapename
Example
-------
``>>>import flopy``
``>>>from pyemu.utils import setup_pilotpoints_grid``
``>>>m = flopy.modflow.Modfow.load("mymodel.nam")``
``>>>setup_pilotpoints_grid(m,prefix_dict={0:['hk_'],1:['vka_']},``
``>>> every_n_cell=3,shapename='layer1_pp.shp')`` | Below is the instruction that describes the task:
### Input:
setup grid-based pilot points. Uses the ibound to determine
where to set pilot points. pilot points are given generic ``pp_``
names unless ``prefix_dict`` is passed. Write template files and a
shapefile as well...hopefully this is useful to someone...
Parameters
----------
ml : flopy.mbase
a flopy mbase dervied type. If None, sr must not be None.
sr : flopy.utils.reference.SpatialReference
a spatial reference use to locate the model grid in space. If None,
ml must not be None. Default is None
ibound : numpy.ndarray
the modflow ibound integer array. Used to set pilot points only in active areas (!=0).
If None and ml is None, then pilot points are set in all rows and columns according to
every_n_cell. Default is None.
prefix_dict : dict
a dictionary of layer index, pilot point parameter prefixes.
Example : ``{0:["hk_"],1:["vka_"]}`` would setup pilot points with
the prefix ``"hk"`` for model layers 1 and ``"vka"`` for model layer 2 (zero based).
If None, a generic set of pilot points with the "pp" prefix are setup
for a generic nrowXncol grid. Default is None.
use_ibound_zones : bool
a flag to use the greater-than-zero values in the ibound as pilot point zones. If False,ibound
values greater than zero are treated as a single zone. Default is False.
pp_dir : str
directory to write pilot point files to. Default is '.'
tpl_dir : str
directory to write pilot point template file to. Default is '.'
shapename : str
name of shapefile to write that contains pilot point information. Default is "pp.shp"
Returns
-------
pp_df : pandas.DataFrame
a dataframe summarizing pilot point information (same information
written to shapename
Example
-------
``>>>import flopy``
``>>>from pyemu.utils import setup_pilotpoints_grid``
``>>>m = flopy.modflow.Modfow.load("mymodel.nam")``
``>>>setup_pilotpoints_grid(m,prefix_dict={0:['hk_'],1:['vka_']},``
``>>> every_n_cell=3,shapename='layer1_pp.shp')``
### Response:
def setup_pilotpoints_grid(ml=None,sr=None,ibound=None,prefix_dict=None,
                           every_n_cell=4,
                           use_ibound_zones=False,
                           pp_dir='.',tpl_dir='.',
                           shapename="pp.shp"):
    """setup grid-based pilot points. Uses the ibound to determine
    where to set pilot points. pilot points are given generic ``pp_``
    names unless ``prefix_dict`` is passed. Write template files and a
    shapefile as well...hopefully this is useful to someone...
    Parameters
    ----------
    ml : flopy.mbase
        a flopy mbase dervied type. If None, sr must not be None.
    sr : flopy.utils.reference.SpatialReference
        a spatial reference use to locate the model grid in space. If None,
        ml must not be None. Default is None
    ibound : numpy.ndarray
        the modflow ibound integer array. Used to set pilot points only in active areas (!=0).
        If None and ml is None, then pilot points are set in all rows and columns according to
        every_n_cell. Default is None.
    prefix_dict : dict
        a dictionary of layer index, pilot point parameter prefixes.
        Example : ``{0:["hk_"],1:["vka_"]}`` would setup pilot points with
        the prefix ``"hk"`` for model layers 1 and ``"vka"`` for model layer 2 (zero based).
        If None, a generic set of pilot points with the "pp" prefix are setup
        for a generic nrowXncol grid. Default is None.
    use_ibound_zones : bool
        a flag to use the greater-than-zero values in the ibound as pilot point zones. If False,ibound
        values greater than zero are treated as a single zone. Default is False.
    pp_dir : str
        directory to write pilot point files to. Default is '.'
    tpl_dir : str
        directory to write pilot point template file to. Default is '.'
    shapename : str
        name of shapefile to write that contains pilot point information. Default is "pp.shp"
    Returns
    -------
    pp_df : pandas.DataFrame
        a dataframe summarizing pilot point information (same information
        written to shapename
    Example
    -------
    ``>>>import flopy``
    ``>>>from pyemu.utils import setup_pilotpoints_grid``
    ``>>>m = flopy.modflow.Modfow.load("mymodel.nam")``
    ``>>>setup_pilotpoints_grid(m,prefix_dict={0:['hk_'],1:['vka_']},``
    ``>>>                       every_n_cell=3,shapename='layer1_pp.shp')``
    """
    import flopy
    if ml is not None:
        assert isinstance(ml,flopy.modflow.Modflow)
        sr = ml.sr
        if ibound is None:
            ibound = ml.bas6.ibound.array
    else:
        assert sr is not None,"if 'ml' is not passed, 'sr' must be passed"
        if ibound is None:
            print("ibound not passed, using array of ones")
            ibound = {k:np.ones((sr.nrow,sr.ncol)) for k in prefix_dict.keys()}
    #assert ibound is not None,"if 'ml' is not pass, 'ibound' must be passed"
    # Normalize a raw ndarray ibound to a {layer_index: 2d array} mapping.
    if isinstance(ibound, np.ndarray):
        assert np.ndim(ibound) == 2 or np.ndim(ibound) == 3, \
            "ibound needs to be either 3d np.ndarry or k_dict of 2d arrays. " \
            "Array of {0} dimensions passed".format(np.ndim(ibound))
        if np.ndim(ibound) == 2:
            ibound = {0: ibound}
        else:
            ibound = {k: arr for k, arr in enumerate(ibound)}
    try:
        xcentergrid = sr.xcentergrid
        ycentergrid = sr.ycentergrid
    except Exception as e:
        raise Exception("error getting xcentergrid and/or ycentergrid from 'sr':{0}".format(str(e)))
    # half a sampling window: row/col index of the first candidate pilot point
    start = int(float(every_n_cell) / 2.0)
    #build a generic prefix_dict
    if prefix_dict is None:
        prefix_dict = {k:["pp_{0:02d}_".format(k)] for k in range(ml.nlay)}
    #check prefix_dict
    for k, prefix in prefix_dict.items():
        if not isinstance(prefix,list):
            prefix_dict[k] = [prefix]
        # if ibound was given as {zone_name: {layer: 2d array}}, check the
        # layer count of the zone whose name matches this prefix
        # (falling back to the catch-all 'general_zn' zone)
        if np.all([isinstance(v, dict) for v in ibound.values()]):
            for p in prefix_dict[k]:
                if np.any([p.startswith(key) for key in ibound.keys()]):
                    ib_sel = next(key for key in ibound.keys() if p.startswith(key))
                else:
                    ib_sel = 'general_zn'
                assert k < len(ibound[ib_sel]), "layer index {0} > nlay {1}".format(k, len(ibound[ib_sel]))
        else:
            assert k < len(ibound),"layer index {0} > nlay {1}".format(k,len(ibound))
    #try:
    #ibound = ml.bas6.ibound.array
    #except Exception as e:
    #    raise Exception("error getting model.bas6.ibound:{0}".format(str(e)))
    par_info = []
    pp_files,tpl_files = [],[]
    pp_names = copy.copy(PP_NAMES)
    pp_names.extend(["k","i","j"])
    # normalize: wrap a plain {layer: array} mapping under one catch-all zone
    if not np.all([isinstance(v, dict) for v in ibound.values()]):
        ibound = {"general_zn": ibound}
    for par in ibound.keys():
        for k in range(len(ibound[par])):
            pp_df = None
            ib = ibound[par][k]
            assert ib.shape == xcentergrid.shape,"ib.shape != xcentergrid.shape for k {0}".\
                format(k)
            pp_count = 0
            #skip this layer if not in prefix_dict
            if k not in prefix_dict.keys():
                continue
            #cycle through rows and cols
            for i in range(start,ib.shape[0]-start,every_n_cell):
                for j in range(start,ib.shape[1]-start,every_n_cell):
                    # skip if this is an inactive cell
                    if ib[i,j] == 0:
                        continue
                    # get the attributes we need
                    x = xcentergrid[i,j]
                    y = ycentergrid[i,j]
                    name = "pp_{0:04d}".format(pp_count)
                    parval1 = 1.0
                    #decide what to use as the zone
                    zone = 1
                    if use_ibound_zones:
                        zone = ib[i,j]
                    #stick this pilot point into a dataframe container
                    if pp_df is None:
                        data = {"name": name, "x": x, "y": y, "zone": zone,
                                "parval1": parval1, "k":k, "i":i, "j":j}
                        pp_df = pd.DataFrame(data=data,index=[0],columns=pp_names)
                    else:
                        data = [name, x, y, zone, parval1, k, i, j]
                        pp_df.loc[pp_count,:] = data
                    pp_count += 1
            #if we found some acceptable locs...
            if pp_df is not None:
                for prefix in prefix_dict[k]:
                    # if parameter prefix relates to current zone definition
                    # NOTE(review): '~' relies on np.any returning a numpy
                    # bool_ (bitwise NOT of a plain Python bool would always
                    # be truthy) — confirm if np.any is ever replaced.
                    if prefix.startswith(par) or (
                            ~np.any([prefix.startswith(p) for p in ibound.keys()]) and par == "general_zn"):
                        base_filename = prefix+"pp.dat"
                        pp_filename = os.path.join(pp_dir, base_filename)
                        # write the base pilot point file
                        write_pp_file(pp_filename, pp_df)
                        tpl_filename = os.path.join(tpl_dir, base_filename + ".tpl")
                        #write the tpl file
                        pilot_points_to_tpl(pp_df, tpl_filename,
                                            name_prefix=prefix)
                        pp_df.loc[:,"tpl_filename"] = tpl_filename
                        pp_df.loc[:,"pp_filename"] = pp_filename
                        pp_df.loc[:,"pargp"] = prefix
                        #save the parameter names and parval1s for later
                        par_info.append(pp_df.copy())
                        #save the pp_filename and tpl_filename for later
                        pp_files.append(pp_filename)
                        tpl_files.append(tpl_filename)
    par_info = pd.concat(par_info)
    # fill in any missing standard parameter columns with their defaults
    for key,default in pst_config["par_defaults"].items():
        if key in par_info.columns:
            continue
        par_info.loc[:,key] = default
    if shapename is not None:
        try:
            import shapefile
        except Exception as e:
            print("error importing shapefile, try pip install pyshp...{0}"\
                  .format(str(e)))
            return par_info
        # try the newer pyshp Writer signature first, fall back to the older one
        try:
            shp = shapefile.Writer(target=shapename,shapeType=shapefile.POINT)
        except:
            shp = shapefile.Writer(shapeType=shapefile.POINT)
        for name,dtype in par_info.dtypes.iteritems():
            if dtype == object:
                shp.field(name=name,fieldType='C',size=50)
            elif dtype in [int,np.int,np.int64,np.int32]:
                shp.field(name=name, fieldType='N', size=50, decimal=0)
            elif dtype in [float,np.float,np.float32,np.float64]:
                shp.field(name=name, fieldType='N', size=50, decimal=10)
            else:
                raise Exception("unrecognized field type in par_info:{0}:{1}".format(name,dtype))
        #some pandas awesomeness..
        par_info.apply(lambda x:shp.point(x.x,x.y), axis=1)
        par_info.apply(lambda x:shp.record(*x),axis=1)
        try:
            shp.save(shapename)
        except:
            shp.close()
        shp = shapefile.Reader(shapename)
        assert shp.numRecords == par_info.shape[0]
    return par_info
def init_parser():
    """Initialize option parser.

    Builds an objdump-like CLI: every informational switch lives in one
    mutually exclusive group, so exactly one mode can be selected per run.
    """
    usage = "Usage: %(prog)s <option(s)> <file(s)>"
    description = " Display information from object <file(s)>.\n"
    description += " At least one of the following switches must be given:"
    #
    # Create an argument parser and an exclusive group.
    # NOTE: add_help=False because '-h' is repurposed below for
    # --section-headers; help itself is exposed as -H/--help instead.
    #
    parser = ArgumentParser(
        usage=usage, description=description, add_help=False)
    group = parser.add_mutually_exclusive_group()
    #
    # Add objdump parameters.
    #
    group.add_argument("-a", "--archive-headers",
                       action=DumpArchieveHeadersAction,
                       type=FileType("r"), nargs="+",
                       help="Display archive header information")
    group.add_argument("-f", "--file-headers",
                       action=DumpFileHeadersAction,
                       type=FileType("r"), nargs="+",
                       help="Display the contents of the overall file header")
    #group.add_argument("-p", "--private-headers", action="store", type=FileType("r"), nargs="+", help="Display object format specific file header contents")
    #group.add_argument("-P", "--private=OPT,OPT...", action="store", type=FileType("r"), nargs="+", help="Display object format specific contents")
    group.add_argument("-h", "--section-headers",
                       action=DumpSectionHeadersAction,
                       type=FileType("r"), nargs="+",
                       help="Display the contents of the section headers")
    #group.add_argument("-x", "--all-headers", action="store", type=FileType("r"), nargs="+", help="Display the contents of all headers")
    group.add_argument("-d", "--disassemble",
                       action=DisassembleSectionAction,
                       type=FileType("r"), nargs="+",
                       help="Display assembler contents of executable sections")
    group.add_argument("-D", "--disassemble-all",
                       action=DisassembleSectionsAction,
                       type=FileType("r"), nargs="+",
                       help="Display assembler contents of executable sections")
    #group.add_argument("-S", "--source", action="store", type=FileType("r"), nargs="+", help="Intermix source code with disassembly")
    group.add_argument("-s", "--full-contents",
                       action=DumpSectionContentAction,
                       type=FileType("r"), nargs="+",
                       help="Display the full contents of all sections requested")
    #group.add_argument("-g", "--debugging", action="store", type=FileType("r"), nargs="+", help="Display debug information in object file")
    #group.add_argument("-e", "--debugging-tags", action="store", type=FileType("r"), nargs="+", help="Display debug information using ctags style")
    #group.add_argument("-G", "--stabs", action="store", type=FileType("r"), nargs="+", help="Display (in raw form) any STABS info in the file")
    #-W[lLiaprmfFsoRt] or")
    #--dwarf[=rawline,=decodedline,=info,=abbrev,=pubnames,=aranges,=macro,=frames,")
    #        =frames-interp,=str,=loc,=Ranges,=pubtypes,")
    #        =gdb_index,=trace_info,=trace_abbrev,=trace_aranges]")
    #        Display DWARF info in the file")
    group.add_argument("-t", "--syms",
                       action=DumpFileSymbols,
                       type=FileType("r"), nargs="+",
                       help="Display the contents of the symbol table(s)")
    #-T, --dynamic-syms       Display the contents of the dynamic symbol table")
    #-r, --reloc              Display the relocation entries in the file")
    #-R, --dynamic-reloc      Display the dynamic relocation entries in the file")
    # '%%' survives the %-formatting below so argparse can later substitute %(prog)s
    group.add_argument("-v", "--version", action="version",
                       version="%%(prog)s %s (%s)" % (__version__, __description__),
                       help="Display this program's version number")
    group.add_argument("-i", "--info",
                       action=ListFormatAndArchitecturesInformationAction,
                       nargs=REMAINDER,
                       help="List object formats and architectures supported")
    group.add_argument("-H", "--help", action="store_true", default=False,
                       help="Display this information")
    return parser | Initialize option parser. | Below is the the instruction that describes the task:
### Input:
Initialize option parser.
### Response:
def init_parser():
    """Initialize option parser.

    Builds an objdump-like CLI: every informational switch lives in one
    mutually exclusive group, so exactly one mode can be selected per run.
    """
    usage = "Usage: %(prog)s <option(s)> <file(s)>"
    description = " Display information from object <file(s)>.\n"
    description += " At least one of the following switches must be given:"
    #
    # Create an argument parser and an exclusive group.
    # NOTE: add_help=False because '-h' is repurposed below for
    # --section-headers; help itself is exposed as -H/--help instead.
    #
    parser = ArgumentParser(
        usage=usage, description=description, add_help=False)
    group = parser.add_mutually_exclusive_group()
    #
    # Add objdump parameters.
    #
    group.add_argument("-a", "--archive-headers",
                       action=DumpArchieveHeadersAction,
                       type=FileType("r"), nargs="+",
                       help="Display archive header information")
    group.add_argument("-f", "--file-headers",
                       action=DumpFileHeadersAction,
                       type=FileType("r"), nargs="+",
                       help="Display the contents of the overall file header")
    #group.add_argument("-p", "--private-headers", action="store", type=FileType("r"), nargs="+", help="Display object format specific file header contents")
    #group.add_argument("-P", "--private=OPT,OPT...", action="store", type=FileType("r"), nargs="+", help="Display object format specific contents")
    group.add_argument("-h", "--section-headers",
                       action=DumpSectionHeadersAction,
                       type=FileType("r"), nargs="+",
                       help="Display the contents of the section headers")
    #group.add_argument("-x", "--all-headers", action="store", type=FileType("r"), nargs="+", help="Display the contents of all headers")
    group.add_argument("-d", "--disassemble",
                       action=DisassembleSectionAction,
                       type=FileType("r"), nargs="+",
                       help="Display assembler contents of executable sections")
    group.add_argument("-D", "--disassemble-all",
                       action=DisassembleSectionsAction,
                       type=FileType("r"), nargs="+",
                       help="Display assembler contents of executable sections")
    #group.add_argument("-S", "--source", action="store", type=FileType("r"), nargs="+", help="Intermix source code with disassembly")
    group.add_argument("-s", "--full-contents",
                       action=DumpSectionContentAction,
                       type=FileType("r"), nargs="+",
                       help="Display the full contents of all sections requested")
    #group.add_argument("-g", "--debugging", action="store", type=FileType("r"), nargs="+", help="Display debug information in object file")
    #group.add_argument("-e", "--debugging-tags", action="store", type=FileType("r"), nargs="+", help="Display debug information using ctags style")
    #group.add_argument("-G", "--stabs", action="store", type=FileType("r"), nargs="+", help="Display (in raw form) any STABS info in the file")
    #-W[lLiaprmfFsoRt] or")
    #--dwarf[=rawline,=decodedline,=info,=abbrev,=pubnames,=aranges,=macro,=frames,")
    #        =frames-interp,=str,=loc,=Ranges,=pubtypes,")
    #        =gdb_index,=trace_info,=trace_abbrev,=trace_aranges]")
    #        Display DWARF info in the file")
    group.add_argument("-t", "--syms",
                       action=DumpFileSymbols,
                       type=FileType("r"), nargs="+",
                       help="Display the contents of the symbol table(s)")
    #-T, --dynamic-syms       Display the contents of the dynamic symbol table")
    #-r, --reloc              Display the relocation entries in the file")
    #-R, --dynamic-reloc      Display the dynamic relocation entries in the file")
    # '%%' survives the %-formatting below so argparse can later substitute %(prog)s
    group.add_argument("-v", "--version", action="version",
                       version="%%(prog)s %s (%s)" % (__version__, __description__),
                       help="Display this program's version number")
    group.add_argument("-i", "--info",
                       action=ListFormatAndArchitecturesInformationAction,
                       nargs=REMAINDER,
                       help="List object formats and architectures supported")
    group.add_argument("-H", "--help", action="store_true", default=False,
                       help="Display this information")
    return parser
def get_params(mail, failobj=None, header='content-type', unquote=True):
    '''Get Content-Type parameters as dict.
    RFC 2045 specifies that parameter names are case-insensitive, so
    we normalize them here.
    :param mail: :class:`email.message.Message`
    :param failobj: object to return if no such header is found
    :param header: the header to search for parameters, default
    :param unquote: unquote the values
    :returns: a `dict` containing the parameters
    '''
    # Default lazily; a mutable default argument would be shared between
    # calls.  NOTE(review): despite the docstring, a caller-supplied
    # ``failobj`` still passes through the dict comprehension below, so it
    # must be an iterable of (name, value) pairs — confirm callers.
    failobj = failobj or []
    return {k.lower(): v for k, v in mail.get_params(failobj, header, unquote)} | Get Content-Type parameters as dict.
RFC 2045 specifies that parameter names are case-insensitive, so
we normalize them here.
:param mail: :class:`email.message.Message`
:param failobj: object to return if no such header is found
:param header: the header to search for parameters, default
:param unquote: unquote the values
:returns: a `dict` containing the parameters | Below is the instruction that describes the task:
### Input:
Get Content-Type parameters as dict.
RFC 2045 specifies that parameter names are case-insensitive, so
we normalize them here.
:param mail: :class:`email.message.Message`
:param failobj: object to return if no such header is found
:param header: the header to search for parameters, default
:param unquote: unquote the values
:returns: a `dict` containing the parameters
### Response:
def get_params(mail, failobj=None, header='content-type', unquote=True):
    '''Return the parameters of a message header as a dict.

    RFC 2045 makes parameter names case-insensitive, so every name is
    normalized to lower case before it is used as a key.

    :param mail: :class:`email.message.Message`
    :param failobj: object to return if no such header is found
    :param header: the header to search for parameters, default
    :param unquote: unquote the values
    :returns: a `dict` containing the parameters
    '''
    pairs = mail.get_params(failobj or [], header, unquote)
    result = {}
    for name, value in pairs:
        result[name.lower()] = value
    return result
def read_tree_dendropy(tree):
    '''Create a TreeSwift tree from a DendroPy tree
    Args:
        ``tree`` (``dendropy.datamodel.treemodel``): A Dendropy ``Tree`` object
    Returns:
        ``Tree``: A TreeSwift tree created from ``tree``
    '''
    # d2t maps each DendroPy node to its TreeSwift counterpart so children
    # can be attached to already-converted parents during the preorder walk
    out = Tree(); d2t = dict()
    # duck-type check: require the DendroPy Tree surface used below
    if not hasattr(tree, 'preorder_node_iter') or not hasattr(tree, 'seed_node') or not hasattr(tree, 'is_rooted'):
        raise TypeError("tree must be a DendroPy Tree object")
    # only an explicit True keeps the result rooted; any other value
    # (including None, presumably "unknown" in DendroPy — confirm) unroots it
    if tree.is_rooted != True:
        out.is_rooted = False
    for node in tree.preorder_node_iter():
        if node == tree.seed_node:
            curr = out.root
        else:
            # preorder guarantees the parent was already converted
            curr = Node(); d2t[node.parent_node].add_child(curr)
        d2t[node] = curr; curr.edge_length = node.edge_length
        # prefer the taxon label when present, else the node's own label
        if hasattr(node, 'taxon') and node.taxon is not None:
            curr.label = node.taxon.label
        else:
            curr.label = node.label
    return out | Create a TreeSwift tree from a DendroPy tree
Args:
``tree`` (``dendropy.datamodel.treemodel``): A Dendropy ``Tree`` object
Returns:
``Tree``: A TreeSwift tree created from ``tree`` | Below is the instruction that describes the task:
### Input:
Create a TreeSwift tree from a DendroPy tree
Args:
``tree`` (``dendropy.datamodel.treemodel``): A Dendropy ``Tree`` object
Returns:
``Tree``: A TreeSwift tree created from ``tree``
### Response:
def read_tree_dendropy(tree):
    '''Create a TreeSwift tree from a DendroPy tree

    Args:
        ``tree`` (``dendropy.datamodel.treemodel``): A Dendropy ``Tree`` object

    Returns:
        ``Tree``: A TreeSwift tree created from ``tree``
    '''
    result = Tree()
    converted = dict()  # DendroPy node -> TreeSwift node
    # duck-type check: require the DendroPy Tree surface used below
    for attr in ('preorder_node_iter', 'seed_node', 'is_rooted'):
        if not hasattr(tree, attr):
            raise TypeError("tree must be a DendroPy Tree object")
    if tree.is_rooted != True:
        result.is_rooted = False
    for dnode in tree.preorder_node_iter():
        if dnode == tree.seed_node:
            tnode = result.root
        else:
            # preorder guarantees the parent was already converted
            tnode = Node()
            converted[dnode.parent_node].add_child(tnode)
        converted[dnode] = tnode
        tnode.edge_length = dnode.edge_length
        # prefer the taxon label when present, else the node's own label
        taxon = getattr(dnode, 'taxon', None)
        if taxon is not None:
            tnode.label = taxon.label
        else:
            tnode.label = dnode.label
    return result
def filter_by_conditional_statement(self, statement):
    """Filter the Data Collection based on a conditional statement.
    Args:
        statement: A conditional statement as a string (e.g. a > 25 and a%5 == 0).
            The variable should always be named as 'a' (without quotations).
    Return:
        A new Data Collection containing only the filtered data
    """
    # delegate the statement evaluation; yields the matching values and
    # their corresponding datetimes
    _filt_values, _filt_datetimes = self._filter_by_statement(statement)
    # duplicate the header so the new collection shares no mutable state
    collection = HourlyDiscontinuousCollection(
        self.header.duplicate(), _filt_values, _filt_datetimes)
    # mark the analysis period as validated to skip re-validation
    collection._validated_a_period = True
    return collection | Filter the Data Collection based on a conditional statement.
Args:
statement: A conditional statement as a string (e.g. a > 25 and a%5 == 0).
The variable should always be named as 'a' (without quotations).
Return:
A new Data Collection containing only the filtered data | Below is the instruction that describes the task:
### Input:
Filter the Data Collection based on a conditional statement.
Args:
statement: A conditional statement as a string (e.g. a > 25 and a%5 == 0).
The variable should always be named as 'a' (without quotations).
Return:
A new Data Collection containing only the filtered data
### Response:
def filter_by_conditional_statement(self, statement):
    """Filter the Data Collection based on a conditional statement.
    Args:
        statement: A conditional statement as a string (e.g. a > 25 and a%5 == 0).
            The variable should always be named as 'a' (without quotations).
    Return:
        A new Data Collection containing only the filtered data
    """
    values, datetimes = self._filter_by_statement(statement)
    filtered = HourlyDiscontinuousCollection(
        self.header.duplicate(), values, datetimes)
    # Mark the analysis period as already validated on the new collection.
    filtered._validated_a_period = True
    return filtered
def unpadding(s, bs=AES.block_size):
"""Reverse operation to padding (see above).
Parameters
==========
:param s: bytes-like object;
:param bs: encryption block size.
"""
if PY3:
return s[:s[-1] - 96] if len(s) % bs == 0 else ''
else:
return s[:ord(s[-1]) - 96] if len(s) % bs == 0 else '' | Reverse operation to padding (see above).
Parameters
==========
:param s: bytes-like object;
    :param bs: encryption block size. | Below is the instruction that describes the task:
### Input:
Reverse operation to padding (see above).
Parameters
==========
:param s: bytes-like object;
:param bs: encryption block size.
### Response:
def unpadding(s, bs=AES.block_size):
    """Reverse operation to padding (see above).
    Parameters
    ==========
    :param s: bytes-like object;
    :param bs: encryption block size.
    """
    # Input that is not a whole number of blocks cannot have been padded.
    if len(s) % bs != 0:
        return ''
    # The trailing byte value minus the 96 offset gives the cut-off index.
    marker = s[-1] if PY3 else ord(s[-1])
    return s[:marker - 96]
def insert_json(table=None,
bulk_size=1000,
concurrency=25,
hosts=None,
output_fmt=None):
"""Insert JSON lines fed into stdin into a Crate cluster.
If no hosts are specified the statements will be printed.
Args:
table: Target table name.
bulk_size: Bulk size of the insert statements.
concurrency: Number of operations to run concurrently.
hosts: hostname:port pairs of the Crate nodes
"""
if not hosts:
return print_only(table)
queries = (to_insert(table, d) for d in dicts_from_stdin())
bulk_queries = as_bulk_queries(queries, bulk_size)
print('Executing inserts: bulk_size={} concurrency={}'.format(
bulk_size, concurrency), file=sys.stderr)
stats = Stats()
with clients.client(hosts, concurrency=concurrency) as client:
f = partial(aio.measure, stats, client.execute_many)
try:
aio.run_many(f, bulk_queries, concurrency)
except clients.SqlException as e:
raise SystemExit(str(e))
try:
print(format_stats(stats.get(), output_fmt))
except KeyError:
if not stats.sampler.values:
raise SystemExit('No data received via stdin')
raise | Insert JSON lines fed into stdin into a Crate cluster.
If no hosts are specified the statements will be printed.
Args:
table: Target table name.
bulk_size: Bulk size of the insert statements.
concurrency: Number of operations to run concurrently.
hosts: hostname:port pairs of the Crate nodes | Below is the the instruction that describes the task:
### Input:
Insert JSON lines fed into stdin into a Crate cluster.
If no hosts are specified the statements will be printed.
Args:
table: Target table name.
bulk_size: Bulk size of the insert statements.
concurrency: Number of operations to run concurrently.
hosts: hostname:port pairs of the Crate nodes
### Response:
def insert_json(table=None,
                bulk_size=1000,
                concurrency=25,
                hosts=None,
                output_fmt=None):
    """Insert JSON lines fed into stdin into a Crate cluster.
    If no hosts are specified the statements will be printed.
    Args:
        table: Target table name.
        bulk_size: Bulk size of the insert statements.
        concurrency: Number of operations to run concurrently.
        hosts: hostname:port pairs of the Crate nodes
        output_fmt: Format used when printing the collected timing statistics.
    """
    if not hosts:
        return print_only(table)
    # Generator expressions keep this lazy: stdin is streamed and batched
    # into bulk statements without loading everything into memory first.
    queries = (to_insert(table, d) for d in dicts_from_stdin())
    bulk_queries = as_bulk_queries(queries, bulk_size)
    print('Executing inserts: bulk_size={} concurrency={}'.format(
        bulk_size, concurrency), file=sys.stderr)
    stats = Stats()
    with clients.client(hosts, concurrency=concurrency) as client:
        # Wrap execute_many so every bulk call is timed into `stats`.
        f = partial(aio.measure, stats, client.execute_many)
        try:
            aio.run_many(f, bulk_queries, concurrency)
        except clients.SqlException as e:
            raise SystemExit(str(e))
    try:
        print(format_stats(stats.get(), output_fmt))
    except KeyError:
        # stats.get() raising KeyError with no samples means stdin was empty.
        if not stats.sampler.values:
            raise SystemExit('No data received via stdin')
        raise
def caesar_app(parser, cmd, args): # pragma: no cover
"""
Caesar crypt a value with a key.
"""
parser.add_argument('shift', type=int, help='the shift to apply')
parser.add_argument('value', help='the value to caesar crypt, read from stdin if omitted', nargs='?')
parser.add_argument(
'-s', '--shift-range',
dest='shift_ranges',
action='append',
help='specify a character range to shift (defaults to a-z, A-Z)'
)
args = parser.parse_args(args)
if not args.shift_ranges:
args.shift_ranges = ['az', 'AZ']
    return caesar(args.shift, pwnypack.main.string_value_or_stdin(args.value), args.shift_ranges) | Caesar crypt a value with a key. | Below is the instruction that describes the task:
### Input:
Caesar crypt a value with a key.
### Response:
def caesar_app(parser, cmd, args):  # pragma: no cover
    """
    Caesar crypt a value with a key.
    """
    parser.add_argument('shift', type=int, help='the shift to apply')
    parser.add_argument(
        'value',
        help='the value to caesar crypt, read from stdin if omitted',
        nargs='?'
    )
    parser.add_argument(
        '-s', '--shift-range',
        dest='shift_ranges',
        action='append',
        help='specify a character range to shift (defaults to a-z, A-Z)'
    )
    args = parser.parse_args(args)
    # Fall back to the ASCII letter ranges when none were given on the CLI.
    shift_ranges = args.shift_ranges or ['az', 'AZ']
    return caesar(args.shift, pwnypack.main.string_value_or_stdin(args.value), shift_ranges)
def rate_limit(self, name, limit=5, per=60, debug=False):
"""
Rate limit implementation. Allows up to `limit` of events every `per`
seconds.
See :ref:`rate-limit` for more information.
"""
return RateLimit(self, name, limit, per, debug) | Rate limit implementation. Allows up to `limit` of events every `per`
seconds.
    See :ref:`rate-limit` for more information. | Below is the instruction that describes the task:
### Input:
Rate limit implementation. Allows up to `limit` of events every `per`
seconds.
See :ref:`rate-limit` for more information.
### Response:
def rate_limit(self, name, limit=5, per=60, debug=False):
    """
    Rate limit implementation. Allows up to `limit` of events every `per`
    seconds.
    See :ref:`rate-limit` for more information.
    """
    # Thin factory method: construction and limiting logic live in RateLimit.
    return RateLimit(
        self,
        name,
        limit,
        per,
        debug,
    )
def _check_resources(name, expr, resources):
"""Validate that the expression and resources passed match up.
Parameters
----------
name : str
The name of the argument we are checking.
expr : Expr
The potentially bound expr.
resources
The explicitly passed resources to compute expr.
Raises
------
ValueError
If the resources do not match for an expression.
"""
if expr is None:
return
bound = expr._resources()
if not bound and resources is None:
raise ValueError('no resources provided to compute %s' % name)
if bound and resources:
raise ValueError(
'explicit and implicit resources provided to compute %s' % name,
) | Validate that the expression and resources passed match up.
Parameters
----------
name : str
The name of the argument we are checking.
expr : Expr
The potentially bound expr.
resources
The explicitly passed resources to compute expr.
Raises
------
ValueError
If the resources do not match for an expression. | Below is the the instruction that describes the task:
### Input:
Validate that the expression and resources passed match up.
Parameters
----------
name : str
The name of the argument we are checking.
expr : Expr
The potentially bound expr.
resources
The explicitly passed resources to compute expr.
Raises
------
ValueError
If the resources do not match for an expression.
### Response:
def _check_resources(name, expr, resources):
"""Validate that the expression and resources passed match up.
Parameters
----------
name : str
The name of the argument we are checking.
expr : Expr
The potentially bound expr.
resources
The explicitly passed resources to compute expr.
Raises
------
ValueError
If the resources do not match for an expression.
"""
if expr is None:
return
bound = expr._resources()
if not bound and resources is None:
raise ValueError('no resources provided to compute %s' % name)
if bound and resources:
raise ValueError(
'explicit and implicit resources provided to compute %s' % name,
) |
def update_ca_bundle(target=None, source=None, merge_files=None):
'''
Update the local CA bundle file from a URL
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt-run http.update_ca_bundle
salt-run http.update_ca_bundle target=/path/to/cacerts.pem
salt-run http.update_ca_bundle source=https://example.com/cacerts.pem
If the ``target`` is not specified, it will be pulled from the ``ca_cert``
configuration variable available to the master. If it cannot be found there,
it will be placed at ``<<FILE_ROOTS>>/cacerts.pem``.
If the ``source`` is not specified, it will be pulled from the
``ca_cert_url`` configuration variable available to the master. If it cannot
be found, it will be downloaded from the cURL website, using an http (not
https) URL. USING THE DEFAULT URL SHOULD BE AVOIDED!
``merge_files`` may also be specified, which includes a string or list of
strings representing a file or files to be appended to the end of the CA
bundle, once it is downloaded.
CLI Example:
.. code-block:: bash
salt-run http.update_ca_bundle merge_files=/path/to/mycert.pem
'''
return salt.utils.http.update_ca_bundle(
target, source, __opts__, merge_files
) | Update the local CA bundle file from a URL
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt-run http.update_ca_bundle
salt-run http.update_ca_bundle target=/path/to/cacerts.pem
salt-run http.update_ca_bundle source=https://example.com/cacerts.pem
If the ``target`` is not specified, it will be pulled from the ``ca_cert``
configuration variable available to the master. If it cannot be found there,
it will be placed at ``<<FILE_ROOTS>>/cacerts.pem``.
If the ``source`` is not specified, it will be pulled from the
``ca_cert_url`` configuration variable available to the master. If it cannot
be found, it will be downloaded from the cURL website, using an http (not
https) URL. USING THE DEFAULT URL SHOULD BE AVOIDED!
``merge_files`` may also be specified, which includes a string or list of
strings representing a file or files to be appended to the end of the CA
bundle, once it is downloaded.
CLI Example:
.. code-block:: bash
salt-run http.update_ca_bundle merge_files=/path/to/mycert.pem | Below is the the instruction that describes the task:
### Input:
Update the local CA bundle file from a URL
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt-run http.update_ca_bundle
salt-run http.update_ca_bundle target=/path/to/cacerts.pem
salt-run http.update_ca_bundle source=https://example.com/cacerts.pem
If the ``target`` is not specified, it will be pulled from the ``ca_cert``
configuration variable available to the master. If it cannot be found there,
it will be placed at ``<<FILE_ROOTS>>/cacerts.pem``.
If the ``source`` is not specified, it will be pulled from the
``ca_cert_url`` configuration variable available to the master. If it cannot
be found, it will be downloaded from the cURL website, using an http (not
https) URL. USING THE DEFAULT URL SHOULD BE AVOIDED!
``merge_files`` may also be specified, which includes a string or list of
strings representing a file or files to be appended to the end of the CA
bundle, once it is downloaded.
CLI Example:
.. code-block:: bash
salt-run http.update_ca_bundle merge_files=/path/to/mycert.pem
### Response:
def update_ca_bundle(target=None, source=None, merge_files=None):
    '''
    Update the local CA bundle file from a URL
    .. versionadded:: 2015.5.0
    CLI Example:
    .. code-block:: bash
        salt-run http.update_ca_bundle
        salt-run http.update_ca_bundle target=/path/to/cacerts.pem
        salt-run http.update_ca_bundle source=https://example.com/cacerts.pem
    If the ``target`` is not specified, it will be pulled from the ``ca_cert``
    configuration variable available to the master. If it cannot be found there,
    it will be placed at ``<<FILE_ROOTS>>/cacerts.pem``.
    If the ``source`` is not specified, it will be pulled from the
    ``ca_cert_url`` configuration variable available to the master. If it cannot
    be found, it will be downloaded from the cURL website, using an http (not
    https) URL. USING THE DEFAULT URL SHOULD BE AVOIDED!
    ``merge_files`` may also be specified, which includes a string or list of
    strings representing a file or files to be appended to the end of the CA
    bundle, once it is downloaded.
    CLI Example:
    .. code-block:: bash
        salt-run http.update_ca_bundle merge_files=/path/to/mycert.pem
    '''
    # Delegate to the shared helper, handing through the master's opts.
    return salt.utils.http.update_ca_bundle(target, source, __opts__, merge_files)
def jkeys(jfile, key_path=None, in_memory=True, ignore_prefix=('.', '_')):
""" get keys for initial json level, or at level after following key_path
Parameters
----------
jfile : str, file_like or path_like
if str, must be existing file or folder,
if file_like, must have 'read' method
if path_like, must have 'iterdir' method (see pathlib.Path)
key_path : list[str]
a list of keys to index into the json before returning keys
in_memory : bool
if true reads json into memory before finding keys
(this is faster but uses more memory)
ignore_prefix : list[str]
ignore folders beginning with these prefixes
Examples
--------
>>> from jsonextended.utils import MockPath
>>> file_obj = MockPath('test.json',is_file=True,
... content='''
... {
... "a": 1,
... "b": [1.1,2.1],
... "c": {"d":"e","f":"g"}
... }
... ''')
...
>>> jkeys(file_obj)
['a', 'b', 'c']
>>> jkeys(file_obj,["c"])
['d', 'f']
>>> from jsonextended.utils import get_test_path
>>> path = get_test_path()
>>> jkeys(path)
['dir1', 'dir2', 'dir3']
>>> path = get_test_path()
>>> jkeys(path, ['dir1','file1'], in_memory=True)
['initial', 'meta', 'optimised', 'units']
"""
key_path = [] if key_path is None else key_path
def eval_file(file_obj):
if not in_memory:
return _get_keys_ijson(file_obj, key_path)
else:
return _get_keys(file_obj, key_path)
if isinstance(jfile, basestring):
if not os.path.exists(jfile):
raise IOError('jfile does not exist: {}'.format(jfile))
if os.path.isdir(jfile):
jpath = pathlib.Path(jfile)
return _get_keys_folder(jpath, key_path, in_memory, ignore_prefix)
else:
with open(jfile, 'r') as file_obj:
return eval_file(file_obj)
elif hasattr(jfile, 'read'):
return eval_file(jfile)
elif hasattr(jfile, 'iterdir'):
if jfile.is_file():
with jfile.open('r') as file_obj:
return eval_file(file_obj)
else:
return _get_keys_folder(jfile, key_path, in_memory, ignore_prefix)
else:
raise ValueError(
'jfile should be a str, '
'file_like or path_like object: {}'.format(jfile)) | get keys for initial json level, or at level after following key_path
Parameters
----------
jfile : str, file_like or path_like
if str, must be existing file or folder,
if file_like, must have 'read' method
if path_like, must have 'iterdir' method (see pathlib.Path)
key_path : list[str]
a list of keys to index into the json before returning keys
in_memory : bool
if true reads json into memory before finding keys
(this is faster but uses more memory)
ignore_prefix : list[str]
ignore folders beginning with these prefixes
Examples
--------
>>> from jsonextended.utils import MockPath
>>> file_obj = MockPath('test.json',is_file=True,
... content='''
... {
... "a": 1,
... "b": [1.1,2.1],
... "c": {"d":"e","f":"g"}
... }
... ''')
...
>>> jkeys(file_obj)
['a', 'b', 'c']
>>> jkeys(file_obj,["c"])
['d', 'f']
>>> from jsonextended.utils import get_test_path
>>> path = get_test_path()
>>> jkeys(path)
['dir1', 'dir2', 'dir3']
>>> path = get_test_path()
>>> jkeys(path, ['dir1','file1'], in_memory=True)
['initial', 'meta', 'optimised', 'units'] | Below is the the instruction that describes the task:
### Input:
get keys for initial json level, or at level after following key_path
Parameters
----------
jfile : str, file_like or path_like
if str, must be existing file or folder,
if file_like, must have 'read' method
if path_like, must have 'iterdir' method (see pathlib.Path)
key_path : list[str]
a list of keys to index into the json before returning keys
in_memory : bool
if true reads json into memory before finding keys
(this is faster but uses more memory)
ignore_prefix : list[str]
ignore folders beginning with these prefixes
Examples
--------
>>> from jsonextended.utils import MockPath
>>> file_obj = MockPath('test.json',is_file=True,
... content='''
... {
... "a": 1,
... "b": [1.1,2.1],
... "c": {"d":"e","f":"g"}
... }
... ''')
...
>>> jkeys(file_obj)
['a', 'b', 'c']
>>> jkeys(file_obj,["c"])
['d', 'f']
>>> from jsonextended.utils import get_test_path
>>> path = get_test_path()
>>> jkeys(path)
['dir1', 'dir2', 'dir3']
>>> path = get_test_path()
>>> jkeys(path, ['dir1','file1'], in_memory=True)
['initial', 'meta', 'optimised', 'units']
### Response:
def jkeys(jfile, key_path=None, in_memory=True, ignore_prefix=('.', '_')):
    """ get keys for initial json level, or at level after following key_path
    Parameters
    ----------
    jfile : str, file_like or path_like
        if str, must be existing file or folder,
        if file_like, must have 'read' method
        if path_like, must have 'iterdir' method (see pathlib.Path)
    key_path : list[str]
        a list of keys to index into the json before returning keys
    in_memory : bool
        if true reads json into memory before finding keys
        (this is faster but uses more memory)
    ignore_prefix : list[str]
        ignore folders beginning with these prefixes
    Examples
    --------
    >>> from jsonextended.utils import MockPath
    >>> file_obj = MockPath('test.json',is_file=True,
    ... content='''
    ... {
    ...  "a": 1,
    ...  "b": [1.1,2.1],
    ...  "c": {"d":"e","f":"g"}
    ... }
    ... ''')
    ...
    >>> jkeys(file_obj)
    ['a', 'b', 'c']
    >>> jkeys(file_obj,["c"])
    ['d', 'f']
    >>> from jsonextended.utils import get_test_path
    >>> path = get_test_path()
    >>> jkeys(path)
    ['dir1', 'dir2', 'dir3']
    >>> path = get_test_path()
    >>> jkeys(path, ['dir1','file1'], in_memory=True)
    ['initial', 'meta', 'optimised', 'units']
    """
    # Default to the top level of the JSON when no key path is given.
    key_path = [] if key_path is None else key_path
    # Helper: extract keys from an open file handle, either streaming
    # (ijson) or after fully loading into memory, per `in_memory`.
    def eval_file(file_obj):
        if not in_memory:
            return _get_keys_ijson(file_obj, key_path)
        else:
            return _get_keys(file_obj, key_path)
    # Dispatch on the kind of `jfile` supplied:
    # 1) a string is treated as a filesystem path (file or folder)
    if isinstance(jfile, basestring):
        if not os.path.exists(jfile):
            raise IOError('jfile does not exist: {}'.format(jfile))
        if os.path.isdir(jfile):
            jpath = pathlib.Path(jfile)
            return _get_keys_folder(jpath, key_path, in_memory, ignore_prefix)
        else:
            with open(jfile, 'r') as file_obj:
                return eval_file(file_obj)
    # 2) a file-like object is read directly
    elif hasattr(jfile, 'read'):
        return eval_file(jfile)
    # 3) a path-like object (e.g. pathlib.Path or MockPath)
    elif hasattr(jfile, 'iterdir'):
        if jfile.is_file():
            with jfile.open('r') as file_obj:
                return eval_file(file_obj)
        else:
            return _get_keys_folder(jfile, key_path, in_memory, ignore_prefix)
    else:
        raise ValueError(
            'jfile should be a str, '
            'file_like or path_like object: {}'.format(jfile))
def beta(self):
"""
Courant-Snyder parameter :math:`\\beta`.
"""
beta = _np.sqrt(self.sx)/self.emit
    return beta | Courant-Snyder parameter :math:`\\beta`. | Below is the instruction that describes the task:
### Input:
Courant-Snyder parameter :math:`\\beta`.
### Response:
def beta(self):
    """
    Courant-Snyder parameter :math:`\\beta`.
    """
    # beta = sqrt(sx) / emittance, using the instance's stored moments.
    return _np.sqrt(self.sx) / self.emit
def load(fp, strip_comments=False, **kw):
"""
Load a list of trees from an open Newick formatted file.
:param fp: open file handle.
:param strip_comments: Flag signaling whether to strip comments enclosed in square \
brackets.
:param kw: Keyword arguments are passed through to `Node.create`.
:return: List of Node objects.
"""
kw['strip_comments'] = strip_comments
return loads(fp.read(), **kw) | Load a list of trees from an open Newick formatted file.
:param fp: open file handle.
:param strip_comments: Flag signaling whether to strip comments enclosed in square \
brackets.
:param kw: Keyword arguments are passed through to `Node.create`.
    :return: List of Node objects. | Below is the instruction that describes the task:
### Input:
Load a list of trees from an open Newick formatted file.
:param fp: open file handle.
:param strip_comments: Flag signaling whether to strip comments enclosed in square \
brackets.
:param kw: Keyword arguments are passed through to `Node.create`.
:return: List of Node objects.
### Response:
def load(fp, strip_comments=False, **kw):
    """
    Load a list of trees from an open Newick formatted file.
    :param fp: open file handle.
    :param strip_comments: Flag signaling whether to strip comments enclosed in square \
brackets.
    :param kw: Keyword arguments are passed through to `Node.create`.
    :return: List of Node objects.
    """
    # Read the whole file and defer all parsing to `loads`.
    return loads(fp.read(), strip_comments=strip_comments, **kw)
def validation_statuses(self, area_uuid):
"""
Get count of validation statuses for all files in upload_area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict with key for each state and value being the count of files in that state
:rtype: dict
:raises UploadApiException: if information could not be obtained
"""
path = "/area/{uuid}/validations".format(uuid=area_uuid)
result = self._make_request('get', path)
return result.json() | Get count of validation statuses for all files in upload_area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict with key for each state and value being the count of files in that state
:rtype: dict
    :raises UploadApiException: if information could not be obtained | Below is the instruction that describes the task:
### Input:
Get count of validation statuses for all files in upload_area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict with key for each state and value being the count of files in that state
:rtype: dict
:raises UploadApiException: if information could not be obtained
### Response:
def validation_statuses(self, area_uuid):
    """
    Get count of validation statuses for all files in upload_area
    :param str area_uuid: A RFC4122-compliant ID for the upload area
    :return: a dict with key for each state and value being the count of files in that state
    :rtype: dict
    :raises UploadApiException: if information could not be obtained
    """
    endpoint = "/area/{uuid}/validations".format(uuid=area_uuid)
    response = self._make_request('get', endpoint)
    return response.json()
def create_snapshot(self, name, tag):
"""
Create new instance of image with snaphot image (it is copied inside class constructuor)
:param name: str - name of image - not used now
:param tag: str - tag for image
:return: NspawnImage instance
"""
source = self.local_location
logger.debug("Create Snapshot: %s -> %s" % (source, name))
# FIXME: actually create the snapshot via clone command
if name and tag:
output_tag = "{}:{}".format(name, tag)
else:
output_tag = name or tag
return self.__class__(repository=source, tag=output_tag) | Create new instance of image with snaphot image (it is copied inside class constructuor)
:param name: str - name of image - not used now
:param tag: str - tag for image
:return: NspawnImage instance | Below is the the instruction that describes the task:
### Input:
Create new instance of image with snaphot image (it is copied inside class constructuor)
:param name: str - name of image - not used now
:param tag: str - tag for image
:return: NspawnImage instance
### Response:
def create_snapshot(self, name, tag):
    """
    Create new instance of image with snapshot image (it is copied inside class constructor)
    :param name: str - name of image - not used now
    :param tag: str - tag for image
    :return: NspawnImage instance
    """
    snapshot_source = self.local_location
    logger.debug("Create Snapshot: %s -> %s" % (snapshot_source, name))
    # FIXME: actually create the snapshot via clone command
    # "name:tag" when both parts are present, otherwise whichever one is set.
    output_tag = "{}:{}".format(name, tag) if name and tag else (name or tag)
    return self.__class__(repository=snapshot_source, tag=output_tag)
def list_tag(self, limit=500, offset=0):
"""List `all` the tags for this Thing
Returns lists of tags, as below
#!python
[
"mytag1",
"mytag2"
"ein_name",
"nochein_name"
]
- OR...
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`limit` (optional) (integer) Return at most this many tags
`offset` (optional) (integer) Return tags starting at this offset
"""
evt = self._client._request_entity_tag_list(self.__lid, limit=limit, offset=offset)
self._client._wait_and_except_if_failed(evt)
return evt.payload['tags'] | List `all` the tags for this Thing
Returns lists of tags, as below
#!python
[
"mytag1",
"mytag2"
"ein_name",
"nochein_name"
]
- OR...
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`limit` (optional) (integer) Return at most this many tags
`offset` (optional) (integer) Return tags starting at this offset | Below is the the instruction that describes the task:
### Input:
List `all` the tags for this Thing
Returns lists of tags, as below
#!python
[
"mytag1",
"mytag2"
"ein_name",
"nochein_name"
]
- OR...
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`limit` (optional) (integer) Return at most this many tags
`offset` (optional) (integer) Return tags starting at this offset
### Response:
def list_tag(self, limit=500, offset=0):
    """List `all` the tags for this Thing
    Returns lists of tags, as below
    #!python
    [
        "mytag1",
        "mytag2"
        "ein_name",
        "nochein_name"
    ]
    - OR...
    Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
    containing the error if the infrastructure detects a problem
    Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
    if there is a communications problem between you and the infrastructure
    `limit` (optional) (integer) Return at most this many tags
    `offset` (optional) (integer) Return tags starting at this offset
    """
    client = self._client
    evt = client._request_entity_tag_list(self.__lid, limit=limit, offset=offset)
    # Block until the request completes, raising on infrastructure failure.
    client._wait_and_except_if_failed(evt)
    return evt.payload['tags']
def _load(self):
"""Load the MODIS RSR data for the band requested"""
if self.is_sw or self.platform_name == 'EOS-Aqua':
scale = 0.001
else:
scale = 1.0
detector = read_modis_response(self.requested_band_filename, scale)
self.rsr = detector
if self._sort:
self.sort() | Load the MODIS RSR data for the band requested | Below is the the instruction that describes the task:
### Input:
Load the MODIS RSR data for the band requested
### Response:
def _load(self):
    """Load the MODIS RSR data for the band requested"""
    # Aqua and shortwave-band response files require a 0.001 scale factor.
    scale = 0.001 if (self.is_sw or self.platform_name == 'EOS-Aqua') else 1.0
    self.rsr = read_modis_response(self.requested_band_filename, scale)
    if self._sort:
        self.sort()
def get_asset_query_session(self, proxy):
"""Gets an asset query session.
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetQuerySession) - an
AssetQuerySession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_query() is false
compliance: optional - This method must be implemented if
supports_asset_query() is true.
"""
if not self.supports_asset_query():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise # OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.AssetQuerySession(proxy=proxy, runtime=self._runtime)
except AttributeError:
raise # OperationFailed()
return session | Gets an asset query session.
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetQuerySession) - an
AssetQuerySession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_query() is false
compliance: optional - This method must be implemented if
supports_asset_query() is true. | Below is the the instruction that describes the task:
### Input:
Gets an asset query session.
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetQuerySession) - an
AssetQuerySession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_query() is false
compliance: optional - This method must be implemented if
supports_asset_query() is true.
### Response:
def get_asset_query_session(self, proxy):
    """Gets an asset query session.
    arg proxy (osid.proxy.Proxy): a proxy
    return: (osid.repository.AssetQuerySession) - an
        AssetQuerySession
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - supports_asset_query() is false
    compliance: optional - This method must be implemented if
        supports_asset_query() is true.
    """
    # Only hand out sessions this provider actually supports.
    if not self.supports_asset_query():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        # Placeholder: re-raises the ImportError; intended to be replaced
        # by OperationFailed per the docstring contract.
        raise # OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        session = sessions.AssetQuerySession(proxy=proxy, runtime=self._runtime)
    except AttributeError:
        # Placeholder: re-raises; intended to become OperationFailed.
        raise # OperationFailed()
    return session
def error_response(response):
"""
Raises errors matching the response code
"""
if response.status_code >= 500:
raise exceptions.GeocodioServerError
elif response.status_code == 403:
raise exceptions.GeocodioAuthError
elif response.status_code == 422:
raise exceptions.GeocodioDataError(response.json()["error"])
else:
raise exceptions.GeocodioError(
"Unknown service error (HTTP {0})".format(response.status_code)
) | Raises errors matching the response code | Below is the the instruction that describes the task:
### Input:
Raises errors matching the response code
### Response:
def error_response(response):
    """
    Raises errors matching the response code
    """
    status = response.status_code
    # Guard clauses: most specific classes first, generic fallback last.
    if status >= 500:
        raise exceptions.GeocodioServerError
    if status == 403:
        raise exceptions.GeocodioAuthError
    if status == 422:
        raise exceptions.GeocodioDataError(response.json()["error"])
    raise exceptions.GeocodioError(
        "Unknown service error (HTTP {0})".format(status)
    )
def sign(self, msg, key):
"""
Create a signature over a message as defined in RFC7515 using an
RSA key
:param msg: the message.
:type msg: bytes
:returns: bytes, the signature of data.
:rtype: bytes
"""
if not isinstance(key, rsa.RSAPrivateKey):
raise TypeError(
"The key must be an instance of rsa.RSAPrivateKey")
sig = key.sign(msg, self.padding, self.hash)
return sig | Create a signature over a message as defined in RFC7515 using an
RSA key
:param msg: the message.
:type msg: bytes
:returns: bytes, the signature of data.
:rtype: bytes | Below is the the instruction that describes the task:
### Input:
Create a signature over a message as defined in RFC7515 using an
RSA key
:param msg: the message.
:type msg: bytes
:returns: bytes, the signature of data.
:rtype: bytes
### Response:
def sign(self, msg, key):
    """
    Create a signature over a message as defined in RFC7515 using an
    RSA key
    :param msg: the message.
    :type msg: bytes
    :param key: the private key to sign with; must be an
        ``rsa.RSAPrivateKey`` instance.
    :returns: bytes, the signature of data.
    :rtype: bytes
    """
    if not isinstance(key, rsa.RSAPrivateKey):
        raise TypeError(
            "The key must be an instance of rsa.RSAPrivateKey")
    return key.sign(msg, self.padding, self.hash)
def hlog(x, b=500, r=_display_max, d=_l_mmax):
"""
Base 10 hyperlog transform.
Parameters
----------
x : num | num iterable
values to be transformed.
b : num
Parameter controling the location of the shift
from linear to log transformation.
r : num (default = 10**4)
maximal transformed value.
d : num (default = log10(2**18))
log10 of maximal possible measured value.
hlog_inv(r) = 10**d
Returns
-------
Array of transformed values.
"""
hlog_fun = _make_hlog_numeric(b, r, d)
if not hasattr(x, '__len__'): # if transforming a single number
y = hlog_fun(x)
else:
n = len(x)
if not n: # if transforming empty container
return x
else:
y = hlog_fun(x)
return y | Base 10 hyperlog transform.
Parameters
----------
x : num | num iterable
values to be transformed.
b : num
Parameter controling the location of the shift
from linear to log transformation.
r : num (default = 10**4)
maximal transformed value.
d : num (default = log10(2**18))
log10 of maximal possible measured value.
hlog_inv(r) = 10**d
Returns
-------
    Array of transformed values. | Below is the instruction that describes the task:
### Input:
Base 10 hyperlog transform.
Parameters
----------
x : num | num iterable
values to be transformed.
b : num
Parameter controling the location of the shift
from linear to log transformation.
r : num (default = 10**4)
maximal transformed value.
d : num (default = log10(2**18))
log10 of maximal possible measured value.
hlog_inv(r) = 10**d
Returns
-------
Array of transformed values.
### Response:
def hlog(x, b=500, r=_display_max, d=_l_mmax):
"""
Base 10 hyperlog transform.
Parameters
----------
x : num | num iterable
values to be transformed.
b : num
Parameter controling the location of the shift
from linear to log transformation.
r : num (default = 10**4)
maximal transformed value.
d : num (default = log10(2**18))
log10 of maximal possible measured value.
hlog_inv(r) = 10**d
Returns
-------
Array of transformed values.
"""
hlog_fun = _make_hlog_numeric(b, r, d)
if not hasattr(x, '__len__'): # if transforming a single number
y = hlog_fun(x)
else:
n = len(x)
if not n: # if transforming empty container
return x
else:
y = hlog_fun(x)
return y |
def new_pivot(self, attributes=None, exists=False):
"""
Create a new pivot model instance.
"""
pivot = self._related.new_pivot(self._parent, attributes, self._table, exists)
        return pivot.set_pivot_keys(self._foreign_key, self._other_key) | Create a new pivot model instance. | Below is the instruction that describes the task:
### Input:
Create a new pivot model instance.
### Response:
def new_pivot(self, attributes=None, exists=False):
"""
Create a new pivot model instance.
"""
pivot = self._related.new_pivot(self._parent, attributes, self._table, exists)
return pivot.set_pivot_keys(self._foreign_key, self._other_key) |
def delete_namespaced_service(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_service # noqa: E501
delete a Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_service(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Service (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_service_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.delete_namespaced_service_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | delete_namespaced_service # noqa: E501
delete a Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_service(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Service (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
                 returns the request thread. | Below is the instruction that describes the task:
### Input:
delete_namespaced_service # noqa: E501
delete a Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_service(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Service (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_namespaced_service(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_service # noqa: E501
delete a Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_service(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Service (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_service_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.delete_namespaced_service_with_http_info(name, namespace, **kwargs) # noqa: E501
return data |
def dataframe(self):
"""
Returns a ``pandas DataFrame`` containing all other relevant class
properties and values for the specified game.
"""
fields_to_include = {
'completed_passes': self.completed_passes,
'attempted_passes': self.attempted_passes,
'passing_yards': self.passing_yards,
'passing_touchdowns': self.passing_touchdowns,
'interceptions_thrown': self.interceptions_thrown,
'times_sacked': self.times_sacked,
'yards_lost_from_sacks': self.yards_lost_from_sacks,
'longest_pass': self.longest_pass,
'quarterback_rating': self.quarterback_rating,
'rush_attempts': self.rush_attempts,
'rush_yards': self.rush_yards,
'rush_touchdowns': self.rush_touchdowns,
'longest_rush': self.longest_rush,
'times_pass_target': self.times_pass_target,
'receptions': self.receptions,
'receiving_yards': self.receiving_yards,
'receiving_touchdowns': self.receiving_touchdowns,
'longest_reception': self.longest_reception,
'fumbles': self.fumbles,
'fumbles_lost': self.fumbles_lost,
'interceptions': self.interceptions,
'yards_returned_from_interception':
self.yards_returned_from_interception,
'interceptions_returned_for_touchdown':
self.interceptions_returned_for_touchdown,
'longest_interception_return': self.longest_interception_return,
'passes_defended': self.passes_defended,
'sacks': self.sacks,
'combined_tackles': self.combined_tackles,
'solo_tackles': self.solo_tackles,
'assists_on_tackles': self.assists_on_tackles,
'tackles_for_loss': self.tackles_for_loss,
'quarterback_hits': self.quarterback_hits,
'fumbles_recovered': self.fumbles_recovered,
'yards_recovered_from_fumble': self.yards_recovered_from_fumble,
'fumbles_recovered_for_touchdown':
self.fumbles_recovered_for_touchdown,
'fumbles_forced': self.fumbles_forced,
'kickoff_returns': self.kickoff_returns,
'kickoff_return_yards': self.kickoff_return_yards,
'average_kickoff_return_yards': self.average_kickoff_return_yards,
'kickoff_return_touchdown': self.kickoff_return_touchdown,
'longest_kickoff_return': self.longest_kickoff_return,
'punt_returns': self.punt_returns,
'punt_return_yards': self.punt_return_yards,
'yards_per_punt_return': self.yards_per_punt_return,
'punt_return_touchdown': self.punt_return_touchdown,
'longest_punt_return': self.longest_punt_return,
'extra_points_made': self.extra_points_made,
'extra_points_attempted': self.extra_points_attempted,
'field_goals_made': self.field_goals_made,
'field_goals_attempted': self.field_goals_attempted,
'punts': self.punts,
'total_punt_yards': self.total_punt_yards,
'yards_per_punt': self.yards_per_punt,
'longest_punt': self.longest_punt
}
return pd.DataFrame([fields_to_include], index=[self._player_id]) | Returns a ``pandas DataFrame`` containing all other relevant class
properties and values for the specified game. | Below is the the instruction that describes the task:
### Input:
Returns a ``pandas DataFrame`` containing all other relevant class
properties and values for the specified game.
### Response:
def dataframe(self):
"""
Returns a ``pandas DataFrame`` containing all other relevant class
properties and values for the specified game.
"""
fields_to_include = {
'completed_passes': self.completed_passes,
'attempted_passes': self.attempted_passes,
'passing_yards': self.passing_yards,
'passing_touchdowns': self.passing_touchdowns,
'interceptions_thrown': self.interceptions_thrown,
'times_sacked': self.times_sacked,
'yards_lost_from_sacks': self.yards_lost_from_sacks,
'longest_pass': self.longest_pass,
'quarterback_rating': self.quarterback_rating,
'rush_attempts': self.rush_attempts,
'rush_yards': self.rush_yards,
'rush_touchdowns': self.rush_touchdowns,
'longest_rush': self.longest_rush,
'times_pass_target': self.times_pass_target,
'receptions': self.receptions,
'receiving_yards': self.receiving_yards,
'receiving_touchdowns': self.receiving_touchdowns,
'longest_reception': self.longest_reception,
'fumbles': self.fumbles,
'fumbles_lost': self.fumbles_lost,
'interceptions': self.interceptions,
'yards_returned_from_interception':
self.yards_returned_from_interception,
'interceptions_returned_for_touchdown':
self.interceptions_returned_for_touchdown,
'longest_interception_return': self.longest_interception_return,
'passes_defended': self.passes_defended,
'sacks': self.sacks,
'combined_tackles': self.combined_tackles,
'solo_tackles': self.solo_tackles,
'assists_on_tackles': self.assists_on_tackles,
'tackles_for_loss': self.tackles_for_loss,
'quarterback_hits': self.quarterback_hits,
'fumbles_recovered': self.fumbles_recovered,
'yards_recovered_from_fumble': self.yards_recovered_from_fumble,
'fumbles_recovered_for_touchdown':
self.fumbles_recovered_for_touchdown,
'fumbles_forced': self.fumbles_forced,
'kickoff_returns': self.kickoff_returns,
'kickoff_return_yards': self.kickoff_return_yards,
'average_kickoff_return_yards': self.average_kickoff_return_yards,
'kickoff_return_touchdown': self.kickoff_return_touchdown,
'longest_kickoff_return': self.longest_kickoff_return,
'punt_returns': self.punt_returns,
'punt_return_yards': self.punt_return_yards,
'yards_per_punt_return': self.yards_per_punt_return,
'punt_return_touchdown': self.punt_return_touchdown,
'longest_punt_return': self.longest_punt_return,
'extra_points_made': self.extra_points_made,
'extra_points_attempted': self.extra_points_attempted,
'field_goals_made': self.field_goals_made,
'field_goals_attempted': self.field_goals_attempted,
'punts': self.punts,
'total_punt_yards': self.total_punt_yards,
'yards_per_punt': self.yards_per_punt,
'longest_punt': self.longest_punt
}
return pd.DataFrame([fields_to_include], index=[self._player_id]) |
def ssh_compute_add(public_key, application_name, rid=None, unit=None,
user=None):
"""Add a compute nodes ssh details to local cache.
Collect various hostname variations and add the corresponding host keys to
the local known hosts file. Finally, add the supplied public key to the
authorized_key file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param rid: Relation id of the relation between this charm and the app. If
none is supplied it is assumed its the relation relating to
the current hook context.
:type rid: str
:param unit: Unit to add ssh asserts for if none is supplied it is assumed
its the unit relating to the current hook context.
:type unit: str
:param user: The user that the ssh asserts are for.
:type user: str
"""
relation_data = relation_get(rid=rid, unit=unit)
ssh_compute_add_host_and_key(
public_key,
relation_data.get('hostname'),
relation_data.get('private-address'),
application_name,
user=user) | Add a compute nodes ssh details to local cache.
Collect various hostname variations and add the corresponding host keys to
the local known hosts file. Finally, add the supplied public key to the
authorized_key file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param rid: Relation id of the relation between this charm and the app. If
none is supplied it is assumed its the relation relating to
the current hook context.
:type rid: str
:param unit: Unit to add ssh asserts for if none is supplied it is assumed
its the unit relating to the current hook context.
:type unit: str
:param user: The user that the ssh asserts are for.
    :type user: str | Below is the instruction that describes the task:
### Input:
Add a compute nodes ssh details to local cache.
Collect various hostname variations and add the corresponding host keys to
the local known hosts file. Finally, add the supplied public key to the
authorized_key file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param rid: Relation id of the relation between this charm and the app. If
none is supplied it is assumed its the relation relating to
the current hook context.
:type rid: str
:param unit: Unit to add ssh asserts for if none is supplied it is assumed
its the unit relating to the current hook context.
:type unit: str
:param user: The user that the ssh asserts are for.
:type user: str
### Response:
def ssh_compute_add(public_key, application_name, rid=None, unit=None,
user=None):
"""Add a compute nodes ssh details to local cache.
Collect various hostname variations and add the corresponding host keys to
the local known hosts file. Finally, add the supplied public key to the
authorized_key file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param rid: Relation id of the relation between this charm and the app. If
none is supplied it is assumed its the relation relating to
the current hook context.
:type rid: str
:param unit: Unit to add ssh asserts for if none is supplied it is assumed
its the unit relating to the current hook context.
:type unit: str
:param user: The user that the ssh asserts are for.
:type user: str
"""
relation_data = relation_get(rid=rid, unit=unit)
ssh_compute_add_host_and_key(
public_key,
relation_data.get('hostname'),
relation_data.get('private-address'),
application_name,
user=user) |
def listen_dep(self, target, timeout):
"""Listen *timeout* seconds to become initialized as a DEP Target.
The RC-S956 can be set to listen as a DEP Target for passive
communication mode. Target active communication mode is
disabled by the driver due to performance issues. It is also
not possible to fully control the ATR_RES response, only the
response waiting time (TO byte of ATR_RES) and the general
bytes can be set by the driver. Because the TO value must be
set before calling the hardware listen function, it can not be
different for the Type A of Type F passive initalization (the
driver uses the higher value if they are different).
"""
# The RCS956 internal state machine must be in Mode 0 before
# we enter the listen phase. Also the RFConfiguration command
# for setting the TO parameter won't work in any other mode.
self.chipset.reset_mode()
# Set the WaitForSelected bit in CIU_FelNFC2 register to
# prevent active mode activation. Target active mode is not
# really working with this device.
self.chipset.write_register("CIU_FelNFC2", 0x80)
# We can not send ATR_RES as as a regular response but must
# use TgSetGeneralBytes to advance the chipset state machine
# to mode 3. Thus the ATR_RES is mostly determined by the
# firmware, we can only control the TO parameter for RWT, but
# must do it before the actual listen.
to = target.atr_res[15] & 0x0F
self.chipset.rf_configuration(0x82, bytearray([to, 2, to]))
# Disable automatic ATR_RES transmission. This must be done
# all again because the chipset reactivates the setting after
# ATR_RES was once send in TgSetGeneralBytes.
self.chipset.set_parameters(0b00001000)
# Now we can use the generic pn53x implementation
return super(Device, self).listen_dep(target, timeout) | Listen *timeout* seconds to become initialized as a DEP Target.
The RC-S956 can be set to listen as a DEP Target for passive
communication mode. Target active communication mode is
disabled by the driver due to performance issues. It is also
not possible to fully control the ATR_RES response, only the
response waiting time (TO byte of ATR_RES) and the general
bytes can be set by the driver. Because the TO value must be
set before calling the hardware listen function, it can not be
different for the Type A of Type F passive initalization (the
        driver uses the higher value if they are different). | Below is the instruction that describes the task:
### Input:
Listen *timeout* seconds to become initialized as a DEP Target.
The RC-S956 can be set to listen as a DEP Target for passive
communication mode. Target active communication mode is
disabled by the driver due to performance issues. It is also
not possible to fully control the ATR_RES response, only the
response waiting time (TO byte of ATR_RES) and the general
bytes can be set by the driver. Because the TO value must be
set before calling the hardware listen function, it can not be
different for the Type A of Type F passive initalization (the
driver uses the higher value if they are different).
### Response:
def listen_dep(self, target, timeout):
"""Listen *timeout* seconds to become initialized as a DEP Target.
The RC-S956 can be set to listen as a DEP Target for passive
communication mode. Target active communication mode is
disabled by the driver due to performance issues. It is also
not possible to fully control the ATR_RES response, only the
response waiting time (TO byte of ATR_RES) and the general
bytes can be set by the driver. Because the TO value must be
set before calling the hardware listen function, it can not be
different for the Type A of Type F passive initalization (the
driver uses the higher value if they are different).
"""
# The RCS956 internal state machine must be in Mode 0 before
# we enter the listen phase. Also the RFConfiguration command
# for setting the TO parameter won't work in any other mode.
self.chipset.reset_mode()
# Set the WaitForSelected bit in CIU_FelNFC2 register to
# prevent active mode activation. Target active mode is not
# really working with this device.
self.chipset.write_register("CIU_FelNFC2", 0x80)
# We can not send ATR_RES as as a regular response but must
# use TgSetGeneralBytes to advance the chipset state machine
# to mode 3. Thus the ATR_RES is mostly determined by the
# firmware, we can only control the TO parameter for RWT, but
# must do it before the actual listen.
to = target.atr_res[15] & 0x0F
self.chipset.rf_configuration(0x82, bytearray([to, 2, to]))
# Disable automatic ATR_RES transmission. This must be done
# all again because the chipset reactivates the setting after
# ATR_RES was once send in TgSetGeneralBytes.
self.chipset.set_parameters(0b00001000)
# Now we can use the generic pn53x implementation
return super(Device, self).listen_dep(target, timeout) |
def substitute(self, target, replacement):
"""Return a list of selectors obtained by replacing the `target`
selector with `replacement`.
Herein lie the guts of the Sass @extend directive.
In general, for a selector ``a X b Y c``, a target ``X Y``, and a
replacement ``q Z``, return the selectors ``a q X b Z c`` and ``q a X b
Z c``. Note in particular that no more than two selectors will be
returned, and the permutation of ancestors will never insert new simple
selectors "inside" the target selector.
"""
# Find the target in the parent selector, and split it into
# before/after
p_before, p_extras, p_after = self.break_around(target.simple_selectors)
# The replacement has no hinge; it only has the most specific simple
# selector (which is the part that replaces "self" in the parent) and
# whatever preceding simple selectors there may be
r_trail = replacement.simple_selectors[:-1]
r_extras = replacement.simple_selectors[-1]
# TODO what if the prefix doesn't match? who wins? should we even get
# this far?
focal_nodes = (p_extras.merge_into(r_extras),)
befores = _merge_selectors(p_before, r_trail)
cls = type(self)
return [
cls(before + focal_nodes + p_after)
for before in befores] | Return a list of selectors obtained by replacing the `target`
selector with `replacement`.
Herein lie the guts of the Sass @extend directive.
In general, for a selector ``a X b Y c``, a target ``X Y``, and a
replacement ``q Z``, return the selectors ``a q X b Z c`` and ``q a X b
Z c``. Note in particular that no more than two selectors will be
returned, and the permutation of ancestors will never insert new simple
        selectors "inside" the target selector. | Below is the instruction that describes the task:
### Input:
Return a list of selectors obtained by replacing the `target`
selector with `replacement`.
Herein lie the guts of the Sass @extend directive.
In general, for a selector ``a X b Y c``, a target ``X Y``, and a
replacement ``q Z``, return the selectors ``a q X b Z c`` and ``q a X b
Z c``. Note in particular that no more than two selectors will be
returned, and the permutation of ancestors will never insert new simple
selectors "inside" the target selector.
### Response:
def substitute(self, target, replacement):
"""Return a list of selectors obtained by replacing the `target`
selector with `replacement`.
Herein lie the guts of the Sass @extend directive.
In general, for a selector ``a X b Y c``, a target ``X Y``, and a
replacement ``q Z``, return the selectors ``a q X b Z c`` and ``q a X b
Z c``. Note in particular that no more than two selectors will be
returned, and the permutation of ancestors will never insert new simple
selectors "inside" the target selector.
"""
# Find the target in the parent selector, and split it into
# before/after
p_before, p_extras, p_after = self.break_around(target.simple_selectors)
# The replacement has no hinge; it only has the most specific simple
# selector (which is the part that replaces "self" in the parent) and
# whatever preceding simple selectors there may be
r_trail = replacement.simple_selectors[:-1]
r_extras = replacement.simple_selectors[-1]
# TODO what if the prefix doesn't match? who wins? should we even get
# this far?
focal_nodes = (p_extras.merge_into(r_extras),)
befores = _merge_selectors(p_before, r_trail)
cls = type(self)
return [
cls(before + focal_nodes + p_after)
for before in befores] |
def create_configmap(
name,
namespace,
data,
source=None,
template=None,
saltenv='base',
**kwargs):
'''
Creates the kubernetes configmap as defined by the user.
CLI Examples::
salt 'minion1' kubernetes.create_configmap \
settings default '{"example.conf": "# example file"}'
salt 'minion2' kubernetes.create_configmap \
name=settings namespace=default data='{"example.conf": "# example file"}'
'''
if source:
data = __read_and_render_yaml_file(source, template, saltenv)
elif data is None:
data = {}
data = __enforce_only_strings_dict(data)
body = kubernetes.client.V1ConfigMap(
metadata=__dict_to_object_meta(name, namespace, {}),
data=data)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.create_namespaced_config_map(
namespace, body)
return api_response.to_dict()
except (ApiException, HTTPError) as exc:
if isinstance(exc, ApiException) and exc.status == 404:
return None
else:
log.exception(
'Exception when calling '
'CoreV1Api->create_namespaced_config_map'
)
raise CommandExecutionError(exc)
finally:
_cleanup(**cfg) | Creates the kubernetes configmap as defined by the user.
CLI Examples::
salt 'minion1' kubernetes.create_configmap \
settings default '{"example.conf": "# example file"}'
salt 'minion2' kubernetes.create_configmap \
            name=settings namespace=default data='{"example.conf": "# example file"}' | Below is the instruction that describes the task:
### Input:
Creates the kubernetes configmap as defined by the user.
CLI Examples::
salt 'minion1' kubernetes.create_configmap \
settings default '{"example.conf": "# example file"}'
salt 'minion2' kubernetes.create_configmap \
name=settings namespace=default data='{"example.conf": "# example file"}'
### Response:
def create_configmap(
name,
namespace,
data,
source=None,
template=None,
saltenv='base',
**kwargs):
'''
Creates the kubernetes configmap as defined by the user.
CLI Examples::
salt 'minion1' kubernetes.create_configmap \
settings default '{"example.conf": "# example file"}'
salt 'minion2' kubernetes.create_configmap \
name=settings namespace=default data='{"example.conf": "# example file"}'
'''
if source:
data = __read_and_render_yaml_file(source, template, saltenv)
elif data is None:
data = {}
data = __enforce_only_strings_dict(data)
body = kubernetes.client.V1ConfigMap(
metadata=__dict_to_object_meta(name, namespace, {}),
data=data)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.create_namespaced_config_map(
namespace, body)
return api_response.to_dict()
except (ApiException, HTTPError) as exc:
if isinstance(exc, ApiException) and exc.status == 404:
return None
else:
log.exception(
'Exception when calling '
'CoreV1Api->create_namespaced_config_map'
)
raise CommandExecutionError(exc)
finally:
_cleanup(**cfg) |
def enable_rights(self):
"""
Enables rights management provided by :class:`fatbotslim.handlers.RightsHandler`.
"""
if self.rights is None:
handler_instance = RightsHandler(self)
self.handlers.insert(len(self.default_handlers), handler_instance) | Enables rights management provided by :class:`fatbotslim.handlers.RightsHandler`. | Below is the the instruction that describes the task:
### Input:
Enables rights management provided by :class:`fatbotslim.handlers.RightsHandler`.
### Response:
def enable_rights(self):
    """Enable rights management via :class:`fatbotslim.handlers.RightsHandler`.

    Does nothing when a rights handler is already active
    (``self.rights`` is set).
    """
    if self.rights is not None:
        return
    # Insert right after the default handlers so rights checks run before
    # any user-registered handler.
    self.handlers.insert(len(self.default_handlers), RightsHandler(self))
def GenerateNewFileName(self):
"""
Create new file name from show name, season number, episode number
and episode name in format ShowName.S<NUM>.E<NUM>.EpisodeName.
Returns
----------
string
New file name in format ShowName.S<NUM>.E<NUM>.EpisodeName.
"""
if self.showInfo.showName is not None and self.showInfo.seasonNum is not None and \
self.showInfo.episodeNum is not None and self.showInfo.episodeName is not None:
ext = os.path.splitext(self.fileInfo.origPath)[1]
newFileName = "{0}.S{1}E{2}".format(self.showInfo.showName, self.showInfo.seasonNum, \
self.showInfo.episodeNum)
for episodeNum in self.showInfo.multiPartEpisodeNumbers:
newFileName = newFileName + "_{0}".format(episodeNum)
newFileName = newFileName + ".{0}{1}".format(self.showInfo.episodeName, ext)
newFileName = util.StripSpecialCharacters(newFileName)
return newFileName | Create new file name from show name, season number, episode number
and episode name in format ShowName.S<NUM>.E<NUM>.EpisodeName.
Returns
----------
string
New file name in format ShowName.S<NUM>.E<NUM>.EpisodeName. | Below is the instruction that describes the task:
### Input:
Create new file name from show name, season number, episode number
and episode name in format ShowName.S<NUM>.E<NUM>.EpisodeName.
Returns
----------
string
New file name in format ShowName.S<NUM>.E<NUM>.EpisodeName.
### Response:
def GenerateNewFileName(self):
    """
    Build a new file name in the form ShowName.S<NUM>E<NUM>.EpisodeName.

    Returns
    ----------
    string
        The generated file name, or None when any of show name, season
        number, episode number or episode name is unknown.
    """
    show = self.showInfo
    if show.showName is None or show.seasonNum is None \
            or show.episodeNum is None or show.episodeName is None:
        return None
    extension = os.path.splitext(self.fileInfo.origPath)[1]
    parts = ["{0}.S{1}E{2}".format(show.showName, show.seasonNum,
                                   show.episodeNum)]
    # Multi-part episodes get every extra episode number appended as _<NUM>.
    parts.extend("_{0}".format(num) for num in show.multiPartEpisodeNumbers)
    candidate = "".join(parts) + ".{0}{1}".format(show.episodeName, extension)
    return util.StripSpecialCharacters(candidate)
def find_subdomains(domain):
"""Find subdomains according to the TLD."""
result = set()
response = get('https://findsubdomains.com/subdomains-of/' + domain).text
matches = findall(r'(?s)<div class="domains js-domain-name">(.*?)</div>', response)
for match in matches:
result.add(match.replace(' ', '').replace('\n', ''))
return list(result) | Find subdomains according to the TLD. | Below is the the instruction that describes the task:
### Input:
Find subdomains according to the TLD.
### Response:
def find_subdomains(domain):
    """Find subdomains according to the TLD.

    Scrapes findsubdomains.com and returns a de-duplicated list of names.
    """
    page = get('https://findsubdomains.com/subdomains-of/' + domain).text
    names = findall(r'(?s)<div class="domains js-domain-name">(.*?)</div>', page)
    # Strip the whitespace the markup wraps around each name while
    # de-duplicating via a set comprehension.
    unique = {name.replace(' ', '').replace('\n', '') for name in names}
    return list(unique)
def set_env(user, name, value=None):
'''
Set up an environment variable in the crontab.
CLI Example:
.. code-block:: bash
salt '*' cron.set_env root MAILTO user@example.com
'''
lst = list_tab(user)
for env in lst['env']:
if name == env['name']:
if value != env['value']:
rm_env(user, name)
jret = set_env(user, name, value)
if jret == 'new':
return 'updated'
else:
return jret
return 'present'
env = {'name': name, 'value': value}
lst['env'].append(env)
comdat = _write_cron_lines(user, _render_tab(lst))
if comdat['retcode']:
# Failed to commit, return the error
return comdat['stderr']
return 'new' | Set up an environment variable in the crontab.
CLI Example:
.. code-block:: bash
salt '*' cron.set_env root MAILTO user@example.com | Below is the the instruction that describes the task:
### Input:
Set up an environment variable in the crontab.
CLI Example:
.. code-block:: bash
salt '*' cron.set_env root MAILTO user@example.com
### Response:
def set_env(user, name, value=None):
'''
Set up an environment variable in the crontab.
CLI Example:
.. code-block:: bash
salt '*' cron.set_env root MAILTO user@example.com
'''
lst = list_tab(user)
# If the variable already exists, either leave it alone ('present') or
# replace it by removing it and re-adding via a recursive call.
for env in lst['env']:
if name == env['name']:
if value != env['value']:
rm_env(user, name)
jret = set_env(user, name, value)
# The recursive call reports 'new'; surface that as 'updated'.
if jret == 'new':
return 'updated'
else:
return jret
return 'present'
# Variable not present yet: append it and write the crontab back.
env = {'name': name, 'value': value}
lst['env'].append(env)
comdat = _write_cron_lines(user, _render_tab(lst))
if comdat['retcode']:
# Failed to commit, return the error
return comdat['stderr']
return 'new'
def add_mongo_config(app, simple_connection_string,
mongo_uri, collection_name):
"""
Configure the application to use MongoDB.
:param app: Flask application
:param simple_connection_string:
Expects host:port:database_name or database_name
Mutally_exclusive with mongo_uri
:param mongo_uri: Expects mongodb://... as defined
in https://docs.mongodb.com/manual/reference/connection-string/
Mutually exclusive with simple_connection_string (must be None)
:param collection_name: The collection containing Sacred's runs
:return:
"""
if mongo_uri != (None, None):
add_mongo_config_with_uri(app, mongo_uri[0], mongo_uri[1],
collection_name)
if simple_connection_string is not None:
print("Ignoring the -m option. Overridden by "
"a more specific option (-mu).", file=sys.stderr)
else:
# Use the default value 'sacred' when not specified
if simple_connection_string is None:
simple_connection_string = "sacred"
add_mongo_config_simple(app, simple_connection_string, collection_name) | Configure the application to use MongoDB.
:param app: Flask application
:param simple_connection_string:
Expects host:port:database_name or database_name
Mutally_exclusive with mongo_uri
:param mongo_uri: Expects mongodb://... as defined
in https://docs.mongodb.com/manual/reference/connection-string/
Mutually exclusive with simple_connection_string (must be None)
:param collection_name: The collection containing Sacred's runs
:return: | Below is the the instruction that describes the task:
### Input:
Configure the application to use MongoDB.
:param app: Flask application
:param simple_connection_string:
Expects host:port:database_name or database_name
Mutally_exclusive with mongo_uri
:param mongo_uri: Expects mongodb://... as defined
in https://docs.mongodb.com/manual/reference/connection-string/
Mutually exclusive with simple_connection_string (must be None)
:param collection_name: The collection containing Sacred's runs
:return:
### Response:
def add_mongo_config(app, simple_connection_string,
                     mongo_uri, collection_name):
    """
    Configure the application to use MongoDB.

    :param app: Flask application
    :param simple_connection_string:
        Expects host:port:database_name or database_name.
        Mutually exclusive with mongo_uri.
    :param mongo_uri: Expects mongodb://... as defined in
        https://docs.mongodb.com/manual/reference/connection-string/
        Mutually exclusive with simple_connection_string (must be None)
    :param collection_name: The collection containing Sacred's runs
    :return:
    """
    if mongo_uri == (None, None):
        # No full URI given: fall back to the simple form, defaulting to
        # the 'sacred' database when nothing was specified.
        if simple_connection_string is None:
            simple_connection_string = "sacred"
        add_mongo_config_simple(app, simple_connection_string, collection_name)
        return
    add_mongo_config_with_uri(app, mongo_uri[0], mongo_uri[1],
                              collection_name)
    if simple_connection_string is not None:
        print("Ignoring the -m option. Overridden by "
              "a more specific option (-mu).", file=sys.stderr)
def calculate(cls, order_id, shipping=None, refund_line_items=None):
"""
Calculates refund transactions based on line items and shipping.
When you want to create a refund, you should first use the calculate
endpoint to generate accurate refund transactions.
Args:
order_id: Order ID for which the Refund has to created.
shipping: Specify how much shipping to refund.
refund_line_items: A list of line item IDs and quantities to refund.
Returns:
Unsaved refund record
"""
data = {}
if shipping:
data['shipping'] = shipping
data['refund_line_items'] = refund_line_items or []
body = {'refund': data}
resource = cls.post(
"calculate", order_id=order_id, body=json.dumps(body).encode()
)
return cls(
cls.format.decode(resource.body),
prefix_options={'order_id': order_id}
) | Calculates refund transactions based on line items and shipping.
When you want to create a refund, you should first use the calculate
endpoint to generate accurate refund transactions.
Args:
order_id: Order ID for which the Refund has to created.
shipping: Specify how much shipping to refund.
refund_line_items: A list of line item IDs and quantities to refund.
Returns:
Unsaved refund record | Below is the the instruction that describes the task:
### Input:
Calculates refund transactions based on line items and shipping.
When you want to create a refund, you should first use the calculate
endpoint to generate accurate refund transactions.
Args:
order_id: Order ID for which the Refund has to created.
shipping: Specify how much shipping to refund.
refund_line_items: A list of line item IDs and quantities to refund.
Returns:
Unsaved refund record
### Response:
def calculate(cls, order_id, shipping=None, refund_line_items=None):
"""
Calculates refund transactions based on line items and shipping.
When you want to create a refund, you should first use the calculate
endpoint to generate accurate refund transactions.
Args:
order_id: Order ID for which the Refund has to created.
shipping: Specify how much shipping to refund.
refund_line_items: A list of line item IDs and quantities to refund.
Returns:
Unsaved refund record
"""
data = {}
# Shipping is only included when truthy; absence means no shipping refund.
if shipping:
data['shipping'] = shipping
data['refund_line_items'] = refund_line_items or []
body = {'refund': data}
# POST to the nested "calculate" endpoint; the request body must be bytes.
resource = cls.post(
"calculate", order_id=order_id, body=json.dumps(body).encode()
)
# Build an unsaved record scoped to the order via prefix_options.
return cls(
cls.format.decode(resource.body),
prefix_options={'order_id': order_id}
)
def import_module(module): # pylint: disable=R0201
"""Import module that are needed for the code expr to compile.
Argument:
module (str or list): module(s) to import.
"""
if isinstance(module, list):
all_modules = module
else:
all_modules = [module]
for mod in all_modules:
globals()[mod] = __import__(mod.strip()) | Import module that are needed for the code expr to compile.
Argument:
module (str or list): module(s) to import. | Below is the the instruction that describes the task:
### Input:
Import module that are needed for the code expr to compile.
Argument:
module (str or list): module(s) to import.
### Response:
def import_module(module):  # pylint: disable=R0201
    """Import module that are needed for the code expr to compile.

    Argument:
        module (str or list): module(s) to import.
    """
    modules = module if isinstance(module, list) else [module]
    for name in modules:
        # Bind the imported module into this module's globals under the
        # original (unstripped) name; the import itself uses the stripped
        # name so surrounding whitespace is tolerated.
        globals()[name] = __import__(name.strip())
def _events_with_diff_tag(talk, tag_array):
"""
Return the indices of the events with no tag in common as tag
"""
event_categories = np.nonzero(tag_array[talk])[0]
return np.nonzero(sum(tag_array.transpose()[event_categories]) == 0)[0] | Return the indices of the events with no tag in common as tag | Below is the the instruction that describes the task:
### Input:
Return the indices of the events with no tag in common as tag
### Response:
def _events_with_diff_tag(talk, tag_array):
"""
Return the indices of the events with no tag in common as tag
"""
event_categories = np.nonzero(tag_array[talk])[0]
return np.nonzero(sum(tag_array.transpose()[event_categories]) == 0)[0] |
def _CheckInstallSuccess(self):
"""Checks if the installer installed correctly."""
if not os.path.exists(self.install_path):
raise RuntimeError("Install failed, no files at: %s" % self.install_path)
try:
output = subprocess.check_output(["sc", "query", self.service_name])
service_running = "RUNNING" in output
except subprocess.CalledProcessError as e:
if e.returncode == 1060:
# 1060 means: The specified service does not exist as an installed
# service.
service_running = False
else:
raise
if self.expect_service_running:
if not service_running:
raise RuntimeError(
"GRR service not running after install, sc query output: %s" %
output)
else:
if service_running:
raise RuntimeError(
"GRR service running after install with expect_service_running == "
"False, sc query output: %s" % output) | Checks if the installer installed correctly. | Below is the the instruction that describes the task:
### Input:
Checks if the installer installed correctly.
### Response:
def _CheckInstallSuccess(self):
"""Checks if the installer installed correctly."""
if not os.path.exists(self.install_path):
raise RuntimeError("Install failed, no files at: %s" % self.install_path)
try:
# NOTE(review): on Python 3 check_output returns bytes, so the
# '"RUNNING" in output' substring test assumes a str result --
# confirm the target runtime.
output = subprocess.check_output(["sc", "query", self.service_name])
service_running = "RUNNING" in output
except subprocess.CalledProcessError as e:
if e.returncode == 1060:
# 1060 means: The specified service does not exist as an installed
# service.
service_running = False
else:
raise
# Cross-check the observed service state against the expectation.
if self.expect_service_running:
if not service_running:
raise RuntimeError(
"GRR service not running after install, sc query output: %s" %
output)
else:
if service_running:
raise RuntimeError(
"GRR service running after install with expect_service_running == "
"False, sc query output: %s" % output)
def add_nulldata(self, rawtx, hexdata):
"""Writes <hexdata> as new nulldata output to <rawtx>."""
tx = deserialize.unsignedtx(rawtx)
nulldata_txout = deserialize.nulldata_txout(hexdata)
tx = control.add_nulldata_output(tx, nulldata_txout)
return serialize.tx(tx) | Writes <hexdata> as new nulldata output to <rawtx>. | Below is the the instruction that describes the task:
### Input:
Writes <hexdata> as new nulldata output to <rawtx>.
### Response:
def add_nulldata(self, rawtx, hexdata):
    """Writes <hexdata> as new nulldata output to <rawtx>."""
    transaction = deserialize.unsignedtx(rawtx)
    nulldata_output = deserialize.nulldata_txout(hexdata)
    # Append the nulldata output and re-serialize the transaction.
    return serialize.tx(control.add_nulldata_output(transaction,
                                                    nulldata_output))
def _build_job_arguments(task):
"""Build the set of arguments required for running a job"""
job_args = {}
job_args['qitems'] = Q_STORAGE_ITEMS
job_args['task_id'] = task.task_id
# Backend parameters
job_args['backend'] = task.backend
backend_args = copy.deepcopy(task.backend_args)
if 'next_from_date' in backend_args:
backend_args['from_date'] = backend_args.pop('next_from_date')
if 'next_offset' in backend_args:
backend_args['offset'] = backend_args.pop('next_offset')
job_args['backend_args'] = backend_args
# Category
job_args['category'] = task.category
# Archiving parameters
archiving_cfg = task.archiving_cfg
job_args['archive_args'] = archiving_cfg.to_dict() if archiving_cfg else None
# Scheduler parameters
sched_cfg = task.scheduling_cfg
job_args['max_retries'] = sched_cfg.max_retries if sched_cfg else MAX_JOB_RETRIES
return job_args | Build the set of arguments required for running a job | Below is the the instruction that describes the task:
### Input:
Build the set of arguments required for running a job
### Response:
def _build_job_arguments(task):
    """Build the set of arguments required for running a job."""
    # Work on a deep copy so the task's own backend arguments are untouched.
    backend_args = copy.deepcopy(task.backend_args)
    # Resume markers produced by a previous run are renamed to the
    # parameter names the backend expects.
    for resume_key, arg_key in (('next_from_date', 'from_date'),
                                ('next_offset', 'offset')):
        if resume_key in backend_args:
            backend_args[arg_key] = backend_args.pop(resume_key)
    archiving_cfg = task.archiving_cfg
    sched_cfg = task.scheduling_cfg
    return {
        'qitems': Q_STORAGE_ITEMS,
        'task_id': task.task_id,
        'backend': task.backend,
        'backend_args': backend_args,
        'category': task.category,
        'archive_args': archiving_cfg.to_dict() if archiving_cfg else None,
        'max_retries': sched_cfg.max_retries if sched_cfg else MAX_JOB_RETRIES,
    }
def _CheckDatabaseEncoding(cursor):
"""Enforces a sane UTF-8 encoding for the database."""
cur_character_set = _ReadVariable("character_set_database", cursor)
if cur_character_set != CHARACTER_SET:
raise EncodingEnforcementError(
"Require MySQL character_set_database of {}, got {}."
" To create your database, use: {}".format(CHARACTER_SET,
cur_character_set,
CREATE_DATABASE_QUERY)) | Enforces a sane UTF-8 encoding for the database. | Below is the the instruction that describes the task:
### Input:
Enforces a sane UTF-8 encoding for the database.
### Response:
def _CheckDatabaseEncoding(cursor):
    """Enforces a sane UTF-8 encoding for the database."""
    found = _ReadVariable("character_set_database", cursor)
    if found == CHARACTER_SET:
        return
    # Tell the operator exactly how to create a correctly-encoded database.
    raise EncodingEnforcementError(
        "Require MySQL character_set_database of {}, got {}."
        " To create your database, use: {}".format(CHARACTER_SET,
                                                   found,
                                                   CREATE_DATABASE_QUERY))
def insert_conversion_steps_at_beginning(self, converters: List[Converter], inplace: bool = False):
"""
Utility method to insert converters at the beginning ofthis chain. If inplace is True, this object is modified
and None is returned. Otherwise, a copy is returned
:param converters: the list of converters to insert
:param inplace: boolean indicating whether to modify this object (True) or return a copy (False)
:return: None or a copy with the converters added
"""
if inplace:
for converter in reversed(converters):
self.insert_conversion_step_at_beginning(converter, inplace=True)
return
else:
new = copy(self)
for converter in reversed(converters):
# do inplace since it is a copy
new.insert_conversion_step_at_beginning(converter, inplace=True)
return new | Utility method to insert converters at the beginning ofthis chain. If inplace is True, this object is modified
and None is returned. Otherwise, a copy is returned
:param converters: the list of converters to insert
:param inplace: boolean indicating whether to modify this object (True) or return a copy (False)
:return: None or a copy with the converters added | Below is the the instruction that describes the task:
### Input:
Utility method to insert converters at the beginning ofthis chain. If inplace is True, this object is modified
and None is returned. Otherwise, a copy is returned
:param converters: the list of converters to insert
:param inplace: boolean indicating whether to modify this object (True) or return a copy (False)
:return: None or a copy with the converters added
### Response:
def insert_conversion_steps_at_beginning(self, converters: List[Converter], inplace: bool = False):
    """
    Utility method to insert converters at the beginning of this chain. If inplace is True, this
    object is modified and None is returned. Otherwise, a copy is returned.

    :param converters: the list of converters to insert
    :param inplace: boolean indicating whether to modify this object (True) or return a copy (False)
    :return: None or a copy with the converters added
    """
    target = self if inplace else copy(self)
    # Insert in reverse order so the first converter of the list ends up
    # first in the chain.
    for converter in reversed(converters):
        target.insert_conversion_step_at_beginning(converter, inplace=True)
    return None if inplace else target
def inspect(self, **kwargs):
"""
Plot the SCF cycle results with matplotlib.
Returns
`matplotlib` figure, None if some error occurred.
"""
try:
scf_cycle = abiinspect.GroundStateScfCycle.from_file(self.output_file.path)
except IOError:
return None
if scf_cycle is not None:
if "title" not in kwargs: kwargs["title"] = str(self)
return scf_cycle.plot(**kwargs)
return None | Plot the SCF cycle results with matplotlib.
Returns
`matplotlib` figure, None if some error occurred. | Below is the the instruction that describes the task:
### Input:
Plot the SCF cycle results with matplotlib.
Returns
`matplotlib` figure, None if some error occurred.
### Response:
def inspect(self, **kwargs):
    """
    Plot the SCF cycle results with matplotlib.

    Returns
        `matplotlib` figure, None if some error occurred.
    """
    try:
        cycle = abiinspect.GroundStateScfCycle.from_file(self.output_file.path)
    except IOError:
        return None
    if cycle is None:
        return None
    # Default the plot title to this task's string representation.
    if "title" not in kwargs:
        kwargs["title"] = str(self)
    return cycle.plot(**kwargs)
def translate_wp_comment(self, e):
"""
<wp:comment>
<wp:comment_id>1234</wp:comment_id>
<wp:comment_author><![CDATA[John Doe]]></wp:comment_author>
<wp:comment_author_email><![CDATA[info@adsasd.com]]></wp:comment_author_email>
<wp:comment_author_url>http://myhomepage.com/</wp:comment_author_url>
<wp:comment_author_IP><![CDATA[12.123.123.123]]></wp:comment_author_IP>
<wp:comment_date><![CDATA[2008-09-25 14:24:51]]></wp:comment_date>
<wp:comment_date_gmt><![CDATA[2008-09-25 13:24:51]]></wp:comment_date_gmt>
<wp:comment_content><![CDATA[Hey dude :)]]></wp:comment_content>
<wp:comment_approved><![CDATA[1]]></wp:comment_approved>
<wp:comment_type><![CDATA[]]></wp:comment_type>
<wp:comment_parent>0</wp:comment_parent>
<wp:comment_user_id>0</wp:comment_user_id>
</wp:comment>
"""
comment_dict = {}
comment_dict['ID'] = e.find('./{wp}comment_id').text
comment_dict['date'] = e.find('{wp}comment_date').text
comment_dict['content'] = e.find('{wp}comment_content').text
comment_dict['status'] = e.find('{wp}comment_approved').text
comment_dict['status'] = "approved" if comment_dict['status'] == "1" else "rejected"
comment_dict['parent'] = e.find('{wp}comment_parent').text
comment_dict['author'] = e.find('{wp}comment_author').text
comment_dict['date'] = time.strptime(comment_dict['date'], '%Y-%m-%d %H:%M:%S')
comment_dict['date'] = time.strftime('%Y-%m-%dT%H:%M:%S', comment_dict['date'])
return comment_dict | <wp:comment>
<wp:comment_id>1234</wp:comment_id>
<wp:comment_author><![CDATA[John Doe]]></wp:comment_author>
<wp:comment_author_email><![CDATA[info@adsasd.com]]></wp:comment_author_email>
<wp:comment_author_url>http://myhomepage.com/</wp:comment_author_url>
<wp:comment_author_IP><![CDATA[12.123.123.123]]></wp:comment_author_IP>
<wp:comment_date><![CDATA[2008-09-25 14:24:51]]></wp:comment_date>
<wp:comment_date_gmt><![CDATA[2008-09-25 13:24:51]]></wp:comment_date_gmt>
<wp:comment_content><![CDATA[Hey dude :)]]></wp:comment_content>
<wp:comment_approved><![CDATA[1]]></wp:comment_approved>
<wp:comment_type><![CDATA[]]></wp:comment_type>
<wp:comment_parent>0</wp:comment_parent>
<wp:comment_user_id>0</wp:comment_user_id>
</wp:comment> | Below is the the instruction that describes the task:
### Input:
<wp:comment>
<wp:comment_id>1234</wp:comment_id>
<wp:comment_author><![CDATA[John Doe]]></wp:comment_author>
<wp:comment_author_email><![CDATA[info@adsasd.com]]></wp:comment_author_email>
<wp:comment_author_url>http://myhomepage.com/</wp:comment_author_url>
<wp:comment_author_IP><![CDATA[12.123.123.123]]></wp:comment_author_IP>
<wp:comment_date><![CDATA[2008-09-25 14:24:51]]></wp:comment_date>
<wp:comment_date_gmt><![CDATA[2008-09-25 13:24:51]]></wp:comment_date_gmt>
<wp:comment_content><![CDATA[Hey dude :)]]></wp:comment_content>
<wp:comment_approved><![CDATA[1]]></wp:comment_approved>
<wp:comment_type><![CDATA[]]></wp:comment_type>
<wp:comment_parent>0</wp:comment_parent>
<wp:comment_user_id>0</wp:comment_user_id>
</wp:comment>
### Response:
def translate_wp_comment(self, e):
    """
    Translate a <wp:comment> element into a plain comment dict.

    Expected children (namespace prefix pre-substituted as ``{wp}``):
    comment_id, comment_author, comment_date (``%Y-%m-%d %H:%M:%S``),
    comment_content, comment_approved ("1" means approved) and
    comment_parent.
    """
    approved_flag = e.find('{wp}comment_approved').text
    raw_date = e.find('{wp}comment_date').text
    # Re-emit the timestamp in ISO-8601 style (T separator, no zone).
    parsed = time.strptime(raw_date, '%Y-%m-%d %H:%M:%S')
    return {
        'ID': e.find('./{wp}comment_id').text,
        'date': time.strftime('%Y-%m-%dT%H:%M:%S', parsed),
        'content': e.find('{wp}comment_content').text,
        'status': "approved" if approved_flag == "1" else "rejected",
        'parent': e.find('{wp}comment_parent').text,
        'author': e.find('{wp}comment_author').text,
    }
def astra_cpu_back_projector(proj_data, geometry, reco_space, out=None):
"""Run an ASTRA back-projection on the given data using the CPU.
Parameters
----------
proj_data : `DiscreteLpElement`
Projection data to which the back-projector is applied
geometry : `Geometry`
Geometry defining the tomographic setup
reco_space : `DiscreteLp`
Space to which the calling operator maps
out : ``reco_space`` element, optional
Element of the reconstruction space to which the result is written.
If ``None``, an element in ``reco_space`` is created.
Returns
-------
out : ``reco_space`` element
Reconstruction data resulting from the application of the backward
projector. If ``out`` was provided, the returned object is a
reference to it.
"""
if not isinstance(proj_data, DiscreteLpElement):
raise TypeError('projection data {!r} is not a DiscreteLpElement '
'instance'.format(proj_data))
if proj_data.space.impl != 'numpy':
raise TypeError('`proj_data` must be a `numpy.ndarray` based, '
"container got `impl` {!r}"
"".format(proj_data.space.impl))
if not isinstance(geometry, Geometry):
raise TypeError('geometry {!r} is not a Geometry instance'
''.format(geometry))
if not isinstance(reco_space, DiscreteLp):
raise TypeError('reconstruction space {!r} is not a DiscreteLp '
'instance'.format(reco_space))
if reco_space.impl != 'numpy':
raise TypeError("`reco_space.impl` must be 'numpy', got {!r}"
"".format(reco_space.impl))
if reco_space.ndim != geometry.ndim:
raise ValueError('dimensions {} of reconstruction space and {} of '
'geometry do not match'.format(
reco_space.ndim, geometry.ndim))
if out is None:
out = reco_space.element()
else:
if out not in reco_space:
raise TypeError('`out` {} is neither None nor a '
'DiscreteLpElement instance'.format(out))
ndim = proj_data.ndim
# Create astra geometries
vol_geom = astra_volume_geometry(reco_space)
proj_geom = astra_projection_geometry(geometry)
# Create ASTRA data structure
sino_id = astra_data(proj_geom, datatype='projection', data=proj_data,
allow_copy=True)
# Create projector
# TODO: implement with different schemes for angles and detector
if not all(s == proj_data.space.interp_byaxis[0]
for s in proj_data.space.interp_byaxis):
raise ValueError('data interpolation must be the same in each '
'dimension, got {}'
''.format(proj_data.space.interp_byaxis))
proj_interp = proj_data.space.interp
proj_id = astra_projector(proj_interp, vol_geom, proj_geom, ndim,
impl='cpu')
# Convert out to correct dtype and order if needed.
with writable_array(out, dtype='float32', order='C') as out_arr:
vol_id = astra_data(vol_geom, datatype='volume', data=out_arr,
ndim=reco_space.ndim)
# Create algorithm
algo_id = astra_algorithm('backward', ndim, vol_id, sino_id, proj_id,
impl='cpu')
# Run algorithm
astra.algorithm.run(algo_id)
# Weight the adjoint by appropriate weights
scaling_factor = float(proj_data.space.weighting.const)
scaling_factor /= float(reco_space.weighting.const)
out *= scaling_factor
# Delete ASTRA objects
astra.algorithm.delete(algo_id)
astra.data2d.delete((vol_id, sino_id))
astra.projector.delete(proj_id)
return out | Run an ASTRA back-projection on the given data using the CPU.
Parameters
----------
proj_data : `DiscreteLpElement`
Projection data to which the back-projector is applied
geometry : `Geometry`
Geometry defining the tomographic setup
reco_space : `DiscreteLp`
Space to which the calling operator maps
out : ``reco_space`` element, optional
Element of the reconstruction space to which the result is written.
If ``None``, an element in ``reco_space`` is created.
Returns
-------
out : ``reco_space`` element
Reconstruction data resulting from the application of the backward
projector. If ``out`` was provided, the returned object is a
reference to it. | Below is the the instruction that describes the task:
### Input:
Run an ASTRA back-projection on the given data using the CPU.
Parameters
----------
proj_data : `DiscreteLpElement`
Projection data to which the back-projector is applied
geometry : `Geometry`
Geometry defining the tomographic setup
reco_space : `DiscreteLp`
Space to which the calling operator maps
out : ``reco_space`` element, optional
Element of the reconstruction space to which the result is written.
If ``None``, an element in ``reco_space`` is created.
Returns
-------
out : ``reco_space`` element
Reconstruction data resulting from the application of the backward
projector. If ``out`` was provided, the returned object is a
reference to it.
### Response:
def astra_cpu_back_projector(proj_data, geometry, reco_space, out=None):
"""Run an ASTRA back-projection on the given data using the CPU.
Parameters
----------
proj_data : `DiscreteLpElement`
Projection data to which the back-projector is applied
geometry : `Geometry`
Geometry defining the tomographic setup
reco_space : `DiscreteLp`
Space to which the calling operator maps
out : ``reco_space`` element, optional
Element of the reconstruction space to which the result is written.
If ``None``, an element in ``reco_space`` is created.
Returns
-------
out : ``reco_space`` element
Reconstruction data resulting from the application of the backward
projector. If ``out`` was provided, the returned object is a
reference to it.
"""
# --- Argument validation ---
if not isinstance(proj_data, DiscreteLpElement):
raise TypeError('projection data {!r} is not a DiscreteLpElement '
'instance'.format(proj_data))
if proj_data.space.impl != 'numpy':
raise TypeError('`proj_data` must be a `numpy.ndarray` based, '
"container got `impl` {!r}"
"".format(proj_data.space.impl))
if not isinstance(geometry, Geometry):
raise TypeError('geometry {!r} is not a Geometry instance'
''.format(geometry))
if not isinstance(reco_space, DiscreteLp):
raise TypeError('reconstruction space {!r} is not a DiscreteLp '
'instance'.format(reco_space))
if reco_space.impl != 'numpy':
raise TypeError("`reco_space.impl` must be 'numpy', got {!r}"
"".format(reco_space.impl))
if reco_space.ndim != geometry.ndim:
raise ValueError('dimensions {} of reconstruction space and {} of '
'geometry do not match'.format(
reco_space.ndim, geometry.ndim))
if out is None:
out = reco_space.element()
else:
if out not in reco_space:
raise TypeError('`out` {} is neither None nor a '
'DiscreteLpElement instance'.format(out))
ndim = proj_data.ndim
# Create astra geometries
vol_geom = astra_volume_geometry(reco_space)
proj_geom = astra_projection_geometry(geometry)
# Create ASTRA data structure
# (allow_copy=True so the input element itself is never mutated)
sino_id = astra_data(proj_geom, datatype='projection', data=proj_data,
allow_copy=True)
# Create projector
# TODO: implement with different schemes for angles and detector
if not all(s == proj_data.space.interp_byaxis[0]
for s in proj_data.space.interp_byaxis):
raise ValueError('data interpolation must be the same in each '
'dimension, got {}'
''.format(proj_data.space.interp_byaxis))
proj_interp = proj_data.space.interp
proj_id = astra_projector(proj_interp, vol_geom, proj_geom, ndim,
impl='cpu')
# Convert out to correct dtype and order if needed.
with writable_array(out, dtype='float32', order='C') as out_arr:
vol_id = astra_data(vol_geom, datatype='volume', data=out_arr,
ndim=reco_space.ndim)
# Create algorithm
algo_id = astra_algorithm('backward', ndim, vol_id, sino_id, proj_id,
impl='cpu')
# Run algorithm
astra.algorithm.run(algo_id)
# Weight the adjoint by appropriate weights
# (ratio of projection-space to reconstruction-space weighting constants)
scaling_factor = float(proj_data.space.weighting.const)
scaling_factor /= float(reco_space.weighting.const)
out *= scaling_factor
# Delete ASTRA objects
# NOTE(review): astra.data2d.delete is used even when ndim == 3 --
# confirm this is intended for the 3d case.
astra.algorithm.delete(algo_id)
astra.data2d.delete((vol_id, sino_id))
astra.projector.delete(proj_id)
return out
def DeleteMessageHandlerRequests(self, requests, cursor=None):
"""Deletes a list of message handler requests from the database."""
query = "DELETE FROM message_handler_requests WHERE request_id IN ({})"
request_ids = set([r.request_id for r in requests])
query = query.format(",".join(["%s"] * len(request_ids)))
cursor.execute(query, request_ids) | Deletes a list of message handler requests from the database. | Below is the the instruction that describes the task:
### Input:
Deletes a list of message handler requests from the database.
### Response:
def DeleteMessageHandlerRequests(self, requests, cursor=None):
"""Deletes a list of message handler requests from the database."""
query = "DELETE FROM message_handler_requests WHERE request_id IN ({})"
request_ids = set([r.request_id for r in requests])
query = query.format(",".join(["%s"] * len(request_ids)))
cursor.execute(query, request_ids) |
def rezero(self):
"""
Move the current scene so that the AABB of the whole
scene is centered at the origin.
Does this by changing the base frame to a new, offset
base frame.
"""
if self.is_empty or np.allclose(self.centroid, 0.0):
# early exit since what we want already exists
return
# the transformation to move the overall scene to AABB centroid
matrix = np.eye(4)
matrix[:3, 3] = -self.centroid
# we are going to change the base frame
new_base = str(self.graph.base_frame) + '_I'
self.graph.update(frame_from=new_base,
frame_to=self.graph.base_frame,
matrix=matrix)
self.graph.base_frame = new_base | Move the current scene so that the AABB of the whole
scene is centered at the origin.
Does this by changing the base frame to a new, offset
base frame. | Below is the the instruction that describes the task:
### Input:
Move the current scene so that the AABB of the whole
scene is centered at the origin.
Does this by changing the base frame to a new, offset
base frame.
### Response:
def rezero(self):
"""
Move the current scene so that the AABB of the whole
scene is centered at the origin.
Does this by changing the base frame to a new, offset
base frame.
"""
if self.is_empty or np.allclose(self.centroid, 0.0):
# early exit since what we want already exists
return
# the transformation to move the overall scene to AABB centroid
matrix = np.eye(4)
matrix[:3, 3] = -self.centroid
# we are going to change the base frame
new_base = str(self.graph.base_frame) + '_I'
self.graph.update(frame_from=new_base,
frame_to=self.graph.base_frame,
matrix=matrix)
self.graph.base_frame = new_base |
def add_record(self, record):
"""Add a record to the OAISet.
:param record: Record to be added.
:type record: `invenio_records.api.Record` or derivative.
"""
record.setdefault('_oai', {}).setdefault('sets', [])
assert not self.has_record(record)
record['_oai']['sets'].append(self.spec) | Add a record to the OAISet.
:param record: Record to be added.
:type record: `invenio_records.api.Record` or derivative. | Below is the the instruction that describes the task:
### Input:
Add a record to the OAISet.
:param record: Record to be added.
:type record: `invenio_records.api.Record` or derivative.
### Response:
def add_record(self, record):
"""Add a record to the OAISet.
:param record: Record to be added.
:type record: `invenio_records.api.Record` or derivative.
"""
record.setdefault('_oai', {}).setdefault('sets', [])
assert not self.has_record(record)
record['_oai']['sets'].append(self.spec) |
def save_lastnode_id():
"""Save the id of the last node created."""
init_counter()
with FileLock(_COUNTER_FILE):
with AtomicFile(_COUNTER_FILE, mode="w") as fh:
fh.write("%d\n" % _COUNTER) | Save the id of the last node created. | Below is the the instruction that describes the task:
### Input:
Save the id of the last node created.
### Response:
def save_lastnode_id():
"""Save the id of the last node created."""
init_counter()
with FileLock(_COUNTER_FILE):
with AtomicFile(_COUNTER_FILE, mode="w") as fh:
fh.write("%d\n" % _COUNTER) |
def set_options(self, options):
"""
Configure all the many options we'll need to make this happen.
"""
self.verbosity = int(options.get('verbosity'))
# Will we be gzipping?
self.gzip = getattr(settings, 'BAKERY_GZIP', False)
# And if so what content types will we be gzipping?
self.gzip_content_types = getattr(
settings,
'GZIP_CONTENT_TYPES',
DEFAULT_GZIP_CONTENT_TYPES
)
# What ACL (i.e. security permissions) will be giving the files on S3?
self.acl = getattr(settings, 'DEFAULT_ACL', self.DEFAULT_ACL)
# Should we set cache-control headers?
self.cache_control = getattr(settings, 'BAKERY_CACHE_CONTROL', {})
# If the user specifies a build directory...
if options.get('build_dir'):
# ... validate that it is good.
if not os.path.exists(options.get('build_dir')):
raise CommandError(self.build_missing_msg)
# Go ahead and use it
self.build_dir = options.get("build_dir")
# If the user does not specify a build dir...
else:
# Check if it is set in settings.py
if not hasattr(settings, 'BUILD_DIR'):
raise CommandError(self.build_unconfig_msg)
# Then make sure it actually exists
if not os.path.exists(settings.BUILD_DIR):
raise CommandError(self.build_missing_msg)
# Go ahead and use it
self.build_dir = settings.BUILD_DIR
# If the user provides a bucket name, use that.
if options.get("aws_bucket_name"):
self.aws_bucket_name = options.get("aws_bucket_name")
else:
# Otherwise try to find it the settings
if not hasattr(settings, 'AWS_BUCKET_NAME'):
raise CommandError(self.bucket_unconfig_msg)
self.aws_bucket_name = settings.AWS_BUCKET_NAME
# The bucket prefix, if it exists
self.aws_bucket_prefix = options.get("aws_bucket_prefix")
# If the user sets the --force option
if options.get('force'):
self.force_publish = True
else:
self.force_publish = False
# set the --dry-run option
if options.get('dry_run'):
self.dry_run = True
if self.verbosity > 0:
logger.info("Executing with the --dry-run option set.")
else:
self.dry_run = False
self.no_delete = options.get('no_delete')
self.no_pooling = options.get('no_pooling') | Configure all the many options we'll need to make this happen. | Below is the the instruction that describes the task:
### Input:
Configure all the many options we'll need to make this happen.
### Response:
def set_options(self, options):
"""
Configure all the many options we'll need to make this happen.
"""
self.verbosity = int(options.get('verbosity'))
# Will we be gzipping?
self.gzip = getattr(settings, 'BAKERY_GZIP', False)
# And if so what content types will we be gzipping?
self.gzip_content_types = getattr(
settings,
'GZIP_CONTENT_TYPES',
DEFAULT_GZIP_CONTENT_TYPES
)
# What ACL (i.e. security permissions) will be giving the files on S3?
self.acl = getattr(settings, 'DEFAULT_ACL', self.DEFAULT_ACL)
# Should we set cache-control headers?
self.cache_control = getattr(settings, 'BAKERY_CACHE_CONTROL', {})
# If the user specifies a build directory...
if options.get('build_dir'):
# ... validate that it is good.
if not os.path.exists(options.get('build_dir')):
raise CommandError(self.build_missing_msg)
# Go ahead and use it
self.build_dir = options.get("build_dir")
# If the user does not specify a build dir...
else:
# Check if it is set in settings.py
if not hasattr(settings, 'BUILD_DIR'):
raise CommandError(self.build_unconfig_msg)
# Then make sure it actually exists
if not os.path.exists(settings.BUILD_DIR):
raise CommandError(self.build_missing_msg)
# Go ahead and use it
self.build_dir = settings.BUILD_DIR
# If the user provides a bucket name, use that.
if options.get("aws_bucket_name"):
self.aws_bucket_name = options.get("aws_bucket_name")
else:
# Otherwise try to find it the settings
if not hasattr(settings, 'AWS_BUCKET_NAME'):
raise CommandError(self.bucket_unconfig_msg)
self.aws_bucket_name = settings.AWS_BUCKET_NAME
# The bucket prefix, if it exists
self.aws_bucket_prefix = options.get("aws_bucket_prefix")
# If the user sets the --force option
if options.get('force'):
self.force_publish = True
else:
self.force_publish = False
# set the --dry-run option
if options.get('dry_run'):
self.dry_run = True
if self.verbosity > 0:
logger.info("Executing with the --dry-run option set.")
else:
self.dry_run = False
self.no_delete = options.get('no_delete')
self.no_pooling = options.get('no_pooling') |
def createfork(self, project_id):
"""
Forks a project into the user namespace of the authenticated user.
:param project_id: Project ID to fork
:return: True if succeed
"""
request = requests.post(
'{0}/fork/{1}'.format(self.projects_url, project_id),
timeout=self.timeout, verify=self.verify_ssl)
if request.status_code == 200:
return True
else:
return False | Forks a project into the user namespace of the authenticated user.
:param project_id: Project ID to fork
:return: True if succeed | Below is the the instruction that describes the task:
### Input:
Forks a project into the user namespace of the authenticated user.
:param project_id: Project ID to fork
:return: True if succeed
### Response:
def createfork(self, project_id):
"""
Forks a project into the user namespace of the authenticated user.
:param project_id: Project ID to fork
:return: True if succeed
"""
request = requests.post(
'{0}/fork/{1}'.format(self.projects_url, project_id),
timeout=self.timeout, verify=self.verify_ssl)
if request.status_code == 200:
return True
else:
return False |
def check_against_tables(chars, tables):
"""
Perform a check against the table predicates in `tables`. `tables` must be
a reusable iterable containing characteristic functions of character sets,
that is, functions which return :data:`True` if the character is in the
table.
The function returns the first character occuring in any of the tables or
:data:`None` if no character matches.
"""
for c in chars:
if any(in_table(c) for in_table in tables):
return c
return None | Perform a check against the table predicates in `tables`. `tables` must be
a reusable iterable containing characteristic functions of character sets,
that is, functions which return :data:`True` if the character is in the
table.
The function returns the first character occuring in any of the tables or
:data:`None` if no character matches. | Below is the the instruction that describes the task:
### Input:
Perform a check against the table predicates in `tables`. `tables` must be
a reusable iterable containing characteristic functions of character sets,
that is, functions which return :data:`True` if the character is in the
table.
The function returns the first character occuring in any of the tables or
:data:`None` if no character matches.
### Response:
def check_against_tables(chars, tables):
"""
Perform a check against the table predicates in `tables`. `tables` must be
a reusable iterable containing characteristic functions of character sets,
that is, functions which return :data:`True` if the character is in the
table.
The function returns the first character occuring in any of the tables or
:data:`None` if no character matches.
"""
for c in chars:
if any(in_table(c) for in_table in tables):
return c
return None |
def set(self, namespace, key, value, description=None):
"""Set (create/update) a configuration item
Args:
namespace (`str`): Namespace for the item
key (`str`): Key of the item
value (`Any`): Value of the type, must by one of `DBCString`, `DBCFloat`, `DBCInt`, `DBCArray`, `DBCJSON` or
`bool`
description (`str`): Description of the configuration item
Returns:
`None`
"""
if isinstance(value, DBCChoice):
vtype = 'choice'
elif isinstance(value, DBCString):
vtype = 'string'
elif isinstance(value, DBCFloat):
vtype = 'float'
elif isinstance(value, DBCInt):
vtype = 'int'
elif isinstance(value, DBCArray):
vtype = 'array'
elif isinstance(value, DBCJSON):
vtype = 'json'
elif isinstance(value, bool):
vtype = 'bool'
else:
raise ValueError('Invalid config item type: {}'.format(type(value)))
if namespace in self.__data and key in self.__data[namespace]:
itm = db.ConfigItem.find_one(
ConfigItem.namespace_prefix == namespace,
ConfigItem.key == key
)
if not itm:
raise KeyError(key)
itm.value = value
itm.type = vtype
if description:
itm.description = description
else:
itm = ConfigItem()
itm.key = key
itm.value = value
itm.type = vtype
itm.description = description
itm.namespace_prefix = namespace
db.session.add(itm)
db.session.commit()
if namespace in self.__data:
self.__data[namespace][key] = value
else:
self.__data[namespace] = {key: value} | Set (create/update) a configuration item
Args:
namespace (`str`): Namespace for the item
key (`str`): Key of the item
value (`Any`): Value of the type, must by one of `DBCString`, `DBCFloat`, `DBCInt`, `DBCArray`, `DBCJSON` or
`bool`
description (`str`): Description of the configuration item
Returns:
`None` | Below is the the instruction that describes the task:
### Input:
Set (create/update) a configuration item
Args:
namespace (`str`): Namespace for the item
key (`str`): Key of the item
value (`Any`): Value of the type, must by one of `DBCString`, `DBCFloat`, `DBCInt`, `DBCArray`, `DBCJSON` or
`bool`
description (`str`): Description of the configuration item
Returns:
`None`
### Response:
def set(self, namespace, key, value, description=None):
"""Set (create/update) a configuration item
Args:
namespace (`str`): Namespace for the item
key (`str`): Key of the item
value (`Any`): Value of the type, must by one of `DBCString`, `DBCFloat`, `DBCInt`, `DBCArray`, `DBCJSON` or
`bool`
description (`str`): Description of the configuration item
Returns:
`None`
"""
if isinstance(value, DBCChoice):
vtype = 'choice'
elif isinstance(value, DBCString):
vtype = 'string'
elif isinstance(value, DBCFloat):
vtype = 'float'
elif isinstance(value, DBCInt):
vtype = 'int'
elif isinstance(value, DBCArray):
vtype = 'array'
elif isinstance(value, DBCJSON):
vtype = 'json'
elif isinstance(value, bool):
vtype = 'bool'
else:
raise ValueError('Invalid config item type: {}'.format(type(value)))
if namespace in self.__data and key in self.__data[namespace]:
itm = db.ConfigItem.find_one(
ConfigItem.namespace_prefix == namespace,
ConfigItem.key == key
)
if not itm:
raise KeyError(key)
itm.value = value
itm.type = vtype
if description:
itm.description = description
else:
itm = ConfigItem()
itm.key = key
itm.value = value
itm.type = vtype
itm.description = description
itm.namespace_prefix = namespace
db.session.add(itm)
db.session.commit()
if namespace in self.__data:
self.__data[namespace][key] = value
else:
self.__data[namespace] = {key: value} |
def run(self, **kwargs):
"""Execute the scheduler.
Returns:
`None`
"""
if not super().run(**kwargs):
return
if kwargs['list']:
self.log.info('--- List of Scheduler Modules ---')
for name, scheduler in list(self.scheduler_plugins.items()):
if self.active_scheduler == name:
self.log.info('{} (active)'.format(name))
else:
self.log.info(name)
self.log.info('--- End list of Scheduler Modules ---')
return
scheduler = self.scheduler_plugins[self.active_scheduler]()
scheduler.execute_scheduler() | Execute the scheduler.
Returns:
`None` | Below is the the instruction that describes the task:
### Input:
Execute the scheduler.
Returns:
`None`
### Response:
def run(self, **kwargs):
"""Execute the scheduler.
Returns:
`None`
"""
if not super().run(**kwargs):
return
if kwargs['list']:
self.log.info('--- List of Scheduler Modules ---')
for name, scheduler in list(self.scheduler_plugins.items()):
if self.active_scheduler == name:
self.log.info('{} (active)'.format(name))
else:
self.log.info(name)
self.log.info('--- End list of Scheduler Modules ---')
return
scheduler = self.scheduler_plugins[self.active_scheduler]()
scheduler.execute_scheduler() |
def phi_cause_mip(self, mechanism, purview):
"""Return the |small_phi| of the cause MIP.
This is the distance between the unpartitioned cause repertoire and the
MIP cause repertoire.
"""
mip = self.cause_mip(mechanism, purview)
return mip.phi if mip else 0 | Return the |small_phi| of the cause MIP.
This is the distance between the unpartitioned cause repertoire and the
MIP cause repertoire. | Below is the the instruction that describes the task:
### Input:
Return the |small_phi| of the cause MIP.
This is the distance between the unpartitioned cause repertoire and the
MIP cause repertoire.
### Response:
def phi_cause_mip(self, mechanism, purview):
"""Return the |small_phi| of the cause MIP.
This is the distance between the unpartitioned cause repertoire and the
MIP cause repertoire.
"""
mip = self.cause_mip(mechanism, purview)
return mip.phi if mip else 0 |
def action2button(action, autoraise=True, text_beside_icon=False, parent=None):
"""Create a QToolButton directly from a QAction object"""
if parent is None:
parent = action.parent()
button = QToolButton(parent)
button.setDefaultAction(action)
button.setAutoRaise(autoraise)
if text_beside_icon:
button.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
return button | Create a QToolButton directly from a QAction object | Below is the the instruction that describes the task:
### Input:
Create a QToolButton directly from a QAction object
### Response:
def action2button(action, autoraise=True, text_beside_icon=False, parent=None):
"""Create a QToolButton directly from a QAction object"""
if parent is None:
parent = action.parent()
button = QToolButton(parent)
button.setDefaultAction(action)
button.setAutoRaise(autoraise)
if text_beside_icon:
button.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
return button |
def add_feature(self, feature):
# type: (Any) -> int
""" Add a new feature name, return it's index.
"""
# A copy of self.feature_names is always made, because it might be
# "owned" by someone else.
# It's possible to make the copy only at the first call to
# self.add_feature to improve performance.
idx = self.n_features
if isinstance(self.feature_names, (list, np.ndarray)):
self.feature_names = list(self.feature_names)
self.feature_names.append(feature)
elif isinstance(self.feature_names, dict):
self.feature_names = dict(self.feature_names)
self.feature_names[idx] = feature
elif self.feature_names is None:
self.feature_names = {idx: feature}
self.n_features += 1
return idx | Add a new feature name, return it's index. | Below is the the instruction that describes the task:
### Input:
Add a new feature name, return it's index.
### Response:
def add_feature(self, feature):
# type: (Any) -> int
""" Add a new feature name, return it's index.
"""
# A copy of self.feature_names is always made, because it might be
# "owned" by someone else.
# It's possible to make the copy only at the first call to
# self.add_feature to improve performance.
idx = self.n_features
if isinstance(self.feature_names, (list, np.ndarray)):
self.feature_names = list(self.feature_names)
self.feature_names.append(feature)
elif isinstance(self.feature_names, dict):
self.feature_names = dict(self.feature_names)
self.feature_names[idx] = feature
elif self.feature_names is None:
self.feature_names = {idx: feature}
self.n_features += 1
return idx |
def exportProfile(self, filename=''):
"""
Exports the current profile to a file.
:param filename | <str>
"""
if not (filename and isinstance(filename, basestring)):
filename = QtGui.QFileDialog.getSaveFileName(self,
'Export Layout as...',
QtCore.QDir.currentPath(),
'XView (*.xview)')
if type(filename) == tuple:
filename = filename[0]
filename = nativestring(filename)
if not filename:
return
if not filename.endswith('.xview'):
filename += '.xview'
profile = self.saveProfile()
profile.save(filename) | Exports the current profile to a file.
:param filename | <str> | Below is the the instruction that describes the task:
### Input:
Exports the current profile to a file.
:param filename | <str>
### Response:
def exportProfile(self, filename=''):
"""
Exports the current profile to a file.
:param filename | <str>
"""
if not (filename and isinstance(filename, basestring)):
filename = QtGui.QFileDialog.getSaveFileName(self,
'Export Layout as...',
QtCore.QDir.currentPath(),
'XView (*.xview)')
if type(filename) == tuple:
filename = filename[0]
filename = nativestring(filename)
if not filename:
return
if not filename.endswith('.xview'):
filename += '.xview'
profile = self.saveProfile()
profile.save(filename) |
def _remove_pairs(self, ids_to_remove):
"""Remove the pairs identified by the given indices into _pairs.
Removes the pair, and updates the _key_ids mapping to be accurate.
Removing the ids from the _key_ids is your own responsibility.
Params:
ids_to_remove -- The indices to remove. MUST be sorted.
"""
# Remove them.
for i in reversed(ids_to_remove):
del self._pairs[i]
# We use the bisect to tell us how many spots the given index is
# shifting up in the list.
for ids in self._key_ids.itervalues():
for i, id in enumerate(ids):
ids[i] -= bisect(ids_to_remove, id) | Remove the pairs identified by the given indices into _pairs.
Removes the pair, and updates the _key_ids mapping to be accurate.
Removing the ids from the _key_ids is your own responsibility.
Params:
ids_to_remove -- The indices to remove. MUST be sorted. | Below is the the instruction that describes the task:
### Input:
Remove the pairs identified by the given indices into _pairs.
Removes the pair, and updates the _key_ids mapping to be accurate.
Removing the ids from the _key_ids is your own responsibility.
Params:
ids_to_remove -- The indices to remove. MUST be sorted.
### Response:
def _remove_pairs(self, ids_to_remove):
"""Remove the pairs identified by the given indices into _pairs.
Removes the pair, and updates the _key_ids mapping to be accurate.
Removing the ids from the _key_ids is your own responsibility.
Params:
ids_to_remove -- The indices to remove. MUST be sorted.
"""
# Remove them.
for i in reversed(ids_to_remove):
del self._pairs[i]
# We use the bisect to tell us how many spots the given index is
# shifting up in the list.
for ids in self._key_ids.itervalues():
for i, id in enumerate(ids):
ids[i] -= bisect(ids_to_remove, id) |
def set_data(self, data):
"""Set data."""
if data != self.editor.model.get_data():
self.editor.set_data(data)
self.editor.adjust_columns() | Set data. | Below is the the instruction that describes the task:
### Input:
Set data.
### Response:
def set_data(self, data):
"""Set data."""
if data != self.editor.model.get_data():
self.editor.set_data(data)
self.editor.adjust_columns() |
def _list_resource_descriptors(args, _):
"""Lists the resource descriptors in the project."""
project_id = args['project']
pattern = args['type'] or '*'
descriptors = gcm.ResourceDescriptors(project_id=project_id)
dataframe = descriptors.as_dataframe(pattern=pattern)
return _render_dataframe(dataframe) | Lists the resource descriptors in the project. | Below is the the instruction that describes the task:
### Input:
Lists the resource descriptors in the project.
### Response:
def _list_resource_descriptors(args, _):
"""Lists the resource descriptors in the project."""
project_id = args['project']
pattern = args['type'] or '*'
descriptors = gcm.ResourceDescriptors(project_id=project_id)
dataframe = descriptors.as_dataframe(pattern=pattern)
return _render_dataframe(dataframe) |
def log_predictive_density(self, y_test, mu_star, var_star, Y_metadata=None):
"""
Calculation of the log predictive density
.. math:
p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}\\sigma^{2}_{*})
:param y_test: test observations (y_{*})
:type y_test: (Nx1) array
:param mu_star: predictive mean of gaussian p(f_{*}|mu_{*}, var_{*})
:type mu_star: (Nx1) array
:param var_star: predictive variance of gaussian p(f_{*}|mu_{*}, var_{*})
:type var_star: (Nx1) array
"""
assert y_test.shape==mu_star.shape
assert y_test.shape==var_star.shape
assert y_test.shape[1] == 1
flat_y_test = y_test.flatten()
flat_mu_star = mu_star.flatten()
flat_var_star = var_star.flatten()
if Y_metadata is not None:
#Need to zip individual elements of Y_metadata aswell
Y_metadata_flat = {}
if Y_metadata is not None:
for key, val in Y_metadata.items():
Y_metadata_flat[key] = np.atleast_1d(val).reshape(-1,1)
zipped_values = []
for i in range(y_test.shape[0]):
y_m = {}
for key, val in Y_metadata_flat.items():
if np.isscalar(val) or val.shape[0] == 1:
y_m[key] = val
else:
#Won't broadcast yet
y_m[key] = val[i]
zipped_values.append((flat_y_test[i], flat_mu_star[i], flat_var_star[i], y_m))
else:
#Otherwise just pass along None's
zipped_values = zip(flat_y_test, flat_mu_star, flat_var_star, [None]*y_test.shape[0])
def integral_generator(yi, mi, vi, yi_m):
"""Generate a function which can be integrated
to give p(Y*|Y) = int p(Y*|f*)p(f*|Y) df*"""
def f(fi_star):
#exponent = np.exp(-(1./(2*vi))*np.square(mi-fi_star))
#from GPy.util.misc import safe_exp
#exponent = safe_exp(exponent)
#res = safe_exp(self.logpdf(fi_star, yi, yi_m))*exponent
#More stable in the log space
res = np.exp(self.logpdf(fi_star, yi, yi_m)
- 0.5*np.log(2*np.pi*vi)
- 0.5*np.square(fi_star-mi)/vi)
if not np.isfinite(res):
import ipdb; ipdb.set_trace() # XXX BREAKPOINT
return res
return f
p_ystar, _ = zip(*[quad(integral_generator(yi, mi, vi, yi_m), -np.inf, np.inf)
for yi, mi, vi, yi_m in zipped_values])
p_ystar = np.array(p_ystar).reshape(*y_test.shape)
return np.log(p_ystar) | Calculation of the log predictive density
.. math:
p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}\\sigma^{2}_{*})
:param y_test: test observations (y_{*})
:type y_test: (Nx1) array
:param mu_star: predictive mean of gaussian p(f_{*}|mu_{*}, var_{*})
:type mu_star: (Nx1) array
:param var_star: predictive variance of gaussian p(f_{*}|mu_{*}, var_{*})
:type var_star: (Nx1) array | Below is the the instruction that describes the task:
### Input:
Calculation of the log predictive density
.. math:
p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}\\sigma^{2}_{*})
:param y_test: test observations (y_{*})
:type y_test: (Nx1) array
:param mu_star: predictive mean of gaussian p(f_{*}|mu_{*}, var_{*})
:type mu_star: (Nx1) array
:param var_star: predictive variance of gaussian p(f_{*}|mu_{*}, var_{*})
:type var_star: (Nx1) array
### Response:
def log_predictive_density(self, y_test, mu_star, var_star, Y_metadata=None):
"""
Calculation of the log predictive density
.. math:
p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}\\sigma^{2}_{*})
:param y_test: test observations (y_{*})
:type y_test: (Nx1) array
:param mu_star: predictive mean of gaussian p(f_{*}|mu_{*}, var_{*})
:type mu_star: (Nx1) array
:param var_star: predictive variance of gaussian p(f_{*}|mu_{*}, var_{*})
:type var_star: (Nx1) array
"""
assert y_test.shape==mu_star.shape
assert y_test.shape==var_star.shape
assert y_test.shape[1] == 1
flat_y_test = y_test.flatten()
flat_mu_star = mu_star.flatten()
flat_var_star = var_star.flatten()
if Y_metadata is not None:
#Need to zip individual elements of Y_metadata aswell
Y_metadata_flat = {}
if Y_metadata is not None:
for key, val in Y_metadata.items():
Y_metadata_flat[key] = np.atleast_1d(val).reshape(-1,1)
zipped_values = []
for i in range(y_test.shape[0]):
y_m = {}
for key, val in Y_metadata_flat.items():
if np.isscalar(val) or val.shape[0] == 1:
y_m[key] = val
else:
#Won't broadcast yet
y_m[key] = val[i]
zipped_values.append((flat_y_test[i], flat_mu_star[i], flat_var_star[i], y_m))
else:
#Otherwise just pass along None's
zipped_values = zip(flat_y_test, flat_mu_star, flat_var_star, [None]*y_test.shape[0])
def integral_generator(yi, mi, vi, yi_m):
"""Generate a function which can be integrated
to give p(Y*|Y) = int p(Y*|f*)p(f*|Y) df*"""
def f(fi_star):
#exponent = np.exp(-(1./(2*vi))*np.square(mi-fi_star))
#from GPy.util.misc import safe_exp
#exponent = safe_exp(exponent)
#res = safe_exp(self.logpdf(fi_star, yi, yi_m))*exponent
#More stable in the log space
res = np.exp(self.logpdf(fi_star, yi, yi_m)
- 0.5*np.log(2*np.pi*vi)
- 0.5*np.square(fi_star-mi)/vi)
if not np.isfinite(res):
import ipdb; ipdb.set_trace() # XXX BREAKPOINT
return res
return f
p_ystar, _ = zip(*[quad(integral_generator(yi, mi, vi, yi_m), -np.inf, np.inf)
for yi, mi, vi, yi_m in zipped_values])
p_ystar = np.array(p_ystar).reshape(*y_test.shape)
return np.log(p_ystar) |
def _import_yaml(config_file_path):
"""Return a configuration object
"""
try:
logger.info('Importing config %s...', config_file_path)
with open(config_file_path) as config_file:
return yaml.safe_load(config_file.read())
except IOError as ex:
raise RepexError('{0}: {1} ({2})'.format(
ERRORS['config_file_not_found'], config_file_path, ex))
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as ex:
raise RepexError('{0} ({1})'.format(ERRORS['invalid_yaml'], ex)) | Return a configuration object | Below is the the instruction that describes the task:
### Input:
Return a configuration object
### Response:
def _import_yaml(config_file_path):
"""Return a configuration object
"""
try:
logger.info('Importing config %s...', config_file_path)
with open(config_file_path) as config_file:
return yaml.safe_load(config_file.read())
except IOError as ex:
raise RepexError('{0}: {1} ({2})'.format(
ERRORS['config_file_not_found'], config_file_path, ex))
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as ex:
raise RepexError('{0} ({1})'.format(ERRORS['invalid_yaml'], ex)) |
def supervise(project, workspace, namespace, workflow,
sample_sets, recovery_file):
""" Supervise submission of jobs from a Firehose-style workflow of workflows"""
# Get arguments
logging.info("Initializing FireCloud Supervisor...")
logging.info("Saving recovery checkpoints to " + recovery_file)
# Parse workflow description
# these three objects must be saved in order to recover the supervisor
args = {
'project' : project,
'workspace': workspace,
'namespace': namespace,
'workflow' : workflow,
'sample_sets': sample_sets
}
monitor_data, dependencies = init_supervisor_data(workflow, sample_sets)
recovery_data = {
'args' : args,
'monitor_data' : monitor_data,
'dependencies' : dependencies
}
# Monitor loop. Keep going until all nodes have been evaluated
supervise_until_complete(monitor_data, dependencies, args, recovery_file) | Supervise submission of jobs from a Firehose-style workflow of workflows | Below is the the instruction that describes the task:
### Input:
Supervise submission of jobs from a Firehose-style workflow of workflows
### Response:
def supervise(project, workspace, namespace, workflow,
sample_sets, recovery_file):
""" Supervise submission of jobs from a Firehose-style workflow of workflows"""
# Get arguments
logging.info("Initializing FireCloud Supervisor...")
logging.info("Saving recovery checkpoints to " + recovery_file)
# Parse workflow description
# these three objects must be saved in order to recover the supervisor
args = {
'project' : project,
'workspace': workspace,
'namespace': namespace,
'workflow' : workflow,
'sample_sets': sample_sets
}
monitor_data, dependencies = init_supervisor_data(workflow, sample_sets)
recovery_data = {
'args' : args,
'monitor_data' : monitor_data,
'dependencies' : dependencies
}
# Monitor loop. Keep going until all nodes have been evaluated
supervise_until_complete(monitor_data, dependencies, args, recovery_file) |
def _confirm_constant(a):
"""Confirm `a` has volumn vector of 1s."""
a = np.asanyarray(a)
return np.isclose(a, 1.0).all(axis=0).any() | Confirm `a` has volumn vector of 1s. | Below is the the instruction that describes the task:
### Input:
Confirm `a` has volumn vector of 1s.
### Response:
def _confirm_constant(a):
"""Confirm `a` has volumn vector of 1s."""
a = np.asanyarray(a)
return np.isclose(a, 1.0).all(axis=0).any() |
def ghz_circuit(qubits: Qubits) -> Circuit:
"""Returns a circuit that prepares a multi-qubit Bell state from the zero
state.
"""
circ = Circuit()
circ += H(qubits[0])
for q0 in range(0, len(qubits)-1):
circ += CNOT(qubits[q0], qubits[q0+1])
return circ | Returns a circuit that prepares a multi-qubit Bell state from the zero
state. | Below is the the instruction that describes the task:
### Input:
Returns a circuit that prepares a multi-qubit Bell state from the zero
state.
### Response:
def ghz_circuit(qubits: Qubits) -> Circuit:
"""Returns a circuit that prepares a multi-qubit Bell state from the zero
state.
"""
circ = Circuit()
circ += H(qubits[0])
for q0 in range(0, len(qubits)-1):
circ += CNOT(qubits[q0], qubits[q0+1])
return circ |
def proc_extra_field(label_store, filebytes, bpi, subtype, chan, num, aux_note, update):
"""
Process extra fields belonging to the current annotation.
Potential updated fields: subtype, chan, num, aux_note
"""
# aux_note and sub are reset between annotations. chan and num copy over
# previous value if missing.
# SUB
if label_store == 61:
# sub is interpreted as signed char.
subtype.append(filebytes[bpi, 0].astype('i1'))
update['subtype'] = False
bpi = bpi + 1
# CHAN
elif label_store == 62:
# chan is interpreted as un_signed char
chan.append(filebytes[bpi, 0])
update['chan'] = False
bpi = bpi + 1
# NUM
elif label_store == 60:
# num is interpreted as signed char
num.append(filebytes[bpi, 0].astype('i1'))
update['num'] = False
bpi = bpi + 1
# aux_note
elif label_store == 63:
# length of aux_note string. Max 256? No need to check other bits of
# second byte?
aux_notelen = filebytes[bpi, 0]
aux_notebytes = filebytes[bpi + 1:bpi + 1 + int(np.ceil(aux_notelen / 2.)),:].flatten()
if aux_notelen & 1:
aux_notebytes = aux_notebytes[:-1]
# The aux_note string
aux_note.append("".join([chr(char) for char in aux_notebytes]))
update['aux_note'] = False
bpi = bpi + 1 + int(np.ceil(aux_notelen / 2.))
return subtype, chan, num, aux_note, update, bpi | Process extra fields belonging to the current annotation.
Potential updated fields: subtype, chan, num, aux_note | Below is the the instruction that describes the task:
### Input:
Process extra fields belonging to the current annotation.
Potential updated fields: subtype, chan, num, aux_note
### Response:
def proc_extra_field(label_store, filebytes, bpi, subtype, chan, num, aux_note, update):
    """
    Process one extra (non-label) field belonging to the current annotation.
    Potential updated fields: subtype, chan, num, aux_note.

    Parameters
    ----------
    label_store : int
        Store code of the field just read: 60=num, 61=sub, 62=chan,
        63=aux_note (any other value leaves all inputs untouched).
    filebytes : ndarray
        Annotation file content as an (n_pairs, 2) byte array.
    bpi : int
        Byte-pair index of the field's payload within `filebytes`.
    subtype, chan, num, aux_note : list
        Per-annotation field accumulators; the matching list is appended to.
    update : dict
        Flags marking which fields still need a default/carry-over value for
        this annotation; the processed field's flag is cleared (set False).

    Returns
    -------
    (subtype, chan, num, aux_note, update, bpi)
        The (mutated) inputs, with `bpi` advanced past the consumed payload.
    """
    # aux_note and sub are reset between annotations. chan and num copy over
    # previous value if missing.
    # SUB
    if label_store == 61:
        # sub is interpreted as signed char.
        subtype.append(filebytes[bpi, 0].astype('i1'))
        update['subtype'] = False
        bpi = bpi + 1
    # CHAN
    elif label_store == 62:
        # chan is interpreted as un_signed char
        chan.append(filebytes[bpi, 0])
        update['chan'] = False
        bpi = bpi + 1
    # NUM
    elif label_store == 60:
        # num is interpreted as signed char
        num.append(filebytes[bpi, 0].astype('i1'))
        update['num'] = False
        bpi = bpi + 1
    # aux_note
    elif label_store == 63:
        # length of aux_note string. Max 256? No need to check other bits of
        # second byte?
        aux_notelen = filebytes[bpi, 0]
        # The string occupies ceil(len/2) byte pairs; flatten back to bytes.
        aux_notebytes = filebytes[bpi + 1:bpi + 1 + int(np.ceil(aux_notelen / 2.)),:].flatten()
        if aux_notelen & 1:
            # Odd length: the final byte is padding, drop it.
            aux_notebytes = aux_notebytes[:-1]
        # The aux_note string
        aux_note.append("".join([chr(char) for char in aux_notebytes]))
        update['aux_note'] = False
        bpi = bpi + 1 + int(np.ceil(aux_notelen / 2.))
    return subtype, chan, num, aux_note, update, bpi
def get_texts(self):
""" Parse documents from a .txt file assuming 1 document per line, yielding lists of filtered tokens """
with self.getstream() as text_stream:
for i, line in enumerate(text_stream):
line = SMSCorpus.case_normalizer(line)
if self.mask is not None and not self.mask[i]:
continue
ngrams = []
for ng in tokens2ngrams(self.tokenizer(line)):
if SMSCorpus.ignore_matcher(ng):
continue
ngrams += [ng]
yield ngrams | Parse documents from a .txt file assuming 1 document per line, yielding lists of filtered tokens | Below is the the instruction that describes the task:
### Input:
Parse documents from a .txt file assuming 1 document per line, yielding lists of filtered tokens
### Response:
def get_texts(self):
    """ Parse documents from a .txt file assuming 1 document per line, yielding lists of filtered tokens """
    # Stream the corpus line by line; getstream() is expected to return a
    # context-managed iterable of lines -- TODO confirm against base class.
    with self.getstream() as text_stream:
        for i, line in enumerate(text_stream):
            line = SMSCorpus.case_normalizer(line)
            # Optional boolean mask selects which documents (by line index)
            # to emit; note the mask is indexed before filtering.
            if self.mask is not None and not self.mask[i]:
                continue
            ngrams = []
            for ng in tokens2ngrams(self.tokenizer(line)):
                # Drop n-grams matching the corpus-level ignore pattern.
                if SMSCorpus.ignore_matcher(ng):
                    continue
                ngrams += [ng]
            yield ngrams
def copy(self, value):
'Copy a cell to the system clipboard.'
with tempfile.NamedTemporaryFile() as temp:
with open(temp.name, 'w', encoding=options.encoding) as fp:
fp.write(str(value))
p = subprocess.Popen(
self.command,
stdin=open(temp.name, 'r', encoding=options.encoding),
stdout=subprocess.DEVNULL)
p.communicate() | Copy a cell to the system clipboard. | Below is the the instruction that describes the task:
### Input:
Copy a cell to the system clipboard.
### Response:
def copy(self, value):
    """Copy a cell's value to the system clipboard.

    Writes ``str(value)`` to a temporary file and pipes it as stdin into
    the external clipboard command (``self.command``, e.g. xclip/pbcopy).
    """
    with tempfile.NamedTemporaryFile() as temp:
        with open(temp.name, 'w', encoding=options.encoding) as fp:
            fp.write(str(value))
        # Re-open for reading inside a `with` so the descriptor is closed;
        # the original passed an anonymous open() and leaked the handle.
        with open(temp.name, 'r', encoding=options.encoding) as fin:
            p = subprocess.Popen(
                self.command,
                stdin=fin,
                stdout=subprocess.DEVNULL)
            p.communicate()
def fit_model(ts, regressors, method="cochrane-orcutt", optimizationArgs=None, sc=None):
"""
Parameters
----------
ts:
time series to which to fit an ARIMA model as a Numpy array
regressors:
regression matrix as a Numpy array
method:
Regression method. Currently, only "cochrane-orcutt" is supported.
optimizationArgs:
sc:
The SparkContext, required.
returns an RegressionARIMAModel
"""
assert sc != None, "Missing SparkContext"
jvm = sc._jvm
jmodel = jvm.com.cloudera.sparkts.models.RegressionARIMA.fitModel(_nparray2breezevector(sc, ts), _nparray2breezematrix(sc, regressors), method, _py2scala_seq(sc, optimizationArgs))
return RegressionARIMAModel(jmodel=jmodel, sc=sc) | Parameters
----------
ts:
time series to which to fit an ARIMA model as a Numpy array
regressors:
regression matrix as a Numpy array
method:
Regression method. Currently, only "cochrane-orcutt" is supported.
optimizationArgs:
sc:
The SparkContext, required.
returns an RegressionARIMAModel | Below is the the instruction that describes the task:
### Input:
Parameters
----------
ts:
time series to which to fit an ARIMA model as a Numpy array
regressors:
regression matrix as a Numpy array
method:
Regression method. Currently, only "cochrane-orcutt" is supported.
optimizationArgs:
sc:
The SparkContext, required.
returns an RegressionARIMAModel
### Response:
def fit_model(ts, regressors, method="cochrane-orcutt", optimizationArgs=None, sc=None):
    """
    Fit a regression ARIMA model via the JVM (Scala) implementation.

    Parameters
    ----------
    ts:
        time series to which to fit an ARIMA model as a Numpy array
    regressors:
        regression matrix as a Numpy array
    method:
        Regression method. Currently, only "cochrane-orcutt" is supported.
    optimizationArgs:
        optional sequence of extra arguments forwarded to the Scala optimizer
    sc:
        The SparkContext, required.
    returns a RegressionARIMAModel
    """
    # Identity comparison is the correct idiom for None (PEP 8); `!= None`
    # can misbehave for objects overriding __eq__.
    assert sc is not None, "Missing SparkContext"
    jvm = sc._jvm
    jmodel = jvm.com.cloudera.sparkts.models.RegressionARIMA.fitModel(
        _nparray2breezevector(sc, ts),
        _nparray2breezematrix(sc, regressors),
        method,
        _py2scala_seq(sc, optimizationArgs),
    )
    return RegressionARIMAModel(jmodel=jmodel, sc=sc)
def entropy(data):
"""
Compute the Shannon entropy, a measure of uncertainty.
"""
if len(data) == 0:
return None
n = sum(data)
_op = lambda f: f * math.log(f)
return - sum(_op(float(i) / n) for i in data) | Compute the Shannon entropy, a measure of uncertainty. | Below is the the instruction that describes the task:
### Input:
Compute the Shannon entropy, a measure of uncertainty.
### Response:
def entropy(data):
    """Compute the Shannon entropy (natural log) of a sequence of counts.

    Returns None for an empty sequence.
    """
    if not data:
        return None
    total = sum(data)
    # H = -sum(p * ln(p)) over the normalized frequencies.
    return -sum((count / total) * math.log(count / total) for count in data)
def list_services(profile=None, api_key=None):
'''
List services belonging to this account
CLI Example:
salt myminion pagerduty.list_services my-pagerduty-account
'''
return salt.utils.pagerduty.list_items(
'services',
'name',
__salt__['config.option'](profile),
api_key,
opts=__opts__
) | List services belonging to this account
CLI Example:
salt myminion pagerduty.list_services my-pagerduty-account | Below is the the instruction that describes the task:
### Input:
List services belonging to this account
CLI Example:
salt myminion pagerduty.list_services my-pagerduty-account
### Response:
def list_services(profile=None, api_key=None):
    '''
    List services belonging to this account

    profile
        Name of the minion config option holding the PagerDuty account
        settings (resolved via ``config.option``).
    api_key
        Explicit PagerDuty API key, if not taken from the profile.

    CLI Example:
        salt myminion pagerduty.list_services my-pagerduty-account
    '''
    # Delegates to the shared pagerduty helper, listing the 'services'
    # collection keyed by each item's 'name'.
    return salt.utils.pagerduty.list_items(
        'services',
        'name',
        __salt__['config.option'](profile),
        api_key,
        opts=__opts__
    )
def many_init(cls, *args, **kwargs):
"""
This method handles creating a parent `ManyRelatedField` instance
when the `many=True` keyword argument is passed.
Typically you won't need to override this method.
Note that we're over-cautious in passing most arguments to both parent
and child classes in order to try to cover the general case. If you're
overriding this method you'll probably want something much simpler, eg:
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls()
return CustomManyRelatedField(*args, **kwargs)
"""
list_kwargs = {'child_relation': cls(*args, **kwargs)}
for key in kwargs.keys():
if key in MANY_RELATION_KWARGS:
list_kwargs[key] = kwargs[key]
return ManyRelatedField(**list_kwargs) | This method handles creating a parent `ManyRelatedField` instance
when the `many=True` keyword argument is passed.
Typically you won't need to override this method.
Note that we're over-cautious in passing most arguments to both parent
and child classes in order to try to cover the general case. If you're
overriding this method you'll probably want something much simpler, eg:
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls()
return CustomManyRelatedField(*args, **kwargs) | Below is the the instruction that describes the task:
### Input:
This method handles creating a parent `ManyRelatedField` instance
when the `many=True` keyword argument is passed.
Typically you won't need to override this method.
Note that we're over-cautious in passing most arguments to both parent
and child classes in order to try to cover the general case. If you're
overriding this method you'll probably want something much simpler, eg:
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls()
return CustomManyRelatedField(*args, **kwargs)
### Response:
def many_init(cls, *args, **kwargs):
    """
    Build the parent `ManyRelatedField` wrapping an instance of this field,
    used when the `many=True` keyword argument is passed.
    Typically you won't need to override this method.
    Note that we're over-cautious in passing most arguments to both parent
    and child classes in order to try to cover the general case. If you're
    overriding this method you'll probably want something much simpler, eg:
    @classmethod
    def many_init(cls, *args, **kwargs):
        kwargs['child'] = cls()
        return CustomManyRelatedField(*args, **kwargs)
    """
    # The child relation receives everything; only whitelisted kwargs are
    # forwarded to the list-level field as well.
    list_kwargs = {'child_relation': cls(*args, **kwargs)}
    list_kwargs.update(
        (key, value)
        for key, value in kwargs.items()
        if key in MANY_RELATION_KWARGS
    )
    return ManyRelatedField(**list_kwargs)
def _end_of_decade(self):
"""
Reset the date to the last day of the decade.
:rtype: Date
"""
year = self.year - self.year % YEARS_PER_DECADE + YEARS_PER_DECADE - 1
return self.set(year, 12, 31) | Reset the date to the last day of the decade.
:rtype: Date | Below is the the instruction that describes the task:
### Input:
Reset the date to the last day of the decade.
:rtype: Date
### Response:
def _end_of_decade(self):
    """
    Return a copy of the date set to December 31 of the decade's last year.
    :rtype: Date
    """
    decade_start = self.year - self.year % YEARS_PER_DECADE
    last_year = decade_start + YEARS_PER_DECADE - 1
    return self.set(last_year, 12, 31)
def temperatures(self, force_refresh=False):
"""Retrieve the current details for each zone. Returns a generator."""
self._populate_full_data(force_refresh)
for device in self.full_data['devices']:
set_point = 0
status = ""
if 'heatSetpoint' in device['thermostat']['changeableValues']:
set_point = float(
device['thermostat']['changeableValues']["heatSetpoint"]["value"])
status = device['thermostat']['changeableValues']["heatSetpoint"]["status"]
else:
status = device['thermostat']['changeableValues']['status']
yield {'thermostat': device['thermostatModelType'],
'id': device['deviceID'],
'name': device['name'],
'temp': float(device['thermostat']['indoorTemperature']),
'setpoint': set_point,
'status': status,
'mode': device['thermostat']['changeableValues']['mode']} | Retrieve the current details for each zone. Returns a generator. | Below is the the instruction that describes the task:
### Input:
Retrieve the current details for each zone. Returns a generator.
### Response:
def temperatures(self, force_refresh=False):
    """Retrieve the current details for each zone. Returns a generator.

    force_refresh -- when True, re-fetch the account data instead of
    using the cached copy (delegated to _populate_full_data).

    Yields one dict per device with keys: thermostat, id, name, temp,
    setpoint, status, mode.
    """
    self._populate_full_data(force_refresh)
    for device in self.full_data['devices']:
        set_point = 0
        status = ""
        # Some thermostat models report a per-setpoint status; others only
        # expose a device-level status -- schema assumed from this code,
        # TODO confirm against the Honeywell API.
        if 'heatSetpoint' in device['thermostat']['changeableValues']:
            set_point = float(
                device['thermostat']['changeableValues']["heatSetpoint"]["value"])
            status = device['thermostat']['changeableValues']["heatSetpoint"]["status"]
        else:
            status = device['thermostat']['changeableValues']['status']
        yield {'thermostat': device['thermostatModelType'],
               'id': device['deviceID'],
               'name': device['name'],
               'temp': float(device['thermostat']['indoorTemperature']),
               'setpoint': set_point,
               'status': status,
               'mode': device['thermostat']['changeableValues']['mode']}
def dcc_connect(self, address, port, dcctype="chat"):
"""Connect to a DCC peer.
Arguments:
address -- IP address of the peer.
port -- Port to connect to.
Returns a DCCConnection instance.
"""
warnings.warn("Use self.dcc(type).connect()", DeprecationWarning)
return self.dcc(dcctype).connect(address, port) | Connect to a DCC peer.
Arguments:
address -- IP address of the peer.
port -- Port to connect to.
Returns a DCCConnection instance. | Below is the the instruction that describes the task:
### Input:
Connect to a DCC peer.
Arguments:
address -- IP address of the peer.
port -- Port to connect to.
Returns a DCCConnection instance.
### Response:
def dcc_connect(self, address, port, dcctype="chat"):
    """Connect to a DCC peer.

    Deprecated thin wrapper: use ``self.dcc(dcctype).connect(address, port)``
    directly (a DeprecationWarning is emitted).

    Arguments:
        address -- IP address of the peer.
        port -- Port to connect to.
    Returns a DCCConnection instance.
    """
    warnings.warn("Use self.dcc(type).connect()", DeprecationWarning)
    return self.dcc(dcctype).connect(address, port)
def _import_next_layer(self, proto, length=None, *, version=4, extension=False):
"""Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Keyword Arguments:
* version -- int, IP version (4 in default)
<keyword> 4 / 6
* extension -- bool, if is extension header (False in default)
<keyword> True / False
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
Protocols:
* IPv4 -- internet layer
* IPv6 -- internet layer
* AH -- internet layer
* TCP -- transport layer
* UDP -- transport layer
"""
if length == 0:
from pcapkit.protocols.null import NoPayload as Protocol
elif self._sigterm or proto == 59:
from pcapkit.protocols.raw import Raw as Protocol
elif proto == 51:
from pcapkit.protocols.internet.ah import AH as Protocol
elif proto == 139:
from pcapkit.protocols.internet.hip import HIP as Protocol
elif proto == 0:
from pcapkit.protocols.internet.hopopt import HOPOPT as Protocol
elif proto == 44:
from pcapkit.protocols.internet.ipv6_frag import IPv6_Frag as Protocol
elif proto == 60:
from pcapkit.protocols.internet.ipv6_opts import IPv6_Opts as Protocol
elif proto == 43:
from pcapkit.protocols.internet.ipv6_route import IPv6_Route as Protocol
elif proto == 135:
from pcapkit.protocols.internet.mh import MH as Protocol
elif proto == 4:
from pcapkit.protocols.internet.ipv4 import IPv4 as Protocol
elif proto == 41:
from pcapkit.protocols.internet.ipv6 import IPv6 as Protocol
elif proto == 6:
from pcapkit.protocols.transport.tcp import TCP as Protocol
elif proto == 17:
from pcapkit.protocols.transport.udp import UDP as Protocol
else:
from pcapkit.protocols.raw import Raw as Protocol
next_ = Protocol(self._file, length, version=version, extension=extension,
error=self._onerror, layer=self._exlayer, protocol=self._exproto)
return next_ | Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Keyword Arguments:
* version -- int, IP version (4 in default)
<keyword> 4 / 6
* extension -- bool, if is extension header (False in default)
<keyword> True / False
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
Protocols:
* IPv4 -- internet layer
* IPv6 -- internet layer
* AH -- internet layer
* TCP -- transport layer
* UDP -- transport layer | Below is the the instruction that describes the task:
### Input:
Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Keyword Arguments:
* version -- int, IP version (4 in default)
<keyword> 4 / 6
* extension -- bool, if is extension header (False in default)
<keyword> True / False
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
Protocols:
* IPv4 -- internet layer
* IPv6 -- internet layer
* AH -- internet layer
* TCP -- transport layer
* UDP -- transport layer
### Response:
def _import_next_layer(self, proto, length=None, *, version=4, extension=False):
    """Import next layer extractor.
    Positional arguments:
        * proto -- int, next layer protocol number (IANA IP protocol number)
        * length -- int, valid (not padding) length
    Keyword Arguments:
        * version -- int, IP version (4 in default)
                        <keyword> 4 / 6
        * extension -- bool, if is extension header (False in default)
                        <keyword> True / False
    Returns:
        * the constructed next-layer protocol instance
    Protocols:
        * IPv4 -- internet layer
        * IPv6 -- internet layer
        * AH -- internet layer
        * TCP -- transport layer
        * UDP -- transport layer
    """
    # Dispatch on the IANA protocol number; imports are deferred into the
    # branches, presumably to avoid circular imports -- TODO confirm.
    if length == 0:
        from pcapkit.protocols.null import NoPayload as Protocol
    elif self._sigterm or proto == 59:
        # 59 = IPv6 "no next header" (or extraction terminated): raw payload.
        from pcapkit.protocols.raw import Raw as Protocol
    elif proto == 51:
        # 51: Authentication Header
        from pcapkit.protocols.internet.ah import AH as Protocol
    elif proto == 139:
        # 139: Host Identity Protocol
        from pcapkit.protocols.internet.hip import HIP as Protocol
    elif proto == 0:
        # 0: IPv6 Hop-by-Hop Options
        from pcapkit.protocols.internet.hopopt import HOPOPT as Protocol
    elif proto == 44:
        # 44: IPv6 Fragment header
        from pcapkit.protocols.internet.ipv6_frag import IPv6_Frag as Protocol
    elif proto == 60:
        # 60: IPv6 Destination Options
        from pcapkit.protocols.internet.ipv6_opts import IPv6_Opts as Protocol
    elif proto == 43:
        # 43: IPv6 Routing header
        from pcapkit.protocols.internet.ipv6_route import IPv6_Route as Protocol
    elif proto == 135:
        # 135: Mobility Header
        from pcapkit.protocols.internet.mh import MH as Protocol
    elif proto == 4:
        # 4: IP-in-IP (IPv4 encapsulation)
        from pcapkit.protocols.internet.ipv4 import IPv4 as Protocol
    elif proto == 41:
        # 41: IPv6 encapsulation
        from pcapkit.protocols.internet.ipv6 import IPv6 as Protocol
    elif proto == 6:
        # 6: TCP
        from pcapkit.protocols.transport.tcp import TCP as Protocol
    elif proto == 17:
        # 17: UDP
        from pcapkit.protocols.transport.udp import UDP as Protocol
    else:
        # Unknown protocol number: keep the payload as raw bytes.
        from pcapkit.protocols.raw import Raw as Protocol
    next_ = Protocol(self._file, length, version=version, extension=extension,
                        error=self._onerror, layer=self._exlayer, protocol=self._exproto)
    return next_
def maybe_download_and_extract():
"""Download and extract processed data and embeddings."""
dest_directory = '.'
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'trees')
if not os.path.exists(extracted_dir_path):
zip_ref = zipfile.ZipFile(filepath, 'r')
zip_ref.extractall(dest_directory)
zip_ref.close() | Download and extract processed data and embeddings. | Below is the the instruction that describes the task:
### Input:
Download and extract processed data and embeddings.
### Response:
def maybe_download_and_extract():
    """Download and extract processed data and embeddings.

    Downloads DATA_URL into the current directory (with a simple progress
    meter) unless the archive already exists, then unzips it unless the
    extracted 'trees' directory is already present.
    """
    dest_directory = '.'
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # Carriage-return progress line, overwritten in place.
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
                float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    extracted_dir_path = os.path.join(dest_directory, 'trees')
    if not os.path.exists(extracted_dir_path):
        # Context manager guarantees the archive handle is closed even if
        # extraction raises (the original leaked it on error).
        with zipfile.ZipFile(filepath, 'r') as zip_ref:
            zip_ref.extractall(dest_directory)
def action_signal_triggered(self, model, prop_name, info):
"""This method notifies the parent state and child state models about complex actions
"""
msg = info.arg
# print("action_signal_triggered state: ", self.state.state_id, model, prop_name, info)
if msg.action.startswith('sm_notification_'):
return
# # affected child propagation from state
# if hasattr(self, 'states'):
# for m in info['arg'].affected_models:
# print(m, self.states)
# print([m is mm for mm in self.states.values()], [m in self for m in info['arg'].affected_models], \)
# [m in self.states.values() for m in info['arg'].affected_models]
if any([m in self for m in info['arg'].affected_models]):
if not msg.action.startswith('parent_notification_'):
new_msg = msg._replace(action='parent_notification_' + msg.action)
else:
new_msg = msg
for m in info['arg'].affected_models:
# print('???propagate it to', m, m.parent)
if isinstance(m, AbstractStateModel) and m in self:
# print('!!!propagate it from {0} to {1} {2}'.format(self.state.state_id, m.state.state_id, m))
m.action_signal.emit(new_msg)
if msg.action.startswith('parent_notification_'):
return
# recursive propagation of action signal TODO remove finally
if self.parent is not None:
# Notify parent about change of meta data
info.arg = msg
# print("DONE1", self.state.state_id, msg)
self.parent.action_signal_triggered(model, prop_name, info)
# print("FINISH DONE1", self.state.state_id, msg)
# state machine propagation of action signal (indirect) TODO remove finally
elif not msg.action.startswith('sm_notification_'): # Prevent recursive call
# If we are the root state, inform the state machine model by emitting our own meta signal.
# To make the signal distinguishable for a change of meta data to our state, the change property of
# the message is prepended with 'sm_notification_'
# print("DONE2", self.state.state_id, msg)
new_msg = msg._replace(action='sm_notification_' + msg.action)
self.action_signal.emit(new_msg)
# print("FINISH DONE2", self.state.state_id, msg)
else:
# print("DONE3 NOTHING")
pass | This method notifies the parent state and child state models about complex actions | Below is the the instruction that describes the task:
### Input:
This method notifies the parent state and child state models about complex actions
### Response:
def action_signal_triggered(self, model, prop_name, info):
    """Notify the parent state and child state models about complex actions.

    model/prop_name/info follow the observer-notification convention;
    info.arg is the action message, whose `action` string is prefixed with
    'parent_notification_' when forwarded to children and
    'sm_notification_' when re-emitted at the root for the state machine
    model.
    """
    msg = info.arg
    # print("action_signal_triggered state: ", self.state.state_id, model, prop_name, info)
    # Messages already re-emitted for the state machine must not loop back.
    if msg.action.startswith('sm_notification_'):
        return
    # # affected child propagation from state
    # if hasattr(self, 'states'):
    #     for m in info['arg'].affected_models:
    #         print(m, self.states)
    #     print([m is mm for mm in self.states.values()], [m in self for m in info['arg'].affected_models], \)
    #           [m in self.states.values() for m in info['arg'].affected_models]
    # Forward to affected child models contained in this model, tagging the
    # message so children recognize it came from a parent.
    if any([m in self for m in info['arg'].affected_models]):
        if not msg.action.startswith('parent_notification_'):
            new_msg = msg._replace(action='parent_notification_' + msg.action)
        else:
            new_msg = msg
        for m in info['arg'].affected_models:
            # print('???propagate it to', m, m.parent)
            if isinstance(m, AbstractStateModel) and m in self:
                # print('!!!propagate it from {0} to {1} {2}'.format(self.state.state_id, m.state.state_id, m))
                m.action_signal.emit(new_msg)
    if msg.action.startswith('parent_notification_'):
        return
    # recursive propagation of action signal TODO remove finally
    if self.parent is not None:
        # Notify parent about change of meta data
        info.arg = msg
        # print("DONE1", self.state.state_id, msg)
        self.parent.action_signal_triggered(model, prop_name, info)
        # print("FINISH DONE1", self.state.state_id, msg)
    # state machine propagation of action signal (indirect) TODO remove finally
    elif not msg.action.startswith('sm_notification_'): # Prevent recursive call
        # If we are the root state, inform the state machine model by emitting our own meta signal.
        # To make the signal distinguishable for a change of meta data to our state, the change property of
        # the message is prepended with 'sm_notification_'
        # print("DONE2", self.state.state_id, msg)
        new_msg = msg._replace(action='sm_notification_' + msg.action)
        self.action_signal.emit(new_msg)
        # print("FINISH DONE2", self.state.state_id, msg)
    else:
        # NOTE(review): unreachable -- the early return at the top already
        # guarantees msg.action does not start with 'sm_notification_'.
        # print("DONE3 NOTHING")
        pass
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.