code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def is_descendant_of_bin(self, id_, bin_id):
"""Tests if an ``Id`` is a descendant of a bin.
arg: id (osid.id.Id): an ``Id``
arg: bin_id (osid.id.Id): the ``Id`` of a bin
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``bin_id,`` ``false`` otherwise
raise: NotFound - ``bin_id`` is not found
raise: NullArgument - ``id`` or ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_descendant_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=bin_id)
return self._hierarchy_session.is_descendant(id_=id_, descendant_id=bin_id) | Tests if an ``Id`` is a descendant of a bin.
arg: id (osid.id.Id): an ``Id``
arg: bin_id (osid.id.Id): the ``Id`` of a bin
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``bin_id,`` ``false`` otherwise
raise: NotFound - ``bin_id`` is not found
raise: NullArgument - ``id`` or ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``. | Below is the the instruction that describes the task:
### Input:
Tests if an ``Id`` is a descendant of a bin.
arg: id (osid.id.Id): an ``Id``
arg: bin_id (osid.id.Id): the ``Id`` of a bin
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``bin_id,`` ``false`` otherwise
raise: NotFound - ``bin_id`` is not found
raise: NullArgument - ``id`` or ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
### Response:
def is_descendant_of_bin(self, id_, bin_id):
"""Tests if an ``Id`` is a descendant of a bin.
arg: id (osid.id.Id): an ``Id``
arg: bin_id (osid.id.Id): the ``Id`` of a bin
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``bin_id,`` ``false`` otherwise
raise: NotFound - ``bin_id`` is not found
raise: NullArgument - ``id`` or ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_descendant_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=bin_id)
return self._hierarchy_session.is_descendant(id_=id_, descendant_id=bin_id) |
def folderitems(self):
"""XXX refactor if possible to non-classic mode
"""
items = super(AnalysisRequestAnalysesView, self).folderitems()
self.categories.sort()
return items | XXX refactor if possible to non-classic mode | Below is the the instruction that describes the task:
### Input:
XXX refactor if possible to non-classic mode
### Response:
def folderitems(self):
"""XXX refactor if possible to non-classic mode
"""
items = super(AnalysisRequestAnalysesView, self).folderitems()
self.categories.sort()
return items |
def _add_membership_multicast_socket(self):
"""
Make membership request to multicast
:rtype: None
"""
self._membership_request = socket.inet_aton(self._multicast_group) \
+ socket.inet_aton(self._multicast_ip)
# Send add membership request to socket
# See http://www.tldp.org/HOWTO/Multicast-HOWTO-6.html
# for explanation of sockopts
self._multicast_socket.setsockopt(
socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP,
self._membership_request
) | Make membership request to multicast
:rtype: None | Below is the the instruction that describes the task:
### Input:
Make membership request to multicast
:rtype: None
### Response:
def _add_membership_multicast_socket(self):
"""
Make membership request to multicast
:rtype: None
"""
self._membership_request = socket.inet_aton(self._multicast_group) \
+ socket.inet_aton(self._multicast_ip)
# Send add membership request to socket
# See http://www.tldp.org/HOWTO/Multicast-HOWTO-6.html
# for explanation of sockopts
self._multicast_socket.setsockopt(
socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP,
self._membership_request
) |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'credential_type') and self.credential_type is not None:
_dict['credential_type'] = self.credential_type
if hasattr(self, 'client_id') and self.client_id is not None:
_dict['client_id'] = self.client_id
if hasattr(self, 'enterprise_id') and self.enterprise_id is not None:
_dict['enterprise_id'] = self.enterprise_id
if hasattr(self, 'url') and self.url is not None:
_dict['url'] = self.url
if hasattr(self, 'username') and self.username is not None:
_dict['username'] = self.username
if hasattr(self,
'organization_url') and self.organization_url is not None:
_dict['organization_url'] = self.organization_url
if hasattr(self, 'site_collection_path'
) and self.site_collection_path is not None:
_dict['site_collection.path'] = self.site_collection_path
if hasattr(self, 'client_secret') and self.client_secret is not None:
_dict['client_secret'] = self.client_secret
if hasattr(self, 'public_key_id') and self.public_key_id is not None:
_dict['public_key_id'] = self.public_key_id
if hasattr(self, 'private_key') and self.private_key is not None:
_dict['private_key'] = self.private_key
if hasattr(self, 'passphrase') and self.passphrase is not None:
_dict['passphrase'] = self.passphrase
if hasattr(self, 'password') and self.password is not None:
_dict['password'] = self.password
if hasattr(self, 'gateway_id') and self.gateway_id is not None:
_dict['gateway_id'] = self.gateway_id
if hasattr(self, 'source_version') and self.source_version is not None:
_dict['source_version'] = self.source_version
if hasattr(
self,
'web_application_url') and self.web_application_url is not None:
_dict['web_application_url'] = self.web_application_url
if hasattr(self, 'domain') and self.domain is not None:
_dict['domain'] = self.domain
if hasattr(self, 'endpoint') and self.endpoint is not None:
_dict['endpoint'] = self.endpoint
if hasattr(self, 'access_key_id') and self.access_key_id is not None:
_dict['access_key_id'] = self.access_key_id
if hasattr(self,
'secret_access_key') and self.secret_access_key is not None:
_dict['secret_access_key'] = self.secret_access_key
return _dict | Return a json dictionary representing this model. | Below is the the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'credential_type') and self.credential_type is not None:
_dict['credential_type'] = self.credential_type
if hasattr(self, 'client_id') and self.client_id is not None:
_dict['client_id'] = self.client_id
if hasattr(self, 'enterprise_id') and self.enterprise_id is not None:
_dict['enterprise_id'] = self.enterprise_id
if hasattr(self, 'url') and self.url is not None:
_dict['url'] = self.url
if hasattr(self, 'username') and self.username is not None:
_dict['username'] = self.username
if hasattr(self,
'organization_url') and self.organization_url is not None:
_dict['organization_url'] = self.organization_url
if hasattr(self, 'site_collection_path'
) and self.site_collection_path is not None:
_dict['site_collection.path'] = self.site_collection_path
if hasattr(self, 'client_secret') and self.client_secret is not None:
_dict['client_secret'] = self.client_secret
if hasattr(self, 'public_key_id') and self.public_key_id is not None:
_dict['public_key_id'] = self.public_key_id
if hasattr(self, 'private_key') and self.private_key is not None:
_dict['private_key'] = self.private_key
if hasattr(self, 'passphrase') and self.passphrase is not None:
_dict['passphrase'] = self.passphrase
if hasattr(self, 'password') and self.password is not None:
_dict['password'] = self.password
if hasattr(self, 'gateway_id') and self.gateway_id is not None:
_dict['gateway_id'] = self.gateway_id
if hasattr(self, 'source_version') and self.source_version is not None:
_dict['source_version'] = self.source_version
if hasattr(
self,
'web_application_url') and self.web_application_url is not None:
_dict['web_application_url'] = self.web_application_url
if hasattr(self, 'domain') and self.domain is not None:
_dict['domain'] = self.domain
if hasattr(self, 'endpoint') and self.endpoint is not None:
_dict['endpoint'] = self.endpoint
if hasattr(self, 'access_key_id') and self.access_key_id is not None:
_dict['access_key_id'] = self.access_key_id
if hasattr(self,
'secret_access_key') and self.secret_access_key is not None:
_dict['secret_access_key'] = self.secret_access_key
return _dict |
def dump(data, stream=None, Dumper=Dumper, **kwds):
"""
Serialize a Python object into a YAML stream.
If stream is None, return the produced string instead.
"""
return dump_all([data], stream, Dumper=Dumper, **kwds) | Serialize a Python object into a YAML stream.
If stream is None, return the produced string instead. | Below is the the instruction that describes the task:
### Input:
Serialize a Python object into a YAML stream.
If stream is None, return the produced string instead.
### Response:
def dump(data, stream=None, Dumper=Dumper, **kwds):
"""
Serialize a Python object into a YAML stream.
If stream is None, return the produced string instead.
"""
return dump_all([data], stream, Dumper=Dumper, **kwds) |
def summarise( self, results ):
"""Generate a summary of results from a list of result dicts
returned by running the underlying experiment. By default we generate
mean, median, variance, and extrema for each value recorded.
Override this method to create different or extra summary statistics.
:param results: an array of result dicts
:returns: a dict of summary statistics"""
if len(results) == 0:
return dict()
else:
summary = dict()
# work out the fields to summarise
allKeys = results[0][Experiment.RESULTS].keys()
ks = self._summarised_results
if ks is None:
# if we don't restrict, summarise all keys
ks = allKeys
else:
# protect against a key that's not present
ks = [ k for k in ks if k in allKeys ]
# add the summary statistics
for k in ks:
# compute summaries for all fields we're interested in
vs = [ res[Experiment.RESULTS][k] for res in results ]
summary[self._mean(k)] = numpy.mean(vs)
summary[self._median(k)] = numpy.median(vs)
summary[self._variance(k)] = numpy.var(vs)
summary[self._min(k)] = numpy.min(vs)
summary[self._max(k)] = numpy.max(vs)
return summary | Generate a summary of results from a list of result dicts
returned by running the underlying experiment. By default we generate
mean, median, variance, and extrema for each value recorded.
Override this method to create different or extra summary statistics.
:param results: an array of result dicts
:returns: a dict of summary statistics | Below is the the instruction that describes the task:
### Input:
Generate a summary of results from a list of result dicts
returned by running the underlying experiment. By default we generate
mean, median, variance, and extrema for each value recorded.
Override this method to create different or extra summary statistics.
:param results: an array of result dicts
:returns: a dict of summary statistics
### Response:
def summarise( self, results ):
"""Generate a summary of results from a list of result dicts
returned by running the underlying experiment. By default we generate
mean, median, variance, and extrema for each value recorded.
Override this method to create different or extra summary statistics.
:param results: an array of result dicts
:returns: a dict of summary statistics"""
if len(results) == 0:
return dict()
else:
summary = dict()
# work out the fields to summarise
allKeys = results[0][Experiment.RESULTS].keys()
ks = self._summarised_results
if ks is None:
# if we don't restrict, summarise all keys
ks = allKeys
else:
# protect against a key that's not present
ks = [ k for k in ks if k in allKeys ]
# add the summary statistics
for k in ks:
# compute summaries for all fields we're interested in
vs = [ res[Experiment.RESULTS][k] for res in results ]
summary[self._mean(k)] = numpy.mean(vs)
summary[self._median(k)] = numpy.median(vs)
summary[self._variance(k)] = numpy.var(vs)
summary[self._min(k)] = numpy.min(vs)
summary[self._max(k)] = numpy.max(vs)
return summary |
def restart_complete(self, state, new_address):
'''
Called when we get notified that the restart has been completed by
some agent who has volontureed to do so.
'''
if state.timeout_call_id:
state.agent.cancel_delayed_call(state.timeout_call_id)
state.timeout_call_id = None
return self._send_restarted_notifications(new_address) | Called when we get notified that the restart has been completed by
some agent who has volontureed to do so. | Below is the the instruction that describes the task:
### Input:
Called when we get notified that the restart has been completed by
some agent who has volontureed to do so.
### Response:
def restart_complete(self, state, new_address):
'''
Called when we get notified that the restart has been completed by
some agent who has volontureed to do so.
'''
if state.timeout_call_id:
state.agent.cancel_delayed_call(state.timeout_call_id)
state.timeout_call_id = None
return self._send_restarted_notifications(new_address) |
def get_all_tags(image_name, branch=None):
"""
GET /v1/repositories/<namespace>/<repository_name>/tags
:param image_name: The docker image name
:param branch: The branch to filter by
:return: A list of Version instances, latest first
"""
try:
return get_all_tags_no_auth(image_name, branch)
except AuthException:
return get_all_tags_with_auth(image_name, branch) | GET /v1/repositories/<namespace>/<repository_name>/tags
:param image_name: The docker image name
:param branch: The branch to filter by
:return: A list of Version instances, latest first | Below is the the instruction that describes the task:
### Input:
GET /v1/repositories/<namespace>/<repository_name>/tags
:param image_name: The docker image name
:param branch: The branch to filter by
:return: A list of Version instances, latest first
### Response:
def get_all_tags(image_name, branch=None):
"""
GET /v1/repositories/<namespace>/<repository_name>/tags
:param image_name: The docker image name
:param branch: The branch to filter by
:return: A list of Version instances, latest first
"""
try:
return get_all_tags_no_auth(image_name, branch)
except AuthException:
return get_all_tags_with_auth(image_name, branch) |
def get(self):
"""
method to fetch all contents as a list
:return: list
"""
ret_list = []
if hasattr(self, "font"):
ret_list.append(self.font)
if hasattr(self, "size"):
ret_list.append(self.size)
if hasattr(self, "text"):
ret_list.append(self.text)
return ret_list | method to fetch all contents as a list
:return: list | Below is the the instruction that describes the task:
### Input:
method to fetch all contents as a list
:return: list
### Response:
def get(self):
"""
method to fetch all contents as a list
:return: list
"""
ret_list = []
if hasattr(self, "font"):
ret_list.append(self.font)
if hasattr(self, "size"):
ret_list.append(self.size)
if hasattr(self, "text"):
ret_list.append(self.text)
return ret_list |
def play(self, sox_effects=()):
""" Play the segment. """
audio_data = self.getAudioData()
logging.getLogger().info("Playing speech segment (%s): '%s'" % (self.lang, self))
cmd = ["sox", "-q", "-t", "mp3", "-"]
if sys.platform.startswith("win32"):
cmd.extend(("-t", "waveaudio"))
cmd.extend(("-d", "trim", "0.1", "reverse", "trim", "0.07", "reverse")) # "trim", "0.25", "-0.1"
cmd.extend(sox_effects)
logging.getLogger().debug("Start player process")
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.DEVNULL)
p.communicate(input=audio_data)
if p.returncode != 0:
raise RuntimeError()
logging.getLogger().debug("Done playing") | Play the segment. | Below is the the instruction that describes the task:
### Input:
Play the segment.
### Response:
def play(self, sox_effects=()):
""" Play the segment. """
audio_data = self.getAudioData()
logging.getLogger().info("Playing speech segment (%s): '%s'" % (self.lang, self))
cmd = ["sox", "-q", "-t", "mp3", "-"]
if sys.platform.startswith("win32"):
cmd.extend(("-t", "waveaudio"))
cmd.extend(("-d", "trim", "0.1", "reverse", "trim", "0.07", "reverse")) # "trim", "0.25", "-0.1"
cmd.extend(sox_effects)
logging.getLogger().debug("Start player process")
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.DEVNULL)
p.communicate(input=audio_data)
if p.returncode != 0:
raise RuntimeError()
logging.getLogger().debug("Done playing") |
def get_experiment_summ_ids( self, coinc_event_id ):
"""
Gets all the experiment_summ_ids that map to a given coinc_event_id.
"""
experiment_summ_ids = []
for row in self:
if row.coinc_event_id == coinc_event_id:
experiment_summ_ids.append(row.experiment_summ_id)
if len(experiment_summ_ids) == 0:
raise ValueError("'%s' could not be found in the experiment_map table" % coinc_event_id)
return experiment_summ_ids | Gets all the experiment_summ_ids that map to a given coinc_event_id. | Below is the the instruction that describes the task:
### Input:
Gets all the experiment_summ_ids that map to a given coinc_event_id.
### Response:
def get_experiment_summ_ids( self, coinc_event_id ):
"""
Gets all the experiment_summ_ids that map to a given coinc_event_id.
"""
experiment_summ_ids = []
for row in self:
if row.coinc_event_id == coinc_event_id:
experiment_summ_ids.append(row.experiment_summ_id)
if len(experiment_summ_ids) == 0:
raise ValueError("'%s' could not be found in the experiment_map table" % coinc_event_id)
return experiment_summ_ids |
def FindSourceFiles(self, node='.'):
""" returns a list of all source files.
"""
node = self.arg2nodes(node, self.fs.Entry)[0]
sources = []
def build_source(ss):
for s in ss:
if isinstance(s, SCons.Node.FS.Dir):
build_source(s.all_children())
elif s.has_builder():
build_source(s.sources)
elif isinstance(s.disambiguate(), SCons.Node.FS.File):
sources.append(s)
build_source(node.all_children())
def final_source(node):
while (node != node.srcnode()):
node = node.srcnode()
return node
sources = list(map( final_source, sources ));
# remove duplicates
return list(set(sources)) | returns a list of all source files. | Below is the the instruction that describes the task:
### Input:
returns a list of all source files.
### Response:
def FindSourceFiles(self, node='.'):
""" returns a list of all source files.
"""
node = self.arg2nodes(node, self.fs.Entry)[0]
sources = []
def build_source(ss):
for s in ss:
if isinstance(s, SCons.Node.FS.Dir):
build_source(s.all_children())
elif s.has_builder():
build_source(s.sources)
elif isinstance(s.disambiguate(), SCons.Node.FS.File):
sources.append(s)
build_source(node.all_children())
def final_source(node):
while (node != node.srcnode()):
node = node.srcnode()
return node
sources = list(map( final_source, sources ));
# remove duplicates
return list(set(sources)) |
def _dedent(text, tabsize=8, skip_first_line=False):
"""_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces
"""
lines = text.splitlines(1)
_dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
return ''.join(lines) | _dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces | Below is the the instruction that describes the task:
### Input:
_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces
### Response:
def _dedent(text, tabsize=8, skip_first_line=False):
"""_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces
"""
lines = text.splitlines(1)
_dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
return ''.join(lines) |
def order(self, field, order=None):
"""
Set field and order set by arguments
"""
if not order:
order = ORDER.DESC
self.url.order = (field, order)
self.url.set_page(1)
return self | Set field and order set by arguments | Below is the the instruction that describes the task:
### Input:
Set field and order set by arguments
### Response:
def order(self, field, order=None):
"""
Set field and order set by arguments
"""
if not order:
order = ORDER.DESC
self.url.order = (field, order)
self.url.set_page(1)
return self |
def get_process_rca(self, pid=None):
'''
get_process_rca(self, pid=None)
Get the RCA tree of a given failed process. The RCA tree contains all failed child processes that caused the failure of the given process.
:Parameters:
* *pid* (`string`) -- Identifier of an existing process
'''
pid = self._get_pid(pid)
return self._call_rest_api('get', '/processes/'+pid+'/rca', error='Failed to fetch process information') | get_process_rca(self, pid=None)
Get the RCA tree of a given failed process. The RCA tree contains all failed child processes that caused the failure of the given process.
:Parameters:
* *pid* (`string`) -- Identifier of an existing process | Below is the the instruction that describes the task:
### Input:
get_process_rca(self, pid=None)
Get the RCA tree of a given failed process. The RCA tree contains all failed child processes that caused the failure of the given process.
:Parameters:
* *pid* (`string`) -- Identifier of an existing process
### Response:
def get_process_rca(self, pid=None):
'''
get_process_rca(self, pid=None)
Get the RCA tree of a given failed process. The RCA tree contains all failed child processes that caused the failure of the given process.
:Parameters:
* *pid* (`string`) -- Identifier of an existing process
'''
pid = self._get_pid(pid)
return self._call_rest_api('get', '/processes/'+pid+'/rca', error='Failed to fetch process information') |
def resolve_function(slither, contract_name, function_name):
"""
Resolves a function instance, given a contract name and function.
:param contract_name: The name of the contract the function is declared in.
:param function_name: The name of the function to resolve.
:return: Returns the resolved function, raises an exception otherwise.
"""
# Obtain the target contract
contract = slither.get_contract_from_name(contract_name)
# Verify the contract was resolved successfully
if contract is None:
raise ResolveFunctionException(f"Could not resolve target contract: {contract_name}")
# Obtain the target function
target_function = next((function for function in contract.functions if function.name == function_name), None)
# Verify we have resolved the function specified.
if target_function is None:
raise ResolveFunctionException(f"Could not resolve target function: {contract_name}.{function_name}")
# Add the resolved function to the new list.
return target_function | Resolves a function instance, given a contract name and function.
:param contract_name: The name of the contract the function is declared in.
:param function_name: The name of the function to resolve.
:return: Returns the resolved function, raises an exception otherwise. | Below is the the instruction that describes the task:
### Input:
Resolves a function instance, given a contract name and function.
:param contract_name: The name of the contract the function is declared in.
:param function_name: The name of the function to resolve.
:return: Returns the resolved function, raises an exception otherwise.
### Response:
def resolve_function(slither, contract_name, function_name):
"""
Resolves a function instance, given a contract name and function.
:param contract_name: The name of the contract the function is declared in.
:param function_name: The name of the function to resolve.
:return: Returns the resolved function, raises an exception otherwise.
"""
# Obtain the target contract
contract = slither.get_contract_from_name(contract_name)
# Verify the contract was resolved successfully
if contract is None:
raise ResolveFunctionException(f"Could not resolve target contract: {contract_name}")
# Obtain the target function
target_function = next((function for function in contract.functions if function.name == function_name), None)
# Verify we have resolved the function specified.
if target_function is None:
raise ResolveFunctionException(f"Could not resolve target function: {contract_name}.{function_name}")
# Add the resolved function to the new list.
return target_function |
def process(self):
""" Collects, processes & reports metrics """
if self.agent.machine.fsm.current is "wait4init":
# Test the host agent if we're ready to send data
if self.agent.is_agent_ready():
self.agent.machine.fsm.ready()
else:
return
if self.agent.can_send():
self.snapshot_countdown = self.snapshot_countdown - 1
ss = None
cm = self.collect_metrics()
if self.snapshot_countdown < 1:
logger.debug("Sending process snapshot data")
self.snapshot_countdown = self.SNAPSHOT_PERIOD
ss = self.collect_snapshot()
md = copy.deepcopy(cm).delta_data(None)
else:
md = copy.deepcopy(cm).delta_data(self.last_metrics)
ed = EntityData(pid=self.agent.from_.pid, snapshot=ss, metrics=md)
response = self.agent.report_data(ed)
if response:
if response.status_code is 200 and len(response.content) > 2:
# The host agent returned something indicating that is has a request for us that we
# need to process.
self.handle_agent_tasks(json.loads(response.content)[0])
self.last_metrics = cm.__dict__ | Collects, processes & reports metrics | Below is the the instruction that describes the task:
### Input:
Collects, processes & reports metrics
### Response:
def process(self):
""" Collects, processes & reports metrics """
if self.agent.machine.fsm.current is "wait4init":
# Test the host agent if we're ready to send data
if self.agent.is_agent_ready():
self.agent.machine.fsm.ready()
else:
return
if self.agent.can_send():
self.snapshot_countdown = self.snapshot_countdown - 1
ss = None
cm = self.collect_metrics()
if self.snapshot_countdown < 1:
logger.debug("Sending process snapshot data")
self.snapshot_countdown = self.SNAPSHOT_PERIOD
ss = self.collect_snapshot()
md = copy.deepcopy(cm).delta_data(None)
else:
md = copy.deepcopy(cm).delta_data(self.last_metrics)
ed = EntityData(pid=self.agent.from_.pid, snapshot=ss, metrics=md)
response = self.agent.report_data(ed)
if response:
if response.status_code is 200 and len(response.content) > 2:
# The host agent returned something indicating that is has a request for us that we
# need to process.
self.handle_agent_tasks(json.loads(response.content)[0])
self.last_metrics = cm.__dict__ |
def Weber_saltation(mp, rhop, dp, rhog, D, Vterminal=4):
r'''Calculates saltation velocity of the gas for pneumatic conveying,
according to [1]_ as described in [2]_, [3]_, [4]_, and [5]_.
If Vterminal is under 3 m/s, use equation 1; otherwise, equation 2.
.. math::
Fr_s = \left(7 + \frac{8}{3}V_{terminal}\right)\mu^{0.25}
\left(\frac{d_p}{D}\right)^{0.1}
.. math::
Fr_s = 15\mu^{0.25}\left(\frac{d_p}{D}\right)^{0.1}
.. math::
Fr_s = \frac{V_{salt}}{\sqrt{gD}}
.. math::
\mu = \frac{m_p}{\frac{\pi}{4}D^2V \rho_f}
Parameters
----------
mp : float
Solid mass flow rate, [kg/s]
rhop : float
Particle density, [kg/m^3]
dp : float
Particle diameter, [m]
rhog : float
Gas density, [kg/m^3]
D : float
Diameter of pipe, [m]
Vterminal : float
Terminal velocity of particle settling in gas, [m/s]
Returns
-------
V : float
Saltation velocity of gas, [m/s]
Notes
-----
Model is rearranged to be explicit in terms of saltation velocity
internally.
Examples
--------
Examples are only a self-test.
>>> Weber_saltation(mp=1, rhop=1000., dp=1E-3, rhog=1.2, D=0.1, Vterminal=4)
15.227445436331474
References
----------
.. [1] Weber, M. 1981. Principles of hydraulic and pneumatic conveying in
pipes. Bulk Solids Handling 1: 57-63.
.. [2] Rabinovich, Evgeny, and Haim Kalman. "Threshold Velocities of
Particle-Fluid Flows in Horizontal Pipes and Ducts: Literature Review."
Reviews in Chemical Engineering 27, no. 5-6 (January 1, 2011).
doi:10.1515/REVCE.2011.011.
.. [3] Setia, G., S. S. Mallick, R. Pan, and P. W. Wypych. "Modeling
Minimum Transport Boundary for Fluidized Dense-Phase Pneumatic Conveying
Systems." Powder Technology 277 (June 2015): 244-51.
doi:10.1016/j.powtec.2015.02.050.
.. [4] Bansal, A., S. S. Mallick, and P. W. Wypych. "Investigating
Straight-Pipe Pneumatic Conveying Characteristics for Fluidized
Dense-Phase Pneumatic Conveying." Particulate Science and Technology
31, no. 4 (July 4, 2013): 348-56. doi:10.1080/02726351.2012.732677.
.. [5] Gomes, L. M., and A. L. Amarante Mesquita. "On the Prediction of
Pickup and Saltation Velocities in Pneumatic Conveying." Brazilian
Journal of Chemical Engineering 31, no. 1 (March 2014): 35-46.
doi:10.1590/S0104-66322014000100005
'''
if Vterminal <= 3:
term1 = (7 + 8/3.*Vterminal)*(dp/D)**0.1
else:
term1 = 15.*(dp/D)**0.1
term2 = 1./(g*D)**0.5
term3 = mp/rhog/(pi/4*D**2)
return (term1/term2*term3**0.25)**(1/1.25) | r'''Calculates saltation velocity of the gas for pneumatic conveying,
according to [1]_ as described in [2]_, [3]_, [4]_, and [5]_.
If Vterminal is under 3 m/s, use equation 1; otherwise, equation 2.
.. math::
Fr_s = \left(7 + \frac{8}{3}V_{terminal}\right)\mu^{0.25}
\left(\frac{d_p}{D}\right)^{0.1}
.. math::
Fr_s = 15\mu^{0.25}\left(\frac{d_p}{D}\right)^{0.1}
.. math::
Fr_s = \frac{V_{salt}}{\sqrt{gD}}
.. math::
\mu = \frac{m_p}{\frac{\pi}{4}D^2V \rho_f}
Parameters
----------
mp : float
Solid mass flow rate, [kg/s]
rhop : float
Particle density, [kg/m^3]
dp : float
Particle diameter, [m]
rhog : float
Gas density, [kg/m^3]
D : float
Diameter of pipe, [m]
Vterminal : float
Terminal velocity of particle settling in gas, [m/s]
Returns
-------
V : float
Saltation velocity of gas, [m/s]
Notes
-----
Model is rearranged to be explicit in terms of saltation velocity
internally.
Examples
--------
Examples are only a self-test.
>>> Weber_saltation(mp=1, rhop=1000., dp=1E-3, rhog=1.2, D=0.1, Vterminal=4)
15.227445436331474
References
----------
.. [1] Weber, M. 1981. Principles of hydraulic and pneumatic conveying in
pipes. Bulk Solids Handling 1: 57-63.
.. [2] Rabinovich, Evgeny, and Haim Kalman. "Threshold Velocities of
Particle-Fluid Flows in Horizontal Pipes and Ducts: Literature Review."
Reviews in Chemical Engineering 27, no. 5-6 (January 1, 2011).
doi:10.1515/REVCE.2011.011.
.. [3] Setia, G., S. S. Mallick, R. Pan, and P. W. Wypych. "Modeling
Minimum Transport Boundary for Fluidized Dense-Phase Pneumatic Conveying
Systems." Powder Technology 277 (June 2015): 244-51.
doi:10.1016/j.powtec.2015.02.050.
.. [4] Bansal, A., S. S. Mallick, and P. W. Wypych. "Investigating
Straight-Pipe Pneumatic Conveying Characteristics for Fluidized
Dense-Phase Pneumatic Conveying." Particulate Science and Technology
31, no. 4 (July 4, 2013): 348-56. doi:10.1080/02726351.2012.732677.
.. [5] Gomes, L. M., and A. L. Amarante Mesquita. "On the Prediction of
Pickup and Saltation Velocities in Pneumatic Conveying." Brazilian
Journal of Chemical Engineering 31, no. 1 (March 2014): 35-46.
doi:10.1590/S0104-66322014000100005 | Below is the the instruction that describes the task:
### Input:
r'''Calculates saltation velocity of the gas for pneumatic conveying,
according to [1]_ as described in [2]_, [3]_, [4]_, and [5]_.
If Vterminal is under 3 m/s, use equation 1; otherwise, equation 2.
.. math::
Fr_s = \left(7 + \frac{8}{3}V_{terminal}\right)\mu^{0.25}
\left(\frac{d_p}{D}\right)^{0.1}
.. math::
Fr_s = 15\mu^{0.25}\left(\frac{d_p}{D}\right)^{0.1}
.. math::
Fr_s = \frac{V_{salt}}{\sqrt{gD}}
.. math::
\mu = \frac{m_p}{\frac{\pi}{4}D^2V \rho_f}
Parameters
----------
mp : float
Solid mass flow rate, [kg/s]
rhop : float
Particle density, [kg/m^3]
dp : float
Particle diameter, [m]
rhog : float
Gas density, [kg/m^3]
D : float
Diameter of pipe, [m]
Vterminal : float
Terminal velocity of particle settling in gas, [m/s]
Returns
-------
V : float
Saltation velocity of gas, [m/s]
Notes
-----
Model is rearranged to be explicit in terms of saltation velocity
internally.
Examples
--------
Examples are only a self-test.
>>> Weber_saltation(mp=1, rhop=1000., dp=1E-3, rhog=1.2, D=0.1, Vterminal=4)
15.227445436331474
References
----------
.. [1] Weber, M. 1981. Principles of hydraulic and pneumatic conveying in
pipes. Bulk Solids Handling 1: 57-63.
.. [2] Rabinovich, Evgeny, and Haim Kalman. "Threshold Velocities of
Particle-Fluid Flows in Horizontal Pipes and Ducts: Literature Review."
Reviews in Chemical Engineering 27, no. 5-6 (January 1, 2011).
doi:10.1515/REVCE.2011.011.
.. [3] Setia, G., S. S. Mallick, R. Pan, and P. W. Wypych. "Modeling
Minimum Transport Boundary for Fluidized Dense-Phase Pneumatic Conveying
Systems." Powder Technology 277 (June 2015): 244-51.
doi:10.1016/j.powtec.2015.02.050.
.. [4] Bansal, A., S. S. Mallick, and P. W. Wypych. "Investigating
Straight-Pipe Pneumatic Conveying Characteristics for Fluidized
Dense-Phase Pneumatic Conveying." Particulate Science and Technology
31, no. 4 (July 4, 2013): 348-56. doi:10.1080/02726351.2012.732677.
.. [5] Gomes, L. M., and A. L. Amarante Mesquita. "On the Prediction of
Pickup and Saltation Velocities in Pneumatic Conveying." Brazilian
Journal of Chemical Engineering 31, no. 1 (March 2014): 35-46.
doi:10.1590/S0104-66322014000100005
### Response:
def Weber_saltation(mp, rhop, dp, rhog, D, Vterminal=4):
    r'''Calculate the saltation velocity of the gas for pneumatic conveying
    using the Weber correlation [1]_ as described in [2]_, [3]_, [4]_,
    and [5]_.

    For terminal velocities of 3 m/s or less:

    .. math::
        Fr_s = \left(7 + \frac{8}{3}V_{terminal}\right)\mu^{0.25}
        \left(\frac{d_p}{D}\right)^{0.1}

    Otherwise:

    .. math::
        Fr_s = 15\mu^{0.25}\left(\frac{d_p}{D}\right)^{0.1}

    .. math::
        Fr_s = \frac{V_{salt}}{\sqrt{gD}}

    .. math::
        \mu = \frac{m_p}{\frac{\pi}{4}D^2V \rho_f}

    Parameters
    ----------
    mp : float
        Solid mass flow rate, [kg/s]
    rhop : float
        Particle density (not used by this correlation), [kg/m^3]
    dp : float
        Particle diameter, [m]
    rhog : float
        Gas density, [kg/m^3]
    D : float
        Diameter of pipe, [m]
    Vterminal : float
        Terminal velocity of particle settling in gas, [m/s]

    Returns
    -------
    V : float
        Saltation velocity of gas, [m/s]

    Notes
    -----
    The correlation is implicit in the saltation velocity through the solids
    loading ratio :math:`\mu`; it is rearranged here to be explicit.

    Examples
    --------
    Examples are only a self-test.

    >>> Weber_saltation(mp=1, rhop=1000., dp=1E-3, rhog=1.2, D=0.1, Vterminal=4)
    15.227445436331474

    References
    ----------
    .. [1] Weber, M. 1981. Principles of hydraulic and pneumatic conveying in
       pipes. Bulk Solids Handling 1: 57-63.
    .. [2] Rabinovich, Evgeny, and Haim Kalman. "Threshold Velocities of
       Particle-Fluid Flows in Horizontal Pipes and Ducts: Literature Review."
       Reviews in Chemical Engineering 27, no. 5-6 (January 1, 2011).
       doi:10.1515/REVCE.2011.011.
    '''
    # Froude-number prefactor (everything except the mu**0.25 term)
    size_term = (dp/D)**0.1
    if Vterminal <= 3:
        Frs_base = (7 + 8/3.*Vterminal)*size_term
    else:
        Frs_base = 15.*size_term
    # Fr_s = V/sqrt(g*D) and mu = mp/(A*V*rhog) with A = pi/4*D**2; solving
    # for V gives V = (Frs_base/inv_sqrt_gD*loading**0.25)**(1/1.25).
    inv_sqrt_gD = 1./(g*D)**0.5
    loading = mp/rhog/(pi/4*D**2)
    return (Frs_base/inv_sqrt_gD*loading**0.25)**(1/1.25)
def write(self, output_filepath):
    """
    Serialize the ExmaraldaFile instance and write it to a file.

    Parameters
    ----------
    output_filepath : str
        relative or absolute path of the Exmaralda file to be created
    """
    serialized = str(self)
    with open(output_filepath, 'w') as target:
        target.write(serialized)
Parameters
----------
output_filepath : str
relative or absolute path to the Exmaralda file to be created | Below is the the instruction that describes the task:
### Input:
serialize the ExmaraldaFile instance and write it to a file.
Parameters
----------
output_filepath : str
relative or absolute path to the Exmaralda file to be created
### Response:
def write(self, output_filepath):
"""
serialize the ExmaraldaFile instance and write it to a file.
Parameters
----------
output_filepath : str
relative or absolute path to the Exmaralda file to be created
"""
with open(output_filepath, 'w') as out_file:
out_file.write(self.__str__()) |
def get_attributes(self, obj):
    """Get all attributes of an object.

    Sends multi-parameter info/config queries and returns the result as a
    dictionary.

    :param obj: requested object.
    :returns: dictionary of <name, value> of all attributes returned by the query.
    :rtype: dict of (str, str)
    """
    object_url = '{}/{}'.format(self.session_url, obj.ref)
    return self._get_attributes(object_url)
Sends multi-parameter info/config queries and returns the result as dictionary.
:param obj: requested object.
:returns: dictionary of <name, value> of all attributes returned by the query.
:rtype: dict of (str, str) | Below is the the instruction that describes the task:
### Input:
Get all object's attributes.
Sends multi-parameter info/config queries and returns the result as dictionary.
:param obj: requested object.
:returns: dictionary of <name, value> of all attributes returned by the query.
:rtype: dict of (str, str)
### Response:
def get_attributes(self, obj):
""" Get all object's attributes.
Sends multi-parameter info/config queries and returns the result as dictionary.
:param obj: requested object.
:returns: dictionary of <name, value> of all attributes returned by the query.
:rtype: dict of (str, str)
"""
return self._get_attributes('{}/{}'.format(self.session_url, obj.ref)) |
def default_formats(self, path):
    """Return the default formats, if they apply to the current path #157"""
    for fmt in long_form_multiple_formats(self.default_jupytext_formats):
        try:
            base_path(path, fmt)
        except InconsistentPath:
            # This format does not match the path; try the next one.
            continue
        return self.default_jupytext_formats
    # No configured format applies to this path.
    return None
### Input:
Return the default formats, if they apply to the current path #157
### Response:
def default_formats(self, path):
"""Return the default formats, if they apply to the current path #157"""
formats = long_form_multiple_formats(self.default_jupytext_formats)
for fmt in formats:
try:
base_path(path, fmt)
return self.default_jupytext_formats
except InconsistentPath:
continue
return None |
def main():
    '''Calculate the distance of an object in inches using a HCSR04 sensor
    and a Raspberry Pi'''
    # GPIO pin assignments for the HC-SR04 trigger and echo lines.
    trig_pin = 17
    echo_pin = 27
    # Override the sensor defaults (metric units, 20 C, round to 1 decimal)
    # with imperial units, 68 F, and two decimal places.
    reading = sensor.Measurement(trig_pin,
                                 echo_pin,
                                 temperature=68,
                                 unit='imperial',
                                 round_to=2
                                 )
    raw = reading.raw_distance()
    # Convert the raw measurement to inches.
    inches = reading.distance_imperial(raw)
    print("The Distance = {} inches".format(inches))
and a Raspberry Pi | Below is the the instruction that describes the task:
### Input:
Calculate the distance of an object in inches using a HCSR04 sensor
and a Raspberry Pi
### Response:
def main():
'''Calculate the distance of an object in inches using a HCSR04 sensor
and a Raspberry Pi'''
trig_pin = 17
echo_pin = 27
# Default values
# unit = 'metric'
# temperature = 20
# round_to = 1
# Create a distance reading with the hcsr04 sensor module
# and overide the default values for temp, unit and rounding)
value = sensor.Measurement(trig_pin,
echo_pin,
temperature=68,
unit='imperial',
round_to=2
)
raw_measurement = value.raw_distance()
# Calculate the distance in inches
imperial_distance = value.distance_imperial(raw_measurement)
print("The Distance = {} inches".format(imperial_distance)) |
def conv2d(self, filter_size, output_channels, stride=1, padding='SAME', bn=True, activation_fn=tf.nn.relu,
           b_value=0.0, s_value=1.0, trainable=True):
    """
    2D Convolutional Layer.

    Appends the layer to the network by replacing ``self.input`` with the
    layer's output, and increments ``self.count['conv']`` to derive a unique
    variable-scope name.

    Operation order: conv -> batch norm (if ``bn``) -> add bias -> multiply
    scale -> activation; each of the last three is skipped when its value
    is ``None``.

    :param filter_size: int. assumes square filter; 0 means "use the full
        spatial extent of the input" (1x1 output feature map, used for FCN)
    :param output_channels: int
    :param stride: int
    :param padding: 'VALID' or 'SAME'
    :param bn: bool. apply batch normalization after the convolution
    :param activation_fn: tf.nn function, or None to skip activation
    :param b_value: float, or None to skip the additive bias
    :param s_value: float, or None to skip the multiplicative scale
    :param trainable: bool. whether the created variables are trainable
    """
    self.count['conv'] += 1
    scope = 'conv_' + str(self.count['conv'])
    with tf.variable_scope(scope):
        # Conv function; input is assumed NHWC (channels at index 3).
        input_channels = self.input.get_shape()[3]
        if filter_size == 0:  # outputs a 1x1 feature map; used for FCN
            # NOTE(review): assumes square spatial input (height == width) --
            # only the width (index 2) is consulted; confirm for FCN use.
            filter_size = self.input.get_shape()[2]
            padding = 'VALID'
        output_shape = [filter_size, filter_size, input_channels, output_channels]
        w = self.weight_variable(name='weights', shape=output_shape, trainable=trainable)
        self.input = tf.nn.conv2d(self.input, w, strides=[1, stride, stride, 1], padding=padding)
        if bn is True:  # batch normalization
            self.input = self.batch_norm(self.input)
        if b_value is not None:  # bias value
            b = self.const_variable(name='bias', shape=[output_channels], value=b_value, trainable=trainable)
            self.input = tf.add(self.input, b)
        if s_value is not None:  # scale value
            s = self.const_variable(name='scale', shape=[output_channels], value=s_value, trainable=trainable)
            self.input = tf.multiply(self.input, s)
        if activation_fn is not None:  # activation function
            self.input = activation_fn(self.input)
    print(scope + ' output: ' + str(self.input.get_shape()))
:param filter_size: int. assumes square filter
:param output_channels: int
:param stride: int
:param padding: 'VALID' or 'SAME'
:param activation_fn: tf.nn function
:param b_value: float
:param s_value: float | Below is the the instruction that describes the task:
### Input:
2D Convolutional Layer.
:param filter_size: int. assumes square filter
:param output_channels: int
:param stride: int
:param padding: 'VALID' or 'SAME'
:param activation_fn: tf.nn function
:param b_value: float
:param s_value: float
### Response:
def conv2d(self, filter_size, output_channels, stride=1, padding='SAME', bn=True, activation_fn=tf.nn.relu,
b_value=0.0, s_value=1.0, trainable=True):
"""
2D Convolutional Layer.
:param filter_size: int. assumes square filter
:param output_channels: int
:param stride: int
:param padding: 'VALID' or 'SAME'
:param activation_fn: tf.nn function
:param b_value: float
:param s_value: float
"""
self.count['conv'] += 1
scope = 'conv_' + str(self.count['conv'])
with tf.variable_scope(scope):
# Conv function
input_channels = self.input.get_shape()[3]
if filter_size == 0: # outputs a 1x1 feature map; used for FCN
filter_size = self.input.get_shape()[2]
padding = 'VALID'
output_shape = [filter_size, filter_size, input_channels, output_channels]
w = self.weight_variable(name='weights', shape=output_shape, trainable=trainable)
self.input = tf.nn.conv2d(self.input, w, strides=[1, stride, stride, 1], padding=padding)
if bn is True: # batch normalization
self.input = self.batch_norm(self.input)
if b_value is not None: # bias value
b = self.const_variable(name='bias', shape=[output_channels], value=b_value, trainable=trainable)
self.input = tf.add(self.input, b)
if s_value is not None: # scale value
s = self.const_variable(name='scale', shape=[output_channels], value=s_value, trainable=trainable)
self.input = tf.multiply(self.input, s)
if activation_fn is not None: # activation function
self.input = activation_fn(self.input)
print(scope + ' output: ' + str(self.input.get_shape())) |
def console_set_default_background(
    con: tcod.console.Console, col: Tuple[int, int, int]
) -> None:
    """Change the default background color for a console.

    Args:
        con (Console): Any Console instance.
        col (Union[Tuple[int, int, int], Sequence[int]]):
            An (r, g, b) sequence or Color instance.

    .. deprecated:: 8.5
        Use :any:`Console.default_bg` instead.
    """
    # _console presumably coerces `con` to the underlying C console handle
    # expected by libtcod -- confirm against the helper's definition.
    lib.TCOD_console_set_default_background(_console(con), col)
Args:
con (Console): Any Console instance.
col (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
.. deprecated:: 8.5
Use :any:`Console.default_bg` instead. | Below is the the instruction that describes the task:
### Input:
Change the default background color for a console.
Args:
con (Console): Any Console instance.
col (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
.. deprecated:: 8.5
Use :any:`Console.default_bg` instead.
### Response:
def console_set_default_background(
con: tcod.console.Console, col: Tuple[int, int, int]
) -> None:
"""Change the default background color for a console.
Args:
con (Console): Any Console instance.
col (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
.. deprecated:: 8.5
Use :any:`Console.default_bg` instead.
"""
lib.TCOD_console_set_default_background(_console(con), col) |
def from_app(cls, app, environ, buffered=False):
    """Create a new response object from an application output.

    Works best when given an application that always returns a generator.
    Some applications instead use the ``write()`` callable returned by the
    ``start_response`` function; such edge cases are resolved automatically
    where possible, but if the output is not what you expect, set
    `buffered` to `True` to enforce buffering.

    :param app: the WSGI application to execute.
    :param environ: the WSGI environment to execute against.
    :param buffered: set to `True` to enforce buffering.
    :return: a response object.
    """
    app_output = _run_wsgi_app(app, environ, buffered)
    return cls(*app_output)
works best if you pass it an application that returns a generator all
the time. Sometimes applications may use the `write()` callable
returned by the `start_response` function. This tries to resolve such
edge cases automatically. But if you don't get the expected output
you should set `buffered` to `True` which enforces buffering.
:param app: the WSGI application to execute.
:param environ: the WSGI environment to execute against.
:param buffered: set to `True` to enforce buffering.
:return: a response object. | Below is the the instruction that describes the task:
### Input:
Create a new response object from an application output. This
works best if you pass it an application that returns a generator all
the time. Sometimes applications may use the `write()` callable
returned by the `start_response` function. This tries to resolve such
edge cases automatically. But if you don't get the expected output
you should set `buffered` to `True` which enforces buffering.
:param app: the WSGI application to execute.
:param environ: the WSGI environment to execute against.
:param buffered: set to `True` to enforce buffering.
:return: a response object.
### Response:
def from_app(cls, app, environ, buffered=False):
"""Create a new response object from an application output. This
works best if you pass it an application that returns a generator all
the time. Sometimes applications may use the `write()` callable
returned by the `start_response` function. This tries to resolve such
edge cases automatically. But if you don't get the expected output
you should set `buffered` to `True` which enforces buffering.
:param app: the WSGI application to execute.
:param environ: the WSGI environment to execute against.
:param buffered: set to `True` to enforce buffering.
:return: a response object.
"""
return cls(*_run_wsgi_app(app, environ, buffered)) |
def random_bytes(n):
    """Returns n random bytes generated with given seed
    ---
    tags:
    - Dynamic data
    parameters:
    - in: path
    name: n
    type: int
    produces:
    - application/octet-stream
    responses:
    200:
    description: Bytes.
    """
    # Cap the payload at 100KB regardless of the requested size.
    n = min(n, 100 * 1024)
    params = CaseInsensitiveDict(request.args.items())
    if "seed" in params:
        random.seed(int(params["seed"]))
    # os.urandom would ignore the seed, so draw each byte from `random`.
    payload = bytearray(random.randint(0, 255) for _ in range(n))
    response = make_response()
    response.data = payload
    response.content_type = "application/octet-stream"
    return response
---
tags:
- Dynamic data
parameters:
- in: path
name: n
type: int
produces:
- application/octet-stream
responses:
200:
description: Bytes. | Below is the the instruction that describes the task:
### Input:
Returns n random bytes generated with given seed
---
tags:
- Dynamic data
parameters:
- in: path
name: n
type: int
produces:
- application/octet-stream
responses:
200:
description: Bytes.
### Response:
def random_bytes(n):
"""Returns n random bytes generated with given seed
---
tags:
- Dynamic data
parameters:
- in: path
name: n
type: int
produces:
- application/octet-stream
responses:
200:
description: Bytes.
"""
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if "seed" in params:
random.seed(int(params["seed"]))
response = make_response()
# Note: can't just use os.urandom here because it ignores the seed
response.data = bytearray(random.randint(0, 255) for i in range(n))
response.content_type = "application/octet-stream"
return response |
def _select_options(pat):
    """returns a list of keys matching `pat`

    if pat=="all", returns all registered options
    """
    # Exact key: short-circuit without scanning the registry.
    if pat in _registered_options:
        return [pat]
    keys = sorted(_registered_options.keys())
    if pat == 'all':  # reserved key meaning "everything"
        return keys
    matcher = re.compile(pat, re.I)
    return [key for key in keys if matcher.search(key)]
if pat=="all", returns all registered options | Below is the the instruction that describes the task:
### Input:
returns a list of keys matching `pat`
if pat=="all", returns all registered options
### Response:
def _select_options(pat):
"""returns a list of keys matching `pat`
if pat=="all", returns all registered options
"""
# short-circuit for exact key
if pat in _registered_options:
return [pat]
# else look through all of them
keys = sorted(_registered_options.keys())
if pat == 'all': # reserved key
return keys
return [k for k in keys if re.search(pat, k, re.I)] |
def byaxis_out(self):
    """Object to index along output dimensions.

    This is only valid for non-trivial `out_shape`.

    Examples
    --------
    Indexing with integers or slices:

    >>> domain = odl.IntervalProd(0, 1)
    >>> fspace = odl.FunctionSpace(domain, out_dtype=(float, (2, 3, 4)))
    >>> fspace.byaxis_out[0]
    FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (2,)))
    >>> fspace.byaxis_out[1]
    FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3,)))
    >>> fspace.byaxis_out[1:]
    FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3, 4)))

    Lists can be used to stack spaces arbitrarily:

    >>> fspace.byaxis_out[[2, 1, 2]]
    FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (4, 3, 4)))
    """
    space = self  # captured by the closure of the helper class below

    class FspaceByaxisOut(object):
        """Helper class for indexing by output axes."""
        def __getitem__(self, indices):
            """Return ``self[indices]``.

            Parameters
            ----------
            indices : index expression
                Object used to index the output components.

            Returns
            -------
            space : `FunctionSpace`
                The resulting space with same domain and scalar output
                data type, but indexed output components.

            Raises
            ------
            IndexError
                If this is a space of scalar-valued functions.
            """
            try:
                iter(indices)
            except TypeError:
                # Integer or slice: index the shape tuple directly.
                newshape = space.out_shape[indices]
            else:
                # Sequence of axis indices: repeats and reordering allowed.
                newshape = tuple(space.out_shape[int(i)] for i in indices)
            dtype = (space.scalar_out_dtype, newshape)
            return FunctionSpace(space.domain, out_dtype=dtype)
        def __repr__(self):
            """Return ``repr(self)``."""
            return repr(space) + '.byaxis_out'
    return FspaceByaxisOut()
This is only valid for non-trivial `out_shape`.
Examples
--------
Indexing with integers or slices:
>>> domain = odl.IntervalProd(0, 1)
>>> fspace = odl.FunctionSpace(domain, out_dtype=(float, (2, 3, 4)))
>>> fspace.byaxis_out[0]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (2,)))
>>> fspace.byaxis_out[1]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3,)))
>>> fspace.byaxis_out[1:]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3, 4)))
Lists can be used to stack spaces arbitrarily:
>>> fspace.byaxis_out[[2, 1, 2]]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (4, 3, 4))) | Below is the the instruction that describes the task:
### Input:
Object to index along output dimensions.
This is only valid for non-trivial `out_shape`.
Examples
--------
Indexing with integers or slices:
>>> domain = odl.IntervalProd(0, 1)
>>> fspace = odl.FunctionSpace(domain, out_dtype=(float, (2, 3, 4)))
>>> fspace.byaxis_out[0]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (2,)))
>>> fspace.byaxis_out[1]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3,)))
>>> fspace.byaxis_out[1:]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3, 4)))
Lists can be used to stack spaces arbitrarily:
>>> fspace.byaxis_out[[2, 1, 2]]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (4, 3, 4)))
### Response:
def byaxis_out(self):
"""Object to index along output dimensions.
This is only valid for non-trivial `out_shape`.
Examples
--------
Indexing with integers or slices:
>>> domain = odl.IntervalProd(0, 1)
>>> fspace = odl.FunctionSpace(domain, out_dtype=(float, (2, 3, 4)))
>>> fspace.byaxis_out[0]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (2,)))
>>> fspace.byaxis_out[1]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3,)))
>>> fspace.byaxis_out[1:]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3, 4)))
Lists can be used to stack spaces arbitrarily:
>>> fspace.byaxis_out[[2, 1, 2]]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (4, 3, 4)))
"""
space = self
class FspaceByaxisOut(object):
"""Helper class for indexing by output axes."""
def __getitem__(self, indices):
"""Return ``self[indices]``.
Parameters
----------
indices : index expression
Object used to index the output components.
Returns
-------
space : `FunctionSpace`
The resulting space with same domain and scalar output
data type, but indexed output components.
Raises
------
IndexError
If this is a space of scalar-valued functions.
"""
try:
iter(indices)
except TypeError:
newshape = space.out_shape[indices]
else:
newshape = tuple(space.out_shape[int(i)] for i in indices)
dtype = (space.scalar_out_dtype, newshape)
return FunctionSpace(space.domain, out_dtype=dtype)
def __repr__(self):
"""Return ``repr(self)``."""
return repr(space) + '.byaxis_out'
return FspaceByaxisOut() |
def url_input(url_string, download=True):
    """
    Fetch an XML file from a direct URL.

    The URL string is used verbatim -- no modification is applied -- so
    ensure good input. Returns the root name of the remote file, optionally
    saving the document to the working directory first.
    """
    log.debug('URL Input - {0}'.format(url_string))
    try:
        open_xml = urllib.request.urlopen(url_string)
    except urllib.error.URLError as err:
        print('utils.input.url_input received a bad URL, or could not connect')
        raise err
    # Quick sanity check on the advertised mimetype of the response.
    if open_xml.headers['Content-Type'] != 'text/xml':
        sys.exit('URL request does not appear to be XML')
    filename = open_xml.headers['Content-Disposition'].split('"')[1]
    if download:
        with open(filename, 'wb') as xml_file:
            xml_file.write(open_xml.read())
    return openaccess_epub.utils.file_root_name(filename)
modifications to the received URL string, so ensure good input. | Below is the the instruction that describes the task:
### Input:
This method expects a direct URL link to an xml file. It will apply no
modifications to the received URL string, so ensure good input.
### Response:
def url_input(url_string, download=True):
"""
This method expects a direct URL link to an xml file. It will apply no
modifications to the received URL string, so ensure good input.
"""
log.debug('URL Input - {0}'.format(url_string))
try:
open_xml = urllib.request.urlopen(url_string)
except urllib.error.URLError as err:
print('utils.input.url_input received a bad URL, or could not connect')
raise err
else:
#Employ a quick check on the mimetype of the link
if not open_xml.headers['Content-Type'] == 'text/xml':
sys.exit('URL request does not appear to be XML')
filename = open_xml.headers['Content-Disposition'].split('\"')[1]
if download:
with open(filename, 'wb') as xml_file:
xml_file.write(open_xml.read())
return openaccess_epub.utils.file_root_name(filename) |
def _parse_float_vec(vec):
    """
    Parse a vector of float values representing IBM 8 byte floats into
    native 8 byte floats.

    Parameters
    ----------
    vec : ndarray
        Array whose 8-byte elements hold big-endian IBM hexadecimal
        floats; viewed below as two big-endian uint32 halves per value.

    Returns
    -------
    ndarray of float64
        The values converted to native IEEE 754 doubles.
    """
    dtype = np.dtype('>u4,>u4')
    vec1 = vec.view(dtype=dtype)
    xport1 = vec1['f0']  # high 4 bytes: sign, IBM exponent, top of fraction
    xport2 = vec1['f1']  # low 4 bytes: remainder of the fraction
    # Start by setting first half of ieee number to first half of IBM
    # number sans exponent
    ieee1 = xport1 & 0x00ffffff
    # The fraction bit to the left of the binary point in the ieee
    # format was set and the number was shifted 0, 1, 2, or 3
    # places. This will tell us how to adjust the ibm exponent to be a
    # power of 2 ieee exponent and how to shift the fraction bits to
    # restore the correct magnitude.
    shift = np.zeros(len(vec), dtype=np.uint8)
    shift[np.where(xport1 & 0x00200000)] = 1
    shift[np.where(xport1 & 0x00400000)] = 2
    shift[np.where(xport1 & 0x00800000)] = 3
    # shift the ieee number down the correct number of places then
    # set the second half of the ieee number to be the second half
    # of the ibm number shifted appropriately, ored with the bits
    # from the first half that would have been shifted in if we
    # could shift a double. All we are worried about are the low
    # order 3 bits of the first half since we're only shifting by
    # 1, 2, or 3.
    ieee1 >>= shift
    ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
    # clear the 1 bit to the left of the binary point
    ieee1 &= 0xffefffff
    # set the exponent of the ieee number to be the actual exponent
    # plus the shift count + 1023. Or this into the first half of the
    # ieee number. The ibm exponent is excess 64 but is adjusted by 65
    # since during conversion to ibm format the exponent is
    # incremented by 1 and the fraction bits left 4 positions to the
    # right of the radix point. (had to add >> 24 because C treats &
    # 0x7f as 0x7f000000 and Python doesn't)
    ieee1 |= ((((((xport1 >> 24) & 0x7f) - 65) << 2) +
               shift + 1023) << 20) | (xport1 & 0x80000000)
    # Reassemble the two 32-bit halves and reinterpret as big-endian
    # doubles, then convert to the native float64 byte order.
    ieee = np.empty((len(ieee1),), dtype='>u4,>u4')
    ieee['f0'] = ieee1
    ieee['f1'] = ieee2
    ieee = ieee.view(dtype='>f8')
    ieee = ieee.astype('f8')
    return ieee
native 8 byte floats. | Below is the the instruction that describes the task:
### Input:
Parse a vector of float values representing IBM 8 byte floats into
native 8 byte floats.
### Response:
def _parse_float_vec(vec):
"""
Parse a vector of float values representing IBM 8 byte floats into
native 8 byte floats.
"""
dtype = np.dtype('>u4,>u4')
vec1 = vec.view(dtype=dtype)
xport1 = vec1['f0']
xport2 = vec1['f1']
# Start by setting first half of ieee number to first half of IBM
# number sans exponent
ieee1 = xport1 & 0x00ffffff
# The fraction bit to the left of the binary point in the ieee
# format was set and the number was shifted 0, 1, 2, or 3
# places. This will tell us how to adjust the ibm exponent to be a
# power of 2 ieee exponent and how to shift the fraction bits to
# restore the correct magnitude.
shift = np.zeros(len(vec), dtype=np.uint8)
shift[np.where(xport1 & 0x00200000)] = 1
shift[np.where(xport1 & 0x00400000)] = 2
shift[np.where(xport1 & 0x00800000)] = 3
# shift the ieee number down the correct number of places then
# set the second half of the ieee number to be the second half
# of the ibm number shifted appropriately, ored with the bits
# from the first half that would have been shifted in if we
# could shift a double. All we are worried about are the low
# order 3 bits of the first half since we're only shifting by
# 1, 2, or 3.
ieee1 >>= shift
ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
# clear the 1 bit to the left of the binary point
ieee1 &= 0xffefffff
# set the exponent of the ieee number to be the actual exponent
# plus the shift count + 1023. Or this into the first half of the
# ieee number. The ibm exponent is excess 64 but is adjusted by 65
# since during conversion to ibm format the exponent is
# incremented by 1 and the fraction bits left 4 positions to the
# right of the radix point. (had to add >> 24 because C treats &
# 0x7f as 0x7f000000 and Python doesn't)
ieee1 |= ((((((xport1 >> 24) & 0x7f) - 65) << 2) +
shift + 1023) << 20) | (xport1 & 0x80000000)
ieee = np.empty((len(ieee1),), dtype='>u4,>u4')
ieee['f0'] = ieee1
ieee['f1'] = ieee2
ieee = ieee.view(dtype='>f8')
ieee = ieee.astype('f8')
return ieee |
def read_int(self):
    """
    Read a pointer-sized signed little-endian integer from the reader.

    Consumes 8 bytes on a 64-bit (AMD64) target and 4 bytes on a 32-bit
    target, as reported by the reader's system information.
    """
    if self.reader.sysinfo.ProcessorArchitecture == PROCESSOR_ARCHITECTURE.AMD64:
        width = 8
    else:
        width = 4
    return int.from_bytes(self.read(width), byteorder='little', signed=True)
Reads a 4 byte small-endian singed int on 32 bit arch
Reads an 8 byte small-endian singed int on 64 bit arch | Below is the the instruction that describes the task:
### Input:
Reads an integer. The size depends on the architecture.
Reads a 4 byte small-endian singed int on 32 bit arch
Reads an 8 byte small-endian singed int on 64 bit arch
### Response:
def read_int(self):
"""
Reads an integer. The size depends on the architecture.
Reads a 4 byte small-endian singed int on 32 bit arch
Reads an 8 byte small-endian singed int on 64 bit arch
"""
if self.reader.sysinfo.ProcessorArchitecture == PROCESSOR_ARCHITECTURE.AMD64:
return int.from_bytes(self.read(8), byteorder = 'little', signed = True)
else:
return int.from_bytes(self.read(4), byteorder = 'little', signed = True) |
def find_parameter(self, name):
    """Find a parameter by name or normalized arg name.

    Returns the matching arg's parameter, or None when no arg matches.
    """
    key = self.normalize_name(name)
    entry = self.args.get(key)
    if entry is None:
        return None
    return entry.parameter
### Input:
Find parameter by name or normalized arg name.
### Response:
def find_parameter(self, name):
"""Find parameter by name or normalized arg name."""
name = self.normalize_name(name)
arg = self.args.get(name)
return None if arg is None else arg.parameter |
def getdomain(self):
    """Get the complete domain name from an address."""
    parts = []
    while self.pos < len(self.field):
        ch = self.field[self.pos]
        if ch in self.LWS:
            # Skip linear whitespace between tokens.
            self.pos += 1
        elif ch == '(':
            self.commentlist.append(self.getcomment())
        elif ch == '[':
            parts.append(self.getdomainliteral())
        elif ch == '.':
            self.pos += 1
            parts.append('.')
        elif ch in self.atomends:
            # End of the domain portion of the address.
            break
        else:
            parts.append(self.getatom())
    return ''.join(parts)
### Input:
Get the complete domain name from an address.
### Response:
def getdomain(self):
"""Get the complete domain name from an address."""
sdlist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS:
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] == '[':
sdlist.append(self.getdomainliteral())
elif self.field[self.pos] == '.':
self.pos += 1
sdlist.append('.')
elif self.field[self.pos] in self.atomends:
break
else: sdlist.append(self.getatom())
return ''.join(sdlist) |
def real_main(new_url=None,
              baseline_url=None,
              upload_build_id=None,
              upload_release_name=None):
    """Runs the ur_pair_diff.

    Args:
        new_url: URL used as the "new" side of the pair diff.
        baseline_url: URL used as the baseline side of the pair diff.
        upload_build_id: Build ID the results are uploaded under.
        upload_release_name: Optional release name used for the upload.
    """
    # Set up the worker coordinator and register the fetch workflow on it.
    coordinator = workers.get_coordinator()
    fetch_worker.register(coordinator)
    coordinator.start()
    # Build the root work item; PrintWorkflow reports heartbeat progress.
    item = UrlPairDiff(
        new_url,
        baseline_url,
        upload_build_id,
        upload_release_name=upload_release_name,
        heartbeat=workers.PrintWorkflow)
    item.root = True
    coordinator.input_queue.put(item)
    # Block until the root item completes, then shut down cleanly.
    coordinator.wait_one()
    coordinator.stop()
    coordinator.join()
### Input:
Runs the ur_pair_diff.
### Response:
def real_main(new_url=None,
baseline_url=None,
upload_build_id=None,
upload_release_name=None):
"""Runs the ur_pair_diff."""
coordinator = workers.get_coordinator()
fetch_worker.register(coordinator)
coordinator.start()
item = UrlPairDiff(
new_url,
baseline_url,
upload_build_id,
upload_release_name=upload_release_name,
heartbeat=workers.PrintWorkflow)
item.root = True
coordinator.input_queue.put(item)
coordinator.wait_one()
coordinator.stop()
coordinator.join() |
def NRM(f,a,b,best):
WARN = True # Warn, rather than stop if I encounter a NaN...
"""
Calculate NRM expected lab field and estimated ancient field
NRM(blab,best)= (best/blab)*TRM(blab)
"""
if float(f)==0:
print('ERROR: NRM: f==0.')
if not WARN : sys.exit()
m = (old_div(float(best),float(f))) * TRM(f,a,b)
return float(m) | Calculate NRM expected lab field and estimated ancient field
NRM(blab,best)= (best/blab)*TRM(blab) | Below is the instruction that describes the task:
### Input:
Calculate NRM expected lab field and estimated ancient field
NRM(blab,best)= (best/blab)*TRM(blab)
### Response:
def NRM(f, a, b, best):
    """
    Calculate NRM expected lab field and estimated ancient field
    NRM(blab,best)= (best/blab)*TRM(blab)
    """
    # Fix: the docstring above previously appeared *after* the first
    # statement, so it was a no-op string expression and NRM.__doc__
    # was never set; it must be the first statement in the body.
    # Warn, rather than stop if I encounter a NaN...
    WARN = True
    if float(f) == 0:
        print('ERROR: NRM: f==0.')
        # NOTE(review): WARN is hard-coded True, so execution continues
        # and old_div() below raises ZeroDivisionError anyway -- confirm
        # whether aborting here was the intended failure mode.
        if not WARN:
            sys.exit()
    m = (old_div(float(best), float(f))) * TRM(f, a, b)
    return float(m)
def get_channels(self):
"""Get the selected channel(s in order). """
selectedItems = self.idx_chan.selectedItems()
selected_chan = [x.text() for x in selectedItems]
chan_in_order = []
for chan in self.chan:
if chan in selected_chan:
chan_in_order.append(chan)
return chan_in_order | Get the selected channel(s in order). | Below is the instruction that describes the task:
### Input:
Get the selected channel(s in order).
### Response:
def get_channels(self):
    """Get the selected channel(s), preserving recording order.

    Returns the channels from ``self.chan`` that are currently
    selected in the channel list widget, in ``self.chan`` order.
    """
    selected = {item.text() for item in self.idx_chan.selectedItems()}
    return [chan for chan in self.chan if chan in selected]
def get_coh_PTF_files(cp, ifos, run_dir, bank_veto=False, summary_files=False):
"""
Retrieve files needed to run coh_PTF jobs within a PyGRB workflow
Parameters
----------
cp : pycbc.workflow.configuration.WorkflowConfigParser object
The parsed configuration options of a pycbc.workflow.core.Workflow.
ifos : str
String containing the analysis interferometer IDs.
run_dir : str
The run directory, destination for retrieved files.
bank_veto : Boolean
If true, will retrieve the bank_veto_bank.xml file.
summary_files : Boolean
If true, will retrieve the summary page style files.
Returns
-------
file_list : pycbc.workflow.FileList object
A FileList containing the retrieved files.
"""
if os.getenv("LAL_SRC") is None:
raise ValueError("The environment variable LAL_SRC must be set to a "
"location containing the file lalsuite.git")
else:
lalDir = os.getenv("LAL_SRC")
sci_seg = segments.segment(int(cp.get("workflow", "start-time")),
int(cp.get("workflow", "end-time")))
file_list = FileList([])
# Bank veto
if bank_veto:
shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
"bank_veto_bank.xml" % lalDir, "%s" % run_dir)
bank_veto_url = "file://localhost%s/bank_veto_bank.xml" % run_dir
bank_veto = File(ifos, "bank_veto_bank", sci_seg,
file_url=bank_veto_url)
bank_veto.PFN(bank_veto.cache_entry.path, site="local")
file_list.extend(FileList([bank_veto]))
if summary_files:
# summary.js file
shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
"coh_PTF_html_summary.js" % lalDir, "%s" % run_dir)
summary_js_url = "file://localhost%s/coh_PTF_html_summary.js" \
% run_dir
summary_js = File(ifos, "coh_PTF_html_summary_js", sci_seg,
file_url=summary_js_url)
summary_js.PFN(summary_js.cache_entry.path, site="local")
file_list.extend(FileList([summary_js]))
# summary.css file
shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
"coh_PTF_html_summary.css" % lalDir, "%s" % run_dir)
summary_css_url = "file://localhost%s/coh_PTF_html_summary.css" \
% run_dir
summary_css = File(ifos, "coh_PTF_html_summary_css", sci_seg,
file_url=summary_css_url)
summary_css.PFN(summary_css.cache_entry.path, site="local")
file_list.extend(FileList([summary_css]))
return file_list | Retrieve files needed to run coh_PTF jobs within a PyGRB workflow
Parameters
----------
cp : pycbc.workflow.configuration.WorkflowConfigParser object
The parsed configuration options of a pycbc.workflow.core.Workflow.
ifos : str
String containing the analysis interferometer IDs.
run_dir : str
The run directory, destination for retrieved files.
bank_veto : Boolean
If true, will retrieve the bank_veto_bank.xml file.
summary_files : Boolean
If true, will retrieve the summary page style files.
Returns
-------
file_list : pycbc.workflow.FileList object
A FileList containing the retrieved files. | Below is the the instruction that describes the task:
### Input:
Retrieve files needed to run coh_PTF jobs within a PyGRB workflow
Parameters
----------
cp : pycbc.workflow.configuration.WorkflowConfigParser object
The parsed configuration options of a pycbc.workflow.core.Workflow.
ifos : str
String containing the analysis interferometer IDs.
run_dir : str
The run directory, destination for retrieved files.
bank_veto : Boolean
If true, will retrieve the bank_veto_bank.xml file.
summary_files : Boolean
If true, will retrieve the summary page style files.
Returns
-------
file_list : pycbc.workflow.FileList object
A FileList containing the retrieved files.
### Response:
def get_coh_PTF_files(cp, ifos, run_dir, bank_veto=False, summary_files=False):
    """
    Retrieve files needed to run coh_PTF jobs within a PyGRB workflow

    Parameters
    ----------
    cp : pycbc.workflow.configuration.WorkflowConfigParser object
        The parsed configuration options of a pycbc.workflow.core.Workflow.
    ifos : str
        String containing the analysis interferometer IDs.
    run_dir : str
        The run directory, destination for retrieved files.
    bank_veto : Boolean
        If true, will retrieve the bank_veto_bank.xml file.
    summary_files : Boolean
        If true, will retrieve the summary page style files.

    Returns
    -------
    file_list : pycbc.workflow.FileList object
        A FileList containing the retrieved files.

    Raises
    ------
    ValueError
        If the LAL_SRC environment variable is not set.
    """
    # The auxiliary files ship inside the lalsuite source tree, so
    # LAL_SRC must point at a checkout.
    if os.getenv("LAL_SRC") is None:
        raise ValueError("The environment variable LAL_SRC must be set to a "
                         "location containing the file lalsuite.git")
    else:
        lalDir = os.getenv("LAL_SRC")
    # Science segment spanning the whole analysis; used to tag each File.
    sci_seg = segments.segment(int(cp.get("workflow", "start-time")),
                               int(cp.get("workflow", "end-time")))
    file_list = FileList([])

    # Bank veto
    if bank_veto:
        # Copy the bank-veto template bank into the run directory and
        # wrap it as a File with a local physical file name (PFN).
        shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
                    "bank_veto_bank.xml" % lalDir, "%s" % run_dir)
        bank_veto_url = "file://localhost%s/bank_veto_bank.xml" % run_dir
        bank_veto = File(ifos, "bank_veto_bank", sci_seg,
                         file_url=bank_veto_url)
        bank_veto.PFN(bank_veto.cache_entry.path, site="local")
        file_list.extend(FileList([bank_veto]))

    if summary_files:
        # summary.js file
        shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
                    "coh_PTF_html_summary.js" % lalDir, "%s" % run_dir)
        summary_js_url = "file://localhost%s/coh_PTF_html_summary.js" \
                         % run_dir
        summary_js = File(ifos, "coh_PTF_html_summary_js", sci_seg,
                          file_url=summary_js_url)
        summary_js.PFN(summary_js.cache_entry.path, site="local")
        file_list.extend(FileList([summary_js]))

        # summary.css file
        shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
                    "coh_PTF_html_summary.css" % lalDir, "%s" % run_dir)
        summary_css_url = "file://localhost%s/coh_PTF_html_summary.css" \
                          % run_dir
        summary_css = File(ifos, "coh_PTF_html_summary_css", sci_seg,
                           file_url=summary_css_url)
        summary_css.PFN(summary_css.cache_entry.path, site="local")
        file_list.extend(FileList([summary_css]))

    return file_list
def _expire_data(self):
"""
Remove all expired entries.
"""
expire_time_stamp = time.time() - self.expire_time
self.timed_data = {d: t for d, t in self.timed_data.items()
if t > expire_time_stamp} | Remove all expired entries. | Below is the the instruction that describes the task:
### Input:
Remove all expired entries.
### Response:
def _expire_data(self):
"""
Remove all expired entries.
"""
expire_time_stamp = time.time() - self.expire_time
self.timed_data = {d: t for d, t in self.timed_data.items()
if t > expire_time_stamp} |
def checkForSpiceError(f):
"""
Internal function to check
:param f:
:raise stypes.SpiceyError:
"""
if failed():
errorparts = {
"tkvsn": tkvrsn("TOOLKIT").replace("CSPICE_", ""),
"short": getmsg("SHORT", 26),
"explain": getmsg("EXPLAIN", 100).strip(),
"long": getmsg("LONG", 321).strip(),
"traceback": qcktrc(200)}
msg = stypes.errorformat.format(**errorparts)
reset()
raise stypes.SpiceyError(msg) | Internal function to check
:param f:
:raise stypes.SpiceyError: | Below is the the instruction that describes the task:
### Input:
Internal function to check
:param f:
:raise stypes.SpiceyError:
### Response:
def checkForSpiceError(f):
    """
    Internal function to check the CSPICE error status and raise if set.

    :param f: unused by this function
    :raise stypes.SpiceyError: if the CSPICE error flag is set
    """
    # Only act when the CSPICE error subsystem reports a failure.
    if failed():
        # Collect the individual pieces of the CSPICE error report:
        # toolkit version, short/explain/long messages and a traceback.
        errorparts = {
            "tkvsn": tkvrsn("TOOLKIT").replace("CSPICE_", ""),
            "short": getmsg("SHORT", 26),
            "explain": getmsg("EXPLAIN", 100).strip(),
            "long": getmsg("LONG", 321).strip(),
            "traceback": qcktrc(200)}
        msg = stypes.errorformat.format(**errorparts)
        # Clear the CSPICE error state before raising so subsequent
        # calls start clean.
        reset()
        raise stypes.SpiceyError(msg)
def basic_consume(self, queue='', consumer_tag='', no_local=False,
no_ack=False, exclusive=False, nowait=False,
callback=None, ticket=None):
"""
start a queue consumer
This method asks the server to start a "consumer", which is a
transient request for messages from a specific queue.
Consumers last as long as the channel they were created on, or
until the client cancels them.
RULE:
The server SHOULD support at least 16 consumers per queue,
unless the queue was declared as private, and ideally,
impose no limit except as defined by available resources.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to consume from. If
the queue name is null, refers to the current queue
for the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
consumer_tag: shortstr
Specifies the identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
RULE:
The tag MUST NOT refer to an existing consumer. If
the client attempts to create two consumers with
the same non-empty tag the server MUST raise a
connection exception with reply code 530 (not
allowed).
no_local: boolean
do not deliver own messages
If the no-local field is set the server will not send
messages to the client that published them.
no_ack: boolean
no acknowledgement needed
If this field is set the server does not expect
acknowledgments for messages. That is, when a message
is delivered to the client the server automatically and
silently acknowledges it on behalf of the client. This
functionality increases performance but at the cost of
reliability. Messages can get lost if a client dies
before it can deliver them to the application.
exclusive: boolean
request exclusive access
Request exclusive consumer access, meaning only this
consumer can access the queue.
RULE:
If the server cannot grant exclusive access to the
queue when asked, - because there are other
consumers active - it MUST raise a channel
exception with return code 403 (access refused).
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
callback: Python callable
function/method called with each delivered message
For each message delivered by the broker, the
callable will be called with a Message object
as the single argument. If no callable is specified,
messages are quietly discarded, no_ack should probably
be set to True in that case.
ticket: short
RULE:
The client MUST provide a valid access ticket
giving "read" access rights to the realm for the
queue.
"""
args = AMQPWriter()
if ticket is not None:
args.write_short(ticket)
else:
args.write_short(self.default_ticket)
args.write_shortstr(queue)
args.write_shortstr(consumer_tag)
args.write_bit(no_local)
args.write_bit(no_ack)
args.write_bit(exclusive)
args.write_bit(nowait)
self._send_method((60, 20), args)
if not nowait:
consumer_tag = self.wait(allowed_methods=[
(60, 21), # Channel.basic_consume_ok
])
self.callbacks[consumer_tag] = callback
return consumer_tag | start a queue consumer
This method asks the server to start a "consumer", which is a
transient request for messages from a specific queue.
Consumers last as long as the channel they were created on, or
until the client cancels them.
RULE:
The server SHOULD support at least 16 consumers per queue,
unless the queue was declared as private, and ideally,
impose no limit except as defined by available resources.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to consume from. If
the queue name is null, refers to the current queue
for the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
consumer_tag: shortstr
Specifies the identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
RULE:
The tag MUST NOT refer to an existing consumer. If
the client attempts to create two consumers with
the same non-empty tag the server MUST raise a
connection exception with reply code 530 (not
allowed).
no_local: boolean
do not deliver own messages
If the no-local field is set the server will not send
messages to the client that published them.
no_ack: boolean
no acknowledgement needed
If this field is set the server does not expect
acknowledgments for messages. That is, when a message
is delivered to the client the server automatically and
silently acknowledges it on behalf of the client. This
functionality increases performance but at the cost of
reliability. Messages can get lost if a client dies
before it can deliver them to the application.
exclusive: boolean
request exclusive access
Request exclusive consumer access, meaning only this
consumer can access the queue.
RULE:
If the server cannot grant exclusive access to the
queue when asked, - because there are other
consumers active - it MUST raise a channel
exception with return code 403 (access refused).
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
callback: Python callable
function/method called with each delivered message
For each message delivered by the broker, the
callable will be called with a Message object
as the single argument. If no callable is specified,
messages are quietly discarded, no_ack should probably
be set to True in that case.
ticket: short
RULE:
The client MUST provide a valid access ticket
giving "read" access rights to the realm for the
queue. | Below is the the instruction that describes the task:
### Input:
start a queue consumer
This method asks the server to start a "consumer", which is a
transient request for messages from a specific queue.
Consumers last as long as the channel they were created on, or
until the client cancels them.
RULE:
The server SHOULD support at least 16 consumers per queue,
unless the queue was declared as private, and ideally,
impose no limit except as defined by available resources.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to consume from. If
the queue name is null, refers to the current queue
for the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
consumer_tag: shortstr
Specifies the identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
RULE:
The tag MUST NOT refer to an existing consumer. If
the client attempts to create two consumers with
the same non-empty tag the server MUST raise a
connection exception with reply code 530 (not
allowed).
no_local: boolean
do not deliver own messages
If the no-local field is set the server will not send
messages to the client that published them.
no_ack: boolean
no acknowledgement needed
If this field is set the server does not expect
acknowledgments for messages. That is, when a message
is delivered to the client the server automatically and
silently acknowledges it on behalf of the client. This
functionality increases performance but at the cost of
reliability. Messages can get lost if a client dies
before it can deliver them to the application.
exclusive: boolean
request exclusive access
Request exclusive consumer access, meaning only this
consumer can access the queue.
RULE:
If the server cannot grant exclusive access to the
queue when asked, - because there are other
consumers active - it MUST raise a channel
exception with return code 403 (access refused).
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
callback: Python callable
function/method called with each delivered message
For each message delivered by the broker, the
callable will be called with a Message object
as the single argument. If no callable is specified,
messages are quietly discarded, no_ack should probably
be set to True in that case.
ticket: short
RULE:
The client MUST provide a valid access ticket
giving "read" access rights to the realm for the
queue.
### Response:
def basic_consume(self, queue='', consumer_tag='', no_local=False,
                  no_ack=False, exclusive=False, nowait=False,
                  callback=None, ticket=None):
    """
    start a queue consumer

    This method asks the server to start a "consumer", which is a
    transient request for messages from a specific queue.
    Consumers last as long as the channel they were created on, or
    until the client cancels them.

    RULE:

        The server SHOULD support at least 16 consumers per queue,
        unless the queue was declared as private, and ideally,
        impose no limit except as defined by available resources.

    PARAMETERS:
        queue: shortstr

            Specifies the name of the queue to consume from.  If
            the queue name is null, refers to the current queue
            for the channel, which is the last declared queue.

            RULE:

                If the client did not previously declare a queue,
                and the queue name in this method is empty, the
                server MUST raise a connection exception with
                reply code 530 (not allowed).

        consumer_tag: shortstr

            Specifies the identifier for the consumer.  The
            consumer tag is local to a connection, so two clients
            can use the same consumer tags.  If this field is empty
            the server will generate a unique tag.

            RULE:

                The tag MUST NOT refer to an existing consumer.  If
                the client attempts to create two consumers with
                the same non-empty tag the server MUST raise a
                connection exception with reply code 530 (not
                allowed).

        no_local: boolean

            do not deliver own messages

            If the no-local field is set the server will not send
            messages to the client that published them.

        no_ack: boolean

            no acknowledgement needed

            If this field is set the server does not expect
            acknowledgments for messages.  That is, when a message
            is delivered to the client the server automatically and
            silently acknowledges it on behalf of the client.  This
            functionality increases performance but at the cost of
            reliability.  Messages can get lost if a client dies
            before it can deliver them to the application.

        exclusive: boolean

            request exclusive access

            Request exclusive consumer access, meaning only this
            consumer can access the queue.

            RULE:

                If the server cannot grant exclusive access to the
                queue when asked, - because there are other
                consumers active - it MUST raise a channel
                exception with return code 403 (access refused).

        nowait: boolean

            do not send a reply method

            If set, the server will not respond to the method.  The
            client should not wait for a reply method.  If the
            server could not complete the method it will raise a
            channel or connection exception.

        callback: Python callable

            function/method called with each delivered message

            For each message delivered by the broker, the
            callable will be called with a Message object
            as the single argument.  If no callable is specified,
            messages are quietly discarded, no_ack should probably
            be set to True in that case.

        ticket: short

            RULE:

                The client MUST provide a valid access ticket
                giving "read" access rights to the realm for the
                queue.
    """
    args = AMQPWriter()
    # Access ticket: fall back to the channel's default when none given.
    if ticket is not None:
        args.write_short(ticket)
    else:
        args.write_short(self.default_ticket)
    args.write_shortstr(queue)
    args.write_shortstr(consumer_tag)
    # Flags are written as bits, in wire order.
    args.write_bit(no_local)
    args.write_bit(no_ack)
    args.write_bit(exclusive)
    args.write_bit(nowait)
    # (60, 20): class 60 (Basic), method 20 -- the consume request.
    self._send_method((60, 20), args)
    if not nowait:
        # Wait for the consume-ok reply, which carries the (possibly
        # server-generated) consumer tag.
        consumer_tag = self.wait(allowed_methods=[
            (60, 21),  # Channel.basic_consume_ok
        ])
    # Register the per-message callback under the final consumer tag.
    # NOTE(review): with nowait=True the caller-supplied tag is used
    # unchanged here -- confirm callers always pass a non-empty tag in
    # that case.
    self.callbacks[consumer_tag] = callback
    return consumer_tag
def do_imports(self):
"""
Import all importable options
"""
self.do_import('worker_class', Worker)
self.do_import('queue_model', self.options.worker_class.queue_model)
self.do_import('error_model', self.options.worker_class.error_model)
self.do_import('callback', self.options.worker_class.callback) | Import all importable options | Below is the the instruction that describes the task:
### Input:
Import all importable options
### Response:
def do_imports(self):
    """
    Import all importable options.

    Order matters: ``worker_class`` must be resolved first, because
    the defaults for the remaining options are read off the freshly
    imported worker class.
    """
    self.do_import('worker_class', Worker)
    # The following defaults are attributes of the class imported above.
    self.do_import('queue_model', self.options.worker_class.queue_model)
    self.do_import('error_model', self.options.worker_class.error_model)
    self.do_import('callback', self.options.worker_class.callback)
def rename(self, target):
"""
Rename this path to the given path.
"""
if self._closed:
self._raise_closed()
self._accessor.rename(self, target) | Rename this path to the given path. | Below is the the instruction that describes the task:
### Input:
Rename this path to the given path.
### Response:
def rename(self, target):
    """
    Rename this path to the given path.

    :param target: the path this path object is renamed to.
    """
    # Refuse to operate on a path object that has been closed;
    # _raise_closed() raises rather than returning.
    if self._closed:
        self._raise_closed()
    # Delegate the actual OS-level rename to the accessor.
    self._accessor.rename(self, target)
def update_credentials(self, key, secret):
''' Update credentials '''
self.access_key = key
self.secret_key = secret | Update credentials | Below is the the instruction that describes the task:
### Input:
Update credentials
### Response:
def update_credentials(self, key, secret):
    """Replace the stored access/secret key pair."""
    self.access_key, self.secret_key = key, secret
def calculate(self, T, P, zs, ws, method):
r'''Method to calculate thermal conductivity of a liquid mixture at
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
k : float
Thermal conductivity of the liquid mixture, [W/m/K]
'''
if method == SIMPLE:
ks = [i(T, P) for i in self.ThermalConductivityLiquids]
return mixing_simple(zs, ks)
elif method == DIPPR_9H:
ks = [i(T, P) for i in self.ThermalConductivityLiquids]
return DIPPR9H(ws, ks)
elif method == FILIPPOV:
ks = [i(T, P) for i in self.ThermalConductivityLiquids]
return Filippov(ws, ks)
elif method == MAGOMEDOV:
k_w = self.ThermalConductivityLiquids[self.index_w](T, P)
ws = list(ws) ; ws.pop(self.index_w)
return thermal_conductivity_Magomedov(T, P, ws, self.wCASs, k_w)
else:
raise Exception('Method not valid') | r'''Method to calculate thermal conductivity of a liquid mixture at
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
k : float
Thermal conductivity of the liquid mixture, [W/m/K] | Below is the the instruction that describes the task:
### Input:
r'''Method to calculate thermal conductivity of a liquid mixture at
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
k : float
Thermal conductivity of the liquid mixture, [W/m/K]
### Response:
def calculate(self, T, P, zs, ws, method):
    r'''Method to calculate thermal conductivity of a liquid mixture at
    temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
    `ws` with a given method.

    This method has no exception handling; see `mixture_property`
    for that.

    Parameters
    ----------
    T : float
        Temperature at which to calculate the property, [K]
    P : float
        Pressure at which to calculate the property, [Pa]
    zs : list[float]
        Mole fractions of all species in the mixture, [-]
    ws : list[float]
        Weight fractions of all species in the mixture, [-]
    method : str
        Name of the method to use

    Returns
    -------
    k : float
        Thermal conductivity of the liquid mixture, [W/m/K]
    '''
    if method in (SIMPLE, DIPPR_9H, FILIPPOV):
        # These three mixing rules all start from the pure-component
        # conductivities; compute them once instead of per-branch.
        ks = [i(T, P) for i in self.ThermalConductivityLiquids]
        if method == SIMPLE:
            return mixing_simple(zs, ks)
        if method == DIPPR_9H:
            return DIPPR9H(ws, ks)
        return Filippov(ws, ks)
    elif method == MAGOMEDOV:
        # Magomedov's correlation takes the solvent (water)
        # conductivity and the solute weight fractions only.
        k_w = self.ThermalConductivityLiquids[self.index_w](T, P)
        ws_solutes = [w for i, w in enumerate(ws) if i != self.index_w]
        return thermal_conductivity_Magomedov(T, P, ws_solutes,
                                              self.wCASs, k_w)
    else:
        raise Exception('Method not valid')
def find_visible_elements(driver, selector, by=By.CSS_SELECTOR):
"""
Finds all WebElements that match a selector and are visible.
Similar to webdriver.find_elements.
@Params
driver - the webdriver object (required)
selector - the locator that is used to search the DOM (required)
by - the method to search for the locator (Default: By.CSS_SELECTOR)
"""
elements = driver.find_elements(by=by, value=selector)
return [element for element in elements if element.is_displayed()] | Finds all WebElements that match a selector and are visible.
Similar to webdriver.find_elements.
@Params
driver - the webdriver object (required)
selector - the locator that is used to search the DOM (required)
by - the method to search for the locator (Default: By.CSS_SELECTOR) | Below is the the instruction that describes the task:
### Input:
Finds all WebElements that match a selector and are visible.
Similar to webdriver.find_elements.
@Params
driver - the webdriver object (required)
selector - the locator that is used to search the DOM (required)
by - the method to search for the locator (Default: By.CSS_SELECTOR)
### Response:
def find_visible_elements(driver, selector, by=By.CSS_SELECTOR):
    """
    Find every WebElement that matches a selector and is visible.
    Similar to webdriver.find_elements.
    @Params
    driver - the webdriver object (required)
    selector - the locator that is used to search the DOM (required)
    by - the method to search for the locator (Default: By.CSS_SELECTOR)
    """
    candidates = driver.find_elements(by=by, value=selector)
    visible = []
    for candidate in candidates:
        if candidate.is_displayed():
            visible.append(candidate)
    return visible
def transformer_moe_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.norm_type = "layer"
hparams.hidden_size = 512
hparams.batch_size = 4096
hparams.max_length = 2001
hparams.max_input_seq_length = 2000
hparams.max_target_seq_length = 2000
hparams.dropout = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 2000
hparams.initializer_gain = 1.0
hparams.num_hidden_layers = 5
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.num_sampled_classes = 0
hparams.label_smoothing = 0.0
hparams.shared_embedding_and_softmax_weights = True
# According to noam, ("n", "da") seems better for harder-to-learn models
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
# Hparams used by transformer_prepare_decoder() function
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("proximity_bias", False)
hparams.add_hparam("causal_decoder_self_attention", True)
hparams = common_attention.add_standard_attention_hparams(hparams)
# Decoder layers type. If set, num_decoder_layers parameter will be ignored
# and the number of decoder layer will be deduced from the string
# See top file comment for example of usage
hparams.add_hparam("layer_types", "")
# Default attention type (ex: a, loc, red,...) and feed-forward type (ex: fc,
# sep, moe,...)
hparams.add_hparam("default_att", "a")
hparams.add_hparam("default_ff", "fc")
return hparams | Set of hyperparameters. | Below is the the instruction that describes the task:
### Input:
Set of hyperparameters.
### Response:
def transformer_moe_base():
    """Set of hyperparameters for the base transformer-MoE model."""
    hparams = common_hparams.basic_params1()
    hparams.norm_type = "layer"
    hparams.hidden_size = 512
    hparams.batch_size = 4096
    hparams.max_length = 2001
    hparams.max_input_seq_length = 2000
    hparams.max_target_seq_length = 2000
    hparams.dropout = 0.0
    hparams.clip_grad_norm = 0.  # i.e. no gradient clipping
    hparams.optimizer_adam_epsilon = 1e-9
    hparams.learning_rate_decay_scheme = "noam"
    hparams.learning_rate = 0.1
    hparams.learning_rate_warmup_steps = 2000
    hparams.initializer_gain = 1.0
    hparams.num_hidden_layers = 5
    hparams.initializer = "uniform_unit_scaling"
    hparams.weight_decay = 0.0
    hparams.optimizer_adam_beta1 = 0.9
    hparams.optimizer_adam_beta2 = 0.98
    hparams.num_sampled_classes = 0
    hparams.label_smoothing = 0.0
    hparams.shared_embedding_and_softmax_weights = True
    # According to noam, ("n", "da") seems better for harder-to-learn models
    hparams.layer_preprocess_sequence = "n"
    hparams.layer_postprocess_sequence = "da"
    # Hparams used by transformer_prepare_decoder() function
    hparams.add_hparam("pos", "timing")  # timing, none
    hparams.add_hparam("proximity_bias", False)
    hparams.add_hparam("causal_decoder_self_attention", True)
    hparams = common_attention.add_standard_attention_hparams(hparams)
    # Decoder layers type. If set, num_decoder_layers parameter will be ignored
    # and the number of decoder layer will be deduced from the string
    # See top file comment for example of usage
    hparams.add_hparam("layer_types", "")
    # Default attention type (ex: a, loc, red,...) and feed-forward type (ex: fc,
    # sep, moe,...)
    hparams.add_hparam("default_att", "a")
    hparams.add_hparam("default_ff", "fc")
    return hparams
def date_factory(value, datatype_cls, validation_level=None):
"""
Creates a :class:`DT <hl7apy.base_datatypes.DT>` object
The value in input must be a string parsable with :meth:`datetime.strptime`.
The date format is chosen according to the length of the value as stated in this table:
+-------+-----------+
|Length |Format |
+=======+===========+
|4 |``%Y`` |
| | |
+-------+-----------+
|6 |``%Y%m`` |
| | |
+-------+-----------+
|8 |``%Y%m%d`` |
| | |
+-------+-----------+
Some examples that work are:
>>> from hl7apy.base_datatypes import DT
>>> date_factory("1974", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
>>> date_factory("198302", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
>>> date_factory("19880312", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
If the value does not match one of the valid format it raises :exc:`ValueError`
:type value: ``str``
:param value: the value to assign the date object
:type datatype_cls: `class`
:param value: the :class:`DT <hl7apy.base_datatypes.DT>` class to use. It has to be one implementation of
the different version modules
:type validation_level: ``int``
:param validation_level: It must be a value from class :attr:`validation_level`
:class:`VALIDATION_LEVEL hl7apy.consts.VALIDATION_LEVEL` or ``None`` to use the default value
:rtype: :class:`hl7apy.base_datatypes.DT`
"""
dt_value, fmt = get_date_info(value)
return datatype_cls(dt_value, fmt) | Creates a :class:`DT <hl7apy.base_datatypes.DT>` object
The value in input must be a string parsable with :meth:`datetime.strptime`.
The date format is chosen according to the length of the value as stated in this table:
+-------+-----------+
|Length |Format |
+=======+===========+
|4 |``%Y`` |
| | |
+-------+-----------+
|6 |``%Y%m`` |
| | |
+-------+-----------+
|8 |``%Y%m%d`` |
| | |
+-------+-----------+
Some examples that work are:
>>> from hl7apy.base_datatypes import DT
>>> date_factory("1974", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
>>> date_factory("198302", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
>>> date_factory("19880312", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
If the value does not match one of the valid format it raises :exc:`ValueError`
:type value: ``str``
:param value: the value to assign the date object
:type datatype_cls: `class`
:param value: the :class:`DT <hl7apy.base_datatypes.DT>` class to use. It has to be one implementation of
the different version modules
:type validation_level: ``int``
:param validation_level: It must be a value from class :attr:`validation_level`
:class:`VALIDATION_LEVEL hl7apy.consts.VALIDATION_LEVEL` or ``None`` to use the default value
    :rtype: :class:`hl7apy.base_datatypes.DT` | Below is the instruction that describes the task:
### Input:
Creates a :class:`DT <hl7apy.base_datatypes.DT>` object
The value in input must be a string parsable with :meth:`datetime.strptime`.
The date format is chosen according to the length of the value as stated in this table:
+-------+-----------+
|Length |Format |
+=======+===========+
|4 |``%Y`` |
| | |
+-------+-----------+
|6 |``%Y%m`` |
| | |
+-------+-----------+
|8 |``%Y%m%d`` |
| | |
+-------+-----------+
Some examples that work are:
>>> from hl7apy.base_datatypes import DT
>>> date_factory("1974", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
>>> date_factory("198302", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
>>> date_factory("19880312", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
If the value does not match one of the valid format it raises :exc:`ValueError`
:type value: ``str``
:param value: the value to assign the date object
:type datatype_cls: `class`
:param value: the :class:`DT <hl7apy.base_datatypes.DT>` class to use. It has to be one implementation of
the different version modules
:type validation_level: ``int``
:param validation_level: It must be a value from class :attr:`validation_level`
:class:`VALIDATION_LEVEL hl7apy.consts.VALIDATION_LEVEL` or ``None`` to use the default value
:rtype: :class:`hl7apy.base_datatypes.DT`
### Response:
def date_factory(value, datatype_cls, validation_level=None):
"""
Creates a :class:`DT <hl7apy.base_datatypes.DT>` object
The value in input must be a string parsable with :meth:`datetime.strptime`.
The date format is chosen according to the length of the value as stated in this table:
+-------+-----------+
|Length |Format |
+=======+===========+
|4 |``%Y`` |
| | |
+-------+-----------+
|6 |``%Y%m`` |
| | |
+-------+-----------+
|8 |``%Y%m%d`` |
| | |
+-------+-----------+
Some examples that work are:
>>> from hl7apy.base_datatypes import DT
>>> date_factory("1974", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
>>> date_factory("198302", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
>>> date_factory("19880312", DT) #doctest: +ELLIPSIS
<hl7apy.base_datatypes.DT object at 0x...>
If the value does not match one of the valid format it raises :exc:`ValueError`
:type value: ``str``
:param value: the value to assign the date object
:type datatype_cls: `class`
:param value: the :class:`DT <hl7apy.base_datatypes.DT>` class to use. It has to be one implementation of
the different version modules
:type validation_level: ``int``
:param validation_level: It must be a value from class :attr:`validation_level`
:class:`VALIDATION_LEVEL hl7apy.consts.VALIDATION_LEVEL` or ``None`` to use the default value
:rtype: :class:`hl7apy.base_datatypes.DT`
"""
dt_value, fmt = get_date_info(value)
return datatype_cls(dt_value, fmt) |
def hide_busy(self):
"""Unlock buttons A helper function to indicate processing is done."""
self.progress_bar.hide()
self.parent.pbnNext.setEnabled(True)
self.parent.pbnBack.setEnabled(True)
self.parent.pbnCancel.setEnabled(True)
self.parent.repaint()
        disable_busy_cursor() | Unlock buttons A helper function to indicate processing is done. | Below is the instruction that describes the task:
### Input:
Unlock buttons A helper function to indicate processing is done.
### Response:
def hide_busy(self):
"""Unlock buttons A helper function to indicate processing is done."""
self.progress_bar.hide()
self.parent.pbnNext.setEnabled(True)
self.parent.pbnBack.setEnabled(True)
self.parent.pbnCancel.setEnabled(True)
self.parent.repaint()
disable_busy_cursor() |
def nn_setsockopt(socket, level, option, value):
"""set a socket option
socket - socket number
level - option level
option - option
value - a readable byte buffer (not a Unicode string) containing the value
returns - 0 on success or < 0 on error
"""
try:
return _nn_setsockopt(socket, level, option, ctypes.addressof(value),
len(value))
except (TypeError, AttributeError):
buf_value = ctypes.create_string_buffer(value)
return _nn_setsockopt(socket, level, option,
ctypes.addressof(buf_value), len(value)) | set a socket option
socket - socket number
level - option level
option - option
value - a readable byte buffer (not a Unicode string) containing the value
    returns - 0 on success or < 0 on error | Below is the instruction that describes the task:
### Input:
set a socket option
socket - socket number
level - option level
option - option
value - a readable byte buffer (not a Unicode string) containing the value
returns - 0 on success or < 0 on error
### Response:
def nn_setsockopt(socket, level, option, value):
"""set a socket option
socket - socket number
level - option level
option - option
value - a readable byte buffer (not a Unicode string) containing the value
returns - 0 on success or < 0 on error
"""
try:
return _nn_setsockopt(socket, level, option, ctypes.addressof(value),
len(value))
except (TypeError, AttributeError):
buf_value = ctypes.create_string_buffer(value)
return _nn_setsockopt(socket, level, option,
ctypes.addressof(buf_value), len(value)) |
def __get_securities(self, currency: str, agent: str, symbol: str,
namespace: str) -> List[dal.Security]:
""" Fetches the securities that match the given filters """
repo = self.get_security_repository()
query = repo.query
if currency is not None:
query = query.filter(dal.Security.currency == currency)
if agent is not None:
query = query.filter(dal.Security.updater == agent)
if symbol is not None:
query = query.filter(dal.Security.symbol == symbol)
if namespace is not None:
query = query.filter(dal.Security.namespace == namespace)
# Sorting
query = query.order_by(dal.Security.namespace, dal.Security.symbol)
securities = query.all()
        return securities | Fetches the securities that match the given filters | Below is the instruction that describes the task:
### Input:
Fetches the securities that match the given filters
### Response:
def __get_securities(self, currency: str, agent: str, symbol: str,
namespace: str) -> List[dal.Security]:
""" Fetches the securities that match the given filters """
repo = self.get_security_repository()
query = repo.query
if currency is not None:
query = query.filter(dal.Security.currency == currency)
if agent is not None:
query = query.filter(dal.Security.updater == agent)
if symbol is not None:
query = query.filter(dal.Security.symbol == symbol)
if namespace is not None:
query = query.filter(dal.Security.namespace == namespace)
# Sorting
query = query.order_by(dal.Security.namespace, dal.Security.symbol)
securities = query.all()
return securities |
def workflow(ctx, client):
"""List or manage workflows with subcommands."""
if ctx.invoked_subcommand is None:
from renku.models.refs import LinkReference
names = defaultdict(list)
for ref in LinkReference.iter_items(client, common_path='workflows'):
names[ref.reference.name].append(ref.name)
for path in client.workflow_path.glob('*.cwl'):
click.echo(
'{path}: {names}'.format(
path=path.name,
names=', '.join(
click.style(_deref(name), fg='green')
for name in names[path.name]
),
)
            ) | List or manage workflows with subcommands. | Below is the instruction that describes the task:
### Input:
List or manage workflows with subcommands.
### Response:
def workflow(ctx, client):
"""List or manage workflows with subcommands."""
if ctx.invoked_subcommand is None:
from renku.models.refs import LinkReference
names = defaultdict(list)
for ref in LinkReference.iter_items(client, common_path='workflows'):
names[ref.reference.name].append(ref.name)
for path in client.workflow_path.glob('*.cwl'):
click.echo(
'{path}: {names}'.format(
path=path.name,
names=', '.join(
click.style(_deref(name), fg='green')
for name in names[path.name]
),
)
) |
def main():
""" Start the DQL client. """
parse = argparse.ArgumentParser(description=main.__doc__)
parse.add_argument("-c", "--command", help="Run this command and exit")
region = os.environ.get("AWS_REGION", "us-west-1")
parse.add_argument(
"-r",
"--region",
default=region,
help="AWS region to connect to (default %(default)s)",
)
parse.add_argument(
"-H",
"--host",
default=None,
help="Host to connect to if using a local instance " "(default %(default)s)",
)
parse.add_argument(
"-p",
"--port",
default=8000,
type=int,
help="Port to connect to " "(default %(default)d)",
)
parse.add_argument(
"--version", action="store_true", help="Print the version and exit"
)
args = parse.parse_args()
if args.version:
print(__version__)
return
logging.config.dictConfig(LOG_CONFIG)
cli = DQLClient()
cli.initialize(region=args.region, host=args.host, port=args.port)
if args.command:
command = args.command.strip()
try:
cli.run_command(command)
if cli.engine.partial:
cli.run_command(";")
except KeyboardInterrupt:
pass
else:
        cli.start() | Start the DQL client. | Below is the instruction that describes the task:
### Input:
Start the DQL client.
### Response:
def main():
""" Start the DQL client. """
parse = argparse.ArgumentParser(description=main.__doc__)
parse.add_argument("-c", "--command", help="Run this command and exit")
region = os.environ.get("AWS_REGION", "us-west-1")
parse.add_argument(
"-r",
"--region",
default=region,
help="AWS region to connect to (default %(default)s)",
)
parse.add_argument(
"-H",
"--host",
default=None,
help="Host to connect to if using a local instance " "(default %(default)s)",
)
parse.add_argument(
"-p",
"--port",
default=8000,
type=int,
help="Port to connect to " "(default %(default)d)",
)
parse.add_argument(
"--version", action="store_true", help="Print the version and exit"
)
args = parse.parse_args()
if args.version:
print(__version__)
return
logging.config.dictConfig(LOG_CONFIG)
cli = DQLClient()
cli.initialize(region=args.region, host=args.host, port=args.port)
if args.command:
command = args.command.strip()
try:
cli.run_command(command)
if cli.engine.partial:
cli.run_command(";")
except KeyboardInterrupt:
pass
else:
cli.start() |
def _sin_to_angle(result, deriv, side=1):
"""Convert a sine and its derivatives to an angle and its derivatives"""
v = np.arcsin(np.clip(result[0], -1, 1))
sign = side
if sign == -1:
if v < 0:
offset = -np.pi
else:
offset = np.pi
else:
offset = 0.0
if deriv == 0:
return v*sign + offset,
if abs(result[0]) >= 1:
factor1 = 0
else:
factor1 = 1.0/np.sqrt(1-result[0]**2)
d = factor1*result[1]
if deriv == 1:
return v*sign + offset, d*sign
factor2 = result[0]*factor1**3
dd = factor2*np.outer(result[1], result[1]) + factor1*result[2]
if deriv == 2:
return v*sign + offset, d*sign, dd*sign
    raise ValueError("deriv must be 0, 1 or 2.") | Convert a sine and its derivatives to an angle and its derivatives | Below is the instruction that describes the task:
### Input:
Convert a sine and its derivatives to an angle and its derivatives
### Response:
def _sin_to_angle(result, deriv, side=1):
"""Convert a sine and its derivatives to an angle and its derivatives"""
v = np.arcsin(np.clip(result[0], -1, 1))
sign = side
if sign == -1:
if v < 0:
offset = -np.pi
else:
offset = np.pi
else:
offset = 0.0
if deriv == 0:
return v*sign + offset,
if abs(result[0]) >= 1:
factor1 = 0
else:
factor1 = 1.0/np.sqrt(1-result[0]**2)
d = factor1*result[1]
if deriv == 1:
return v*sign + offset, d*sign
factor2 = result[0]*factor1**3
dd = factor2*np.outer(result[1], result[1]) + factor1*result[2]
if deriv == 2:
return v*sign + offset, d*sign, dd*sign
raise ValueError("deriv must be 0, 1 or 2.") |
def validate_wavetable(self):
"""Enforce monotonic, ascending wavelength array with no zero or
negative values.
Raises
------
pysynphot.exceptions.DuplicateWavelength
Wavelength array contains duplicate entries.
pysynphot.exceptions.UnsortedWavelength
Wavelength array is not monotonic ascending or descending.
pysynphot.exceptions.ZeroWavelength
Wavelength array has zero or negative value(s).
"""
# First check for invalid values
wave = self._wavetable
if N.any(wave <= 0):
wrong = N.where(wave <= 0)[0]
raise exceptions.ZeroWavelength(
'Negative or Zero wavelength occurs in wavelength array',
rows=wrong)
# Now check for monotonicity & enforce ascending
sorted = N.sort(wave)
if not N.alltrue(sorted == wave):
if N.alltrue(sorted[::-1] == wave):
# monotonic descending is allowed
pass
else:
wrong = N.where(sorted != wave)[0]
raise exceptions.UnsortedWavelength(
'Wavelength array is not monotonic', rows=wrong)
# Check for duplicate values
dw = sorted[1:] - sorted[:-1]
if N.any(dw == 0):
wrong = N.where(dw == 0)[0]
raise exceptions.DuplicateWavelength(
"Wavelength array contains duplicate entries", rows=wrong) | Enforce monotonic, ascending wavelength array with no zero or
negative values.
Raises
------
pysynphot.exceptions.DuplicateWavelength
Wavelength array contains duplicate entries.
pysynphot.exceptions.UnsortedWavelength
Wavelength array is not monotonic ascending or descending.
pysynphot.exceptions.ZeroWavelength
            Wavelength array has zero or negative value(s). | Below is the instruction that describes the task:
### Input:
Enforce monotonic, ascending wavelength array with no zero or
negative values.
Raises
------
pysynphot.exceptions.DuplicateWavelength
Wavelength array contains duplicate entries.
pysynphot.exceptions.UnsortedWavelength
Wavelength array is not monotonic ascending or descending.
pysynphot.exceptions.ZeroWavelength
Wavelength array has zero or negative value(s).
### Response:
def validate_wavetable(self):
"""Enforce monotonic, ascending wavelength array with no zero or
negative values.
Raises
------
pysynphot.exceptions.DuplicateWavelength
Wavelength array contains duplicate entries.
pysynphot.exceptions.UnsortedWavelength
Wavelength array is not monotonic ascending or descending.
pysynphot.exceptions.ZeroWavelength
Wavelength array has zero or negative value(s).
"""
# First check for invalid values
wave = self._wavetable
if N.any(wave <= 0):
wrong = N.where(wave <= 0)[0]
raise exceptions.ZeroWavelength(
'Negative or Zero wavelength occurs in wavelength array',
rows=wrong)
# Now check for monotonicity & enforce ascending
sorted = N.sort(wave)
if not N.alltrue(sorted == wave):
if N.alltrue(sorted[::-1] == wave):
# monotonic descending is allowed
pass
else:
wrong = N.where(sorted != wave)[0]
raise exceptions.UnsortedWavelength(
'Wavelength array is not monotonic', rows=wrong)
# Check for duplicate values
dw = sorted[1:] - sorted[:-1]
if N.any(dw == 0):
wrong = N.where(dw == 0)[0]
raise exceptions.DuplicateWavelength(
"Wavelength array contains duplicate entries", rows=wrong) |
def target_doc(self):
"""Returns resource doc as at the target, when the posting was already created \
at the target. This property normally contains the **target_doc** data from \
the livebrigde storage item, saved in a syndication earlier.
:returns: dict"""
if not hasattr(self, "_target_doc") or not self._target_doc:
if self._existing:
self._target_doc = self._existing.get("target_doc", {})
return self._target_doc | Returns resource doc as at the target, when the posting was already created \
at the target. This property normally contains the **target_doc** data from \
the livebrigde storage item, saved in a syndication earlier.
        :returns: dict | Below is the instruction that describes the task:
### Input:
Returns resource doc as at the target, when the posting was already created \
at the target. This property normally contains the **target_doc** data from \
the livebrigde storage item, saved in a syndication earlier.
:returns: dict
### Response:
def target_doc(self):
"""Returns resource doc as at the target, when the posting was already created \
at the target. This property normally contains the **target_doc** data from \
the livebrigde storage item, saved in a syndication earlier.
:returns: dict"""
if not hasattr(self, "_target_doc") or not self._target_doc:
if self._existing:
self._target_doc = self._existing.get("target_doc", {})
return self._target_doc |
def add_node(self, node):
"""Link the agent to a random member of the previous generation."""
nodes = [n for n in self.nodes() if not isinstance(n, Source)]
num_agents = len(nodes)
curr_generation = int((num_agents - 1) / float(self.generation_size))
node.generation = curr_generation
if curr_generation == 0:
if self.initial_source:
source = min(
self.nodes(type=Source),
key=attrgetter('creation_time'))
source.connect(whom=node)
source.transmit(to_whom=node)
else:
prev_agents = Node.query\
.filter_by(failed=False,
network_id=self.id,
generation=(curr_generation - 1))\
.all()
prev_fits = [p.fitness for p in prev_agents]
prev_probs = [(f / (1.0 * sum(prev_fits))) for f in prev_fits]
rnd = random.random()
temp = 0.0
for i, probability in enumerate(prev_probs):
temp += probability
if temp > rnd:
parent = prev_agents[i]
break
parent.connect(whom=node)
            parent.transmit(to_whom=node) | Link the agent to a random member of the previous generation. | Below is the instruction that describes the task:
### Input:
Link the agent to a random member of the previous generation.
### Response:
def add_node(self, node):
"""Link the agent to a random member of the previous generation."""
nodes = [n for n in self.nodes() if not isinstance(n, Source)]
num_agents = len(nodes)
curr_generation = int((num_agents - 1) / float(self.generation_size))
node.generation = curr_generation
if curr_generation == 0:
if self.initial_source:
source = min(
self.nodes(type=Source),
key=attrgetter('creation_time'))
source.connect(whom=node)
source.transmit(to_whom=node)
else:
prev_agents = Node.query\
.filter_by(failed=False,
network_id=self.id,
generation=(curr_generation - 1))\
.all()
prev_fits = [p.fitness for p in prev_agents]
prev_probs = [(f / (1.0 * sum(prev_fits))) for f in prev_fits]
rnd = random.random()
temp = 0.0
for i, probability in enumerate(prev_probs):
temp += probability
if temp > rnd:
parent = prev_agents[i]
break
parent.connect(whom=node)
parent.transmit(to_whom=node) |
def _send_live_report(self, report_id):
"""Sends a PUT request with the report JSON files currently in the
report_queue attribute.
Parameters
----------
report_id : str
Hash of the report JSON as retrieved from :func:`~_get_report_hash`
"""
# Determines the maximum number of reports sent at the same time in
# the same payload
buffer_size = 100
logger.debug("Report buffer size set to: {}".format(buffer_size))
for i in range(0, len(self.report_queue), buffer_size):
# Reset the report compilation batch
reports_compilation = []
# Iterate over report JSON batches determined by buffer_size
for report in self.report_queue[i: i + buffer_size]:
try:
report_file = [x for x in os.listdir(report)
if x.endswith(".json")][0]
except IndexError:
continue
with open(join(report, report_file)) as fh:
reports_compilation.append(json.loads(fh.read()))
logger.debug("Payload sent with size: {}".format(
asizeof(json.dumps(reports_compilation))
))
logger.debug("status: {}".format(self.status_info))
try:
requests.put(
self.broadcast_address,
json={"run_id": report_id,
"report_json": reports_compilation,
"status": self.status_info}
)
except requests.exceptions.ConnectionError:
logger.error(colored_print(
"ERROR: Could not establish connection with server. The server"
" may be down or there is a problem with your internet "
"connection.", "red_bold"))
sys.exit(1)
# When there is no change in the report queue, but there is a change
# in the run status of the pipeline
if not self.report_queue:
logger.debug("status: {}".format(self.status_info))
try:
requests.put(
self.broadcast_address,
json={"run_id": report_id,
"report_json": [],
"status": self.status_info}
)
except requests.exceptions.ConnectionError:
logger.error(colored_print(
"ERROR: Could not establish connection with server. The"
" server may be down or there is a problem with your "
"internet connection.", "red_bold"))
sys.exit(1)
# Reset the report queue after sending the request
self.report_queue = [] | Sends a PUT request with the report JSON files currently in the
report_queue attribute.
Parameters
----------
report_id : str
        Hash of the report JSON as retrieved from :func:`~_get_report_hash` | Below is the instruction that describes the task:
### Input:
Sends a PUT request with the report JSON files currently in the
report_queue attribute.
Parameters
----------
report_id : str
Hash of the report JSON as retrieved from :func:`~_get_report_hash`
### Response:
def _send_live_report(self, report_id):
"""Sends a PUT request with the report JSON files currently in the
report_queue attribute.
Parameters
----------
report_id : str
Hash of the report JSON as retrieved from :func:`~_get_report_hash`
"""
# Determines the maximum number of reports sent at the same time in
# the same payload
buffer_size = 100
logger.debug("Report buffer size set to: {}".format(buffer_size))
for i in range(0, len(self.report_queue), buffer_size):
# Reset the report compilation batch
reports_compilation = []
# Iterate over report JSON batches determined by buffer_size
for report in self.report_queue[i: i + buffer_size]:
try:
report_file = [x for x in os.listdir(report)
if x.endswith(".json")][0]
except IndexError:
continue
with open(join(report, report_file)) as fh:
reports_compilation.append(json.loads(fh.read()))
logger.debug("Payload sent with size: {}".format(
asizeof(json.dumps(reports_compilation))
))
logger.debug("status: {}".format(self.status_info))
try:
requests.put(
self.broadcast_address,
json={"run_id": report_id,
"report_json": reports_compilation,
"status": self.status_info}
)
except requests.exceptions.ConnectionError:
logger.error(colored_print(
"ERROR: Could not establish connection with server. The server"
" may be down or there is a problem with your internet "
"connection.", "red_bold"))
sys.exit(1)
# When there is no change in the report queue, but there is a change
# in the run status of the pipeline
if not self.report_queue:
logger.debug("status: {}".format(self.status_info))
try:
requests.put(
self.broadcast_address,
json={"run_id": report_id,
"report_json": [],
"status": self.status_info}
)
except requests.exceptions.ConnectionError:
logger.error(colored_print(
"ERROR: Could not establish connection with server. The"
" server may be down or there is a problem with your "
"internet connection.", "red_bold"))
sys.exit(1)
# Reset the report queue after sending the request
self.report_queue = [] |
def manual_invoice(cls, user, due_delta, description_price_pairs):
''' Generates an invoice for arbitrary items, not held in a user's
cart.
Arguments:
user (User): The user the invoice is being generated for.
due_delta (datetime.timedelta): The length until the invoice is
due.
description_price_pairs ([(str, long or Decimal), ...]): A list of
pairs. Each pair consists of the description for each line item
and the price for that line item. The price will be cast to
Decimal.
Returns:
an Invoice.
'''
line_items = []
for description, price in description_price_pairs:
line_item = commerce.LineItem(
description=description,
quantity=1,
price=Decimal(price),
product=None,
)
line_items.append(line_item)
min_due_time = timezone.now() + due_delta
return cls._generate(user, None, min_due_time, line_items) | Generates an invoice for arbitrary items, not held in a user's
cart.
Arguments:
user (User): The user the invoice is being generated for.
due_delta (datetime.timedelta): The length until the invoice is
due.
description_price_pairs ([(str, long or Decimal), ...]): A list of
pairs. Each pair consists of the description for each line item
and the price for that line item. The price will be cast to
Decimal.
Returns:
            an Invoice. | Below is the instruction that describes the task:
### Input:
Generates an invoice for arbitrary items, not held in a user's
cart.
Arguments:
user (User): The user the invoice is being generated for.
due_delta (datetime.timedelta): The length until the invoice is
due.
description_price_pairs ([(str, long or Decimal), ...]): A list of
pairs. Each pair consists of the description for each line item
and the price for that line item. The price will be cast to
Decimal.
Returns:
an Invoice.
### Response:
def manual_invoice(cls, user, due_delta, description_price_pairs):
''' Generates an invoice for arbitrary items, not held in a user's
cart.
Arguments:
user (User): The user the invoice is being generated for.
due_delta (datetime.timedelta): The length until the invoice is
due.
description_price_pairs ([(str, long or Decimal), ...]): A list of
pairs. Each pair consists of the description for each line item
and the price for that line item. The price will be cast to
Decimal.
Returns:
an Invoice.
'''
line_items = []
for description, price in description_price_pairs:
line_item = commerce.LineItem(
description=description,
quantity=1,
price=Decimal(price),
product=None,
)
line_items.append(line_item)
min_due_time = timezone.now() + due_delta
return cls._generate(user, None, min_due_time, line_items) |
def is_varchar(self):
"""Determine if a data record is of the type VARCHAR."""
dt = DATA_TYPES['varchar']
if type(self.data) is dt['type'] and len(self.data) < dt['max']:
self.type = 'VARCHAR'
self.len = len(self.data)
            return True | Determine if a data record is of the type VARCHAR. | Below is the instruction that describes the task:
### Input:
Determine if a data record is of the type VARCHAR.
### Response:
def is_varchar(self):
"""Determine if a data record is of the type VARCHAR."""
dt = DATA_TYPES['varchar']
if type(self.data) is dt['type'] and len(self.data) < dt['max']:
self.type = 'VARCHAR'
self.len = len(self.data)
return True |
def dateAt(self, x):
"""
Returns the date at the inputed x position.
:return <QDate>
"""
gantt = self.ganttWidget()
dstart = gantt.dateStart()
days = int(x / float(gantt.cellWidth()))
return dstart.addDays(days) | Returns the date at the inputed x position.
        :return <QDate> | Below is the instruction that describes the task:
### Input:
Returns the date at the inputed x position.
:return <QDate>
### Response:
def dateAt(self, x):
"""
Returns the date at the inputed x position.
:return <QDate>
"""
gantt = self.ganttWidget()
dstart = gantt.dateStart()
days = int(x / float(gantt.cellWidth()))
return dstart.addDays(days) |
def begin_y(self):
"""
Return the Y-position of the begin point of this connector, in
English Metric Units (as a |Length| object).
"""
cxnSp = self._element
y, cy, flipV = cxnSp.y, cxnSp.cy, cxnSp.flipV
begin_y = y+cy if flipV else y
return Emu(begin_y) | Return the Y-position of the begin point of this connector, in
        English Metric Units (as a |Length| object). | Below is the instruction that describes the task:
### Input:
Return the Y-position of the begin point of this connector, in
English Metric Units (as a |Length| object).
### Response:
def begin_y(self):
"""
Return the Y-position of the begin point of this connector, in
English Metric Units (as a |Length| object).
"""
cxnSp = self._element
y, cy, flipV = cxnSp.y, cxnSp.cy, cxnSp.flipV
begin_y = y+cy if flipV else y
return Emu(begin_y) |
def drawdaisy(x, y, color='#fefefe'):
"""
Draw a daisy at x, y
"""
# save location, size etc
_ctx.push()
# save fill and stroke
_fill =_ctx.fill()
_stroke = _ctx.stroke()
sc = (1.0 / _ctx.HEIGHT) * float(y * 0.5) * 4.0
# draw stalk
_ctx.strokewidth(sc * 2.0)
_ctx.stroke('#3B240B')
_ctx.line(x + (sin(x * 0.1) * 10.0), y + 80, x + sin(_ctx.FRAME * 0.1), y)
# draw flower
_ctx.translate(-20, 0)
_ctx.scale(sc)
# draw petals
_ctx.fill(color)
_ctx.nostroke()
for angle in xrange(0, 360, 45):
_ctx.rotate(degrees=45)
_ctx.rect(x, y, 40, 8, 1)
# draw centre
_ctx.fill('#F7FE2E')
_ctx.ellipse(x + 15, y, 10, 10)
# restore fill and stroke
_ctx.fill(_fill)
_ctx.stroke(_stroke)
# restore location, size etc
    _ctx.pop() | Draw a daisy at x, y | Below is the instruction that describes the task:
### Input:
Draw a daisy at x, y
### Response:
def drawdaisy(x, y, color='#fefefe'):
"""
Draw a daisy at x, y
"""
# save location, size etc
_ctx.push()
# save fill and stroke
_fill =_ctx.fill()
_stroke = _ctx.stroke()
sc = (1.0 / _ctx.HEIGHT) * float(y * 0.5) * 4.0
# draw stalk
_ctx.strokewidth(sc * 2.0)
_ctx.stroke('#3B240B')
_ctx.line(x + (sin(x * 0.1) * 10.0), y + 80, x + sin(_ctx.FRAME * 0.1), y)
# draw flower
_ctx.translate(-20, 0)
_ctx.scale(sc)
# draw petals
_ctx.fill(color)
_ctx.nostroke()
for angle in xrange(0, 360, 45):
_ctx.rotate(degrees=45)
_ctx.rect(x, y, 40, 8, 1)
# draw centre
_ctx.fill('#F7FE2E')
_ctx.ellipse(x + 15, y, 10, 10)
# restore fill and stroke
_ctx.fill(_fill)
_ctx.stroke(_stroke)
# restore location, size etc
_ctx.pop() |
def list2pd(all_data, subjindex=None, listindex=None):
"""
Makes multi-indexed dataframe of subject data
Parameters
----------
all_data : list of lists of strings
strings are either all presented or all recalled items, in the order of presentation or recall
*should also work for presented / recalled ints and floats, if desired
Returns
----------
subs_list_of_dfs : multi-indexed dataframe
dataframe of subject data (presented or recalled words/items), indexed by subject and list number
cell populated by the term presented or recalled in the position indicated by the column number
"""
# set default index if it is not defined
# max_nlists = max(map(lambda x: len(x), all_data))
listindex = [[idx for idx in range(len(sub))] for sub in all_data] if not listindex else listindex
subjindex = [idx for idx,subj in enumerate(all_data)] if not subjindex else subjindex
def make_multi_index(listindex, sub_num):
return pd.MultiIndex.from_tuples([(sub_num,lst) for lst in listindex], names = ['Subject', 'List'])
listindex = list(listindex)
subjindex = list(subjindex)
subs_list_of_dfs = [pd.DataFrame(sub_data, index=make_multi_index(listindex[sub_num], subjindex[sub_num])) for sub_num,sub_data in enumerate(all_data)]
return pd.concat(subs_list_of_dfs) | Makes multi-indexed dataframe of subject data
Parameters
----------
all_data : list of lists of strings
strings are either all presented or all recalled items, in the order of presentation or recall
*should also work for presented / recalled ints and floats, if desired
Returns
----------
subs_list_of_dfs : multi-indexed dataframe
dataframe of subject data (presented or recalled words/items), indexed by subject and list number
cell populated by the term presented or recalled in the position indicated by the column number | Below is the instruction that describes the task:
### Input:
Makes multi-indexed dataframe of subject data
Parameters
----------
all_data : list of lists of strings
strings are either all presented or all recalled items, in the order of presentation or recall
*should also work for presented / recalled ints and floats, if desired
Returns
----------
subs_list_of_dfs : multi-indexed dataframe
dataframe of subject data (presented or recalled words/items), indexed by subject and list number
cell populated by the term presented or recalled in the position indicated by the column number
### Response:
def list2pd(all_data, subjindex=None, listindex=None):
    """
    Makes multi-indexed dataframe of subject data
    Parameters
    ----------
    all_data : list of lists of strings
        strings are either all presented or all recalled items, in the order of presentation or recall
        *should also work for presented / recalled ints and floats, if desired
    subjindex : list, optional
        labels for the Subject level of the index; defaults to 0..n_subjects-1
    listindex : list of lists, optional
        per-subject labels for the List level; defaults to 0..n_lists-1
        for each subject
    Returns
    ----------
    subs_list_of_dfs : multi-indexed dataframe
        dataframe of subject data (presented or recalled words/items), indexed by subject and list number
        cell populated by the term presented or recalled in the position indicated by the column number
    """
    # set default index if it is not defined
    # max_nlists = max(map(lambda x: len(x), all_data))
    listindex = [[idx for idx in range(len(sub))] for sub in all_data] if not listindex else listindex
    subjindex = [idx for idx,subj in enumerate(all_data)] if not subjindex else subjindex
    def make_multi_index(listindex, sub_num):
        # (Subject, List) tuples -> two-level MultiIndex for one subject
        return pd.MultiIndex.from_tuples([(sub_num,lst) for lst in listindex], names = ['Subject', 'List'])
    # NOTE(review): these list() calls are no-ops when the defaults above
    # were used; they only matter for non-list iterables passed by callers
    listindex = list(listindex)
    subjindex = list(subjindex)
    # one DataFrame per subject, then stack them into a single frame
    subs_list_of_dfs = [pd.DataFrame(sub_data, index=make_multi_index(listindex[sub_num], subjindex[sub_num])) for sub_num,sub_data in enumerate(all_data)]
    return pd.concat(subs_list_of_dfs) |
def set_values(self,x):
""" Updates self.theta parameter. No returns values"""
x = numpy.atleast_2d(x)
x = x.real # ahem
C_inv = self.__C_inv__
theta = numpy.dot( x, C_inv )
self.theta = theta
return theta | Updates self.theta parameter. No returns values | Below is the the instruction that describes the task:
### Input:
Updates self.theta parameter. No returns values
### Response:
def set_values(self, x):
    """Recompute ``self.theta`` from the sample ``x``.

    ``x`` is promoted to a 2-D array, its imaginary part is discarded,
    and the result is projected through the cached inverse matrix
    ``self.__C_inv__``; the product is stored on ``self.theta``.
    """
    # 2-D, real-valued view of the input sample
    sample = numpy.atleast_2d(x).real
    theta = numpy.dot(sample, self.__C_inv__)
    self.theta = theta
return theta |
def dependency_map(self):
r"""
Create a graph of the dependency graph in a decent format
See Also
--------
dependency_graph
dependency_list
"""
dtree = self.dependency_graph()
fig = nx.draw_spectral(dtree,
with_labels=True,
arrowsize=50,
node_size=2000,
edge_color='lightgrey',
width=3.0,
font_size=32,
font_weight='bold')
return fig | r"""
Create a graph of the dependency graph in a decent format
See Also
--------
dependency_graph
dependency_list | Below is the instruction that describes the task:
### Input:
r"""
Create a graph of the dependency graph in a decent format
See Also
--------
dependency_graph
dependency_list
### Response:
def dependency_map(self):
    r"""
    Create a graph of the dependency graph in a decent format

    Renders the dependency tree with a spectral layout, large bold
    labels and light grey edges, and returns whatever
    ``networkx.draw_spectral`` yields.

    NOTE(review): networkx/matplotlib draw helpers typically return
    ``None``, so ``fig`` may always be ``None`` here — confirm before
    relying on the return value.

    See Also
    --------
    dependency_graph
    dependency_list
    """
    dtree = self.dependency_graph()
    fig = nx.draw_spectral(dtree,
                           with_labels=True,
                           arrowsize=50,
                           node_size=2000,
                           edge_color='lightgrey',
                           width=3.0,
                           font_size=32,
                           font_weight='bold')
    return fig |
def strict(*types):
"""Decorator, type check production rule output"""
def decorate(func):
@wraps(func)
def wrapper(self, p):
func(self, p)
if not isinstance(p[0], types):
raise YAMLStrictTypeError(p[0], types, func)
wrapper.co_firstlineno = func.__code__.co_firstlineno
return wrapper
return decorate | Decorator, type check production rule output | Below is the the instruction that describes the task:
### Input:
Decorator, type check production rule output
### Response:
def strict(*types):
    """Decorator, type check production rule output

    Wraps a parser production method ``func(self, p)`` and raises
    ``YAMLStrictTypeError`` unless the rule's result ``p[0]`` is an
    instance of one of *types*.
    """
    def decorate(func):
        @wraps(func)
        def wrapper(self, p):
            func(self, p)
            if not isinstance(p[0], types):
                raise YAMLStrictTypeError(p[0], types, func)
        # expose the wrapped function's line number on the wrapper —
        # presumably so the parser generator (PLY-style) reports the real
        # rule location; confirm against the parser in use
        wrapper.co_firstlineno = func.__code__.co_firstlineno
        return wrapper
    return decorate |
def register(cls, config_type: Type[DecoderConfig], suffix: str):
"""
Registers decoder type for configuration. Suffix is appended to decoder prefix.
:param config_type: Configuration type for decoder.
:param suffix: String to append to decoder prefix.
:return: Class decorator.
"""
def wrapper(target_cls):
cls.__registry[config_type] = (target_cls, suffix)
return target_cls
return wrapper | Registers decoder type for configuration. Suffix is appended to decoder prefix.
:param config_type: Configuration type for decoder.
:param suffix: String to append to decoder prefix.
:return: Class decorator. | Below is the instruction that describes the task:
### Input:
Registers decoder type for configuration. Suffix is appended to decoder prefix.
:param config_type: Configuration type for decoder.
:param suffix: String to append to decoder prefix.
:return: Class decorator.
### Response:
def register(cls, config_type: Type[DecoderConfig], suffix: str):
    """
    Registers decoder type for configuration. Suffix is appended to decoder prefix.
    :param config_type: Configuration type for decoder.
    :param suffix: String to append to decoder prefix.
    :return: Class decorator.
    """
    def wrapper(target_cls):
        # map the config type to (decoder class, suffix) in the private
        # class-level registry; a later registration for the same config
        # type overwrites the earlier one
        cls.__registry[config_type] = (target_cls, suffix)
        return target_cls
    return wrapper |
def inverse(self):
"""
Retrieves the inverse of a G1 element.
"""
result = G1Element()
librelic.g1_neg_abi(byref(result), byref(self))
return result | Retrieves the inverse of a G1 element. | Below is the the instruction that describes the task:
### Input:
Retrieves the inverse of a G1 element.
### Response:
def inverse(self):
    """
    Retrieves the inverse of a G1 element.

    In additive group notation the inverse is the negation, delegated to
    the native RELIC ``g1_neg`` routine. A new ``G1Element`` is returned;
    ``self`` is passed by reference as the input operand and is
    presumably left unmodified — confirm against the RELIC ABI.
    """
    result = G1Element()
    # out-parameter call: result <- -self
    librelic.g1_neg_abi(byref(result), byref(self))
    return result |
def settings_view_decorator(function):
"""Insert decorator from settings, if any.
.. note:: Decorator in ``CLOUD_BROWSER_VIEW_DECORATOR`` can be either a
callable or a fully-qualified string path (the latter, which we'll
lazy import).
"""
dec = settings.CLOUD_BROWSER_VIEW_DECORATOR
# Trade-up string to real decorator.
if isinstance(dec, str):
# Split into module and decorator strings.
mod_str, _, dec_str = dec.rpartition('.')
if not (mod_str and dec_str):
raise ImportError("Unable to import module: %s" % mod_str)
# Import and try to get decorator function.
mod = import_module(mod_str)
if not hasattr(mod, dec_str):
raise ImportError("Unable to import decorator: %s" % dec)
dec = getattr(mod, dec_str)
if dec and callable(dec):
return dec(function)
return function | Insert decorator from settings, if any.
.. note:: Decorator in ``CLOUD_BROWSER_VIEW_DECORATOR`` can be either a
callable or a fully-qualified string path (the latter, which we'll
lazy import). | Below is the instruction that describes the task:
### Input:
Insert decorator from settings, if any.
.. note:: Decorator in ``CLOUD_BROWSER_VIEW_DECORATOR`` can be either a
callable or a fully-qualified string path (the latter, which we'll
lazy import).
### Response:
def settings_view_decorator(function):
    """Insert decorator from settings, if any.

    :param function: view callable to (optionally) wrap.
    :return: the decorated view when ``CLOUD_BROWSER_VIEW_DECORATOR`` is
        set and resolves to a callable, otherwise *function* unchanged.
    :raises ImportError: when a string setting has no module part or the
        named attribute does not exist on the imported module.

    .. note:: Decorator in ``CLOUD_BROWSER_VIEW_DECORATOR`` can be either a
        callable or a fully-qualified string path (the latter, which we'll
        lazy import).
    """
    dec = settings.CLOUD_BROWSER_VIEW_DECORATOR
    # Trade-up string to real decorator.
    if isinstance(dec, str):
        # Split into module and decorator strings.
        mod_str, _, dec_str = dec.rpartition('.')
        if not (mod_str and dec_str):
            # NOTE(review): a dotless path yields mod_str == '' and lands
            # here too, so the message can name an empty module
            raise ImportError("Unable to import module: %s" % mod_str)
        # Import and try to get decorator function.
        mod = import_module(mod_str)
        if not hasattr(mod, dec_str):
            raise ImportError("Unable to import decorator: %s" % dec)
        dec = getattr(mod, dec_str)
    if dec and callable(dec):
        return dec(function)
    # setting unset or not callable: return the view unchanged
    return function |
def block_events(self):
"""
Special version of block_events that loops over all tree elements.
"""
# block events in the usual way
BaseObject.block_events(self)
# loop over all top level parameters
for i in range(self._widget.topLevelItemCount()):
self._widget.topLevelItem(i).param.blockSignals(True)
return self | Special version of block_events that loops over all tree elements. | Below is the the instruction that describes the task:
### Input:
Special version of block_events that loops over all tree elements.
### Response:
def block_events(self):
    """
    Special version of block_events that loops over all tree elements.

    Blocks events on the object itself, then silences Qt signals on the
    ``param`` of every top-level item in the backing tree widget.
    Returns ``self`` so calls can be chained.
    """
    # block events in the usual way
    BaseObject.block_events(self)
    # loop over all top level parameters
    for i in range(self._widget.topLevelItemCount()):
        self._widget.topLevelItem(i).param.blockSignals(True)
    return self |
def get_parameter_diff(awsclient, config):
"""get differences between local config and currently active config
"""
client_cf = awsclient.get_client('cloudformation')
try:
stack_name = config['stack']['StackName']
if stack_name:
response = client_cf.describe_stacks(StackName=stack_name)
if response['Stacks']:
stack_id = response['Stacks'][0]['StackId']
stack = response['Stacks'][0]
else:
return None
else:
print(
'StackName is not configured, could not create parameter diff')
return None
except GracefulExit:
raise
except Exception:
# probably the stack is not existent
return None
changed = 0
table = []
table.append(['Parameter', 'Current Value', 'New Value'])
# Check if there are parameters for the stack
if 'Parameters' in stack:
for param in stack['Parameters']:
try:
old = str(param['ParameterValue'])
# can not compare list with str!!
# if ',' in old:
# old = old.split(',')
new = config['parameters'][param['ParameterKey']]
if old != new:
if old.startswith('***'):
# parameter is configured with `NoEcho=True`
# this means we can not really say if the value changed!!
# for security reasons we block viewing the new value
new = old
table.append([param['ParameterKey'], old, new])
changed += 1
except GracefulExit:
raise
except Exception:
print('Did not find %s in local config file' % param[
'ParameterKey'])
if changed > 0:
print(tabulate(table, tablefmt='fancy_grid'))
return changed > 0 | get differences between local config and currently active config | Below is the the instruction that describes the task:
### Input:
get differences between local config and currently active config
### Response:
def get_parameter_diff(awsclient, config):
    """get differences between local config and currently active config

    :param awsclient: client wrapper used to obtain a cloudformation client
    :param config: local config mapping with 'stack' / 'parameters' sections
    :return: True if at least one parameter differs, False if none do,
        None when the stack name is unset, the stack is missing, or the
        lookup fails
    """
    client_cf = awsclient.get_client('cloudformation')
    try:
        stack_name = config['stack']['StackName']
        if stack_name:
            response = client_cf.describe_stacks(StackName=stack_name)
            if response['Stacks']:
                # NOTE(review): stack_id is assigned but never used below
                stack_id = response['Stacks'][0]['StackId']
                stack = response['Stacks'][0]
            else:
                return None
        else:
            print(
                'StackName is not configured, could not create parameter diff')
            return None
    except GracefulExit:
        raise
    except Exception:
        # probably the stack is not existent
        # NOTE(review): broad except also hides credential/network errors
        return None
    changed = 0
    table = []
    table.append(['Parameter', 'Current Value', 'New Value'])
    # Check if there are parameters for the stack
    if 'Parameters' in stack:
        for param in stack['Parameters']:
            try:
                old = str(param['ParameterValue'])
                # can not compare list with str!!
                # if ',' in old:
                #     old = old.split(',')
                new = config['parameters'][param['ParameterKey']]
                if old != new:
                    if old.startswith('***'):
                        # parameter is configured with `NoEcho=True`
                        # this means we can not really say if the value changed!!
                        # for security reasons we block viewing the new value
                        new = old
                    table.append([param['ParameterKey'], old, new])
                    changed += 1
            except GracefulExit:
                raise
            except Exception:
                print('Did not find %s in local config file' % param[
                    'ParameterKey'])
    if changed > 0:
        print(tabulate(table, tablefmt='fancy_grid'))
    return changed > 0 |
def raw(self, module, method='GET', data=None):
'''
Submits or requsts raw input
'''
request = self.session
url = 'http://%s:%s/%s' % (self.host, self.port, module)
if self.verbose:
print data
if method=='GET':
response = request.get(url)
elif method=='POST':
response = request.post(url,data)
elif method=='PUT':
response = request.put(url,data)
elif method=='DELETE':
response = request.delete(url)
else:
return {'error' : 'No such request method %s' % method}
return response | Submits or requsts raw input | Below is the the instruction that describes the task:
### Input:
Submits or requests raw input
### Response:
def raw(self, module, method='GET', data=None):
    '''
    Submits or requests raw input

    :param module: URL path segment appended to ``http://host:port/``
    :param method: one of GET/POST/PUT/DELETE
    :param data: request body for POST/PUT; ignored for GET and DELETE
    :return: the response object from the session call, or an error dict
        for an unknown method

    NOTE(review): ``print data`` is Python 2 statement syntax, so this
    module targets Python 2.
    '''
    request = self.session
    url = 'http://%s:%s/%s' % (self.host, self.port, module)
    if self.verbose:
        print data
    if method=='GET':
        response = request.get(url)
    elif method=='POST':
        response = request.post(url,data)
    elif method=='PUT':
        response = request.put(url,data)
    elif method=='DELETE':
        response = request.delete(url)
    else:
        return {'error' : 'No such request method %s' % method}
    return response |
def save_datasets(self, writer="geotiff", datasets=None, compute=True, **kwargs):
"""Save all the datasets present in a scene to disk using *writer*."""
if datasets is not None:
datasets = [self[ds] for ds in datasets]
else:
datasets = [self.datasets.get(ds) for ds in self.wishlist]
datasets = [ds for ds in datasets if ds is not None]
if not datasets:
raise RuntimeError("None of the requested datasets have been "
"generated or could not be loaded. Requested "
"composite inputs may need to have matching "
"dimensions (eg. through resampling).")
writer, save_kwargs = load_writer(writer, ppp_config_dir=self.ppp_config_dir, **kwargs)
return writer.save_datasets(datasets, compute=compute, **save_kwargs) | Save all the datasets present in a scene to disk using *writer*. | Below is the the instruction that describes the task:
### Input:
Save all the datasets present in a scene to disk using *writer*.
### Response:
def save_datasets(self, writer="geotiff", datasets=None, compute=True, **kwargs):
    """Save all the datasets present in a scene to disk using *writer*.

    :param writer: name of the writer plugin to load (default geotiff)
    :param datasets: explicit dataset ids/names to save; defaults to the
        scene's wishlist
    :param compute: forwarded to the writer (delayed vs immediate write)
    :param kwargs: extra options split off and passed to the writer
    :raises RuntimeError: if none of the requested datasets are available
    """
    if datasets is not None:
        datasets = [self[ds] for ds in datasets]
    else:
        # wishlist entries may not all have been generated -> drop Nones
        datasets = [self.datasets.get(ds) for ds in self.wishlist]
        datasets = [ds for ds in datasets if ds is not None]
    if not datasets:
        raise RuntimeError("None of the requested datasets have been "
                           "generated or could not be loaded. Requested "
                           "composite inputs may need to have matching "
                           "dimensions (eg. through resampling).")
    writer, save_kwargs = load_writer(writer, ppp_config_dir=self.ppp_config_dir, **kwargs)
    return writer.save_datasets(datasets, compute=compute, **save_kwargs) |
def has_permission(self, request, *args, **kwargs):
"""
Figures out if the current user has permissions for this view.
"""
self.kwargs = kwargs
self.args = args
self.request = request
self.org = self.derive_org()
if self.get_user().is_superuser:
return True
if self.get_user().has_perm(self.permission):
return True
return self.has_org_perm(self.permission) | Figures out if the current user has permissions for this view. | Below is the the instruction that describes the task:
### Input:
Figures out if the current user has permissions for this view.
### Response:
def has_permission(self, request, *args, **kwargs):
    """
    Figures out if the current user has permissions for this view.

    Caches the request/args/kwargs and the derived org on ``self`` as a
    side effect, then grants access to superusers and to users holding
    ``self.permission`` directly; otherwise the org-level check decides.
    """
    # stash the request context for later use by the view methods
    self.kwargs = kwargs
    self.args = args
    self.request = request
    self.org = self.derive_org()
    # short-circuit preserves the one-or-two get_user() call pattern:
    # superusers are decided after a single lookup
    if self.get_user().is_superuser or self.get_user().has_perm(self.permission):
        return True
return self.has_org_perm(self.permission) |
def CheckCronJobAccess(self, username, cron_job_id):
"""Checks whether a given user can access given cron job."""
self._CheckAccess(
username, str(cron_job_id),
rdf_objects.ApprovalRequest.ApprovalType.APPROVAL_TYPE_CRON_JOB) | Checks whether a given user can access given cron job. | Below is the the instruction that describes the task:
### Input:
Checks whether a given user can access given cron job.
### Response:
def CheckCronJobAccess(self, username, cron_job_id):
    """Checks whether a given user can access given cron job.

    Delegates to ``_CheckAccess`` with the cron-job approval type and the
    stringified job id; presumably ``_CheckAccess`` raises when no valid
    approval exists rather than returning a value — confirm its contract.
    """
    self._CheckAccess(
        username, str(cron_job_id),
        rdf_objects.ApprovalRequest.ApprovalType.APPROVAL_TYPE_CRON_JOB) |
def estimate_mutual_information(x, y):
"""Estimate the mutual information of two datasets.
Mutual information is a measure of dependence between
two datasets and is calculated as:
$I(x;y) = H(x) + H(y) - H(x,y)$
Where H(x) is the Shannon entropy of x. For continuous datasets,
adapts the Kraskov Estimator [1] for mutual information.
Args:
x (array-like): An array with shape (n_samples, n_features_x)
y (array-like): An array with shape (n_samples, n_features_y)
Returns:
float: A floating point number representing the mutual
information of x and y. This calculation is *exact*
for entirely discrete datasets and *approximate* if
there are continuous columns present.
References:
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
"""
xy = np.concatenate((x, y), axis=1)
epsilon = _calculate_epsilon(xy)
h_x = estimate_entropy(x, epsilon)
h_y = estimate_entropy(y, epsilon)
h_xy = estimate_entropy(xy, epsilon)
return max(0, h_x + h_y - h_xy) | Estimate the mutual information of two datasets.
Mutual information is a measure of dependence between
two datasets and is calculated as:
$I(x;y) = H(x) + H(y) - H(x,y)$
Where H(x) is the Shannon entropy of x. For continuous datasets,
adapts the Kraskov Estimator [1] for mutual information.
Args:
x (array-like): An array with shape (n_samples, n_features_x)
y (array-like): An array with shape (n_samples, n_features_y)
Returns:
float: A floating point number representing the mutual
information of x and y. This calculation is *exact*
for entirely discrete datasets and *approximate* if
there are continuous columns present.
References:
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004. | Below is the instruction that describes the task:
### Input:
Estimate the mutual information of two datasets.
Mutual information is a measure of dependence between
two datasets and is calculated as:
$I(x;y) = H(x) + H(y) - H(x,y)$
Where H(x) is the Shannon entropy of x. For continuous datasets,
adapts the Kraskov Estimator [1] for mutual information.
Args:
x (array-like): An array with shape (n_samples, n_features_x)
y (array-like): An array with shape (n_samples, n_features_y)
Returns:
float: A floating point number representing the mutual
information of x and y. This calculation is *exact*
for entirely discrete datasets and *approximate* if
there are continuous columns present.
References:
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
### Response:
def estimate_mutual_information(x, y):
    """Estimate the mutual information of two datasets.
    Mutual information is a measure of dependence between
    two datasets and is calculated as:
    $I(x;y) = H(x) + H(y) - H(x,y)$
    Where H(x) is the Shannon entropy of x. For continuous datasets,
    adapts the Kraskov Estimator [1] for mutual information.
    Args:
        x (array-like): An array with shape (n_samples, n_features_x)
        y (array-like): An array with shape (n_samples, n_features_y)
    Returns:
        float: A floating point number representing the mutual
            information of x and y. This calculation is *exact*
            for entirely discrete datasets and *approximate* if
            there are continuous columns present.
    References:
        .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
            information". Phys. Rev. E 69, 2004.
    """
    xy = np.concatenate((x, y), axis=1)
    # a single epsilon derived from the joint sample is shared by all
    # three entropy estimates so the terms stay mutually consistent
    epsilon = _calculate_epsilon(xy)
    h_x = estimate_entropy(x, epsilon)
    h_y = estimate_entropy(y, epsilon)
    h_xy = estimate_entropy(xy, epsilon)
    # clamp at zero: estimation noise can push H(x)+H(y)-H(x,y) negative
    return max(0, h_x + h_y - h_xy) |
def ximshow_unrectified(self, slitlet2d):
"""Display unrectified image with spectrails and frontiers.
Parameters
----------
slitlet2d : numpy array
Array containing the unrectified slitlet image.
"""
title = "Slitlet#" + str(self.islitlet)
ax = ximshow(slitlet2d, title=title,
first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig),
show=False)
xdum = np.linspace(1, EMIR_NAXIS1, num=EMIR_NAXIS1)
ylower = self.list_spectrails[0](xdum)
ax.plot(xdum, ylower, 'b-')
ymiddle = self.list_spectrails[1](xdum)
ax.plot(xdum, ymiddle, 'b--')
yupper = self.list_spectrails[2](xdum)
ax.plot(xdum, yupper, 'b-')
ylower_frontier = self.list_frontiers[0](xdum)
ax.plot(xdum, ylower_frontier, 'b:')
yupper_frontier = self.list_frontiers[1](xdum)
ax.plot(xdum, yupper_frontier, 'b:')
pause_debugplot(debugplot=self.debugplot, pltshow=True) | Display unrectified image with spectrails and frontiers.
Parameters
----------
slitlet2d : numpy array
Array containing the unrectified slitlet image. | Below is the instruction that describes the task:
### Input:
Display unrectified image with spectrails and frontiers.
Parameters
----------
slitlet2d : numpy array
Array containing the unrectified slitlet image.
### Response:
def ximshow_unrectified(self, slitlet2d):
    """Display unrectified image with spectrails and frontiers.

    Spectrum trails are overplotted in solid blue (lower/upper) and
    dashed blue (middle); the slitlet frontiers in dotted blue.

    Parameters
    ----------
    slitlet2d : numpy array
        Array containing the unrectified slitlet image.
    """
    title = "Slitlet#" + str(self.islitlet)
    ax = ximshow(slitlet2d, title=title,
                 first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig),
                 show=False)
    # evaluate each trail polynomial over the full detector x range
    xdum = np.linspace(1, EMIR_NAXIS1, num=EMIR_NAXIS1)
    ylower = self.list_spectrails[0](xdum)
    ax.plot(xdum, ylower, 'b-')
    ymiddle = self.list_spectrails[1](xdum)
    ax.plot(xdum, ymiddle, 'b--')
    yupper = self.list_spectrails[2](xdum)
    ax.plot(xdum, yupper, 'b-')
    ylower_frontier = self.list_frontiers[0](xdum)
    ax.plot(xdum, ylower_frontier, 'b:')
    yupper_frontier = self.list_frontiers[1](xdum)
    ax.plot(xdum, yupper_frontier, 'b:')
    pause_debugplot(debugplot=self.debugplot, pltshow=True) |
def _projection(self, a, b, c):
"""Return projection of (a,b) onto (a,c)
Arguments are point locations, not indexes.
"""
ab = b - a
ac = c - a
return a + ((ab*ac).sum() / (ac*ac).sum()) * ac | Return projection of (a,b) onto (a,c)
Arguments are point locations, not indexes. | Below is the instruction that describes the task:
### Input:
Return projection of (a,b) onto (a,c)
Arguments are point locations, not indexes.
### Response:
def _projection(self, a, b, c):
    """Return projection of (a,b) onto (a,c)
    Arguments are point locations, not indexes.

    Computes a + ((ab . ac) / (ac . ac)) * ac — the foot of the
    perpendicular from b onto the line through a and c. The element-wise
    ``*`` followed by ``.sum()`` acts as a dot product for array-like
    inputs.
    """
    ab = b - a
    ac = c - a
    return a + ((ab*ac).sum() / (ac*ac).sum()) * ac |
def convert_table(self, markup):
""" Subtitutes <table> content to Wikipedia markup.
"""
for table in re.findall(self.re["html-table"], markup):
wiki = table
wiki = re.sub(r"<table(.*?)>", "{|\\1", wiki)
wiki = re.sub(r"<tr(.*?)>", "|-\\1", wiki)
wiki = re.sub(r"<td(.*?)>", "|\\1|", wiki)
wiki = wiki.replace("</td>", "\n")
wiki = wiki.replace("</tr>", "\n")
wiki = wiki.replace("</table>", "\n|}")
markup = markup.replace(table, wiki)
return markup | Subtitutes <table> content to Wikipedia markup. | Below is the the instruction that describes the task:
### Input:
Substitutes <table> content to Wikipedia markup.
### Response:
def convert_table(self, markup):
    """ Substitutes <table> content to Wikipedia markup.

    Each HTML table matched by the pattern in ``self.re["html-table"]``
    is rewritten in place: the <table>/<tr>/<td> open tags become
    ``{|`` / ``|-`` / ``|...|`` (attributes preserved via the captured
    group), closing </td> and </tr> tags become newlines, and </table>
    becomes the ``|}`` table terminator.
    """
    for table in re.findall(self.re["html-table"], markup):
        wiki = table
        wiki = re.sub(r"<table(.*?)>", "{|\\1", wiki)
        wiki = re.sub(r"<tr(.*?)>", "|-\\1", wiki)
        wiki = re.sub(r"<td(.*?)>", "|\\1|", wiki)
        wiki = wiki.replace("</td>", "\n")
        wiki = wiki.replace("</tr>", "\n")
        wiki = wiki.replace("</table>", "\n|}")
        # splice the converted table back into the full document
        markup = markup.replace(table, wiki)
    return markup |
def _from_dict(self, obj_dict):
"""
Initialize a model from the dictionary
"""
self._n_folds = obj_dict["param"]["n_folds"]
self._n_rows = obj_dict["param"]["n_rows"]
self._use_stored_folds = obj_dict["param"]["use_stored_folds"]
self._concise_model = Concise.from_dict(obj_dict["init_model"])
if obj_dict["trained_global_model"] is None:
self._concise_global_model = None
else:
self._concise_global_model = Concise.from_dict(obj_dict["trained_global_model"])
self._kf = [(fold, np.asarray(train), np.asarray(test)) for fold, train, test in obj_dict["folds"]]
self._cv_model = {fold: Concise.from_dict(model_dict) for fold, model_dict in obj_dict["output"].items()} | Initialize a model from the dictionary | Below is the the instruction that describes the task:
### Input:
Initialize a model from the dictionary
### Response:
def _from_dict(self, obj_dict):
    """
    Initialize a model from the dictionary

    Restores the CV bookkeeping (fold count, row count, stored-folds
    flag), the initial Concise model, the optionally-present globally
    trained model, the fold index arrays and the per-fold trained models.
    """
    self._n_folds = obj_dict["param"]["n_folds"]
    self._n_rows = obj_dict["param"]["n_rows"]
    self._use_stored_folds = obj_dict["param"]["use_stored_folds"]
    self._concise_model = Concise.from_dict(obj_dict["init_model"])
    if obj_dict["trained_global_model"] is None:
        self._concise_global_model = None
    else:
        self._concise_global_model = Concise.from_dict(obj_dict["trained_global_model"])
    # folds were serialized as plain lists -> restore numpy index arrays
    self._kf = [(fold, np.asarray(train), np.asarray(test)) for fold, train, test in obj_dict["folds"]]
    self._cv_model = {fold: Concise.from_dict(model_dict) for fold, model_dict in obj_dict["output"].items()} |
def layers(self, rev=True):
"""
Get list of DockerImage for every layer in image
:param rev: get layers rev
:return: list of DockerImages
"""
image_layers = [
DockerImage(None, identifier=x, pull_policy=DockerImagePullPolicy.NEVER)
for x in self.get_layer_ids()
]
if not rev:
image_layers.reverse()
return image_layers | Get list of DockerImage for every layer in image
:param rev: get layers rev
:return: list of DockerImages | Below is the instruction that describes the task:
### Input:
Get list of DockerImage for every layer in image
:param rev: get layers rev
:return: list of DockerImages
### Response:
def layers(self, rev=True):
    """
    Get list of DockerImage for every layer in image
    :param rev: get layers rev
    :return: list of DockerImages

    NOTE(review): the list is reversed only when ``rev`` is False, which
    reads backwards relative to the parameter name — confirm the default
    ordering produced by ``get_layer_ids()``.
    """
    # wrap each layer id; NEVER pull policy because layers already exist
    # locally as part of this image
    image_layers = [
        DockerImage(None, identifier=x, pull_policy=DockerImagePullPolicy.NEVER)
        for x in self.get_layer_ids()
    ]
    if not rev:
        image_layers.reverse()
    return image_layers |
def marker_for_line(self, line):
"""
Returns the marker that is displayed at the specified line number if
any.
:param line: The marker line.
:return: Marker of None
:rtype: pyqode.core.Marker
"""
markers = []
for marker in self._markers:
if line == marker.position:
markers.append(marker)
return markers | Returns the marker that is displayed at the specified line number if
any.
:param line: The marker line.
:return: Marker of None
:rtype: pyqode.core.Marker | Below is the instruction that describes the task:
### Input:
Returns the marker that is displayed at the specified line number if
any.
:param line: The marker line.
:return: Marker of None
:rtype: pyqode.core.Marker
### Response:
def marker_for_line(self, line):
    """Collect the markers anchored at *line*.

    :param line: line number to look up.
    :return: list of matching markers (empty when none are registered
        at that line).
    """
    # gather every registered marker whose position matches the line
    markers = [mrk for mrk in self._markers if line == mrk.position]
return markers |
def push_results(self, results, scheduler_name):
"""Send a HTTP request to the satellite (POST /put_results)
Send actions results to the satellite
:param results: Results list to send
:type results: list
:param scheduler_name: Scheduler name
:type scheduler_name: uuid
:return: True on success, False on failure
:rtype: bool
"""
logger.debug("Pushing %d results", len(results))
result = self.con.post('put_results', {'results': results, 'from': scheduler_name},
wait=True)
return result | Send a HTTP request to the satellite (POST /put_results)
Send actions results to the satellite
:param results: Results list to send
:type results: list
:param scheduler_name: Scheduler name
:type scheduler_name: uuid
:return: True on success, False on failure
:rtype: bool | Below is the instruction that describes the task:
### Input:
Send a HTTP request to the satellite (POST /put_results)
Send actions results to the satellite
:param results: Results list to send
:type results: list
:param scheduler_name: Scheduler name
:type scheduler_name: uuid
:return: True on success, False on failure
:rtype: bool
### Response:
def push_results(self, results, scheduler_name):
    """Send a HTTP request to the satellite (POST /put_results)
    Send actions results to the satellite
    :param results: Results list to send
    :type results: list
    :param scheduler_name: Scheduler name
    :type scheduler_name: uuid
    :return: True on success, False on failure
    :rtype: bool
    """
    logger.debug("Pushing %d results", len(results))
    # NOTE(review): this returns the raw value of con.post, not an
    # explicit bool as documented — confirm post's contract or adjust
    # the docstring
    result = self.con.post('put_results', {'results': results, 'from': scheduler_name},
                           wait=True)
    return result |
def output_path(self, path_name):
""" Modify a path so it fits expectations.
Avoid returning relative paths that start with '../' and possibly
return relative paths when output and cache directories match.
"""
# make sure it is a valid posix format
path = to_posix(path_name)
assert (path == path_name), "path_name passed to output_path must be in posix format"
if posixpath.isabs(path):
if self.output == self.cache:
# worth seeing if an absolute path can be avoided
path = posixpath.relpath(path, self.output)
else:
return posixpath.realpath(path)
if path.startswith('../'):
joined = posixpath.join(self.output, path)
return posixpath.realpath(joined)
return path | Modify a path so it fits expectations.
Avoid returning relative paths that start with '../' and possibly
return relative paths when output and cache directories match. | Below is the instruction that describes the task:
### Input:
Modify a path so it fits expectations.
Avoid returning relative paths that start with '../' and possibly
return relative paths when output and cache directories match.
### Response:
def output_path(self, path_name):
    """ Modify a path so it fits expectations.
    Avoid returning relative paths that start with '../' and possibly
    return relative paths when output and cache directories match.

    :param path_name: path already in posix format.
    :return: a normalized posix path (absolute, or relative to the
        output directory when output and cache coincide).
    """
    # make sure it is a valid posix format
    path = to_posix(path_name)
    # NOTE(review): assert is stripped under `python -O`; an explicit
    # raise would enforce this precondition unconditionally
    assert (path == path_name), "path_name passed to output_path must be in posix format"
    if posixpath.isabs(path):
        if self.output == self.cache:
            # worth seeing if an absolute path can be avoided
            path = posixpath.relpath(path, self.output)
        else:
            return posixpath.realpath(path)
    # a relative path escaping the output dir is re-anchored there
    if path.startswith('../'):
        joined = posixpath.join(self.output, path)
        return posixpath.realpath(joined)
    return path |
def _get_admin_info(command, host=None, core_name=None):
'''
PRIVATE METHOD
Calls the _http_request method and passes the admin command to execute
and stores the data. This data is fairly static but should be refreshed
periodically to make sure everything this OK. The data object will contain
the JSON response.
command : str
The admin command to execute.
host : str (None)
The solr host to query. __opts__['host'] is default
core_name: str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return: dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
'''
url = _format_url("admin/{0}".format(command), host, core_name=core_name)
resp = _http_request(url)
return resp | PRIVATE METHOD
Calls the _http_request method and passes the admin command to execute
and stores the data. This data is fairly static but should be refreshed
periodically to make sure everything this OK. The data object will contain
the JSON response.
command : str
The admin command to execute.
host : str (None)
The solr host to query. __opts__['host'] is default
core_name: str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return: dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list} | Below is the the instruction that describes the task:
### Input:
PRIVATE METHOD
Calls the _http_request method and passes the admin command to execute
and stores the data. This data is fairly static but should be refreshed
periodically to make sure everything this OK. The data object will contain
the JSON response.
command : str
The admin command to execute.
host : str (None)
The solr host to query. __opts__['host'] is default
core_name: str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return: dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
### Response:
def _get_admin_info(command, host=None, core_name=None):
'''
PRIVATE METHOD
Calls the _http_request method and passes the admin command to execute
and stores the data. This data is fairly static but should be refreshed
periodically to make sure everything this OK. The data object will contain
the JSON response.
command : str
The admin command to execute.
host : str (None)
The solr host to query. __opts__['host'] is default
core_name: str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return: dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
'''
url = _format_url("admin/{0}".format(command), host, core_name=core_name)
resp = _http_request(url)
return resp |
def verifyArguments(self, arg):
"""
Verifies that arguments to setInputs and setTargets are appropriately formatted.
"""
for l in arg:
if not type(l) == list and \
not type(l) == type(Numeric.array([0.0])) and \
not type(l) == tuple and \
not type(l) == dict:
return 0
if type(l) == dict:
for i in l:
if not type(i) == str and i not in list(self.layers.keys()):
return 0
else:
for i in l:
if not type(i) == float and not type(i) == int:
return 0
return 1 | Verifies that arguments to setInputs and setTargets are appropriately formatted. | Below is the the instruction that describes the task:
### Input:
Verifies that arguments to setInputs and setTargets are appropriately formatted.
### Response:
def verifyArguments(self, arg):
"""
Verifies that arguments to setInputs and setTargets are appropriately formatted.
"""
for l in arg:
if not type(l) == list and \
not type(l) == type(Numeric.array([0.0])) and \
not type(l) == tuple and \
not type(l) == dict:
return 0
if type(l) == dict:
for i in l:
if not type(i) == str and i not in list(self.layers.keys()):
return 0
else:
for i in l:
if not type(i) == float and not type(i) == int:
return 0
return 1 |
def capabilities(self, keyword=None):
"""CAPABILITIES command.
Determines the capabilities of the server.
Although RFC3977 states that this is a required command for servers to
implement not all servers do, so expect that NNTPPermanentError may be
raised when this command is issued.
See <http://tools.ietf.org/html/rfc3977#section-5.2>
Args:
keyword: Passed directly to the server, however, this is unused by
the server according to RFC3977.
Returns:
A list of capabilities supported by the server. The VERSION
capability is the first capability in the list.
"""
args = keyword
code, message = self.command("CAPABILITIES", args)
if code != 101:
raise NNTPReplyError(code, message)
return [x.strip() for x in self.info_gen(code, message)] | CAPABILITIES command.
Determines the capabilities of the server.
Although RFC3977 states that this is a required command for servers to
implement not all servers do, so expect that NNTPPermanentError may be
raised when this command is issued.
See <http://tools.ietf.org/html/rfc3977#section-5.2>
Args:
keyword: Passed directly to the server, however, this is unused by
the server according to RFC3977.
Returns:
A list of capabilities supported by the server. The VERSION
capability is the first capability in the list. | Below is the the instruction that describes the task:
### Input:
CAPABILITIES command.
Determines the capabilities of the server.
Although RFC3977 states that this is a required command for servers to
implement not all servers do, so expect that NNTPPermanentError may be
raised when this command is issued.
See <http://tools.ietf.org/html/rfc3977#section-5.2>
Args:
keyword: Passed directly to the server, however, this is unused by
the server according to RFC3977.
Returns:
A list of capabilities supported by the server. The VERSION
capability is the first capability in the list.
### Response:
def capabilities(self, keyword=None):
"""CAPABILITIES command.
Determines the capabilities of the server.
Although RFC3977 states that this is a required command for servers to
implement not all servers do, so expect that NNTPPermanentError may be
raised when this command is issued.
See <http://tools.ietf.org/html/rfc3977#section-5.2>
Args:
keyword: Passed directly to the server, however, this is unused by
the server according to RFC3977.
Returns:
A list of capabilities supported by the server. The VERSION
capability is the first capability in the list.
"""
args = keyword
code, message = self.command("CAPABILITIES", args)
if code != 101:
raise NNTPReplyError(code, message)
return [x.strip() for x in self.info_gen(code, message)] |
def _loadedges(self) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, float, np.ndarray]:
"""
Attempts to intelligently load the .mat file and take average of left and right edges
:return: left and right averages
:return: times for each column
:return: accept/reject for each column
:return: pixel-inch ratio
"""
data = sco.loadmat(self.filename)
datakeys = [k for k in data.keys()
if ('right' in k) or ('left' in k) or ('edge' in k)]
averagedata = ((data[datakeys[0]] + data[datakeys[1]]) / 2)
try:
times = (data['times'] - data['times'].min())[0]
except KeyError:
times = np.arange(len(data[datakeys[0]][0]))
try:
accept = data['accept']
except KeyError:
accept = np.zeros(len(times))
try:
ratio = data['ratio']
except KeyError:
ratio = 1
try:
viscosity = data['viscosity']
except KeyError:
viscosity = np.ones(len(times))
return averagedata, times, accept, ratio, viscosity | Attempts to intelligently load the .mat file and take average of left and right edges
:return: left and right averages
:return: times for each column
:return: accept/reject for each column
:return: pixel-inch ratio | Below is the the instruction that describes the task:
### Input:
Attempts to intelligently load the .mat file and take average of left and right edges
:return: left and right averages
:return: times for each column
:return: accept/reject for each column
:return: pixel-inch ratio
### Response:
def _loadedges(self) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, float, np.ndarray]:
"""
Attempts to intelligently load the .mat file and take average of left and right edges
:return: left and right averages
:return: times for each column
:return: accept/reject for each column
:return: pixel-inch ratio
"""
data = sco.loadmat(self.filename)
datakeys = [k for k in data.keys()
if ('right' in k) or ('left' in k) or ('edge' in k)]
averagedata = ((data[datakeys[0]] + data[datakeys[1]]) / 2)
try:
times = (data['times'] - data['times'].min())[0]
except KeyError:
times = np.arange(len(data[datakeys[0]][0]))
try:
accept = data['accept']
except KeyError:
accept = np.zeros(len(times))
try:
ratio = data['ratio']
except KeyError:
ratio = 1
try:
viscosity = data['viscosity']
except KeyError:
viscosity = np.ones(len(times))
return averagedata, times, accept, ratio, viscosity |
def pause(message='Press any key to continue . . . '):
"""
Prints the specified message if it's not None and waits for a keypress.
"""
if message is not None:
print(message, end='')
sys.stdout.flush()
getch()
print() | Prints the specified message if it's not None and waits for a keypress. | Below is the the instruction that describes the task:
### Input:
Prints the specified message if it's not None and waits for a keypress.
### Response:
def pause(message='Press any key to continue . . . '):
"""
Prints the specified message if it's not None and waits for a keypress.
"""
if message is not None:
print(message, end='')
sys.stdout.flush()
getch()
print() |
def colormesh(X, Y):
"""
Generates line paths for a quadmesh given 2D arrays of X and Y
coordinates.
"""
X1 = X[0:-1, 0:-1].ravel()
Y1 = Y[0:-1, 0:-1].ravel()
X2 = X[1:, 0:-1].ravel()
Y2 = Y[1:, 0:-1].ravel()
X3 = X[1:, 1:].ravel()
Y3 = Y[1:, 1:].ravel()
X4 = X[0:-1, 1:].ravel()
Y4 = Y[0:-1, 1:].ravel()
X = np.column_stack([X1, X2, X3, X4, X1])
Y = np.column_stack([Y1, Y2, Y3, Y4, Y1])
return X, Y | Generates line paths for a quadmesh given 2D arrays of X and Y
coordinates. | Below is the the instruction that describes the task:
### Input:
Generates line paths for a quadmesh given 2D arrays of X and Y
coordinates.
### Response:
def colormesh(X, Y):
"""
Generates line paths for a quadmesh given 2D arrays of X and Y
coordinates.
"""
X1 = X[0:-1, 0:-1].ravel()
Y1 = Y[0:-1, 0:-1].ravel()
X2 = X[1:, 0:-1].ravel()
Y2 = Y[1:, 0:-1].ravel()
X3 = X[1:, 1:].ravel()
Y3 = Y[1:, 1:].ravel()
X4 = X[0:-1, 1:].ravel()
Y4 = Y[0:-1, 1:].ravel()
X = np.column_stack([X1, X2, X3, X4, X1])
Y = np.column_stack([Y1, Y2, Y3, Y4, Y1])
return X, Y |
def get_item_notification_session(self, item_receiver):
"""Gets the notification session for notifications pertaining to item changes.
arg: item_receiver (osid.assessment.ItemReceiver): the item
receiver interface
return: (osid.assessment.ItemNotificationSession) - an
``ItemNotificationSession``
raise: NullArgument - ``item_receiver`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_item_notification()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_item_notification()`` is ``true``.*
"""
if not self.supports_item_notification():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ItemNotificationSession(runtime=self._runtime, receiver=item_receiver) | Gets the notification session for notifications pertaining to item changes.
arg: item_receiver (osid.assessment.ItemReceiver): the item
receiver interface
return: (osid.assessment.ItemNotificationSession) - an
``ItemNotificationSession``
raise: NullArgument - ``item_receiver`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_item_notification()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_item_notification()`` is ``true``.* | Below is the the instruction that describes the task:
### Input:
Gets the notification session for notifications pertaining to item changes.
arg: item_receiver (osid.assessment.ItemReceiver): the item
receiver interface
return: (osid.assessment.ItemNotificationSession) - an
``ItemNotificationSession``
raise: NullArgument - ``item_receiver`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_item_notification()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_item_notification()`` is ``true``.*
### Response:
def get_item_notification_session(self, item_receiver):
"""Gets the notification session for notifications pertaining to item changes.
arg: item_receiver (osid.assessment.ItemReceiver): the item
receiver interface
return: (osid.assessment.ItemNotificationSession) - an
``ItemNotificationSession``
raise: NullArgument - ``item_receiver`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_item_notification()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_item_notification()`` is ``true``.*
"""
if not self.supports_item_notification():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ItemNotificationSession(runtime=self._runtime, receiver=item_receiver) |
def parse_host_address(addr):
"""
parse host address to get domain name or ipv4/v6 address,
cidr prefix and net mask code string if given a subnet address
:param addr:
:type addr: str
:return: parsed domain name/ipv4 address/ipv6 address,
cidr prefix if there is,
net mask code string if there is
:rtype: (string, int, string)
"""
if addr.startswith('[') and addr.endswith(']'):
addr = addr[1:-1]
parts = addr.split('/')
if len(parts) == 1:
return parts[0], None, None
if len(parts) > 2:
raise ValueError("Illegal host address")
else:
domain_or_ip, prefix = parts
prefix = int(prefix)
if re.match(r"^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$", domain_or_ip):
return domain_or_ip, prefix, ipv4_prefix_to_mask(prefix)
elif ':' in domain_or_ip:
return domain_or_ip, prefix, ipv6_prefix_to_mask(prefix)
else:
return domain_or_ip, None, None | parse host address to get domain name or ipv4/v6 address,
cidr prefix and net mask code string if given a subnet address
:param addr:
:type addr: str
:return: parsed domain name/ipv4 address/ipv6 address,
cidr prefix if there is,
net mask code string if there is
:rtype: (string, int, string) | Below is the the instruction that describes the task:
### Input:
parse host address to get domain name or ipv4/v6 address,
cidr prefix and net mask code string if given a subnet address
:param addr:
:type addr: str
:return: parsed domain name/ipv4 address/ipv6 address,
cidr prefix if there is,
net mask code string if there is
:rtype: (string, int, string)
### Response:
def parse_host_address(addr):
"""
parse host address to get domain name or ipv4/v6 address,
cidr prefix and net mask code string if given a subnet address
:param addr:
:type addr: str
:return: parsed domain name/ipv4 address/ipv6 address,
cidr prefix if there is,
net mask code string if there is
:rtype: (string, int, string)
"""
if addr.startswith('[') and addr.endswith(']'):
addr = addr[1:-1]
parts = addr.split('/')
if len(parts) == 1:
return parts[0], None, None
if len(parts) > 2:
raise ValueError("Illegal host address")
else:
domain_or_ip, prefix = parts
prefix = int(prefix)
if re.match(r"^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$", domain_or_ip):
return domain_or_ip, prefix, ipv4_prefix_to_mask(prefix)
elif ':' in domain_or_ip:
return domain_or_ip, prefix, ipv6_prefix_to_mask(prefix)
else:
return domain_or_ip, None, None |
def get_volumes_for_container(self, container_id, skip_empty_source=True):
"""
get a list of volumes mounter in a container
:param container_id: str
:param skip_empty_source: bool, don't list volumes which are not created on FS
:return: list, a list of volume names
"""
logger.info("listing volumes for container '%s'", container_id)
inspect_output = self.d.inspect_container(container_id)
volumes = inspect_output['Mounts'] or []
volume_names = [x['Name'] for x in volumes]
if skip_empty_source:
# Don't show volumes which are not on the filesystem
volume_names = [x['Name'] for x in volumes if x['Source'] != ""]
logger.debug("volumes = %s", volume_names)
return volume_names | get a list of volumes mounter in a container
:param container_id: str
:param skip_empty_source: bool, don't list volumes which are not created on FS
:return: list, a list of volume names | Below is the the instruction that describes the task:
### Input:
get a list of volumes mounter in a container
:param container_id: str
:param skip_empty_source: bool, don't list volumes which are not created on FS
:return: list, a list of volume names
### Response:
def get_volumes_for_container(self, container_id, skip_empty_source=True):
"""
get a list of volumes mounter in a container
:param container_id: str
:param skip_empty_source: bool, don't list volumes which are not created on FS
:return: list, a list of volume names
"""
logger.info("listing volumes for container '%s'", container_id)
inspect_output = self.d.inspect_container(container_id)
volumes = inspect_output['Mounts'] or []
volume_names = [x['Name'] for x in volumes]
if skip_empty_source:
# Don't show volumes which are not on the filesystem
volume_names = [x['Name'] for x in volumes if x['Source'] != ""]
logger.debug("volumes = %s", volume_names)
return volume_names |
def get_context_data(self, **kwargs):
"""This add in the context of list_type and returns this as Alive."""
context = super(AnimalListAlive, self).get_context_data(**kwargs)
context['list_type'] = 'Alive'
return context | This add in the context of list_type and returns this as Alive. | Below is the the instruction that describes the task:
### Input:
This add in the context of list_type and returns this as Alive.
### Response:
def get_context_data(self, **kwargs):
"""This add in the context of list_type and returns this as Alive."""
context = super(AnimalListAlive, self).get_context_data(**kwargs)
context['list_type'] = 'Alive'
return context |
def _convert_to_db_tc_metadata(tc_metadata):
"""
Convert tc_metadata to match Opentmi metadata format
:param tc_metadata: metadata as dict
:return: converted metadata
"""
db_meta = copy.deepcopy(tc_metadata)
# tcid is a mandatory field, it should throw an error if it is missing
db_meta['tcid'] = db_meta['name']
del db_meta['name']
# Encapsulate current status inside dictionary
if 'status' in db_meta:
status = db_meta['status']
del db_meta['status']
db_meta['status'] = {'value': status}
# Node and dut information
if 'requirements' in db_meta:
db_meta['requirements']['node'] = {'count': 1}
try:
count = db_meta['requirements']['duts']['*']['count']
db_meta['requirements']['node']['count'] = count
except KeyError:
pass
# Collect and pack other info from meta
db_meta['other_info'] = {}
if 'title' in db_meta:
db_meta['other_info']['title'] = db_meta['title']
del db_meta['title']
if 'feature' in db_meta:
db_meta['other_info']['features'] = db_meta['feature']
del db_meta['feature']
else:
db_meta['other_info']['features'] = ['unknown']
if 'component' in db_meta:
db_meta['other_info']['components'] = db_meta["component"]
del db_meta['component']
return db_meta | Convert tc_metadata to match Opentmi metadata format
:param tc_metadata: metadata as dict
:return: converted metadata | Below is the the instruction that describes the task:
### Input:
Convert tc_metadata to match Opentmi metadata format
:param tc_metadata: metadata as dict
:return: converted metadata
### Response:
def _convert_to_db_tc_metadata(tc_metadata):
"""
Convert tc_metadata to match Opentmi metadata format
:param tc_metadata: metadata as dict
:return: converted metadata
"""
db_meta = copy.deepcopy(tc_metadata)
# tcid is a mandatory field, it should throw an error if it is missing
db_meta['tcid'] = db_meta['name']
del db_meta['name']
# Encapsulate current status inside dictionary
if 'status' in db_meta:
status = db_meta['status']
del db_meta['status']
db_meta['status'] = {'value': status}
# Node and dut information
if 'requirements' in db_meta:
db_meta['requirements']['node'] = {'count': 1}
try:
count = db_meta['requirements']['duts']['*']['count']
db_meta['requirements']['node']['count'] = count
except KeyError:
pass
# Collect and pack other info from meta
db_meta['other_info'] = {}
if 'title' in db_meta:
db_meta['other_info']['title'] = db_meta['title']
del db_meta['title']
if 'feature' in db_meta:
db_meta['other_info']['features'] = db_meta['feature']
del db_meta['feature']
else:
db_meta['other_info']['features'] = ['unknown']
if 'component' in db_meta:
db_meta['other_info']['components'] = db_meta["component"]
del db_meta['component']
return db_meta |
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data | Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}} | Below is the the instruction that describes the task:
### Input:
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
### Response:
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data |
def set_engine(cls, neweng):
''' Sets the given coralillo engine so the model uses it to communicate
with the redis database '''
assert isinstance(neweng, Engine), 'Provided object must be of class Engine'
if hasattr(cls, 'Meta'):
cls.Meta.engine = neweng
else:
class Meta:
engine = neweng
cls.Meta = Meta | Sets the given coralillo engine so the model uses it to communicate
with the redis database | Below is the the instruction that describes the task:
### Input:
Sets the given coralillo engine so the model uses it to communicate
with the redis database
### Response:
def set_engine(cls, neweng):
''' Sets the given coralillo engine so the model uses it to communicate
with the redis database '''
assert isinstance(neweng, Engine), 'Provided object must be of class Engine'
if hasattr(cls, 'Meta'):
cls.Meta.engine = neweng
else:
class Meta:
engine = neweng
cls.Meta = Meta |
def _request(self, method, url, **kwargs):
# type: (str, str, **Any) -> requests.Response
"""Perform the request on the API."""
self.last_request = None
self.last_response = self.session.request(method, url, auth=self.auth, headers=self.headers, **kwargs)
self.last_request = self.last_response.request
self.last_url = self.last_response.url
if self.last_response.status_code == requests.codes.forbidden:
raise ForbiddenError(self.last_response.json()['results'][0]['detail'])
return self.last_response | Perform the request on the API. | Below is the the instruction that describes the task:
### Input:
Perform the request on the API.
### Response:
def _request(self, method, url, **kwargs):
# type: (str, str, **Any) -> requests.Response
"""Perform the request on the API."""
self.last_request = None
self.last_response = self.session.request(method, url, auth=self.auth, headers=self.headers, **kwargs)
self.last_request = self.last_response.request
self.last_url = self.last_response.url
if self.last_response.status_code == requests.codes.forbidden:
raise ForbiddenError(self.last_response.json()['results'][0]['detail'])
return self.last_response |
def fit(self,y_true_cal=None, y_prob_cal=None):
""" If calibration, then train the calibration of probabilities
Parameters
----------
y_true_cal : array-like of shape = [n_samples], optional default = None
True class to be used for calibrating the probabilities
y_prob_cal : array-like of shape = [n_samples, 2], optional default = None
Predicted probabilities to be used for calibrating the probabilities
Returns
-------
self : object
Returns self.
"""
if self.calibration:
self.cal = ROCConvexHull()
self.cal.fit(y_true_cal, y_prob_cal[:, 1]) | If calibration, then train the calibration of probabilities
Parameters
----------
y_true_cal : array-like of shape = [n_samples], optional default = None
True class to be used for calibrating the probabilities
y_prob_cal : array-like of shape = [n_samples, 2], optional default = None
Predicted probabilities to be used for calibrating the probabilities
Returns
-------
self : object
Returns self. | Below is the the instruction that describes the task:
### Input:
If calibration, then train the calibration of probabilities
Parameters
----------
y_true_cal : array-like of shape = [n_samples], optional default = None
True class to be used for calibrating the probabilities
y_prob_cal : array-like of shape = [n_samples, 2], optional default = None
Predicted probabilities to be used for calibrating the probabilities
Returns
-------
self : object
Returns self.
### Response:
def fit(self,y_true_cal=None, y_prob_cal=None):
""" If calibration, then train the calibration of probabilities
Parameters
----------
y_true_cal : array-like of shape = [n_samples], optional default = None
True class to be used for calibrating the probabilities
y_prob_cal : array-like of shape = [n_samples, 2], optional default = None
Predicted probabilities to be used for calibrating the probabilities
Returns
-------
self : object
Returns self.
"""
if self.calibration:
self.cal = ROCConvexHull()
self.cal.fit(y_true_cal, y_prob_cal[:, 1]) |
def upgrade(name=None,
pkgs=None,
**kwargs):
'''
Run a full package upgrade (``pkg_add -u``), or upgrade a specific package
if ``name`` or ``pkgs`` is provided.
``name`` is ignored when ``pkgs`` is specified.
Returns a dictionary containing the changes:
.. versionadded:: 2019.2.0
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
salt '*' pkg.upgrade python%2.7
'''
old = list_pkgs()
cmd = ['pkg_add', '-Ix', '-u']
if kwargs.get('noop', False):
cmd.append('-n')
if pkgs:
cmd.extend(pkgs)
elif name:
cmd.append(name)
# Now run the upgrade, compare the list of installed packages before and
# after and we have all the info we need.
result = __salt__['cmd.run_all'](cmd, output_loglevel='trace',
python_shell=False)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if result['retcode'] != 0:
raise CommandExecutionError(
'Problem encountered upgrading packages',
info={'changes': ret, 'result': result}
)
return ret | Run a full package upgrade (``pkg_add -u``), or upgrade a specific package
if ``name`` or ``pkgs`` is provided.
``name`` is ignored when ``pkgs`` is specified.
Returns a dictionary containing the changes:
.. versionadded:: 2019.2.0
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
salt '*' pkg.upgrade python%2.7 | Below is the the instruction that describes the task:
### Input:
Run a full package upgrade (``pkg_add -u``), or upgrade a specific package
if ``name`` or ``pkgs`` is provided.
``name`` is ignored when ``pkgs`` is specified.
Returns a dictionary containing the changes:
.. versionadded:: 2019.2.0
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
salt '*' pkg.upgrade python%2.7
### Response:
def upgrade(name=None,
pkgs=None,
**kwargs):
'''
Run a full package upgrade (``pkg_add -u``), or upgrade a specific package
if ``name`` or ``pkgs`` is provided.
``name`` is ignored when ``pkgs`` is specified.
Returns a dictionary containing the changes:
.. versionadded:: 2019.2.0
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
salt '*' pkg.upgrade python%2.7
'''
old = list_pkgs()
cmd = ['pkg_add', '-Ix', '-u']
if kwargs.get('noop', False):
cmd.append('-n')
if pkgs:
cmd.extend(pkgs)
elif name:
cmd.append(name)
# Now run the upgrade, compare the list of installed packages before and
# after and we have all the info we need.
result = __salt__['cmd.run_all'](cmd, output_loglevel='trace',
python_shell=False)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if result['retcode'] != 0:
raise CommandExecutionError(
'Problem encountered upgrading packages',
info={'changes': ret, 'result': result}
)
return ret |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.