def get_info(self, symbol, as_of=None):
"""
Reads and returns information about the data stored for symbol
Parameters
----------
symbol : `str`
symbol name for the item
    as_of : `str` or `int` or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
Returns
-------
dictionary of the information (specific to the type of data)
"""
version = self._read_metadata(symbol, as_of=as_of, read_preference=None)
handler = self._read_handler(version, symbol)
if handler and hasattr(handler, 'get_info'):
return handler.get_info(version)
return {}
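
A hypothetical usage sketch of the three accepted as_of forms; `library` stands in for whatever object exposes get_info and is not from the source:

import datetime

library.get_info('EURUSD')                                       # latest version
library.get_info('EURUSD', as_of=3)                              # version number
library.get_info('EURUSD', as_of='end-of-2018')                  # snapshot name
library.get_info('EURUSD', as_of=datetime.datetime(2019, 1, 1))  # point in time
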
def set_installed_packages():
"""Idempotently caches the list of packages installed in the virtualenv.
Can be run safely before the virtualenv is created, and will be rerun
afterwards.
"""
global INSTALLED_PACKAGES, REQUIRED_VERSION
if INSTALLED_PACKAGES:
return
if os.path.exists(BIN_PYTHON):
pip = subprocess.Popen(
(BIN_PYTHON, '-m', 'pip', 'freeze'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(stdout, stderr) = pip.communicate()
pip.wait()
        freeze_lines = [r.decode() for r in stdout.split()]
        INSTALLED_PACKAGES = [line.split('==')[0].lower() for line in freeze_lines]
        # Match against the raw freeze lines; INSTALLED_PACKAGES has the
        # version specifiers stripped, so 'lore[!<>=]' could never match there.
        REQUIRED_VERSION = next((line for line in freeze_lines if re.match(r'^lore[!<>=]', line)), None)
if REQUIRED_VERSION:
REQUIRED_VERSION = re.split(r'[!<>=]', REQUIRED_VERSION)[-1]
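
A standalone sketch of how the `pip freeze` output is parsed into the two module globals; the freeze bytes below are made up for illustration:

import re

stdout = b"lore==0.8.2\nnumpy==1.16.4\npandas==0.24.2\n"
freeze_lines = [r.decode() for r in stdout.split()]
installed = [line.split('==')[0].lower() for line in freeze_lines]
required = next((line for line in freeze_lines
                 if re.match(r'^lore[!<>=]', line)), None)
version = re.split(r'[!<>=]', required)[-1] if required else None
print(installed)  # ['lore', 'numpy', 'pandas']
print(version)    # 0.8.2
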
def _writeStructureLink(self, link, fileObject, replaceParamFile):
    """
    Write a structure link to file.
    """
    def writeCard(card, value, asFloat=True):
        # Numeric values are written with six decimal places; replacement
        # variables (strings) fall back to plain string formatting.
        if asFloat:
            try:
                fileObject.write('%s %.6f\n' % (card, value))
                return
            except TypeError:
                pass
        fileObject.write('%s %s\n' % (card, value))

    fileObject.write('%s\n' % link.type)
    fileObject.write('NUMSTRUCTS %s\n' % link.numElements)

    # Write weirs to file
    for weir in link.weirs:
        fileObject.write('STRUCTTYPE %s\n' % weir.type)
        for card, value, asFloat in (
                ('CREST_LENGTH', weir.crestLength, True),
                ('CREST_LOW_ELEV', weir.crestLowElevation, True),
                ('DISCHARGE_COEFF_FORWARD', weir.dischargeCoeffForward, True),
                ('DISCHARGE_COEFF_REVERSE', weir.dischargeCoeffReverse, True),
                ('CREST_LOW_LOC', weir.crestLowLocation, False),
                ('STEEP_SLOPE', weir.steepSlope, True),
                ('SHALLOW_SLOPE', weir.shallowSlope, True)):
            if value is not None:
                # Check for replacement variables
                writeCard(card, vwp(value, replaceParamFile), asFloat)

    # Write culverts to file
    for culvert in link.culverts:
        fileObject.write('STRUCTTYPE %s\n' % culvert.type)
        for card, value in (
                ('UPINVERT', culvert.upstreamInvert),
                ('DOWNINVERT', culvert.downstreamInvert),
                ('INLET_DISCH_COEFF', culvert.inletDischargeCoeff),
                ('REV_FLOW_DISCH_COEFF', culvert.reverseFlowDischargeCoeff),
                ('SLOPE', culvert.slope),
                ('LENGTH', culvert.length),
                ('ROUGH_COEFF', culvert.roughness),
                ('DIAMETER', culvert.diameter),
                ('WIDTH', culvert.width),
                ('HEIGHT', culvert.height)):
            if value is not None:
                # Check for replacement variables
                writeCard(card, vwp(value, replaceParamFile))
def _get_top_states(saltenv='base'):
'''
Equivalent to a salt cli: salt web state.show_top
'''
    top_states = []
    returned = __salt__['state.show_top']()
    for state in returned[saltenv]:
        top_states.append(state)
    return top_states
def _select_word_cursor(self):
""" Selects the word under the mouse cursor. """
cursor = TextHelper(self.editor).word_under_mouse_cursor()
if (self._previous_cursor_start != cursor.selectionStart() and
self._previous_cursor_end != cursor.selectionEnd()):
self._remove_decoration()
self._add_decoration(cursor)
self._previous_cursor_start = cursor.selectionStart()
self._previous_cursor_end = cursor.selectionEnd()
def _part_cls_for(cls, content_type):
"""
Return the custom part class registered for *content_type*, or the
default part class if no custom class is registered for
*content_type*.
"""
if content_type in cls.part_type_for:
return cls.part_type_for[content_type]
return cls.default_part_type
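
A minimal self-contained sketch of the registry pattern this classmethod assumes; the part classes and content type are hypothetical:

class Part:
    """Default part class."""

class ImagePart(Part):
    """Hypothetical custom part registered for PNG content."""

class PartFactory:
    part_type_for = {'image/png': ImagePart}
    default_part_type = Part

    @classmethod
    def _part_cls_for(cls, content_type):
        if content_type in cls.part_type_for:
            return cls.part_type_for[content_type]
        return cls.default_part_type

assert PartFactory._part_cls_for('image/png') is ImagePart
assert PartFactory._part_cls_for('text/xml') is Part  # falls back to the default
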
def adult(display=False):
""" Return the Adult census data in a nice package. """
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_data = pd.read_csv(
cache(github_data_url + "adult.data"),
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
data = raw_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
data["Target"] = data["Target"] == " >50K"
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
data[k] = np.array([rcode[v.strip()] for v in data[k]])
else:
data[k] = data[k].cat.codes
if display:
return raw_data.drop(["Education", "Target", "fnlwgt"], axis=1), data["Target"].values
else:
return data.drop(["Target", "fnlwgt"], axis=1), data["Target"].values
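
Typical usage, assuming network access for the initial cached download:

X, y = adult()                               # integer-coded categoricals
X_display, y_display = adult(display=True)   # human-readable categories
print(X.shape, y.shape)
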
def setup(self):
"""Method runs the plugin"""
if self.dry_run is not True:
self.client = self._get_client()
self._disable_access_key()
def do_AUTOCOMPLETE(cmd, s):
"""Shows autocomplete results for a given token."""
s = list(preprocess_query(s))[0]
keys = [k.decode() for k in DB.smembers(edge_ngram_key(s))]
print(white(keys))
print(magenta('({} elements)'.format(len(keys))))
def needs_renewal(name, window=None):
    '''
    Check if a certificate needs renewal

    :param name: CommonName of cert
    :param window: Window in days to renew earlier, or True/'force' to just return True

    Code example:

    .. code-block:: python

        if __salt__['acme.needs_renewal']('dev.example.com'):
            __salt__['acme.cert']('dev.example.com', **kwargs)
        else:
            log.info('Your certificate is still good')
    '''
    if window is not None and (window is True or window in ('force', 'Force')):
return True
return _renew_by(name, window) <= datetime.datetime.today()
def replace_namespace_finalize(self, name, body, **kwargs): # noqa: E501
"""replace_namespace_finalize # noqa: E501
replace finalize of the specified Namespace # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespace_finalize(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Namespace (required)
:param V1Namespace body: (required)
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Namespace
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespace_finalize_with_http_info(name, body, **kwargs) # noqa: E501
else:
(data) = self.replace_namespace_finalize_with_http_info(name, body, **kwargs) # noqa: E501
return data
def cross_fade(self):
"""bool: The speaker's cross fade state.
True if enabled, False otherwise
"""
response = self.avTransport.GetCrossfadeMode([
('InstanceID', 0),
])
cross_fade_state = response['CrossfadeMode']
return bool(int(cross_fade_state))
def find_version(project, source, force_init): # type: (str, str, bool) ->None
"""
Entry point to just find a version and print next
:return:
"""
# quiet! no noise
file_opener = FileOpener()
finder = FindVersion(project, source, file_opener, force_init=force_init)
if finder.PROJECT is None:
raise TypeError("Next step will fail without project name")
if not finder.validate_current_versions():
# This is a failure.
logger.debug(unicode(finder.all_current_versions()))
logger.error("Versions not in sync, won't continue")
die(-1, "Versions not in sync, won't continue")
version = finder.find_any_valid_version()
if version:
print(finder.version_to_write(unicode(version)))
else:
logger.error("Failed to find version")
die(-1, "Failed to find version")
def _analyze_indexed_fields(indexed_fields):
"""Internal helper to check a list of indexed fields.
Args:
indexed_fields: A list of names, possibly dotted names.
(A dotted name is a string containing names separated by dots,
e.g. 'foo.bar.baz'. An undotted name is a string containing no
dots, e.g. 'foo'.)
Returns:
A dict whose keys are undotted names. For each undotted name in
the argument, the dict contains that undotted name as a key with
None as a value. For each dotted name in the argument, the dict
contains the first component as a key with a list of remainders as
values.
Example:
If the argument is ['foo.bar.baz', 'bar', 'foo.bletch'], the return
value is {'foo': ['bar.baz', 'bletch'], 'bar': None}.
Raises:
TypeError if an argument is not a string.
ValueError for duplicate arguments and for conflicting arguments
(when an undotted name also appears as the first component of
a dotted name).
"""
result = {}
for field_name in indexed_fields:
if not isinstance(field_name, basestring):
raise TypeError('Field names must be strings; got %r' % (field_name,))
if '.' not in field_name:
if field_name in result:
raise ValueError('Duplicate field name %s' % field_name)
result[field_name] = None
else:
head, tail = field_name.split('.', 1)
if head not in result:
result[head] = [tail]
elif result[head] is None:
raise ValueError('Field name %s conflicts with ancestor %s' %
(field_name, head))
else:
result[head].append(tail)
return result
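
Running the helper on the docstring's own example (the isinstance check above is Python 2 style; under Python 3 the defining module would need basestring = str):

result = _analyze_indexed_fields(['foo.bar.baz', 'bar', 'foo.bletch'])
print(result)  # {'foo': ['bar.baz', 'bletch'], 'bar': None}
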
def assert_allclose(actual, desired, rtol=1.e-5, atol=1.e-8,
err_msg='', verbose=True):
r"""wrapper for numpy.testing.allclose with default tolerances of
numpy.allclose. Needed since testing method has different values."""
return assert_allclose_np(actual, desired, rtol=rtol, atol=atol,
err_msg=err_msg, verbose=verbose)
def update_user(self, user_id, **kwargs):
"""Update user properties of specified user.
:param str user_id: The ID of the user to update (Required)
:param str username: The unique username of the user
:param str email: The unique email of the user
:param str full_name: The full name of the user
:param str password: The password string of the user.
:param str phone_number: Phone number of the user
:param bool terms_accepted: Is 'General Terms & Conditions' accepted
:param bool marketing_accepted: Is receiving marketing information accepted?
:returns: the updated user object
:rtype: User
"""
api = self._get_api(iam.AccountAdminApi)
user = User._create_request_map(kwargs)
body = iam.UserUpdateReq(**user)
return User(api.update_user(user_id, body))
def initialize_registry(args: argparse.Namespace, backend: StorageBackend, log: logging.Logger):
"""
Initialize the registry and the index.
:param args: :class:`argparse.Namespace` with "backend", "args", "force" and "log_level".
:param backend: Backend which is responsible for working with model files.
:param log: Logger supplied by supply_backend
    :return: None on success, 1 on failure.
"""
try:
backend.reset(args.force)
except ExistingBackendError:
return 1
log.info("Resetting the index ...")
backend.index.reset()
try:
backend.index.upload("reset", {})
except ValueError:
return 1
log.info("Successfully initialized")
def needs_to_run(G, target, in_mem_shas, from_store, settings):
"""
Determines if a target needs to run. This can happen in two ways:
(a) If a dependency of the target has changed
(b) If an output of the target is missing
    Args:
        G: The graph we are going to build
        target: The name of the target
        in_mem_shas: The dictionary of the current shas held in memory
        from_store: The dictionary of the shas from the shastore
        settings: The settings dictionary
    Returns:
        True if the target needs to be run
        False if not
    """
    force = settings["force"]
    sprint = settings["sprint"]
    if force:
sprint("Target rebuild is being forced so {} needs to run".format(target),
level="verbose")
return True
node_dict = get_the_node_dict(G, target)
if 'output' in node_dict:
for output in acts.get_all_outputs(node_dict):
if not os.path.isfile(output):
outstr = "Output file '{}' is missing so it needs to run"
sprint(outstr.format(output), level="verbose")
return True
if 'dependencies' not in node_dict:
# if it has no dependencies, it always needs to run
sprint("Target {} has no dependencies and needs to run".format(target),
level="verbose")
return True
for dep in node_dict['dependencies']:
# because the shas are updated after all targets build,
# its possible that the dependency's sha doesn't exist
# in the current "in_mem" dictionary. If this is the case,
# then the target needs to run
        if 'files' not in in_mem_shas or dep not in in_mem_shas['files']:
outstr = "Dep '{}' doesn't exist in memory so it needs to run"
sprint(outstr.format(dep), level="verbose")
return True
now_sha = in_mem_shas['files'][dep]['sha']
        if 'files' not in from_store or dep not in from_store['files']:
outst = "Dep '{}' doesn't exist in shastore so it needs to run"
sprint(outst.format(dep), level="verbose")
return True
old_sha = from_store['files'][dep]['sha']
if now_sha != old_sha:
outstr = "There's a mismatch for dep {} so it needs to run"
sprint(outstr.format(dep), level="verbose")
return True
sprint("Target '{}' doesn't need to run".format(target), level="verbose")
return False
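
For reference, a hedged sketch of the sha dictionaries this function compares; the paths and hashes are made up:

in_mem_shas = {
    'files': {
        'src/main.c': {'sha': 'ab12cd34'},
        'src/util.c': {'sha': 'ef56ab78'},
    }
}
from_store = {
    'files': {
        'src/main.c': {'sha': 'ab12cd34'},   # unchanged
        'src/util.c': {'sha': '00000000'},   # mismatch, so the target runs
    }
}
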
def filter_by_transcript_expression(
self,
transcript_expression_dict,
min_expression_value=0.0):
"""
Filters effects to those which have an associated transcript whose
expression value in the transcript_expression_dict argument is greater
than min_expression_value.
Parameters
----------
transcript_expression_dict : dict
Dictionary mapping Ensembl transcript IDs to expression estimates
(either FPKM or TPM)
min_expression_value : float
Threshold above which we'll keep an effect in the result collection
"""
return self.filter_above_threshold(
key_fn=lambda effect: effect.transcript_id,
value_dict=transcript_expression_dict,
threshold=min_expression_value)
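
Hypothetical usage on an effect collection, with made-up Ensembl transcript IDs and FPKM values:

fpkm = {
    'ENST00000335137': 12.5,
    'ENST00000423372': 0.0,
}
expressed_effects = effects.filter_by_transcript_expression(
    fpkm, min_expression_value=1.0)
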
def _color(str_, fore_color=None, back_color=None, styles=None):
""" Return the string wrapped with the appropriate styling escape sequences.
Args:
str_ (str): The string to be wrapped.
fore_color (str, optional): Any foreground color supported by the
`Colorama`_ module.
back_color (str, optional): Any background color supported by the
`Colorama`_ module.
styles (list of str, optional): Any styles supported by the `Colorama`_
module.
Returns:
str: The string styled with the appropriate escape sequences.
.. _Colorama:
https://pypi.python.org/pypi/colorama
"""
# TODO: Colorama is documented to work on Windows and trivial test case
# proves this to be the case, but it doesn't work in Rez. If the initialise
# is called in sec/rez/__init__.py then it does work, however as discussed
# in the following comment this is not always desirable. So until we can
    # work out why, we forcibly turn it off.
if not config.get("color_enabled", False) or platform_.name == "windows":
return str_
# lazily init colorama. This is important - we don't want to init at startup,
# because colorama prints a RESET_ALL character atexit. This in turn adds
# unexpected output when capturing the output of a command run in a
# ResolvedContext, for example.
_init_colorama()
colored = ""
if not styles:
styles = []
if fore_color:
colored += getattr(colorama.Fore, fore_color.upper(), '')
if back_color:
colored += getattr(colorama.Back, back_color.upper(), '')
for style in styles:
colored += getattr(colorama.Style, style.upper(), '')
return colored + str_ + colorama.Style.RESET_ALL
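
A hypothetical call, assuming the Colorama color names 'red' and 'white' and the style 'bright'; per the guard above, the string comes back unstyled on Windows or when color is disabled:

print(_color("resolve failed", fore_color="red",
             back_color="white", styles=["bright"]))
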
async def get_counts(self):
"""
see :class:`datasketch.MinHashLSH`.
"""
fs = (hashtable.itemcounts() for hashtable in self.hashtables)
return await asyncio.gather(*fs)
def replay_scope(self, sess):
"""Enters a replay scope that unsets it at the end."""
current_replay = self.replay(sess)
try:
self.set_replay(sess, True)
yield
finally:
self.set_replay(sess, current_replay)
def build_param_doc(arg_names, arg_types, arg_descs, remove_dup=True):
"""Build argument docs in python style.
arg_names : list of str
Argument names.
arg_types : list of str
Argument type information.
arg_descs : list of str
Argument description information.
remove_dup : boolean, optional
Whether remove duplication or not.
Returns
-------
docstr : str
Python docstring of parameter sections.
"""
param_keys = set()
param_str = []
for key, type_info, desc in zip(arg_names, arg_types, arg_descs):
if key in param_keys and remove_dup:
continue
if key == 'num_args':
continue
param_keys.add(key)
ret = '%s : %s' % (key, type_info)
if len(desc) != 0:
ret += '\n ' + desc
param_str.append(ret)
doc_str = ('Parameters\n' +
'----------\n' +
'%s\n')
doc_str = doc_str % ('\n'.join(param_str))
return doc_str
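
A quick check of the output format with made-up argument metadata:

doc = build_param_doc(
    ['data', 'num_hidden'],
    ['NDArray', 'int'],
    ['Input data to the operator.', 'Number of hidden units.'])
print(doc)
# Parameters
# ----------
# data : NDArray
#     Input data to the operator.
# num_hidden : int
#     Number of hidden units.
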
def invoke(invocation):
"""
    Find a Planner subclass corresponding to `invocation` and use it to invoke
the module.
:param Invocation invocation:
:returns:
Module return dict.
:raises ansible.errors.AnsibleError:
Unrecognized/unsupported module type.
"""
(invocation.module_path,
invocation.module_source) = get_module_data(invocation.module_name)
planner = _get_planner(invocation)
if invocation.wrap_async:
response = _invoke_async_task(invocation, planner)
elif planner.should_fork():
response = _invoke_isolated_task(invocation, planner)
else:
_propagate_deps(invocation, planner, invocation.connection.context)
response = invocation.connection.get_chain().call(
ansible_mitogen.target.run_module,
kwargs=planner.get_kwargs(),
)
return invocation.action._postprocess_response(response)
def _MakeExecutable(self, metadata_script):
"""Add executable permissions to a file.
Args:
metadata_script: string, the path to the executable file.
"""
mode = os.stat(metadata_script).st_mode
os.chmod(metadata_script, mode | stat.S_IEXEC)
def infer(self, sensationList, reset=True, objectName=None):
"""
Infer on a given set of sensations for a single object.
The provided sensationList is a list of sensations, and each sensation is a
mapping from cortical column to a tuple of three SDR's respectively
corresponding to the locationInput, the coarseSensorInput, and the
sensorInput.
For example, the input can look as follows, if we are inferring a simple
object with two sensations (with very few active bits for simplicity):
sensationList = [
{
# location, coarse feature, fine feature for CC0, sensation 1
0: ( [1, 5, 10], [9, 32, 75], [6, 12, 52] ),
# location, coarse feature, fine feature for CC1, sensation 1
1: ( [6, 2, 15], [11, 42, 92], [7, 11, 50] ),
},
{
# location, coarse feature, fine feature for CC0, sensation 2
0: ( [2, 9, 10], [10, 35, 78], [6, 12, 52] ),
# location, coarse feature, fine feature for CC1, sensation 2
1: ( [1, 4, 12], [10, 32, 52], [6, 10, 52] ),
},
]
If the object is known by the caller, an object name can be specified
as an optional argument, and must match the objects given while learning.
This is used later when evaluating inference statistics.
    Parameters:
    ----------------------------
    @param sensationList (list)
          Sensations to infer on, in the canonical format specified above
    @param reset (bool)
          If set to True (which is the default value), the network will
          be reset after inference.
    @param objectName (str)
          Name of the object (must match the names given during learning).
"""
self._unsetLearningMode()
statistics = collections.defaultdict(list)
if objectName is not None:
if objectName not in self.objectRepresentationsL2:
raise ValueError("The provided objectName was not given during"
" learning")
for sensations in sensationList:
# feed all columns with sensations
for col in xrange(self.numColumns):
location, coarseFeature, fineFeature = sensations[col]
self.locationInputs[col].addDataToQueue(list(location), 0, 0)
self.coarseSensors[col].addDataToQueue(list(coarseFeature), 0, 0)
self.sensors[col].addDataToQueue(list(fineFeature), 0, 0)
self.network.run(1)
self._updateInferenceStats(statistics, objectName)
if reset:
# send reset signal
self._sendReset()
# save statistics
statistics["numSteps"] = len(sensationList)
statistics["object"] = objectName if objectName is not None else "Unknown"
self.statistics.append(statistics)
def add_item(self, api_token, content, **kwargs):
"""Add a task to a project.
    :param api_token: The user's login token.
    :type api_token: str
:param content: The task description.
:type content: str
:param project_id: The project to add the task to. Default is ``Inbox``
:type project_id: str
:param date_string: The deadline date for the task.
:type date_string: str
:param priority: The task priority ``(1-4)``.
:type priority: int
:param indent: The task indentation ``(1-4)``.
:type indent: int
:param item_order: The task order.
:type item_order: int
:param children: A list of child tasks IDs.
:type children: str
:param labels: A list of label IDs.
:type labels: str
:param assigned_by_uid: The ID of the user who assigns current task.
Accepts 0 or any user id from the list of project collaborators.
If value is unset or invalid it will automatically be set up by
your uid.
:type assigned_by_uid: str
:param responsible_uid: The id of user who is responsible for
accomplishing the current task. Accepts 0 or any user id from
the list of project collaborators. If the value is unset or
invalid it will automatically be set to null.
:type responsible_uid: str
:param note: Content of a note to add.
:type note: str
:return: The HTTP response to the request.
:rtype: :class:`requests.Response`
>>> from pytodoist.api import TodoistAPI
>>> api = TodoistAPI()
>>> response = api.login('john.doe@gmail.com', 'password')
>>> user_info = response.json()
>>> user_api_token = user_info['token']
>>> response = api.add_item(user_api_token, 'Install PyTodoist')
>>> task = response.json()
>>> print(task['content'])
Install PyTodoist
"""
params = {
'token': api_token,
'content': content
}
return self._post('add_item', params, **kwargs)
def write_question(self, question):
"""Writes a question to the packet"""
self.write_name(question.name)
self.write_short(question.type)
self.write_short(question.clazz)
def get_singlesig_privkey(privkey_info, blockchain='bitcoin', **blockchain_opts):
"""
Given a private key bundle, get the (single) private key
"""
if blockchain == 'bitcoin':
return btc_get_singlesig_privkey(privkey_info, **blockchain_opts)
else:
raise ValueError('Unknown blockchain "{}"'.format(blockchain))
def insert(self, rectangle):
"""
Insert a rectangle into the bin.
Parameters
-------------
rectangle: (2,) float, size of rectangle to insert
"""
rectangle = np.asanyarray(rectangle, dtype=np.float64)
for child in self.child:
if child is not None:
attempt = child.insert(rectangle)
if attempt is not None:
return attempt
if self.occupied:
return None
# compare the bin size to the insertion candidate size
size_test = self.extents - rectangle
# this means the inserted rectangle is too big for the cell
if np.any(size_test < -tol.zero):
return None
# since the cell is big enough for the current rectangle, either it
# is going to be inserted here, or the cell is going to be split
# either way, the cell is now occupied.
self.occupied = True
# this means the inserted rectangle fits perfectly
# since we already checked to see if it was negative, no abs is needed
if np.all(size_test < tol.zero):
return self.bounds[0:2]
# since the rectangle fits but the empty space is too big,
# we need to create some children to insert into
# first, we decide which way to split
vertical = size_test[0] > size_test[1]
length = rectangle[int(not vertical)]
child_bounds = self.split(length, vertical)
self.child[0] = RectangleBin(bounds=child_bounds[0])
self.child[1] = RectangleBin(bounds=child_bounds[1])
return self.child[0].insert(rectangle)
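
Hypothetical usage, assuming RectangleBin is constructed from [minx, miny, maxx, maxy] bounds; insert returns the lower-left offset of the placed rectangle, or None when it does not fit:

import numpy as np

root = RectangleBin(bounds=[0.0, 0.0, 10.0, 10.0])
for size in [(4.0, 3.0), (5.0, 5.0), (20.0, 1.0)]:
    offset = root.insert(np.array(size))
    print(size, '->', offset)  # the (20.0, 1.0) rectangle is too wide and yields None
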
def insert_base_bank_options(parser):
"""
Adds essential common options for template bank generation to an
ArgumentParser instance.
"""
def match_type(s):
err_msg = "must be a number between 0 and 1 excluded, not %r" % s
try:
value = float(s)
except ValueError:
raise argparse.ArgumentTypeError(err_msg)
if value <= 0 or value >= 1:
raise argparse.ArgumentTypeError(err_msg)
return value
parser.add_argument(
'-m', '--min-match', type=match_type, required=True,
help="Generate bank with specified minimum match. Required.")
parser.add_argument(
'-O', '--output-file', required=True,
help="Output file name. Required.")
parser.add_argument('--f-low-column', type=str, metavar='NAME',
help='If given, store the lower frequency cutoff into '
'column NAME of the single-inspiral table.')
def _fix_dot_imports(not_consumed):
""" Try to fix imports with multiple dots, by returning a dictionary
with the import names expanded. The function unflattens root imports,
like 'xml' (when we have both 'xml.etree' and 'xml.sax'), to 'xml.etree'
and 'xml.sax' respectively.
"""
# TODO: this should be improved in issue astroid #46
names = {}
for name, stmts in not_consumed.items():
if any(
isinstance(stmt, astroid.AssignName)
and isinstance(stmt.assign_type(), astroid.AugAssign)
for stmt in stmts
):
continue
for stmt in stmts:
if not isinstance(stmt, (astroid.ImportFrom, astroid.Import)):
continue
for imports in stmt.names:
second_name = None
import_module_name = imports[0]
if import_module_name == "*":
# In case of wildcard imports,
# pick the name from inside the imported module.
second_name = name
else:
name_matches_dotted_import = False
if (
import_module_name.startswith(name)
and import_module_name.find(".") > -1
):
name_matches_dotted_import = True
if name_matches_dotted_import or name in imports:
# Most likely something like 'xml.etree',
# which will appear in the .locals as 'xml'.
# Only pick the name if it wasn't consumed.
second_name = import_module_name
if second_name and second_name not in names:
names[second_name] = stmt
return sorted(names.items(), key=lambda a: a[1].fromlineno)
def wait_for_net(self, timeout=None, wait_polling_interval=None):
"""Wait for the device to be assigned an IP address.
:param timeout: Maximum time to wait for an IP address to be defined
:param wait_polling_interval: Interval at which to poll for ip address.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for network connection")
poll_wait(self.get_ip_address, timeout=timeout)
def _send_request(self, request, **kwargs):
"""Send GET request."""
log.debug(LOG_CHECK, "Send request %s with %s", request, kwargs)
log.debug(LOG_CHECK, "Request headers %s", request.headers)
self.url_connection = self.session.send(request, **kwargs)
self.headers = self.url_connection.headers
self._add_ssl_info()
def setup_argument_parser():
"""Setup command line parser"""
# Fix help formatter width
if 'COLUMNS' not in os.environ:
os.environ['COLUMNS'] = str(shutil.get_terminal_size().columns)
parser = argparse.ArgumentParser(
prog='soapy_power',
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Obtain a power spectrum from SoapySDR devices',
add_help=False
)
    # Fix recognition of optional arguments of type float_with_multiplier
parser._negative_number_matcher = re_float_with_multiplier_negative
main_title = parser.add_argument_group('Main options')
main_title.add_argument('-h', '--help', action='help',
help='show this help message and exit')
main_title.add_argument('-f', '--freq', metavar='Hz|Hz:Hz', type=freq_or_freq_range, default='1420405752',
help='center frequency or frequency range to scan, number '
'can be followed by a k, M or G multiplier (default: %(default)s)')
output_group = main_title.add_mutually_exclusive_group()
output_group.add_argument('-O', '--output', metavar='FILE', type=argparse.FileType('w'), default=sys.stdout,
help='output to file (incompatible with --output-fd, default is stdout)')
output_group.add_argument('--output-fd', metavar='NUM', type=int, default=None,
help='output to existing file descriptor (incompatible with -O)')
main_title.add_argument('-F', '--format', choices=sorted(writer.formats.keys()), default='rtl_power',
help='output format (default: %(default)s)')
main_title.add_argument('-q', '--quiet', action='store_true',
help='limit verbosity')
main_title.add_argument('--debug', action='store_true',
help='detailed debugging messages')
main_title.add_argument('--detect', action='store_true',
help='detect connected SoapySDR devices and exit')
main_title.add_argument('--info', action='store_true',
help='show info about selected SoapySDR device and exit')
main_title.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
bins_title = parser.add_argument_group('FFT bins')
bins_group = bins_title.add_mutually_exclusive_group()
bins_group.add_argument('-b', '--bins', type=int, default=512,
help='number of FFT bins (incompatible with -B, default: %(default)s)')
bins_group.add_argument('-B', '--bin-size', metavar='Hz', type=float_with_multiplier,
help='bin size in Hz (incompatible with -b)')
spectra_title = parser.add_argument_group('Averaging')
spectra_group = spectra_title.add_mutually_exclusive_group()
spectra_group.add_argument('-n', '--repeats', type=int, default=1600,
help='number of spectra to average (incompatible with -t and -T, default: %(default)s)')
spectra_group.add_argument('-t', '--time', metavar='SECONDS', type=float,
help='integration time (incompatible with -T and -n)')
spectra_group.add_argument('-T', '--total-time', metavar='SECONDS', type=float,
help='total integration time of all hops (incompatible with -t and -n)')
runs_title = parser.add_argument_group('Measurements')
runs_group = runs_title.add_mutually_exclusive_group()
runs_group.add_argument('-c', '--continue', dest='endless', action='store_true',
help='repeat the measurement endlessly (incompatible with -u and -e)')
runs_group.add_argument('-u', '--runs', type=int, default=1,
help='number of measurements (incompatible with -c and -e, default: %(default)s)')
runs_group.add_argument('-e', '--elapsed', metavar='SECONDS', type=float,
help='scan session duration (time limit in seconds, incompatible with -c and -u)')
device_title = parser.add_argument_group('Device settings')
device_title.add_argument('-d', '--device', default='',
help='SoapySDR device to use')
device_title.add_argument('-C', '--channel', type=int, default=0,
help='SoapySDR RX channel (default: %(default)s)')
device_title.add_argument('-A', '--antenna', default='',
help='SoapySDR selected antenna')
device_title.add_argument('-r', '--rate', metavar='Hz', type=float_with_multiplier, default=2e6,
help='sample rate (default: %(default)s)')
device_title.add_argument('-w', '--bandwidth', metavar='Hz', type=float_with_multiplier, default=0,
help='filter bandwidth (default: %(default)s)')
device_title.add_argument('-p', '--ppm', type=int, default=0,
help='frequency correction in ppm')
gain_group = device_title.add_mutually_exclusive_group()
gain_group.add_argument('-g', '--gain', metavar='dB', type=float, default=37.2,
help='total gain (incompatible with -G and -a, default: %(default)s)')
gain_group.add_argument('-G', '--specific-gains', metavar='STRING', type=specific_gains, default='',
                            help='specific gains of individual amplification elements '
                                 '(incompatible with -g and -a, example: LNA=28,VGA=12,AMP=0)')
gain_group.add_argument('-a', '--agc', action='store_true',
help='enable Automatic Gain Control (incompatible with -g and -G)')
device_title.add_argument('--lnb-lo', metavar='Hz', type=float_with_multiplier, default=0,
help='LNB LO frequency, negative for upconverters (default: %(default)s)')
device_title.add_argument('--device-settings', metavar='STRING', type=device_settings, default='',
help='SoapySDR device settings (example: biastee=true)')
device_title.add_argument('--force-rate', action='store_true',
help='ignore list of sample rates provided by device and allow any value')
device_title.add_argument('--force-bandwidth', action='store_true',
help='ignore list of filter bandwidths provided by device and allow any value')
device_title.add_argument('--tune-delay', metavar='SECONDS', type=float, default=0,
help='time to delay measurement after changing frequency (to avoid artifacts)')
device_title.add_argument('--reset-stream', action='store_true',
help='reset streaming after changing frequency (to avoid artifacts)')
crop_title = parser.add_argument_group('Crop')
crop_group = crop_title.add_mutually_exclusive_group()
crop_group.add_argument('-o', '--overlap', metavar='PERCENT', type=float, default=0,
help='percent of overlap when frequency hopping (incompatible with -k)')
crop_group.add_argument('-k', '--crop', metavar='PERCENT', type=float, default=0,
help='percent of crop when frequency hopping (incompatible with -o)')
perf_title = parser.add_argument_group('Performance options')
perf_title.add_argument('-s', '--buffer-size', type=int, default=0,
help='base buffer size (number of samples, 0 = auto, default: %(default)s)')
perf_title.add_argument('-S', '--max-buffer-size', type=int, default=0,
help='maximum buffer size (number of samples, -1 = unlimited, 0 = auto, default: %(default)s)')
fft_rules_group = perf_title.add_mutually_exclusive_group()
fft_rules_group.add_argument('--even', action='store_true',
help='use only even numbers of FFT bins')
fft_rules_group.add_argument('--pow2', action='store_true',
help='use only powers of 2 as number of FFT bins')
perf_title.add_argument('--max-threads', metavar='NUM', type=int, default=0,
help='maximum number of PSD threads (0 = auto, default: %(default)s)')
perf_title.add_argument('--max-queue-size', metavar='NUM', type=int, default=0,
help='maximum size of PSD work queue (-1 = unlimited, 0 = auto, default: %(default)s)')
perf_title.add_argument('--no-pyfftw', action='store_true',
help='don\'t use pyfftw library even if it is available (use scipy.fftpack or numpy.fft)')
other_title = parser.add_argument_group('Other options')
other_title.add_argument('-l', '--linear', action='store_true',
help='linear power values instead of logarithmic')
other_title.add_argument('-R', '--remove-dc', action='store_true',
help='interpolate central point to cancel DC bias (useful only with boxcar window)')
other_title.add_argument('-D', '--detrend', choices=['none', 'constant'], default='none',
help='remove mean value from data to cancel DC bias (default: %(default)s)')
other_title.add_argument('--fft-window', choices=['boxcar', 'hann', 'hamming', 'blackman', 'bartlett', 'kaiser', 'tukey'],
default='hann', help='Welch\'s method window function (default: %(default)s)')
other_title.add_argument('--fft-window-param', metavar='FLOAT', type=float, default=None,
help='shape parameter of window function (required for kaiser and tukey windows)')
other_title.add_argument('--fft-overlap', metavar='PERCENT', type=float, default=50,
help='Welch\'s method overlap between segments (default: %(default)s)')
return parser
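
A sketch of driving the parser programmatically; the argument values are illustrative:

parser = setup_argument_parser()
args = parser.parse_args(['-f', '88M:108M', '-b', '1024', '-t', '1'])
print(args.freq, args.bins, args.time)
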
def transform_file(ELEMS, ofname, EPO, TREE, opt):
"transform/map the elements of this file and dump the output on 'ofname'"
BED4_FRM = "%s\t%d\t%d\t%s\n"
log.info("%s (%d) elements ..." % (opt.screen and "screening" or "transforming", ELEMS.shape[0]))
with open(ofname, 'w') as out_fd:
if opt.screen:
for elem in ELEMS.flat:
matching_blocks = [attrgetter("value")(_) for _ in TREE.find(elem['chrom'], elem['start'], elem['end'])]
assert set( matching_blocks ) <= set( EPO.keys() )
if matching_blocks:
out_fd.write(BED4_FRM % elem)
else:
for chrom in set( ELEMS['chrom'] ):
transform_by_chrom(EPO,
ELEMS[ELEMS['chrom'] == chrom],
TREE, chrom, opt, out_fd)
log.info("DONE!")
def _sorted_resource_labels(labels):
"""Sort label names, putting well-known resource labels first."""
head = [label for label in TOP_RESOURCE_LABELS if label in labels]
tail = sorted(label for label in labels if label not in TOP_RESOURCE_LABELS)
return head + tail
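
With a hypothetical TOP_RESOURCE_LABELS of ('project_id', 'zone'), the well-known labels lead and the rest follow alphabetically:

TOP_RESOURCE_LABELS = ('project_id', 'zone')  # hypothetical module constant

labels = ['zone', 'custom_b', 'project_id', 'custom_a']
print(_sorted_resource_labels(labels))
# ['project_id', 'zone', 'custom_a', 'custom_b']
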
def db_file(self, value):
""" Setter for _db_file attribute """
assert not os.path.isfile(value), "%s already exists" % value
self._db_file = value
def get_or_create(
cls, name: sym.Symbol, module: types.ModuleType = None
) -> "Namespace":
"""Get the namespace bound to the symbol `name` in the global namespace
cache, creating it if it does not exist.
Return the namespace."""
return cls._NAMESPACES.swap(Namespace.__get_or_create, name, module=module)[
name
]
def emit_db_sequence_updates(engine):
"""Set database sequence objects to match the source db
Relevant only when generated from SQLAlchemy connection.
Needed to avoid subsequent unique key violations after DB build."""
if engine and engine.name == 'postgresql':
# not implemented for other RDBMS; necessity unknown
conn = engine.connect()
qry = """SELECT 'SELECT last_value FROM ' || n.nspname ||
'.' || c.relname || ';' AS qry,
n.nspname || '.' || c.relname AS qual_name
FROM pg_namespace n
JOIN pg_class c ON (n.oid = c.relnamespace)
WHERE c.relkind = 'S'"""
        for (seq_qry, qual_name) in list(conn.execute(qry)):
            (lastval, ) = conn.execute(seq_qry).first()
nextval = int(lastval) + 1
yield "ALTER SEQUENCE %s RESTART WITH %s;" % (qual_name, nextval)
def find_by_b64ids(self, _ids, **kwargs):
"""
Pass me a list of base64-encoded ObjectId
"""
return self.find_by_ids([ObjectId(base64.b64decode(_id)) for _id in _ids], **kwargs)
def get_or_create_edge(self,
source: Node,
target: Node,
relation: str,
bel: str,
sha512: str,
data: EdgeData,
evidence: Optional[Evidence] = None,
annotations: Optional[List[NamespaceEntry]] = None,
properties: Optional[List[Property]] = None,
) -> Edge:
"""Create an edge if it does not exist, or return it if it does.
:param source: Source node of the relation
:param target: Target node of the relation
:param relation: Type of the relation between source and target node
:param bel: BEL statement that describes the relation
:param sha512: The SHA512 hash of the edge as a string
:param data: The PyBEL data dictionary
:param Evidence evidence: Evidence object that proves the given relation
:param properties: List of all properties that belong to the edge
:param annotations: List of all annotations that belong to the edge
"""
if sha512 in self.object_cache_edge:
edge = self.object_cache_edge[sha512]
self.session.add(edge)
return edge
edge = self.get_edge_by_hash(sha512)
if edge is not None:
self.object_cache_edge[sha512] = edge
return edge
edge = Edge(
source=source,
target=target,
relation=relation,
bel=bel,
sha512=sha512,
data=json.dumps(data),
)
if evidence is not None:
edge.evidence = evidence
if properties is not None:
edge.properties = properties
if annotations is not None:
edge.annotations = annotations
self.session.add(edge)
self.object_cache_edge[sha512] = edge
return edge
def execute(self, proxy, method, args):
"""Execute given XMLRPC call."""
try:
result = getattr(proxy, method)(raw_xml=self.options.xml, *tuple(args))
except xmlrpc.ERRORS as exc:
self.LOG.error("While calling %s(%s): %s" % (method, ", ".join(repr(i) for i in args), exc))
self.return_code = error.EX_NOINPUT if "not find" in getattr(exc, "faultString", "") else error.EX_DATAERR
else:
if not self.options.quiet:
if self.options.repr:
# Pretty-print if requested, or it's a collection and not a scalar
result = pformat(result)
elif hasattr(result, "__iter__"):
result = '\n'.join(i if isinstance(i, basestring) else pformat(i) for i in result)
print(fmt.to_console(result))
def reload(self):
"""Reload the metadata for this instance.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_reload_instance]
:end-before: [END bigtable_reload_instance]
"""
instance_pb = self._client.instance_admin_client.get_instance(self.name)
# NOTE: _update_from_pb does not check that the project and
# instance ID on the response match the request.
self._update_from_pb(instance_pb)
def _attempt_command_recovery(command, ack, serial_conn):
    '''Recovery after a failed write_and_return() attempt'''
with serial_with_temp_timeout(serial_conn, RECOVERY_TIMEOUT) as device:
response = _write_to_device_and_return(command, ack, device)
if response is None:
log.debug("No valid response during _attempt_command_recovery")
raise RuntimeError(
"Recovery attempted - no valid serial response "
"for command: {} in {} seconds".format(
command.encode(), RECOVERY_TIMEOUT))
return response
def save_report_to_html(self):
"""Save report in the dock to html."""
html = self.page().mainFrame().toHtml()
if self.report_path is not None:
html_to_file(html, self.report_path)
else:
msg = self.tr('report_path is not set')
raise InvalidParameterError(msg)
def create_queue(self, queue_name, metadata=None, fail_on_exist=False, timeout=None):
'''
Creates a queue under the given account.
:param str queue_name:
The name of the queue to create. A queue name must be from 3 through
63 characters long and may only contain lowercase letters, numbers,
and the dash (-) character. The first and last letters in the queue
must be alphanumeric. The dash (-) character cannot be the first or
last character. Consecutive dash characters are not permitted in the
queue name.
:param metadata:
A dict containing name-value pairs to associate with the queue as
metadata. Note that metadata names preserve the case with which they
were created, but are case-insensitive when set or read.
:type metadata: a dict mapping str to str
:param bool fail_on_exist:
Specifies whether to throw an exception if the queue already exists.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A boolean indicating whether the queue was created. If fail_on_exist
was set to True, this will throw instead of returning false.
:rtype: bool
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = _get_path(queue_name)
request.query = [('timeout', _int_to_str(timeout))]
request.headers = [('x-ms-meta-name-values', metadata)]
if not fail_on_exist:
try:
response = self._perform_request(request)
if response.status == _HTTP_RESPONSE_NO_CONTENT:
return False
return True
except AzureHttpError as ex:
_dont_fail_on_exist(ex)
return False
else:
response = self._perform_request(request)
if response.status == _HTTP_RESPONSE_NO_CONTENT:
raise AzureConflictHttpError(
_ERROR_CONFLICT.format(response.message), response.status)
return True
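
Hypothetical usage on a queue service instance; client construction varies by SDK version:

created = queue_service.create_queue(
    'my-task-queue',
    metadata={'owner': 'etl'},
    fail_on_exist=False,
    timeout=30)
print(created)  # False if the queue already existed
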
|
Creates a queue under the given account.
:param str queue_name:
The name of the queue to create. A queue name must be from 3 through
63 characters long and may only contain lowercase letters, numbers,
and the dash (-) character. The first and last letters in the queue
must be alphanumeric. The dash (-) character cannot be the first or
last character. Consecutive dash characters are not permitted in the
queue name.
:param metadata:
A dict containing name-value pairs to associate with the queue as
metadata. Note that metadata names preserve the case with which they
were created, but are case-insensitive when set or read.
:type metadata: a dict mapping str to str
:param bool fail_on_exist:
Specifies whether to throw an exception if the queue already exists.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A boolean indicating whether the queue was created. If fail_on_exist
was set to True, this will throw instead of returning false.
:rtype: bool
|
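A hedged usage sketch for create_queue; queue_service stands for an authenticated instance of the service class this method belongs to, and the queue name and metadata are hypothetical:
# Hypothetical usage; `queue_service` is an authenticated service instance.
created = queue_service.create_queue('task-queue', metadata={'category': 'jobs'})
if not created:
    print('queue already existed')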
def merging_cli(debug=False):
"""
    Simple command-line interface of the merging module.
This function is called when you use the ``discoursegraphs`` application
directly on the command line.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--tiger-file',
help='TigerXML (syntax) file to be merged')
parser.add_argument('-r', '--rst-file',
help='RS3 (rhetorical structure) file to be merged')
parser.add_argument('-a', '--anaphoricity-file',
help='anaphoricity file to be merged')
parser.add_argument('-c', '--conano-file',
help='conano file to be merged')
parser.add_argument('-m', '--mmax-file',
help='MMAX2 file to be merged')
parser.add_argument(
'-o', '--output-format', default='dot',
help=('output format: brackets, brat, dot, pickle, geoff, gexf, graphml, '
'neo4j, exmaralda, conll, paula, no-output'))
parser.add_argument('output_file', nargs='?', default=sys.stdout)
args = parser.parse_args(sys.argv[1:])
for filepath in (args.tiger_file, args.rst_file, args.anaphoricity_file,
args.conano_file):
if filepath: # if it was specified on the command line
assert os.path.isfile(filepath), \
"File '{}' doesn't exist".format(filepath)
# create an empty document graph. merge it with other graphs later on.
discourse_docgraph = DiscourseDocumentGraph()
if args.tiger_file:
from discoursegraphs.readwrite.tiger import TigerDocumentGraph
tiger_docgraph = TigerDocumentGraph(args.tiger_file)
discourse_docgraph.merge_graphs(tiger_docgraph)
if args.rst_file:
rst_graph = dg.read_rs3(args.rst_file)
discourse_docgraph.merge_graphs(rst_graph)
if args.anaphoricity_file:
from discoursegraphs.readwrite import AnaphoraDocumentGraph
anaphora_graph = AnaphoraDocumentGraph(args.anaphoricity_file)
discourse_docgraph.merge_graphs(anaphora_graph)
# the anaphora doc graph only contains trivial edges from its root
# node.
try:
discourse_docgraph.remove_node('anaphoricity:root_node')
except networkx.NetworkXError as e: # ignore if the node doesn't exist
pass
if args.conano_file:
from discoursegraphs.readwrite import ConanoDocumentGraph
conano_graph = ConanoDocumentGraph(args.conano_file)
discourse_docgraph.merge_graphs(conano_graph)
if args.mmax_file:
from discoursegraphs.readwrite import MMAXDocumentGraph
mmax_graph = MMAXDocumentGraph(args.mmax_file)
discourse_docgraph.merge_graphs(mmax_graph)
if isinstance(args.output_file, str): # if we're not piping to stdout ...
# we need abspath to handle files in the current directory
path_to_output_file = \
os.path.dirname(os.path.abspath(args.output_file))
if not os.path.isdir(path_to_output_file):
create_dir(path_to_output_file)
if args.output_format == 'dot':
write_dot(discourse_docgraph, args.output_file)
elif args.output_format == 'brat':
dg.write_brat(discourse_docgraph, args.output_file)
elif args.output_format == 'brackets':
dg.write_brackets(discourse_docgraph, args.output_file)
elif args.output_format == 'pickle':
import cPickle as pickle
with open(args.output_file, 'wb') as pickle_file:
pickle.dump(discourse_docgraph, pickle_file)
elif args.output_format in ('geoff', 'neo4j'):
from discoursegraphs.readwrite.neo4j import write_geoff
write_geoff(discourse_docgraph, args.output_file)
print '' # this is just cosmetic for stdout
elif args.output_format == 'gexf':
dg.write_gexf(discourse_docgraph, args.output_file)
elif args.output_format == 'graphml':
dg.write_graphml(discourse_docgraph, args.output_file)
elif args.output_format == 'exmaralda':
from discoursegraphs.readwrite.exmaralda import write_exb
write_exb(discourse_docgraph, args.output_file)
elif args.output_format == 'conll':
from discoursegraphs.readwrite.conll import write_conll
write_conll(discourse_docgraph, args.output_file)
elif args.output_format == 'paula':
from discoursegraphs.readwrite.paulaxml.paula import write_paula
write_paula(discourse_docgraph, args.output_file)
elif args.output_format == 'no-output':
pass # just testing if the merging works
else:
raise ValueError(
"Unsupported output format: {}".format(args.output_format))
if debug:
print "Merged successfully: ", args.tiger_file
|
Simple command-line interface of the merging module.
This function is called when you use the ``discoursegraphs`` application
directly on the command line.
|
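A hedged sketch of driving merging_cli programmatically; in practice it is wired up as the discoursegraphs console entry point, and the file names here are hypothetical:
import sys
# Simulates: discoursegraphs -t syntax.xml -o dot merged.dot
sys.argv = ['discoursegraphs', '-t', 'syntax.xml', '-o', 'dot', 'merged.dot']
merging_cli()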
def gradient(self):
"""
Sum of covariance function derivatives.
Returns
-------
dict
∂K₀ + ∂K₁ + ⋯
"""
grad = {}
for i, f in enumerate(self._covariances):
for varname, g in f.gradient().items():
grad[f"{self._name}[{i}].{varname}"] = g
return grad
|
Sum of covariance function derivatives.
Returns
-------
dict
∂K₀ + ∂K₁ + ⋯
|
def make_pdb(self, ligands=True, alt_states=False, pseudo_group=False, header=True, footer=True):
"""Generates a PDB string for the Assembly.
Parameters
----------
ligands : bool, optional
If `True`, will include ligands in the output.
alt_states : bool, optional
If `True`, will include alternate conformations in the output.
pseudo_group : bool, optional
If `True`, will include pseudo atoms in the output.
header : bool, optional
If `True` will write a header for output.
footer : bool, optional
If `True` will write a footer for output.
Returns
-------
pdb_str : str
String of the pdb for the Assembly. Generated by collating
Polymer().pdb calls for the component Polymers.
"""
base_filters = dict(ligands=ligands, pseudo_group=pseudo_group)
restricted_mol_types = [x[0] for x in base_filters.items() if not x[1]]
        in_groups = list(self.filter_mol_types(restricted_mol_types))
pdb_header = 'HEADER {:<80}\n'.format(
'ISAMBARD Model {}'.format(self.id)) if header else ''
pdb_body = ''.join([x.make_pdb(
alt_states=alt_states, inc_ligands=ligands) + '{:<80}\n'.format('TER') for x in in_groups])
pdb_footer = '{:<80}\n'.format('END') if footer else ''
pdb_str = ''.join([pdb_header, pdb_body, pdb_footer])
return pdb_str
|
Generates a PDB string for the Assembly.
Parameters
----------
ligands : bool, optional
If `True`, will include ligands in the output.
alt_states : bool, optional
If `True`, will include alternate conformations in the output.
pseudo_group : bool, optional
If `True`, will include pseudo atoms in the output.
header : bool, optional
If `True` will write a header for output.
footer : bool, optional
If `True` will write a footer for output.
Returns
-------
pdb_str : str
String of the pdb for the Assembly. Generated by collating
Polymer().pdb calls for the component Polymers.
|
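A minimal usage sketch for make_pdb; assembly stands for an existing ISAMBARD Assembly instance, and the output file name is hypothetical:
# Hypothetical usage; `assembly` is an existing Assembly instance.
pdb_str = assembly.make_pdb(ligands=False, header=False)
with open('model.pdb', 'w') as fh:
    fh.write(pdb_str)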
def _find_convertable_object(self, data):
"""
Get the first instance of a `self.pod_types`
"""
data = list(data)
convertable_object_idxs = [
idx
for idx, obj
in enumerate(data)
if obj.get('kind') in self.pod_types.keys()
]
if len(convertable_object_idxs) < 1:
raise Exception("Kubernetes config didn't contain any of {}".format(
', '.join(self.pod_types.keys())
))
        return data[convertable_object_idxs[0]]
|
Get the first instance of a `self.pod_types`
|
def strip_prefix(string, prefix, regex=False):
"""Strip the prefix from the string
If 'regex' is specified, prefix is understood as a regular expression."""
if not isinstance(string, six.string_types) or not isinstance(prefix, six.string_types):
msg = 'Arguments to strip_prefix must be string types. Are: {s}, {p}'\
.format(s=type(string), p=type(prefix))
raise TypeError(msg)
if not regex:
prefix = re.escape(prefix)
if not prefix.startswith('^'):
prefix = '^({s})'.format(s=prefix)
return _strip(string, prefix)
|
Strip the prefix from the string
If 'regex' is specified, prefix is understood as a regular expression.
|
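Illustrative calls for strip_prefix; the return values assume the private _strip helper (not shown) removes a leading regex match:
strip_prefix('foobar', 'foo')                     # -> 'bar' (literal prefix)
strip_prefix('v1.2-rc', r'v[\d.]+-', regex=True)  # -> 'rc' (regex prefix)
strip_prefix('foobar', 42)                        # raises TypeError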
def posterior_marginals(self, x, mask=None):
"""Run a Kalman smoother to return posterior mean and cov.
Note that the returned values `smoothed_means` depend on the observed
time series `x`, while the `smoothed_covs` are independent
of the observed series; i.e., they depend only on the model itself.
This means that the mean values have shape `concat([sample_shape(x),
batch_shape, [num_timesteps, {latent/observation}_size]])`,
while the covariances have shape `concat[(batch_shape, [num_timesteps,
{latent/observation}_size, {latent/observation}_size]])`, which
does not depend on the sample shape.
This function only performs smoothing. If the user wants the
intermediate values, which are returned by filtering pass `forward_filter`,
one could get it by:
```
(log_likelihoods,
filtered_means, filtered_covs,
predicted_means, predicted_covs,
observation_means, observation_covs) = model.forward_filter(x)
smoothed_means, smoothed_covs = model.backward_smoothing_pass(x)
```
where `x` is an observation sequence.
Args:
x: a float-type `Tensor` with rightmost dimensions
`[num_timesteps, observation_size]` matching
`self.event_shape`. Additional dimensions must match or be
broadcastable to `self.batch_shape`; any further dimensions
are interpreted as a sample shape.
mask: optional bool-type `Tensor` with rightmost dimension
`[num_timesteps]`; `True` values specify that the value of `x`
at that timestep is masked, i.e., not conditioned on. Additional
dimensions must match or be broadcastable to `self.batch_shape`; any
further dimensions must match or be broadcastable to the sample
shape of `x`.
Default value: `None`.
Returns:
smoothed_means: Means of the per-timestep smoothed
distributions over latent states, p(x_{t} | x_{:T}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, observation_size]`.
smoothed_covs: Covariances of the per-timestep smoothed
distributions over latent states, p(x_{t} | x_{:T}), as a
Tensor of shape `sample_shape(mask) + batch_shape + [num_timesteps,
observation_size, observation_size]`. Note that the covariances depend
only on the model and the mask, not on the data, so this may have fewer
dimensions than `filtered_means`.
"""
with tf.name_scope("smooth"):
x = tf.convert_to_tensor(value=x, name="x")
(_, filtered_means, filtered_covs,
predicted_means, predicted_covs, _, _) = self.forward_filter(
x, mask=mask)
(smoothed_means, smoothed_covs) = self.backward_smoothing_pass(
filtered_means, filtered_covs,
predicted_means, predicted_covs)
return (smoothed_means, smoothed_covs)
|
Run a Kalman smoother to return posterior mean and cov.
Note that the returned values `smoothed_means` depend on the observed
time series `x`, while the `smoothed_covs` are independent
of the observed series; i.e., they depend only on the model itself.
This means that the mean values have shape `concat([sample_shape(x),
batch_shape, [num_timesteps, {latent/observation}_size]])`,
while the covariances have shape `concat[(batch_shape, [num_timesteps,
{latent/observation}_size, {latent/observation}_size]])`, which
does not depend on the sample shape.
This function only performs smoothing. If the user wants the
intermediate values, which are returned by filtering pass `forward_filter`,
one could get it by:
```
(log_likelihoods,
filtered_means, filtered_covs,
predicted_means, predicted_covs,
observation_means, observation_covs) = model.forward_filter(x)
smoothed_means, smoothed_covs = model.backward_smoothing_pass(x)
```
where `x` is an observation sequence.
Args:
x: a float-type `Tensor` with rightmost dimensions
`[num_timesteps, observation_size]` matching
`self.event_shape`. Additional dimensions must match or be
broadcastable to `self.batch_shape`; any further dimensions
are interpreted as a sample shape.
mask: optional bool-type `Tensor` with rightmost dimension
`[num_timesteps]`; `True` values specify that the value of `x`
at that timestep is masked, i.e., not conditioned on. Additional
dimensions must match or be broadcastable to `self.batch_shape`; any
further dimensions must match or be broadcastable to the sample
shape of `x`.
Default value: `None`.
Returns:
smoothed_means: Means of the per-timestep smoothed
distributions over latent states, p(x_{t} | x_{:T}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, observation_size]`.
smoothed_covs: Covariances of the per-timestep smoothed
distributions over latent states, p(x_{t} | x_{:T}), as a
Tensor of shape `sample_shape(mask) + batch_shape + [num_timesteps,
observation_size, observation_size]`. Note that the covariances depend
only on the model and the mask, not on the data, so this may have fewer
dimensions than `filtered_means`.
|
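A hedged usage sketch; model is assumed to be a TensorFlow Probability LinearGaussianStateSpaceModel and x a matching observation tensor:
# Hypothetical model and data:
# model: tfp.distributions.LinearGaussianStateSpaceModel
# x:     float Tensor of shape [num_timesteps, observation_size]
smoothed_means, smoothed_covs = model.posterior_marginals(x)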
def user_in_group(user, group):
"""Returns True if the given user is in given group"""
if isinstance(group, Group):
return user_is_superuser(user) or group in user.groups.all()
elif isinstance(group, six.string_types):
return user_is_superuser(user) or user.groups.filter(name=group).exists()
raise TypeError("'group' argument must be a string or a Group instance")
|
Returns True if the given user is in given group
|
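A hedged usage sketch for user_in_group with Django's auth models; the user and group names are hypothetical:
from django.contrib.auth.models import Group, User

user = User.objects.get(username='alice')   # hypothetical user
group = Group.objects.get(name='editors')   # hypothetical group
user_in_group(user, group)       # Group-instance form
user_in_group(user, 'editors')   # group-name string form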
def from_devanagari(self, data):
"""A convenience method"""
from indic_transliteration import sanscript
return sanscript.transliterate(data=data, _from=sanscript.DEVANAGARI, _to=self.name)
|
A convenience method: transliterate data from Devanagari into this scheme.
|
def version(self, value):
"""
Setter for **self.__version** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"version", value)
self.__version = value
|
Setter for **self.__version** attribute.
:param value: Attribute value.
:type value: unicode
|
def getImports(pth):
"""Forwards to the correct getImports implementation for the platform.
"""
if not os.path.isfile(pth):
pth = check_extract_from_egg(pth)[0][0]
if is_win or is_cygwin:
if pth.lower().endswith(".manifest"):
return []
try:
return _getImports_pe(pth)
        except Exception as exception:
# Assemblies can pull in files which aren't necessarily PE,
# but are still needed by the assembly. Any additional binary
# dependencies should already have been handled by
# selectAssemblies in that case, so just warn, return an empty
# list and continue.
if logger.isEnabledFor(logging.WARN):
                # log the exception only if level >= WARN
logger.warn('Can not get binary dependencies for file: %s', pth)
logger.exception(exception)
return []
elif is_darwin:
return _getImports_macholib(pth)
else:
return _getImports_ldd(pth)
|
Forwards to the correct getImports implementation for the platform.
|
def get(cont=None, path=None, local_file=None, return_bin=False, profile=None):
'''
List the contents of a container, or return an object from a container. Set
return_bin to True in order to retrieve an object wholesale. Otherwise,
Salt will attempt to parse an XML response.
CLI Example to list containers:
.. code-block:: bash
salt myminion swift.get
CLI Example to list the contents of a container:
.. code-block:: bash
salt myminion swift.get mycontainer
CLI Example to return the binary contents of an object:
.. code-block:: bash
salt myminion swift.get mycontainer myfile.png return_bin=True
CLI Example to save the binary contents of an object to a local file:
.. code-block:: bash
salt myminion swift.get mycontainer myfile.png local_file=/tmp/myfile.png
'''
swift_conn = _auth(profile)
if cont is None:
return swift_conn.get_account()
if path is None:
return swift_conn.get_container(cont)
if return_bin is True:
return swift_conn.get_object(cont, path, return_bin)
if local_file is not None:
return swift_conn.get_object(cont, path, local_file)
return False
|
List the contents of a container, or return an object from a container. Set
return_bin to True in order to retrieve an object wholesale. Otherwise,
Salt will attempt to parse an XML response.
CLI Example to list containers:
.. code-block:: bash
salt myminion swift.get
CLI Example to list the contents of a container:
.. code-block:: bash
salt myminion swift.get mycontainer
CLI Example to return the binary contents of an object:
.. code-block:: bash
salt myminion swift.get mycontainer myfile.png return_bin=True
CLI Example to save the binary contents of an object to a local file:
.. code-block:: bash
salt myminion swift.get mycontainer myfile.png local_file=/tmp/myfile.png
|
def someoneKnownSeen(self, home=None, camera=None):
"""
Return True if someone known has been seen
"""
try:
cam_id = self.cameraByName(camera=camera, home=home)['id']
except TypeError:
logger.warning("personSeenByCamera: Camera name or home is unknown")
return False
        # Check whether someone known was seen in the last event
if self.lastEvent[cam_id]['type'] == 'person':
if self.lastEvent[cam_id]['person_id'] in self._knownPersons():
return True
return False
|
Return True if someone known has been seen
|
def numToDigits(num, places):
"""
    Helper for converting numbers to fixed-width textual digits.
"""
s = str(num)
if len(s) < places:
return ("0" * (places - len(s))) + s
elif len(s) > places:
        return s[-places:]
else:
return s
|
Helper for converting numbers to fixed-width textual digits.
|
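Worked examples of numToDigits' padding and truncation behavior:
numToDigits(7, 3)      # '007'  (zero-padded on the left)
numToDigits(12345, 3)  # '345'  (keeps only the last 3 digits)
numToDigits(123, 3)    # '123'  (already the right width)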
def comb_jit(N, k):
"""
Numba jitted function that computes N choose k. Return `0` if the
outcome exceeds the maximum value of `np.intp` or if N < 0, k < 0,
or k > N.
Parameters
----------
N : scalar(int)
k : scalar(int)
Returns
-------
val : scalar(int)
"""
# From scipy.special._comb_int_long
# github.com/scipy/scipy/blob/v1.0.0/scipy/special/_comb.pyx
INTP_MAX = np.iinfo(np.intp).max
if N < 0 or k < 0 or k > N:
return 0
if k == 0:
return 1
if k == 1:
return N
if N == INTP_MAX:
return 0
M = N + 1
nterms = min(k, N - k)
val = 1
for j in range(1, nterms + 1):
# Overflow check
if val > INTP_MAX // (M - j):
return 0
val *= M - j
val //= j
return val
|
Numba jitted function that computes N choose k. Return `0` if the
outcome exceeds the maximum value of `np.intp` or if N < 0, k < 0,
or k > N.
Parameters
----------
N : scalar(int)
k : scalar(int)
Returns
-------
val : scalar(int)
|
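A short sanity check for comb_jit; it assumes numpy is available as np, matching the function body:
import numpy as np

assert comb_jit(5, 2) == 10   # 5 choose 2
assert comb_jit(10, 0) == 1   # k == 0 short-circuits to 1
assert comb_jit(3, 5) == 0    # k > N returns 0 by convention
assert comb_jit(np.iinfo(np.intp).max, 2) == 0   # overflow is guarded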
def _score_macro_average(self, n_classes):
"""
Compute the macro average scores for the ROCAUC curves.
"""
# Gather all FPRs
all_fpr = np.unique(np.concatenate([self.fpr[i] for i in range(n_classes)]))
avg_tpr = np.zeros_like(all_fpr)
# Compute the averages per class
for i in range(n_classes):
avg_tpr += interp(all_fpr, self.fpr[i], self.tpr[i])
# Finalize the average
avg_tpr /= n_classes
# Store the macro averages
self.fpr[MACRO] = all_fpr
self.tpr[MACRO] = avg_tpr
self.roc_auc[MACRO] = auc(self.fpr[MACRO], self.tpr[MACRO])
|
Compute the macro average scores for the ROCAUC curves.
|
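The macro average interpolates each class's ROC curve onto the union of all FPR grid points and averages the TPRs. A standalone sketch of the same computation with plain numpy and scikit-learn, where fpr and tpr are per-class dicts of arrays as in the method above:
import numpy as np
from sklearn.metrics import auc

def macro_average_roc(fpr, tpr, n_classes):
    # Union of every per-class FPR grid point
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    # Interpolate each class's TPR onto the common grid and average
    avg_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        avg_tpr += np.interp(all_fpr, fpr[i], tpr[i])
    avg_tpr /= n_classes
    return all_fpr, avg_tpr, auc(all_fpr, avg_tpr)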
def node_rank(self):
"""
Returns the maximum rank for each **topological node** in the
``DictGraph``. The rank of a node is defined as the number of edges
between the node and a node which has rank 0. A **topological node**
has rank 0 if it has no incoming edges.
"""
nodes = self.postorder()
node_rank = {}
for node in nodes:
max_rank = 0
for child in self[node].nodes():
some_rank = node_rank[child] + 1
max_rank = max(max_rank, some_rank)
node_rank[node] = max_rank
return node_rank
|
Returns the maximum rank for each **topological node** in the
``DictGraph``. The rank of a node is defined as the number of edges
between the node and a node which has rank 0. A **topological node**
has rank 0 if it has no incoming edges.
|
def fit_transform(self, X, **kwargs):
"""Computes the diffusion operator and the position of the cells in the
embedding space
Parameters
----------
X : array, shape=[n_samples, n_features]
input data with `n_samples` samples and `n_dimensions`
dimensions. Accepted data types: `numpy.ndarray`,
`scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData` If
`knn_dist` is 'precomputed', `data` should be a n_samples x
n_samples distance or affinity matrix
kwargs : further arguments for `PHATE.transform()`
Keyword arguments as specified in :func:`~phate.PHATE.transform`
Returns
-------
embedding : array, shape=[n_samples, n_dimensions]
The cells embedded in a lower dimensional space using PHATE
"""
tasklogger.log_start('PHATE')
self.fit(X)
embedding = self.transform(**kwargs)
tasklogger.log_complete('PHATE')
return embedding
|
Computes the diffusion operator and the position of the cells in the
embedding space
Parameters
----------
X : array, shape=[n_samples, n_features]
input data with `n_samples` samples and `n_dimensions`
dimensions. Accepted data types: `numpy.ndarray`,
`scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData` If
`knn_dist` is 'precomputed', `data` should be a n_samples x
n_samples distance or affinity matrix
kwargs : further arguments for `PHATE.transform()`
Keyword arguments as specified in :func:`~phate.PHATE.transform`
Returns
-------
embedding : array, shape=[n_samples, n_dimensions]
The cells embedded in a lower dimensional space using PHATE
|
def move_pos(self, line=1, column=1):
""" Move the cursor to a new position.
Default: line 1, column 1
"""
return self.chained(move.pos(line=line, column=column))
|
Move the cursor to a new position.
Default: line 1, column 1
|
def fhi_header(filename, ppdesc):
"""
Parse the FHI abinit header. Example:
Troullier-Martins psp for element Sc Thu Oct 27 17:33:22 EDT 1994
21.00000 3.00000 940714 zatom, zion, pspdat
1 1 2 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
1.80626423934776 .22824404341771 1.17378968127746 rchrg,fchrg,qchrg
"""
lines = _read_nlines(filename, 4)
try:
header = _dict_from_lines(lines[:4], [0, 3, 6, 3])
except ValueError:
# The last record with rchrg ... seems to be optional.
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
return NcAbinitHeader(summary, **header)
|
Parse the FHI abinit header. Example:
Troullier-Martins psp for element Sc Thu Oct 27 17:33:22 EDT 1994
21.00000 3.00000 940714 zatom, zion, pspdat
1 1 2 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
1.80626423934776 .22824404341771 1.17378968127746 rchrg,fchrg,qchrg
|
def _is_undefok(arg, undefok_names):
"""Returns whether we can ignore arg based on a set of undefok flag names."""
if not arg.startswith('-'):
return False
if arg.startswith('--'):
arg_without_dash = arg[2:]
else:
arg_without_dash = arg[1:]
if '=' in arg_without_dash:
name, _ = arg_without_dash.split('=', 1)
else:
name = arg_without_dash
if name in undefok_names:
return True
return False
|
Returns whether we can ignore arg based on a set of undefok flag names.
|
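Illustrative checks for _is_undefok:
undefok = {'old_flag', 'legacy'}
assert _is_undefok('--old_flag=3', undefok)   # double dash, with value
assert _is_undefok('-legacy', undefok)        # single dash, no value
assert not _is_undefok('old_flag', undefok)   # no leading dash
assert not _is_undefok('--other', undefok)    # not in the undefok set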
def _create_extended_jinja_tags(self, nodes):
"""Loops through the nodes and looks for special jinja tags that
        contain more than one tag but only one ending tag."""
jinja_a = None
jinja_b = None
ext_node = None
ext_nodes = []
for node in nodes:
if isinstance(node, EmptyLine):
continue
if node.has_children():
node.children = self._create_extended_jinja_tags(node.children)
if not isinstance(node, JinjaTag):
jinja_a = None
continue
if jinja_a is None or (
node.tag_name in self._extended_tags and jinja_a.tag_name not in self._extended_tags[node.tag_name]):
jinja_a = node
continue
if node.tag_name in self._extended_tags and \
jinja_a.tag_name in self._extended_tags[node.tag_name]:
if ext_node is None:
ext_node = ExtendedJinjaTag()
ext_node.add(jinja_a)
ext_nodes.append(ext_node)
ext_node.add(node)
else:
ext_node = None
jinja_a = node
#replace the nodes with the new extended node
for node in ext_nodes:
nodes.insert(nodes.index(node.children[0]), node)
index = nodes.index(node.children[0])
del nodes[index:index+len(node.children)]
return nodes
|
Loops through the nodes and looks for special jinja tags that
contain more than one tag but only one ending tag.
|
def action_set(self, hostname=None, action=None, subdoms=None, *args):
"Adds a hostname to the LB, or alters an existing one"
usage = "set <hostname> <action> <subdoms> [option=value, ...]"
if hostname is None:
sys.stderr.write("You must supply a hostname.\n")
sys.stderr.write("Usage: %s\n" % usage)
sys.exit(1)
if action is None:
sys.stderr.write("You must supply an action.\n")
sys.stderr.write("Usage: %s\n" % usage)
sys.exit(1)
if subdoms is None or subdoms.lower() not in ("true", "false"):
sys.stderr.write("You must supply True or False for the subdomains flag.\n")
sys.stderr.write("Usage: %s\n" % usage)
sys.exit(1)
# Grab options
options = {}
for arg in args:
if "=" not in arg:
sys.stderr.write("%s is not a valid option (no =)\n" % (
arg
))
sys.exit(1)
key, value = arg.split("=", 1)
options[key] = value
# Sanity-check options
if action in ("proxy, mirror") and "backends" not in options:
sys.stderr.write("The %s action requires a backends option.\n" % action)
sys.exit(1)
if action == "static" and "type" not in options:
sys.stderr.write("The %s action requires a type option.\n" % action)
sys.exit(1)
if action == "redirect" and "redirect_to" not in options:
sys.stderr.write("The %s action requires a redirect_to option.\n" % action)
sys.exit(1)
if action == "empty" and "code" not in options:
sys.stderr.write("The %s action requires a code option.\n" % action)
sys.exit(1)
# Expand some options from text to datastructure
if "backends" in options:
options['backends'] = [
(lambda x: (x[0], int(x[1])))(bit.split(":", 1))
for bit in options['backends'].split(",")
]
# Set!
self.client.set(
hostname,
[action, options, subdoms.lower() == "true"]
)
|
Adds a hostname to the LB, or alters an existing one
|
def load_model_by_id(self, model_id):
"""Get the model by model_id
Parameters
----------
model_id : int
model index
Returns
-------
load_model : Graph
the model graph representation
"""
with open(os.path.join(self.path, str(model_id) + ".json")) as fin:
json_str = fin.read().replace("\n", "")
load_model = json_to_graph(json_str)
return load_model
|
Get the model by model_id
Parameters
----------
model_id : int
model index
Returns
-------
load_model : Graph
the model graph representation
|
def endpoint_get(service, region=None, profile=None, interface=None, **connection_args):
'''
Return a specific endpoint (keystone endpoint-get)
CLI Example:
.. code-block:: bash
salt 'v2' keystone.endpoint_get nova [region=RegionOne]
salt 'v3' keystone.endpoint_get nova interface=admin [region=RegionOne]
'''
auth(profile, **connection_args)
services = service_list(profile, **connection_args)
if service not in services:
return {'Error': 'Could not find the specified service'}
service_id = services[service]['id']
endpoints = endpoint_list(profile, **connection_args)
    e = [e for e in endpoints.values()
         if e['service_id'] == service_id and
         (e['region'] == region if region else True) and
         (e['interface'] == interface if interface else True)]
if len(e) > 1:
return {'Error': 'Multiple endpoints found ({0}) for the {1} service. Please specify region.'.format(e, service)}
if len(e) == 1:
return e[0]
return {'Error': 'Could not find endpoint for the specified service'}
|
Return a specific endpoint (keystone endpoint-get)
CLI Example:
.. code-block:: bash
salt 'v2' keystone.endpoint_get nova [region=RegionOne]
salt 'v3' keystone.endpoint_get nova interface=admin [region=RegionOne]
|
def append_instances(cls, inst1, inst2):
"""
Merges the two datasets (one-after-the-other). Throws an exception if the datasets aren't compatible.
:param inst1: the first dataset
:type inst1: Instances
        :param inst2: the second dataset
:type inst2: Instances
:return: the combined dataset
:rtype: Instances
"""
msg = inst1.equal_headers(inst2)
if msg is not None:
raise Exception("Cannot appent instances: " + msg)
result = cls.copy_instances(inst1)
for i in xrange(inst2.num_instances):
result.add_instance(inst2.get_instance(i))
return result
|
Merges the two datasets (one-after-the-other). Throws an exception if the datasets aren't compatible.
:param inst1: the first dataset
:type inst1: Instances
:param inst2: the second dataset
:type inst2: Instances
:return: the combined dataset
:rtype: Instances
|
def generate_filterbank_header(self, nchans=1):
""" Generate a blimpy header dictionary """
gp_head = self.read_first_header()
fb_head = {}
telescope_str = gp_head.get("TELESCOP", "unknown")
if telescope_str in ('GBT', 'GREENBANK'):
fb_head["telescope_id"] = 6
elif telescope_str in ('PKS', 'PARKES'):
fb_head["telescop_id"] = 7
else:
fb_head["telescop_id"] = 0
# Using .get() method allows us to fill in default values if not present
fb_head["source_name"] = gp_head.get("SRC_NAME", "unknown")
fb_head["az_start"] = gp_head.get("AZ", 0)
fb_head["za_start"] = gp_head.get("ZA", 0)
fb_head["src_raj"] = Angle(str(gp_head.get("RA", 0.0)) + "hr")
fb_head["src_dej"] = Angle(str(gp_head.get("DEC", 0.0)) + "deg")
fb_head["rawdatafile"] = self.filename
# hardcoded
fb_head["machine_id"] = 20
fb_head["data_type"] = 1 # blio datatype
fb_head["barycentric"] = 0
fb_head["pulsarcentric"] = 0
fb_head["nbits"] = 32
# TODO - compute these values. Need to figure out the correct calcs
fb_head["tstart"] = 0.0
fb_head["tsamp"] = 1.0
fb_head["fch1"] = 0.0
fb_head["foff"] = 187.5 / nchans
# Need to be updated based on output specs
fb_head["nchans"] = nchans
fb_head["nifs"] = 1
fb_head["nbeams"] = 1
return fb_head
|
Generate a blimpy header dictionary
|
def _ActionDatabase(self, cmd, args=None, commit=True, error=True):
"""
Do action on database.
Parameters
----------
cmd : string
SQL command.
args : tuple [optional : default = None]
Arguments to be passed along with the SQL command.
e.g. cmd="SELECT Value FROM Config WHERE Name=?" args=(fieldName, )
commit : boolean [optional : default = True]
If true commit database changes after command is executed.
error : boolean [optional : default = True]
If False then any sqlite3.OperationalError exceptions will cause this
function to return None, otherwise the exception will be raised.
Returns
----------
If a valid result is obtained from the database this will be returned.
If an error occurs and the error argument is set to False then the
return value will be None.
"""
goodlogging.Log.Info("DB", "Database Command: {0} {1}".format(cmd, args), verbosity=self.logVerbosity)
with sqlite3.connect(self._dbPath) as db:
try:
if args is None:
result = db.execute(cmd)
else:
result = db.execute(cmd, args)
except sqlite3.OperationalError:
if error is True:
raise
return None
else:
if commit is True:
db.commit()
return result.fetchall()
|
Do action on database.
Parameters
----------
cmd : string
SQL command.
args : tuple [optional : default = None]
Arguments to be passed along with the SQL command.
e.g. cmd="SELECT Value FROM Config WHERE Name=?" args=(fieldName, )
commit : boolean [optional : default = True]
If true commit database changes after command is executed.
error : boolean [optional : default = True]
If False then any sqlite3.OperationalError exceptions will cause this
function to return None, otherwise the exception will be raised.
Returns
----------
If a valid result is obtained from the database this will be returned.
If an error occurs and the error argument is set to False then the
return value will be None.
|
def set_servers(self, servers):
"""
Set the pool of servers used by this client.
@param servers: an array of servers.
Servers can be passed in two forms:
1. Strings of the form C{"host:port"}, which implies a default weight of 1.
2. Tuples of the form C{("host:port", weight)}, where C{weight} is
an integer weight value.
"""
self.servers = [_Host(s, self.debug, dead_retry=self.dead_retry,
socket_timeout=self.socket_timeout,
flush_on_reconnect=self.flush_on_reconnect)
for s in servers]
self._init_buckets()
|
Set the pool of servers used by this client.
@param servers: an array of servers.
Servers can be passed in two forms:
1. Strings of the form C{"host:port"}, which implies a default weight of 1.
2. Tuples of the form C{("host:port", weight)}, where C{weight} is
an integer weight value.
|
def main(argv):
"""This function sets up a command-line option parser and then calls fetch_and_write_mrca
to do all of the real work.
"""
import argparse
description = 'Uses Open Tree of Life web services to the MRCA for a set of OTT IDs.'
parser = argparse.ArgumentParser(prog='ot-tree-of-life-mrca', description=description)
parser.add_argument('ottid', nargs='*', type=int, help='OTT IDs')
parser.add_argument('--subtree', action='store_true', default=False, required=False,
help='write a newick representation of the subtree rooted at this mrca')
parser.add_argument('--induced-subtree', action='store_true', default=False, required=False,
help='write a newick representation of the topology of the requested taxa in the synthetic tree (the subtree pruned to just the queried taxa)')
parser.add_argument('--details', action='store_true', default=False, required=False,
help='report more details about the mrca node')
args = parser.parse_args(argv)
id_list = args.ottid
if not id_list:
sys.stderr.write('No OTT IDs provided. Running a dummy query with 770302 770315\n')
id_list = [770302, 770315]
fetch_and_write_mrca(id_list, args.details, args.subtree, args.induced_subtree, sys.stdout, sys.stderr)
|
This function sets up a command-line option parser and then calls fetch_and_write_mrca
to do all of the real work.
|
def forceupdate(self, *args, **kw):
"""Like a bulk :meth:`forceput`."""
self._update(False, self._ON_DUP_OVERWRITE, *args, **kw)
|
Like a bulk :meth:`forceput`.
|
def post_process_images(self, doctree):
"""Pick the best candidate for all image URIs."""
super(AbstractSlideBuilder, self).post_process_images(doctree)
# figure out where this doctree is in relation to the srcdir
relative_base = (
['..'] *
doctree.attributes.get('source')[len(self.srcdir) + 1:].count('/')
)
for node in doctree.traverse(nodes.image):
if node.get('candidates') is None:
node['candidates'] = ('*',)
# fix up images with absolute paths
if node['uri'].startswith(self.outdir):
node['uri'] = '/'.join(
relative_base + [
node['uri'][len(self.outdir) + 1:]
]
)
|
Pick the best candidate for all image URIs.
|
def call_actions_parallel_future(self, service_name, actions, **kwargs):
"""
This method is identical in signature and behavior to `call_actions_parallel`, except that it sends the requests
and then immediately returns a `FutureResponse` instead of blocking waiting on responses and returning a
generator. Just call `result(timeout=None)` on the future response to block for an available response (which
will be a generator). Some of the possible exceptions may be raised when this method is called; others may be
raised when the future is used.
If argument `raise_job_errors` is supplied and is `False`, some items in the result list might be lists of job
errors instead of individual `ActionResponse`s. Be sure to check for that if used in this manner.
If argument `catch_transport_errors` is supplied and is `True`, some items in the result list might be instances
of `Exception` instead of individual `ActionResponse`s. Be sure to check for that if used in this manner.
:return: A generator of action responses that blocks waiting on responses once you begin iteration
:rtype: Client.FutureResponse
"""
job_responses = self.call_jobs_parallel_future(
jobs=({'service_name': service_name, 'actions': [action]} for action in actions),
**kwargs
)
def parse_results(results):
for job in results:
if isinstance(job, Exception):
yield job
elif job.errors:
yield job.errors
else:
yield job.actions[0]
return self.FutureResponse(lambda _timeout: (x for x in parse_results(job_responses.result(_timeout))))
|
This method is identical in signature and behavior to `call_actions_parallel`, except that it sends the requests
and then immediately returns a `FutureResponse` instead of blocking waiting on responses and returning a
generator. Just call `result(timeout=None)` on the future response to block for an available response (which
will be a generator). Some of the possible exceptions may be raised when this method is called; others may be
raised when the future is used.
If argument `raise_job_errors` is supplied and is `False`, some items in the result list might be lists of job
errors instead of individual `ActionResponse`s. Be sure to check for that if used in this manner.
If argument `catch_transport_errors` is supplied and is `True`, some items in the result list might be instances
of `Exception` instead of individual `ActionResponse`s. Be sure to check for that if used in this manner.
:return: A generator of action responses that blocks waiting on responses once you begin iteration
:rtype: Client.FutureResponse
|
def name2idfobject(idf, groupnames=None, objkeys=None, **kwargs):
    """return the object, if the Name or some other field is known.
    send fields in **kwargs as Name='a name', Roughness='smooth'
    Returns the first find (field search is unordered)
    objkeys -> if objkeys=['ZONE', 'Material'], search only those
    groupnames -> not yet coded"""
    # TODO : this is a very slow search. revisit to speed it up.
if not objkeys:
objkeys = idfobjectkeys(idf)
for objkey in objkeys:
idfobjs = idf.idfobjects[objkey.upper()]
for idfobj in idfobjs:
for key, val in kwargs.items():
try:
if idfobj[key] == val:
return idfobj
                except BadEPFieldError:
continue
|
return the object, if the Name or some other field is known.
send fields in **kwargs as Name='a name', Roughness='smooth'
Returns the first find (field search is unordered)
objkeys -> if objkeys=['ZONE', 'Material'], search only those
groupnames -> not yet coded
|
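A hedged usage sketch for name2idfobject with an eppy-style IDF object; the object keys and field values are hypothetical:
# Hypothetical usage; `idf` is an eppy IDF instance.
mat = name2idfobject(idf, objkeys=['Material'], Name='Insulation board')
smooth = name2idfobject(idf, Roughness='Smooth')  # searches all object keys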
def _set_sfm_walk(self, v, load=False):
"""
Setter method for sfm_walk, mapped from YANG variable /sysmon/sfm_walk (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_sfm_walk is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sfm_walk() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=sfm_walk.sfm_walk, is_container='container', presence=False, yang_name="sfm-walk", rest_name="sfm-walk", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'SFM Walk', u'callpoint': u'sfmWalk', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-sysmon', defining_module='brocade-sysmon', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sfm_walk must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=sfm_walk.sfm_walk, is_container='container', presence=False, yang_name="sfm-walk", rest_name="sfm-walk", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'SFM Walk', u'callpoint': u'sfmWalk', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-sysmon', defining_module='brocade-sysmon', yang_type='container', is_config=True)""",
})
self.__sfm_walk = t
if hasattr(self, '_set'):
self._set()
|
Setter method for sfm_walk, mapped from YANG variable /sysmon/sfm_walk (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_sfm_walk is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sfm_walk() directly.
|
def _compute_vs30_star_factor(self, imt, vs30):
"""
Compute and return vs30 star factor, equation 5, page 77.
"""
v1 = self._compute_v1_factor(imt)
vs30_star = vs30.copy()
vs30_star[vs30_star >= v1] = v1
return vs30_star, v1
|
Compute and return vs30 star factor, equation 5, page 77.
|
def _obtain_api_token(self, username, password):
"""Use username and password to obtain and return an API token."""
data = self._request("POST", "auth/apitoken",
{"username": username, "password": password},
reestablish_session=False)
return data["api_token"]
|
Use username and password to obtain and return an API token.
|
def query_value(stmt, args=(), default=None):
"""
Execute a query, returning the first value in the first row of the
result set. If the query returns no result set, a default value is
returned, which is `None` by default.
"""
for row in query(stmt, args, TupleFactory):
return row[0]
return default
|
Execute a query, returning the first value in the first row of the
result set. If the query returns no result set, a default value is
returned, which is `None` by default.
|
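Hedged examples for query_value; the table and column names are hypothetical, and query is the module's own helper used in the body:
row_count = query_value('SELECT COUNT(*) FROM users')
name = query_value('SELECT name FROM users WHERE id = ?', (42,),
                   default='unknown')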
def rnd_date_array(size, start=date(1970, 1, 1), end=None, **kwargs):
"""
Array or Matrix of random date generator.
:returns: 1d or 2d array of datetime.date
"""
if end is None:
end = date.today()
start = parser.parse_date(start)
end = parser.parse_date(end)
_assert_correct_start_end(start, end)
return _randn(size, _rnd_date, start, end)
|
Array or Matrix of random date generator.
:returns: 1d or 2d array of datetime.date
|
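Hedged usage examples for rnd_date_array; the 2d call assumes _randn accepts a tuple size, consistent with the "1d or 2d array" return documented above:
from datetime import date

dates = rnd_date_array(5, start=date(2020, 1, 1), end=date(2020, 12, 31))
grid = rnd_date_array((3, 4))   # assumed 2d form, 1970-01-01 through today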
def get_default_configfile_path():
"""Return the default configuration-file path.
Typically returns a user-local configuration file; e.g:
``~/.config/dwave/dwave.conf``.
Returns:
str:
Configuration file path.
Examples:
This example displays the default configuration file on an Ubuntu Unix system
        running Python 2.7.
>>> import dwave.cloud as dc
>>> # Display paths
>>> dc.config.get_configfile_paths(only_existing=False) # doctest: +SKIP
['/etc/xdg/xdg-ubuntu/dwave/dwave.conf',
'/usr/share/upstart/xdg/dwave/dwave.conf',
'/etc/xdg/dwave/dwave.conf',
'/home/mary/.config/dwave/dwave.conf',
'./dwave.conf']
>>> # Find default configuration path
>>> dc.config.get_default_configfile_path() # doctest: +SKIP
'/home/mary/.config/dwave/dwave.conf'
"""
base = homebase.user_config_dir(
app_author=CONF_AUTHOR, app_name=CONF_APP, roaming=False,
use_virtualenv=False, create=False)
path = os.path.join(base, CONF_FILENAME)
return path
|
Return the default configuration-file path.
Typically returns a user-local configuration file; e.g:
``~/.config/dwave/dwave.conf``.
Returns:
str:
Configuration file path.
Examples:
This example displays the default configuration file on an Ubuntu Unix system
running Python 2.7.
>>> import dwave.cloud as dc
>>> # Display paths
>>> dc.config.get_configfile_paths(only_existing=False) # doctest: +SKIP
['/etc/xdg/xdg-ubuntu/dwave/dwave.conf',
'/usr/share/upstart/xdg/dwave/dwave.conf',
'/etc/xdg/dwave/dwave.conf',
'/home/mary/.config/dwave/dwave.conf',
'./dwave.conf']
>>> # Find default configuration path
>>> dc.config.get_default_configfile_path() # doctest: +SKIP
'/home/mary/.config/dwave/dwave.conf'
|
def run(self, writer, reader):
"""
Pager entry point.
In interactive mode (terminal is a tty), run until
``process_keystroke()`` detects quit keystroke ('q'). In
non-interactive mode, exit after displaying all unicode points.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param reader: callable reads keystrokes from input stream, sending
instance of blessed.keyboard.Keystroke.
:type reader: callable
"""
self._page_data = self.initialize_page_data()
self._set_lastpage()
if not self.term.is_a_tty:
self._run_notty(writer)
else:
self._run_tty(writer, reader)
|
Pager entry point.
In interactive mode (terminal is a tty), run until
``process_keystroke()`` detects quit keystroke ('q'). In
non-interactive mode, exit after displaying all unicode points.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param reader: callable reads keystrokes from input stream, sending
instance of blessed.keyboard.Keystroke.
:type reader: callable
|
def search(self, query, fetch_messages=False, thread_limit=5, message_limit=5):
"""
Searches for messages in all threads
:param query: Text to search for
:param fetch_messages: Whether to fetch :class:`models.Message` objects or IDs only
:param thread_limit: Max. number of threads to retrieve
:param message_limit: Max. number of messages to retrieve
:type thread_limit: int
:type message_limit: int
:return: Dictionary with thread IDs as keys and generators to get messages as values
        :rtype: dict
:raises: FBchatException if request failed
"""
data = {"query": query, "snippetLimit": thread_limit}
j = self._post(
self.req_url.SEARCH_MESSAGES, data, fix_request=True, as_json=True
)
result = j["payload"]["search_snippets"][query]
if fetch_messages:
search_method = self.searchForMessages
else:
search_method = self.searchForMessageIDs
return {
thread_id: search_method(query, limit=message_limit, thread_id=thread_id)
for thread_id in result
}
|
Searches for messages in all threads
:param query: Text to search for
:param fetch_messages: Whether to fetch :class:`models.Message` objects or IDs only
:param thread_limit: Max. number of threads to retrieve
:param message_limit: Max. number of messages to retrieve
:type thread_limit: int
:type message_limit: int
:return: Dictionary with thread IDs as keys and generators to get messages as values
:rtype: dict
:raises: FBchatException if request failed
|
def create(self, alpha_sender):
"""
Create a new AlphaSenderInstance
:param unicode alpha_sender: An Alphanumeric Sender ID string, up to 11 characters.
:returns: Newly created AlphaSenderInstance
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance
"""
data = values.of({'AlphaSender': alpha_sender, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return AlphaSenderInstance(self._version, payload, service_sid=self._solution['service_sid'], )
|
Create a new AlphaSenderInstance
:param unicode alpha_sender: An Alphanumeric Sender ID string, up to 11 characters.
:returns: Newly created AlphaSenderInstance
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance
|
def setup_legacy_graph(native, options_bootstrapper, build_configuration):
"""Construct and return the components necessary for LegacyBuildGraph construction."""
bootstrap_options = options_bootstrapper.bootstrap_options.for_global_scope()
return EngineInitializer.setup_legacy_graph_extended(
bootstrap_options.pants_ignore,
bootstrap_options.pants_workdir,
bootstrap_options.local_store_dir,
bootstrap_options.build_file_imports,
options_bootstrapper,
build_configuration,
native=native,
glob_match_error_behavior=bootstrap_options.glob_expansion_failure,
build_ignore_patterns=bootstrap_options.build_ignore,
exclude_target_regexps=bootstrap_options.exclude_target_regexp,
subproject_roots=bootstrap_options.subproject_roots,
include_trace_on_error=bootstrap_options.print_exception_stacktrace,
execution_options=ExecutionOptions.from_bootstrap_options(bootstrap_options),
)
|
Construct and return the components necessary for LegacyBuildGraph construction.
|
def _escaped(self):
""" Escape character is at end of accumulated token
character list.
"""
chars = self._token_info['chars']
count = len(chars)
# prev char is escape, keep going
if count and chars[count - 1] == self.ESCAPE:
chars.pop() # swallow escape char
return True
else:
return False
|
Escape character is at end of accumulated token
character list.
|
def raw(self):
"""Try to transform str to raw str"
... this will not work every time
"""
escape_dict = {'\a': r'\a',
'\b': r'\b',
'\c': r'\c',
'\f': r'\f',
'\n': r'\n',
'\r': r'\r',
'\t': r'\t',
'\v': r'\v',
#'\x':r'\x',#cannot do \x - otherwise exception
'\'': r'\'',
'\"': r'\"',
#'\0':r'\0', #doesnt work
'\1': r'\1',
'\2': r'\2',
'\3': r'\3',
'\4': r'\4',
'\5': r'\5',
'\6': r'\6',
#'\7':r'\7',#same as \a is ASCI
}
new_string = ''
for char in self:
try:
new_string += escape_dict[char]
except KeyError:
new_string += char
return new_string
|
Try to transform str to raw str
... this will not work every time
|
def get_search_for_slugs(self, slug):
""" Search for a particular slug
"""
return _get_request(_SLUG_SEARCH.format(c_api=_C_API_BEGINNING,
api=_API_VERSION,
slug=_format_query(slug),
at=self.access_token))
|
Search for a particular slug
|
def cd(path):
"""
Change working directory.
Returns absolute path to new working directory.
"""
_cdhist.append(pwd()) # Push to history.
path = abspath(path)
os.chdir(path)
return path
|
Change working directory.
Returns absolute path to new working directory.
|
def remove(self, item):
"""Remove an item from the list.
"""
self.items.pop(item)
self._remove_dep(item)
self.order = None
self.changed(code_changed=True)
|
Remove an item from the list.
|
def update_or_create(cls, name, external_endpoint=None, vpn_site=None,
trust_all_cas=True, with_status=False):
"""
Update or create an ExternalGateway. The ``external_endpoint`` and
``vpn_site`` parameters are expected to be a list of dicts with key/value
pairs to satisfy the respective elements create constructor. VPN Sites will
represent the final state of the VPN site list. ExternalEndpoint that are
pre-existing will not be deleted if not provided in the ``external_endpoint``
parameter, however existing elements will be updated as specified.
:param str name: name of external gateway
:param list(dict) external_endpoint: list of dict items with key/value
to satisfy ExternalEndpoint.create constructor
:param list(dict) vpn_site: list of dict items with key/value to satisfy
VPNSite.create constructor
:param bool with_status: If set to True, returns a 3-tuple of
(ExternalGateway, modified, created), where modified and created
is the boolean status for operations performed.
:raises ValueError: missing required argument/s for constructor argument
:rtype: ExternalGateway
"""
if external_endpoint:
for endpoint in external_endpoint:
if 'name' not in endpoint:
raise ValueError('External endpoints are configured '
'but missing the name parameter.')
if vpn_site:
for site in vpn_site:
if 'name' not in site:
raise ValueError('VPN sites are configured but missing '
'the name parameter.')
# Make sure VPN sites are resolvable before continuing
sites = [element_resolver(element, do_raise=True)
for element in site.get('site_element', [])]
site.update(site_element=sites)
updated = False
created = False
try:
extgw = ExternalGateway.get(name)
except ElementNotFound:
extgw = ExternalGateway.create(name, trust_all_cas)
created = True
if external_endpoint:
for endpoint in external_endpoint:
_, modified, was_created = ExternalEndpoint.update_or_create(
extgw, with_status=True, **endpoint)
if was_created or modified:
updated = True
if vpn_site:
for site in vpn_site:
_, modified, was_created = VPNSite.update_or_create(extgw,
name=site['name'], site_element=site.get('site_element'),
with_status=True)
if was_created or modified:
updated = True
if with_status:
return extgw, updated, created
return extgw
|
Update or create an ExternalGateway. The ``external_endpoint`` and
``vpn_site`` parameters are expected to be a list of dicts with key/value
pairs to satisfy the respective elements create constructor. VPN Sites will
represent the final state of the VPN site list. ExternalEndpoint that are
pre-existing will not be deleted if not provided in the ``external_endpoint``
parameter, however existing elements will be updated as specified.
:param str name: name of external gateway
:param list(dict) external_endpoint: list of dict items with key/value
to satisfy ExternalEndpoint.create constructor
:param list(dict) vpn_site: list of dict items with key/value to satisfy
VPNSite.create constructor
:param bool with_status: If set to True, returns a 3-tuple of
(ExternalGateway, modified, created), where modified and created
is the boolean status for operations performed.
:raises ValueError: missing required argument/s for constructor argument
:rtype: ExternalGateway
|
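A hedged usage sketch for ExternalGateway.update_or_create; dict keys beyond name (such as address) are hypothetical constructor arguments for the endpoint and site elements:
# Hypothetical usage; 'address' and the site element name are assumptions.
gw, modified, created = ExternalGateway.update_or_create(
    name='remote-gw',
    external_endpoint=[{'name': 'endpoint1', 'address': '10.0.0.1'}],
    vpn_site=[{'name': 'site1', 'site_element': ['corp-network']}],
    with_status=True)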
def prefetch(self, bucket, key):
"""镜像回源预取文件:
从镜像源站抓取资源到空间中,如果空间中已经存在,则覆盖该资源,具体规格参考
http://developer.qiniu.com/docs/v6/api/reference/rs/prefetch.html
Args:
bucket: 待获取资源所在的空间
key: 代获取资源文件名
Returns:
一个dict变量,成功返回NULL,失败返回{"error": "<errMsg string>"}
一个ResponseInfo对象
"""
resource = entry(bucket, key)
return self.__io_do(bucket, 'prefetch', resource)
|
Prefetch a file via mirror origin:
Fetch the resource from the mirror origin site into the bucket;
if the resource already exists in the bucket, it is overwritten.
For the full specification see
http://developer.qiniu.com/docs/v6/api/reference/rs/prefetch.html
Args:
bucket: the bucket that will hold the fetched resource
key: the filename of the resource to fetch
Returns:
a dict: None on success, {"error": "<errMsg string>"} on failure
a ResponseInfo object
|
def get_object(cls, api_token):
"""
Class method that will return an Account object.
"""
acct = cls(token=api_token)
acct.load()
return acct
|
Class method that will return an Account object.
|
def __objecthasfields(bunchdt, data, commdct, idfobject, places=7, **kwargs):
"""test if the idf object has the field values in kwargs"""
for key, value in list(kwargs.items()):
if not isfieldvalue(
bunchdt, data, commdct,
idfobject, key, value, places=places):
return False
return True
|
test if the idf object has the field values in kwargs
|
def get_value(self, dictionary):
"""
Given the *incoming* primitive data, return the value for this field
that should be validated and transformed to a native value.
"""
if html.is_html_input(dictionary):
# HTML forms will represent empty fields as '', and cannot
# represent None or False values directly.
if self.field_name not in dictionary:
if getattr(self.root, 'partial', False):
return empty
return self.default_empty_html
ret = dictionary[self.field_name]
if ret == '' and self.allow_null:
# If the field is blank, and null is a valid value then
# determine if we should use null instead.
return '' if getattr(self, 'allow_blank', False) else None
elif ret == '' and not self.required:
# If the field is blank, and emptyness is valid then
# determine if we should use emptyness instead.
return '' if getattr(self, 'allow_blank', False) else empty
return ret
return dictionary.get(self.field_name, empty)
|
Given the *incoming* primitive data, return the value for this field
that should be validated and transformed to a native value.
|