| repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition |
|---|---|---|---|---|---|---|---|---|
pandas-dev/pandas | pandas/core/groupby/categorical.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/categorical.py#L77-L100 | def recode_from_groupby(c, sort, ci):
"""
Reverse the codes_to_groupby to account for sort / observed.
Parameters
----------
c : Categorical
sort : boolean
The value of the sort parameter groupby was called with.
ci : CategoricalIndex
The codes / categories to recode
Returns
-------
CategoricalIndex
"""
# we re-order to the original category orderings
if sort:
return ci.set_categories(c.categories)
# we are not sorting, so add unobserved to the end
return ci.add_categories(
c.categories[~c.categories.isin(ci.categories)]) | [
"def",
"recode_from_groupby",
"(",
"c",
",",
"sort",
",",
"ci",
")",
":",
"# we re-order to the original category orderings",
"if",
"sort",
":",
"return",
"ci",
".",
"set_categories",
"(",
"c",
".",
"categories",
")",
"# we are not sorting, so add unobserved to the end"... | Reverse the codes_to_groupby to account for sort / observed.
Parameters
----------
c : Categorical
sort : boolean
The value of the sort parameter groupby was called with.
ci : CategoricalIndex
The codes / categories to recode
Returns
-------
CategoricalIndex | [
"Reverse",
"the",
"codes_to_groupby",
"to",
"account",
"for",
"sort",
"/",
"observed",
"."
] | python | train |
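A minimal sketch of the two recoding branches above, using only public pandas APIs (the sample data is hypothetical):

```python
import pandas as pd

# Original categorical with an unobserved category 'c'
c = pd.Categorical(["b", "a", "b"], categories=["a", "b", "c"])
# Observed groups only, in groupby order
ci = pd.CategoricalIndex(["b", "a"], categories=["b", "a"])

# sort=True branch: restore the original category ordering
print(list(ci.set_categories(c.categories).categories))   # ['a', 'b', 'c']

# sort=False branch: keep groupby order, append unobserved categories
unobserved = c.categories[~c.categories.isin(ci.categories)]
print(list(ci.add_categories(unobserved).categories))     # ['b', 'a', 'c']
```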
tensorflow/probability | tensorflow_probability/python/layers/distribution_layer.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1698-L1703 | def params_size(num_components, event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
return MixtureSameFamily.params_size(
num_components,
IndependentLogistic.params_size(event_shape, name=name),
name=name) | [
"def",
"params_size",
"(",
"num_components",
",",
"event_shape",
"=",
"(",
")",
",",
"name",
"=",
"None",
")",
":",
"return",
"MixtureSameFamily",
".",
"params_size",
"(",
"num_components",
",",
"IndependentLogistic",
".",
"params_size",
"(",
"event_shape",
",",... | The number of `params` needed to create a single distribution. | [
"The",
"number",
"of",
"params",
"needed",
"to",
"create",
"a",
"single",
"distribution",
"."
] | python | test |
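The arithmetic behind this composition can be sketched in plain Python. The per-component size below assumes IndependentLogistic's two-parameter (loc/scale) form; the function names are illustrative stand-ins, not TFP's API:

```python
import numpy as np

def independent_logistic_params_size(event_shape):
    # one loc and one scale parameter per event dimension
    return 2 * int(np.prod(event_shape, dtype=int))

def mixture_same_family_params_size(num_components, component_size):
    # one mixture logit per component, plus each component's own parameters
    return num_components + num_components * component_size

# e.g. 3 components over a length-4 event: 3 + 3 * (2 * 4) = 27
print(mixture_same_family_params_size(3, independent_logistic_params_size((4,))))
```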
gwpy/gwpy | gwpy/types/series.py | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L873-L941 | def crop(self, start=None, end=None, copy=False):
"""Crop this series to the given x-axis extent.
Parameters
----------
start : `float`, optional
lower limit of x-axis to crop to, defaults to
current `~Series.x0`
end : `float`, optional
upper limit of x-axis to crop to, defaults to current series end
copy : `bool`, optional, default: `False`
copy the input data to fresh memory, otherwise return a view
Returns
-------
series : `Series`
A new series with a sub-set of the input data
Notes
-----
If either ``start`` or ``end`` are outside of the original
`Series` span, warnings will be printed and the limits will
be restricted to the :attr:`~Series.xspan`
"""
x0, x1 = self.xspan
xtype = type(x0)
if isinstance(start, Quantity):
start = start.to(self.xunit).value
if isinstance(end, Quantity):
end = end.to(self.xunit).value
# pin early starts to time-series start
if start == x0:
start = None
elif start is not None and xtype(start) < x0:
warn('%s.crop given start smaller than current start, '
'crop will begin when the Series actually starts.'
% type(self).__name__)
start = None
# pin late ends to time-series end
if end == x1:
end = None
if end is not None and xtype(end) > x1:
warn('%s.crop given end larger than current end, '
'crop will end when the Series actually ends.'
% type(self).__name__)
end = None
# find start index
if start is None:
idx0 = None
else:
idx0 = int((xtype(start) - x0) // self.dx.value)
# find end index
if end is None:
idx1 = None
else:
idx1 = int((xtype(end) - x0) // self.dx.value)
if idx1 >= self.size:
idx1 = None
# crop
if copy:
return self[idx0:idx1].copy()
return self[idx0:idx1] | [
"def",
"crop",
"(",
"self",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"x0",
",",
"x1",
"=",
"self",
".",
"xspan",
"xtype",
"=",
"type",
"(",
"x0",
")",
"if",
"isinstance",
"(",
"start",
",",
"Quant... | Crop this series to the given x-axis extent.
Parameters
----------
start : `float`, optional
lower limit of x-axis to crop to, defaults to
current `~Series.x0`
end : `float`, optional
upper limit of x-axis to crop to, defaults to current series end
copy : `bool`, optional, default: `False`
copy the input data to fresh memory, otherwise return a view
Returns
-------
series : `Series`
A new series with a sub-set of the input data
Notes
-----
If either ``start`` or ``end`` are outside of the original
`Series` span, warnings will be printed and the limits will
be restricted to the :attr:`~Series.xspan` | [
"Crop",
"this",
"series",
"to",
"the",
"given",
"x",
"-",
"axis",
"extent",
"."
] | python | train |
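A hypothetical usage sketch (the series values and span are made up; gwpy is assumed installed):

```python
import numpy as np
from gwpy.timeseries import TimeSeries

ts = TimeSeries(np.arange(100), t0=0, dt=1)   # xspan is (0, 100)
view = ts.crop(10, 20)                        # samples with 10 <= t < 20
copied = ts.crop(10, 20, copy=True)           # same data, fresh memory
early = ts.crop(-5, 20)                       # warns, then pins start to 0
```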
lpantano/seqcluster | seqcluster/detect/metacluster.py | https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/detect/metacluster.py#L202-L214 | def _calculate_similarity(c):
"""Get a similarity matrix of % of shared sequence
:param c: cluster object
:return ma: similarity matrix
"""
ma = {}
for idc in c:
set1 = _get_seqs(c[idc])
[ma.update({(idc, idc2): _common(set1, _get_seqs(c[idc2]), idc, idc2)}) for idc2 in c if idc != idc2 and (idc2, idc) not in ma]
# logger.debug("_calculate_similarity_ %s" % ma)
return ma | [
"def",
"_calculate_similarity",
"(",
"c",
")",
":",
"ma",
"=",
"{",
"}",
"for",
"idc",
"in",
"c",
":",
"set1",
"=",
"_get_seqs",
"(",
"c",
"[",
"idc",
"]",
")",
"[",
"ma",
".",
"update",
"(",
"{",
"(",
"idc",
",",
"idc2",
")",
":",
"_common",
... | Get a similarity matrix of % of shared sequence
:param c: cluster object
:return ma: similarity matrix | [
"Get",
"a",
"similarity",
"matrix",
"of",
"%",
"of",
"shared",
"sequence"
] | python | train |
zvoase/django-relax | relax/utils.py | https://github.com/zvoase/django-relax/blob/10bb37bf3a512b290816856a6877c17fa37e930f/relax/utils.py#L7-L35 | def generator_to_list(function):
"""
Wrap a generator function so that it returns a list when called.
For example:
# Define a generator
>>> def mygen(n):
... i = 0
... while i < n:
... yield i
... i += 1
# This is how it might work
>>> generator = mygen(5)
>>> generator.next()
0
>>> generator.next()
1
# Wrap it in generator_to_list, and it will behave differently.
>>> mygen = generator_to_list(mygen)
>>> mygen(5)
[0, 1, 2, 3, 4]
"""
def wrapper(*args, **kwargs):
return list(function(*args, **kwargs))
wrapper.__name__ = function.__name__
wrapper.__doc__ = function.__doc__
return wrapper | [
"def",
"generator_to_list",
"(",
"function",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"list",
"(",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"wrapper",
".",
"__name__",
"=",
"... | Wrap a generator function so that it returns a list when called.
For example:
# Define a generator
>>> def mygen(n):
... i = 0
... while i < n:
... yield i
... i += 1
# This is how it might work
>>> generator = mygen(5)
>>> generator.next()
0
>>> generator.next()
1
# Wrap it in generator_to_list, and it will behave differently.
>>> mygen = generator_to_list(mygen)
>>> mygen(5)
[0, 1, 2, 3, 4] | [
"Wrap",
"a",
"generator",
"function",
"so",
"that",
"it",
"returns",
"a",
"list",
"when",
"called",
".",
"For",
"example",
":",
"#",
"Define",
"a",
"generator",
">>>",
"def",
"mygen",
"(",
"n",
")",
":",
"...",
"i",
"=",
"0",
"...",
"while",
"i",
"... | python | valid |
Autodesk/pyccc | pyccc/static/run_job.py | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/static/run_job.py#L97-L124 | def find_class(self, module, name):
""" This override is here to help pickle find the modules that classes are defined in.
It does three things:
1) remaps the "PackagedFunction" class from pyccc to the `source.py` module.
2) Remaps any classes created in the client's '__main__' to the `source.py` module
3) Creates on-the-fly modules to store any other classes present in source.py
References:
This is a modified version of the 2-only recipe from
https://wiki.python.org/moin/UsingPickle/RenamingModules.
It's been modified for 2/3 cross-compatibility """
import pickle
modname = self.RENAMETABLE.get(module, module)
try:
# can't use ``super`` here (not 2/3 compatible)
klass = pickle.Unpickler.find_class(self, modname, name)
except (ImportError, RuntimeError):
definition = getattr(source, name)
newmod = _makemod(modname)
sys.modules[modname] = newmod
setattr(newmod, name, definition)
klass = pickle.Unpickler.find_class(self, newmod.__name__, name)
klass.__module__ = module
return klass | [
"def",
"find_class",
"(",
"self",
",",
"module",
",",
"name",
")",
":",
"import",
"pickle",
"modname",
"=",
"self",
".",
"RENAMETABLE",
".",
"get",
"(",
"module",
",",
"module",
")",
"try",
":",
"# can't use ``super`` here (not 2/3 compatible)",
"klass",
"=",
... | This override is here to help pickle find the modules that classes are defined in.
It does three things:
1) remaps the "PackagedFunction" class from pyccc to the `source.py` module.
2) Remaps any classes created in the client's '__main__' to the `source.py` module
3) Creates on-the-fly modules to store any other classes present in source.py
References:
This is a modified version of the 2-only recipe from
https://wiki.python.org/moin/UsingPickle/RenamingModules.
It's been modified for 2/3 cross-compatibility | [
"This",
"override",
"is",
"here",
"to",
"help",
"pickle",
"find",
"the",
"modules",
"that",
"classes",
"are",
"defined",
"in",
"."
] | python | train |
calvinku96/labreporthelper | labreporthelper/datafile.py | https://github.com/calvinku96/labreporthelper/blob/4d436241f389c02eb188c313190df62ab28c3763/labreporthelper/datafile.py#L66-L95 | def create_dat_file(self):
"""
Create and write empty data file in the data directory
"""
output = "## {}\n".format(self.name)
try:
kwargs_items = self.kwargs.iteritems()
except AttributeError:
kwargs_items = self.kwargs.items()
for key, val in kwargs_items:
if val is "l":
output += "#l {}=\n".format(str(key))
elif val is "f" or True:
output += "#f {}=\n".format(str(key))
comment = "## " + "\t".join(["col{" + str(i) + ":d}"
for i in range(self.argnum)])
comment += "\n"
rangeargnum = range(self.argnum)
output += comment.format(*rangeargnum)
if os.path.isfile(self.location_dat):
files = glob.glob(self.location_dat + "*")
count = 2
while (
(self.location_dat + str(count) in files)
) and (count <= 10):
count += 1
os.rename(self.location_dat, self.location_dat + str(count))
    dat_file = open(self.location_dat, "w")  # text mode: output is a str
dat_file.write(output)
dat_file.close() | [
"def",
"create_dat_file",
"(",
"self",
")",
":",
"output",
"=",
"\"## {}\\n\"",
".",
"format",
"(",
"self",
".",
"name",
")",
"try",
":",
"kwargs_items",
"=",
"self",
".",
"kwargs",
".",
"iteritems",
"(",
")",
"except",
"AttributeError",
":",
"kwargs_items... | Create and write empty data file in the data directory | [
"Create",
"and",
"write",
"empty",
"data",
"file",
"in",
"the",
"data",
"directory"
] | python | train |
dcramer/django-static-compiler | src/static_compiler/storage.py | https://github.com/dcramer/django-static-compiler/blob/7218d30e1b4eb88b7a8683ae5748f6a71c502b81/src/static_compiler/storage.py#L30-L36 | def get_available_name(self, name):
"""
Deletes the given file if it exists.
"""
if self.exists(name):
self.delete(name)
return name | [
"def",
"get_available_name",
"(",
"self",
",",
"name",
")",
":",
"if",
"self",
".",
"exists",
"(",
"name",
")",
":",
"self",
".",
"delete",
"(",
"name",
")",
"return",
"name"
] | Deletes the given file if it exists. | [
"Deletes",
"the",
"given",
"file",
"if",
"it",
"exists",
"."
] | python | train |
3ll3d00d/vibe | backend/src/analyser/resources/measurement.py | https://github.com/3ll3d00d/vibe/blob/124b029f13ac746723e92cb47e9cb56edd2e54b5/backend/src/analyser/resources/measurement.py#L189-L204 | def put(self, measurementId, deviceId):
"""
Fails the measurement for this device.
:param measurementId: the measurement name.
:param deviceId: the device name.
:return: 200 if
"""
payload = request.get_json()
failureReason = json.loads(payload).get('failureReason') if payload is not None else None
logger.warning('Failing measurement ' + measurementId + ' for ' + deviceId + ' because ' + str(failureReason))
if self._measurementController.failMeasurement(measurementId, deviceId, failureReason=failureReason):
logger.warning('Failed measurement ' + measurementId + ' for ' + deviceId)
return None, 200
else:
logger.error('Unable to fail measurement ' + measurementId + ' for ' + deviceId)
return None, 404 | [
"def",
"put",
"(",
"self",
",",
"measurementId",
",",
"deviceId",
")",
":",
"payload",
"=",
"request",
".",
"get_json",
"(",
")",
"failureReason",
"=",
"json",
".",
"loads",
"(",
"payload",
")",
".",
"get",
"(",
"'failureReason'",
")",
"if",
"payload",
... | Fails the measurement for this device.
:param measurementId: the measurement name.
:param deviceId: the device name.
:return: 200 if | [
"Fails",
"the",
"measurement",
"for",
"this",
"device",
".",
":",
"param",
"measurementId",
":",
"the",
"measurement",
"name",
".",
":",
"param",
"deviceId",
":",
"the",
"device",
"name",
".",
":",
"return",
":",
"200",
"if"
] | python | train |
jcushman/pdfquery | pdfquery/pdfquery.py | https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L170-L177 | def obj_to_string(obj, top=True):
"""
Turn an arbitrary object into a unicode string. If complex (dict/list/tuple), will be json-encoded.
"""
obj = prepare_for_json_encoding(obj)
if type(obj) == six.text_type:
return obj
return json.dumps(obj) | [
"def",
"obj_to_string",
"(",
"obj",
",",
"top",
"=",
"True",
")",
":",
"obj",
"=",
"prepare_for_json_encoding",
"(",
"obj",
")",
"if",
"type",
"(",
"obj",
")",
"==",
"six",
".",
"text_type",
":",
"return",
"obj",
"return",
"json",
".",
"dumps",
"(",
... | Turn an arbitrary object into a unicode string. If complex (dict/list/tuple), will be json-encoded. | [
"Turn",
"an",
"arbitrary",
"object",
"into",
"a",
"unicode",
"string",
".",
"If",
"complex",
"(",
"dict",
"/",
"list",
"/",
"tuple",
")",
"will",
"be",
"json",
"-",
"encoded",
"."
] | python | train |
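A behavior sketch (it assumes `prepare_for_json_encoding` leaves these simple values unchanged, which may not hold for pdfquery's richer inputs):

```python
print(obj_to_string(u"hello"))       # hello  (strings pass through as-is)
print(obj_to_string({"page": 1}))    # {"page": 1}  (json-encoded)
print(obj_to_string([1, 2, 3]))      # [1, 2, 3]    (json-encoded)
```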
twidi/django-adv-cache-tag | adv_cache_tag/tag.py | https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L408-L413 | def decode_content(self):
"""
Decode (decompress...) the content got from the cache, to the final
html
"""
self.content = pickle.loads(zlib.decompress(self.content)) | [
"def",
"decode_content",
"(",
"self",
")",
":",
"self",
".",
"content",
"=",
"pickle",
".",
"loads",
"(",
"zlib",
".",
"decompress",
"(",
"self",
".",
"content",
")",
")"
] | Decode (decompress...) the content got from the cache, to the final
html | [
"Decode",
"(",
"decompress",
"...",
")",
"the",
"content",
"got",
"from",
"the",
"cache",
"to",
"the",
"final",
"html"
] | python | train |
bitesofcode/projexui | projexui/widgets/xcalendarwidget/xcalendarwidget.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcalendarwidget/xcalendarwidget.py#L234-L259 | def mousePressEvent( self, event ):
"""
Handles the mouse press event.
:param event | <QMouseEvent>
"""
scene_point = self.mapToScene(event.pos())
date = self.scene().dateAt(scene_point)
date_time = self.scene().dateTimeAt(scene_point)
item = self.scene().itemAt(scene_point)
if ( not isinstance(item, XCalendarItem) ):
item = None
# checks to see if the signals are blocked
if ( not self.signalsBlocked() ):
if ( item ):
self.calendarItemClicked.emit(item)
elif ( date_time.isValid() ):
self.dateTimeClicked.emit(date_time)
elif ( date.isValid() ):
self.dateClicked.emit(date)
return super(XCalendarWidget, self).mousePressEvent(event) | [
"def",
"mousePressEvent",
"(",
"self",
",",
"event",
")",
":",
"scene_point",
"=",
"self",
".",
"mapToScene",
"(",
"event",
".",
"pos",
"(",
")",
")",
"date",
"=",
"self",
".",
"scene",
"(",
")",
".",
"dateAt",
"(",
"scene_point",
")",
"date_time",
"... | Handles the mouse press event.
:param event | <QMouseEvent> | [
"Handles",
"the",
"mouse",
"press",
"event",
".",
":",
"param",
"event",
"|",
"<QMouseEvent",
">"
] | python | train |
WZBSocialScienceCenter/tmtoolkit | tmtoolkit/topicmod/model_stats.py | https://github.com/WZBSocialScienceCenter/tmtoolkit/blob/ca8b9d072e37ccc82b533f47d48bd9755722305b/tmtoolkit/topicmod/model_stats.py#L190-L197 | def get_least_relevant_words_for_topic(vocab, rel_mat, topic, n=None):
"""
Get words from `vocab` for `topic` ordered by least to most relevance (Sievert and Shirley 2014) using the relevance
matrix `rel_mat` obtained from `get_topic_word_relevance()`.
Optionally only return the `n` least relevant words.
"""
_check_relevant_words_for_topic_args(vocab, rel_mat, topic)
return _words_by_score(vocab, rel_mat[topic], least_to_most=True, n=n) | [
"def",
"get_least_relevant_words_for_topic",
"(",
"vocab",
",",
"rel_mat",
",",
"topic",
",",
"n",
"=",
"None",
")",
":",
"_check_relevant_words_for_topic_args",
"(",
"vocab",
",",
"rel_mat",
",",
"topic",
")",
"return",
"_words_by_score",
"(",
"vocab",
",",
"re... | Get words from `vocab` for `topic` ordered by least to most relevance (Sievert and Shirley 2014) using the relevance
matrix `rel_mat` obtained from `get_topic_word_relevance()`.
Optionally only return the `n` least relevant words. | [
"Get",
"words",
"from",
"vocab",
"for",
"topic",
"ordered",
"by",
"least",
"to",
"most",
"relevance",
"(",
"Sievert",
"and",
"Shirley",
"2014",
")",
"using",
"the",
"relevance",
"matrix",
"rel_mat",
"obtained",
"from",
"get_topic_word_relevance",
"()",
".",
"O... | python | train |
danielfrg/datasciencebox | datasciencebox/salt/_modules/conda.py | https://github.com/danielfrg/datasciencebox/blob/6b7aa642c6616a46547035fcb815acc1de605a6f/datasciencebox/salt/_modules/conda.py#L89-L99 | def update(packages, env=None, user=None):
"""
Update conda packages in a conda env
Attributes
----------
    packages: comma-delimited string of package names
"""
packages = ' '.join(packages.split(','))
cmd = _create_conda_cmd('update', args=[packages, '--yes', '-q'], env=env, user=user)
return _execcmd(cmd, user=user) | [
"def",
"update",
"(",
"packages",
",",
"env",
"=",
"None",
",",
"user",
"=",
"None",
")",
":",
"packages",
"=",
"' '",
".",
"join",
"(",
"packages",
".",
"split",
"(",
"','",
")",
")",
"cmd",
"=",
"_create_conda_cmd",
"(",
"'update'",
",",
"args",
... | Update conda packages in a conda env
Attributes
----------
    packages: comma-delimited string of package names
"Update",
"conda",
"packages",
"in",
"a",
"conda",
"env"
] | python | train |
yahoo/TensorFlowOnSpark | tensorflowonspark/pipeline.py | https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/tensorflowonspark/pipeline.py#L483-L564 | def _run_model(iterator, args, tf_args):
"""mapPartitions function to run single-node inferencing from a checkpoint/saved_model, using the model's input/output mappings.
Args:
:iterator: input RDD partition iterator.
:args: arguments for TFModel, in argparse format
:tf_args: arguments for TensorFlow inferencing code, in argparse or ARGV format.
Returns:
An iterator of result data.
"""
single_node_env(tf_args)
logging.info("===== input_mapping: {}".format(args.input_mapping))
logging.info("===== output_mapping: {}".format(args.output_mapping))
input_tensor_names = [tensor for col, tensor in sorted(args.input_mapping.items())]
output_tensor_names = [tensor for tensor, col in sorted(args.output_mapping.items())]
# if using a signature_def_key, get input/output tensor info from the requested signature
if args.signature_def_key:
assert args.export_dir, "Inferencing with signature_def_key requires --export_dir argument"
logging.info("===== loading meta_graph_def for tag_set ({0}) from saved_model: {1}".format(args.tag_set, args.export_dir))
meta_graph_def = get_meta_graph_def(args.export_dir, args.tag_set)
signature = meta_graph_def.signature_def[args.signature_def_key]
logging.debug("signature: {}".format(signature))
inputs_tensor_info = signature.inputs
logging.debug("inputs_tensor_info: {0}".format(inputs_tensor_info))
outputs_tensor_info = signature.outputs
logging.debug("outputs_tensor_info: {0}".format(outputs_tensor_info))
result = []
global global_sess, global_args
if global_sess and global_args == args:
# if graph/session already loaded/started (and using same args), just reuse it
sess = global_sess
else:
# otherwise, create new session and load graph from disk
tf.reset_default_graph()
sess = tf.Session(graph=tf.get_default_graph())
if args.export_dir:
assert args.tag_set, "Inferencing from a saved_model requires --tag_set"
# load graph from a saved_model
logging.info("===== restoring from saved_model: {}".format(args.export_dir))
loader.load(sess, args.tag_set.split(','), args.export_dir)
elif args.model_dir:
# load graph from a checkpoint
ckpt = tf.train.latest_checkpoint(args.model_dir)
assert ckpt, "Invalid model checkpoint path: {}".format(args.model_dir)
logging.info("===== restoring from checkpoint: {}".format(ckpt + ".meta"))
saver = tf.train.import_meta_graph(ckpt + ".meta", clear_devices=True)
saver.restore(sess, ckpt)
else:
raise Exception("Inferencing requires either --model_dir or --export_dir argument")
global_sess = sess
global_args = args
# get list of input/output tensors (by name)
if args.signature_def_key:
input_tensors = [inputs_tensor_info[t].name for t in input_tensor_names]
output_tensors = [outputs_tensor_info[output_tensor_names[0]].name]
else:
input_tensors = [t + ':0' for t in input_tensor_names]
output_tensors = [t + ':0' for t in output_tensor_names]
logging.info("input_tensors: {0}".format(input_tensors))
logging.info("output_tensors: {0}".format(output_tensors))
# feed data in batches and return output tensors
for tensors in yield_batch(iterator, args.batch_size, len(input_tensor_names)):
inputs_feed_dict = {}
for i in range(len(input_tensors)):
inputs_feed_dict[input_tensors[i]] = tensors[i]
outputs = sess.run(output_tensors, feed_dict=inputs_feed_dict)
lengths = [len(output) for output in outputs]
input_size = len(tensors[0])
assert all([length == input_size for length in lengths]), "Output array sizes {} must match input size: {}".format(lengths, input_size)
python_outputs = [output.tolist() for output in outputs] # convert from numpy to standard python types
result.extend(zip(*python_outputs)) # convert to an array of tuples of "output columns"
return result | [
"def",
"_run_model",
"(",
"iterator",
",",
"args",
",",
"tf_args",
")",
":",
"single_node_env",
"(",
"tf_args",
")",
"logging",
".",
"info",
"(",
"\"===== input_mapping: {}\"",
".",
"format",
"(",
"args",
".",
"input_mapping",
")",
")",
"logging",
".",
"info... | mapPartitions function to run single-node inferencing from a checkpoint/saved_model, using the model's input/output mappings.
Args:
:iterator: input RDD partition iterator.
:args: arguments for TFModel, in argparse format
:tf_args: arguments for TensorFlow inferencing code, in argparse or ARGV format.
Returns:
An iterator of result data. | [
"mapPartitions",
"function",
"to",
"run",
"single",
"-",
"node",
"inferencing",
"from",
"a",
"checkpoint",
"/",
"saved_model",
"using",
"the",
"model",
"s",
"input",
"/",
"output",
"mappings",
"."
] | python | train |
bcbio/bcbio-nextgen | bcbio/variation/bamprep.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bamprep.py#L68-L86 | def _piped_bamprep_region_gatk(data, region, prep_params, out_file, tmp_dir):
"""Perform semi-piped BAM preparation using Picard/GATK tools.
"""
broad_runner = broad.runner_from_config(data["config"])
cur_bam, cl = _piped_input_cl(data, region, tmp_dir, out_file, prep_params)
if not prep_params["realign"]:
prerecal_bam = None
elif prep_params["realign"] == "gatk":
prerecal_bam, cl = _piped_realign_gatk(data, region, cl, out_file, tmp_dir,
prep_params)
else:
raise NotImplementedError("Realignment method: %s" % prep_params["realign"])
with file_transaction(data, out_file) as tx_out_file:
out_flag = ("-o" if (prep_params["realign"] == "gatk"
or not prep_params["realign"])
else ">")
cmd = "{cl} {out_flag} {tx_out_file}".format(**locals())
do.run(cmd, "GATK: realign {0}".format(region), data)
_cleanup_tempfiles(data, [cur_bam, prerecal_bam]) | [
"def",
"_piped_bamprep_region_gatk",
"(",
"data",
",",
"region",
",",
"prep_params",
",",
"out_file",
",",
"tmp_dir",
")",
":",
"broad_runner",
"=",
"broad",
".",
"runner_from_config",
"(",
"data",
"[",
"\"config\"",
"]",
")",
"cur_bam",
",",
"cl",
"=",
"_pi... | Perform semi-piped BAM preparation using Picard/GATK tools. | [
"Perform",
"semi",
"-",
"piped",
"BAM",
"preparation",
"using",
"Picard",
"/",
"GATK",
"tools",
"."
] | python | train |
apache/airflow | airflow/utils/dag_processing.py | https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L1078-L1088 | def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None | [
"def",
"get_start_time",
"(",
"self",
",",
"file_path",
")",
":",
"if",
"file_path",
"in",
"self",
".",
"_processors",
":",
"return",
"self",
".",
"_processors",
"[",
"file_path",
"]",
".",
"start_time",
"return",
"None"
] | :param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime | [
":",
"param",
"file_path",
":",
"the",
"path",
"to",
"the",
"file",
"that",
"s",
"being",
"processed",
":",
"type",
"file_path",
":",
"unicode",
":",
"return",
":",
"the",
"start",
"time",
"of",
"the",
"process",
"that",
"s",
"processing",
"the",
"specif... | python | test |
python-thumbnails/python-thumbnails | thumbnails/__init__.py | https://github.com/python-thumbnails/python-thumbnails/blob/d8dc0ff5410f730de2a0e5759e8a818b19de35b9/thumbnails/__init__.py#L11-L65 | def get_thumbnail(original, size, **options):
"""
Creates or gets an already created thumbnail for the given image with the given size and
options.
    :param original: File-path, url or base64-encoded string of the image that you want a
thumbnail.
:param size: String with the wanted thumbnail size. On the form: ``200x200``, ``200`` or
``x200``.
:param crop: Crop settings, should be ``center``, ``top``, ``right``, ``bottom``, ``left``.
:param force: If set to ``True`` the thumbnail will be created even if it exists before.
:param quality: Overrides ``THUMBNAIL_QUALITY``, will set the quality used by the backend while
saving the thumbnail.
:param scale_up: Overrides ``THUMBNAIL_SCALE_UP``, if set to ``True`` the image will be scaled
up if necessary.
:param colormode: Overrides ``THUMBNAIL_COLORMODE``, The default colormode for thumbnails.
Supports all values supported by pillow. In other engines there is a best
effort translation from pillow modes to the modes supported by the current
engine.
:param format: Overrides the format the thumbnail will be saved in. This will override both the
detected file type as well as the one specified in ``THUMBNAIL_FALLBACK_FORMAT``.
:return: A Thumbnail object
"""
engine = get_engine()
cache = get_cache_backend()
original = SourceFile(original)
crop = options.get('crop', None)
options = engine.evaluate_options(options)
thumbnail_name = generate_filename(original, size, crop)
if settings.THUMBNAIL_DUMMY:
engine = DummyEngine()
return engine.get_thumbnail(thumbnail_name, engine.parse_size(size), crop, options)
cached = cache.get(thumbnail_name)
force = options is not None and 'force' in options and options['force']
if not force and cached:
return cached
thumbnail = Thumbnail(thumbnail_name, engine.get_format(original, options))
if force or not thumbnail.exists:
size = engine.parse_size(size)
thumbnail.image = engine.get_thumbnail(original, size, crop, options)
thumbnail.save(options)
for resolution in settings.THUMBNAIL_ALTERNATIVE_RESOLUTIONS:
resolution_size = engine.calculate_alternative_resolution_size(resolution, size)
image = engine.get_thumbnail(original, resolution_size, crop, options)
thumbnail.save_alternative_resolution(resolution, image, options)
cache.set(thumbnail)
return thumbnail | [
"def",
"get_thumbnail",
"(",
"original",
",",
"size",
",",
"*",
"*",
"options",
")",
":",
"engine",
"=",
"get_engine",
"(",
")",
"cache",
"=",
"get_cache_backend",
"(",
")",
"original",
"=",
"SourceFile",
"(",
"original",
")",
"crop",
"=",
"options",
"."... | Creates or gets an already created thumbnail for the given image with the given size and
options.
    :param original: File-path, url or base64-encoded string of the image that you want a
thumbnail.
:param size: String with the wanted thumbnail size. On the form: ``200x200``, ``200`` or
``x200``.
:param crop: Crop settings, should be ``center``, ``top``, ``right``, ``bottom``, ``left``.
:param force: If set to ``True`` the thumbnail will be created even if it exists before.
:param quality: Overrides ``THUMBNAIL_QUALITY``, will set the quality used by the backend while
saving the thumbnail.
:param scale_up: Overrides ``THUMBNAIL_SCALE_UP``, if set to ``True`` the image will be scaled
up if necessary.
:param colormode: Overrides ``THUMBNAIL_COLORMODE``, The default colormode for thumbnails.
Supports all values supported by pillow. In other engines there is a best
effort translation from pillow modes to the modes supported by the current
engine.
:param format: Overrides the format the thumbnail will be saved in. This will override both the
detected file type as well as the one specified in ``THUMBNAIL_FALLBACK_FORMAT``.
:return: A Thumbnail object | [
"Creates",
"or",
"gets",
"an",
"already",
"created",
"thumbnail",
"for",
"the",
"given",
"image",
"with",
"the",
"given",
"size",
"and",
"options",
"."
] | python | train |
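A hypothetical usage sketch (the file path is made up, a configured engine and cache backend are assumed, and the `url` attribute is an assumption about the returned Thumbnail object):

```python
from thumbnails import get_thumbnail

thumb = get_thumbnail('photos/cat.jpg', '200x200', crop='center')
print(thumb.url)                                   # cached thumbnail location
small = get_thumbnail('photos/cat.jpg', 'x100',    # height-only size
                      quality=80, force=True)      # re-create even if cached
```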
elastic/elasticsearch-py | elasticsearch/client/indices.py | https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/indices.py#L52-L79 | def flush(self, index=None, params=None):
"""
Explicitly flush one or more indices.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string for all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg force: Whether a flush should be forced even if it is not
necessarily needed ie. if no changes will be committed to the index.
This is useful if transaction log IDs should be incremented even if
no uncommitted changes are present. (This setting can be considered
as internal)
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg wait_if_ongoing: If set to true the flush operation will block
until the flush can be executed if another flush operation is
already executing. The default is true. If set to false the flush
        will be skipped if another flush operation is already running.
"""
return self.transport.perform_request(
"POST", _make_path(index, "_flush"), params=params
) | [
"def",
"flush",
"(",
"self",
",",
"index",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"return",
"self",
".",
"transport",
".",
"perform_request",
"(",
"\"POST\"",
",",
"_make_path",
"(",
"index",
",",
"\"_flush\"",
")",
",",
"params",
"=",
"par... | Explicitly flush one or more indices.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string for all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg force: Whether a flush should be forced even if it is not
necessarily needed ie. if no changes will be committed to the index.
This is useful if transaction log IDs should be incremented even if
no uncommitted changes are present. (This setting can be considered
as internal)
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg wait_if_ongoing: If set to true the flush operation will block
until the flush can be executed if another flush operation is
already executing. The default is true. If set to false the flush
        will be skipped if another flush operation is already running.
"Explicitly",
"flush",
"one",
"or",
"more",
"indices",
".",
"<http",
":",
"//",
"www",
".",
"elastic",
".",
"co",
"/",
"guide",
"/",
"en",
"/",
"elasticsearch",
"/",
"reference",
"/",
"current",
"/",
"indices",
"-",
"flush",
".",
"html",
">",
"_"
] | python | train |
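A minimal sketch with an elasticsearch-py client (the cluster address and index name are assumptions):

```python
from elasticsearch import Elasticsearch

es = Elasticsearch()                   # defaults to localhost:9200
es.indices.flush(index='logs-2019')    # flush a single index
es.indices.flush()                     # flush all indices
```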
edx/edx-enterprise | enterprise/utils.py | https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/utils.py#L185-L213 | def get_catalog_admin_url_template(mode='change'):
"""
Get template of catalog admin url.
URL template will contain a placeholder '{catalog_id}' for catalog id.
Arguments:
mode e.g. change/add.
Returns:
A string containing template for catalog url.
Example:
>>> get_catalog_admin_url_template('change')
"http://localhost:18381/admin/catalogs/catalog/{catalog_id}/change/"
"""
api_base_url = getattr(settings, "COURSE_CATALOG_API_URL", "")
# Extract FQDN (Fully Qualified Domain Name) from API URL.
match = re.match(r"^(?P<fqdn>(?:https?://)?[^/]+)", api_base_url)
if not match:
return ""
# Return matched FQDN from catalog api url appended with catalog admin path
if mode == 'change':
return match.group("fqdn").rstrip("/") + "/admin/catalogs/catalog/{catalog_id}/change/"
elif mode == 'add':
return match.group("fqdn").rstrip("/") + "/admin/catalogs/catalog/add/" | [
"def",
"get_catalog_admin_url_template",
"(",
"mode",
"=",
"'change'",
")",
":",
"api_base_url",
"=",
"getattr",
"(",
"settings",
",",
"\"COURSE_CATALOG_API_URL\"",
",",
"\"\"",
")",
"# Extract FQDN (Fully Qualified Domain Name) from API URL.",
"match",
"=",
"re",
".",
... | Get template of catalog admin url.
URL template will contain a placeholder '{catalog_id}' for catalog id.
Arguments:
mode e.g. change/add.
Returns:
A string containing template for catalog url.
Example:
>>> get_catalog_admin_url_template('change')
"http://localhost:18381/admin/catalogs/catalog/{catalog_id}/change/" | [
"Get",
"template",
"of",
"catalog",
"admin",
"url",
"."
] | python | valid |
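Filling the placeholder the docstring describes is a plain `str.format` call:

```python
template = get_catalog_admin_url_template('change')
# 'http://localhost:18381/admin/catalogs/catalog/{catalog_id}/change/'
url = template.format(catalog_id=42)
# 'http://localhost:18381/admin/catalogs/catalog/42/change/'
```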
crytic/slither | slither/core/declarations/contract.py | https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/core/declarations/contract.py#L152-L158 | def slithir_variables(self):
'''
List all of the slithir variables (non SSA)
'''
slithir_variables = [f.slithir_variables for f in self.functions + self.modifiers]
slithir_variables = [item for sublist in slithir_variables for item in sublist]
return list(set(slithir_variables)) | [
"def",
"slithir_variables",
"(",
"self",
")",
":",
"slithir_variables",
"=",
"[",
"f",
".",
"slithir_variables",
"for",
"f",
"in",
"self",
".",
"functions",
"+",
"self",
".",
"modifiers",
"]",
"slithir_variables",
"=",
"[",
"item",
"for",
"sublist",
"in",
... | List all of the slithir variables (non SSA) | [
"List",
"all",
"of",
"the",
"slithir",
"variables",
"(",
"non",
"SSA",
")"
] | python | train |
J535D165/recordlinkage | recordlinkage/measures.py | https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/measures.py#L211-L235 | def false_negatives(links_true, links_pred):
"""Count the number of False Negatives.
Returns the number of incorrect predictions of true links. (true links,
but predicted as non-links). This value is known as the number of False
Negatives (FN).
Parameters
----------
links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The true (or actual) links.
links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The predicted links.
Returns
-------
int
The number of false negatives.
"""
links_true = _get_multiindex(links_true)
links_pred = _get_multiindex(links_pred)
return len(links_true.difference(links_pred)) | [
"def",
"false_negatives",
"(",
"links_true",
",",
"links_pred",
")",
":",
"links_true",
"=",
"_get_multiindex",
"(",
"links_true",
")",
"links_pred",
"=",
"_get_multiindex",
"(",
"links_pred",
")",
"return",
"len",
"(",
"links_true",
".",
"difference",
"(",
"lin... | Count the number of False Negatives.
Returns the number of incorrect predictions of true links. (true links,
but predicted as non-links). This value is known as the number of False
Negatives (FN).
Parameters
----------
links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The true (or actual) links.
links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The predicted links.
Returns
-------
int
The number of false negatives. | [
"Count",
"the",
"number",
"of",
"False",
"Negatives",
"."
] | python | train |
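A minimal sketch with candidate record pairs as pandas MultiIndexes:

```python
import pandas as pd

links_true = pd.MultiIndex.from_tuples([(1, 11), (2, 12), (3, 13)])
links_pred = pd.MultiIndex.from_tuples([(1, 11), (3, 13)])

print(false_negatives(links_true, links_pred))  # 1 -- the (2, 12) link was missed
```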
inspirehep/refextract | refextract/references/engine.py | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L187-L200 | def format_hep(citation_elements):
"""Format hep-th report numbers with a dash
e.g. replaces hep-th-9711200 with hep-th/9711200
"""
prefixes = ('astro-ph-', 'hep-th-', 'hep-ph-', 'hep-ex-', 'hep-lat-',
'math-ph-')
for el in citation_elements:
if el['type'] == 'REPORTNUMBER':
for p in prefixes:
if el['report_num'].startswith(p):
el['report_num'] = el['report_num'][:len(p) - 1] + '/' + \
el['report_num'][len(p):]
return citation_elements | [
"def",
"format_hep",
"(",
"citation_elements",
")",
":",
"prefixes",
"=",
"(",
"'astro-ph-'",
",",
"'hep-th-'",
",",
"'hep-ph-'",
",",
"'hep-ex-'",
",",
"'hep-lat-'",
",",
"'math-ph-'",
")",
"for",
"el",
"in",
"citation_elements",
":",
"if",
"el",
"[",
"'typ... | Format hep-th report numbers with a dash
e.g. replaces hep-th-9711200 with hep-th/9711200 | [
"Format",
"hep",
"-",
"th",
"report",
"numbers",
"with",
"a",
"dash"
] | python | train |
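A sketch of the rewrite on a single citation element (the dict shape mirrors what the function itself reads and writes):

```python
elements = [{'type': 'REPORTNUMBER', 'report_num': 'hep-th-9711200'}]
format_hep(elements)
print(elements[0]['report_num'])  # hep-th/9711200
```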
ttinies/sc2players | sc2players/playerRecord.py | https://github.com/ttinies/sc2players/blob/fd9b37c268bf1005d9ef73a25e65ed97c8b7895f/sc2players/playerRecord.py#L143-L153 | def load(self, playerName=None):
"""retrieve the PlayerRecord settings from saved disk file"""
if playerName: # switch the PlayerRecord this object describes
self.name = playerName # preset value to load self.filename
try:
with open(self.filename, "rb") as f:
data = f.read()
except Exception:
raise ValueError("invalid profile, '%s'. file does not exist: %s"%(self.name, self.filename))
self.update(json.loads(data))
self._matches = [] # mandate match history be recalculated for this newly loaded player | [
"def",
"load",
"(",
"self",
",",
"playerName",
"=",
"None",
")",
":",
"if",
"playerName",
":",
"# switch the PlayerRecord this object describes",
"self",
".",
"name",
"=",
"playerName",
"# preset value to load self.filename",
"try",
":",
"with",
"open",
"(",
"self",... | retrieve the PlayerRecord settings from saved disk file | [
"retrieve",
"the",
"PlayerRecord",
"settings",
"from",
"saved",
"disk",
"file"
] | python | train |
materialsproject/pymatgen | pymatgen/analysis/functional_groups.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/functional_groups.py#L238-L304 | def get_basic_functional_groups(self, func_groups=None):
"""
Identify functional groups that cannot be identified by the Ertl method
of get_special_carbon and get_heteroatoms, such as benzene rings, methyl
groups, and ethyl groups.
TODO: Think of other functional groups that are important enough to be
added (ex: do we need ethyl, butyl, propyl?)
:param func_groups: List of strs representing the functional groups of
interest. Default to None, meaning that all of the functional groups
defined in this function will be sought.
:return: list of sets of ints, representing groups of connected atoms
"""
strat = OpenBabelNN()
hydrogens = {n for n in self.molgraph.graph.nodes if
str(self.species[n]) == "H"}
carbons = [n for n in self.molgraph.graph.nodes if
str(self.species[n]) == "C"]
if func_groups is None:
func_groups = ["methyl", "phenyl"]
results = []
if "methyl" in func_groups:
for node in carbons:
neighbors = strat.get_nn_info(self.molecule, node)
hs = {n["site_index"] for n in neighbors if n["site_index"] in hydrogens}
# Methyl group is CH3, but this will also catch methane
if len(hs) >= 3:
hs.add(node)
results.append(hs)
if "phenyl" in func_groups:
rings_indices = [set(sum(ring, ())) for ring in
self.molgraph.find_rings()]
possible_phenyl = [r for r in rings_indices if len(r) == 6]
for ring in possible_phenyl:
# Phenyl group should have only one (0 for benzene) member whose
# neighbors are not two carbons and one hydrogen
num_deviants = 0
for node in ring:
neighbors = strat.get_nn_info(self.molecule, node)
neighbor_spec = sorted([str(self.species[n["site_index"]])
for n in neighbors])
if neighbor_spec != ["C", "C", "H"]:
num_deviants += 1
if num_deviants <= 1:
for node in ring:
ring_group = copy.deepcopy(ring)
neighbors = self.molgraph.graph[node]
# Add hydrogens to the functional group
for neighbor in neighbors.keys():
if neighbor in hydrogens:
ring_group.add(neighbor)
results.append(ring_group)
return results | [
"def",
"get_basic_functional_groups",
"(",
"self",
",",
"func_groups",
"=",
"None",
")",
":",
"strat",
"=",
"OpenBabelNN",
"(",
")",
"hydrogens",
"=",
"{",
"n",
"for",
"n",
"in",
"self",
".",
"molgraph",
".",
"graph",
".",
"nodes",
"if",
"str",
"(",
"s... | Identify functional groups that cannot be identified by the Ertl method
of get_special_carbon and get_heteroatoms, such as benzene rings, methyl
groups, and ethyl groups.
TODO: Think of other functional groups that are important enough to be
added (ex: do we need ethyl, butyl, propyl?)
:param func_groups: List of strs representing the functional groups of
interest. Default to None, meaning that all of the functional groups
defined in this function will be sought.
:return: list of sets of ints, representing groups of connected atoms | [
"Identify",
"functional",
"groups",
"that",
"cannot",
"be",
"identified",
"by",
"the",
"Ertl",
"method",
"of",
"get_special_carbon",
"and",
"get_heteroatoms",
"such",
"as",
"benzene",
"rings",
"methyl",
"groups",
"and",
"ethyl",
"groups",
"."
] | python | train |
agoragames/leaderboard-python | leaderboard/leaderboard.py | https://github.com/agoragames/leaderboard-python/blob/ec309859b197a751ac0322374b36d134d8c5522f/leaderboard/leaderboard.py#L529-L537 | def change_score_for(self, member, delta, member_data=None):
'''
Change the score for a member in the leaderboard by a score delta which can be positive or negative.
@param member [String] Member name.
@param delta [float] Score change.
@param member_data [String] Optional member data.
'''
self.change_score_for_member_in(self.leaderboard_name, member, delta, member_data) | [
"def",
"change_score_for",
"(",
"self",
",",
"member",
",",
"delta",
",",
"member_data",
"=",
"None",
")",
":",
"self",
".",
"change_score_for_member_in",
"(",
"self",
".",
"leaderboard_name",
",",
"member",
",",
"delta",
",",
"member_data",
")"
] | Change the score for a member in the leaderboard by a score delta which can be positive or negative.
@param member [String] Member name.
@param delta [float] Score change.
@param member_data [String] Optional member data. | [
"Change",
"the",
"score",
"for",
"a",
"member",
"in",
"the",
"leaderboard",
"by",
"a",
"score",
"delta",
"which",
"can",
"be",
"positive",
"or",
"negative",
"."
] | python | train |
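A hypothetical usage sketch (a local Redis instance and the `rank_member` call are assumptions about the wider leaderboard API):

```python
from leaderboard import Leaderboard

highscores = Leaderboard('highscores')     # connects to local Redis by default
highscores.rank_member('alice', 100)
highscores.change_score_for('alice', 25)   # alice's score becomes 125
highscores.change_score_for('alice', -50)  # deltas may be negative: now 75
```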
ybrs/single-beat | singlebeat/beat.py | https://github.com/ybrs/single-beat/blob/d036b62d2531710dfd806e9dc2a8d67c77616082/singlebeat/beat.py#L372-L390 | def cli_command_restart(self, msg):
"""\
restart the subprocess
i. we set our state to RESTARTING - on restarting we still send heartbeat
ii. we kill the subprocess
iii. we start again
        iv. if it's started we set our state to RUNNING, else we set it to WAITING
:param msg:
:return:
"""
info = ''
if self.state == State.RUNNING and self.sprocess and self.sprocess.proc:
self.state = State.RESTARTING
self.sprocess.set_exit_callback(self.proc_exit_cb_restart)
self.sprocess.proc.kill()
info = 'killed'
# TODO: check if process is really dead etc.
return info | [
"def",
"cli_command_restart",
"(",
"self",
",",
"msg",
")",
":",
"info",
"=",
"''",
"if",
"self",
".",
"state",
"==",
"State",
".",
"RUNNING",
"and",
"self",
".",
"sprocess",
"and",
"self",
".",
"sprocess",
".",
"proc",
":",
"self",
".",
"state",
"="... | \
restart the subprocess
i. we set our state to RESTARTING - on restarting we still send heartbeat
ii. we kill the subprocess
iii. we start again
        iv. if it's started we set our state to RUNNING, else we set it to WAITING
:param msg:
:return: | [
"\\",
"restart",
"the",
"subprocess",
"i",
".",
"we",
"set",
"our",
"state",
"to",
"RESTARTING",
"-",
"on",
"restarting",
"we",
"still",
"send",
"heartbeat",
"ii",
".",
"we",
"kill",
"the",
"subprocess",
"iii",
".",
"we",
"start",
"again",
"iv",
".",
"... | python | test |
google/grr | grr/client/grr_response_client/client_utils_osx.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_utils_osx.py#L189-L215 | def GetRawDevice(path):
"""Resolve the raw device that contains the path."""
device_map = GetMountpoints()
path = utils.SmartUnicode(path)
mount_point = path = utils.NormalizePath(path, "/")
result = rdf_paths.PathSpec(pathtype=rdf_paths.PathSpec.PathType.OS)
# Assign the most specific mount point to the result
while mount_point:
try:
result.path, fs_type = device_map[mount_point]
if fs_type in [
"ext2", "ext3", "ext4", "vfat", "ntfs", "Apple_HFS", "hfs", "msdos"
]:
# These are read filesystems
result.pathtype = rdf_paths.PathSpec.PathType.OS
else:
result.pathtype = rdf_paths.PathSpec.PathType.UNSET
# Drop the mount point
path = utils.NormalizePath(path[len(mount_point):])
return result, path
except KeyError:
mount_point = os.path.dirname(mount_point) | [
"def",
"GetRawDevice",
"(",
"path",
")",
":",
"device_map",
"=",
"GetMountpoints",
"(",
")",
"path",
"=",
"utils",
".",
"SmartUnicode",
"(",
"path",
")",
"mount_point",
"=",
"path",
"=",
"utils",
".",
"NormalizePath",
"(",
"path",
",",
"\"/\"",
")",
"res... | Resolve the raw device that contains the path. | [
"Resolve",
"the",
"raw",
"device",
"that",
"contains",
"the",
"path",
"."
] | python | train |
WhyNotHugo/django-afip | django_afip/admin.py | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/admin.py#L24-L59 | def catch_errors(f):
"""
Catches specific errors in admin actions and shows a friendly error.
"""
@functools.wraps(f)
def wrapper(self, request, *args, **kwargs):
try:
return f(self, request, *args, **kwargs)
except exceptions.CertificateExpired:
self.message_user(
request,
_('The AFIP Taxpayer certificate has expired.'),
messages.ERROR,
)
except exceptions.UntrustedCertificate:
self.message_user(
request,
_('The AFIP Taxpayer certificate is untrusted.'),
messages.ERROR,
)
except exceptions.CorruptCertificate:
self.message_user(
request,
_('The AFIP Taxpayer certificate is corrupt.'),
messages.ERROR,
)
except exceptions.AuthenticationError as e:
logger.exception('AFIP auth failed')
self.message_user(
request,
                _('An unknown authentication error has occurred: %s') % e,
messages.ERROR,
)
return wrapper | [
"def",
"catch_errors",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"f",
"(",
"self",
",",
"request",
... | Catches specific errors in admin actions and shows a friendly error. | [
"Catches",
"specific",
"errors",
"in",
"admin",
"actions",
"and",
"shows",
"a",
"friendly",
"error",
"."
] | python | train |
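A sketch of applying the decorator to a Django admin action so AFIP failures surface as admin messages rather than 500s; the action body and model method are hypothetical:

```python
from django.contrib import admin

class TaxPayerAdmin(admin.ModelAdmin):
    actions = ['fetch_pos']

    @catch_errors
    def fetch_pos(self, request, queryset):
        for taxpayer in queryset:
            taxpayer.fetch_points_of_sales()  # may raise the AFIP errors above
```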
bigchaindb/bigchaindb-driver | bigchaindb_driver/pool.py | https://github.com/bigchaindb/bigchaindb-driver/blob/c294a535f0696bd19483ae11a4882b74e6fc061e/bigchaindb_driver/pool.py#L32-L52 | def pick(self, connections):
"""Picks a connection with the earliest backoff time.
As a result, the first connection is picked
for as long as it has no backoff time.
Otherwise, the connections are tried in a round robin fashion.
Args:
connections (:obj:list): List of
:class:`~bigchaindb_driver.connection.Connection` instances.
"""
if len(connections) == 1:
return connections[0]
def key(conn):
return (datetime.min
if conn.backoff_time is None
else conn.backoff_time)
return min(*connections, key=key) | [
"def",
"pick",
"(",
"self",
",",
"connections",
")",
":",
"if",
"len",
"(",
"connections",
")",
"==",
"1",
":",
"return",
"connections",
"[",
"0",
"]",
"def",
"key",
"(",
"conn",
")",
":",
"return",
"(",
"datetime",
".",
"min",
"if",
"conn",
".",
... | Picks a connection with the earliest backoff time.
As a result, the first connection is picked
for as long as it has no backoff time.
Otherwise, the connections are tried in a round robin fashion.
Args:
connections (:obj:list): List of
:class:`~bigchaindb_driver.connection.Connection` instances. | [
"Picks",
"a",
"connection",
"with",
"the",
"earliest",
"backoff",
"time",
"."
] | python | train |
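A behavior sketch with stand-in connection objects (the real instances are `bigchaindb_driver.connection.Connection`; the stub only mimics the `backoff_time` attribute read by `pick`):

```python
from collections import namedtuple
from datetime import datetime, timedelta

Conn = namedtuple('Conn', ['node', 'backoff_time'])
conns = [
    Conn('http://node-a', datetime.utcnow() + timedelta(seconds=30)),
    Conn('http://node-b', None),   # no backoff -> treated as datetime.min
]
key = lambda c: datetime.min if c.backoff_time is None else c.backoff_time
print(min(*conns, key=key).node)   # http://node-b
```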
aws/sagemaker-python-sdk | src/sagemaker/workflow/airflow.py | https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/workflow/airflow.py#L533-L617 | def transform_config_from_estimator(estimator, task_id, task_type, instance_count, instance_type, data,
data_type='S3Prefix', content_type=None, compression_type=None, split_type=None,
job_name=None, model_name=None, strategy=None, assemble_with=None, output_path=None,
output_kms_key=None, accept=None, env=None, max_concurrent_transforms=None,
max_payload=None, tags=None, role=None, volume_kms_key=None,
model_server_workers=None, image=None, vpc_config_override=None):
"""Export Airflow transform config from a SageMaker estimator
Args:
estimator (sagemaker.model.EstimatorBase): The SageMaker estimator to export Airflow config from.
It has to be an estimator associated with a training job.
task_id (str): The task id of any airflow.contrib.operators.SageMakerTrainingOperator or
airflow.contrib.operators.SageMakerTuningOperator that generates training jobs in the DAG. The transform
config is built based on the training job generated in this operator.
task_type (str): Whether the task is from SageMakerTrainingOperator or SageMakerTuningOperator. Values can be
'training', 'tuning' or None (which means training job is not from any task).
instance_count (int): Number of EC2 instances to use.
instance_type (str): Type of EC2 instance to use, for example, 'ml.c4.xlarge'.
data (str): Input data location in S3.
data_type (str): What the S3 location defines (default: 'S3Prefix'). Valid values:
* 'S3Prefix' - the S3 URI defines a key name prefix. All objects with this prefix will be used as
inputs for the transform job.
* 'ManifestFile' - the S3 URI points to a single manifest file listing each S3 object to use as
an input for the transform job.
content_type (str): MIME type of the input data (default: None).
compression_type (str): Compression type of the input data, if compressed (default: None).
Valid values: 'Gzip', None.
split_type (str): The record delimiter for the input object (default: 'None').
Valid values: 'None', 'Line', 'RecordIO', and 'TFRecord'.
job_name (str): transform job name (default: None). If not specified, one will be generated.
model_name (str): model name (default: None). If not specified, one will be generated.
strategy (str): The strategy used to decide how to batch records in a single request (default: None).
Valid values: 'MULTI_RECORD' and 'SINGLE_RECORD'.
assemble_with (str): How the output is assembled (default: None). Valid values: 'Line' or 'None'.
output_path (str): S3 location for saving the transform result. If not specified, results are stored to
a default bucket.
output_kms_key (str): Optional. KMS key ID for encrypting the transform output (default: None).
accept (str): The content type accepted by the endpoint deployed during the transform job.
env (dict): Environment variables to be set for use during the transform job (default: None).
max_concurrent_transforms (int): The maximum number of HTTP requests to be made to
each individual transform container at one time.
max_payload (int): Maximum size of the payload in a single HTTP request to the container in MB.
tags (list[dict]): List of tags for labeling a transform job. If none specified, then the tags used for
the training job are used for the transform job.
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
transform jobs. If not specified, the role from the Estimator will be used.
volume_kms_key (str): Optional. KMS key ID for encrypting the volume attached to the ML
compute instance (default: None).
model_server_workers (int): Optional. The number of worker processes used by the inference server.
If None, server will use one worker per vCPU.
        image (str): A container image to use for deploying the model
vpc_config_override (dict[str, list[str]]): Override for VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
Returns:
dict: Transform config that can be directly used by SageMakerTransformOperator in Airflow.
"""
model_base_config = model_config_from_estimator(instance_type=instance_type, estimator=estimator, task_id=task_id,
task_type=task_type, role=role, image=image, name=model_name,
model_server_workers=model_server_workers,
vpc_config_override=vpc_config_override)
if isinstance(estimator, sagemaker.estimator.Framework):
transformer = estimator.transformer(instance_count, instance_type, strategy, assemble_with, output_path,
output_kms_key, accept, env, max_concurrent_transforms,
max_payload, tags, role, model_server_workers, volume_kms_key)
else:
transformer = estimator.transformer(instance_count, instance_type, strategy, assemble_with, output_path,
output_kms_key, accept, env, max_concurrent_transforms,
max_payload, tags, role, volume_kms_key)
transformer.model_name = model_base_config['ModelName']
transform_base_config = transform_config(transformer, data, data_type, content_type, compression_type,
split_type, job_name)
config = {
'Model': model_base_config,
'Transform': transform_base_config
}
return config | [
"def",
"transform_config_from_estimator",
"(",
"estimator",
",",
"task_id",
",",
"task_type",
",",
"instance_count",
",",
"instance_type",
",",
"data",
",",
"data_type",
"=",
"'S3Prefix'",
",",
"content_type",
"=",
"None",
",",
"compression_type",
"=",
"None",
","... | Export Airflow transform config from a SageMaker estimator
Args:
estimator (sagemaker.model.EstimatorBase): The SageMaker estimator to export Airflow config from.
It has to be an estimator associated with a training job.
task_id (str): The task id of any airflow.contrib.operators.SageMakerTrainingOperator or
airflow.contrib.operators.SageMakerTuningOperator that generates training jobs in the DAG. The transform
config is built based on the training job generated in this operator.
task_type (str): Whether the task is from SageMakerTrainingOperator or SageMakerTuningOperator. Values can be
'training', 'tuning' or None (which means training job is not from any task).
instance_count (int): Number of EC2 instances to use.
instance_type (str): Type of EC2 instance to use, for example, 'ml.c4.xlarge'.
data (str): Input data location in S3.
data_type (str): What the S3 location defines (default: 'S3Prefix'). Valid values:
* 'S3Prefix' - the S3 URI defines a key name prefix. All objects with this prefix will be used as
inputs for the transform job.
* 'ManifestFile' - the S3 URI points to a single manifest file listing each S3 object to use as
an input for the transform job.
content_type (str): MIME type of the input data (default: None).
compression_type (str): Compression type of the input data, if compressed (default: None).
Valid values: 'Gzip', None.
split_type (str): The record delimiter for the input object (default: 'None').
Valid values: 'None', 'Line', 'RecordIO', and 'TFRecord'.
job_name (str): transform job name (default: None). If not specified, one will be generated.
model_name (str): model name (default: None). If not specified, one will be generated.
strategy (str): The strategy used to decide how to batch records in a single request (default: None).
Valid values: 'MULTI_RECORD' and 'SINGLE_RECORD'.
assemble_with (str): How the output is assembled (default: None). Valid values: 'Line' or 'None'.
output_path (str): S3 location for saving the transform result. If not specified, results are stored to
a default bucket.
output_kms_key (str): Optional. KMS key ID for encrypting the transform output (default: None).
accept (str): The content type accepted by the endpoint deployed during the transform job.
env (dict): Environment variables to be set for use during the transform job (default: None).
max_concurrent_transforms (int): The maximum number of HTTP requests to be made to
each individual transform container at one time.
max_payload (int): Maximum size of the payload in a single HTTP request to the container in MB.
tags (list[dict]): List of tags for labeling a transform job. If none specified, then the tags used for
the training job are used for the transform job.
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
transform jobs. If not specified, the role from the Estimator will be used.
volume_kms_key (str): Optional. KMS key ID for encrypting the volume attached to the ML
compute instance (default: None).
model_server_workers (int): Optional. The number of worker processes used by the inference server.
If None, server will use one worker per vCPU.
image (str): A container image to use for deploying the model
vpc_config_override (dict[str, list[str]]): Override for VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
Returns:
dict: Transform config that can be directly used by SageMakerTransformOperator in Airflow. | [
"Export",
"Airflow",
"transform",
"config",
"from",
"a",
"SageMaker",
"estimator"
] | python | train |
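A minimal usage sketch of the helper above; the task id, instance details, and S3 path are placeholders, and `estimator` is assumed to come from a prior training step in the DAG.

    config = transform_config_from_estimator(
        estimator, task_id='model-training', task_type='training',
        instance_count=1, instance_type='ml.c4.xlarge',
        data='s3://my-bucket/transform-input')
    # config['Transform'] can then be handed to SageMakerTransformOperator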
CiscoDevNet/webexteamssdk | webexteamssdk/api/guest_issuer.py | https://github.com/CiscoDevNet/webexteamssdk/blob/6fc2cc3557e080ba4b2a380664cb2a0532ae45cd/webexteamssdk/api/guest_issuer.py#L79-L121 | def create(self, subject, displayName, issuerToken, expiration, secret):
"""Create a new guest issuer using the provided issuer token.
This function returns a guest issuer with an API access token.
Args:
subject(basestring): Unique and public identifier
displayName(basestring): Display Name of the guest user
issuerToken(basestring): Issuer token from developer hub
expiration(basestring): Expiration time as a unix timestamp
secret(basestring): The secret used to sign your guest issuers
Returns:
GuestIssuerToken: A Guest Issuer with a valid access token.
Raises:
TypeError: If the parameter types are incorrect
ApiError: If the Webex Teams cloud returns an error.
"""
check_type(subject, basestring)
check_type(displayName, basestring)
check_type(issuerToken, basestring)
check_type(expiration, basestring)
check_type(secret, basestring)
payload = {
"sub": subject,
"name": displayName,
"iss": issuerToken,
"exp": expiration
}
key = base64.b64decode(secret)
jwt_token = jwt.encode(payload, key, algorithm='HS256')
url = self._session.base_url + API_ENDPOINT + "/" + "login"
headers = {
'Authorization': "Bearer " + jwt_token.decode('utf-8')
}
response = requests.post(url, headers=headers)
check_response_code(response, EXPECTED_RESPONSE_CODE['GET'])
return self._object_factory(OBJECT_TYPE, response.json()) | [
"def",
"create",
"(",
"self",
",",
"subject",
",",
"displayName",
",",
"issuerToken",
",",
"expiration",
",",
"secret",
")",
":",
"check_type",
"(",
"subject",
",",
"basestring",
")",
"check_type",
"(",
"displayName",
",",
"basestring",
")",
"check_type",
"(... | Create a new guest issuer using the provided issuer token.
This function returns a guest issuer with an API access token.
Args:
subject(basestring): Unique and public identifier
displayName(basestring): Display Name of the guest user
issuerToken(basestring): Issuer token from developer hub
expiration(basestring): Expiration time as a unix timestamp
secret(basestring): The secret used to sign your guest issuers
Returns:
GuestIssuerToken: A Guest Issuer with a valid access token.
Raises:
TypeError: If the parameter types are incorrect
ApiError: If the Webex Teams cloud returns an error. | [
"Create",
"a",
"new",
"guest",
"issuer",
"using",
"the",
"provided",
"issuer",
"token",
"."
] | python | test |
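Illustrative only: a standalone sketch of the JWT step performed by `create`, assuming the PyJWT package; the issuer token and secret values are placeholders (PyJWT 1.x returns bytes, 2.x returns str).

    import base64
    import jwt

    payload = {"sub": "guest-42", "name": "Guest User",
               "iss": "<issuer-token>", "exp": "1700000000"}
    key = base64.b64decode("<base64-encoded-secret>")
    token = jwt.encode(payload, key, algorithm="HS256")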
DinoTools/python-overpy | overpy/__init__.py | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1234-L1282 | def from_json(cls, data, result=None):
"""
Create new RelationMember element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of RelationMember
:rtype: overpy.RelationMember
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
ref = data.get("ref")
role = data.get("role")
attributes = {}
ignore = ["geometry", "type", "ref", "role"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
geometry = data.get("geometry")
if isinstance(geometry, list):
geometry_orig = geometry
geometry = []
for v in geometry_orig:
geometry.append(
RelationWayGeometryValue(
lat=v.get("lat"),
lon=v.get("lon")
)
)
else:
geometry = None
return cls(
attributes=attributes,
geometry=geometry,
ref=ref,
role=role,
result=result
) | [
"def",
"from_json",
"(",
"cls",
",",
"data",
",",
"result",
"=",
"None",
")",
":",
"if",
"data",
".",
"get",
"(",
"\"type\"",
")",
"!=",
"cls",
".",
"_type_value",
":",
"raise",
"exception",
".",
"ElementDataWrongType",
"(",
"type_expected",
"=",
"cls",
... | Create new RelationMember element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of RelationMember
:rtype: overpy.RelationMember
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match. | [
"Create",
"new",
"RelationMember",
"element",
"from",
"JSON",
"data"
] | python | train |
ToucanToco/toucan-data-sdk | toucan_data_sdk/utils/postprocess/math.py | https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/math.py#L48-L52 | def divide(df, new_column, column_1, column_2):
"""
DEPRECATED - use `formula` instead
"""
return _basic_math_operation(df, new_column, column_1, column_2, op='truediv') | [
"def",
"divide",
"(",
"df",
",",
"new_column",
",",
"column_1",
",",
"column_2",
")",
":",
"return",
"_basic_math_operation",
"(",
"df",
",",
"new_column",
",",
"column_1",
",",
"column_2",
",",
"op",
"=",
"'truediv'",
")"
] | DEPRECATED - use `formula` instead | [
"DEPRECATED",
"-",
"use",
"formula",
"instead"
] | python | test |
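A rough pandas equivalent of what this deprecated helper computes (column-wise 'truediv'); the replacement `formula` API is not shown here.

    import pandas as pd

    df = pd.DataFrame({"a": [10.0, 9.0], "b": [2.0, 3.0]})
    df["ratio"] = df["a"] / df["b"]  # same result as divide(df, "ratio", "a", "b")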
senaite/senaite.core | bika/lims/controlpanel/bika_analysisservices.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/controlpanel/bika_analysisservices.py#L308-L316 | def format_price(self, price):
"""Formats the price with the set decimal mark and correct currency
"""
return u"{} {}{}{:02d}".format(
self.currency_symbol,
price[0],
self.decimal_mark,
price[1],
) | [
"def",
"format_price",
"(",
"self",
",",
"price",
")",
":",
"return",
"u\"{} {}{}{:02d}\"",
".",
"format",
"(",
"self",
".",
"currency_symbol",
",",
"price",
"[",
"0",
"]",
",",
"self",
".",
"decimal_mark",
",",
"price",
"[",
"1",
"]",
",",
")"
] | Formats the price with the set decimal mark and correct currency | [
"Formats",
"the",
"price",
"with",
"the",
"set",
"decimal",
"mark",
"and",
"correct",
"currency"
] | python | train |
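A standalone sketch of the same format string; the currency symbol, decimal mark, and price tuple are hypothetical.

    symbol, mark = u"$", "."
    price = (12, 5)  # (whole units, two-digit fraction)
    u"{} {}{}{:02d}".format(symbol, price[0], mark, price[1])  # -> u'$ 12.05'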
CybOXProject/mixbox | mixbox/namespaces.py | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/namespaces.py#L758-L809 | def assert_valid(self):
"""For debugging; does some sanity checks on this set. Raises
InvalidNamespaceSetError if this namespace set is invalid. Otherwise,
raises/returns nothing."""
for ns_uri, ni in six.iteritems(self.__ns_uri_map):
if not ni.uri:
raise InvalidNamespaceSetError(
"URI not set in _NamespaceInfo (id={0}):\n{1}".format(
id(ni), ni
)
)
if ns_uri != ni.uri:
raise InvalidNamespaceSetError(
"URI mismatch in dict ({0}) and _NamespaceInfo ({1})".format(
ns_uri, ni.uri
)
)
if (ni.preferred_prefix is not None and
ni.preferred_prefix not in ni.prefixes):
raise InvalidNamespaceSetError(
"Namespace {0.uri}: preferred prefix " \
'"{0.preferred_prefix}" not in prefixes ' \
"{0.prefixes}".format(ni)
)
for prefix in ni.prefixes:
if not prefix:
raise InvalidNamespaceSetError(
"Namespace {0.uri}: empty value in prefix " \
"set: {0.prefixes}".format(ni)
)
other_ni = self.__prefix_map.get(prefix)
if other_ni is None:
raise InvalidNamespaceSetError(
'Namespace {0.uri}: prefix "{1}" not in ' \
'prefix map'.format(ni, prefix)
)
if other_ni is not ni:
raise InvalidNamespaceSetError(
'Namespace {0.uri}: prefix "{1}" maps to ' \
'wrong _NamespaceInfo (id={2}, uri={3.uri})'.format(
ni, prefix, id(other_ni), other_ni
)
)
if None in self.__prefix_map:
# None can be a preferred prefix, but should not be in the
# prefix map.
raise InvalidNamespaceSetError("None is in prefix map!") | [
"def",
"assert_valid",
"(",
"self",
")",
":",
"for",
"ns_uri",
",",
"ni",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"__ns_uri_map",
")",
":",
"if",
"not",
"ni",
".",
"uri",
":",
"raise",
"InvalidNamespaceSetError",
"(",
"\"URI not set in _NamespaceInf... | For debugging; does some sanity checks on this set. Raises
InvalidNamespaceSetError if this namespace set is invalid. Otherwise,
raises/returns nothing. | [
"For",
"debugging",
";",
"does",
"some",
"sanity",
"checks",
"on",
"this",
"set",
".",
"Raises",
"InvalidNamespaceSetError",
"if",
"this",
"namespace",
"set",
"is",
"invalid",
".",
"Otherwise",
"raises",
"/",
"returns",
"nothing",
"."
] | python | train |
apache/incubator-mxnet | python/mxnet/metric.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L33-L66 | def check_label_shapes(labels, preds, wrap=False, shape=False):
"""Helper function for checking shape of label and prediction
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
wrap : boolean
If True, wrap labels/preds in a list if they are single NDArray
shape : boolean
If True, check the shape of labels and preds;
Otherwise only check their length.
"""
if not shape:
label_shape, pred_shape = len(labels), len(preds)
else:
label_shape, pred_shape = labels.shape, preds.shape
if label_shape != pred_shape:
raise ValueError("Shape of labels {} does not match shape of "
"predictions {}".format(label_shape, pred_shape))
if wrap:
if isinstance(labels, ndarray.ndarray.NDArray):
labels = [labels]
if isinstance(preds, ndarray.ndarray.NDArray):
preds = [preds]
return labels, preds | [
"def",
"check_label_shapes",
"(",
"labels",
",",
"preds",
",",
"wrap",
"=",
"False",
",",
"shape",
"=",
"False",
")",
":",
"if",
"not",
"shape",
":",
"label_shape",
",",
"pred_shape",
"=",
"len",
"(",
"labels",
")",
",",
"len",
"(",
"preds",
")",
"el... | Helper function for checking shape of label and prediction
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
wrap : boolean
If True, wrap labels/preds in a list if they are single NDArray
shape : boolean
If True, check the shape of labels and preds;
Otherwise only check their length. | [
"Helper",
"function",
"for",
"checking",
"shape",
"of",
"label",
"and",
"prediction"
] | python | train |
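A brief usage sketch, assuming the helper is importable from mxnet.metric.

    from mxnet import nd
    from mxnet.metric import check_label_shapes

    labels, preds = nd.array([0, 1, 1]), nd.array([0.1, 0.9, 0.8])
    labels, preds = check_label_shapes(labels, preds, wrap=True)  # both now 1-item lists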
markovmodel/msmtools | msmtools/estimation/dense/ratematrix.py | https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/estimation/dense/ratematrix.py#L524-L660 | def estimate_rate_matrix(C, dt=1.0, method='KL', sparsity=None,
t_agg=None, pi=None, tol=1.0E7, K0=None,
maxiter=100000, on_error='raise'):
r"""Estimate a reversible rate matrix from a count matrix.
Parameters
----------
C : (N,N) ndarray
count matrix at a lag time dt
dt : float, optional, default=1.0
lag time that was used to estimate C
method : {'KL', 'CVE', 'pseudo', 'truncated_log'}
Method to use for estimation of the rate matrix.
* 'pseudo' selects the pseudo-generator. A reversible transition
matrix T is estimated and (T-Id)/dt is returned as the rate matrix.
* 'truncated_log' selects the truncated logarithm [3]_. A
reversible transition matrix T is estimated and max(logm(T*T)/(2dt),0)
is returned as the rate matrix. logm is the matrix logarithm and
the maximum is taken element-wise.
* 'CVE' selects the algorithm of Crommelin and Vanden-Eijnden [1]_.
It consists of minimizing the following objective function:
.. math:: f(K)=\sum_{ij}\left(\sum_{kl} U_{ik}^{-1}K_{kl}U_{lj}-L_{ij}\right)^2 \left|\Lambda_{i}\Lambda_{j}\right|
where :math:`\Lambda_i` are the eigenvalues of :math:`T` and :math:`U`
is the matrix of its (right) eigenvectors; :math:`L_{ij}=\delta_{ij}\frac{1}{\tau}\log\left|\Lambda_i\right|`.
:math:`T` is computed from C using the reversible maximum likelihood
estimator.
* 'KL' selects the algorithm of Kalbfleisch and Lawless [2]_.
It consists of maximizing the following log-likelihood:
.. math:: f(K)=\log L=\sum_{ij}C_{ij}\log(e^{K\Delta t})_{ij}
where :math:`C_{ij}` are the transition counts at a lag-time :math:`\Delta t`.
Here :math:`e` is the matrix exponential and the logarithm is taken
element-wise.
sparsity : (N,N) ndarray or None, optional, default=None
If sparsity is None, a fully occupied rate matrix will be estimated.
Alternatively, with the methods 'CVE' and 'KL' a ndarray of the
same shape as C can be supplied. If sparsity[i,j]=0 and sparsity[j,i]=0
the rate matrix elements :math:`K_{ij}` and :math:`K_{ji}` will be
constrained to zero.
t_agg : float, optional
the aggregated simulation time;
by default this is the total number of transition counts times
the lag time (no sliding window counting). This value is used
to compute the lower bound on the transition rate (that are not zero).
If sparsity is None, this value is ignored.
pi : (N) ndarray, optional
the stationary vector of the desired rate matrix K.
If no pi is given, the function takes the stationary vector
of the MLE reversible T matrix that is computed from C.
tol : float, optional, default = 1.0E7
Tolerance of the quasi-Newton algorithm that is used to minimize
the objective function. This is passed as the `factr` parameter to
`scipy.optimize.fmin_l_bfgs_b`.
Typical values for factr are: 1e12 for low accuracy; 1e7
for moderate accuracy; 10.0 for extremely high accuracy.
maxiter : int, optional, default = 100000
Minimization of the objective function will do at most this number
of steps.
on_error : string, optional, default = 'raise'
What to do when an error happens. When 'raise' is given, raise
an exception. When 'warn' is given, produce a (Python) warning.
Returns
-------
K : (N,N) ndarray
the optimal rate matrix
Notes
-----
In this implementation the algorithm of Crommelin and Vanden-Eijnden
(CVE) is initialized with the pseudo-generator estimate. The
algorithm of Kalbfleisch and Lawless (KL) is initialized using the
CVE result.
Example
-------
>>> import numpy as np
>>> from msmtools.estimation import rate_matrix
>>> C = np.array([[100,1],[50,50]])
>>> rate_matrix(C)
array([[-0.01384753, 0.01384753],
[ 0.69930032, -0.69930032]])
References
----------
.. [1] D. Crommelin and E. Vanden-Eijnden. Data-based inference of
generators for markov jump processes using convex optimization.
Multiscale. Model. Sim., 7(4):1751-1778, 2009.
.. [2] J. D. Kalbfleisch and J. F. Lawless. The analysis of panel
data under a markov assumption. J. Am. Stat. Assoc.,
80(392):863-871, 1985.
.. [3] E. B. Davies. Embeddable Markov Matrices. Electron. J. Probab.
15:1474, 2010.
"""
if method not in ['pseudo', 'truncated_log', 'CVE', 'KL']:
raise ValueError("method must be one of 'KL', 'CVE', 'pseudo' or 'truncated_log'")
# special case: truncated matrix logarithm
if method == 'truncated_log':
e_tlog = TruncatedLogarithmEstimator(C, dt=dt, sparsity=sparsity, t_agg=t_agg, pi=pi, tol=tol, maxiter=maxiter, on_error=on_error)
e_tlog.run()
return e_tlog.K
# remaining algorithms are based on each other in the order pseudo->CVE->KL
if method == 'pseudo' or method == 'CVE' or K0 is None or pi is None:
e_pseudo = PseudoGeneratorEstimator(C, dt=dt, sparsity=sparsity, t_agg=t_agg, pi=pi, tol=tol, maxiter=maxiter, on_error=on_error)
e_pseudo.run()
if pi is None:
pi = e_pseudo.pi
if method == 'pseudo':
return e_pseudo.K
if method == 'CVE' or K0 is None:
if K0 is not None:
K_init = K0
else:
K_init = e_pseudo.K
e_CVE = CrommelinVandenEijndenEstimator(e_pseudo.T, K_init, pi, dt=dt, sparsity=sparsity, t_agg=t_agg, tol=tol, maxiter=maxiter, on_error=on_error)
e_CVE.run()
if method == 'CVE':
return e_CVE.K
if K0 is not None:
K_init = K0
else:
K_init = e_CVE.K
e_KL = KalbfleischLawlessEstimator(C, K_init, pi, dt=dt, sparsity=sparsity, t_agg=t_agg, tol=tol, maxiter=maxiter, on_error=on_error)
e_KL.run()
return e_KL.K | [
"def",
"estimate_rate_matrix",
"(",
"C",
",",
"dt",
"=",
"1.0",
",",
"method",
"=",
"'KL'",
",",
"sparsity",
"=",
"None",
",",
"t_agg",
"=",
"None",
",",
"pi",
"=",
"None",
",",
"tol",
"=",
"1.0E7",
",",
"K0",
"=",
"None",
",",
"maxiter",
"=",
"1... | r"""Estimate a reversible rate matrix from a count matrix.
Parameters
----------
C : (N,N) ndarray
count matrix at a lag time dt
dt : float, optional, default=1.0
lag time that was used to estimate C
method : {'KL', 'CVE', 'pseudo', 'truncated_log'}
Method to use for estimation of the rate matrix.
* 'pseudo' selects the pseudo-generator. A reversible transition
matrix T is estimated and (T-Id)/dt is returned as the rate matrix.
* 'truncated_log' selects the truncated logarithm [3]_. A
reversible transition matrix T is estimated and max(logm(T*T)/(2dt),0)
is returned as the rate matrix. logm is the matrix logarithm and
the maximum is taken element-wise.
* 'CVE' selects the algorithm of Crommelin and Vanden-Eijnden [1]_.
It consists of minimizing the following objective function:
.. math:: f(K)=\sum_{ij}\left(\sum_{kl} U_{ik}^{-1}K_{kl}U_{lj}-L_{ij}\right)^2 \left|\Lambda_{i}\Lambda_{j}\right|
where :math:`\Lambda_i` are the eigenvalues of :math:`T` and :math:`U`
is the matrix of its (right) eigenvectors; :math:`L_{ij}=\delta_{ij}\frac{1}{\tau}\log\left|\Lambda_i\right|`.
:math:`T` is computed from C using the reversible maximum likelihood
estimator.
* 'KL' selects the algorithm of Kalbfleisch and Lawless [2]_.
It consists of maximizing the following log-likelihood:
.. math:: f(K)=\log L=\sum_{ij}C_{ij}\log(e^{K\Delta t})_{ij}
where :math:`C_{ij}` are the transition counts at a lag-time :math:`\Delta t`.
Here :math:`e` is the matrix exponential and the logarithm is taken
element-wise.
sparsity : (N,N) ndarray or None, optional, default=None
If sparsity is None, a fully occupied rate matrix will be estimated.
Alternatively, with the methods 'CVE' and 'KL' a ndarray of the
same shape as C can be supplied. If sparsity[i,j]=0 and sparsity[j,i]=0
the rate matrix elements :math:`K_{ij}` and :math:`K_{ji}` will be
constrained to zero.
t_agg : float, optional
the aggregated simulation time;
by default this is the total number of transition counts times
the lag time (no sliding window counting). This value is used
to compute the lower bound on the transition rate (that are not zero).
If sparsity is None, this value is ignored.
pi : (N) ndarray, optional
the stationary vector of the desired rate matrix K.
If no pi is given, the function takes the stationary vector
of the MLE reversible T matrix that is computed from C.
tol : float, optional, default = 1.0E7
Tolerance of the quasi-Newton algorithm that is used to minimize
the objective function. This is passed as the `factr` parameter to
`scipy.optimize.fmin_l_bfgs_b`.
Typical values for factr are: 1e12 for low accuracy; 1e7
for moderate accuracy; 10.0 for extremely high accuracy.
maxiter : int, optional, default = 100000
Minimization of the objective function will do at most this number
of steps.
on_error : string, optional, default = 'raise'
What to do when an error happens. When 'raise' is given, raise
an exception. When 'warn' is given, produce a (Python) warning.
Returns
-------
K : (N,N) ndarray
the optimal rate matrix
Notes
-----
In this implementation the algorithm of Crommelin and Vanden-Eijnden
(CVE) is initialized with the pseudo-generator estimate. The
algorithm of Kalbfleisch and Lawless (KL) is initialized using the
CVE result.
Example
-------
>>> import numpy as np
>>> from msmtools.estimation import rate_matrix
>>> C = np.array([[100,1],[50,50]])
>>> rate_matrix(C)
array([[-0.01384753, 0.01384753],
[ 0.69930032, -0.69930032]])
References
----------
.. [1] D. Crommelin and E. Vanden-Eijnden. Data-based inference of
generators for markov jump processes using convex optimization.
Multiscale. Model. Sim., 7(4):1751-1778, 2009.
.. [2] J. D. Kalbfleisch and J. F. Lawless. The analysis of panel
data under a markov assumption. J. Am. Stat. Assoc.,
80(392):863-871, 1985.
.. [3] E. B. Davies. Embeddable Markov Matrices. Electron. J. Probab.
15:1474, 2010. | [
"r",
"Estimate",
"a",
"reversible",
"rate",
"matrix",
"from",
"a",
"count",
"matrix",
"."
] | python | train |
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_filtering.py | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_filtering.py#L27-L68 | def funcSmthSpt(aryFuncChnk, varSdSmthSpt):
"""Apply spatial smoothing to the input data.
Parameters
----------
aryFuncChnk : np.array
TODO
varSdSmthSpt : float (?)
Extent of smoothing.
Returns
-------
aryFuncChnk : np.array
Smoothed data.
"""
varNdim = aryFuncChnk.ndim
# Number of time points in this chunk:
varNumVol = aryFuncChnk.shape[-1]
# Loop through volumes:
if varNdim == 4:
for idxVol in range(0, varNumVol):
aryFuncChnk[:, :, :, idxVol] = gaussian_filter(
aryFuncChnk[:, :, :, idxVol],
varSdSmthSpt,
order=0,
mode='nearest',
truncate=4.0)
elif varNdim == 5:
varNumMtnDrctns = aryFuncChnk.shape[3]
for idxVol in range(0, varNumVol):
for idxMtn in range(0, varNumMtnDrctns):
aryFuncChnk[:, :, :, idxMtn, idxVol] = gaussian_filter(
aryFuncChnk[:, :, :, idxMtn, idxVol],
varSdSmthSpt,
order=0,
mode='nearest',
truncate=4.0)
# Output list:
return aryFuncChnk | [
"def",
"funcSmthSpt",
"(",
"aryFuncChnk",
",",
"varSdSmthSpt",
")",
":",
"varNdim",
"=",
"aryFuncChnk",
".",
"ndim",
"# Number of time points in this chunk:",
"varNumVol",
"=",
"aryFuncChnk",
".",
"shape",
"[",
"-",
"1",
"]",
"# Loop through volumes:",
"if",
"varNdi... | Apply spatial smoothing to the input data.
Parameters
----------
aryFuncChnk : np.array
TODO
varSdSmthSpt : float (?)
Extent of smoothing.
Returns
-------
aryFuncChnk : np.array
Smoothed data. | [
"Apply",
"spatial",
"smoothing",
"to",
"the",
"input",
"data",
"."
] | python | train |
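A hedged usage sketch of the 4-D branch; the array shape and kernel width are hypothetical, and scipy's gaussian_filter must be importable where funcSmthSpt is defined.

    import numpy as np

    ary = np.random.rand(8, 8, 8, 20)   # x, y, z, time
    out = funcSmthSpt(ary.copy(), 2.0)  # smooths each volume in place, SD = 2 voxels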
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_fc_auth.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_fc_auth.py#L99-L111 | def secpolicy_sa_secpolicy_active_policy_policies_policy(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
secpolicy_sa = ET.SubElement(config, "secpolicy-sa", xmlns="urn:brocade.com:mgmt:brocade-fc-auth")
secpolicy = ET.SubElement(secpolicy_sa, "secpolicy")
active_policy = ET.SubElement(secpolicy, "active-policy")
policies = ET.SubElement(active_policy, "policies")
policy = ET.SubElement(policies, "policy")
policy.text = kwargs.pop('policy')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"secpolicy_sa_secpolicy_active_policy_policies_policy",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"secpolicy_sa",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"secpolicy-sa\"",
",",... | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
estnltk/estnltk | estnltk/examples/split_large_koondkorpus_files.py | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/examples/split_large_koondkorpus_files.py#L50-L103 | def split_Text( text, file_name, verbose = True ):
''' Tokenizes the *text* (from *file_name*) into sentences, and if the number of
sentences exceeds *max_sentences*, splits the text into smaller texts.
Returns a list containing the original text (if no splitting was required),
or a list containing results of the splitting (smaller texts);
'''
if verbose:
print(' processing '+file_name+' ... ', end="" )
# Tokenize text into sentences
start = timer()
text = text.tokenize_sentences()
all_sentences = len(text[SENTENCES])
end = timer()
if verbose:
print(' (tok time: '+format_time( end-start )+')', end="" )
if all_sentences > max_sentences:
# Acquire spans of length *max_sentences* from the text
start = timer()
i = 0
spans = []
len_total = 0
while i < all_sentences:
startSent = text[SENTENCES][i]
endSent = text[SENTENCES][min(i+(max_sentences-1), all_sentences-1)]
span = (startSent[START], endSent[END])
len_total += (span[1]-span[0])
spans.append(span)
i += max_sentences
# Divide the text into spans
text_spans = text.texts_from_spans(spans)
assert len(text.text) >= len_total, '(!) Total spans_len must be <= text_len: '+str(len_total)+'/'+str(len(text.text))
new_texts = []
for i, small_text in enumerate( text_spans ):
newText = Text( small_text )
for key in text.keys():
if key != TEXT and key != SENTENCES and key != PARAGRAPHS:
newText[key] = text[key]
newText['_text_split_id'] = i
newText['_text_split_origin'] = str(spans[i]) # Convert it to string; Otherwise, split_by(*) may mistakenly consider
# it a layer and may run into error while trying to split it;
newText['_text_split_file'] = file_name
#print( json.dumps(newText) )
new_texts.append( newText )
end = timer()
if verbose:
print(' (split time: '+format_time( end-start )+')', end="" )
print(' (sents: '+str(all_sentences)+', new_texts:'+str(len(new_texts))+')', end="")
print()
return new_texts
else:
if verbose:
print(' (sents: '+str(all_sentences)+', no_split)', end=" \n")
return [text] | [
"def",
"split_Text",
"(",
"text",
",",
"file_name",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"' processing '",
"+",
"file_name",
"+",
"' ... '",
",",
"end",
"=",
"\"\"",
")",
"# Tokenize text into sentences",
"start",
"=",
... | Tokenizes the *text* (from *file_name*) into sentences, and if the number of
sentences exceeds *max_sentences*, splits the text into smaller texts.
Returns a list containing the original text (if no splitting was required),
or a list containing results of the splitting (smaller texts); | [
"Tokenizes",
"the",
"*",
"text",
"*",
"(",
"from",
"*",
"file_name",
"*",
")",
"into",
"sentences",
"and",
"if",
"the",
"number",
"of",
"sentences",
"exceeds",
"*",
"max_sentences",
"*",
"splits",
"the",
"text",
"into",
"smaller",
"texts",
"."
] | python | train |
seleniumbase/SeleniumBase | seleniumbase/fixtures/base_case.py | https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L192-L201 | def is_link_text_present(self, link_text):
""" Returns True if the link text appears in the HTML of the page.
The element doesn't need to be visible,
such as elements hidden inside a dropdown selection. """
soup = self.get_beautiful_soup()
html_links = soup.find_all('a')
for html_link in html_links:
if html_link.text.strip() == link_text.strip():
return True
return False | [
"def",
"is_link_text_present",
"(",
"self",
",",
"link_text",
")",
":",
"soup",
"=",
"self",
".",
"get_beautiful_soup",
"(",
")",
"html_links",
"=",
"soup",
".",
"find_all",
"(",
"'a'",
")",
"for",
"html_link",
"in",
"html_links",
":",
"if",
"html_link",
"... | Returns True if the link text appears in the HTML of the page.
The element doesn't need to be visible,
such as elements hidden inside a dropdown selection. | [
"Returns",
"True",
"if",
"the",
"link",
"text",
"appears",
"in",
"the",
"HTML",
"of",
"the",
"page",
".",
"The",
"element",
"doesn",
"t",
"need",
"to",
"be",
"visible",
"such",
"as",
"elements",
"hidden",
"inside",
"a",
"dropdown",
"selection",
"."
] | python | train |
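A usage sketch inside a SeleniumBase test; the URL and link text are placeholders, and click_link_text is assumed from SeleniumBase's API.

    from seleniumbase import BaseCase

    class LinkTest(BaseCase):
        def test_sign_in_link(self):
            self.open("https://example.com")
            if self.is_link_text_present("Sign in"):
                self.click_link_text("Sign in")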
sphinx-gallery/sphinx-gallery | sphinx_gallery/binder.py | https://github.com/sphinx-gallery/sphinx-gallery/blob/b0c1f6701bf3f4cef238757e1105cf3686b5e674/sphinx_gallery/binder.py#L174-L196 | def _copy_binder_notebooks(app):
"""Copy Jupyter notebooks to the binder notebooks directory.
Copy each output gallery directory structure, including only the
Jupyter notebook files."""
gallery_conf = app.config.sphinx_gallery_conf
gallery_dirs = gallery_conf.get('gallery_dirs')
binder_conf = gallery_conf.get('binder')
notebooks_dir = os.path.join(app.outdir, binder_conf.get('notebooks_dir'))
shutil.rmtree(notebooks_dir, ignore_errors=True)
os.makedirs(notebooks_dir)
if not isinstance(gallery_dirs, (list, tuple)):
gallery_dirs = [gallery_dirs]
iterator = sphinx_compatibility.status_iterator(
gallery_dirs, 'copying binder notebooks...', length=len(gallery_dirs))
for i_folder in iterator:
shutil.copytree(os.path.join(app.srcdir, i_folder),
os.path.join(notebooks_dir, i_folder),
ignore=_remove_ipynb_files) | [
"def",
"_copy_binder_notebooks",
"(",
"app",
")",
":",
"gallery_conf",
"=",
"app",
".",
"config",
".",
"sphinx_gallery_conf",
"gallery_dirs",
"=",
"gallery_conf",
".",
"get",
"(",
"'gallery_dirs'",
")",
"binder_conf",
"=",
"gallery_conf",
".",
"get",
"(",
"'bind... | Copy Jupyter notebooks to the binder notebooks directory.
Copy each output gallery directory structure, including only the
Jupyter notebook files. | [
"Copy",
"Jupyter",
"notebooks",
"to",
"the",
"binder",
"notebooks",
"directory",
"."
] | python | train |
dicaso/leopard | leopard/__init__.py | https://github.com/dicaso/leopard/blob/ee9f45251aaacd1e453b135b419f4f0b50fb036e/leopard/__init__.py#L79-L96 | def append(self,*args,toSection=None,**kwargs):
"""Append a new section
If toSection is None, section is appended to the main section/subs list.
Else if toSection is int or (int,int,...), it gets added to the subs (subsection)
list of the specified section.
*args* and *kwargs* are processed by Section class initialization
"""
if not toSection and toSection != 0:
s = Section(*args,**kwargs)
self.subs.append(s)
self.lastSection = s
s._parentSection = self
s._reportSection = self._reportSection
else:
if type(toSection) is int: toSection = (toSection,)
s = self.subs[toSection[0]].append(*args,toSection=toSection[1:],**kwargs)
return s | [
"def",
"append",
"(",
"self",
",",
"*",
"args",
",",
"toSection",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"toSection",
"and",
"toSection",
"is",
"not",
"0",
":",
"s",
"=",
"Section",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",... | Append a new section
If toSection is None, section is appended to the main section/subs list.
Else if toSection is int or (int,int,...), it gets added to the subs (subsection)
list of the specified section.
*args* and *kwargs* are processed by Section class initialization | [
"Append",
"a",
"new",
"section",
"If",
"toSection",
"is",
"None",
"section",
"is",
"appended",
"to",
"the",
"main",
"section",
"/",
"subs",
"list",
".",
"Else",
"if",
"toSection",
"is",
"int",
"or",
"(",
"int",
"int",
"...",
")",
"it",
"gets",
"added",... | python | train |
burnash/gspread | gspread/client.py | https://github.com/burnash/gspread/blob/0e8debe208095aeed3e3e7136c2fa5cd74090946/gspread/client.py#L217-L286 | def copy(self, file_id, title=None, copy_permissions=False):
"""Copies a spreadsheet.
:param file_id: A key of a spreadsheet to copy.
:type title: str
:param title: (optional) A title for the new spreadsheet.
:type title: str
:param copy_permissions: (optional) If True, copy permissions from
original spreadsheet to new spreadsheet.
:type copy_permissions: bool
:returns: a :class:`~gspread.models.Spreadsheet` instance.
.. versionadded:: 3.1.0
.. note::
In order to use this method, you need to add
``https://www.googleapis.com/auth/drive`` to your oAuth scope.
Example::
scope = [
'https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive'
]
Otherwise you will get an ``Insufficient Permission`` error
when you try to copy a spreadsheet.
"""
url = '{0}/{1}/copy'.format(
DRIVE_FILES_API_V2_URL,
file_id
)
payload = {
'title': title,
'mimeType': 'application/vnd.google-apps.spreadsheet'
}
r = self.request(
'post',
url,
json=payload
)
spreadsheet_id = r.json()['id']
new_spreadsheet = self.open_by_key(spreadsheet_id)
if copy_permissions:
original = self.open_by_key(file_id)
permissions = original.list_permissions()
for p in permissions:
if p.get('deleted'):
continue
try:
new_spreadsheet.share(
value=p['emailAddress'],
perm_type=p['type'],
role=p['role'],
notify=False
)
except Exception:
pass
return new_spreadsheet | [
"def",
"copy",
"(",
"self",
",",
"file_id",
",",
"title",
"=",
"None",
",",
"copy_permissions",
"=",
"False",
")",
":",
"url",
"=",
"'{0}/{1}/copy'",
".",
"format",
"(",
"DRIVE_FILES_API_V2_URL",
",",
"file_id",
")",
"payload",
"=",
"{",
"'title'",
":",
... | Copies a spreadsheet.
:param file_id: A key of a spreadsheet to copy.
:type title: str
:param title: (optional) A title for the new spreadsheet.
:type title: str
:param copy_permissions: (optional) If True, copy permissions from
original spreadsheet to new spreadsheet.
:type copy_permissions: bool
:returns: a :class:`~gspread.models.Spreadsheet` instance.
.. versionadded:: 3.1.0
.. note::
In order to use this method, you need to add
``https://www.googleapis.com/auth/drive`` to your oAuth scope.
Example::
scope = [
'https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive'
]
Otherwise you will get an ``Insufficient Permission`` error
when you try to copy a spreadsheet. | [
"Copies",
"a",
"spreadsheet",
"."
] | python | train |
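A usage sketch; the spreadsheet id is a placeholder, and the auth helper name varies by gspread version (any authorized Client works).

    import gspread

    gc = gspread.service_account()
    copy = gc.copy("<spreadsheet-id>", title="Budget (copy)", copy_permissions=True)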
DigitalGlobe/gbdxtools | gbdxtools/idaho.py | https://github.com/DigitalGlobe/gbdxtools/blob/def62f8f2d77b168aa2bd115290aaa0f9a08a4bb/gbdxtools/idaho.py#L34-L62 | def get_images_by_catid_and_aoi(self, catid, aoi_wkt):
""" Retrieves the IDAHO image records associated with a given catid.
Args:
catid (str): The source catalog ID from the platform catalog.
aoi_wkt (str): The well known text of the area of interest.
Returns:
results (json): The full catalog-search response for IDAHO images
within the catID.
"""
self.logger.debug('Retrieving IDAHO metadata')
# use the footprint to get the IDAHO id
url = '%s/search' % self.base_url
body = {"filters": ["catalogID = '%s'" % catid],
"types": ["IDAHOImage"],
"searchAreaWkt": aoi_wkt}
r = self.gbdx_connection.post(url, data=json.dumps(body))
r.raise_for_status()
if r.status_code == 200:
results = r.json()
numresults = len(results['results'])
self.logger.debug('%s IDAHO images found associated with catid %s'
% (numresults, catid))
return results | [
"def",
"get_images_by_catid_and_aoi",
"(",
"self",
",",
"catid",
",",
"aoi_wkt",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Retrieving IDAHO metadata'",
")",
"# use the footprint to get the IDAHO id",
"url",
"=",
"'%s/search'",
"%",
"self",
".",
"base_url... | Retrieves the IDAHO image records associated with a given catid.
Args:
catid (str): The source catalog ID from the platform catalog.
aoi_wkt (str): The well known text of the area of interest.
Returns:
results (json): The full catalog-search response for IDAHO images
within the catID. | [
"Retrieves",
"the",
"IDAHO",
"image",
"records",
"associated",
"with",
"a",
"given",
"catid",
".",
"Args",
":",
"catid",
"(",
"str",
")",
":",
"The",
"source",
"catalog",
"ID",
"from",
"the",
"platform",
"catalog",
".",
"aoi_wkt",
"(",
"str",
")",
":",
... | python | valid |
timgabets/bpc8583 | bpc8583/tools.py | https://github.com/timgabets/bpc8583/blob/1b8e95d73ad273ad9d11bff40d1af3f06f0f3503/bpc8583/tools.py#L68-L76 | def get_response(_code):
"""
Return xx1x response for xx0x codes (e.g. 0810 for 0800)
"""
if _code:
code = str(_code)
return code[:-2] + str(int(code[-2:-1]) + 1) + code[-1]
else:
return None | [
"def",
"get_response",
"(",
"_code",
")",
":",
"if",
"_code",
":",
"code",
"=",
"str",
"(",
"_code",
")",
"return",
"code",
"[",
":",
"-",
"2",
"]",
"+",
"str",
"(",
"int",
"(",
"code",
"[",
"-",
"2",
":",
"-",
"1",
"]",
")",
"+",
"1",
")",... | Return xx1x response for xx0x codes (e.g. 0810 for 0800) | [
"Return",
"xx1x",
"response",
"for",
"xx0x",
"codes",
"(",
"e",
".",
"g",
".",
"0810",
"for",
"0800",
")"
] | python | train |
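Worked examples of the xx0x -> xx1x mapping implemented above.

    get_response('0800')  # -> '0810'
    get_response('0200')  # -> '0210'
    get_response(None)    # -> None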
fchorney/rpI2C | rpI2C.py | https://github.com/fchorney/rpI2C/blob/7c60f82fa8c91496a74182373da0b95a13919d6e/rpI2C.py#L98-L110 | def read_block_data(self, cmd, length):
"""
Read a block of bytes from the bus from the specified command register
Amount of bytes read in is defined by length
"""
results = self.bus.read_i2c_block_data(self.address, cmd, length)
self.log.debug(
"read_block_data: Read [%s] from command register 0x%02X" % (
', '.join(['0x%02X' % x for x in results]),
cmd
)
)
return results | [
"def",
"read_block_data",
"(",
"self",
",",
"cmd",
",",
"length",
")",
":",
"results",
"=",
"self",
".",
"bus",
".",
"read_i2c_block_data",
"(",
"self",
".",
"address",
",",
"cmd",
",",
"length",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"read_bloc... | Read a block of bytes from the bus from the specified command register
Amount of bytes read in is defined by length | [
"Read",
"a",
"block",
"of",
"bytes",
"from",
"the",
"bus",
"from",
"the",
"specified",
"command",
"register",
"Amount",
"of",
"bytes",
"read",
"in",
"is",
"defined",
"by",
"length"
] | python | train |
macacajs/wd.py | macaca/webdriver.py | https://github.com/macacajs/wd.py/blob/6d3c52060013e01a67cd52b68b5230b387427bad/macaca/webdriver.py#L763-L787 | def save_screenshot(self, filename, quietly=False):
"""Save the screenshot to local.
Support:
Android iOS Web(WebView)
Args:
filename(str): The path to save the image.
quietly(bool): If True, omit the IOError when
failed to save the image.
Returns:
None.
Raises:
WebDriverException.
IOError.
"""
imgData = self.take_screenshot()
try:
with open(filename, "wb") as f:
f.write(b64decode(imgData.encode('ascii')))
except IOError as err:
if not quietly:
raise err | [
"def",
"save_screenshot",
"(",
"self",
",",
"filename",
",",
"quietly",
"=",
"False",
")",
":",
"imgData",
"=",
"self",
".",
"take_screenshot",
"(",
")",
"try",
":",
"with",
"open",
"(",
"filename",
",",
"\"wb\"",
")",
"as",
"f",
":",
"f",
".",
"writ... | Save the screenshot to local.
Support:
Android iOS Web(WebView)
Args:
filename(str): The path to save the image.
quietly(bool): If True, omit the IOError when
failed to save the image.
Returns:
None.
Raises:
WebDriverException.
IOError. | [
"Save",
"the",
"screenshot",
"to",
"local",
"."
] | python | valid |
RedHatInsights/insights-core | examples/cluster_rules/allnodes_cpu.py | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/examples/cluster_rules/allnodes_cpu.py#L53-L81 | def cluster_info(cpu, cfg):
"""
Collects fact for each host
Collects the cpu and node configuration facts to be used by the rule.
Arguments:
cpu (CpuInfo): Parser object for the cpu info.
cfg (NodeConfig): Parser object for the node configuration.
Returns:
dict: Dictionary of fact information including the keys
``cpu_count``, ``pods_per_core_int``, ``pods_per_core_customized``,
``max_pods``, and ``max_pods_customized``.
"""
cpus = cpu.cpu_count
pods_per_core = cfg.doc.find("pods-per-core")
pods_per_core_int = int(pods_per_core.value) if pods_per_core else PODS_PER_CORE
cfg_max_pods = cfg.doc.find("max-pods")
cfg_max_pods_int = int(cfg_max_pods.value) if cfg_max_pods else MAX_PODS
calc_max_pods = cpus * pods_per_core_int
return {
"cpu_count": cpus,
"pods_per_core": pods_per_core_int,
"pods_per_core_customized": bool(pods_per_core),
"max_pods": min(cfg_max_pods_int, calc_max_pods),
"max_pods_customized": bool(cfg_max_pods)
} | [
"def",
"cluster_info",
"(",
"cpu",
",",
"cfg",
")",
":",
"cpus",
"=",
"cpu",
".",
"cpu_count",
"pods_per_core",
"=",
"cfg",
".",
"doc",
".",
"find",
"(",
"\"pods-per-core\"",
")",
"pods_per_core_int",
"=",
"int",
"(",
"pods_per_core",
".",
"value",
")",
... | Collects fact for each host
Collects the cpu and node configuration facts to be used by the rule.
Arguments:
cpu (CpuInfo): Parser object for the cpu info.
cfg (NodeConfig): Parser object for the node configuration.
Returns:
dict: Dictionary of fact information including the keys
``cpu_count``, ``pods_per_core_int``, ``pods_per_core_customized``,
``max_pods``, and ``max_pods_customized``. | [
"Collects",
"fact",
"for",
"each",
"host"
] | python | train |
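A worked instance of the capping logic, with hypothetical facts.

    cpus, pods_per_core, cfg_max_pods = 4, 10, 250
    calc_max_pods = cpus * pods_per_core  # 40
    min(cfg_max_pods, calc_max_pods)      # -> 40, the reported max_pods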
apache/incubator-mxnet | python/mxnet/gluon/utils.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/utils.py#L93-L119 | def split_and_load(data, ctx_list, batch_axis=0, even_split=True):
"""Splits an NDArray into `len(ctx_list)` slices along `batch_axis` and loads
each slice to one context in `ctx_list`.
Parameters
----------
data : NDArray
A batch of data.
ctx_list : list of Context
A list of Contexts.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
Returns
-------
list of NDArray
Each corresponds to a context in `ctx_list`.
"""
if not isinstance(data, ndarray.NDArray):
data = ndarray.array(data, ctx=ctx_list[0])
if len(ctx_list) == 1:
return [data.as_in_context(ctx_list[0])]
slices = split_data(data, len(ctx_list), batch_axis, even_split)
return [i.as_in_context(ctx) for i, ctx in zip(slices, ctx_list)] | [
"def",
"split_and_load",
"(",
"data",
",",
"ctx_list",
",",
"batch_axis",
"=",
"0",
",",
"even_split",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"ndarray",
".",
"NDArray",
")",
":",
"data",
"=",
"ndarray",
".",
"array",
"(",
... | Splits an NDArray into `len(ctx_list)` slices along `batch_axis` and loads
each slice to one context in `ctx_list`.
Parameters
----------
data : NDArray
A batch of data.
ctx_list : list of Context
A list of Contexts.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
Returns
-------
list of NDArray
Each corresponds to a context in `ctx_list`. | [
"Splits",
"an",
"NDArray",
"into",
"len",
"(",
"ctx_list",
")",
"slices",
"along",
"batch_axis",
"and",
"loads",
"each",
"slice",
"to",
"one",
"context",
"in",
"ctx_list",
"."
] | python | train |
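A usage sketch, assuming the helper is importable from mxnet.gluon.utils.

    import mxnet as mx
    from mxnet.gluon.utils import split_and_load

    data = mx.nd.arange(8).reshape((4, 2))
    parts = split_and_load(data, [mx.cpu(0), mx.cpu(1)])  # two (2, 2) slices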
zeth/inputs | inputs.py | https://github.com/zeth/inputs/blob/a46681dbf77d6ab07834f550e5855c1f50701f99/inputs.py#L1498-L1519 | def create_event_object(self,
event_type,
code,
value,
timeval=None):
"""Create an evdev style structure."""
if not timeval:
self.update_timeval()
timeval = self.timeval
try:
event_code = self.type_codes[event_type]
except KeyError:
raise UnknownEventType(
"We don't know what kind of event a %s is." % event_type)
event = struct.pack(EVENT_FORMAT,
timeval[0],
timeval[1],
event_code,
code,
value)
return event | [
"def",
"create_event_object",
"(",
"self",
",",
"event_type",
",",
"code",
",",
"value",
",",
"timeval",
"=",
"None",
")",
":",
"if",
"not",
"timeval",
":",
"self",
".",
"update_timeval",
"(",
")",
"timeval",
"=",
"self",
".",
"timeval",
"try",
":",
"e... | Create an evdev style structure. | [
"Create",
"an",
"evdev",
"style",
"structure",
"."
] | python | train |
deepmind/sonnet | sonnet/examples/learn_to_execute.py | https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/examples/learn_to_execute.py#L580-L594 | def get_tokens(max_value):
"""Defines tokens.
Args:
max_value: the maximum numeric range for the token.
Returns:
list of string tokens in vocabulary.
"""
vocab = [str(i) for i in range(max_value)]
vocab = set(vocab)
vocab.update(CodeOp.LITERALS)
vocab.update(CodeOp.KEYWORDS)
vocab |= set("".join(vocab))
return sorted(vocab) | [
"def",
"get_tokens",
"(",
"max_value",
")",
":",
"vocab",
"=",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"max_value",
")",
"]",
"vocab",
"=",
"set",
"(",
"vocab",
")",
"vocab",
".",
"update",
"(",
"CodeOp",
".",
"LITERALS",
")",
... | Defines tokens.
Args:
max_value: the maximum numeric range for the token.
Returns:
list of string tokens in vocabulary. | [
"Defines",
"tokens",
"."
] | python | train |
gem/oq-engine | openquake/hazardlib/gsim/boore_atkinson_2008.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/boore_atkinson_2008.py#L187-L206 | def _get_pga_on_rock(self, rup, dists, _C):
"""
Compute and return PGA on rock conditions (that is vs30 = 760.0 m/s).
This is needed to compute non-linear site amplification term
"""
# Median PGA in g for Vref = 760.0, without site amplification,
# that is equation (1) pag 106, without the third and fourth terms
# Mref and Rref values are given in the caption to table 6, pag 119
# Note that in the original paper, the caption reads:
# "Distance-scaling coefficients (Mref=4.5 and Rref=1.0 km for all
# periods, except Rref=5.0 km for pga4nl)". However this is a mistake
# as reported in http://www.daveboore.com/pubs_online.php:
# ERRATUM: 27 August 2008. Tom Blake pointed out that the caption to
# Table 6 should read "Distance-scaling coefficients (Mref=4.5 and
# Rref=1.0 km for all periods)".
C_pga = self.COEFFS[PGA()]
pga4nl = np.exp(self._compute_magnitude_scaling(rup, C_pga) +
self._compute_distance_scaling(rup, dists, C_pga))
return pga4nl | [
"def",
"_get_pga_on_rock",
"(",
"self",
",",
"rup",
",",
"dists",
",",
"_C",
")",
":",
"# Median PGA in g for Vref = 760.0, without site amplification,",
"# that is equation (1) pag 106, without the third and fourth terms",
"# Mref and Rref values are given in the caption to table 6, pag... | Compute and return PGA on rock conditions (that is vs30 = 760.0 m/s).
This is needed to compute non-linear site amplification term | [
"Compute",
"and",
"return",
"PGA",
"on",
"rock",
"conditions",
"(",
"that",
"is",
"vs30",
"=",
"760",
".",
"0",
"m",
"/",
"s",
")",
".",
"This",
"is",
"needed",
"to",
"compute",
"non",
"-",
"linear",
"site",
"amplification",
"term"
] | python | train |
wmayner/pyphi | pyphi/partition.py | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L156-L160 | def bipartition_of_one(seq):
"""Generate bipartitions where one part is of length 1."""
seq = list(seq)
for i, elt in enumerate(seq):
yield ((elt,), tuple(seq[:i] + seq[(i + 1):])) | [
"def",
"bipartition_of_one",
"(",
"seq",
")",
":",
"seq",
"=",
"list",
"(",
"seq",
")",
"for",
"i",
",",
"elt",
"in",
"enumerate",
"(",
"seq",
")",
":",
"yield",
"(",
"(",
"elt",
",",
")",
",",
"tuple",
"(",
"seq",
"[",
":",
"i",
"]",
"+",
"s... | Generate bipartitions where one part is of length 1. | [
"Generate",
"bipartitions",
"where",
"one",
"part",
"is",
"of",
"length",
"1",
"."
] | python | train |
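A worked example of the generator above.

    list(bipartition_of_one((1, 2, 3)))
    # -> [((1,), (2, 3)), ((2,), (1, 3)), ((3,), (1, 2))]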
klen/adrest | adrest/utils/paginator.py | https://github.com/klen/adrest/blob/8b75c67123cffabe5ed98c222bb7ab43c904d89c/adrest/utils/paginator.py#L32-L45 | def to_simple(self, serializer=None):
""" Prepare to serialization.
:return dict: paginator params
"""
return dict(
count=self.paginator.count,
page=self.page_number,
num_pages=self.paginator.num_pages,
next=self.next_page,
prev=self.previous_page,
resources=self.resources,
) | [
"def",
"to_simple",
"(",
"self",
",",
"serializer",
"=",
"None",
")",
":",
"return",
"dict",
"(",
"count",
"=",
"self",
".",
"paginator",
".",
"count",
",",
"page",
"=",
"self",
".",
"page_number",
",",
"num_pages",
"=",
"self",
".",
"paginator",
".",
... | Prepare to serialization.
:return dict: paginator params | [
"Prepare",
"to",
"serialization",
"."
] | python | train |
google/grr | grr/server/grr_response_server/queue_manager.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/queue_manager.py#L499-L512 | def MultiNotifyQueue(self, notifications, mutation_pool=None):
"""This is the same as NotifyQueue but for several session_ids at once.
Args:
notifications: A list of notifications.
mutation_pool: A MutationPool object to schedule Notifications on.
Raises:
RuntimeError: An invalid session_id was passed.
"""
extract_queue = lambda notification: notification.session_id.Queue()
for queue, notifications in iteritems(
collection.Group(notifications, extract_queue)):
self._MultiNotifyQueue(queue, notifications, mutation_pool=mutation_pool) | [
"def",
"MultiNotifyQueue",
"(",
"self",
",",
"notifications",
",",
"mutation_pool",
"=",
"None",
")",
":",
"extract_queue",
"=",
"lambda",
"notification",
":",
"notification",
".",
"session_id",
".",
"Queue",
"(",
")",
"for",
"queue",
",",
"notifications",
"in... | This is the same as NotifyQueue but for several session_ids at once.
Args:
notifications: A list of notifications.
mutation_pool: A MutationPool object to schedule Notifications on.
Raises:
RuntimeError: An invalid session_id was passed. | [
"This",
"is",
"the",
"same",
"as",
"NotifyQueue",
"but",
"for",
"several",
"session_ids",
"at",
"once",
"."
] | python | train |
marrow/cinje | cinje/inline/text.py | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/inline/text.py#L73-L110 | def gather(input):
"""Collect contiguous lines of text, preserving line numbers."""
try:
line = input.next()
except StopIteration:
return
lead = True
buffer = []
# Gather contiguous (uninterrupted) lines of template text.
while line.kind == 'text':
value = line.line.rstrip().rstrip('\\') + ('' if line.continued else '\n')
if lead and line.stripped:
yield Line(line.number, value)
lead = False
elif not lead:
if line.stripped:
for buf in buffer:
yield buf
buffer = []
yield Line(line.number, value)
else:
buffer.append(Line(line.number, value))
try:
line = input.next()
except StopIteration:
line = None
break
if line:
input.push(line) | [
"def",
"gather",
"(",
"input",
")",
":",
"try",
":",
"line",
"=",
"input",
".",
"next",
"(",
")",
"except",
"StopIteration",
":",
"return",
"lead",
"=",
"True",
"buffer",
"=",
"[",
"]",
"# Gather contiguous (uninterrupted) lines of template text.",
"while",
"l... | Collect contiguous lines of text, preserving line numbers. | [
"Collect",
"contiguous",
"lines",
"of",
"text",
"preserving",
"line",
"numbers",
"."
] | python | train |
Erotemic/utool | utool/util_type.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L528-L553 | def get_homogenous_list_type(list_):
"""
Returns the best matching python type even if it is an ndarray. Assumes all
items in the list are of the same type; does not check this.
"""
# TODO Expand and make work correctly
if HAVE_NUMPY and isinstance(list_, np.ndarray):
item = list_
elif isinstance(list_, list) and len(list_) > 0:
item = list_[0]
else:
item = None
if item is not None:
if is_float(item):
type_ = float
elif is_int(item):
type_ = int
elif is_bool(item):
type_ = bool
elif is_str(item):
type_ = str
else:
type_ = get_type(item)
else:
type_ = None
return type_ | [
"def",
"get_homogenous_list_type",
"(",
"list_",
")",
":",
"# TODO Expand and make work correctly",
"if",
"HAVE_NUMPY",
"and",
"isinstance",
"(",
"list_",
",",
"np",
".",
"ndarray",
")",
":",
"item",
"=",
"list_",
"elif",
"isinstance",
"(",
"list_",
",",
"list",... | Returns the best matching python type even if it is an ndarray assumes all
items in the list are of the same type; does not check this. | [
"Returns",
"the",
"best",
"matching",
"python",
"type",
"even",
"if",
"it",
"is",
"an",
"ndarray",
"assumes",
"all",
"items",
"in",
"the",
"list",
"are",
"of",
"the",
"same",
"type",
".",
"does",
"not",
"check",
"this"
] | python | train |
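Illustrative calls, assuming utool's is_* helpers match their names.

    get_homogenous_list_type([1.0, 2.0])  # -> float
    get_homogenous_list_type(['a', 'b'])  # -> str
    get_homogenous_list_type([])          # -> None (no item to inspect)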
pymupdf/PyMuPDF | demo/draw-sines.py | https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/demo/draw-sines.py#L94-L107 | def rot_points(pnts, pb, alfa):
"""Rotate a list of points by an angle alfa (radians) around pivotal point pb.
Intended for modifying the control points of trigonometric functions.
"""
points = [] # rotated points
calfa = math.cos(alfa)
salfa = math.sin(alfa)
for p in pnts:
s = p - pb
r = abs(s)
if r > 0: s /= r
np = (s.x * calfa - s.y * salfa, s.y * calfa + s.x * salfa)
points.append(pb + fitz.Point(np)*r)
return points | [
"def",
"rot_points",
"(",
"pnts",
",",
"pb",
",",
"alfa",
")",
":",
"points",
"=",
"[",
"]",
"# rotated points",
"calfa",
"=",
"math",
".",
"cos",
"(",
"alfa",
")",
"salfa",
"=",
"math",
".",
"sin",
"(",
"alfa",
")",
"for",
"p",
"in",
"pnts",
":"... | Rotate a list of points by an angle alfa (radians) around pivotal point pb.
Intended for modifying the control points of trigonometric functions. | [
"Rotate",
"a",
"list",
"of",
"points",
"by",
"an",
"angle",
"alfa",
"(",
"radians",
")",
"around",
"pivotal",
"point",
"pb",
".",
"Intended",
"for",
"modifying",
"the",
"control",
"points",
"of",
"trigonometric",
"functions",
"."
] | python | train |
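A minimal check of the rotation p' = pb + R(alfa) * (p - pb), assuming PyMuPDF's Point accepts a 2-sequence.

    import math
    import fitz

    pb = fitz.Point(0, 0)
    rot_points([fitz.Point(1, 0)], pb, math.pi / 2)  # ~[Point(0.0, 1.0)] up to rounding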
resync/resync | resync/client_state.py | https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/client_state.py#L28-L49 | def set_state(self, site, timestamp=None):
"""Write status dict to client status file.
FIXME - should have some file lock to avoid race
"""
parser = ConfigParser()
parser.read(self.status_file)
status_section = 'incremental'
if (not parser.has_section(status_section)):
parser.add_section(status_section)
if (timestamp is None):
parser.remove_option(
status_section,
self.config_site_to_name(site))
else:
parser.set(
status_section,
self.config_site_to_name(site),
str(timestamp))
with open(self.status_file, 'w') as configfile:
parser.write(configfile)
configfile.close() | [
"def",
"set_state",
"(",
"self",
",",
"site",
",",
"timestamp",
"=",
"None",
")",
":",
"parser",
"=",
"ConfigParser",
"(",
")",
"parser",
".",
"read",
"(",
"self",
".",
"status_file",
")",
"status_section",
"=",
"'incremental'",
"if",
"(",
"not",
"parser... | Write status dict to client status file.
FIXME - should have some file lock to avoid race | [
"Write",
"status",
"dict",
"to",
"client",
"status",
"file",
"."
] | python | train |
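A hedged usage sketch; the ClientState class name and its default status file are assumed from the module.

    state = ClientState()
    state.set_state('http://example.org/', timestamp=1234567890)  # record last sync
    state.set_state('http://example.org/')                        # clear the entry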
riptano/ccm | ccmlib/remote.py | https://github.com/riptano/ccm/blob/275699f79d102b5039b79cc17fa6305dccf18412/ccmlib/remote.py#L490-L501 | def usage(self):
"""
Get the usage for the remote execution options
:return Usage for the remote execution options
"""
# Retrieve the text for just the arguments
usage = self.parser.format_help().split("optional arguments:")[1]
# Remove any blank lines and return
return "Remote Options:" + os.linesep + \
os.linesep.join([s for s in usage.splitlines() if s]) | [
"def",
"usage",
"(",
"self",
")",
":",
"# Retrieve the text for just the arguments",
"usage",
"=",
"self",
".",
"parser",
".",
"format_help",
"(",
")",
".",
"split",
"(",
"\"optional arguments:\"",
")",
"[",
"1",
"]",
"# Remove any blank lines and return",
"return",... | Get the usage for the remote exectuion options
:return Usage for the remote execution options | [
"Get",
"the",
"usage",
"for",
"the",
"remote",
"exectuion",
"options"
] | python | train |
apache/airflow | airflow/hooks/hive_hooks.py | https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L556-L576 | def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition_name: Name of the partition to check for (eg `a=b/c=d`)
:type partition_name: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name) | [
"def",
"check_for_named_partition",
"(",
"self",
",",
"schema",
",",
"table",
",",
"partition_name",
")",
":",
"with",
"self",
".",
"metastore",
"as",
"client",
":",
"return",
"client",
".",
"check_for_named_partition",
"(",
"schema",
",",
"table",
",",
"parti... | Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition_name: Name of the partition to check for (eg `a=b/c=d`)
:type partition_name: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False | [
"Checks",
"whether",
"a",
"partition",
"with",
"a",
"given",
"name",
"exists"
] | python | test |
pywbem/pywbem | pywbem/tupleparse.py | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/tupleparse.py#L1547-L1569 | def parse_parameter_reference(self, tup_tree):
"""
::
<!ELEMENT PARAMETER.REFERENCE (QUALIFIER*)>
<!ATTLIST PARAMETER.REFERENCE
%CIMName;
%ReferenceClass;>
"""
self.check_node(tup_tree, 'PARAMETER.REFERENCE', ('NAME',),
('REFERENCECLASS',), ('QUALIFIER',))
attrl = attrs(tup_tree)
qualifiers = self.list_of_matching(tup_tree, ('QUALIFIER',))
return CIMParameter(attrl['NAME'],
type='reference',
is_array=False,
reference_class=attrl.get('REFERENCECLASS', None),
qualifiers=qualifiers,
embedded_object=False) | [
"def",
"parse_parameter_reference",
"(",
"self",
",",
"tup_tree",
")",
":",
"self",
".",
"check_node",
"(",
"tup_tree",
",",
"'PARAMETER.REFERENCE'",
",",
"(",
"'NAME'",
",",
")",
",",
"(",
"'REFERENCECLASS'",
",",
")",
",",
"(",
"'QUALIFIER'",
",",
")",
"... | ::
<!ELEMENT PARAMETER.REFERENCE (QUALIFIER*)>
<!ATTLIST PARAMETER.REFERENCE
%CIMName;
%ReferenceClass;> | [
"::"
] | python | train |
linkedin/pyexchange | pyexchange/base/calendar.py | https://github.com/linkedin/pyexchange/blob/d568f4edd326adb451b915ddf66cf1a37820e3ca/pyexchange/base/calendar.py#L305-L320 | def validate(self):
""" Validates that all required fields are present """
if not self.start:
raise ValueError("Event has no start date")
if not self.end:
raise ValueError("Event has no end date")
if self.end < self.start:
raise ValueError("Start date is after end date")
if self.reminder_minutes_before_start and not isinstance(self.reminder_minutes_before_start, int):
raise TypeError("reminder_minutes_before_start must be of type int")
if self.is_all_day and not isinstance(self.is_all_day, bool):
raise TypeError("is_all_day must be of type bool") | [
"def",
"validate",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"start",
":",
"raise",
"ValueError",
"(",
"\"Event has no start date\"",
")",
"if",
"not",
"self",
".",
"end",
":",
"raise",
"ValueError",
"(",
"\"Event has no end date\"",
")",
"if",
"self"... | Validates that all required fields are present | [
"Validates",
"that",
"all",
"required",
"fields",
"are",
"present"
] | python | train |
bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L388-L401 | def is_population_germline(rec):
"""Identify a germline calls based on annoations with ExAC or other population databases.
"""
min_count = 50
for k in population_keys:
if k in rec.info:
val = rec.info.get(k)
if "," in val:
val = val.split(",")[0]
if isinstance(val, (list, tuple)):
val = max(val)
if int(val) > min_count:
return True
return False | [
"def",
"is_population_germline",
"(",
"rec",
")",
":",
"min_count",
"=",
"50",
"for",
"k",
"in",
"population_keys",
":",
"if",
"k",
"in",
"rec",
".",
"info",
":",
"val",
"=",
"rec",
".",
"info",
".",
"get",
"(",
"k",
")",
"if",
"\",\"",
"in",
"val"... | Identify a germline calls based on annoations with ExAC or other population databases. | [
"Identify",
"a",
"germline",
"calls",
"based",
"on",
"annoations",
"with",
"ExAC",
"or",
"other",
"population",
"databases",
"."
] | python | train |
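A self-contained look at the same thresholding, with a plain dict standing in for `rec.info` and hypothetical population keys (the real record comes from a VCF reader); a string guard is added before the comma split.

```python
population_keys = ["AC_EXAC", "AC_1000G"]  # hypothetical key names
min_count = 50

def is_population_germline(info):
    for k in population_keys:
        if k in info:
            val = info[k]
            if isinstance(val, str) and "," in val:
                val = val.split(",")[0]   # first allele count only
            if isinstance(val, (list, tuple)):
                val = max(val)
            if int(val) > min_count:
                return True
    return False

print(is_population_germline({"AC_EXAC": "120,3"}))  # True: 120 > 50
print(is_population_germline({"AC_1000G": 12}))      # False
```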
disqus/gargoyle | gargoyle/manager.py | https://github.com/disqus/gargoyle/blob/47a79e34b093d56e11c344296d6b78854ed35f12/gargoyle/manager.py#L107-L115 | def unregister(self, condition_set):
"""
Unregisters a condition set with the manager.
>>> gargoyle.unregister(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
self._registry.pop(condition_set.get_id(), None) | [
"def",
"unregister",
"(",
"self",
",",
"condition_set",
")",
":",
"if",
"callable",
"(",
"condition_set",
")",
":",
"condition_set",
"=",
"condition_set",
"(",
")",
"self",
".",
"_registry",
".",
"pop",
"(",
"condition_set",
".",
"get_id",
"(",
")",
",",
... | Unregisters a condition set with the manager.
>>> gargoyle.unregister(condition_set) #doctest: +SKIP | [
"Unregisters",
"a",
"condition",
"set",
"with",
"the",
"manager",
"."
] | python | train |
guaix-ucm/numina | numina/array/display/overplot_ds9reg.py | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/overplot_ds9reg.py#L14-L64 | def overplot_ds9reg(filename, ax):
"""Overplot a ds9 region file.
Parameters
----------
filename : str
File name of the ds9 region file.
ax : matplotlib axes instance
Matplotlib axes instance.
"""
# read ds9 region file
with open(filename) as f:
file_content = f.read().splitlines()
# check first line
first_line = file_content[0]
if "# Region file format: DS9" not in first_line:
raise ValueError("Unrecognized ds9 region file format")
for line in file_content:
if line[0:4] == "line":
line_fields = line.split()
x1 = float(line_fields[1])
y1 = float(line_fields[2])
x2 = float(line_fields[3])
y2 = float(line_fields[4])
if "color" in line:
i = line.find("color=")
color = line[i+6:i+13]
else:
color = "green"
ax.plot([x1,x2], [y1,y2], '-', color=color)
elif line[0:4] == "text":
line_fields = line.split()
x0 = float(line_fields[1])
y0 = float(line_fields[2])
text = line_fields[3][1:-1]
if "color" in line:
i = line.find("color=")
color = line[i+6:i+13]
else:
color = "green"
ax.text(x0, y0, text, fontsize=8,
bbox=dict(boxstyle="round,pad=0.1", fc="white", ec="grey", ),
color=color, fontweight='bold', backgroundcolor='white',
ha='center')
else:
# ignore
pass | [
"def",
"overplot_ds9reg",
"(",
"filename",
",",
"ax",
")",
":",
"# read ds9 region file",
"with",
"open",
"(",
"filename",
")",
"as",
"f",
":",
"file_content",
"=",
"f",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"# check first line",
"first_line",
... | Overplot a ds9 region file.
Parameters
----------
filename : str
File name of the ds9 region file.
ax : matplotlib axes instance
Matplotlib axes instance. | [
"Overplot",
"a",
"ds9",
"region",
"file",
"."
] | python | train |
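For reference, a minimal region file that the parser above accepts (coordinates, label and colors are made up); only `line` and `text` entries are handled, every other entry is ignored.

```python
import matplotlib.pyplot as plt

region = (
    "# Region file format: DS9 version 4.1\n"
    "line 10.0 10.0 200.0 150.0 # color=#ff0000\n"
    "text 105.0 80.0 {label} # color=#00ff00\n"
)
with open("demo.reg", "w") as f:  # hypothetical file name
    f.write(region)

fig, ax = plt.subplots()
overplot_ds9reg("demo.reg", ax)   # assumes the function above is in scope
plt.show()
```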
graphql-python/graphql-core | graphql/utils/quoted_or_list.py | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/utils/quoted_or_list.py#L11-L29 | def quoted_or_list(items):
# type: (List[str]) -> str
"""Given [ A, B, C ] return '"A", "B" or "C"'."""
selected = items[:MAX_LENGTH]
quoted_items = ('"{}"'.format(t) for t in selected)
def quoted_or_text(text, quoted_and_index):
index = quoted_and_index[0]
quoted_item = quoted_and_index[1]
text += (
(", " if len(selected) > 2 and not index == len(selected) - 1 else " ")
+ ("or " if index == len(selected) - 1 else "")
+ quoted_item
)
return text
enumerated_items = enumerate(quoted_items)
first_item = next(enumerated_items)[1]
return functools.reduce(quoted_or_text, enumerated_items, first_item) | [
"def",
"quoted_or_list",
"(",
"items",
")",
":",
"# type: (List[str]) -> str",
"selected",
"=",
"items",
"[",
":",
"MAX_LENGTH",
"]",
"quoted_items",
"=",
"(",
"'\"{}\"'",
".",
"format",
"(",
"t",
")",
"for",
"t",
"in",
"selected",
")",
"def",
"quoted_or_tex... | Given [ A, B, C ] return '"A", "B" or "C"'. | [
"Given",
"[",
"A",
"B",
"C",
"]",
"return",
"A",
"B",
"or",
"C",
"."
] | python | train |
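Exercising it directly (the `MAX_LENGTH` cap defined elsewhere in the module truncates longer lists):

```python
from graphql.utils.quoted_or_list import quoted_or_list  # path from the row above

print(quoted_or_list(["A"]))            # "A"
print(quoted_or_list(["A", "B"]))       # "A" or "B"
print(quoted_or_list(["A", "B", "C"]))  # "A", "B" or "C"
```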
Esri/ArcREST | src/arcrest/cmp/community.py | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/cmp/community.py#L62-L77 | def contributionStatus(self):
"""gets the contribution status of a user"""
import time
url = "%s/contributors/%s/activeContribution" % (self.root, quote(self.contributorUID))
params = {
"agolUserToken" : self._agolSH.token,
"f" : "json"
}
res = self._get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
if 'Status' in res and \
res['Status'] == 'start':
return True
return False | [
"def",
"contributionStatus",
"(",
"self",
")",
":",
"import",
"time",
"url",
"=",
"\"%s/contributors/%s/activeContribution\"",
"%",
"(",
"self",
".",
"root",
",",
"quote",
"(",
"self",
".",
"contributorUID",
")",
")",
"params",
"=",
"{",
"\"agolUserToken\"",
"... | gets the contribution status of a user | [
"gets",
"the",
"contribution",
"status",
"of",
"a",
"user"
] | python | train |
bxlab/bx-python | lib/bx_extras/pyparsing.py | https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/pyparsing.py#L1086-L1109 | def transformString( self, instring ):
"""Extension to scanString, to modify matching text with modified tokens that may
be returned from a parse action. To use transformString, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking transformString() on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. transformString() returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
return "".join(map(_ustr,out)) | [
"def",
"transformString",
"(",
"self",
",",
"instring",
")",
":",
"out",
"=",
"[",
"]",
"lastE",
"=",
"0",
"# force preservation of <TAB>s, to minimize unwanted transformation of string, and to",
"# keep string locs straight between transformString and scanString",
"self",
".",
... | Extension to scanString, to modify matching text with modified tokens that may
be returned from a parse action. To use transformString, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking transformString() on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. transformString() returns the resulting transformed string. | [
"Extension",
"to",
"scanString",
"to",
"modify",
"matching",
"text",
"with",
"modified",
"tokens",
"that",
"may",
"be",
"returned",
"from",
"a",
"parse",
"action",
".",
"To",
"use",
"transformString",
"define",
"a",
"grammar",
"and",
"attach",
"a",
"parse",
... | python | train |
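A typical `transformString` round trip, sketched against the standalone `pyparsing` package (the code above is bx-python's vendored copy, but the API is the same): attach a parse action that rewrites the matched tokens, then scan-and-substitute.

```python
from pyparsing import Word, nums

integer = Word(nums)
# The parse action's return value replaces the matched text.
integer.setParseAction(lambda toks: str(int(toks[0]) * 2))

print(integer.transformString("costs: 2 apples and 10 pears"))
# -> costs: 4 apples and 20 pears
```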
numenta/htmresearch | htmresearch/algorithms/column_pooler.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/column_pooler.py#L321-L429 | def _computeInferenceMode(self, feedforwardInput, lateralInputs):
"""
Inference mode: if there is some feedforward activity, perform
spatial pooling on it to recognize previously known objects, then use
lateral activity to activate a subset of the cells with feedforward
support. If there is no feedforward activity, use lateral activity to
activate a subset of the previous active cells.
Parameters:
----------------------------
@param feedforwardInput (sequence)
Sorted indices of active feedforward input bits
@param lateralInputs (list of sequences)
For each lateral layer, a list of sorted indices of active lateral
input bits
"""
prevActiveCells = self.activeCells
# Calculate the feedforward supported cells
overlaps = self.proximalPermanences.rightVecSumAtNZGteThresholdSparse(
feedforwardInput, self.connectedPermanenceProximal)
feedforwardSupportedCells = numpy.where(
overlaps >= self.minThresholdProximal)[0]
# Calculate the number of active segments on each cell
numActiveSegmentsByCell = numpy.zeros(self.cellCount, dtype="int")
overlaps = self.internalDistalPermanences.rightVecSumAtNZGteThresholdSparse(
prevActiveCells, self.connectedPermanenceDistal)
numActiveSegmentsByCell[overlaps >= self.activationThresholdDistal] += 1
for i, lateralInput in enumerate(lateralInputs):
overlaps = self.distalPermanences[i].rightVecSumAtNZGteThresholdSparse(
lateralInput, self.connectedPermanenceDistal)
numActiveSegmentsByCell[overlaps >= self.activationThresholdDistal] += 1
chosenCells = []
# First, activate the FF-supported cells that have the highest number of
# lateral active segments (as long as it's not 0)
if len(feedforwardSupportedCells) == 0:
pass
else:
numActiveSegsForFFSuppCells = numActiveSegmentsByCell[
feedforwardSupportedCells]
# This loop will select the FF-supported AND laterally-active cells, in
# order of descending lateral activation, until we exceed the sdrSize
# quorum - but will exclude cells with 0 lateral active segments.
ttop = numpy.max(numActiveSegsForFFSuppCells)
while ttop > 0 and len(chosenCells) < self.sdrSize:
chosenCells = numpy.union1d(chosenCells,
feedforwardSupportedCells[numActiveSegsForFFSuppCells >= ttop])
ttop -= 1
# If we haven't filled the sdrSize quorum, add in inertial cells.
if len(chosenCells) < self.sdrSize:
if self.useInertia:
prevCells = numpy.setdiff1d(prevActiveCells, chosenCells)
inertialCap = int(len(prevCells) * self.inertiaFactor)
if inertialCap > 0:
numActiveSegsForPrevCells = numActiveSegmentsByCell[prevCells]
# We sort the previously-active cells by number of active lateral
# segments (this really helps). We then activate them in order of
# descending lateral activation.
sortIndices = numpy.argsort(numActiveSegsForPrevCells)[::-1]
prevCells = prevCells[sortIndices]
numActiveSegsForPrevCells = numActiveSegsForPrevCells[sortIndices]
# We use inertiaFactor to limit the number of previously-active cells
# which can become active, forcing decay even if we are below quota.
prevCells = prevCells[:inertialCap]
numActiveSegsForPrevCells = numActiveSegsForPrevCells[:inertialCap]
# Activate groups of previously active cells by order of their lateral
# support until we either meet quota or run out of cells.
ttop = numpy.max(numActiveSegsForPrevCells)
while ttop >= 0 and len(chosenCells) < self.sdrSize:
chosenCells = numpy.union1d(chosenCells,
prevCells[numActiveSegsForPrevCells >= ttop])
ttop -= 1
# If we haven't filled the sdrSize quorum, add cells that have feedforward
# support and no lateral support.
discrepancy = self.sdrSize - len(chosenCells)
if discrepancy > 0:
remFFcells = numpy.setdiff1d(feedforwardSupportedCells, chosenCells)
# Inhibit cells proportionally to the number of cells that have already
# been chosen. If ~0 have been chosen activate ~all of the feedforward
# supported cells. If ~sdrSize have been chosen, activate very few of
# the feedforward supported cells.
# Use the discrepancy:sdrSize ratio to determine the number of cells to
# activate.
n = (len(remFFcells) * discrepancy) // self.sdrSize
# Activate at least 'discrepancy' cells.
n = max(n, discrepancy)
# If there aren't 'n' available, activate all of the available cells.
n = min(n, len(remFFcells))
if len(remFFcells) > n:
selected = _sample(self._random, remFFcells, n)
chosenCells = numpy.append(chosenCells, selected)
else:
chosenCells = numpy.append(chosenCells, remFFcells)
chosenCells.sort()
self.activeCells = numpy.asarray(chosenCells, dtype="uint32") | [
"def",
"_computeInferenceMode",
"(",
"self",
",",
"feedforwardInput",
",",
"lateralInputs",
")",
":",
"prevActiveCells",
"=",
"self",
".",
"activeCells",
"# Calculate the feedforward supported cells",
"overlaps",
"=",
"self",
".",
"proximalPermanences",
".",
"rightVecSumA... | Inference mode: if there is some feedforward activity, perform
spatial pooling on it to recognize previously known objects, then use
lateral activity to activate a subset of the cells with feedforward
support. If there is no feedforward activity, use lateral activity to
activate a subset of the previous active cells.
Parameters:
----------------------------
@param feedforwardInput (sequence)
Sorted indices of active feedforward input bits
@param lateralInputs (list of sequences)
For each lateral layer, a list of sorted indices of active lateral
input bits | [
"Inference",
"mode",
":",
"if",
"there",
"is",
"some",
"feedforward",
"activity",
"perform",
"spatial",
"pooling",
"on",
"it",
"to",
"recognize",
"previously",
"known",
"objects",
"then",
"use",
"lateral",
"activity",
"to",
"activate",
"a",
"subset",
"of",
"th... | python | train |
tkf/rash | rash/cli.py | https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/cli.py#L102-L111 | def locate_run(output, target, no_newline):
"""
Print location of RASH related file.
"""
from .config import ConfigStore
cfstore = ConfigStore()
path = getattr(cfstore, "{0}_path".format(target))
output.write(path)
if not no_newline:
output.write("\n") | [
"def",
"locate_run",
"(",
"output",
",",
"target",
",",
"no_newline",
")",
":",
"from",
".",
"config",
"import",
"ConfigStore",
"cfstore",
"=",
"ConfigStore",
"(",
")",
"path",
"=",
"getattr",
"(",
"cfstore",
",",
"\"{0}_path\"",
".",
"format",
"(",
"targe... | Print location of RASH related file. | [
"Print",
"location",
"of",
"RASH",
"related",
"file",
"."
] | python | train |
modin-project/modin | modin/backends/pandas/query_compiler.py | https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L268-L289 | def _join_index_objects(self, axis, other_index, how, sort=True):
"""Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
how: The type of join to join to make (e.g. right, left).
Returns:
Joined indices.
"""
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
# TODO: revisit for performance
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort) | [
"def",
"_join_index_objects",
"(",
"self",
",",
"axis",
",",
"other_index",
",",
"how",
",",
"sort",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"other_index",
",",
"list",
")",
":",
"joined_obj",
"=",
"self",
".",
"columns",
"if",
"not",
"axis",
"... | Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
how: The type of join to join to make (e.g. right, left).
Returns:
Joined indices. | [
"Joins",
"a",
"pair",
"of",
"index",
"objects",
"(",
"columns",
"or",
"rows",
")",
"by",
"a",
"given",
"strategy",
"."
] | python | train |
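Under the hood these calls are pandas' `Index.join`; a quick standalone look at what the different `how` strategies return:

```python
import pandas as pd

left = pd.Index(["a", "b", "c"])
right = pd.Index(["b", "c", "d"])

print(left.join(right, how="outer"))  # Index(['a', 'b', 'c', 'd'], dtype='object')
print(left.join(right, how="inner"))  # Index(['b', 'c'], dtype='object')
print(left.join(right, how="left"))   # Index(['a', 'b', 'c'], dtype='object')
```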
cltk/cltk | cltk/stem/sanskrit/indian_syllabifier.py | https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/stem/sanskrit/indian_syllabifier.py#L138-L145 | def get_phonetic_info(self, lang):
"""For a specified language (lang), it returns the matrix and the vecto
containing specifications of the characters.
"""
phonetic_data = self.all_phonetic_data if lang != LC_TA else self.tamil_phonetic_data
phonetic_vectors = self.all_phonetic_vectors if lang != LC_TA else self.tamil_phonetic_vectors
return phonetic_data, phonetic_vectors | [
"def",
"get_phonetic_info",
"(",
"self",
",",
"lang",
")",
":",
"phonetic_data",
"=",
"self",
".",
"all_phonetic_data",
"if",
"lang",
"!=",
"LC_TA",
"else",
"self",
".",
"tamil_phonetic_data",
"phonetic_vectors",
"=",
"self",
".",
"all_phonetic_vectors",
"if",
"... | For a specified language (lang), it returns the matrix and the vecto
containing specifications of the characters. | [
"For",
"a",
"specified",
"language",
"(",
"lang",
")",
"it",
"returns",
"the",
"matrix",
"and",
"the",
"vecto",
"containing",
"specifications",
"of",
"the",
"characters",
"."
] | python | train |
google/grr | grr/server/grr_response_server/databases/mysql_migration.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_migration.py#L47-L104 | def ProcessMigrations(open_conn_fn,
migrations_root):
"""Processes migrations from a given folder.
This function uses LOCK TABLE MySQL command on _migrations
table to ensure that only one GRR process is actually
performing the migration.
We have to use open_conn_fn to open 2 connections to the database,
since LOCK TABLE command is per-connection and it's not allowed
to modify non-locked tables if LOCK TABLE was called within a
connection. To overcome this limitation we use one connection
to lock _migrations and perform its updates and one connection
to do actual migrations.
Args:
open_conn_fn: A function to open new database connection.
migrations_root: A path to folder with migration files.
"""
with contextlib.closing(open_conn_fn()) as conn:
conn.autocommit(True)
with contextlib.closing(conn.cursor()) as cursor:
cursor.execute("""CREATE TABLE IF NOT EXISTS _migrations(
migration_id INT UNSIGNED PRIMARY KEY,
timestamp TIMESTAMP(6) NOT NULL DEFAULT NOW(6)
)""")
with contextlib.closing(conn.cursor()) as cursor:
cursor.execute('SELECT GET_LOCK("grr_migration", 3600)')
try:
with contextlib.closing(conn.cursor()) as cursor:
current_migration = GetLatestMigrationNumber(cursor)
to_process = ListMigrationsToProcess(migrations_root, current_migration)
logging.info("Will execute following DB migrations: %s",
", ".join(to_process))
for fname in to_process:
start_time = time.time()
logging.info("Starting migration %s", fname)
with open(os.path.join(migrations_root, fname)) as fd:
sql = fd.read()
with contextlib.closing(conn.cursor()) as cursor:
cursor.execute(sql)
logging.info("Migration %s is done. Took %.2fs", fname,
time.time() - start_time)
# Update _migrations table with the latest migration.
with contextlib.closing(conn.cursor()) as cursor:
cursor.execute("INSERT INTO _migrations (migration_id) VALUES (%s)",
[_MigrationFilenameToInt(fname)])
finally:
with contextlib.closing(conn.cursor()) as cursor:
cursor.execute('SELECT RELEASE_LOCK("grr_migration")') | [
"def",
"ProcessMigrations",
"(",
"open_conn_fn",
",",
"migrations_root",
")",
":",
"with",
"contextlib",
".",
"closing",
"(",
"open_conn_fn",
"(",
")",
")",
"as",
"conn",
":",
"conn",
".",
"autocommit",
"(",
"True",
")",
"with",
"contextlib",
".",
"closing",... | Processes migrations from a given folder.
This function uses LOCK TABLE MySQL command on _migrations
table to ensure that only one GRR process is actually
performing the migration.
We have to use open_conn_fn to open 2 connections to the database,
since LOCK TABLE command is per-connection and it's not allowed
to modify non-locked tables if LOCK TABLE was called within a
connection. To overcome this limitation we use one connection
to lock _migrations and perform its updates and one connection
to do actual migrations.
Args:
open_conn_fn: A function to open new database connection.
migrations_root: A path to folder with migration files. | [
"Processes",
"migrations",
"from",
"a",
"given",
"folder",
"."
] | python | train |
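Note the docstring above says `LOCK TABLE`, but the code actually takes a `GET_LOCK` advisory lock. The named-lock pattern looks the same with any MySQL DB-API driver; connection details here are placeholders.

```python
import contextlib
import pymysql  # any MySQL DB-API driver works the same way

conn = pymysql.connect(host="localhost", user="grr",
                       password="secret", database="grr")  # placeholders
with contextlib.closing(conn.cursor()) as cursor:
    cursor.execute('SELECT GET_LOCK("grr_migration", 3600)')
try:
    pass  # run each migration file's SQL here, then record it in _migrations
finally:
    with contextlib.closing(conn.cursor()) as cursor:
        cursor.execute('SELECT RELEASE_LOCK("grr_migration")')
```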
saltstack/salt | salt/cloud/clouds/clc.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/clc.py#L173-L190 | def get_monthly_estimate(call=None, for_output=True):
'''
Return a list of the VMs that are on the provider
'''
creds = get_creds()
clc.v1.SetCredentials(creds["token"], creds["token_pass"])
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
try:
billing_raw = clc.v1.Billing.GetAccountSummary(alias=creds["accountalias"])
billing_raw = salt.utils.json.dumps(billing_raw)
billing = salt.utils.json.loads(billing_raw)
billing = round(billing["MonthlyEstimate"], 2)
return {"Monthly Estimate": billing}
except RuntimeError:
return {"Monthly Estimate": 0} | [
"def",
"get_monthly_estimate",
"(",
"call",
"=",
"None",
",",
"for_output",
"=",
"True",
")",
":",
"creds",
"=",
"get_creds",
"(",
")",
"clc",
".",
"v1",
".",
"SetCredentials",
"(",
"creds",
"[",
"\"token\"",
"]",
",",
"creds",
"[",
"\"token_pass\"",
"]"... | Return a list of the VMs that are on the provider | [
"Return",
"a",
"list",
"of",
"the",
"VMs",
"that",
"are",
"on",
"the",
"provider"
] | python | train |
openstack/horizon | horizon/utils/memoized.py | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/utils/memoized.py#L28-L36 | def _try_weakref(arg, remove_callback):
"""Return a weak reference to arg if possible, or arg itself if not."""
try:
arg = weakref.ref(arg, remove_callback)
except TypeError:
# Not all types can have a weakref. That includes strings
# and floats and such, so just pass them through directly.
pass
return arg | [
"def",
"_try_weakref",
"(",
"arg",
",",
"remove_callback",
")",
":",
"try",
":",
"arg",
"=",
"weakref",
".",
"ref",
"(",
"arg",
",",
"remove_callback",
")",
"except",
"TypeError",
":",
"# Not all types can have a weakref. That includes strings",
"# and floats and such... | Return a weak reference to arg if possible, or arg itself if not. | [
"Return",
"a",
"weak",
"reference",
"to",
"arg",
"if",
"possible",
"or",
"arg",
"itself",
"if",
"not",
"."
] | python | train |
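The `TypeError` branch is easy to see directly: instances of ordinary classes accept weak references, while ints and strings do not.

```python
import weakref

class Thing:
    pass

t = Thing()
r = weakref.ref(t)
print(r() is t)      # True while t is alive

try:
    weakref.ref(42)  # ints cannot be weakly referenced
except TypeError as exc:
    print(exc)       # cannot create weak reference to 'int' object
```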
archman/beamline | beamline/element.py | https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/element.py#L288-L294 | def getConfig(self, type='online', format='elegant'):
""" only dump configuration part, dict
:param type: comm, simu, ctrl, misc, all, online (default)
:param format: elegant/mad, elegant by default
"""
return list(list(self.dumpConfigDict[type](format).values())[0].values())[0] | [
"def",
"getConfig",
"(",
"self",
",",
"type",
"=",
"'online'",
",",
"format",
"=",
"'elegant'",
")",
":",
"return",
"list",
"(",
"list",
"(",
"self",
".",
"dumpConfigDict",
"[",
"type",
"]",
"(",
"format",
")",
".",
"values",
"(",
")",
")",
"[",
"0... | only dump configuration part, dict
:param type: comm, simu, ctrl, misc, all, online (default)
:param format: elegant/mad, elegant by default | [
"only",
"dump",
"configuration",
"part",
"dict"
] | python | train |
PRIArobotics/HedgehogProtocol | hedgehog/protocol/zmq/__init__.py | https://github.com/PRIArobotics/HedgehogProtocol/blob/140df1ade46fbfee7eea7a6c0b35cbc7ffbf89fe/hedgehog/protocol/zmq/__init__.py#L17-L19 | def _rindex(mylist: Sequence[T], x: T) -> int:
"""Index of the last occurrence of x in the sequence."""
return len(mylist) - mylist[::-1].index(x) - 1 | [
"def",
"_rindex",
"(",
"mylist",
":",
"Sequence",
"[",
"T",
"]",
",",
"x",
":",
"T",
")",
"->",
"int",
":",
"return",
"len",
"(",
"mylist",
")",
"-",
"mylist",
"[",
":",
":",
"-",
"1",
"]",
".",
"index",
"(",
"x",
")",
"-",
"1"
] | Index of the last occurrence of x in the sequence. | [
"Index",
"of",
"the",
"last",
"occurrence",
"of",
"x",
"in",
"the",
"sequence",
"."
] | python | valid |
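For example (assuming `_rindex` from the module above is in scope):

```python
assert _rindex([1, 2, 3, 2, 1], 2) == 3  # last 2 sits at index 3
assert _rindex("abcabc", "a") == 3       # works for any sequence
```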
honeynet/beeswarm | beeswarm/drones/honeypot/helpers/common.py | https://github.com/honeynet/beeswarm/blob/db51ea0bc29f631c3e3b5312b479ac9d5e31079a/beeswarm/drones/honeypot/helpers/common.py#L27-L42 | def path_to_ls(fn):
""" Converts an absolute path to an entry resembling the output of
the ls command on most UNIX systems."""
st = os.stat(fn)
full_mode = 'rwxrwxrwx'
mode = ''
file_time = ''
d = ''
for i in range(9):
# Incrementally builds up the 9 character string, using characters from the
# fullmode (defined above) and mode bits from the stat() system call.
mode += ((st.st_mode >> (8 - i)) & 1) and full_mode[i] or '-'
d = (os.path.isdir(fn)) and 'd' or '-'
file_time = time.strftime(' %b %d %H:%M ', time.gmtime(st.st_mtime))
list_format = '{0}{1} 1 ftp ftp {2}\t{3}{4}'.format(d, mode, str(st.st_size), file_time, os.path.basename(fn))
return list_format | [
"def",
"path_to_ls",
"(",
"fn",
")",
":",
"st",
"=",
"os",
".",
"stat",
"(",
"fn",
")",
"full_mode",
"=",
"'rwxrwxrwx'",
"mode",
"=",
"''",
"file_time",
"=",
"''",
"d",
"=",
"''",
"for",
"i",
"in",
"range",
"(",
"9",
")",
":",
"# Incrementally buil... | Converts an absolute path to an entry resembling the output of
the ls command on most UNIX systems. | [
"Converts",
"an",
"absolute",
"path",
"to",
"an",
"entry",
"resembling",
"the",
"output",
"of",
"the",
"ls",
"command",
"on",
"most",
"UNIX",
"systems",
"."
] | python | train |
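The permission-string loop is the interesting part; run in isolation with a hypothetical mode value it produces the familiar `ls` triplets.

```python
full_mode = 'rwxrwxrwx'
st_mode = 0o100755  # a regular file with mode 755, as os.stat() would report

mode = ''
for i in range(9):
    # Test permission bits from most- to least-significant.
    mode += ((st_mode >> (8 - i)) & 1) and full_mode[i] or '-'

print(mode)  # rwxr-xr-x
```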
saltstack/salt | salt/utils/openstack/neutron.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/neutron.py#L806-L827 | def create_ipsecpolicy(self, name, **kwargs):
'''
Creates a new IPsecPolicy
'''
body = {'name': name}
if 'transform_protocol' in kwargs:
body['transform_protocol'] = kwargs['transform_protocol']
if 'auth_algorithm' in kwargs:
body['auth_algorithm'] = kwargs['auth_algorithm']
if 'encapsulation_mode' in kwargs:
body['encapsulation_mode'] = kwargs['encapsulation_mode']
if 'encryption_algorithm' in kwargs:
body['encryption_algorithm'] = kwargs['encryption_algorithm']
if 'pfs' in kwargs:
body['pfs'] = kwargs['pfs']
if 'units' in kwargs:
body['lifetime'] = {'units': kwargs['units']}
if 'value' in kwargs:
if 'lifetime' not in body:
body['lifetime'] = {}
body['lifetime']['value'] = kwargs['value']
return self.network_conn.create_ipsecpolicy(body={'ipsecpolicy': body}) | [
"def",
"create_ipsecpolicy",
"(",
"self",
",",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"body",
"=",
"{",
"'name'",
":",
"name",
"}",
"if",
"'transform_protocol'",
"in",
"kwargs",
":",
"body",
"[",
"'transform_protocol'",
"]",
"=",
"kwargs",
"[",
"'tra... | Creates a new IPsecPolicy | [
"Creates",
"a",
"new",
"IPsecPolicy"
] | python | train |
gitenberg-dev/gitberg | gitenberg/make.py | https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/make.py#L47-L70 | def copy_files(self):
""" Copy the LICENSE and CONTRIBUTING files to each folder repo
Generate covers if needed. Dump the metadata.
"""
files = [u'LICENSE', u'CONTRIBUTING.rst']
this_dir = dirname(abspath(__file__))
for _file in files:
sh.cp(
'{0}/templates/{1}'.format(this_dir, _file),
'{0}/'.format(self.book.local_path)
)
# copy metadata rdf file
if self.book.meta.rdf_path: # if None, meta is from yaml file
sh.cp(
self.book.meta.rdf_path,
'{0}/'.format(self.book.local_path)
)
if 'GITenberg' not in self.book.meta.subjects:
if not self.book.meta.subjects:
self.book.meta.metadata['subjects'] = []
self.book.meta.metadata['subjects'].append('GITenberg')
self.save_meta() | [
"def",
"copy_files",
"(",
"self",
")",
":",
"files",
"=",
"[",
"u'LICENSE'",
",",
"u'CONTRIBUTING.rst'",
"]",
"this_dir",
"=",
"dirname",
"(",
"abspath",
"(",
"__file__",
")",
")",
"for",
"_file",
"in",
"files",
":",
"sh",
".",
"cp",
"(",
"'{0}/templates... | Copy the LICENSE and CONTRIBUTING files to each folder repo
Generate covers if needed. Dump the metadata. | [
"Copy",
"the",
"LICENSE",
"and",
"CONTRIBUTING",
"files",
"to",
"each",
"folder",
"repo",
"Generate",
"covers",
"if",
"needed",
".",
"Dump",
"the",
"metadata",
"."
] | python | train |
nicolargo/glances | glances/stats.py | https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/stats.py#L277-L286 | def getAllLimitsAsDict(self, plugin_list=None):
"""
Return all the stats limits (dict).
Default behavior is to export all the limits
if plugin_list is provided, only export limits of given plugin (list)
"""
if plugin_list is None:
# All plugins should be exported
plugin_list = self._plugins
return {p: self._plugins[p].limits for p in plugin_list} | [
"def",
"getAllLimitsAsDict",
"(",
"self",
",",
"plugin_list",
"=",
"None",
")",
":",
"if",
"plugin_list",
"is",
"None",
":",
"# All plugins should be exported",
"plugin_list",
"=",
"self",
".",
"_plugins",
"return",
"{",
"p",
":",
"self",
".",
"_plugins",
"[",... | Return all the stats limits (dict).
Default behavior is to export all the limits
if plugin_list is provided, only export limits of given plugin (list) | [
"Return",
"all",
"the",
"stats",
"limits",
"(",
"dict",
")",
".",
"Default",
"behavor",
"is",
"to",
"export",
"all",
"the",
"limits",
"if",
"plugin_list",
"is",
"provided",
"only",
"export",
"limits",
"of",
"given",
"plugin",
"(",
"list",
")"
] | python | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L413-L428 | def aread8(np, flowdir, acc, outlet=None, streamskeleton=None, edgecontaimination=False,
workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run Accumulate area according to D8 flow direction"""
# -nc means do not consider edge contamination
if not edgecontaimination:
in_params = {'-nc': None}
else:
in_params = None
fname = TauDEM.func_name('aread8')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-p': flowdir, '-o': outlet, '-wg': streamskeleton}, workingdir,
in_params,
{'-ad8': acc},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"aread8",
"(",
"np",
",",
"flowdir",
",",
"acc",
",",
"outlet",
"=",
"None",
",",
"streamskeleton",
"=",
"None",
",",
"edgecontaimination",
"=",
"False",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
... | Run Accumulate area according to D8 flow direction | [
"Run",
"Accumulate",
"area",
"according",
"to",
"D8",
"flow",
"direction"
] | python | train |
oscarlazoarjona/fast | fast/graphic.py | https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/graphic.py#L736-L755 | def rotate_and_traslate(cur, alpha, v0):
r"""Rotate and translate a curve."""
if len(cur) > 2 or (type(cur[0][0]) in [list, tuple]):
cur_list = cur[:]
for i in range(len(cur_list)):
curi = cur_list[i]
curi = rotate_and_traslate(curi, alpha, v0)
cur_list[i] = curi
return cur_list
else:
x0, y0 = cur
rot = np.matrix([[cos(alpha), -sin(alpha)], [sin(alpha), cos(alpha)]])
xn = []; yn = []
for i in range(len(x0)):
v = np.matrix([[x0[i]], [y0[i]]])
vi = np.dot(rot, v)
xn += [float(vi[0][0])+v0[0]]; yn += [float(vi[1][0])+v0[1]]
return xn, yn | [
"def",
"rotate_and_traslate",
"(",
"cur",
",",
"alpha",
",",
"v0",
")",
":",
"if",
"len",
"(",
"cur",
")",
">",
"2",
"or",
"(",
"type",
"(",
"cur",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"in",
"[",
"list",
",",
"tuple",
"]",
")",
":",
"cur_list",... | r"""Rotate and translate a curve. | [
"r",
"Rotate",
"and",
"translate",
"a",
"curve",
"."
] | python | train |
cuihantao/andes | andes/routines/tds.py | https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/routines/tds.py#L94-L157 | def calc_time_step(self):
"""
Set the time step during time domain simulations
Parameters
----------
convergence: bool
truth value of the convergence of the last step
niter: int
current iteration count
t: float
current simulation time
Returns
-------
float
computed time step size
"""
system = self.system
config = self.config
convergence = self.convergence
niter = self.niter
t = self.t
if t == 0:
self._calc_time_step_first()
return
if convergence:
if niter >= 15:
config.deltat = max(config.deltat * 0.5, config.deltatmin)
elif niter <= 6:
config.deltat = min(config.deltat * 1.1, config.deltatmax)
else:
config.deltat = max(config.deltat * 0.95, config.deltatmin)
# adjust fixed time step if niter is high
if config.fixt:
config.deltat = min(config.tstep, config.deltat)
else:
config.deltat *= 0.9
if config.deltat < config.deltatmin:
config.deltat = 0
if system.Fault.is_time(t) or system.Breaker.is_time(t):
config.deltat = min(config.deltat, 0.002778)
elif system.check_event(t):
config.deltat = min(config.deltat, 0.002778)
if config.method == 'fwdeuler':
config.deltat = min(config.deltat, config.tstep)
# last step size
if self.t + config.deltat > config.tf:
config.deltat = config.tf - self.t
# reduce time step for fixed_times events
for fixed_t in self.fixed_times:
if (fixed_t > self.t) and (fixed_t <= self.t + config.deltat):
config.deltat = fixed_t - self.t
self.switch = True
break
self.h = config.deltat | [
"def",
"calc_time_step",
"(",
"self",
")",
":",
"system",
"=",
"self",
".",
"system",
"config",
"=",
"self",
".",
"config",
"convergence",
"=",
"self",
".",
"convergence",
"niter",
"=",
"self",
".",
"niter",
"t",
"=",
"self",
".",
"t",
"if",
"t",
"==... | Set the time step during time domain simulations
Parameters
----------
convergence: bool
truth value of the convergence of the last step
niter: int
current iteration count
t: float
current simulation time
Returns
-------
float
computed time step size | [
"Set",
"the",
"time",
"step",
"during",
"time",
"domain",
"simulations"
] | python | train |
googleapis/google-cloud-python | bigquery/google/cloud/bigquery/dbapi/cursor.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/dbapi/cursor.py#L117-L176 | def execute(self, operation, parameters=None, job_id=None):
"""Prepare and execute a database operation.
.. note::
When setting query parameters, values which are "text"
(``unicode`` in Python2, ``str`` in Python3) will use
the 'STRING' BigQuery type. Values which are "bytes" (``str`` in
Python2, ``bytes`` in Python3), will use the 'BYTES' type.
A `~datetime.datetime` parameter without timezone information uses
the 'DATETIME' BigQuery type (example: Global Pi Day Celebration
March 14, 2017 at 1:59pm). A `~datetime.datetime` parameter with
timezone information uses the 'TIMESTAMP' BigQuery type (example:
a wedding on April 29, 2011 at 11am, British Summer Time).
For more information about BigQuery data types, see:
https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
``STRUCT``/``RECORD`` and ``REPEATED`` query parameters are not
yet supported. See:
https://github.com/GoogleCloudPlatform/google-cloud-python/issues/3524
:type operation: str
:param operation: A Google BigQuery query string.
:type parameters: Mapping[str, Any] or Sequence[Any]
:param parameters:
(Optional) dictionary or sequence of parameter values.
:type job_id: str
:param job_id: (Optional) The job_id to use. If not set, a job ID
is generated at random.
"""
self._query_data = None
self._query_job = None
client = self.connection._client
# The DB-API uses the pyformat formatting, since the way BigQuery does
# query parameters was not one of the standard options. Convert both
# the query and the parameters to the format expected by the client
# libraries.
formatted_operation = _format_operation(operation, parameters=parameters)
query_parameters = _helpers.to_query_parameters(parameters)
config = job.QueryJobConfig()
config.query_parameters = query_parameters
config.use_legacy_sql = False
self._query_job = client.query(
formatted_operation, job_config=config, job_id=job_id
)
# Wait for the query to finish.
try:
self._query_job.result()
except google.cloud.exceptions.GoogleCloudError as exc:
raise exceptions.DatabaseError(exc)
query_results = self._query_job._query_results
self._set_rowcount(query_results)
self._set_description(query_results.schema) | [
"def",
"execute",
"(",
"self",
",",
"operation",
",",
"parameters",
"=",
"None",
",",
"job_id",
"=",
"None",
")",
":",
"self",
".",
"_query_data",
"=",
"None",
"self",
".",
"_query_job",
"=",
"None",
"client",
"=",
"self",
".",
"connection",
".",
"_cli... | Prepare and execute a database operation.
.. note::
When setting query parameters, values which are "text"
(``unicode`` in Python2, ``str`` in Python3) will use
the 'STRING' BigQuery type. Values which are "bytes" (``str`` in
Python2, ``bytes`` in Python3), will use the 'BYTES' type.
A `~datetime.datetime` parameter without timezone information uses
the 'DATETIME' BigQuery type (example: Global Pi Day Celebration
March 14, 2017 at 1:59pm). A `~datetime.datetime` parameter with
timezone information uses the 'TIMESTAMP' BigQuery type (example:
a wedding on April 29, 2011 at 11am, British Summer Time).
For more information about BigQuery data types, see:
https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
``STRUCT``/``RECORD`` and ``REPEATED`` query parameters are not
yet supported. See:
https://github.com/GoogleCloudPlatform/google-cloud-python/issues/3524
:type operation: str
:param operation: A Google BigQuery query string.
:type parameters: Mapping[str, Any] or Sequence[Any]
:param parameters:
(Optional) dictionary or sequence of parameter values.
:type job_id: str
:param job_id: (Optional) The job_id to use. If not set, a job ID
is generated at random. | [
"Prepare",
"and",
"execute",
"a",
"database",
"operation",
"."
] | python | train |
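Typical use through the DB-API surface, with pyformat-style named parameters (the project, dataset and table names are made up):

```python
from google.cloud.bigquery.dbapi import connect

conn = connect()  # builds a client from application default credentials
cursor = conn.cursor()
cursor.execute(
    "SELECT name FROM `my_project.my_dataset.people` WHERE age > %(age)s",
    {"age": 30},
)
for row in cursor.fetchall():
    print(row)
```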
google/google-visualization-python | gviz_api.py | https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L895-L911 | def ToTsvExcel(self, columns_order=None, order_by=()):
"""Returns a file in tab-separated-format readable by MS Excel.
Returns a file in UTF-16 little endian encoding, with tabs separating the
values.
Args:
columns_order: Delegated to ToCsv.
order_by: Delegated to ToCsv.
Returns:
A tab-separated little endian UTF16 file representing the table.
"""
csv_result = self.ToCsv(columns_order, order_by, separator="\t")
if not isinstance(csv_result, six.text_type):
csv_result = csv_result.decode("utf-8")
return csv_result.encode("UTF-16LE") | [
"def",
"ToTsvExcel",
"(",
"self",
",",
"columns_order",
"=",
"None",
",",
"order_by",
"=",
"(",
")",
")",
":",
"csv_result",
"=",
"self",
".",
"ToCsv",
"(",
"columns_order",
",",
"order_by",
",",
"separator",
"=",
"\"\\t\"",
")",
"if",
"not",
"isinstance... | Returns a file in tab-separated-format readable by MS Excel.
Returns a file in UTF-16 little endian encoding, with tabs separating the
values.
Args:
columns_order: Delegated to ToCsv.
order_by: Delegated to ToCsv.
Returns:
A tab-separated little endian UTF16 file representing the table. | [
"Returns",
"a",
"file",
"in",
"tab",
"-",
"separated",
"-",
"format",
"readable",
"by",
"MS",
"Excel",
"."
] | python | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L420-L426 | def get_stocks(self, symbols: List[str]) -> List[Commodity]:
""" loads stocks by symbol """
query = (
self.query
.filter(Commodity.mnemonic.in_(symbols))
).order_by(Commodity.namespace, Commodity.mnemonic)
return query.all() | [
"def",
"get_stocks",
"(",
"self",
",",
"symbols",
":",
"List",
"[",
"str",
"]",
")",
"->",
"List",
"[",
"Commodity",
"]",
":",
"query",
"=",
"(",
"self",
".",
"query",
".",
"filter",
"(",
"Commodity",
".",
"mnemonic",
".",
"in_",
"(",
"symbols",
")... | loads stocks by symbol | [
"loads",
"stocks",
"by",
"symbol"
] | python | train |
RRZE-HPC/kerncraft | likwid-counter-packing.py | https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/likwid-counter-packing.py#L75-L104 | def eventstr(event_tuple=None, event=None, register=None, parameters=None):
"""
Return a LIKWID event string from an event tuple or keyword arguments.
*event_tuple* may have two or three arguments: (event, register) or
(event, register, parameters)
Keyword arguments will be overwritten by *event_tuple*.
>>> eventstr(('L1D_REPLACEMENT', 'PMC0', None))
'L1D_REPLACEMENT:PMC0'
>>> eventstr(('L1D_REPLACEMENT', 'PMC0'))
'L1D_REPLACEMENT:PMC0'
>>> eventstr(('MEM_UOPS_RETIRED_LOADS', 'PMC3', {'EDGEDETECT': None, 'THRESHOLD': 2342}))
'MEM_UOPS_RETIRED_LOADS:PMC3:EDGEDETECT:THRESHOLD=0x926'
>>> eventstr(event='DTLB_LOAD_MISSES_WALK_DURATION', register='PMC3')
'DTLB_LOAD_MISSES_WALK_DURATION:PMC3'
"""
if event_tuple is not None and len(event_tuple) == 3:
event, register, parameters = event_tuple
elif event_tuple is not None and len(event_tuple) == 2:
event, register = event_tuple
event_dscr = [event, register]
if parameters:
for k, v in sorted(parameters.items()): # sorted for reproducibility
if type(v) is int:
k += "={}".format(hex(v))
event_dscr.append(k)
return ":".join(event_dscr) | [
"def",
"eventstr",
"(",
"event_tuple",
"=",
"None",
",",
"event",
"=",
"None",
",",
"register",
"=",
"None",
",",
"parameters",
"=",
"None",
")",
":",
"if",
"len",
"(",
"event_tuple",
")",
"==",
"3",
":",
"event",
",",
"register",
",",
"parameters",
... | Return a LIKWID event string from an event tuple or keyword arguments.
*event_tuple* may have two or three arguments: (event, register) or
(event, register, parameters)
Keyword arguments will be overwritten by *event_tuple*.
>>> eventstr(('L1D_REPLACEMENT', 'PMC0', None))
'L1D_REPLACEMENT:PMC0'
>>> eventstr(('L1D_REPLACEMENT', 'PMC0'))
'L1D_REPLACEMENT:PMC0'
>>> eventstr(('MEM_UOPS_RETIRED_LOADS', 'PMC3', {'EDGEDETECT': None, 'THRESHOLD': 2342}))
'MEM_UOPS_RETIRED_LOADS:PMC3:EDGEDETECT:THRESHOLD=0x926'
>>> eventstr(event='DTLB_LOAD_MISSES_WALK_DURATION', register='PMC3')
'DTLB_LOAD_MISSES_WALK_DURATION:PMC3' | [
"Return",
"a",
"LIKWID",
"event",
"string",
"from",
"an",
"event",
"tuple",
"or",
"keyword",
"arguments",
"."
] | python | test |
inasafe/inasafe | safe/messaging/message.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/messaging/message.py#L61-L78 | def add(self, message):
"""Add a MessageElement to the end of the queue.
Strings can be passed and are automatically converted into
item.Text()
:param message: An element to add to the message queue.
:type message: safe.messaging.Message, MessageElement, str
"""
if self._is_stringable(message) or self._is_qstring(message):
self.message.append(Text(message))
elif isinstance(message, MessageElement):
self.message.append(message)
elif isinstance(message, Message):
self.message.extend(message.message)
else:
raise InvalidMessageItemError(message, message.__class__) | [
"def",
"add",
"(",
"self",
",",
"message",
")",
":",
"if",
"self",
".",
"_is_stringable",
"(",
"message",
")",
"or",
"self",
".",
"_is_qstring",
"(",
"message",
")",
":",
"self",
".",
"message",
".",
"append",
"(",
"Text",
"(",
"message",
")",
")",
... | Add a MessageElement to the end of the queue.
Strings can be passed and are automatically converted into
item.Text()
:param message: An element to add to the message queue.
:type message: safe.messaging.Message, MessageElement, str | [
"Add",
"a",
"MessageElement",
"to",
"the",
"end",
"of",
"the",
"queue",
"."
] | python | train |
alphagov/performanceplatform-collector | performanceplatform/collector/ga/plugins/aggregate.py | https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/performanceplatform/collector/ga/plugins/aggregate.py#L74-L84 | def make_aggregate(docs, aggregations):
"""
Given `docs` and `aggregations` return a single document with the
aggregations applied.
"""
new_doc = dict(docs[0])
for keyname, aggregation_function in aggregations:
new_doc[keyname] = aggregation_function(docs)
return new_doc | [
"def",
"make_aggregate",
"(",
"docs",
",",
"aggregations",
")",
":",
"new_doc",
"=",
"dict",
"(",
"docs",
"[",
"0",
"]",
")",
"for",
"keyname",
",",
"aggregation_function",
"in",
"aggregations",
":",
"new_doc",
"[",
"keyname",
"]",
"=",
"aggregation_function... | Given `docs` and `aggregations` return a single document with the
aggregations applied. | [
"Given",
"docs",
"and",
"aggregations",
"return",
"a",
"single",
"document",
"with",
"the",
"aggregations",
"applied",
"."
] | python | train |
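For instance, collapsing two documents while summing one field; keys not named in `aggregations` are taken from the first document (assuming `make_aggregate` above is in scope):

```python
docs = [
    {"dataType": "visits", "count": 3},
    {"dataType": "visits", "count": 5},
]
aggregations = [
    ("count", lambda ds: sum(d["count"] for d in ds)),
]
print(make_aggregate(docs, aggregations))
# {'dataType': 'visits', 'count': 8}
```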
swharden/PyOriginTools | scripts/update_version.py | https://github.com/swharden/PyOriginTools/blob/536fb8e11234ffdc27e26b1800e0358179ca7d26/scripts/update_version.py#L4-L32 | def updateVersion(fname):
"""
given a filename to a file containing a __counter__ variable,
open it, read the count, add one, rewrite the file.
This:
__counter__=123
Becomes:
__counter__=124
"""
fname=os.path.abspath(fname)
if not os.path.exists(fname):
print("can not update version! file doesn't exist:\n",fname)
return
with open(fname) as f:
raw=f.read().split("\n")
for i,line in enumerate(raw):
if line.startswith("__counter__="):
version=int(line.split("=")[1])
raw[i]="__counter__=%d"%(version+1)
with open(fname,'w') as f:
f.write("\n".join(raw))
print("upgraded version %d -> %d"%(version,version+1))
sys.path.insert(0,os.path.dirname(fname))
import version
print("New version:",version.__version__)
with open('version.txt','w') as f:
f.write(str(version.__version__)) | [
"def",
"updateVersion",
"(",
"fname",
")",
":",
"fname",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"fname",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"fname",
")",
":",
"print",
"(",
"\"can not update version! file doesn't exist:\\n\"",
"... | given a filename to a file containing a __counter__ variable,
open it, read the count, add one, rewrite the file.
This:
__counter__=123
Becomes:
__counter__=124 | [
"given",
"a",
"filename",
"to",
"a",
"file",
"containing",
"a",
"__counter__",
"variable",
"open",
"it",
"read",
"the",
"count",
"add",
"one",
"rewrite",
"the",
"file",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/nose/plugins/capture.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/plugins/capture.py#L47-L52 | def configure(self, options, conf):
"""Configure plugin. Plugin is enabled by default.
"""
self.conf = conf
if not options.capture:
self.enabled = False | [
"def",
"configure",
"(",
"self",
",",
"options",
",",
"conf",
")",
":",
"self",
".",
"conf",
"=",
"conf",
"if",
"not",
"options",
".",
"capture",
":",
"self",
".",
"enabled",
"=",
"False"
] | Configure plugin. Plugin is enabled by default. | [
"Configure",
"plugin",
".",
"Plugin",
"is",
"enabled",
"by",
"default",
"."
] | python | test |
DistrictDataLabs/yellowbrick | yellowbrick/regressor/residuals.py | https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/regressor/residuals.py#L512-L566 | def draw(self, y_pred, residuals, train=False, **kwargs):
"""
Draw the residuals against the predicted value for the specified split.
It is best to draw the training split first, then the test split so
that the test split (usually smaller) is above the training split;
particularly if the histogram is turned on.
Parameters
----------
y_pred : ndarray or Series of length n
An array or series of predicted target values
residuals : ndarray or Series of length n
An array or series of the difference between the predicted and the
target values
train : boolean, default: False
If False, `draw` assumes that the residual points being plotted
are from the test data; if True, `draw` assumes the residuals
are the train data.
Returns
-------
ax : the axis with the plotted figure
"""
if train:
color = self.colors['train_point']
label = "Train $R^2 = {:0.3f}$".format(self.train_score_)
alpha = self.alphas['train_point']
else:
color = self.colors['test_point']
label = "Test $R^2 = {:0.3f}$".format(self.test_score_)
alpha = self.alphas['test_point']
# Update the legend information
self._labels.append(label)
self._colors.append(color)
# Draw the residuals scatter plot
self.ax.scatter(
y_pred, residuals, c=color, alpha=alpha, label=label
)
# Add residuals histogram
if self.hist in {True, 'frequency'}:
self.hax.hist(residuals, bins=50, orientation="horizontal", color=color)
elif self.hist == 'density':
self.hax.hist(
residuals, bins=50, orientation="horizontal", density=True, color=color
)
# Ensure the current axes is always the main residuals axes
plt.sca(self.ax)
return self.ax | [
"def",
"draw",
"(",
"self",
",",
"y_pred",
",",
"residuals",
",",
"train",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"train",
":",
"color",
"=",
"self",
".",
"colors",
"[",
"'train_point'",
"]",
"label",
"=",
"\"Train $R^2 = {:0.3f}$\"",
"... | Draw the residuals against the predicted value for the specified split.
It is best to draw the training split first, then the test split so
that the test split (usually smaller) is above the training split;
particularly if the histogram is turned on.
Parameters
----------
y_pred : ndarray or Series of length n
An array or series of predicted target values
residuals : ndarray or Series of length n
An array or series of the difference between the predicted and the
target values
train : boolean, default: False
If False, `draw` assumes that the residual points being plotted
are from the test data; if True, `draw` assumes the residuals
are the train data.
Returns
-------
ax : the axis with the plotted figure | [
"Draw",
"the",
"residuals",
"against",
"the",
"predicted",
"value",
"for",
"the",
"specified",
"split",
".",
"It",
"is",
"best",
"to",
"draw",
"the",
"training",
"split",
"first",
"then",
"the",
"test",
"split",
"so",
"that",
"the",
"test",
"split",
"(",
... | python | train |
nerdvegas/rez | src/rez/utils/colorize.py | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/colorize.py#L160-L173 | def _color_level(str_, level):
""" Return the string wrapped with the appropriate styling for the message
level. The styling will be determined based on the rez configuration.
Args:
str_ (str): The string to be wrapped.
level (str): The message level. Should be one of 'critical', 'error',
'warning', 'info' or 'debug'.
Returns:
str: The string styled with the appropriate escape sequences.
"""
fore_color, back_color, styles = _get_style_from_config(level)
return _color(str_, fore_color, back_color, styles) | [
"def",
"_color_level",
"(",
"str_",
",",
"level",
")",
":",
"fore_color",
",",
"back_color",
",",
"styles",
"=",
"_get_style_from_config",
"(",
"level",
")",
"return",
"_color",
"(",
"str_",
",",
"fore_color",
",",
"back_color",
",",
"styles",
")"
] | Return the string wrapped with the appropriate styling for the message
level. The styling will be determined based on the rez configuration.
Args:
str_ (str): The string to be wrapped.
level (str): The message level. Should be one of 'critical', 'error',
'warning', 'info' or 'debug'.
Returns:
str: The string styled with the appropriate escape sequences. | [
"Return",
"the",
"string",
"wrapped",
"with",
"the",
"appropriate",
"styling",
"for",
"the",
"message",
"level",
".",
"The",
"styling",
"will",
"be",
"determined",
"based",
"on",
"the",
"rez",
"configuration",
"."
] | python | train |
maljovec/topopy | topopy/ContourTree.py | https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/ContourTree.py#L144-L172 | def _identifyBranches(self):
""" A helper function for determining all of the branches in the
tree. This should be called after the tree has been fully
constructed and its nodes and edges are populated.
"""
if self.debug:
sys.stdout.write("Identifying branches: ")
start = time.clock()
seen = set()
self.branches = set()
# Find all of the branching nodes in the tree, degree > 1
# That is, they appear in more than one edge
for e1, e2 in self.edges:
if e1 not in seen:
seen.add(e1)
else:
self.branches.add(e1)
if e2 not in seen:
seen.add(e2)
else:
self.branches.add(e2)
if self.debug:
end = time.clock()
sys.stdout.write("%f s\n" % (end - start)) | [
"def",
"_identifyBranches",
"(",
"self",
")",
":",
"if",
"self",
".",
"debug",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Identifying branches: \"",
")",
"start",
"=",
"time",
".",
"clock",
"(",
")",
"seen",
"=",
"set",
"(",
")",
"self",
".",
"... | A helper function for determining all of the branches in the
tree. This should be called after the tree has been fully
constructed and its nodes and edges are populated. | [
"A",
"helper",
"function",
"for",
"determining",
"all",
"of",
"the",
"branches",
"in",
"the",
"tree",
".",
"This",
"should",
"be",
"called",
"after",
"the",
"tree",
"has",
"been",
"fully",
"constructed",
"and",
"its",
"nodes",
"and",
"edges",
"are",
"popul... | python | train |
NASA-AMMOS/AIT-Core | ait/core/dtype.py | https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/dtype.py#L468-L473 | def cmddict(self):
"""PrimitiveType base for the ComplexType"""
if self._cmddict is None:
self._cmddict = cmd.getDefaultDict()
return self._cmddict | [
"def",
"cmddict",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cmddict",
"is",
"None",
":",
"self",
".",
"_cmddict",
"=",
"cmd",
".",
"getDefaultDict",
"(",
")",
"return",
"self",
".",
"_cmddict"
] | PrimitiveType base for the ComplexType | [
"PrimitiveType",
"base",
"for",
"the",
"ComplexType"
] | python | train |
ethereum/py-evm | eth/chains/header.py | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/header.py#L165-L186 | def import_header(self,
header: BlockHeader
) -> Tuple[Tuple[BlockHeader, ...], Tuple[BlockHeader, ...]]:
"""
Direct passthrough to `headerdb`
Also updates the local `header` property to be the latest canonical head.
Returns an iterable of headers representing the headers that are newly
part of the canonical chain.
- If the imported header is not part of the canonical chain then an
empty tuple will be returned.
- If the imported header simply extends the canonical chain then a
length-1 tuple with the imported header will be returned.
- If the header is part of a non-canonical chain which overtakes the
current canonical chain then the returned tuple will contain the
headers which are newly part of the canonical chain.
"""
new_canonical_headers = self.headerdb.persist_header(header)
self.header = self.get_canonical_head()
return new_canonical_headers | [
"def",
"import_header",
"(",
"self",
",",
"header",
":",
"BlockHeader",
")",
"->",
"Tuple",
"[",
"Tuple",
"[",
"BlockHeader",
",",
"...",
"]",
",",
"Tuple",
"[",
"BlockHeader",
",",
"...",
"]",
"]",
":",
"new_canonical_headers",
"=",
"self",
".",
"header... | Direct passthrough to `headerdb`
Also updates the local `header` property to be the latest canonical head.
Returns an iterable of headers representing the headers that are newly
part of the canonical chain.
- If the imported header is not part of the canonical chain then an
empty tuple will be returned.
- If the imported header simply extends the canonical chain then a
length-1 tuple with the imported header will be returned.
- If the header is part of a non-canonical chain which overtakes the
current canonical chain then the returned tuple will contain the
headers which are newly part of the canonical chain. | [
"Direct",
"passthrough",
"to",
"headerdb"
] | python | train |