repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
tensorflow/datasets | tensorflow_datasets/image/open_images.py | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/open_images.py#L221-L262 | def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
paths = dl_manager.download_and_extract(_URLS)
# Load labels from CSVs:
def load(names):
csv_positions = [0] * len(names)
return functools.partial(_load_objects, [paths[name] for name in names],
csv_positions)
train_objects = load(['train_human_labels', 'train_machine_labels'])
test_objects = load(['test_human_labels', 'test_machine_labels'])
validation_objects = load(['validation_human_labels',
'validation_machine_labels'])
def load_boxes(name):
csv_positions = [0]
return functools.partial(_load_bboxes, paths[name], csv_positions)
train_bbox = load_boxes('train-annotations-bbox')
test_bbox = load_boxes('test-annotations-bbox')
validation_bbox = load_boxes('validation-annotations-bbox')
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=512,
gen_kwargs=dict(archive_paths=paths['train_images'],
objects_getter=train_objects,
bboxes_getter=train_bbox,
prefixes='0123456789abcdef'),
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=36,
gen_kwargs=dict(archive_paths=[paths['test_images']],
objects_getter=test_objects,
bboxes_getter=test_bbox),
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=12,
gen_kwargs=dict(archive_paths=[paths['validation_images']],
objects_getter=validation_objects,
bboxes_getter=validation_bbox),
),
] | [
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"paths",
"=",
"dl_manager",
".",
"download_and_extract",
"(",
"_URLS",
")",
"# Load labels from CSVs:",
"def",
"load",
"(",
"names",
")",
":",
"csv_positions",
"=",
"[",
"0",
"]",
"*",
"le... | Returns SplitGenerators. | [
"Returns",
"SplitGenerators",
"."
] | python | train |
portfors-lab/sparkle | sparkle/tools/audiotools.py | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/tools/audiotools.py#L53-L64 | def calc_spectrum(signal, rate):
"""Return the spectrum and frequency indexes for real-valued input signal"""
npts = len(signal)
padto = 1 << (npts - 1).bit_length()
# print 'length of signal {}, pad to {}'.format(npts, padto)
npts = padto
sp = np.fft.rfft(signal, n=padto) / npts
# print('sp len ', len(sp))
freq = np.arange((npts / 2) + 1) / (npts / rate)
# print('freq len ', len(freq))
return freq, abs(sp) | [
"def",
"calc_spectrum",
"(",
"signal",
",",
"rate",
")",
":",
"npts",
"=",
"len",
"(",
"signal",
")",
"padto",
"=",
"1",
"<<",
"(",
"npts",
"-",
"1",
")",
".",
"bit_length",
"(",
")",
"# print 'length of signal {}, pad to {}'.format(npts, padto)",
"npts",
"=... | Return the spectrum and frequency indexes for real-valued input signal | [
"Return",
"the",
"spectrum",
"and",
"frequency",
"indexes",
"for",
"real",
"-",
"valued",
"input",
"signal"
] | python | train |
vsoch/helpme | helpme/main/base/settings.py | https://github.com/vsoch/helpme/blob/e609172260b10cddadb2d2023ab26da8082a9feb/helpme/main/base/settings.py#L155-L193 | def get_setting(self, name, section=None, default=None, user=True):
'''return a setting from the environment (first priority) and then
secrets (second priority) if one can be found. If not, return None.
Parameters
==========
section: the section in the config, defaults to self.name
name: they key (index) of the setting to look up
default: (optional) if not found, return default instead.
user: if True, load from user config. Otherwise, main config
'''
loader = self._load_config_user
if not user:
loader = self._load_config
if section is None:
section = self.name
# First priority is the environment
setting = os.environ.get(name)
# Second priority is the secrets file
if setting is None:
# Loads user config on level of helper (already indexed)
config = loader()
if section in config:
if name.lower() in config[section]:
setting = config[section][name.lower()]
# Third priority, return a default
if setting is None and default is not None:
setting = default
return setting | [
"def",
"get_setting",
"(",
"self",
",",
"name",
",",
"section",
"=",
"None",
",",
"default",
"=",
"None",
",",
"user",
"=",
"True",
")",
":",
"loader",
"=",
"self",
".",
"_load_config_user",
"if",
"not",
"user",
":",
"loader",
"=",
"self",
".",
"_loa... | return a setting from the environment (first priority) and then
secrets (second priority) if one can be found. If not, return None.
Parameters
==========
section: the section in the config, defaults to self.name
name: they key (index) of the setting to look up
default: (optional) if not found, return default instead.
user: if True, load from user config. Otherwise, main config | [
"return",
"a",
"setting",
"from",
"the",
"environment",
"(",
"first",
"priority",
")",
"and",
"then",
"secrets",
"(",
"second",
"priority",
")",
"if",
"one",
"can",
"be",
"found",
".",
"If",
"not",
"return",
"None",
"."
] | python | train |
HazyResearch/metal | metal/tuners/random_tuner.py | https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/tuners/random_tuner.py#L14-L86 | def search(
self,
search_space,
valid_data,
init_args=[],
train_args=[],
init_kwargs={},
train_kwargs={},
module_args={},
module_kwargs={},
max_search=None,
shuffle=True,
verbose=True,
clean_up=True,
seed=None,
**score_kwargs,
):
"""
Args:
search_space: see config_generator() documentation
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
init_args: (list) positional args for initializing the model
train_args: (list) positional args for training the model
init_kwargs: (dict) keyword args for initializing the model
train_kwargs: (dict) keyword args for training the model
module_args: (dict) Dictionary of lists of module args
module_kwargs: (dict) Dictionary of dictionaries of module kwargs
max_search: see config_generator() documentation
shuffle: see config_generator() documentation
Returns:
best_model: the highest performing trained model
Note: Initialization is performed by ModelTuner instead of passing a
pre-initialized model so that tuning may be performed over all model
parameters, including the network architecture (which is defined before
the train loop).
"""
self._clear_state(seed)
self.search_space = search_space
# Generate configs
configs = self.config_generator(search_space, max_search, self.rng, shuffle)
# Commence search
for i, config in enumerate(configs):
score, model = self._test_model_config(
i,
config,
valid_data,
init_args=init_args,
train_args=train_args,
init_kwargs=init_kwargs,
train_kwargs=train_kwargs,
module_args=module_args,
module_kwargs=module_kwargs,
verbose=verbose,
**score_kwargs,
)
if verbose:
print("=" * 60)
print(f"[SUMMARY]")
print(f"Best model: [{self.best_index}]")
print(f"Best config: {self.best_config}")
print(f"Best score: {self.best_score}")
print("=" * 60)
self._save_report()
# Return best model
return self._load_best_model(clean_up=clean_up) | [
"def",
"search",
"(",
"self",
",",
"search_space",
",",
"valid_data",
",",
"init_args",
"=",
"[",
"]",
",",
"train_args",
"=",
"[",
"]",
",",
"init_kwargs",
"=",
"{",
"}",
",",
"train_kwargs",
"=",
"{",
"}",
",",
"module_args",
"=",
"{",
"}",
",",
... | Args:
search_space: see config_generator() documentation
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
init_args: (list) positional args for initializing the model
train_args: (list) positional args for training the model
init_kwargs: (dict) keyword args for initializing the model
train_kwargs: (dict) keyword args for training the model
module_args: (dict) Dictionary of lists of module args
module_kwargs: (dict) Dictionary of dictionaries of module kwargs
max_search: see config_generator() documentation
shuffle: see config_generator() documentation
Returns:
best_model: the highest performing trained model
Note: Initialization is performed by ModelTuner instead of passing a
pre-initialized model so that tuning may be performed over all model
parameters, including the network architecture (which is defined before
the train loop). | [
"Args",
":",
"search_space",
":",
"see",
"config_generator",
"()",
"documentation",
"valid_data",
":",
"a",
"tuple",
"of",
"Tensors",
"(",
"X",
"Y",
")",
"a",
"Dataset",
"or",
"a",
"DataLoader",
"of",
"X",
"(",
"data",
")",
"and",
"Y",
"(",
"labels",
"... | python | train |
gofed/gofedlib | gofedlib/repository/githubclient.py | https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/repository/githubclient.py#L69-L80 | def commit(self, commit):
"""Get data for a given commit
Raises KeyError if a commit is not found or not parsed.
:param commit: repository commit
:type commit: string
"""
try:
return self._commitData(self.repo.get_commit(commit))
except (ValueError, KeyError, GithubException):
raise KeyError("Commit %s not found" % commit) | [
"def",
"commit",
"(",
"self",
",",
"commit",
")",
":",
"try",
":",
"return",
"self",
".",
"_commitData",
"(",
"self",
".",
"repo",
".",
"get_commit",
"(",
"commit",
")",
")",
"except",
"(",
"ValueError",
",",
"KeyError",
",",
"GithubException",
")",
":... | Get data for a given commit
Raises KeyError if a commit is not found or not parsed.
:param commit: repository commit
:type commit: string | [
"Get",
"data",
"for",
"a",
"given",
"commit"
] | python | train |
fumitoh/modelx | modelx/io/excel.py | https://github.com/fumitoh/modelx/blob/0180da34d052c44fb94dab9e115e218bbebfc9c3/modelx/io/excel.py#L161-L241 | def _get_namedrange(book, rangename, sheetname=None):
"""Get range from a workbook.
A workbook can contain multiple definitions for a single name,
as a name can be defined for the entire book or for
a particular sheet.
If sheet is None, the book-wide def is searched,
otherwise sheet-local def is looked up.
Args:
book: An openpyxl workbook object.
rangename (str): Range expression, such as "A1", "$G4:$K10",
named range "NamedRange1".
sheetname (str, optional): None for book-wide name def,
sheet name for sheet-local named range.
Returns:
Range object specified by the name.
"""
def cond(namedef):
if namedef.type.upper() == "RANGE":
if namedef.name.upper() == rangename.upper():
if sheetname is None:
if not namedef.localSheetId:
return True
else: # sheet local name
sheet_id = [sht.upper() for sht in book.sheetnames].index(
sheetname.upper()
)
if namedef.localSheetId == sheet_id:
return True
return False
def get_destinations(name_def):
"""Workaround for the bug in DefinedName.destinations"""
from openpyxl.formula import Tokenizer
from openpyxl.utils.cell import SHEETRANGE_RE
if name_def.type == "RANGE":
tok = Tokenizer("=" + name_def.value)
for part in tok.items:
if part.subtype == "RANGE":
m = SHEETRANGE_RE.match(part.value)
if m.group("quoted"):
sheet_name = m.group("quoted")
else:
sheet_name = m.group("notquoted")
yield sheet_name, m.group("cells")
namedef = next(
(item for item in book.defined_names.definedName if cond(item)), None
)
if namedef is None:
return None
dests = get_destinations(namedef)
xlranges = []
sheetnames_upper = [name.upper() for name in book.sheetnames]
for sht, addr in dests:
if sheetname:
sht = sheetname
index = sheetnames_upper.index(sht.upper())
xlranges.append(book.worksheets[index][addr])
if len(xlranges) == 1:
return xlranges[0]
else:
return xlranges | [
"def",
"_get_namedrange",
"(",
"book",
",",
"rangename",
",",
"sheetname",
"=",
"None",
")",
":",
"def",
"cond",
"(",
"namedef",
")",
":",
"if",
"namedef",
".",
"type",
".",
"upper",
"(",
")",
"==",
"\"RANGE\"",
":",
"if",
"namedef",
".",
"name",
"."... | Get range from a workbook.
A workbook can contain multiple definitions for a single name,
as a name can be defined for the entire book or for
a particular sheet.
If sheet is None, the book-wide def is searched,
otherwise sheet-local def is looked up.
Args:
book: An openpyxl workbook object.
rangename (str): Range expression, such as "A1", "$G4:$K10",
named range "NamedRange1".
sheetname (str, optional): None for book-wide name def,
sheet name for sheet-local named range.
Returns:
Range object specified by the name. | [
"Get",
"range",
"from",
"a",
"workbook",
"."
] | python | valid |
eerimoq/bitstruct | bitstruct.py | https://github.com/eerimoq/bitstruct/blob/8e887c10241aa51c2a77c10e9923bb3978b15bcb/bitstruct.py#L523-L534 | def pack_into(fmt, buf, offset, *args, **kwargs):
"""Pack given values v1, v2, ... into given bytearray `buf`, starting
at given bit offset `offset`. Pack according to given format
string `fmt`. Give `fill_padding` as ``False`` to leave padding
bits in `buf` unmodified.
"""
return CompiledFormat(fmt).pack_into(buf,
offset,
*args,
**kwargs) | [
"def",
"pack_into",
"(",
"fmt",
",",
"buf",
",",
"offset",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"CompiledFormat",
"(",
"fmt",
")",
".",
"pack_into",
"(",
"buf",
",",
"offset",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
"... | Pack given values v1, v2, ... into given bytearray `buf`, starting
at given bit offset `offset`. Pack according to given format
string `fmt`. Give `fill_padding` as ``False`` to leave padding
bits in `buf` unmodified. | [
"Pack",
"given",
"values",
"v1",
"v2",
"...",
"into",
"given",
"bytearray",
"buf",
"starting",
"at",
"given",
"bit",
"offset",
"offset",
".",
"Pack",
"according",
"to",
"given",
"format",
"string",
"fmt",
".",
"Give",
"fill_padding",
"as",
"False",
"to",
"... | python | valid |
clalancette/pycdlib | pycdlib/pycdlib.py | https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L5489-L5528 | def get_record(self, **kwargs):
# type: (str) -> Union[dr.DirectoryRecord, udfmod.UDFFileEntry]
'''
Get the directory record for a particular path.
Parameters:
iso_path - The absolute path on the ISO9660 filesystem to get the
record for.
rr_path - The absolute path on the Rock Ridge filesystem to get the
record for.
joliet_path - The absolute path on the Joliet filesystem to get the
record for.
udf_path - The absolute path on the UDF filesystem to get the record
for.
Returns:
An object that represents the path. This may be a dr.DirectoryRecord
object (in the cases of iso_path, rr_path, or joliet_path), or a
udf.UDFFileEntry object (in the case of udf_path).
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
num_paths = 0
for key in kwargs:
if key in ['joliet_path', 'rr_path', 'iso_path', 'udf_path']:
if kwargs[key] is not None:
num_paths += 1
else:
raise pycdlibexception.PyCdlibInvalidInput("Invalid keyword, must be one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")
if num_paths != 1:
raise pycdlibexception.PyCdlibInvalidInput("Must specify one, and only one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")
if 'joliet_path' in kwargs:
return self._get_entry(None, None, self._normalize_joliet_path(kwargs['joliet_path']))
if 'rr_path' in kwargs:
return self._get_entry(None, utils.normpath(kwargs['rr_path']), None)
if 'udf_path' in kwargs:
return self._get_udf_entry(kwargs['udf_path'])
return self._get_entry(utils.normpath(kwargs['iso_path']), None, None) | [
"def",
"get_record",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (str) -> Union[dr.DirectoryRecord, udfmod.UDFFileEntry]",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidInput",
"(",
"'This object is not yet in... | Get the directory record for a particular path.
Parameters:
iso_path - The absolute path on the ISO9660 filesystem to get the
record for.
rr_path - The absolute path on the Rock Ridge filesystem to get the
record for.
joliet_path - The absolute path on the Joliet filesystem to get the
record for.
udf_path - The absolute path on the UDF filesystem to get the record
for.
Returns:
An object that represents the path. This may be a dr.DirectoryRecord
object (in the cases of iso_path, rr_path, or joliet_path), or a
udf.UDFFileEntry object (in the case of udf_path). | [
"Get",
"the",
"directory",
"record",
"for",
"a",
"particular",
"path",
"."
] | python | train |
niccokunzmann/ObservableList | ObservableList/__init__.py | https://github.com/niccokunzmann/ObservableList/blob/e5f6a93d82d2d13b248c7840ae74f98a4ba58c90/ObservableList/__init__.py#L252-L261 | def pop(self, index=-1):
"""See list.pop."""
if not isinstance(index, int):
if PY2:
raise TypeError('an integer is required')
raise TypeError("'str' object cannot be interpreted as an integer")
length = len(self)
if -length <= index < length:
self._notify_remove_at(index)
return super(ObservableList, self).pop(index) | [
"def",
"pop",
"(",
"self",
",",
"index",
"=",
"-",
"1",
")",
":",
"if",
"not",
"isinstance",
"(",
"index",
",",
"int",
")",
":",
"if",
"PY2",
":",
"raise",
"TypeError",
"(",
"'an integer is required'",
")",
"raise",
"TypeError",
"(",
"\"'str' object cann... | See list.pop. | [
"See",
"list",
".",
"pop",
"."
] | python | train |
rigetti/pyquil | pyquil/api/_qpu.py | https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_qpu.py#L273-L294 | def _resolve_memory_references(self, expression: Expression) -> Union[float, int]:
"""
Traverse the given Expression, and replace any Memory References with whatever values
have been so far provided by the user for those memory spaces. Declared memory defaults
to zero.
:param expression: an Expression
"""
if isinstance(expression, BinaryExp):
left = self._resolve_memory_references(expression.op1)
right = self._resolve_memory_references(expression.op2)
return expression.fn(left, right)
elif isinstance(expression, Function):
return expression.fn(self._resolve_memory_references(expression.expression))
elif isinstance(expression, Parameter):
raise ValueError(f"Unexpected Parameter in gate expression: {expression}")
elif isinstance(expression, float) or isinstance(expression, int):
return expression
elif isinstance(expression, MemoryReference):
return self._variables_shim.get(ParameterAref(name=expression.name, index=expression.offset), 0)
else:
raise ValueError(f"Unexpected expression in gate parameter: {expression}") | [
"def",
"_resolve_memory_references",
"(",
"self",
",",
"expression",
":",
"Expression",
")",
"->",
"Union",
"[",
"float",
",",
"int",
"]",
":",
"if",
"isinstance",
"(",
"expression",
",",
"BinaryExp",
")",
":",
"left",
"=",
"self",
".",
"_resolve_memory_refe... | Traverse the given Expression, and replace any Memory References with whatever values
have been so far provided by the user for those memory spaces. Declared memory defaults
to zero.
:param expression: an Expression | [
"Traverse",
"the",
"given",
"Expression",
"and",
"replace",
"any",
"Memory",
"References",
"with",
"whatever",
"values",
"have",
"been",
"so",
"far",
"provided",
"by",
"the",
"user",
"for",
"those",
"memory",
"spaces",
".",
"Declared",
"memory",
"defaults",
"t... | python | train |
Alignak-monitoring/alignak | alignak/objects/item.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/item.py#L1427-L1454 | def linkify_with_timeperiods(self, timeperiods, prop):
"""
Link items with timeperiods items
:param timeperiods: all timeperiods object
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param prop: property name
:type prop: str
:return: None
"""
for i in self:
if not hasattr(i, prop):
continue
tpname = getattr(i, prop).strip()
# some default values are '', so set None
if not tpname:
setattr(i, prop, '')
continue
# Ok, get a real name, search for it
timeperiod = timeperiods.find_by_name(tpname)
if timeperiod is None:
i.add_error("The %s of the %s '%s' named '%s' is unknown!"
% (prop, i.__class__.my_type, i.get_name(), tpname))
continue
setattr(i, prop, timeperiod.uuid) | [
"def",
"linkify_with_timeperiods",
"(",
"self",
",",
"timeperiods",
",",
"prop",
")",
":",
"for",
"i",
"in",
"self",
":",
"if",
"not",
"hasattr",
"(",
"i",
",",
"prop",
")",
":",
"continue",
"tpname",
"=",
"getattr",
"(",
"i",
",",
"prop",
")",
".",
... | Link items with timeperiods items
:param timeperiods: all timeperiods object
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param prop: property name
:type prop: str
:return: None | [
"Link",
"items",
"with",
"timeperiods",
"items"
] | python | train |
grahambell/pymoc | lib/pymoc/util/tool.py | https://github.com/grahambell/pymoc/blob/0e2e57ce07ff3de6ac024627c1fb6ad30c2fde48/lib/pymoc/util/tool.py#L307-L322 | def normalize(self):
"""Normalize the MOC to a given order.
This command takes a MOC order (0-29) and normalizes the MOC so that
its maximum order is the given order.
::
pymoctool a.fits --normalize 10 --output a_10.fits
"""
if self.moc is None:
raise CommandError('No MOC information present for normalization')
order = int(self.params.pop())
self.moc.normalize(order) | [
"def",
"normalize",
"(",
"self",
")",
":",
"if",
"self",
".",
"moc",
"is",
"None",
":",
"raise",
"CommandError",
"(",
"'No MOC information present for normalization'",
")",
"order",
"=",
"int",
"(",
"self",
".",
"params",
".",
"pop",
"(",
")",
")",
"self",... | Normalize the MOC to a given order.
This command takes a MOC order (0-29) and normalizes the MOC so that
its maximum order is the given order.
::
pymoctool a.fits --normalize 10 --output a_10.fits | [
"Normalize",
"the",
"MOC",
"to",
"a",
"given",
"order",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/core/tensors.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/tensors.py#L437-L512 | def get_ieee_rotation(structure, refine_rotation=True):
"""
Given a structure associated with a tensor, determines
the rotation matrix for IEEE conversion according to
the 1987 IEEE standards.
Args:
structure (Structure): a structure associated with the
tensor to be converted to the IEEE standard
refine_rotation (bool): whether to refine the rotation
using SquareTensor.refine_rotation
"""
# Check conventional setting:
sga = SpacegroupAnalyzer(structure)
dataset = sga.get_symmetry_dataset()
trans_mat = dataset['transformation_matrix']
conv_latt = Lattice(np.transpose(np.dot(np.transpose(
structure.lattice.matrix), np.linalg.inv(trans_mat))))
xtal_sys = sga.get_crystal_system()
vecs = conv_latt.matrix
lengths = np.array(conv_latt.abc)
angles = np.array(conv_latt.angles)
rotation = np.zeros((3, 3))
# IEEE rules: a,b,c || x1,x2,x3
if xtal_sys == "cubic":
rotation = [vecs[i] / lengths[i] for i in range(3)]
# IEEE rules: a=b in length; c,a || x3, x1
elif xtal_sys == "tetragonal":
rotation = np.array([vec / mag for (mag, vec) in
sorted(zip(lengths, vecs),
key=lambda x: x[0])])
if abs(lengths[2] - lengths[1]) < abs(lengths[1] - lengths[0]):
rotation[0], rotation[2] = rotation[2], rotation[0].copy()
rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))
# IEEE rules: c<a<b; c,a || x3,x1
elif xtal_sys == "orthorhombic":
rotation = [vec / mag for (mag, vec) in sorted(zip(lengths, vecs))]
rotation = np.roll(rotation, 2, axis=0)
# IEEE rules: c,a || x3,x1, c is threefold axis
# Note this also includes rhombohedral crystal systems
elif xtal_sys in ("trigonal", "hexagonal"):
# find threefold axis:
tf_index = np.argmin(abs(angles - 120.))
non_tf_mask = np.logical_not(angles == angles[tf_index])
rotation[2] = get_uvec(vecs[tf_index])
rotation[0] = get_uvec(vecs[non_tf_mask][0])
rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))
# IEEE rules: b,c || x2,x3; alpha=beta=90, c<a
elif xtal_sys == "monoclinic":
# Find unique axis
u_index = np.argmax(abs(angles - 90.))
n_umask = np.logical_not(angles == angles[u_index])
rotation[1] = get_uvec(vecs[u_index])
# Shorter of remaining lattice vectors for c axis
c = [vec / mag for (mag, vec) in
sorted(zip(lengths[n_umask], vecs[n_umask]))][0]
rotation[2] = np.array(c)
rotation[0] = np.cross(rotation[1], rotation[2])
# IEEE rules: c || x3, x2 normal to ac plane
elif xtal_sys == "triclinic":
rotation = [vec / mag for (mag, vec) in sorted(zip(lengths, vecs))]
rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))
rotation[0] = np.cross(rotation[1], rotation[2])
rotation = SquareTensor(rotation)
if refine_rotation:
rotation = rotation.refine_rotation()
return rotation | [
"def",
"get_ieee_rotation",
"(",
"structure",
",",
"refine_rotation",
"=",
"True",
")",
":",
"# Check conventional setting:",
"sga",
"=",
"SpacegroupAnalyzer",
"(",
"structure",
")",
"dataset",
"=",
"sga",
".",
"get_symmetry_dataset",
"(",
")",
"trans_mat",
"=",
"... | Given a structure associated with a tensor, determines
the rotation matrix for IEEE conversion according to
the 1987 IEEE standards.
Args:
structure (Structure): a structure associated with the
tensor to be converted to the IEEE standard
refine_rotation (bool): whether to refine the rotation
using SquareTensor.refine_rotation | [
"Given",
"a",
"structure",
"associated",
"with",
"a",
"tensor",
"determines",
"the",
"rotation",
"matrix",
"for",
"IEEE",
"conversion",
"according",
"to",
"the",
"1987",
"IEEE",
"standards",
"."
] | python | train |
pandas-dev/pandas | pandas/tseries/holiday.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L75-L84 | def nearest_workday(dt):
"""
If holiday falls on Saturday, use day before (Friday) instead;
if holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt | [
"def",
"nearest_workday",
"(",
"dt",
")",
":",
"if",
"dt",
".",
"weekday",
"(",
")",
"==",
"5",
":",
"return",
"dt",
"-",
"timedelta",
"(",
"1",
")",
"elif",
"dt",
".",
"weekday",
"(",
")",
"==",
"6",
":",
"return",
"dt",
"+",
"timedelta",
"(",
... | If holiday falls on Saturday, use day before (Friday) instead;
if holiday falls on Sunday, use day thereafter (Monday) instead. | [
"If",
"holiday",
"falls",
"on",
"Saturday",
"use",
"day",
"before",
"(",
"Friday",
")",
"instead",
";",
"if",
"holiday",
"falls",
"on",
"Sunday",
"use",
"day",
"thereafter",
"(",
"Monday",
")",
"instead",
"."
] | python | train |
MisterWil/skybellpy | skybellpy/device.py | https://github.com/MisterWil/skybellpy/blob/ac966d9f590cda7654f6de7eecc94e2103459eef/skybellpy/device.py#L342-L385 | def _validate_setting(setting, value):
"""Validate the setting and value."""
if setting not in CONST.ALL_SETTINGS:
raise SkybellException(ERROR.INVALID_SETTING, setting)
if setting == CONST.SETTINGS_DO_NOT_DISTURB:
if value not in CONST.SETTINGS_DO_NOT_DISTURB_VALUES:
raise SkybellException(ERROR.INVALID_SETTING_VALUE,
(setting, value))
if setting == CONST.SETTINGS_OUTDOOR_CHIME:
if value not in CONST.SETTINGS_OUTDOOR_CHIME_VALUES:
raise SkybellException(ERROR.INVALID_SETTING_VALUE,
(setting, value))
if setting == CONST.SETTINGS_MOTION_POLICY:
if value not in CONST.SETTINGS_MOTION_POLICY_VALUES:
raise SkybellException(ERROR.INVALID_SETTING_VALUE,
(setting, value))
if setting == CONST.SETTINGS_MOTION_THRESHOLD:
if value not in CONST.SETTINGS_MOTION_THRESHOLD_VALUES:
raise SkybellException(ERROR.INVALID_SETTING_VALUE,
(setting, value))
if setting == CONST.SETTINGS_VIDEO_PROFILE:
if value not in CONST.SETTINGS_VIDEO_PROFILE_VALUES:
raise SkybellException(ERROR.INVALID_SETTING_VALUE,
(setting, value))
if setting in CONST.SETTINGS_LED_COLOR:
if (value < CONST.SETTINGS_LED_VALUES[0] or
value > CONST.SETTINGS_LED_VALUES[1]):
raise SkybellException(ERROR.INVALID_SETTING_VALUE,
(setting, value))
if setting == CONST.SETTINGS_LED_INTENSITY:
if not isinstance(value, int):
raise SkybellException(ERROR.COLOR_INTENSITY_NOT_VALID, value)
if (value < CONST.SETTINGS_LED_INTENSITY_VALUES[0] or
value > CONST.SETTINGS_LED_INTENSITY_VALUES[1]):
raise SkybellException(ERROR.INVALID_SETTING_VALUE,
(setting, value)) | [
"def",
"_validate_setting",
"(",
"setting",
",",
"value",
")",
":",
"if",
"setting",
"not",
"in",
"CONST",
".",
"ALL_SETTINGS",
":",
"raise",
"SkybellException",
"(",
"ERROR",
".",
"INVALID_SETTING",
",",
"setting",
")",
"if",
"setting",
"==",
"CONST",
".",
... | Validate the setting and value. | [
"Validate",
"the",
"setting",
"and",
"value",
"."
] | python | train |
StackStorm/pybind | pybind/nos/v7_2_0/interface/hundredgigabitethernet/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v7_2_0/interface/hundredgigabitethernet/__init__.py#L1672-L1693 | def _set_bpdu_drop(self, v, load=False):
"""
Setter method for bpdu_drop, mapped from YANG variable /interface/hundredgigabitethernet/bpdu_drop (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bpdu_drop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bpdu_drop() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=bpdu_drop.bpdu_drop, is_container='container', presence=False, yang_name="bpdu-drop", rest_name="bpdu-drop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Drop received BPDUs', u'callpoint': u'phy-stp-config', u'sort-priority': u'105', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bpdu_drop must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=bpdu_drop.bpdu_drop, is_container='container', presence=False, yang_name="bpdu-drop", rest_name="bpdu-drop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Drop received BPDUs', u'callpoint': u'phy-stp-config', u'sort-priority': u'105', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
})
self.__bpdu_drop = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_bpdu_drop",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base... | Setter method for bpdu_drop, mapped from YANG variable /interface/hundredgigabitethernet/bpdu_drop (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bpdu_drop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bpdu_drop() directly. | [
"Setter",
"method",
"for",
"bpdu_drop",
"mapped",
"from",
"YANG",
"variable",
"/",
"interface",
"/",
"hundredgigabitethernet",
"/",
"bpdu_drop",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in... | python | train |
rigetti/grove | grove/measurements/term_grouping.py | https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/measurements/term_grouping.py#L121-L136 | def check_trivial_commutation(pauli_list, single_pauli_term):
"""
Check if a PauliTerm trivially commutes with a list of other terms.
:param list pauli_list: A list of PauliTerm objects
:param PauliTerm single_pauli_term: A PauliTerm object
:returns: True if pauli_two object commutes with pauli_list, False otherwise
:rtype: bool
"""
if not isinstance(pauli_list, list):
raise TypeError("pauli_list should be a list")
for term in pauli_list:
if not _commutes(term, single_pauli_term):
return False
return True | [
"def",
"check_trivial_commutation",
"(",
"pauli_list",
",",
"single_pauli_term",
")",
":",
"if",
"not",
"isinstance",
"(",
"pauli_list",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"pauli_list should be a list\"",
")",
"for",
"term",
"in",
"pauli_list",
":... | Check if a PauliTerm trivially commutes with a list of other terms.
:param list pauli_list: A list of PauliTerm objects
:param PauliTerm single_pauli_term: A PauliTerm object
:returns: True if pauli_two object commutes with pauli_list, False otherwise
:rtype: bool | [
"Check",
"if",
"a",
"PauliTerm",
"trivially",
"commutes",
"with",
"a",
"list",
"of",
"other",
"terms",
"."
] | python | train |
bitlabstudio/django-development-fabfile | development_fabfile/fabfile/utils.py | https://github.com/bitlabstudio/django-development-fabfile/blob/a135c6eb5bdd0b496a7eccfd271aca558dd99243/development_fabfile/fabfile/utils.py#L9-L26 | def require_server(fn):
"""
Checks if the user has called the task with a server name.
Fabric tasks decorated with this decorator must be called like so::
fab <server name> <task name>
If no server name is given, the task will not be executed.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
if env.machine is None:
abort(red('ERROR: You must provide a server name to call this'
' task!'))
return fn(*args, **kwargs)
return wrapper | [
"def",
"require_server",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"env",
".",
"machine",
"is",
"None",
":",
"abort",
"(",
"red",
"(",
"'ERROR: You must provide a s... | Checks if the user has called the task with a server name.
Fabric tasks decorated with this decorator must be called like so::
fab <server name> <task name>
If no server name is given, the task will not be executed. | [
"Checks",
"if",
"the",
"user",
"has",
"called",
"the",
"task",
"with",
"a",
"server",
"name",
"."
] | python | train |
kkinder/NdbSearchableBase | NdbSearchableBase/SearchableModel.py | https://github.com/kkinder/NdbSearchableBase/blob/4f999336b464704a0929cec135c1f09fb1ddfb7c/NdbSearchableBase/SearchableModel.py#L49-L94 | def search(cls,
query_string,
options=None,
enable_facet_discovery=False,
return_facets=None,
facet_options=None,
facet_refinements=None,
deadline=None,
**kwargs):
"""
Searches the index. Conveniently searches only for documents that belong to instances of this class.
:param query_string: The query to match against documents in the index. See search.Query() for details.
:param options: A QueryOptions describing post-processing of search results.
:param enable_facet_discovery: discovery top relevent facets to this search query and return them.
:param return_facets: An iterable of FacetRequest or basestring as facet name to
return specific facet with the result.
:param facet_options: A FacetOption describing processing of facets.
:param facet_refinements: An iterable of FacetRefinement objects or refinement
token strings used to filter out search results based on a facet value.
refinements for different facets will be conjunction and refinements for
the same facet will be disjunction.
:param deadline: Deadline for RPC call in seconds; if None use the default.
:param kwargs: A SearchResults containing a list of documents matched, number returned
and number matched by the query.
:return: A SearchResults containing a list of documents matched, number returned
and number matched by the query.
:raises: QueryError: If the query string is not parseable.
TypeError: If any of the parameters have invalid types, or an unknown
attribute is passed.
ValueError: If any of the parameters have invalid values (e.g., a
negative deadline).
"""
search_class = cls.search_get_class_names()[-1]
query_string += ' ' + 'class_name:%s' % (search_class,)
q = search.Query(
query_string=query_string,
options=options,
enable_facet_discovery=enable_facet_discovery,
return_facets=return_facets,
facet_options=facet_options,
facet_refinements=facet_refinements
)
index = cls.search_get_index()
return index.search(q, deadline=deadline, **kwargs) | [
"def",
"search",
"(",
"cls",
",",
"query_string",
",",
"options",
"=",
"None",
",",
"enable_facet_discovery",
"=",
"False",
",",
"return_facets",
"=",
"None",
",",
"facet_options",
"=",
"None",
",",
"facet_refinements",
"=",
"None",
",",
"deadline",
"=",
"No... | Searches the index. Conveniently searches only for documents that belong to instances of this class.
:param query_string: The query to match against documents in the index. See search.Query() for details.
:param options: A QueryOptions describing post-processing of search results.
:param enable_facet_discovery: discovery top relevent facets to this search query and return them.
:param return_facets: An iterable of FacetRequest or basestring as facet name to
return specific facet with the result.
:param facet_options: A FacetOption describing processing of facets.
:param facet_refinements: An iterable of FacetRefinement objects or refinement
token strings used to filter out search results based on a facet value.
refinements for different facets will be conjunction and refinements for
the same facet will be disjunction.
:param deadline: Deadline for RPC call in seconds; if None use the default.
:param kwargs: A SearchResults containing a list of documents matched, number returned
and number matched by the query.
:return: A SearchResults containing a list of documents matched, number returned
and number matched by the query.
:raises: QueryError: If the query string is not parseable.
TypeError: If any of the parameters have invalid types, or an unknown
attribute is passed.
ValueError: If any of the parameters have invalid values (e.g., a
negative deadline). | [
"Searches",
"the",
"index",
".",
"Conveniently",
"searches",
"only",
"for",
"documents",
"that",
"belong",
"to",
"instances",
"of",
"this",
"class",
"."
] | python | train |
user-cont/conu | conu/backend/podman/image.py | https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/podman/image.py#L416-L425 | def get_metadata(self):
"""
Provide metadata about this image.
:return: ImageMetadata, Image metadata instance
"""
if self._metadata is None:
self._metadata = ImageMetadata()
inspect_to_metadata(self._metadata, self.inspect(refresh=True))
return self._metadata | [
"def",
"get_metadata",
"(",
"self",
")",
":",
"if",
"self",
".",
"_metadata",
"is",
"None",
":",
"self",
".",
"_metadata",
"=",
"ImageMetadata",
"(",
")",
"inspect_to_metadata",
"(",
"self",
".",
"_metadata",
",",
"self",
".",
"inspect",
"(",
"refresh",
... | Provide metadata about this image.
:return: ImageMetadata, Image metadata instance | [
"Provide",
"metadata",
"about",
"this",
"image",
"."
] | python | train |
econ-ark/HARK | HARK/core.py | https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/core.py#L494-L534 | def makeShockHistory(self):
'''
Makes a pre-specified history of shocks for the simulation. Shock variables should be named
in self.shock_vars, a list of strings that is subclass-specific. This method runs a subset
of the standard simulation loop by simulating only mortality and shocks; each variable named
in shock_vars is stored in a T_sim x AgentCount array in an attribute of self named X_hist.
Automatically sets self.read_shocks to True so that these pre-specified shocks are used for
all subsequent calls to simulate().
Parameters
----------
None
Returns
-------
None
'''
# Make sure time is flowing forward and re-initialize the simulation
orig_time = self.time_flow
self.timeFwd()
self.initializeSim()
# Make blank history arrays for each shock variable
for var_name in self.shock_vars:
setattr(self,var_name+'_hist',np.zeros((self.T_sim,self.AgentCount))+np.nan)
# Make and store the history of shocks for each period
for t in range(self.T_sim):
self.getMortality()
self.getShocks()
for var_name in self.shock_vars:
exec('self.' + var_name + '_hist[self.t_sim,:] = self.' + var_name)
self.t_sim += 1
self.t_age = self.t_age + 1 # Age all consumers by one period
self.t_cycle = self.t_cycle + 1 # Age all consumers within their cycle
self.t_cycle[self.t_cycle == self.T_cycle] = 0 # Resetting to zero for those who have reached the end
# Restore the flow of time and flag that shocks can be read rather than simulated
self.read_shocks = True
if not orig_time:
self.timeRev() | [
"def",
"makeShockHistory",
"(",
"self",
")",
":",
"# Make sure time is flowing forward and re-initialize the simulation",
"orig_time",
"=",
"self",
".",
"time_flow",
"self",
".",
"timeFwd",
"(",
")",
"self",
".",
"initializeSim",
"(",
")",
"# Make blank history arrays for... | Makes a pre-specified history of shocks for the simulation. Shock variables should be named
in self.shock_vars, a list of strings that is subclass-specific. This method runs a subset
of the standard simulation loop by simulating only mortality and shocks; each variable named
in shock_vars is stored in a T_sim x AgentCount array in an attribute of self named X_hist.
Automatically sets self.read_shocks to True so that these pre-specified shocks are used for
all subsequent calls to simulate().
Parameters
----------
None
Returns
-------
None | [
"Makes",
"a",
"pre",
"-",
"specified",
"history",
"of",
"shocks",
"for",
"the",
"simulation",
".",
"Shock",
"variables",
"should",
"be",
"named",
"in",
"self",
".",
"shock_vars",
"a",
"list",
"of",
"strings",
"that",
"is",
"subclass",
"-",
"specific",
".",... | python | train |
dgomes/pyipma | pyipma/station.py | https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/station.py#L37-L48 | async def get(cls, websession, lat, lon):
"""Retrieve the nearest station."""
self = Station(websession)
stations = await self.api.stations()
self.station = self._filter_closest(lat, lon, stations)
logger.info("Using %s as weather station", self.station.local)
return self | [
"async",
"def",
"get",
"(",
"cls",
",",
"websession",
",",
"lat",
",",
"lon",
")",
":",
"self",
"=",
"Station",
"(",
"websession",
")",
"stations",
"=",
"await",
"self",
".",
"api",
".",
"stations",
"(",
")",
"self",
".",
"station",
"=",
"self",
".... | Retrieve the nearest station. | [
"Retrieve",
"the",
"nearest",
"station",
"."
] | python | train |
mfcloud/python-zvm-sdk | zvmsdk/smtclient.py | https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmsdk/smtclient.py#L1580-L1658 | def _couple_nic(self, userid, vdev, vswitch_name,
active=False):
"""Couple NIC to vswitch by adding vswitch into user direct."""
if active:
self._is_active(userid)
msg = ('Start to couple nic device %(vdev)s of guest %(vm)s '
'with vswitch %(vsw)s'
% {'vdev': vdev, 'vm': userid, 'vsw': vswitch_name})
LOG.info(msg)
requestData = ' '.join((
'SMAPI %s' % userid,
"API Virtual_Network_Adapter_Connect_Vswitch_DM",
"--operands",
"-v %s" % vdev,
"-n %s" % vswitch_name))
try:
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to couple nic %s to vswitch %s for user %s "
"in the guest's user direct, error: %s" %
(vdev, vswitch_name, userid, err.format_message()))
self._couple_inactive_exception(err, userid, vdev, vswitch_name)
# the inst must be active, or this call will failed
if active:
requestData = ' '.join((
'SMAPI %s' % userid,
'API Virtual_Network_Adapter_Connect_Vswitch',
"--operands",
"-v %s" % vdev,
"-n %s" % vswitch_name))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err1:
results1 = err1.results
msg1 = err1.format_message()
if ((results1 is not None) and
(results1['rc'] == 204) and
(results1['rs'] == 20)):
LOG.warning("Virtual device %s already connected "
"on the active guest system", vdev)
else:
persist_OK = True
requestData = ' '.join((
'SMAPI %s' % userid,
'API Virtual_Network_Adapter_Disconnect_DM',
"--operands",
'-v %s' % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
results2 = err2.results
msg2 = err2.format_message()
if ((results2 is not None) and
(results2['rc'] == 212) and
(results2['rs'] == 32)):
persist_OK = True
else:
persist_OK = False
if persist_OK:
self._couple_active_exception(err1, userid, vdev,
vswitch_name)
else:
raise exception.SDKNetworkOperationError(rs=3,
nic=vdev, vswitch=vswitch_name,
couple_err=msg1, revoke_err=msg2)
"""Update information in switch table."""
self._NetDbOperator.switch_update_record_with_switch(userid, vdev,
vswitch_name)
msg = ('Couple nic device %(vdev)s of guest %(vm)s '
'with vswitch %(vsw)s successfully'
% {'vdev': vdev, 'vm': userid, 'vsw': vswitch_name})
LOG.info(msg) | [
"def",
"_couple_nic",
"(",
"self",
",",
"userid",
",",
"vdev",
",",
"vswitch_name",
",",
"active",
"=",
"False",
")",
":",
"if",
"active",
":",
"self",
".",
"_is_active",
"(",
"userid",
")",
"msg",
"=",
"(",
"'Start to couple nic device %(vdev)s of guest %(vm)... | Couple NIC to vswitch by adding vswitch into user direct. | [
"Couple",
"NIC",
"to",
"vswitch",
"by",
"adding",
"vswitch",
"into",
"user",
"direct",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/ptyprocess/ptyprocess.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/ptyprocess/ptyprocess.py#L830-L836 | def write(self, s):
"""Write the unicode string ``s`` to the pseudoterminal.
Returns the number of bytes written.
"""
b = s.encode(self.encoding)
return super(PtyProcessUnicode, self).write(b) | [
"def",
"write",
"(",
"self",
",",
"s",
")",
":",
"b",
"=",
"s",
".",
"encode",
"(",
"self",
".",
"encoding",
")",
"return",
"super",
"(",
"PtyProcessUnicode",
",",
"self",
")",
".",
"write",
"(",
"b",
")"
] | Write the unicode string ``s`` to the pseudoterminal.
Returns the number of bytes written. | [
"Write",
"the",
"unicode",
"string",
"s",
"to",
"the",
"pseudoterminal",
"."
] | python | train |
heikomuller/sco-client | scocli/__init__.py | https://github.com/heikomuller/sco-client/blob/c4afab71297f73003379bba4c1679be9dcf7cef8/scocli/__init__.py#L140-L162 | def get_api_references(self, api_url=None):
"""Get set of HATEOAS reference for the given SCO-API. Use the default
SCO-API if none is given. References are cached as they are not expected
to change.
Parameters
----------
Returns
-------
"""
# Get subject listing Url for SCO-API
if not api_url is None:
url = api_url
else:
url = self.api_url
# Check if API references are in local cache. If not send GET request
# and add the result to the local cache
if not url in self.apis:
self.apis[url] = sco.references_to_dict(
sco.JsonResource(url).json[sco.REF_LINKS]
)
return self.apis[url] | [
"def",
"get_api_references",
"(",
"self",
",",
"api_url",
"=",
"None",
")",
":",
"# Get subject listing Url for SCO-API",
"if",
"not",
"api_url",
"is",
"None",
":",
"url",
"=",
"api_url",
"else",
":",
"url",
"=",
"self",
".",
"api_url",
"# Check if API reference... | Get set of HATEOAS reference for the given SCO-API. Use the default
SCO-API if none is given. References are cached as they are not expected
to change.
Parameters
----------
Returns
------- | [
"Get",
"set",
"of",
"HATEOAS",
"reference",
"for",
"the",
"given",
"SCO",
"-",
"API",
".",
"Use",
"the",
"default",
"SCO",
"-",
"API",
"if",
"none",
"is",
"given",
".",
"References",
"are",
"cached",
"as",
"they",
"are",
"not",
"expected",
"to",
"chang... | python | train |
elastic/apm-agent-python | elasticapm/traces.py | https://github.com/elastic/apm-agent-python/blob/2975663d7bd22282dc39336b2c37b37c12c7a774/elasticapm/traces.py#L293-L312 | def begin_transaction(self, transaction_type, trace_parent=None):
"""
Start a new transactions and bind it in a thread-local variable
:returns the Transaction object
"""
if trace_parent:
is_sampled = bool(trace_parent.trace_options.recorded)
else:
is_sampled = self._sample_rate == 1.0 or self._sample_rate > random.random()
transaction = Transaction(self, transaction_type, trace_parent=trace_parent, is_sampled=is_sampled)
if trace_parent is None:
transaction.trace_parent = TraceParent(
constants.TRACE_CONTEXT_VERSION,
"%032x" % random.getrandbits(128),
transaction.id,
TracingOptions(recorded=is_sampled),
)
execution_context.set_transaction(transaction)
return transaction | [
"def",
"begin_transaction",
"(",
"self",
",",
"transaction_type",
",",
"trace_parent",
"=",
"None",
")",
":",
"if",
"trace_parent",
":",
"is_sampled",
"=",
"bool",
"(",
"trace_parent",
".",
"trace_options",
".",
"recorded",
")",
"else",
":",
"is_sampled",
"=",... | Start a new transactions and bind it in a thread-local variable
:returns the Transaction object | [
"Start",
"a",
"new",
"transactions",
"and",
"bind",
"it",
"in",
"a",
"thread",
"-",
"local",
"variable"
] | python | train |
polyaxon/polyaxon | polyaxon/db/models/experiments.py | https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/db/models/experiments.py#L205-L207 | def has_running_jobs(self) -> bool:
""""Return a boolean indicating if the experiment has any running jobs"""
return self.jobs.exclude(status__status__in=ExperimentLifeCycle.DONE_STATUS).exists() | [
"def",
"has_running_jobs",
"(",
"self",
")",
"->",
"bool",
":",
"return",
"self",
".",
"jobs",
".",
"exclude",
"(",
"status__status__in",
"=",
"ExperimentLifeCycle",
".",
"DONE_STATUS",
")",
".",
"exists",
"(",
")"
] | Return a boolean indicating if the experiment has any running jobs | [
"Return",
"a",
"boolean",
"indicating",
"if",
"the",
"experiment",
"has",
"any",
"running",
"jobs"
] | python | train |
anti1869/sunhead | src/sunhead/workers/http/ext/runtime.py | https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/workers/http/ext/runtime.py#L56-L63 | async def get(self):
"""Printing runtime statistics in JSON"""
context_data = self.get_context_data()
context_data.update(getattr(self.request.app, "stats", {}))
response = self.json_response(context_data)
return response | [
"async",
"def",
"get",
"(",
"self",
")",
":",
"context_data",
"=",
"self",
".",
"get_context_data",
"(",
")",
"context_data",
".",
"update",
"(",
"getattr",
"(",
"self",
".",
"request",
".",
"app",
",",
"\"stats\"",
",",
"{",
"}",
")",
")",
"response",... | Printing runtime statistics in JSON | [
"Printing",
"runtime",
"statistics",
"in",
"JSON"
] | python | train |
TadLeonard/tfatool | tfatool/sync.py | https://github.com/TadLeonard/tfatool/blob/12da2807b5fb538c5317ef255d846b32ceb174d0/tfatool/sync.py#L83-L115 | def up_down_by_arrival(*filters, local_dir=".",
remote_dir=DEFAULT_REMOTE_DIR):
"""Monitors a local directory and a remote FlashAir directory and
generates sets of new files to be uploaded or downloaded.
Sets to upload are generated in a tuple
like (Direction.up, {...}), while download sets to download
are generated in a tuple like (Direction.down, {...}). The generator yields
before each upload or download actually takes place."""
local_monitor = watch_local_files(*filters, local_dir=local_dir)
remote_monitor = watch_remote_files(*filters, remote_dir=remote_dir)
_, lfile_set = next(local_monitor)
_, rfile_set = next(remote_monitor)
_notify_sync_ready(len(lfile_set), local_dir, remote_dir)
_notify_sync_ready(len(rfile_set), remote_dir, local_dir)
processed = set()
for new_local, new_remote in zip(local_monitor, remote_monitor):
new_local, local_set = new_local
local_arrivals = {f for f in new_local if f.filename not in processed}
yield Direction.up, local_arrivals
if local_arrivals:
new_names.update(f.filename for f in local_arrivals)
_notify_sync(Direction.up, local_arrivals)
up_by_files(local_arrivals, remote_dir)
_notify_sync_ready(len(local_set), local_dir, remote_dir)
new_remote, remote_set = new_remote
remote_arrivals = {f for f in new_remote if f.filename not in processed}
yield Direction.down, remote_arrivals
if remote_arrivals:
new_names.update(f.filename for f in remote_arrivals)
_notify_sync(Direction.down, remote_arrivals)
yield Direction.down, remote_arrivals
down_by_files(remote_arrivals, local_dir)
_notify_sync_ready(len(remote_set), remote_dir, local_dir) | [
"def",
"up_down_by_arrival",
"(",
"*",
"filters",
",",
"local_dir",
"=",
"\".\"",
",",
"remote_dir",
"=",
"DEFAULT_REMOTE_DIR",
")",
":",
"local_monitor",
"=",
"watch_local_files",
"(",
"*",
"filters",
",",
"local_dir",
"=",
"local_dir",
")",
"remote_monitor",
"... | Monitors a local directory and a remote FlashAir directory and
generates sets of new files to be uploaded or downloaded.
Sets to upload are generated in a tuple
like (Direction.up, {...}), while download sets to download
are generated in a tuple like (Direction.down, {...}). The generator yields
before each upload or download actually takes place. | [
"Monitors",
"a",
"local",
"directory",
"and",
"a",
"remote",
"FlashAir",
"directory",
"and",
"generates",
"sets",
"of",
"new",
"files",
"to",
"be",
"uploaded",
"or",
"downloaded",
".",
"Sets",
"to",
"upload",
"are",
"generated",
"in",
"a",
"tuple",
"like",
... | python | train |
aetros/aetros-cli | aetros/logger.py | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/logger.py#L69-L143 | def attach(self, buffer, read_line=None):
"""
Read buffer until end (read() returns '') and sends it to self.logger and self.job_backend.
:param buffer: a buffer instance with block read() or readline() method
:param read_line: callable or True to read line per line. If callable is given, it will be executed per line
and ignores does not redirect the line to stdout/logger when callable returns False.
"""
bid = id(buffer)
self.attach_last_messages[bid] = b''
def reader():
current_line = b''
def handle_line(buf):
if chunk == b'':
return
if read_line and callable(read_line):
res = read_line(buf)
if res is False:
return False
elif res is not None:
buf = res
if hasattr(buf, 'encode'):
buf = buf.encode('utf-8')
self.attach_last_messages[bid] += buf
if len(self.attach_last_messages[bid]) > 21 * 1024:
self.attach_last_messages[bid] = self.attach_last_messages[bid][-20 * 1024:]
self.write(buf)
flush_char = b'\n'
while True:
try:
# needs to be 1 so we fetch data in near real-time
chunk = buffer.read(1)
if chunk == b'':
if current_line:
handle_line(current_line)
return
current_line += chunk
while flush_char in current_line:
pos = current_line.find(flush_char)
line = current_line[:pos+1]
current_line = current_line[pos+1:]
handle_line(line)
# todo, periodically flush by '\r' only (progress bars for example)
# and make sure only necessary data is sent (by applying \r and \b control characters)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
# we need to make sure, we continue to read otherwise the process of this buffer
# will block and we have a stuck process.
sys.__stderr__.write(traceback.format_exc() + '\n')
sys.__stderr__.flush()
thread = Thread(target=reader)
thread.daemon = True
thread.start()
def wait():
thread_join_non_blocking(thread)
self.send_buffer()
return wait | [
"def",
"attach",
"(",
"self",
",",
"buffer",
",",
"read_line",
"=",
"None",
")",
":",
"bid",
"=",
"id",
"(",
"buffer",
")",
"self",
".",
"attach_last_messages",
"[",
"bid",
"]",
"=",
"b''",
"def",
"reader",
"(",
")",
":",
"current_line",
"=",
"b''",
... | Read buffer until end (read() returns '') and sends it to self.logger and self.job_backend.
:param buffer: a buffer instance with block read() or readline() method
:param read_line: callable or True to read line per line. If callable is given, it will be executed per line
and ignores does not redirect the line to stdout/logger when callable returns False. | [
"Read",
"buffer",
"until",
"end",
"(",
"read",
"()",
"returns",
")",
"and",
"sends",
"it",
"to",
"self",
".",
"logger",
"and",
"self",
".",
"job_backend",
"."
] | python | train |
erinxocon/requests-xml | requests_xml.py | https://github.com/erinxocon/requests-xml/blob/923571ceae4ddd4f2f57a2fc8780d89b50f3e7a1/requests_xml.py#L133-L135 | def links(self) -> _Links:
"""All found links on page, in as–is form. Only works for Atom feeds."""
return list(set(x.text for x in self.xpath('//link'))) | [
"def",
"links",
"(",
"self",
")",
"->",
"_Links",
":",
"return",
"list",
"(",
"set",
"(",
"x",
".",
"text",
"for",
"x",
"in",
"self",
".",
"xpath",
"(",
"'//link'",
")",
")",
")"
] | All found links on page, in as–is form. Only works for Atom feeds. | [
"All",
"found",
"links",
"on",
"page",
"in",
"as–is",
"form",
".",
"Only",
"works",
"for",
"Atom",
"feeds",
"."
] | python | train |
twisted/axiom | axiom/scheduler.py | https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/scheduler.py#L268-L293 | def _transientSchedule(self, when, now):
"""
If the service is currently running, schedule a tick to happen no
later than C{when}.
@param when: The time at which to tick.
@type when: L{epsilon.extime.Time}
@param now: The current time.
@type now: L{epsilon.extime.Time}
"""
if not self.running:
return
if self.timer is not None:
if self.timer.getTime() < when.asPOSIXTimestamp():
return
self.timer.cancel()
delay = when.asPOSIXTimestamp() - now.asPOSIXTimestamp()
# reactor.callLater allows only positive delay values. The scheduler
# may want to have scheduled things in the past and that's OK, since we
# are dealing with Time() instances it's impossible to predict what
# they are relative to the current time from user code anyway.
delay = max(_EPSILON, delay)
self.timer = self.callLater(delay, self.tick)
self.nextEventAt = when | [
"def",
"_transientSchedule",
"(",
"self",
",",
"when",
",",
"now",
")",
":",
"if",
"not",
"self",
".",
"running",
":",
"return",
"if",
"self",
".",
"timer",
"is",
"not",
"None",
":",
"if",
"self",
".",
"timer",
".",
"getTime",
"(",
")",
"<",
"when"... | If the service is currently running, schedule a tick to happen no
later than C{when}.
@param when: The time at which to tick.
@type when: L{epsilon.extime.Time}
@param now: The current time.
@type now: L{epsilon.extime.Time} | [
"If",
"the",
"service",
"is",
"currently",
"running",
"schedule",
"a",
"tick",
"to",
"happen",
"no",
"later",
"than",
"C",
"{",
"when",
"}",
"."
] | python | train |
collectiveacuity/labPack | labpack/storage/aws/s3.py | https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/aws/s3.py#L2023-L2038 | def remove(self):
'''
a method to remove collection and all records in the collection
:return: string with confirmation of deletion
'''
title = '%s.remove' % self.__class__.__name__
# request bucket delete
self.s3.delete_bucket(self.bucket_name)
# return confirmation
exit_msg = '%s collection has been removed from S3.' % self.bucket_name
return exit_msg | [
"def",
"remove",
"(",
"self",
")",
":",
"title",
"=",
"'%s.remove'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# request bucket delete ",
"self",
".",
"s3",
".",
"delete_bucket",
"(",
"self",
".",
"bucket_name",
")",
"# return confirmation",
"exit_msg",
... | a method to remove collection and all records in the collection
:return: string with confirmation of deletion | [
"a",
"method",
"to",
"remove",
"collection",
"and",
"all",
"records",
"in",
"the",
"collection"
] | python | train |
notanumber/xapian-haystack | xapian_backend.py | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1478-L1490 | def _phrase_query(self, term_list, field_name, field_type):
"""
Returns a query that matches exact terms with
positional order (i.e. ["this", "thing"] != ["thing", "this"])
and no stem.
If `field_name` is not `None`, restrict to the field.
"""
term_list = [self._term_query(term, field_name, field_type,
stemmed=False) for term in term_list]
query = xapian.Query(xapian.Query.OP_PHRASE, term_list)
return query | [
"def",
"_phrase_query",
"(",
"self",
",",
"term_list",
",",
"field_name",
",",
"field_type",
")",
":",
"term_list",
"=",
"[",
"self",
".",
"_term_query",
"(",
"term",
",",
"field_name",
",",
"field_type",
",",
"stemmed",
"=",
"False",
")",
"for",
"term",
... | Returns a query that matches exact terms with
positional order (i.e. ["this", "thing"] != ["thing", "this"])
and no stem.
If `field_name` is not `None`, restrict to the field. | [
"Returns",
"a",
"query",
"that",
"matches",
"exact",
"terms",
"with",
"positional",
"order",
"(",
"i",
".",
"e",
".",
"[",
"this",
"thing",
"]",
"!",
"=",
"[",
"thing",
"this",
"]",
")",
"and",
"no",
"stem",
"."
] | python | train |
fmenabe/python-dokuwiki | dokuwiki.py | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L36-L44 | def date(date):
"""DokuWiki returns dates of `xmlrpclib`/`xmlrpc.client` ``DateTime``
type and the format changes between DokuWiki versions ... This function
convert *date* to a `datetime` object.
"""
date = date.value
return (datetime.strptime(date[:-5], '%Y-%m-%dT%H:%M:%S')
if len(date) == 24
else datetime.strptime(date, '%Y%m%dT%H:%M:%S')) | [
"def",
"date",
"(",
"date",
")",
":",
"date",
"=",
"date",
".",
"value",
"return",
"(",
"datetime",
".",
"strptime",
"(",
"date",
"[",
":",
"-",
"5",
"]",
",",
"'%Y-%m-%dT%H:%M:%S'",
")",
"if",
"len",
"(",
"date",
")",
"==",
"24",
"else",
"datetime... | DokuWiki returns dates of `xmlrpclib`/`xmlrpc.client` ``DateTime``
type and the format changes between DokuWiki versions ... This function
convert *date* to a `datetime` object. | [
"DokuWiki",
"returns",
"dates",
"of",
"xmlrpclib",
"/",
"xmlrpc",
".",
"client",
"DateTime",
"type",
"and",
"the",
"format",
"changes",
"between",
"DokuWiki",
"versions",
"...",
"This",
"function",
"convert",
"*",
"date",
"*",
"to",
"a",
"datetime",
"object",
... | python | train |
akissa/clamavmirror | clamavmirror/__init__.py | https://github.com/akissa/clamavmirror/blob/6ef1cfa9fb4fa4a7b8439004f1cd8775f51d77f6/clamavmirror/__init__.py#L175-L180 | def verify_sigfile(sigdir, sig):
"""Verify a signature file"""
cmd = ['sigtool', '-i', '%s/%s.cvd' % (sigdir, sig)]
sigtool = Popen(cmd, stdout=PIPE, stderr=PIPE)
ret_val = sigtool.wait()
return ret_val == 0 | [
"def",
"verify_sigfile",
"(",
"sigdir",
",",
"sig",
")",
":",
"cmd",
"=",
"[",
"'sigtool'",
",",
"'-i'",
",",
"'%s/%s.cvd'",
"%",
"(",
"sigdir",
",",
"sig",
")",
"]",
"sigtool",
"=",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"... | Verify a signature file | [
"Verify",
"a",
"signature",
"file"
] | python | train |
mitsei/dlkit | dlkit/records/assessment/basic/drag_and_drop_records.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/basic/drag_and_drop_records.py#L228-L233 | def get_coordinate_conditions(self):
"""stub"""
condition_list = deepcopy(self.my_osid_object._my_map['coordinateConditions'])
for condition in condition_list:
condition['coordinate'] = BasicCoordinate(condition['coordinate'])
return condition_list | [
"def",
"get_coordinate_conditions",
"(",
"self",
")",
":",
"condition_list",
"=",
"deepcopy",
"(",
"self",
".",
"my_osid_object",
".",
"_my_map",
"[",
"'coordinateConditions'",
"]",
")",
"for",
"condition",
"in",
"condition_list",
":",
"condition",
"[",
"'coordina... | stub | [
"stub"
] | python | train |
rq/django-rq | django_rq/queues.py | https://github.com/rq/django-rq/blob/f50097dfe44351bd2a2d9d40edb19150dfc6a168/django_rq/queues.py#L181-L216 | def get_queues(*queue_names, **kwargs):
"""
Return queue instances from specified queue names.
All instances must use the same Redis connection.
"""
from .settings import QUEUES
if len(queue_names) <= 1:
# Return "default" queue if no queue name is specified
# or one queue with specified name
return [get_queue(*queue_names, **kwargs)]
# will return more than one queue
# import job class only once for all queues
kwargs['job_class'] = get_job_class(kwargs.pop('job_class', None))
queue_params = QUEUES[queue_names[0]]
connection_params = filter_connection_params(queue_params)
queues = [get_queue(queue_names[0], **kwargs)]
# do consistency checks while building return list
for name in queue_names[1:]:
queue = get_queue(name, **kwargs)
if type(queue) is not type(queues[0]):
raise ValueError(
'Queues must have the same class.'
'"{0}" and "{1}" have '
'different classes'.format(name, queue_names[0]))
if connection_params != filter_connection_params(QUEUES[name]):
raise ValueError(
'Queues must have the same redis connection.'
'"{0}" and "{1}" have '
'different connections'.format(name, queue_names[0]))
queues.append(queue)
return queues | [
"def",
"get_queues",
"(",
"*",
"queue_names",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"settings",
"import",
"QUEUES",
"if",
"len",
"(",
"queue_names",
")",
"<=",
"1",
":",
"# Return \"default\" queue if no queue name is specified",
"# or one queue with speci... | Return queue instances from specified queue names.
All instances must use the same Redis connection. | [
"Return",
"queue",
"instances",
"from",
"specified",
"queue",
"names",
".",
"All",
"instances",
"must",
"use",
"the",
"same",
"Redis",
"connection",
"."
] | python | train |
boxed/mutmut | mutmut/__init__.py | https://github.com/boxed/mutmut/blob/dd3bbe9aba3168ed21b85fbfe0b654b150239697/mutmut/__init__.py#L512-L561 | def mutate_node(node, context):
"""
:type context: Context
"""
context.stack.append(node)
try:
if node.type in ('tfpdef', 'import_from', 'import_name'):
return
if node.start_pos[0] - 1 != context.current_line_index:
context.current_line_index = node.start_pos[0] - 1
context.index = 0 # indexes are unique per line, so start over here!
if hasattr(node, 'children'):
mutate_list_of_nodes(node, context=context)
# this is just an optimization to stop early
if context.number_of_performed_mutations and context.mutation_id != ALL:
return
mutation = mutations_by_type.get(node.type)
if mutation is None:
return
for key, value in sorted(mutation.items()):
old = getattr(node, key)
if context.exclude_line():
continue
new = evaluate(
value,
context=context,
node=node,
value=getattr(node, 'value', None),
children=getattr(node, 'children', None),
)
assert not callable(new)
if new is not None and new != old:
if context.should_mutate():
context.number_of_performed_mutations += 1
context.performed_mutation_ids.append(context.mutation_id_of_current_index)
setattr(node, key, new)
context.index += 1
# this is just an optimization to stop early
if context.number_of_performed_mutations and context.mutation_id != ALL:
return
finally:
context.stack.pop() | [
"def",
"mutate_node",
"(",
"node",
",",
"context",
")",
":",
"context",
".",
"stack",
".",
"append",
"(",
"node",
")",
"try",
":",
"if",
"node",
".",
"type",
"in",
"(",
"'tfpdef'",
",",
"'import_from'",
",",
"'import_name'",
")",
":",
"return",
"if",
... | :type context: Context | [
":",
"type",
"context",
":",
"Context"
] | python | valid |
benmack/eo-box | eobox/raster/cube.py | https://github.com/benmack/eo-box/blob/a291450c766bf50ea06adcdeb5729a4aad790ed5/eobox/raster/cube.py#L517-L562 | def read_data_by_variable(self, mask=True):
"""Reads and masks (if desired) the data and converts it in one dataframe per variable."""
def print_elapsed_time(start, last_stopped, prefix):
# print(f"{prefix} - Elapsed time [s] since start / last stopped: \
# {(int(time.time() - start_time))} / {(int(time.time() - last_stopped))}")
return time.time()
start_time = time.time()
last_stopped = time.time()
last_stopped = print_elapsed_time(start_time, last_stopped, "Starting chunk function")
verbose = False
self.read_data()
last_stopped = print_elapsed_time(start_time, last_stopped, "Data read")
# 2.
sc_chunk = self.convert_data_to_dataframe()
last_stopped = print_elapsed_time(start_time, last_stopped, "Data converted to df")
# 3.B.
if mask:
# 3.A.
ilocs_qa = np.where((self.df_layers["band"] == self.qa).values)[0]
df_qa = self.data.iloc[:, ilocs_qa]
df_qa.columns = self.df_layers["date"].iloc[ilocs_qa]
df_clearsky = df_qa.isin(self.qa_valid)
last_stopped = print_elapsed_time(start_time, last_stopped, "Clearsky df created")
return_bands = self.variables
else:
return_bands = self.variables + [self.qa]
dfs_variables = {}
for var in return_bands:
if verbose:
print("VARIABLE:", var)
ilocs_var = np.where((self.df_layers["band"] == var).values)[0]
df_var = self.data.iloc[:, ilocs_var]
df_var.columns = self.df_layers["date"].iloc[ilocs_var]
if mask:
df_var = df_var.where(df_clearsky, other=np.nan)
dfs_variables[var] = df_var
last_stopped = print_elapsed_time(start_time, last_stopped, "Clearsky df created")
self._data = dfs_variables
return self | [
"def",
"read_data_by_variable",
"(",
"self",
",",
"mask",
"=",
"True",
")",
":",
"def",
"print_elapsed_time",
"(",
"start",
",",
"last_stopped",
",",
"prefix",
")",
":",
"# print(f\"{prefix} - Elapsed time [s] since start / last stopped: \\",
"# {(int(time.time() - star... | Reads and masks (if desired) the data and converts it in one dataframe per variable. | [
"Reads",
"and",
"masks",
"(",
"if",
"desired",
")",
"the",
"data",
"and",
"converts",
"it",
"in",
"one",
"dataframe",
"per",
"variable",
"."
] | python | train |
facelessuser/wcmatch | wcmatch/_wcparse.py | https://github.com/facelessuser/wcmatch/blob/d153e7007cc73b994ae1ba553dc4584039f5c212/wcmatch/_wcparse.py#L1153-L1225 | def root(self, pattern, current):
"""Start parsing the pattern."""
self.set_after_start()
i = util.StringIter(pattern)
iter(i)
root_specified = False
if self.win_drive_detect:
m = RE_WIN_PATH.match(pattern)
if m:
drive = m.group(0).replace('\\\\', '\\')
if drive.endswith('\\'):
slash = True
drive = drive[:-1]
current.append(re.escape(drive))
if slash:
current.append(self.get_path_sep() + _ONE_OR_MORE)
i.advance(m.end(0))
self.consume_path_sep(i)
root_specified = True
elif pattern.startswith('\\\\'):
root_specified = True
elif not self.win_drive_detect and self.pathname and pattern.startswith('/'):
root_specified = True
if not root_specified and self.realpath:
current.append(_NO_WIN_ROOT if self.win_drive_detect else _NO_ROOT)
current.append('')
for c in i:
index = i.index
if self.extend and c in EXT_TYPES and self.parse_extend(c, i, current, True):
# Nothing to do
pass
elif c == '*':
self._handle_star(i, current)
elif c == '?':
current.append(self._restrict_sequence() + _QMARK)
elif c == '/':
if self.pathname:
self.set_start_dir()
self.clean_up_inverse(current)
current.append(self.get_path_sep() + _ONE_OR_MORE)
self.consume_path_sep(i)
else:
current.append(re.escape(c))
elif c == '\\':
index = i.index
try:
value = self._references(i)
if self.dir_start:
self.clean_up_inverse(current)
self.consume_path_sep(i)
current.append(value)
except StopIteration:
i.rewind(i.index - index)
current.append(re.escape(c))
elif c == '[':
index = i.index
try:
current.append(self._sequence(i))
except StopIteration:
i.rewind(i.index - index)
current.append(re.escape(c))
else:
current.append(re.escape(c))
self.update_dir_state()
self.clean_up_inverse(current)
if self.pathname:
current.append(_PATH_TRAIL % self.get_path_sep()) | [
"def",
"root",
"(",
"self",
",",
"pattern",
",",
"current",
")",
":",
"self",
".",
"set_after_start",
"(",
")",
"i",
"=",
"util",
".",
"StringIter",
"(",
"pattern",
")",
"iter",
"(",
"i",
")",
"root_specified",
"=",
"False",
"if",
"self",
".",
"win_d... | Start parsing the pattern. | [
"Start",
"parsing",
"the",
"pattern",
"."
] | python | train |
ewels/MultiQC | multiqc/modules/theta2/theta2.py | https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/theta2/theta2.py#L56-L72 | def parse_theta2_report (self, fh):
""" Parse the final THetA2 log file. """
parsed_data = {}
for l in fh:
if l.startswith('#'):
continue
else:
s = l.split("\t")
purities = s[1].split(',')
parsed_data['proportion_germline'] = float(purities[0]) * 100.0
for i, v in enumerate(purities[1:]):
if i <= 5:
parsed_data['proportion_tumour_{}'.format(i+1)] = float(v) * 100.0
else:
parsed_data['proportion_tumour_gt5'] = (float(v) * 100.0) + parsed_data.get('proportion_tumour_gt5', 0)
break
return parsed_data | [
"def",
"parse_theta2_report",
"(",
"self",
",",
"fh",
")",
":",
"parsed_data",
"=",
"{",
"}",
"for",
"l",
"in",
"fh",
":",
"if",
"l",
".",
"startswith",
"(",
"'#'",
")",
":",
"continue",
"else",
":",
"s",
"=",
"l",
".",
"split",
"(",
"\"\\t\"",
"... | Parse the final THetA2 log file. | [
"Parse",
"the",
"final",
"THetA2",
"log",
"file",
"."
] | python | train |
ambitioninc/django-query-builder | querybuilder/helpers.py | https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/helpers.py#L22-L53 | def set_value_for_keypath(item, keypath, value, create_if_needed=False, delimeter='.'):
"""
Sets the value for a keypath in a dictionary
if the keypath exists. This modifies the
original dictionary.
"""
if len(keypath) == 0:
return None
keys = keypath.split(delimeter)
if len(keys) > 1:
key = keys[0]
if create_if_needed:
item[key] = item.get(key, {})
if key in item:
if set_value_for_keypath(item[key], delimeter.join(keys[1:]), value,
create_if_needed=create_if_needed, delimeter=delimeter):
return item
return None
if create_if_needed:
item[keypath] = item.get(keypath, {})
if keypath in item:
item[keypath] = value
return item
else:
return None | [
"def",
"set_value_for_keypath",
"(",
"item",
",",
"keypath",
",",
"value",
",",
"create_if_needed",
"=",
"False",
",",
"delimeter",
"=",
"'.'",
")",
":",
"if",
"len",
"(",
"keypath",
")",
"==",
"0",
":",
"return",
"None",
"keys",
"=",
"keypath",
".",
"... | Sets the value for a keypath in a dictionary
if the keypath exists. This modifies the
original dictionary. | [
"Sets",
"the",
"value",
"for",
"a",
"keypath",
"in",
"a",
"dictionary",
"if",
"the",
"keypath",
"exists",
".",
"This",
"modifies",
"the",
"original",
"dictionary",
"."
] | python | train |
awslabs/sockeye | sockeye/lexical_constraints.py | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/lexical_constraints.py#L67-L80 | def add_phrase(self,
phrase: List[int]) -> None:
"""
Recursively adds a phrase to this trie node.
:param phrase: A list of word IDs to add to this trie node.
"""
if len(phrase) == 1:
self.final_ids.add(phrase[0])
else:
next_word = phrase[0]
if next_word not in self.children:
self.children[next_word] = AvoidTrie()
self.step(next_word).add_phrase(phrase[1:]) | [
"def",
"add_phrase",
"(",
"self",
",",
"phrase",
":",
"List",
"[",
"int",
"]",
")",
"->",
"None",
":",
"if",
"len",
"(",
"phrase",
")",
"==",
"1",
":",
"self",
".",
"final_ids",
".",
"add",
"(",
"phrase",
"[",
"0",
"]",
")",
"else",
":",
"next_... | Recursively adds a phrase to this trie node.
:param phrase: A list of word IDs to add to this trie node. | [
"Recursively",
"adds",
"a",
"phrase",
"to",
"this",
"trie",
"node",
"."
] | python | train |
RonenNess/Fileter | fileter/iterators/grep.py | https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/iterators/grep.py#L39-L55 | def process_file(self, path, dryrun):
"""
Print files path.
"""
# if dryrun just return files
if dryrun:
return path
# scan file and match lines
ret = []
with open(path, "r") as infile:
for line in infile:
if re.search(self.__exp, line):
ret.append(line)
# if found matches return list of lines, else return None
return ret if len(ret) > 0 else None | [
"def",
"process_file",
"(",
"self",
",",
"path",
",",
"dryrun",
")",
":",
"# if dryrun just return files",
"if",
"dryrun",
":",
"return",
"path",
"# scan file and match lines",
"ret",
"=",
"[",
"]",
"with",
"open",
"(",
"path",
",",
"\"r\"",
")",
"as",
"infi... | Print files path. | [
"Print",
"files",
"path",
"."
] | python | train |
hydraplatform/hydra-base | hydra_base/lib/users.py | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/users.py#L84-L106 | def add_user(user, **kwargs):
"""
Add a user
"""
#check_perm(kwargs.get('user_id'), 'add_user')
u = User()
u.username = user.username
u.display_name = user.display_name
user_id = _get_user_id(u.username)
#If the user is already there, cannot add another with
#the same username.
if user_id is not None:
raise HydraError("User %s already exists!"%user.username)
u.password = bcrypt.hashpw(str(user.password).encode('utf-8'), bcrypt.gensalt())
db.DBSession.add(u)
db.DBSession.flush()
return u | [
"def",
"add_user",
"(",
"user",
",",
"*",
"*",
"kwargs",
")",
":",
"#check_perm(kwargs.get('user_id'), 'add_user')",
"u",
"=",
"User",
"(",
")",
"u",
".",
"username",
"=",
"user",
".",
"username",
"u",
".",
"display_name",
"=",
"user",
".",
"display_name",
... | Add a user | [
"Add",
"a",
"user"
] | python | train |
timothydmorton/VESPA | vespa/populations.py | https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/populations.py#L451-L566 | def _make_kde(self, use_sklearn=False, bandwidth=None, rtol=1e-6,
sig_clip=50, no_sig_clip=False, cov_all=True,
**kwargs):
"""Creates KDE objects for 3-d shape parameter distribution
KDE represents likelihood as function of trapezoidal
shape parameters (log(delta), T, T/tau).
Uses :class:`scipy.stats.gaussian_kde`` KDE by default;
Scikit-learn KDE implementation tested a bit, but not
fully implemented.
:param use_sklearn:
Whether to use scikit-learn implementation of KDE.
Not yet fully implemented, so this should stay ``False``.
:param bandwidth, rtol:
Parameters for sklearn KDE.
:param **kwargs:
Additional keyword arguments passed to
:class:`scipy.stats.gaussian_kde``.
"""
try:
#define points that are ok to use
first_ok = ((self.stars['slope'] > 0) &
(self.stars['duration'] > 0) &
(self.stars['duration'] < self.period) &
(self.depth > 0))
except KeyError:
logging.warning('Must do trapezoid fits before making KDE.')
return
self.empty = False
if first_ok.sum() < 4:
logging.warning('Empty population ({}): < 4 valid systems! Cannot calculate lhood.'.format(self.model))
self.is_empty = True #will cause is_ruled_out to be true as well.
return
#raise EmptyPopulationError('< 4 valid systems in population')
logdeps = np.log10(self.depth)
durs = self.stars['duration']
slopes = self.stars['slope']
#Now sigma-clip those points that passed first cuts
ok = np.ones(len(logdeps), dtype=bool)
for x in [logdeps, durs, slopes]:
med = np.median(x[first_ok])
mad = np.median(np.absolute(x[first_ok] - med))
ok &= np.absolute(x - med) / mad < sig_clip
second_ok = first_ok & ok
# Before making KDE for real, first calculate
# covariance and inv_cov of uncut data, to use
# when it's cut, too.
points = np.array([durs[second_ok],
logdeps[second_ok],
slopes[second_ok]])
kde = gaussian_kde(np.vstack(points)) #backward compatibility?
cov_all = kde._data_covariance
icov_all = kde._data_inv_cov
factor = kde.factor
# OK, now cut the data for constraints & proceed
ok = second_ok & self.distok
logdeps = logdeps[ok]
durs = durs[ok]
slopes = slopes[ok]
if ok.sum() < 4 and not self.empty:
logging.warning('Empty population ({}): < 4 valid systems! Cannot calculate lhood.'.format(self.model))
self.is_empty = True
return
#raise EmptyPopulationError('< 4 valid systems in population')
if use_sklearn:
self.sklearn_kde = True
logdeps_normed = (logdeps - logdeps.mean())/logdeps.std()
durs_normed = (durs - durs.mean())/durs.std()
slopes_normed = (slopes - slopes.mean())/slopes.std()
#TODO: use sklearn preprocessing to replace below
self.mean_logdepth = logdeps.mean()
self.std_logdepth = logdeps.std()
self.mean_dur = durs.mean()
self.std_dur = durs.std()
self.mean_slope = slopes.mean()
self.std_slope = slopes.std()
points = np.array([logdeps_normed, durs_normed, slopes_normed])
#find best bandwidth. For some reason this doesn't work?
if bandwidth is None:
grid = GridSearchCV(KernelDensity(rtol=rtol),
{'bandwidth':np.linspace(0.05,1,50)})
grid.fit(points)
self._best_bandwidth = grid.best_params_
self.kde = grid.best_estimator_
else:
self.kde = KernelDensity(rtol=rtol, bandwidth=bandwidth).fit(points)
else:
self.sklearn_kde = False
points = np.array([durs, logdeps, slopes])
self.kde = gaussian_kde(np.vstack(points), **kwargs) #backward compatibility?
# Reset covariance based on uncut data
self.kde._data_covariance = cov_all
self.kde._data_inv_cov = icov_all
self.kde._compute_covariance() | [
"def",
"_make_kde",
"(",
"self",
",",
"use_sklearn",
"=",
"False",
",",
"bandwidth",
"=",
"None",
",",
"rtol",
"=",
"1e-6",
",",
"sig_clip",
"=",
"50",
",",
"no_sig_clip",
"=",
"False",
",",
"cov_all",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
... | Creates KDE objects for 3-d shape parameter distribution
KDE represents likelihood as function of trapezoidal
shape parameters (log(delta), T, T/tau).
Uses :class:`scipy.stats.gaussian_kde`` KDE by default;
Scikit-learn KDE implementation tested a bit, but not
fully implemented.
:param use_sklearn:
Whether to use scikit-learn implementation of KDE.
Not yet fully implemented, so this should stay ``False``.
:param bandwidth, rtol:
Parameters for sklearn KDE.
:param **kwargs:
Additional keyword arguments passed to
:class:`scipy.stats.gaussian_kde``. | [
"Creates",
"KDE",
"objects",
"for",
"3",
"-",
"d",
"shape",
"parameter",
"distribution"
] | python | train |
GoogleCloudPlatform/appengine-gcs-client | python/src/cloudstorage/rest_api.py | https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/d11078331ecd915d753c886e96a80133599f3f98/python/src/cloudstorage/rest_api.py#L65-L84 | def _make_sync_method(name):
"""Helper to synthesize a synchronous method from an async method name.
Used by the @add_sync_methods class decorator below.
Args:
name: The name of the synchronous method.
Returns:
A method (with first argument 'self') that retrieves and calls
self.<name>, passing its own arguments, expects it to return a
Future, and then waits for and returns that Future's result.
"""
def sync_wrapper(self, *args, **kwds):
method = getattr(self, name)
future = method(*args, **kwds)
return future.get_result()
return sync_wrapper | [
"def",
"_make_sync_method",
"(",
"name",
")",
":",
"def",
"sync_wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"method",
"=",
"getattr",
"(",
"self",
",",
"name",
")",
"future",
"=",
"method",
"(",
"*",
"args",
",",
"*",
... | Helper to synthesize a synchronous method from an async method name.
Used by the @add_sync_methods class decorator below.
Args:
name: The name of the synchronous method.
Returns:
A method (with first argument 'self') that retrieves and calls
self.<name>, passing its own arguments, expects it to return a
Future, and then waits for and returns that Future's result. | [
"Helper",
"to",
"synthesize",
"a",
"synchronous",
"method",
"from",
"an",
"async",
"method",
"name",
"."
] | python | train |
abingham/docopt-subcommands | docopt_subcommands/__init__.py | https://github.com/abingham/docopt-subcommands/blob/4b5cd75bb8eed01f9405345446ca58e9a29d67ad/docopt_subcommands/__init__.py#L8-L14 | def command(name=None):
"""A decorator to register a subcommand with the global `Subcommands` instance.
"""
def decorator(f):
_commands.append((name, f))
return f
return decorator | [
"def",
"command",
"(",
"name",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"_commands",
".",
"append",
"(",
"(",
"name",
",",
"f",
")",
")",
"return",
"f",
"return",
"decorator"
] | A decorator to register a subcommand with the global `Subcommands` instance. | [
"A",
"decorator",
"to",
"register",
"a",
"subcommand",
"with",
"the",
"global",
"Subcommands",
"instance",
"."
] | python | train |
pyfca/pyfca | pyfca/implications.py | https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L93-L107 | def A(g,i):
"""recursively constructs A line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Awidth(i)
An = A(g,i-1)
if g1:
return An<<n | An
else:
return int('1'*n,2)<<n | An
else:
if g1:
return int('00',2)
else:
return int('10',2) | [
"def",
"A",
"(",
"g",
",",
"i",
")",
":",
"g1",
"=",
"g",
"&",
"(",
"2",
"**",
"i",
")",
"if",
"i",
":",
"n",
"=",
"Awidth",
"(",
"i",
")",
"An",
"=",
"A",
"(",
"g",
",",
"i",
"-",
"1",
")",
"if",
"g1",
":",
"return",
"An",
"<<",
"n... | recursively constructs A line for g; i = len(g)-1 | [
"recursively",
"constructs",
"A",
"line",
"for",
"g",
";",
"i",
"=",
"len",
"(",
"g",
")",
"-",
"1"
] | python | train |
simpleai-team/simpleai | simpleai/machine_learning/classifiers.py | https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/machine_learning/classifiers.py#L322-L334 | def _max_gain_split(self, examples):
"""
Returns an OnlineInformationGain of the attribute with
max gain based on `examples`.
"""
gains = self._new_set_of_gain_counters()
for example in examples:
for gain in gains:
gain.add(example)
winner = max(gains, key=lambda gain: gain.get_gain())
if not winner.get_target_class_counts():
raise ValueError("Dataset is empty")
return winner | [
"def",
"_max_gain_split",
"(",
"self",
",",
"examples",
")",
":",
"gains",
"=",
"self",
".",
"_new_set_of_gain_counters",
"(",
")",
"for",
"example",
"in",
"examples",
":",
"for",
"gain",
"in",
"gains",
":",
"gain",
".",
"add",
"(",
"example",
")",
"winn... | Returns an OnlineInformationGain of the attribute with
max gain based on `examples`. | [
"Returns",
"an",
"OnlineInformationGain",
"of",
"the",
"attribute",
"with",
"max",
"gain",
"based",
"on",
"examples",
"."
] | python | train |
saltstack/salt | salt/utils/configparser.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/configparser.py#L57-L137 | def _read(self, fp, fpname):
'''
Makes the following changes from the RawConfigParser:
1. Strip leading tabs from non-section-header lines.
2. Treat 8 spaces at the beginning of a line as a tab.
3. Treat lines beginning with a tab as options.
4. Drops support for continuation lines.
5. Multiple values for a given option are stored as a list.
6. Keys and values are decoded to the system encoding.
'''
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = salt.utils.stringutils.to_unicode(fp.readline())
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in 'rR':
# no leading whitespace
continue
# Replace space indentation with a tab. Allows parser to work
# properly in cases where someone has edited the git config by hand
# and indented using spaces instead of tabs.
if line.startswith(self.SPACEINDENT):
line = '\t' + line[len(self.SPACEINDENT):]
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == self.DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError( # pylint: disable=undefined-variable
salt.utils.stringutils.to_str(fpname),
lineno,
salt.utils.stringutils.to_str(line))
# an option line?
else:
mo = self._optcre.match(line.lstrip())
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
optname = self.optionxform(optname.rstrip())
if optval is None:
optval = ''
if optval:
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos-1].isspace():
optval = optval[:pos]
optval = optval.strip()
# Empty strings should be considered as blank strings
if optval in ('""', "''"):
optval = ''
self._add_option(cursect, optname, optval)
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname) # pylint: disable=undefined-variable
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e | [
"def",
"_read",
"(",
"self",
",",
"fp",
",",
"fpname",
")",
":",
"cursect",
"=",
"None",
"# None, or a dictionary",
"optname",
"=",
"None",
"lineno",
"=",
"0",
"e",
"=",
"None",
"# None, or an exception",
"while",
"True",
":",
"line",
"=",
"salt",
".",
"... | Makes the following changes from the RawConfigParser:
1. Strip leading tabs from non-section-header lines.
2. Treat 8 spaces at the beginning of a line as a tab.
3. Treat lines beginning with a tab as options.
4. Drops support for continuation lines.
5. Multiple values for a given option are stored as a list.
6. Keys and values are decoded to the system encoding. | [
"Makes",
"the",
"following",
"changes",
"from",
"the",
"RawConfigParser",
":"
] | python | train |
Unity-Technologies/ml-agents | ml-agents/mlagents/trainers/bc/trainer.py | https://github.com/Unity-Technologies/ml-agents/blob/37d139af636e4a2351751fbf0f2fca5a9ed7457f/ml-agents/mlagents/trainers/bc/trainer.py#L87-L114 | def add_experiences(self, curr_info: AllBrainInfo, next_info: AllBrainInfo,
take_action_outputs):
"""
Adds experiences to each agent's experience history.
:param curr_info: Current AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param next_info: Next AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param take_action_outputs: The outputs of the take action method.
"""
# Used to collect information about student performance.
info_student = curr_info[self.brain_name]
next_info_student = next_info[self.brain_name]
for agent_id in info_student.agents:
self.evaluation_buffer[agent_id].last_brain_info = info_student
for agent_id in next_info_student.agents:
stored_info_student = self.evaluation_buffer[agent_id].last_brain_info
if stored_info_student is None:
continue
else:
next_idx = next_info_student.agents.index(agent_id)
if agent_id not in self.cumulative_rewards:
self.cumulative_rewards[agent_id] = 0
self.cumulative_rewards[agent_id] += next_info_student.rewards[next_idx]
if not next_info_student.local_done[next_idx]:
if agent_id not in self.episode_steps:
self.episode_steps[agent_id] = 0
self.episode_steps[agent_id] += 1 | [
"def",
"add_experiences",
"(",
"self",
",",
"curr_info",
":",
"AllBrainInfo",
",",
"next_info",
":",
"AllBrainInfo",
",",
"take_action_outputs",
")",
":",
"# Used to collect information about student performance.",
"info_student",
"=",
"curr_info",
"[",
"self",
".",
"br... | Adds experiences to each agent's experience history.
:param curr_info: Current AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param next_info: Next AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param take_action_outputs: The outputs of the take action method. | [
"Adds",
"experiences",
"to",
"each",
"agent",
"s",
"experience",
"history",
".",
":",
"param",
"curr_info",
":",
"Current",
"AllBrainInfo",
"(",
"Dictionary",
"of",
"all",
"current",
"brains",
"and",
"corresponding",
"BrainInfo",
")",
".",
":",
"param",
"next_... | python | train |
sporteasy/python-poeditor | poeditor/client.py | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L501-L552 | def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file | [
"def",
"export",
"(",
"self",
",",
"project_id",
",",
"language_code",
",",
"file_type",
"=",
"'po'",
",",
"filters",
"=",
"None",
",",
"tags",
"=",
"None",
",",
"local_file",
"=",
"None",
")",
":",
"if",
"file_type",
"not",
"in",
"self",
".",
"FILE_TY... | Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"] | [
"Return",
"terms",
"/",
"translations"
] | python | train |
ianmiell/shutit | shutit.py | https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit.py#L33-L94 | def create_session(docker_image=None,
docker_rm=None,
echo=False,
loglevel='WARNING',
nocolor=False,
session_type='bash',
vagrant_session_name=None,
vagrant_image='ubuntu/xenial64',
vagrant_gui=False,
vagrant_memory='1024',
vagrant_num_machines='1',
vagrant_provider='virtualbox',
vagrant_root_folder=None,
vagrant_swapsize='2G',
vagrant_version='1.8.6',
vagrant_virt_method='virtualbox',
vagrant_cpu='1',
video=-1,
walkthrough=False):
"""Creates a distinct ShutIt session. Sessions can be of type:
bash - a bash shell is spawned and
vagrant - a Vagrantfile is created and 'vagrant up'ped
"""
assert session_type in ('bash','docker','vagrant'), shutit_util.print_debug()
shutit_global_object = shutit_global.shutit_global_object
if video != -1 and video > 0:
walkthrough = True
if session_type in ('bash','docker'):
return shutit_global_object.create_session(session_type,
docker_image=docker_image,
rm=docker_rm,
echo=echo,
walkthrough=walkthrough,
walkthrough_wait=video,
nocolor=nocolor,
loglevel=loglevel)
elif session_type == 'vagrant':
if vagrant_session_name is None:
vagrant_session_name = 'shutit' + shutit_util.random_id()
if isinstance(vagrant_num_machines, int):
vagrant_num_machines = str(vagrant_num_machines)
assert isinstance(vagrant_num_machines, str)
assert isinstance(int(vagrant_num_machines), int)
if vagrant_root_folder is None:
vagrant_root_folder = shutit_global.shutit_global_object.owd
return create_session_vagrant(vagrant_session_name,
vagrant_num_machines,
vagrant_image,
vagrant_provider,
vagrant_gui,
vagrant_memory,
vagrant_swapsize,
echo,
walkthrough,
nocolor,
video,
vagrant_version,
vagrant_virt_method,
vagrant_root_folder,
vagrant_cpu,
loglevel) | [
"def",
"create_session",
"(",
"docker_image",
"=",
"None",
",",
"docker_rm",
"=",
"None",
",",
"echo",
"=",
"False",
",",
"loglevel",
"=",
"'WARNING'",
",",
"nocolor",
"=",
"False",
",",
"session_type",
"=",
"'bash'",
",",
"vagrant_session_name",
"=",
"None"... | Creates a distinct ShutIt session. Sessions can be of type:
bash - a bash shell is spawned and
vagrant - a Vagrantfile is created and 'vagrant up'ped | [
"Creates",
"a",
"distinct",
"ShutIt",
"session",
".",
"Sessions",
"can",
"be",
"of",
"type",
":"
] | python | train |
async def jsk_git(self, ctx: commands.Context, *, argument: CodeblockConverter):
    """
    Shortcut for 'jsk sh git'. Invokes the system shell.
    """
    # Prefix the codeblock contents with `git ` and delegate to the shell command.
    git_command = Codeblock(argument.language, "git " + argument.content)
    return await ctx.invoke(self.jsk_shell, argument=git_command)
"async",
"def",
"jsk_git",
"(",
"self",
",",
"ctx",
":",
"commands",
".",
"Context",
",",
"*",
",",
"argument",
":",
"CodeblockConverter",
")",
":",
"return",
"await",
"ctx",
".",
"invoke",
"(",
"self",
".",
"jsk_shell",
",",
"argument",
"=",
"Codeblock"... | Shortcut for 'jsk sh git'. Invokes the system shell. | [
"Shortcut",
"for",
"jsk",
"sh",
"git",
".",
"Invokes",
"the",
"system",
"shell",
"."
] | python | train |
def upsampling_feature_passthrough(early_feat, late_feat, filters, name, kernel_size=(1, 1)):
    """
    An upsampling feature passthrough layer inspired by yolo9000 and the tiling layer.

    It can be proven, that this layer does the same as
    conv(concat(early_feat, tile_2d(late_feat))). This layer has no activation function.

    :param early_feat: The early feature layer of shape [batch_size, h * s_x, w * s_y, _].
        s_x and s_y are integers computed internally describing the scale between the layers.
    :param late_feat: The late feature layer of shape [batch_size, h, w, _].
    :param filters: The number of convolution filters.
    :param name: The name of the layer (used as the variable scope).
    :param kernel_size: The size of the kernel. Default (1x1).
    :return: The output tensor of shape [batch_size, h * s_x, w * s_y, filters]
    :raises AssertionError: If the early spatial size is not an exact integer
        multiple of the late spatial size.
    """
    # Channel counts are irrelevant here; only the spatial sizes matter.
    _, h_early, w_early, _ = early_feat.get_shape().as_list()
    _, h_late, w_late, _ = late_feat.get_shape().as_list()
    # Integer scale factors between the maps; floor division avoids the
    # float round-trip of int(a / b) and is equivalent for positive sizes.
    s_x = w_early // w_late
    s_y = h_early // h_late
    assert h_late * s_y == h_early and w_late * s_x == w_early
    with tf.variable_scope(name):
        tiled = tile_2d(late_feat, s_x, s_y, "tile_2d", reorder_required=False)
        concated = tf.concat([early_feat, tiled], axis=-1)
        return tf.layers.conv2d(concated, filters=filters, kernel_size=kernel_size,
                                strides=(1, 1), padding="same")
"def",
"upsampling_feature_passthrough",
"(",
"early_feat",
",",
"late_feat",
",",
"filters",
",",
"name",
",",
"kernel_size",
"=",
"(",
"1",
",",
"1",
")",
")",
":",
"_",
",",
"h_early",
",",
"w_early",
",",
"c_early",
"=",
"early_feat",
".",
"get_shape",... | An upsampling feature passthrough layer inspired by yolo9000 and the tiling layer.
It can be proven, that this layer does the same as conv(concat(early_feat, tile_2d(late_feat))).
This layer has no activation function.
:param early_feat: The early feature layer of shape [batch_size, h * s_x, w * s_y, _].
s_x and s_y are integers computed internally describing the scale between the layers.
:param late_feat: The late feature layer of shape [batch_size, h, w, _].
:param filters: The number of convolution filters.
:param name: The name of the layer.
:param kernel_size: The size of the kernel. Default (1x1).
:return: The output tensor of shape [batch_size, h * s_x, w * s_y, outputs] | [
"An",
"upsampling",
"feature",
"passthrough",
"layer",
"inspired",
"by",
"yolo9000",
"and",
"the",
"tiling",
"layer",
"."
] | python | train |
def auto_load_configs(self):
    """Auto load all configs from app configs"""
    for app_config in apps.get_app_configs():
        for model in app_config.get_models():
            # The app config may expose per-model settings under an
            # attribute named after the model class.
            overrides = getattr(app_config, model.__name__, None)
            self.configs[self.get_model_name(model)] = ModelConfig(model, overrides)
"def",
"auto_load_configs",
"(",
"self",
")",
":",
"for",
"app",
"in",
"apps",
".",
"get_app_configs",
"(",
")",
":",
"for",
"model",
"in",
"app",
".",
"get_models",
"(",
")",
":",
"config",
"=",
"ModelConfig",
"(",
"model",
",",
"getattr",
"(",
"app",... | Auto load all configs from app configs | [
"Auto",
"load",
"all",
"configs",
"from",
"app",
"configs"
] | python | train |
def diffmap(adata, n_comps=15, copy=False):
    """Diffusion Maps [Coifman05]_ [Haghverdi15]_ [Wolf18]_.

    Diffusion maps [Coifman05]_ has been proposed for visualizing single-cell
    data by [Haghverdi15]_. The tool uses the adapted Gaussian kernel suggested
    by [Haghverdi16]_ in the implementation of [Wolf18]_.

    The width ("sigma") of the connectivity kernel is implicitly determined by
    the number of neighbors used to compute the single-cell graph in
    :func:`~scanpy.api.neighbors`. To reproduce the original implementation
    using a Gaussian kernel, use `method=='gauss'` in
    :func:`~scanpy.api.neighbors`; to use an exponential kernel, keep the
    default `method=='umap'`. Differences between these options shouldn't
    usually be dramatic.

    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix.
    n_comps : `int`, optional (default: 15)
        The number of dimensions of the representation; must be greater than 2.
    copy : `bool` (default: `False`)
        Return a copy instead of writing to adata.

    Returns
    -------
    Depending on `copy`, returns or updates `adata` with the fields
    `obsm['X_diffmap']` (diffusion map representation: right eigen basis of
    the transition matrix, eigenvectors as columns) and
    `uns['diffmap_evals']` (eigenvalues of the transition matrix).
    """
    # Guard clauses: a neighborhood graph must exist and the requested
    # dimensionality must be meaningful.
    if 'neighbors' not in adata.uns:
        raise ValueError(
            'You need to run `pp.neighbors` first to compute a neighborhood graph.')
    if n_comps <= 2:
        raise ValueError(
            'Provide any value greater than 2 for `n_comps`. ')
    target = adata.copy() if copy else adata
    _diffmap(target, n_comps=n_comps)
    return target if copy else None
"def",
"diffmap",
"(",
"adata",
",",
"n_comps",
"=",
"15",
",",
"copy",
"=",
"False",
")",
":",
"if",
"'neighbors'",
"not",
"in",
"adata",
".",
"uns",
":",
"raise",
"ValueError",
"(",
"'You need to run `pp.neighbors` first to compute a neighborhood graph.'",
")",
... | Diffusion Maps [Coifman05]_ [Haghverdi15]_ [Wolf18]_.
Diffusion maps [Coifman05]_ has been proposed for visualizing single-cell
data by [Haghverdi15]_. The tool uses the adapted Gaussian kernel suggested
by [Haghverdi16]_ in the implementation of [Wolf18]_.
The width ("sigma") of the connectivity kernel is implicitly determined by
the number of neighbors used to compute the single-cell graph in
:func:`~scanpy.api.neighbors`. To reproduce the original implementation
using a Gaussian kernel, use `method=='gauss'` in
:func:`~scanpy.api.neighbors`. To use an exponential kernel, use the default
`method=='umap'`. Differences between these options shouldn't usually be
dramatic.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
n_comps : `int`, optional (default: 15)
The number of dimensions of the representation.
copy : `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
**X_diffmap** : :class:`numpy.ndarray` (`adata.obsm`)
Diffusion map representation of data, which is the right eigen basis of
the transition matrix with eigenvectors as columns.
**diffmap_evals** : :class:`numpy.ndarray` (`adata.uns`)
Array of size (number of eigen vectors). Eigenvalues of transition matrix. | [
"Diffusion",
"Maps",
"[",
"Coifman05",
"]",
"_",
"[",
"Haghverdi15",
"]",
"_",
"[",
"Wolf18",
"]",
"_",
"."
] | python | train |
def get_answer_begin_end(data):
    '''
    Get answer's index of begin and end.
    '''
    begins = []
    ends = []
    for qa_pair in data:
        # Translate character offsets into word (token) indices.
        tokens = qa_pair['passage_tokens']
        begins.append(get_word_index(tokens, qa_pair['answer_begin']))
        ends.append(get_word_index(tokens, qa_pair['answer_end']))
    return np.asarray(begins), np.asarray(ends)
"def",
"get_answer_begin_end",
"(",
"data",
")",
":",
"begin",
"=",
"[",
"]",
"end",
"=",
"[",
"]",
"for",
"qa_pair",
"in",
"data",
":",
"tokens",
"=",
"qa_pair",
"[",
"'passage_tokens'",
"]",
"char_begin",
"=",
"qa_pair",
"[",
"'answer_begin'",
"]",
"ch... | Get answer's index of begin and end. | [
"Get",
"answer",
"s",
"index",
"of",
"begin",
"and",
"end",
"."
] | python | train |
def clearness_index(ghi, solar_zenith, extra_radiation, min_cos_zenith=0.065,
                    max_clearness_index=2.0):
    """
    Calculate the clearness index.

    The clearness index is the ratio of global to extraterrestrial
    irradiance on a horizontal plane.

    Parameters
    ----------
    ghi : numeric
        Global horizontal irradiance in W/m^2.
    solar_zenith : numeric
        True (not refraction-corrected) solar zenith angle in decimal
        degrees.
    extra_radiation : numeric
        Irradiance incident at the top of the atmosphere
    min_cos_zenith : numeric, default 0.065
        Minimum value of cos(zenith) to allow when calculating global
        clearness index `kt`. Equivalent to zenith = 86.273 degrees.
    max_clearness_index : numeric, default 2.0
        Maximum value of the clearness index. The default, 2.0, allows
        for over-irradiance events typically seen in sub-hourly data.
        NREL's SRRL Fortran code used 0.82 for hourly data.

    Returns
    -------
    kt : numeric
        Clearness index

    References
    ----------
    .. [1] Maxwell, E. L., "A Quasi-Physical Model for Converting Hourly
       Global Horizontal to Direct Normal Insolation", Technical
       Report No. SERI/TR-215-3087, Golden, CO: Solar Energy Research
       Institute, 1987.
    """
    # Clamp cos(zenith) away from zero so the ratio stays bounded near sunset.
    denominator = extra_radiation * np.maximum(tools.cosd(solar_zenith),
                                               min_cos_zenith)
    # Dividing without an np.errstate guard deliberately lets warnings
    # surface for users who override min_cos_zenith.
    kt = ghi / denominator
    return np.minimum(np.maximum(kt, 0), max_clearness_index)
"def",
"clearness_index",
"(",
"ghi",
",",
"solar_zenith",
",",
"extra_radiation",
",",
"min_cos_zenith",
"=",
"0.065",
",",
"max_clearness_index",
"=",
"2.0",
")",
":",
"cos_zenith",
"=",
"tools",
".",
"cosd",
"(",
"solar_zenith",
")",
"I0h",
"=",
"extra_radi... | Calculate the clearness index.
The clearness index is the ratio of global to extraterrestrial
irradiance on a horizontal plane.
Parameters
----------
ghi : numeric
Global horizontal irradiance in W/m^2.
solar_zenith : numeric
True (not refraction-corrected) solar zenith angle in decimal
degrees.
extra_radiation : numeric
Irradiance incident at the top of the atmosphere
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_clearness_index : numeric, default 2.0
Maximum value of the clearness index. The default, 2.0, allows
for over-irradiance events typically seen in sub-hourly data.
NREL's SRRL Fortran code used 0.82 for hourly data.
Returns
-------
kt : numeric
Clearness index
References
----------
.. [1] Maxwell, E. L., "A Quasi-Physical Model for Converting Hourly
Global Horizontal to Direct Normal Insolation", Technical
Report No. SERI/TR-215-3087, Golden, CO: Solar Energy Research
Institute, 1987. | [
"Calculate",
"the",
"clearness",
"index",
"."
] | python | train |
def complete_nonnull_value(
    exe_context,  # type: ExecutionContext
    return_type,  # type: GraphQLNonNull
    field_asts,  # type: List[Field]
    info,  # type: ResolveInfo
    path,  # type: List[Union[int, str]]
    result,  # type: Any
):
    # type: (...) -> Any
    """
    Complete a NonNull value by completing the inner type
    """
    # Delegate completion to the wrapped (inner) type first.
    inner_completed = complete_value(
        exe_context, return_type.of_type, field_asts, info, path, result
    )
    if inner_completed is not None:
        return inner_completed
    # A null inner value violates the non-null contract.
    raise GraphQLError(
        "Cannot return null for non-nullable field {}.{}.".format(
            info.parent_type, info.field_name
        ),
        field_asts,
        path=path,
    )
"def",
"complete_nonnull_value",
"(",
"exe_context",
",",
"# type: ExecutionContext",
"return_type",
",",
"# type: GraphQLNonNull",
"field_asts",
",",
"# type: List[Field]",
"info",
",",
"# type: ResolveInfo",
"path",
",",
"# type: List[Union[int, str]]",
"result",
",",
"# ty... | Complete a NonNull value by completing the inner type | [
"Complete",
"a",
"NonNull",
"value",
"by",
"completing",
"the",
"inner",
"type"
] | python | train |
def graph_order(self):
    """
    Get graph-order tuple for node.

    The tuple contains, for each parent/child pair on the path from the
    root down to this node, the child's index within its parent. The root
    itself yields an empty tuple.
    """
    # Root-first chain of ancestors ending at this node.
    chain = list(reversed(self.parentage))
    # Pair each ancestor with its direct descendant along the path and
    # record the child's position within the parent.
    return tuple(
        parent.index(child) for parent, child in zip(chain, chain[1:])
    )
"def",
"graph_order",
"(",
"self",
")",
":",
"parentage",
"=",
"tuple",
"(",
"reversed",
"(",
"self",
".",
"parentage",
")",
")",
"graph_order",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"parentage",
")",
"-",
"1",
")",
":",
"paren... | Get graph-order tuple for node.
::
>>> from uqbar.containers import UniqueTreeContainer, UniqueTreeNode
>>> root_container = UniqueTreeContainer(name="root")
>>> outer_container = UniqueTreeContainer(name="outer")
>>> inner_container = UniqueTreeContainer(name="inner")
>>> node_a = UniqueTreeNode(name="a")
>>> node_b = UniqueTreeNode(name="b")
>>> node_c = UniqueTreeNode(name="c")
>>> node_d = UniqueTreeNode(name="d")
>>> root_container.extend([node_a, outer_container])
>>> outer_container.extend([inner_container, node_d])
>>> inner_container.extend([node_b, node_c])
::
>>> for node in root_container.depth_first():
... print(node.name, node.graph_order)
...
a (0,)
outer (1,)
inner (1, 0)
b (1, 0, 0)
c (1, 0, 1)
d (1, 1) | [
"Get",
"graph",
"-",
"order",
"tuple",
"for",
"node",
"."
] | python | train |
def shot_view_task(self, ):
    """View the task that is currently selected on the shot page

    :returns: None
    :rtype: None
    :raises: None
    """
    # Nothing to show without a current shot.
    if not self.cur_shot:
        return
    index = self.shot_task_tablev.currentIndex()
    pointed = index.internalPointer()
    if not pointed:
        return
    self.view_task(pointed.internal_data())
"def",
"shot_view_task",
"(",
"self",
",",
")",
":",
"if",
"not",
"self",
".",
"cur_shot",
":",
"return",
"i",
"=",
"self",
".",
"shot_task_tablev",
".",
"currentIndex",
"(",
")",
"item",
"=",
"i",
".",
"internalPointer",
"(",
")",
"if",
"item",
":",
... | View the task that is currently selected on the shot page
:returns: None
:rtype: None
:raises: None | [
"View",
"the",
"task",
"that",
"is",
"currently",
"selected",
"on",
"the",
"shot",
"page"
] | python | train |
def main():
    """
    Drive a sweep of Meraculous assemblies.

    1. Reads in a meraculous config file and outputs all of the associated
       config files to $PWD/configs.
    2. The name of each run and the path to its config is passed to a
       thread pool that controls which assemblies are executed and when.
    """
    parser = CommandLine()
    # Print usage and bail when invoked with no arguments.
    # (this block from here: http://stackoverflow.com/a/4042861/5843327)
    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)
    parser.parse()
    myArgs = parser.args
    # Figure out how many processors to give to each assembly since several
    # run in parallel. The MerParse class will handle overriding whatever is
    # found in the config file in the read_config() method. Floor division
    # avoids the float round-trip of int(a / b) and is equivalent here.
    procsPerAssembly = myArgs.maxProcs // myArgs.simultaneous
    setattr(myArgs, "maxProcs", procsPerAssembly)
    # 1. Read the input config and write one derived config file per sweep
    #    value to $PWD/configs.
    merparser = MerParse(myArgs.inputConfig, myArgs.sweep, myArgs.sstart,
                         myArgs.sstop, myArgs.sinterval, myArgs.maxProcs,
                         asPrefix = myArgs.prefix,
                         asSI = myArgs.index,
                         genus = myArgs.genus,
                         species = myArgs.species)
    configPaths = merparser.sweeper_output()
    # Make the assemblies dir ONCE before any workers start; exist_ok avoids
    # the check-then-create race between the existence test and makedirs.
    cwd = os.path.abspath(os.getcwd())
    allAssembliesDir = os.path.join(cwd, "assemblies")
    os.makedirs(allAssembliesDir, exist_ok=True)
    # configPaths is a dict of {run name: absolute config path}; build one
    # MerRunner instance per planned assembly.
    instances = []
    for runName in configPaths:
        configPath = configPaths.get(runName)
        instances.append(MerRunner(runName, configPath, myArgs.cleanup))
    if not instances:
        print("There are no meraculous folders in this directory. Exiting")
    else:
        # Pool size is the number of simultaneous runs for the server.
        pool = ThreadPool(myArgs.simultaneous)
        results = pool.map(mer_runner_dummy, instances)
        pool.close()
        pool.join()
"def",
"main",
"(",
")",
":",
"parser",
"=",
"CommandLine",
"(",
")",
"#this block from here: http://stackoverflow.com/a/4042861/5843327",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"==",
"1",
":",
"parser",
".",
"parser",
".",
"print_help",
"(",
")",
"sys",
... | 1. Reads in a meraculous config file and outputs all of the associated config
files to $PWD/configs
2. The name of each run and the path to the directory is passed to a
multiprocessing core that controls which assemblies are executed and when. | [
"1",
".",
"Reads",
"in",
"a",
"meraculous",
"config",
"file",
"and",
"outputs",
"all",
"of",
"the",
"associated",
"config",
"files",
"to",
"$PWD",
"/",
"configs",
"2",
".",
"The",
"name",
"of",
"each",
"run",
"and",
"the",
"path",
"to",
"the",
"directo... | python | train |
python-beaver/python-beaver | beaver/worker/tail.py | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L130-L184 | def _buffer_extract(self, data):
"""
Extract takes an arbitrary string of input data and returns an array of
tokenized entities, provided there were any available to extract. This
makes for easy processing of datagrams using a pattern like:
tokenizer.extract(data).map { |entity| Decode(entity) }.each do ..."""
# Extract token-delimited entities from the input string with the split command.
# There's a bit of craftiness here with the -1 parameter. Normally split would
# behave no differently regardless of if the token lies at the very end of the
# input buffer or not (i.e. a literal edge case) Specifying -1 forces split to
# return "" in this case, meaning that the last entry in the list represents a
# new segment of data where the token has not been encountered
entities = collections.deque(data.split(self._delimiter, -1))
# Check to see if the buffer has exceeded capacity, if we're imposing a limit
if self._size_limit:
if self.input_size + len(entities[0]) > self._size_limit:
raise Exception('input buffer full')
self._input_size += len(entities[0])
# Move the first entry in the resulting array into the input buffer. It represents
# the last segment of a token-delimited entity unless it's the only entry in the list.
first_entry = entities.popleft()
if len(first_entry) > 0:
self._input.append(first_entry)
# If the resulting array from the split is empty, the token was not encountered
# (not even at the end of the buffer). Since we've encountered no token-delimited
# entities this go-around, return an empty array.
if len(entities) == 0:
return []
# At this point, we've hit a token, or potentially multiple tokens. Now we can bring
# together all the data we've buffered from earlier calls without hitting a token,
# and add it to our list of discovered entities.
entities.appendleft(''.join(self._input))
# Now that we've hit a token, joined the input buffer and added it to the entities
# list, we can go ahead and clear the input buffer. All of the segments that were
# stored before the join can now be garbage collected.
self._input.clear()
# The last entity in the list is not token delimited, however, thanks to the -1
# passed to split. It represents the beginning of a new list of as-yet-untokenized
# data, so we add it to the start of the list.
self._input.append(entities.pop())
# Set the new input buffer size, provided we're keeping track
if self._size_limit:
self._input_size = len(self._input[0])
# Now we're left with the list of extracted token-delimited entities we wanted
# in the first place. Hooray!
return entities | [
"def",
"_buffer_extract",
"(",
"self",
",",
"data",
")",
":",
"# Extract token-delimited entities from the input string with the split command.",
"# There's a bit of craftiness here with the -1 parameter. Normally split would",
"# behave no differently regardless of if the token lies at the ver... | Extract takes an arbitrary string of input data and returns an array of
tokenized entities, provided there were any available to extract. This
makes for easy processing of datagrams using a pattern like:
tokenizer.extract(data).map { |entity| Decode(entity) }.each do ... | [
"Extract",
"takes",
"an",
"arbitrary",
"string",
"of",
"input",
"data",
"and",
"returns",
"an",
"array",
"of",
"tokenized",
"entities",
"provided",
"there",
"were",
"any",
"available",
"to",
"extract",
".",
"This",
"makes",
"for",
"easy",
"processing",
"of",
... | python | train |
def DetrendFITS(fitsfile, raw=False, season=None, clobber=False, **kwargs):
    """
    De-trend a K2 FITS file using :py:class:`everest.detrender.rPLD`.

    :param str fitsfile: The full path to the FITS file
    :param bool raw: Currently unused by this function. Default :py:obj:`False`.
    :param int season: The K2 campaign number. If :py:obj:`None`, it is read
        from the FITS header, falling back to 0 when the header value is
        missing or empty.
    :param bool clobber: Re-run the de-trending even if an output file
        already exists? Default :py:obj:`False`.
    :param ndarray aperture: A 2D integer array corresponding to the \
        desired photometric aperture (1 = in aperture, 0 = outside \
        aperture). Default is to interactively select an aperture.
    :param kwargs: Any kwargs accepted by :py:class:`everest.detrender.rPLD`.

    :returns: An :py:class:`everest.Everest` instance.

    """
    # Get info: target ID (and campaign, if not given) from the primary header
    EPIC = pyfits.getheader(fitsfile, 0)['KEPLERID']
    if season is None:
        season = pyfits.getheader(fitsfile, 0)['CAMPAIGN']
        if season is None or season == "":
            season = 0
    # Expected location of the de-trended output for this target/campaign
    everestfile = os.path.join(
        everest.missions.k2.TargetDirectory(EPIC, season),
        everest.missions.k2.FITSFile(EPIC, season))
    # De-trend? Only when no output exists yet, or when clobbering.
    if clobber or not os.path.exists(everestfile):
        # Get raw data
        data = GetData(fitsfile, EPIC, season, clobber=clobber, **kwargs)
        # De-trend
        model = everest.rPLD(EPIC,
                             data=data,
                             season=season, debug=True,
                             clobber=clobber, **kwargs)
        # Publish it: write the FITS product and copy the PDF report to the
        # data-validation summary filename.
        everest.fits.MakeFITS(model)
        shutil.copyfile(os.path.join(model.dir, model.name + '.pdf'),
                        os.path.join(model.dir,
                                     model._mission.DVSFile(model.ID,
                                                            model.season,
                                                            model.cadence)))
    # Return an Everest instance
    return everest.Everest(EPIC, season=season)
"def",
"DetrendFITS",
"(",
"fitsfile",
",",
"raw",
"=",
"False",
",",
"season",
"=",
"None",
",",
"clobber",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get info",
"EPIC",
"=",
"pyfits",
".",
"getheader",
"(",
"fitsfile",
",",
"0",
")",
"[",
... | De-trend a K2 FITS file using :py:class:`everest.detrender.rPLD`.
:param str fitsfile: The full path to the FITS file
:param ndarray aperture: A 2D integer array corresponding to the \
desired photometric aperture (1 = in aperture, 0 = outside \
aperture). Default is to interactively select an aperture.
:param kwargs: Any kwargs accepted by :py:class:`everest.detrender.rPLD`.
:returns: An :py:class:`everest.Everest` instance. | [
"De",
"-",
"trend",
"a",
"K2",
"FITS",
"file",
"using",
":",
"py",
":",
"class",
":",
"everest",
".",
"detrender",
".",
"rPLD",
"."
] | python | train |
def do_rmfit(self, arg):
    """Removes a fit function from a variable. See 'fit'."""
    if arg not in self.curargs["fits"]:
        return
    del self.curargs["fits"][arg]
    # Timing variables use a '|fit' suffix; everything else uses '.fit'.
    suffix = "|fit" if "timing" in arg else ".fit"
    fitvar = "{}{}".format(arg, suffix)
    # Also drop the derived dependent entry, if one was registered.
    if fitvar in self.curargs["dependents"]:
        self.curargs["dependents"].remove(fitvar)
"def",
"do_rmfit",
"(",
"self",
",",
"arg",
")",
":",
"if",
"arg",
"in",
"self",
".",
"curargs",
"[",
"\"fits\"",
"]",
":",
"del",
"self",
".",
"curargs",
"[",
"\"fits\"",
"]",
"[",
"arg",
"]",
"#We also need to remove the variable entry if it exists.",
"if"... | Removes a fit function from a variable. See 'fit'. | [
"Removes",
"a",
"fit",
"function",
"from",
"a",
"variable",
".",
"See",
"fit",
"."
] | python | train |
def agents():
    """Lists the currently active agents"""
    print 'The following LiveSync agents are active:'
    # Stable ordering: group by backend, then case-insensitive agent name.
    agent_list = LiveSyncAgent.find().order_by(LiveSyncAgent.backend_name, db.func.lower(LiveSyncAgent.name)).all()
    table_data = [['ID', 'Name', 'Backend', 'Initial Export', 'Queue']]
    for agent in agent_list:
        # Color-code whether the one-off initial export has been run yet.
        initial = (cformat('%{green!}done%{reset}') if agent.initial_data_exported else
                   cformat('%{yellow!}pending%{reset}'))
        if agent.backend is None:
            # The backend plugin referenced by this agent is not available.
            backend_title = cformat('%{red!}invalid backend ({})%{reset}').format(agent.backend_name)
        else:
            backend_title = agent.backend.title
        # Queue column shows the number of still-unprocessed entries.
        table_data.append([unicode(agent.id), agent.name, backend_title, initial,
                           unicode(agent.queue.filter_by(processed=False).count())])
    table = AsciiTable(table_data)
    # Right-align the numeric queue-length column.
    table.justify_columns[4] = 'right'
    print table.table
    # Remind the operator about agents that still need their initial export.
    if not all(a.initial_data_exported for a in agent_list):
        print
        print "You need to perform the initial data export for some agents."
        print cformat("To do so, run "
                      "%{yellow!}indico livesync initial_export %{reset}%{yellow}<agent_id>%{reset} for those agents.")
"def",
"agents",
"(",
")",
":",
"print",
"'The following LiveSync agents are active:'",
"agent_list",
"=",
"LiveSyncAgent",
".",
"find",
"(",
")",
".",
"order_by",
"(",
"LiveSyncAgent",
".",
"backend_name",
",",
"db",
".",
"func",
".",
"lower",
"(",
"LiveSyncAge... | Lists the currently active agents | [
"Lists",
"the",
"currently",
"active",
"agents"
] | python | train |
def parts(self):
    """
    Get the parts directly below this element.
    """
    for child in self.__parts_and_divisions:
        if child.tag != 'part':
            # Divisions shouldn't be beneath a part, but flatten their
            # parts as a fallback in case this does happen.
            yield from child.parts
        else:
            yield child
"def",
"parts",
"(",
"self",
")",
":",
"for",
"item",
"in",
"self",
".",
"__parts_and_divisions",
":",
"if",
"item",
".",
"tag",
"==",
"'part'",
":",
"yield",
"item",
"else",
":",
"# Divisions shouldn't be beneath a part, but here's a fallback",
"# for if this does ... | Get the parts directly below this element. | [
"Get",
"the",
"parts",
"directly",
"below",
"this",
"element",
"."
] | python | train |
def _get_connected_subgraphs(vertices, vertices_neighbours):
    """Break a graph containing unconnected subgraphs into a list of connected
    subgraphs.

    Returns
    -------
    [set([vertex, ...]), ...]
    """
    unvisited = set(vertices)
    components = []
    while unvisited:
        # Grow one connected component from an arbitrary remaining vertex.
        component = set(_dfs(unvisited.pop(), vertices_neighbours))
        unvisited -= component
        components.append(component)
    return components
"def",
"_get_connected_subgraphs",
"(",
"vertices",
",",
"vertices_neighbours",
")",
":",
"remaining_vertices",
"=",
"set",
"(",
"vertices",
")",
"subgraphs",
"=",
"[",
"]",
"while",
"remaining_vertices",
":",
"subgraph",
"=",
"set",
"(",
"_dfs",
"(",
"remaining... | Break a graph containing unconnected subgraphs into a list of connected
subgraphs.
Returns
-------
[set([vertex, ...]), ...] | [
"Break",
"a",
"graph",
"containing",
"unconnected",
"subgraphs",
"into",
"a",
"list",
"of",
"connected",
"subgraphs",
"."
] | python | train |
def parse_model_group_list(path, groups):
    """Parse a structured list of model groups as obtained from a YAML file

    Yields reaction IDs. Path can be given as a string or a context.
    """
    context = FilePathContext(path)
    for group in groups:
        if 'include' not in group:
            # Inline group: parse its reactions directly.
            for reaction_id in parse_model_group(context, group):
                yield reaction_id
        else:
            # External file reference, resolved relative to this context.
            included = context.resolve(group['include'])
            for reaction_id in parse_model_file(included):
                yield reaction_id
"def",
"parse_model_group_list",
"(",
"path",
",",
"groups",
")",
":",
"context",
"=",
"FilePathContext",
"(",
"path",
")",
"for",
"model_group",
"in",
"groups",
":",
"if",
"'include'",
"in",
"model_group",
":",
"include_context",
"=",
"context",
".",
"resolve... | Parse a structured list of model groups as obtained from a YAML file
Yields reaction IDs. Path can be given as a string or a context. | [
"Parse",
"a",
"structured",
"list",
"of",
"model",
"groups",
"as",
"obtained",
"from",
"a",
"YAML",
"file"
] | python | train |
def _combine_all_stats(self, player_info):
    """
    Pull stats from all tables into a single data structure.

    Pull the stats from all of the requested tables into a dictionary that
    is separated by season to allow easy queries of the player's stats for
    each season.

    Parameters
    ----------
    player_info : PyQuery object
        A PyQuery object containing all of the stats information for the
        requested player.

    Returns
    -------
    dictionary
        Returns a dictionary where all stats from each table are combined
        by season to allow easy queries by year.
    """
    combined = {}
    for table_id in ('passing', 'rushing', 'defense', 'scoring'):
        selector = 'table#%s' % table_id
        # Per-season rows live in the table body; career totals in the footer.
        season_rows = utils._get_stats_table(player_info, selector)
        career_rows = utils._get_stats_table(player_info, selector,
                                             footer=True)
        combined = self._combine_season_stats(season_rows, career_rows,
                                              combined)
    return combined
"def",
"_combine_all_stats",
"(",
"self",
",",
"player_info",
")",
":",
"all_stats_dict",
"=",
"{",
"}",
"for",
"table_id",
"in",
"[",
"'passing'",
",",
"'rushing'",
",",
"'defense'",
",",
"'scoring'",
"]",
":",
"table_items",
"=",
"utils",
".",
"_get_stats_... | Pull stats from all tables into a single data structure.
Pull the stats from all of the requested tables into a dictionary that
is separated by season to allow easy queries of the player's stats for
each season.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing all of the stats information for the
requested player.
Returns
-------
dictionary
Returns a dictionary where all stats from each table are combined
by season to allow easy queries by year. | [
"Pull",
"stats",
"from",
"all",
"tables",
"into",
"a",
"single",
"data",
"structure",
"."
] | python | train |
google/grr | grr/server/grr_response_server/aff4_objects/aff4_grr.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4_objects/aff4_grr.py#L374-L400 | def Update(self, attribute=None):
"""Update an attribute from the client."""
# List the directory on the client
currently_running = self.Get(self.Schema.CONTENT_LOCK)
# Is this flow still active?
if currently_running:
flow_obj = aff4.FACTORY.Open(currently_running, token=self.token)
if flow_obj and flow_obj.GetRunner().IsRunning():
return
# The client_id is the first element of the URN
client_id = self.urn.Path().split("/", 2)[1]
# Get the pathspec for this object
pathspec = self.Get(self.Schema.STAT).pathspec
flow_urn = flow.StartAFF4Flow(
client_id=client_id,
# TODO(user): dependency loop between aff4_grr.py and transfer.py
# flow_name=transfer.MultiGetFile.__name__,
flow_name="MultiGetFile",
token=self.token,
pathspecs=[pathspec])
self.Set(self.Schema.CONTENT_LOCK(flow_urn))
self.Close()
return flow_urn | [
"def",
"Update",
"(",
"self",
",",
"attribute",
"=",
"None",
")",
":",
"# List the directory on the client",
"currently_running",
"=",
"self",
".",
"Get",
"(",
"self",
".",
"Schema",
".",
"CONTENT_LOCK",
")",
"# Is this flow still active?",
"if",
"currently_running"... | Update an attribute from the client. | [
"Update",
"an",
"attribute",
"from",
"the",
"client",
"."
] | python | train |
iamteem/redisco | redisco/models/base.py | https://github.com/iamteem/redisco/blob/a7ba19ff3c38061d6d8bc0c10fa754baadcfeb91/redisco/models/base.py#L521-L528 | def get_model_from_key(key):
"""Gets the model from a given key."""
_known_models = {}
model_name = key.split(':', 2)[0]
# populate
for klass in Model.__subclasses__():
_known_models[klass.__name__] = klass
return _known_models.get(model_name, None) | [
"def",
"get_model_from_key",
"(",
"key",
")",
":",
"_known_models",
"=",
"{",
"}",
"model_name",
"=",
"key",
".",
"split",
"(",
"':'",
",",
"2",
")",
"[",
"0",
"]",
"# populate",
"for",
"klass",
"in",
"Model",
".",
"__subclasses__",
"(",
")",
":",
"_... | Gets the model from a given key. | [
"Gets",
"the",
"model",
"from",
"a",
"given",
"key",
"."
] | python | train |
horazont/aioxmpp | aioxmpp/structs.py | https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/structs.py#L1310-L1328 | def lookup(self, language_ranges):
"""
Perform an RFC4647 language range lookup on the keys in the
dictionary. `language_ranges` must be a sequence of
:class:`LanguageRange` instances.
Return the entry in the dictionary with a key as produced by
`lookup_language`. If `lookup_language` does not find a match and the
mapping contains an entry with key :data:`None`, that entry is
returned, otherwise :class:`KeyError` is raised.
"""
keys = list(self.keys())
try:
keys.remove(None)
except ValueError:
pass
keys.sort()
key = lookup_language(keys, language_ranges)
return self[key] | [
"def",
"lookup",
"(",
"self",
",",
"language_ranges",
")",
":",
"keys",
"=",
"list",
"(",
"self",
".",
"keys",
"(",
")",
")",
"try",
":",
"keys",
".",
"remove",
"(",
"None",
")",
"except",
"ValueError",
":",
"pass",
"keys",
".",
"sort",
"(",
")",
... | Perform an RFC4647 language range lookup on the keys in the
dictionary. `language_ranges` must be a sequence of
:class:`LanguageRange` instances.
Return the entry in the dictionary with a key as produced by
`lookup_language`. If `lookup_language` does not find a match and the
mapping contains an entry with key :data:`None`, that entry is
returned, otherwise :class:`KeyError` is raised. | [
"Perform",
"an",
"RFC4647",
"language",
"range",
"lookup",
"on",
"the",
"keys",
"in",
"the",
"dictionary",
".",
"language_ranges",
"must",
"be",
"a",
"sequence",
"of",
":",
"class",
":",
"LanguageRange",
"instances",
"."
] | python | train |
Contraz/demosys-py | demosys/management/base.py | https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/management/base.py#L80-L92 | def validate_name(self, name):
"""
Can the name be used as a python module or package?
Raises ``ValueError`` if the name is invalid.
:param name: the name to check
"""
if not name:
raise ValueError("Name cannot be empty")
# Can the name be used as an identifier in python (module or package name)
if not name.isidentifier():
raise ValueError("{} is not a valid identifier".format(name)) | [
"def",
"validate_name",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"name",
":",
"raise",
"ValueError",
"(",
"\"Name cannot be empty\"",
")",
"# Can the name be used as an identifier in python (module or package name)\r",
"if",
"not",
"name",
".",
"isidentifier",
"... | Can the name be used as a python module or package?
Raises ``ValueError`` if the name is invalid.
:param name: the name to check | [
"Can",
"the",
"name",
"be",
"used",
"as",
"a",
"python",
"module",
"or",
"package?",
"Raises",
"ValueError",
"if",
"the",
"name",
"is",
"invalid",
".",
":",
"param",
"name",
":",
"the",
"name",
"to",
"check"
] | python | valid |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L112-L120 | def package_description(self):
"""
Given an Article class instance, this is responsible for returning an
article description. For this method I have taken the approach of
serializing the article's first abstract, if it has one. This results
in 0 or 1 descriptions per article.
"""
abstract = self.article.root.xpath('./front/article-meta/abstract')
return serialize(abstract[0], strip=True) if abstract else None | [
"def",
"package_description",
"(",
"self",
")",
":",
"abstract",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./front/article-meta/abstract'",
")",
"return",
"serialize",
"(",
"abstract",
"[",
"0",
"]",
",",
"strip",
"=",
"True",
")",
"if... | Given an Article class instance, this is responsible for returning an
article description. For this method I have taken the approach of
serializing the article's first abstract, if it has one. This results
in 0 or 1 descriptions per article. | [
"Given",
"an",
"Article",
"class",
"instance",
"this",
"is",
"responsible",
"for",
"returning",
"an",
"article",
"description",
".",
"For",
"this",
"method",
"I",
"have",
"taken",
"the",
"approach",
"of",
"serializing",
"the",
"article",
"s",
"first",
"abstrac... | python | train |
quantopian/pgcontents | pgcontents/utils/migrate.py | https://github.com/quantopian/pgcontents/blob/ed36268b7917332d16868208e1e565742a8753e1/pgcontents/utils/migrate.py#L17-L31 | def temp_alembic_ini(alembic_dir_location, sqlalchemy_url):
"""
Temporarily write an alembic.ini file for use with alembic migration
scripts.
"""
with TemporaryDirectory() as tempdir:
alembic_ini_filename = join(tempdir, 'temp_alembic.ini')
with open(alembic_ini_filename, 'w') as f:
f.write(
ALEMBIC_INI_TEMPLATE.format(
alembic_dir_location=alembic_dir_location,
sqlalchemy_url=sqlalchemy_url,
)
)
yield alembic_ini_filename | [
"def",
"temp_alembic_ini",
"(",
"alembic_dir_location",
",",
"sqlalchemy_url",
")",
":",
"with",
"TemporaryDirectory",
"(",
")",
"as",
"tempdir",
":",
"alembic_ini_filename",
"=",
"join",
"(",
"tempdir",
",",
"'temp_alembic.ini'",
")",
"with",
"open",
"(",
"alembi... | Temporarily write an alembic.ini file for use with alembic migration
scripts. | [
"Temporarily",
"write",
"an",
"alembic",
".",
"ini",
"file",
"for",
"use",
"with",
"alembic",
"migration",
"scripts",
"."
] | python | test |
MediaFire/mediafire-python-open-sdk | mediafire/client.py | https://github.com/MediaFire/mediafire-python-open-sdk/blob/8f1f23db1b16f16e026f5c6777aec32d00baa05f/mediafire/client.py#L132-L164 | def get_resource_by_key(self, resource_key):
"""Return resource by quick_key/folder_key.
key -- quick_key or folder_key
"""
# search for quick_key by default
lookup_order = ["quick_key", "folder_key"]
if len(resource_key) == FOLDER_KEY_LENGTH:
lookup_order = ["folder_key", "quick_key"]
resource = None
for lookup_key in lookup_order:
try:
if lookup_key == "folder_key":
info = self.api.folder_get_info(folder_key=resource_key)
resource = Folder(info['folder_info'])
elif lookup_key == "quick_key":
info = self.api.file_get_info(quick_key=resource_key)
resource = File(info['file_info'])
except MediaFireApiError:
# TODO: Check response code
pass
if resource:
break
if not resource:
raise ResourceNotFoundError(resource_key)
return resource | [
"def",
"get_resource_by_key",
"(",
"self",
",",
"resource_key",
")",
":",
"# search for quick_key by default",
"lookup_order",
"=",
"[",
"\"quick_key\"",
",",
"\"folder_key\"",
"]",
"if",
"len",
"(",
"resource_key",
")",
"==",
"FOLDER_KEY_LENGTH",
":",
"lookup_order",... | Return resource by quick_key/folder_key.
key -- quick_key or folder_key | [
"Return",
"resource",
"by",
"quick_key",
"/",
"folder_key",
"."
] | python | train |
hubo1016/vlcp | vlcp/server/module.py | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/server/module.py#L642-L723 | async def reload_modules(self, pathlist):
"""
Reload modules with a full path in the pathlist
"""
loadedModules = []
failures = []
for path in pathlist:
p, module = findModule(path, False)
if module is not None and hasattr(module, '_instance') and module._instance.state != ModuleLoadStateChanged.UNLOADED:
loadedModules.append(module)
# Unload all modules
ums = [ModuleLoadStateChanged.createMatcher(m, ModuleLoadStateChanged.UNLOADED) for m in loadedModules]
for m in loadedModules:
# Only unload the module itself, not its dependencies, since we will restart the module soon enough
self.subroutine(self.unloadmodule(m, True), False)
await self.wait_for_all(*ums)
# Group modules by package
grouped = {}
for path in pathlist:
dotpos = path.rfind('.')
if dotpos == -1:
raise ModuleLoadException('Must specify module with full path, including package name')
package = path[:dotpos]
classname = path[dotpos + 1:]
mlist = grouped.setdefault(package, [])
p, module = findModule(path, False)
mlist.append((classname, module))
for package, mlist in grouped.items():
# Reload each package only once
try:
p = sys.modules[package]
# Remove cache to ensure a clean import from source file
removeCache(p)
p = reload(p)
except KeyError:
try:
p = __import__(package, fromlist=[m[0] for m in mlist])
except Exception:
self._logger.warning('Failed to import a package: %r, resume others', package, exc_info = True)
failures.append('Failed to import: ' + package)
continue
except Exception:
self._logger.warning('Failed to import a package: %r, resume others', package, exc_info = True)
failures.append('Failed to import: ' + package)
continue
for cn, module in mlist:
try:
module2 = getattr(p, cn)
except AttributeError:
self._logger.warning('Cannot find module %r in package %r, resume others', package, cn)
failures.append('Failed to import: ' + package + '.' + cn)
continue
if module is not None and module is not module2:
# Update the references
try:
lpos = loadedModules.index(module)
loaded = True
except Exception:
loaded = False
for d in module.depends:
# The new reference is automatically added on import, only remove the old reference
d.referencedBy.remove(module)
if loaded and hasattr(d, '_instance'):
try:
d._instance.dependedBy.remove(module)
d._instance.dependedBy.add(module2)
except ValueError:
pass
if hasattr(module, 'referencedBy'):
for d in module.referencedBy:
pos = d.depends.index(module)
d.depends[pos] = module2
if not hasattr(module2, 'referencedBy'):
module2.referencedBy = []
module2.referencedBy.append(d)
if loaded:
loadedModules[lpos] = module2
# Start the uploaded modules
for m in loadedModules:
self.subroutine(self.loadmodule(m))
if failures:
raise ModuleLoadException('Following errors occurred during reloading, check log for more details:\n' + '\n'.join(failures)) | [
"async",
"def",
"reload_modules",
"(",
"self",
",",
"pathlist",
")",
":",
"loadedModules",
"=",
"[",
"]",
"failures",
"=",
"[",
"]",
"for",
"path",
"in",
"pathlist",
":",
"p",
",",
"module",
"=",
"findModule",
"(",
"path",
",",
"False",
")",
"if",
"m... | Reload modules with a full path in the pathlist | [
"Reload",
"modules",
"with",
"a",
"full",
"path",
"in",
"the",
"pathlist"
] | python | train |
PMEAL/OpenPNM | openpnm/io/iMorph.py | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/io/iMorph.py#L23-L204 | def load(cls, path,
node_file="throats_cellsThroatsGraph_Nodes.txt",
graph_file="throats_cellsThroatsGraph.txt",
network=None, voxel_size=None, return_geometry=False):
r"""
Loads network data from an iMorph processed image stack
Parameters
----------
path : string
The path of the folder where the subfiles are held
node_file : string
The file that describes the pores and throats, the
default iMorph name is: throats_cellsThroatsGraph_Nodes.txt
graph_file : string
The file that describes the connectivity of the network, the
default iMorph name is: throats_cellsThroatsGraph.txt
network : OpenPNM Network Object
The OpenPNM Network onto which the data should be loaded. If no
network is supplied then an empty import network is created and
returned.
voxel_size : float
Allows the user to define a voxel size different than what is
contained in the node_file. The value must be in meters.
return_geometry : Boolean
If True, then all geometrical related properties are removed from
the Network object and added to a GenericGeometry object. In this
case the method returns a tuple containing (network, geometry). If
False (default) then the returned Network will contain all
properties that were in the original file. In this case, the user
can call the ```split_geometry``` method explicitly to perform the
separation.
Returns
-------
If no Network object is supplied then one will be created and returned.
If return_geometry is True, then a tuple is returned containing both
the network and a geometry object.
"""
#
path = Path(path)
node_file = os.path.join(path.resolve(), node_file)
graph_file = os.path.join(path.resolve(), graph_file)
# parsing the nodes file
with open(node_file, 'r') as file:
Np = sp.fromstring(file.readline().rsplit('=')[1], sep='\t',
dtype=int)[0]
vox_size = sp.fromstring(file.readline().rsplit(')')[1], sep='\t',)[0]
# network always recreated to prevent errors
network = GenericNetwork(Np=Np, Nt=0)
# Define expected properies
network['pore.volume'] = sp.nan
scrap_lines = [file.readline() for line in range(4)]
while True:
vals = file.readline().split('\t')
if len(vals) == 1:
break
network['pore.volume'][int(vals[0])] = float(vals[3])
if 'pore.'+vals[2] not in network.labels():
network['pore.'+vals[2]] = False
network['pore.'+vals[2]][int(vals[0])] = True
if voxel_size is None:
voxel_size = vox_size * 1.0E-6 # file stores value in microns
if voxel_size < 0:
raise(Exception('Error - Voxel size must be specfied in ' +
'the Nodes file or as a keyword argument.'))
# parsing the graph file
with open(graph_file, 'r') as file:
# Define expected properties
network['pore.coords'] = sp.zeros((Np, 3))*sp.nan
network['pore.types'] = sp.nan
network['pore.color'] = sp.nan
network['pore.radius'] = sp.nan
network['pore.dmax'] = sp.nan
network['pore.node_number'] = sp.nan
# Scan file to get pore coordinate data
scrap_lines = [file.readline() for line in range(3)]
line = file.readline()
xmax = 0.0
ymax = 0.0
zmax = 0.0
node_num = 0
while line != 'connectivity table\n':
vals = sp.fromstring(line, sep='\t')
xmax = vals[1] if vals[1] > xmax else xmax
ymax = vals[2] if vals[2] > ymax else ymax
zmax = vals[3] if vals[3] > zmax else zmax
network['pore.coords'][int(vals[0]), :] = vals[1:4]
network['pore.types'][int(vals[0])] = vals[4]
network['pore.color'][int(vals[0])] = vals[5]
network['pore.radius'][int(vals[0])] = vals[6]
network['pore.dmax'][int(vals[0])] = vals[7]
network['pore.node_number'][int(vals[0])] = node_num
node_num += 1
line = file.readline()
# Scan file to get to connectivity data
scrap_lines.append(file.readline()) # Skip line
# Create sparse lil array incrementally build adjacency matrix
lil = sp.sparse.lil_matrix((Np, Np), dtype=int)
while True:
vals = sp.fromstring(file.readline(), sep='\t', dtype=int)
if len(vals) <= 1:
break
lil.rows[vals[0]] = vals[2:]
lil.data[vals[0]] = sp.ones(vals[1])
# fixing any negative volumes or distances so they are 1 voxel/micron
network['pore.volume'][sp.where(network['pore.volume'] < 0)[0]] = 1.0
network['pore.radius'][sp.where(network['pore.radius'] < 0)[0]] = 1.0
network['pore.dmax'][sp.where(network['pore.dmax'] < 0)[0]] = 1.0
# Add adjacency matrix to OpenPNM network
conns = sp.sparse.triu(lil, k=1, format='coo')
network.update({'throat.all': sp.ones(len(conns.col), dtype=bool)})
network['throat.conns'] = sp.vstack([conns.row, conns.col]).T
network['pore.to_trim'] = False
network['pore.to_trim'][network.pores('*throat')] = True
Ts = network.pores('to_trim')
new_conns = network.find_neighbor_pores(pores=Ts, flatten=False)
extend(network=network, throat_conns=new_conns, labels='new_conns')
for item in network.props('pore'):
item = item.split('.')[1]
arr = sp.ones_like(network['pore.'+item])[0]
arr = sp.tile(A=arr, reps=[network.Nt, 1])*sp.nan
network['throat.'+item] = sp.squeeze(arr)
network['throat.'+item][network.throats('new_conns')] = \
network['pore.'+item][Ts]
trim(network=network, pores=Ts)
# setting up boundary pores
x_coord, y_coord, z_coord = sp.hsplit(network['pore.coords'], 3)
network['pore.front_boundary'] = sp.ravel(x_coord == 0)
network['pore.back_boundary'] = sp.ravel(x_coord == xmax)
network['pore.left_boundary'] = sp.ravel(y_coord == 0)
network['pore.right_boundary'] = sp.ravel(y_coord == ymax)
network['pore.bottom_boundary'] = sp.ravel(z_coord == 0)
network['pore.top_boundary'] = sp.ravel(z_coord == zmax)
# removing any pores that got classified as a boundary pore but
# weren't labled a border_cell_face
ps = sp.where(~sp.in1d(network.pores('*_boundary'),
network.pores('border_cell_face')))[0]
ps = network.pores('*_boundary')[ps]
for side in ['front', 'back', 'left', 'right', 'top', 'bottom']:
network['pore.'+side+'_boundary'][ps] = False
# setting internal label
network['pore.internal'] = False
network['pore.internal'][network.pores('*_boundary', mode='not')] = True
# adding props to border cell face throats and from pores
Ts = sp.where(network['throat.conns'][:, 1] >
network.pores('border_cell_face')[0] - 1)[0]
faces = network['throat.conns'][Ts, 1]
for item in network.props('pore'):
item = item.split('.')[1]
network['throat.'+item][Ts] = network['pore.'+item][faces]
network['pore.volume'][faces] = 0.0
# applying unit conversions
# TODO: Determine if radius and dmax are indeed microns and not voxels
network['pore.coords'] = network['pore.coords'] * 1e-6
network['pore.radius'] = network['pore.radius'] * 1e-6
network['pore.dmax'] = network['pore.dmax'] * 1e-6
network['pore.volume'] = network['pore.volume'] * voxel_size**3
network['throat.coords'] = network['throat.coords'] * 1e-6
network['throat.radius'] = network['throat.radius'] * 1e-6
network['throat.dmax'] = network['throat.dmax'] * 1e-6
network['throat.volume'] = network['throat.volume'] * voxel_size**3
return network.project | [
"def",
"load",
"(",
"cls",
",",
"path",
",",
"node_file",
"=",
"\"throats_cellsThroatsGraph_Nodes.txt\"",
",",
"graph_file",
"=",
"\"throats_cellsThroatsGraph.txt\"",
",",
"network",
"=",
"None",
",",
"voxel_size",
"=",
"None",
",",
"return_geometry",
"=",
"False",
... | r"""
Loads network data from an iMorph processed image stack
Parameters
----------
path : string
The path of the folder where the subfiles are held
node_file : string
The file that describes the pores and throats, the
default iMorph name is: throats_cellsThroatsGraph_Nodes.txt
graph_file : string
The file that describes the connectivity of the network, the
default iMorph name is: throats_cellsThroatsGraph.txt
network : OpenPNM Network Object
The OpenPNM Network onto which the data should be loaded. If no
network is supplied then an empty import network is created and
returned.
voxel_size : float
Allows the user to define a voxel size different than what is
contained in the node_file. The value must be in meters.
return_geometry : Boolean
If True, then all geometrical related properties are removed from
the Network object and added to a GenericGeometry object. In this
case the method returns a tuple containing (network, geometry). If
False (default) then the returned Network will contain all
properties that were in the original file. In this case, the user
can call the ```split_geometry``` method explicitly to perform the
separation.
Returns
-------
If no Network object is supplied then one will be created and returned.
If return_geometry is True, then a tuple is returned containing both
the network and a geometry object. | [
"r",
"Loads",
"network",
"data",
"from",
"an",
"iMorph",
"processed",
"image",
"stack"
] | python | train |
johnbywater/eventsourcing | eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py | https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py#L374-L397 | def find_substring(substring, suffix_tree, edge_repo):
"""Returns the index if substring in tree, otherwise -1.
"""
assert isinstance(substring, str)
assert isinstance(suffix_tree, SuffixTree)
assert isinstance(edge_repo, EventSourcedRepository)
if not substring:
return -1
if suffix_tree.case_insensitive:
substring = substring.lower()
curr_node_id = suffix_tree.root_node_id
i = 0
while i < len(substring):
edge_id = make_edge_id(curr_node_id, substring[i])
try:
edge = edge_repo[edge_id]
except RepositoryKeyError:
return -1
ln = min(edge.length + 1, len(substring) - i)
if substring[i:i + ln] != suffix_tree.string[edge.first_char_index:edge.first_char_index + ln]:
return -1
i += edge.length + 1
curr_node_id = edge.dest_node_id
return edge.first_char_index - len(substring) + ln | [
"def",
"find_substring",
"(",
"substring",
",",
"suffix_tree",
",",
"edge_repo",
")",
":",
"assert",
"isinstance",
"(",
"substring",
",",
"str",
")",
"assert",
"isinstance",
"(",
"suffix_tree",
",",
"SuffixTree",
")",
"assert",
"isinstance",
"(",
"edge_repo",
... | Returns the index if substring in tree, otherwise -1. | [
"Returns",
"the",
"index",
"if",
"substring",
"in",
"tree",
"otherwise",
"-",
"1",
"."
] | python | train |
minio/minio-py | minio/helpers.py | https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/helpers.py#L308-L327 | def is_virtual_host(endpoint_url, bucket_name):
"""
Check to see if the ``bucket_name`` can be part of virtual host
style.
:param endpoint_url: Endpoint url which will be used for virtual host.
:param bucket_name: Bucket name to be validated against.
"""
is_valid_bucket_name(bucket_name)
parsed_url = urlsplit(endpoint_url)
# bucket_name can be valid but '.' in the hostname will fail
# SSL certificate validation. So do not use host-style for
# such buckets.
if 'https' in parsed_url.scheme and '.' in bucket_name:
return False
for host in ['s3.amazonaws.com', 'aliyuncs.com']:
if host in parsed_url.netloc:
return True
return False | [
"def",
"is_virtual_host",
"(",
"endpoint_url",
",",
"bucket_name",
")",
":",
"is_valid_bucket_name",
"(",
"bucket_name",
")",
"parsed_url",
"=",
"urlsplit",
"(",
"endpoint_url",
")",
"# bucket_name can be valid but '.' in the hostname will fail",
"# SSL certificate validation. ... | Check to see if the ``bucket_name`` can be part of virtual host
style.
:param endpoint_url: Endpoint url which will be used for virtual host.
:param bucket_name: Bucket name to be validated against. | [
"Check",
"to",
"see",
"if",
"the",
"bucket_name",
"can",
"be",
"part",
"of",
"virtual",
"host",
"style",
"."
] | python | train |
tensorflow/datasets | tensorflow_datasets/image/imagenet.py | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/imagenet.py#L86-L102 | def _get_validation_labels(val_path):
"""Returns labels for validation.
Args:
val_path: path to TAR file containing validation images. It is used to
retrieve the name of pictures and associate them to labels.
Returns:
dict, mapping from image name (str) to label (str).
"""
labels_path = tfds.core.get_tfds_path(_VALIDATION_LABELS_FNAME)
with tf.io.gfile.GFile(labels_path) as labels_f:
labels = labels_f.read().strip().split('\n')
with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:
tar = tarfile.open(mode='r:', fileobj=tar_f_obj)
images = sorted(tar.getnames())
return dict(zip(images, labels)) | [
"def",
"_get_validation_labels",
"(",
"val_path",
")",
":",
"labels_path",
"=",
"tfds",
".",
"core",
".",
"get_tfds_path",
"(",
"_VALIDATION_LABELS_FNAME",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"labels_path",
")",
"as",
"labels_f",
"... | Returns labels for validation.
Args:
val_path: path to TAR file containing validation images. It is used to
retrieve the name of pictures and associate them to labels.
Returns:
dict, mapping from image name (str) to label (str). | [
"Returns",
"labels",
"for",
"validation",
"."
] | python | train |
geertj/gruvi | lib/gruvi/util.py | https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/util.py#L94-L143 | def delegate_method(other, method, name=None):
"""Add a method to the current class that delegates to another method.
The *other* argument must be a property that returns the instance to
delegate to. Due to an implementation detail, the property must be defined
in the current class. The *method* argument specifies a method to delegate
to. It can be any callable as long as it takes the instances as its first
argument.
It is a common paradigm in Gruvi to expose protocol methods onto clients.
This keeps most of the logic into the protocol, but prevents the user from
having to type ``'client.protocol.*methodname*'`` all the time.
For example::
class MyClient(Client):
protocol = Client.protocol
delegate_method(protocol, MyProtocol.method)
"""
frame = sys._getframe(1)
classdict = frame.f_locals
@functools.wraps(method)
def delegate(self, *args, **kwargs):
other_self = other.__get__(self)
return method(other_self, *args, **kwargs)
if getattr(method, '__switchpoint__', False):
delegate.__switchpoint__ = True
if name is None:
name = method.__name__
propname = None
for key in classdict:
if classdict[key] is other:
propname = key
break
# If we know the property name, replace the docstring with a small
# reference instead of copying the function docstring.
if propname:
qname = getattr(method, '__qualname__', method.__name__)
if '.' in qname:
delegate.__doc__ = 'A shorthand for ``self.{propname}.{name}()``.' \
.format(name=name, propname=propname)
else:
delegate.__doc__ = 'A shorthand for ``{name}({propname}, ...)``.' \
.format(name=name, propname=propname)
classdict[name] = delegate | [
"def",
"delegate_method",
"(",
"other",
",",
"method",
",",
"name",
"=",
"None",
")",
":",
"frame",
"=",
"sys",
".",
"_getframe",
"(",
"1",
")",
"classdict",
"=",
"frame",
".",
"f_locals",
"@",
"functools",
".",
"wraps",
"(",
"method",
")",
"def",
"d... | Add a method to the current class that delegates to another method.
The *other* argument must be a property that returns the instance to
delegate to. Due to an implementation detail, the property must be defined
in the current class. The *method* argument specifies a method to delegate
to. It can be any callable as long as it takes the instances as its first
argument.
It is a common paradigm in Gruvi to expose protocol methods onto clients.
This keeps most of the logic into the protocol, but prevents the user from
having to type ``'client.protocol.*methodname*'`` all the time.
For example::
class MyClient(Client):
protocol = Client.protocol
delegate_method(protocol, MyProtocol.method) | [
"Add",
"a",
"method",
"to",
"the",
"current",
"class",
"that",
"delegates",
"to",
"another",
"method",
"."
] | python | train |
NuGrid/NuGridPy | nugridpy/astronomy.py | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/astronomy.py#L289-L312 | def mass_loss_loon05(L,Teff):
'''
mass loss rate van Loon etal (2005).
Parameters
----------
L : float
L in L_sun.
Teff : float
Teff in K.
Returns
-------
Mdot
Mdot in Msun/yr
Notes
-----
ref: van Loon etal 2005, A&A 438, 273
'''
Mdot = -5.65 + np.log10(old_div(L,10.**4)) -6.3*np.log10(old_div(Teff,3500.))
return Mdot | [
"def",
"mass_loss_loon05",
"(",
"L",
",",
"Teff",
")",
":",
"Mdot",
"=",
"-",
"5.65",
"+",
"np",
".",
"log10",
"(",
"old_div",
"(",
"L",
",",
"10.",
"**",
"4",
")",
")",
"-",
"6.3",
"*",
"np",
".",
"log10",
"(",
"old_div",
"(",
"Teff",
",",
"... | mass loss rate van Loon etal (2005).
Parameters
----------
L : float
L in L_sun.
Teff : float
Teff in K.
Returns
-------
Mdot
Mdot in Msun/yr
Notes
-----
ref: van Loon etal 2005, A&A 438, 273 | [
"mass",
"loss",
"rate",
"van",
"Loon",
"etal",
"(",
"2005",
")",
"."
] | python | train |
annoviko/pyclustering | pyclustering/cluster/clique.py | https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/clique.py#L108-L125 | def __draw_two_dimension_data(ax, data, pair):
"""!
@brief Display data in two-dimensional canvas.
@param[in] ax (Axis): Canvas where data should be displayed.
@param[in] data (list): Data points that should be displayed.
@param[in] pair (tuple): Pair of dimension indexes.
"""
ax.set_xlabel("x%d" % pair[0])
ax.set_ylabel("x%d" % pair[1])
for point in data:
if len(data[0]) > 1:
ax.plot(point[pair[0]], point[pair[1]], color='red', marker='.')
else:
ax.plot(point[pair[0]], 0, color='red', marker='.')
ax.yaxis.set_ticklabels([]) | [
"def",
"__draw_two_dimension_data",
"(",
"ax",
",",
"data",
",",
"pair",
")",
":",
"ax",
".",
"set_xlabel",
"(",
"\"x%d\"",
"%",
"pair",
"[",
"0",
"]",
")",
"ax",
".",
"set_ylabel",
"(",
"\"x%d\"",
"%",
"pair",
"[",
"1",
"]",
")",
"for",
"point",
"... | !
@brief Display data in two-dimensional canvas.
@param[in] ax (Axis): Canvas where data should be displayed.
@param[in] data (list): Data points that should be displayed.
@param[in] pair (tuple): Pair of dimension indexes. | [
"!"
] | python | valid |
pyQode/pyqode.core | pyqode/core/modes/filewatcher.py | https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/modes/filewatcher.py#L184-L193 | def _check_for_pending(self, *args, **kwargs):
"""
Checks if a notification is pending.
"""
if self._notification_pending and not self._processing:
self._processing = True
args, kwargs = self._data
self._notify(*args, **kwargs)
self._notification_pending = False
self._processing = False | [
"def",
"_check_for_pending",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_notification_pending",
"and",
"not",
"self",
".",
"_processing",
":",
"self",
".",
"_processing",
"=",
"True",
"args",
",",
"kwargs",
"=",... | Checks if a notification is pending. | [
"Checks",
"if",
"a",
"notification",
"is",
"pending",
"."
] | python | train |
trec-kba/streamcorpus-pipeline | streamcorpus_pipeline/_run_lingpipe.py | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_run_lingpipe.py#L126-L192 | def align_chunk_with_ner(tmp_ner_path, i_chunk, tmp_done_path):
'''
iterate through the i_chunk and tmp_ner_path to generate a new
Chunk with body.ner
'''
o_chunk = Chunk()
input_iter = i_chunk.__iter__()
ner = ''
stream_id = None
all_ner = xml.dom.minidom.parse(open(tmp_ner_path))
for raw_ner in all_ner.getElementsByTagName('FILENAME'):
stream_item = input_iter.next()
## get stream_id out of the XML
stream_id = raw_ner.attributes.get('docid').value
assert stream_id and stream_id == stream_item.stream_id, \
'%s != %s\nner=%r' % (stream_id, stream_item.stream_id, ner)
tagger_id = 'lingpipe'
tagging = Tagging()
tagging.tagger_id = tagger_id
## get this one file out of its FILENAME tags
tagged_doc = list(lingpipe.files(raw_ner.toxml()))[0][1]
tagging.raw_tagging = tagged_doc
tagging.generation_time = streamcorpus.make_stream_time()
stream_item.body.taggings[tagger_id] = tagging
sentences = list(lingpipe.sentences(tagged_doc))
## make JS labels on individual tokens
assert stream_item.ratings[0].mentions, stream_item.stream_id
john_smith_label = Label()
john_smith_label.annotator = stream_item.ratings[0].annotator
john_smith_label.target_id = stream_item.ratings[0].target_id
# first map all corefchains to their words
equiv_ids = collections.defaultdict(lambda: set())
for sent in sentences:
for tok in sent.tokens:
if tok.entity_type is not None:
equiv_ids[tok.equiv_id].add(cleanse(tok.token))
## find all the chains that are John Smith
johnsmiths = set()
for equiv_id, names in equiv_ids.items():
## detect 'smith' in 'smithye'
_names = cleanse(' '.join(names))
if 'john' in _names and 'smith' in _names:
johnsmiths.add(equiv_id)
print len(johnsmiths)
## now apply the label
for sent in sentences:
for tok in sent.tokens:
if tok.equiv_id in johnsmiths:
tok.labels = [john_smith_label]
stream_item.body.sentences[tagger_id] = sentences
o_chunk.add(stream_item)
## put the o_chunk bytes into the specified file
open(tmp_done_path, 'wb').write(str(o_chunk))
## replace this with log.info()
print 'created %s' % tmp_done_path | [
"def",
"align_chunk_with_ner",
"(",
"tmp_ner_path",
",",
"i_chunk",
",",
"tmp_done_path",
")",
":",
"o_chunk",
"=",
"Chunk",
"(",
")",
"input_iter",
"=",
"i_chunk",
".",
"__iter__",
"(",
")",
"ner",
"=",
"''",
"stream_id",
"=",
"None",
"all_ner",
"=",
"xml... | iterate through the i_chunk and tmp_ner_path to generate a new
Chunk with body.ner | [
"iterate",
"through",
"the",
"i_chunk",
"and",
"tmp_ner_path",
"to",
"generate",
"a",
"new",
"Chunk",
"with",
"body",
".",
"ner"
] | python | test |
kodexlab/reliure | reliure/utils/log.py | https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/utils/log.py#L76-L119 | def get_app_logger_color(appname, app_log_level=logging.INFO, log_level=logging.WARN, logfile=None):
""" Configure the logging for an app using reliure (it log's both the app and reliure lib)
:param appname: the name of the application to log
:parap app_log_level: log level for the app
:param log_level: log level for the reliure
:param logfile: file that store the log, time rotating file (by day), no if None
"""
# create lib handler
stderr_handler = logging.StreamHandler()
stderr_handler.setLevel(log_level)
# create formatter and add it to the handlers
name = "reliure"
name += "_"*(max(0, len(appname)-len(name)))
formatter = ColorFormatter('$BG-BLUE$WHITE%s$RESET:%%(asctime)s:$COLOR%%(levelname)s$RESET:$BOLD%%(name)s$RESET: %%(message)s' % name)
stderr_handler.setFormatter(formatter)
# get the logers it self
logger = logging.getLogger("reliure")
logger.setLevel(logging.DEBUG)
# add the handlers to the loggers
logger.addHandler(stderr_handler)
# create app handler
app_stderr_handler = logging.StreamHandler()
app_stderr_handler.setLevel(app_log_level)
# create formatter and add it to the handlers
app_formatter = ColorFormatter("$BG-CYAN$WHITE%s$RESET:%%(asctime)s:$COLOR%%(levelname)s$RESET:$BOLD%%(name)s$RESET: %%(message)s" % appname.upper())
app_stderr_handler.setFormatter(app_formatter)
# get the logers it self
app_logger = logging.getLogger(appname)
app_logger.setLevel(logging.DEBUG)
# add the handlers to the loggers
app_logger.addHandler(app_stderr_handler)
if logfile is not None:
file_format = '%(asctime)s:%(levelname)s:%(name)s: %(message)s'
from logging.handlers import TimedRotatingFileHandler
file_handler = TimedRotatingFileHandler(logfile, when="D", interval=1, backupCount=7)
file_handler.setFormatter(logging.Formatter(file_format))
# add the handlers to the loggers
logger.addHandler(file_handler)
# add the handlers to the loggers
app_logger.addHandler(file_handler)
return app_logger | [
"def",
"get_app_logger_color",
"(",
"appname",
",",
"app_log_level",
"=",
"logging",
".",
"INFO",
",",
"log_level",
"=",
"logging",
".",
"WARN",
",",
"logfile",
"=",
"None",
")",
":",
"# create lib handler",
"stderr_handler",
"=",
"logging",
".",
"StreamHandler"... | Configure the logging for an app using reliure (it log's both the app and reliure lib)
:param appname: the name of the application to log
:parap app_log_level: log level for the app
:param log_level: log level for the reliure
:param logfile: file that store the log, time rotating file (by day), no if None | [
"Configure",
"the",
"logging",
"for",
"an",
"app",
"using",
"reliure",
"(",
"it",
"log",
"s",
"both",
"the",
"app",
"and",
"reliure",
"lib",
")"
] | python | train |
OpenHydrology/floodestimation | floodestimation/fehdata.py | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/fehdata.py#L137-L157 | def nrfa_metadata():
"""
Return metadata on the NRFA data.
Returned metadata is a dict with the following elements:
- `url`: string with NRFA data download URL
- `version`: string with NRFA version number, e.g. '3.3.4'
- `published_on`: datetime of data release/publication (only month and year are accurate, rest should be ignored)
- `downloaded_on`: datetime of last download
:return: metadata
:rtype: dict
"""
result = {
'url': config.get('nrfa', 'url', fallback=None) or None, # Empty strings '' become None
'version': config.get('nrfa', 'version', fallback=None) or None,
'published_on': config.get_datetime('nrfa', 'published_on', fallback=None) or None,
'downloaded_on': config.get_datetime('nrfa', 'downloaded_on', fallback=None) or None
}
return result | [
"def",
"nrfa_metadata",
"(",
")",
":",
"result",
"=",
"{",
"'url'",
":",
"config",
".",
"get",
"(",
"'nrfa'",
",",
"'url'",
",",
"fallback",
"=",
"None",
")",
"or",
"None",
",",
"# Empty strings '' become None",
"'version'",
":",
"config",
".",
"get",
"(... | Return metadata on the NRFA data.
Returned metadata is a dict with the following elements:
- `url`: string with NRFA data download URL
- `version`: string with NRFA version number, e.g. '3.3.4'
- `published_on`: datetime of data release/publication (only month and year are accurate, rest should be ignored)
- `downloaded_on`: datetime of last download
:return: metadata
:rtype: dict | [
"Return",
"metadata",
"on",
"the",
"NRFA",
"data",
"."
] | python | train |
jayclassless/tidypy | src/tidypy/config.py | https://github.com/jayclassless/tidypy/blob/3c3497ca377fbbe937103b77b02b326c860c748f/src/tidypy/config.py#L124-L135 | def purge_config_cache(location=None):
"""
Clears out the cache of TidyPy configurations that were retrieved from
outside the normal locations.
"""
cache_path = get_cache_path(location)
if location:
os.remove(cache_path)
else:
shutil.rmtree(cache_path) | [
"def",
"purge_config_cache",
"(",
"location",
"=",
"None",
")",
":",
"cache_path",
"=",
"get_cache_path",
"(",
"location",
")",
"if",
"location",
":",
"os",
".",
"remove",
"(",
"cache_path",
")",
"else",
":",
"shutil",
".",
"rmtree",
"(",
"cache_path",
")"... | Clears out the cache of TidyPy configurations that were retrieved from
outside the normal locations. | [
"Clears",
"out",
"the",
"cache",
"of",
"TidyPy",
"configurations",
"that",
"were",
"retrieved",
"from",
"outside",
"the",
"normal",
"locations",
"."
] | python | valid |
christophertbrown/bioscripts | ctbBio/16SfromHMM.py | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L85-L101 | def hit_groups(hits):
"""
* each sequence may have more than one 16S rRNA gene
* group hits for each gene
"""
groups = []
current = False
for hit in sorted(hits, key = itemgetter(0)):
if current is False:
current = [hit]
elif check_overlap(current, hit) is True or check_order(current, hit) is False:
groups.append(current)
current = [hit]
else:
current.append(hit)
groups.append(current)
return groups | [
"def",
"hit_groups",
"(",
"hits",
")",
":",
"groups",
"=",
"[",
"]",
"current",
"=",
"False",
"for",
"hit",
"in",
"sorted",
"(",
"hits",
",",
"key",
"=",
"itemgetter",
"(",
"0",
")",
")",
":",
"if",
"current",
"is",
"False",
":",
"current",
"=",
... | * each sequence may have more than one 16S rRNA gene
* group hits for each gene | [
"*",
"each",
"sequence",
"may",
"have",
"more",
"than",
"one",
"16S",
"rRNA",
"gene",
"*",
"group",
"hits",
"for",
"each",
"gene"
] | python | train |
estnltk/estnltk | estnltk/text.py | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L401-L405 | def paragraph_starts(self):
"""The start positions of ``paragraphs`` layer elements."""
if not self.is_tagged(PARAGRAPHS):
self.tokenize_paragraphs()
return self.starts(PARAGRAPHS) | [
"def",
"paragraph_starts",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_tagged",
"(",
"PARAGRAPHS",
")",
":",
"self",
".",
"tokenize_paragraphs",
"(",
")",
"return",
"self",
".",
"starts",
"(",
"PARAGRAPHS",
")"
] | The start positions of ``paragraphs`` layer elements. | [
"The",
"start",
"positions",
"of",
"paragraphs",
"layer",
"elements",
"."
] | python | train |
pbrod/numdifftools | src/numdifftools/step_generators.py | https://github.com/pbrod/numdifftools/blob/2c88878df732c9c6629febea56e7a91fd898398d/src/numdifftools/step_generators.py#L21-L29 | def valarray(shape, value=np.NaN, typecode=None):
"""Return an array of all value."""
if typecode is None:
typecode = bool
out = np.ones(shape, dtype=typecode) * value
if not isinstance(out, np.ndarray):
out = np.asarray(out)
return out | [
"def",
"valarray",
"(",
"shape",
",",
"value",
"=",
"np",
".",
"NaN",
",",
"typecode",
"=",
"None",
")",
":",
"if",
"typecode",
"is",
"None",
":",
"typecode",
"=",
"bool",
"out",
"=",
"np",
".",
"ones",
"(",
"shape",
",",
"dtype",
"=",
"typecode",
... | Return an array of all value. | [
"Return",
"an",
"array",
"of",
"all",
"value",
"."
] | python | train |
buriburisuri/sugartensor | sugartensor/sg_initializer.py | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_initializer.py#L152-L184 | def orthogonal(name, shape, scale=1.1, dtype=tf.sg_floatx, summary=True, regularizer=None, trainable=True):
r"""Creates a tensor variable of which initial values are of
an orthogonal ndarray.
See [Saxe et al. 2014.](http://arxiv.org/pdf/1312.6120.pdf)
Args:
name: The name of new variable.
shape: A tuple/list of integers.
scale: A Python scalar.
dtype: Either float32 or float64.
summary: If True, add this constant to tensor board summary.
regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable
will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization
trainable: If True, add this constant to trainable collection. Default is True.
Returns:
A `Variable`.
"""
flat_shape = (shape[0], np.prod(shape[1:]))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
# pick the one with the correct shape
q = u if u.shape == flat_shape else v
q = q.reshape(shape)
# create variable
x = tf.get_variable(name,
initializer=tf.constant(scale * q[:shape[0], :shape[1]], dtype=dtype),
regularizer=regularizer, trainable=trainable)
# add summary
if summary:
tf.sg_summary_param(x)
return x | [
"def",
"orthogonal",
"(",
"name",
",",
"shape",
",",
"scale",
"=",
"1.1",
",",
"dtype",
"=",
"tf",
".",
"sg_floatx",
",",
"summary",
"=",
"True",
",",
"regularizer",
"=",
"None",
",",
"trainable",
"=",
"True",
")",
":",
"flat_shape",
"=",
"(",
"shape... | r"""Creates a tensor variable of which initial values are of
an orthogonal ndarray.
See [Saxe et al. 2014.](http://arxiv.org/pdf/1312.6120.pdf)
Args:
name: The name of new variable.
shape: A tuple/list of integers.
scale: A Python scalar.
dtype: Either float32 or float64.
summary: If True, add this constant to tensor board summary.
regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable
will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization
trainable: If True, add this constant to trainable collection. Default is True.
Returns:
A `Variable`. | [
"r",
"Creates",
"a",
"tensor",
"variable",
"of",
"which",
"initial",
"values",
"are",
"of",
"an",
"orthogonal",
"ndarray",
".",
"See",
"[",
"Saxe",
"et",
"al",
".",
"2014",
".",
"]",
"(",
"http",
":",
"//",
"arxiv",
".",
"org",
"/",
"pdf",
"/",
"13... | python | train |
Ex-Mente/auxi.0 | auxi/modelling/process/materials/thermo.py | https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/modelling/process/materials/thermo.py#L1666-L1676 | def afr(self):
"""
Determine the sum of amount flow rates of all the compounds.
:returns: Amount flow rate. [kmol/h]
"""
result = 0.0
for compound in self.material.compounds:
result += self.get_compound_afr(compound)
return result | [
"def",
"afr",
"(",
"self",
")",
":",
"result",
"=",
"0.0",
"for",
"compound",
"in",
"self",
".",
"material",
".",
"compounds",
":",
"result",
"+=",
"self",
".",
"get_compound_afr",
"(",
"compound",
")",
"return",
"result"
] | Determine the sum of amount flow rates of all the compounds.
:returns: Amount flow rate. [kmol/h] | [
"Determine",
"the",
"sum",
"of",
"amount",
"flow",
"rates",
"of",
"all",
"the",
"compounds",
"."
] | python | valid |
annoviko/pyclustering | pyclustering/cluster/bang.py | https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/bang.py#L1128-L1141 | def __validate_arguments(self):
"""!
@brief Check input arguments of BANG algorithm and if one of them is not correct then appropriate exception
is thrown.
"""
if self.__levels <= 0:
raise ValueError("Incorrect amount of levels '%d'. Level value should be greater than 0." % self.__levels)
if len(self.__data) == 0:
raise ValueError("Empty input data. Data should contain at least one point.")
if self.__density_threshold < 0:
raise ValueError("Incorrect density threshold '%f'. Density threshold should not be negative." % self.__density_threshold) | [
"def",
"__validate_arguments",
"(",
"self",
")",
":",
"if",
"self",
".",
"__levels",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Incorrect amount of levels '%d'. Level value should be greater than 0.\"",
"%",
"self",
".",
"__levels",
")",
"if",
"len",
"(",
"self"... | !
@brief Check input arguments of BANG algorithm and if one of them is not correct then appropriate exception
is thrown. | [
"!"
] | python | valid |
AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L3833-L3868 | def ekacld(handle, segno, column, dvals, entszs, nlflgs, rcptrs, wkindx):
"""
Add an entire double precision column to an EK segment.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekacld_c.html
:param handle: EK file handle.
:type handle: int
:param segno: Number of segment to add column to.
:type segno: int
:param column: Column name.
:type column: str
:param dvals: Double precision values to add to column.
:type dvals: Array of floats
:param entszs: Array of sizes of column entries.
:type entszs: Array of ints
:param nlflgs: Array of null flags for column entries.
:type nlflgs: Array of bools
:param rcptrs: Record pointers for segment.
:type rcptrs: Array of ints
:param wkindx: Work space for column index.
:type wkindx: Array of ints
:return: Work space for column index.
:rtype: Array of ints
"""
handle = ctypes.c_int(handle)
segno = ctypes.c_int(segno)
column = stypes.stringToCharP(column)
dvals = stypes.toDoubleVector(dvals)
entszs = stypes.toIntVector(entszs)
nlflgs = stypes.toIntVector(nlflgs)
rcptrs = stypes.toIntVector(rcptrs)
wkindx = stypes.toIntVector(wkindx)
libspice.ekacld_c(handle, segno, column, dvals, entszs, nlflgs, rcptrs,
wkindx)
return stypes.cVectorToPython(wkindx) | [
"def",
"ekacld",
"(",
"handle",
",",
"segno",
",",
"column",
",",
"dvals",
",",
"entszs",
",",
"nlflgs",
",",
"rcptrs",
",",
"wkindx",
")",
":",
"handle",
"=",
"ctypes",
".",
"c_int",
"(",
"handle",
")",
"segno",
"=",
"ctypes",
".",
"c_int",
"(",
"... | Add an entire double precision column to an EK segment.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekacld_c.html
:param handle: EK file handle.
:type handle: int
:param segno: Number of segment to add column to.
:type segno: int
:param column: Column name.
:type column: str
:param dvals: Double precision values to add to column.
:type dvals: Array of floats
:param entszs: Array of sizes of column entries.
:type entszs: Array of ints
:param nlflgs: Array of null flags for column entries.
:type nlflgs: Array of bools
:param rcptrs: Record pointers for segment.
:type rcptrs: Array of ints
:param wkindx: Work space for column index.
:type wkindx: Array of ints
:return: Work space for column index.
:rtype: Array of ints | [
"Add",
"an",
"entire",
"double",
"precision",
"column",
"to",
"an",
"EK",
"segment",
"."
] | python | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.