text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def prepare_timestamp_micros(data, schema):
"""Converts datetime.datetime to int timestamp with microseconds"""
if isinstance(data, datetime.datetime):
if data.tzinfo is not None:
delta = (data - epoch)
return int(delta.total_seconds() * MCS_PER_SECOND)
t = int(time.mktime(data.timetuple())) * MCS_PER_SECOND + \
data.microsecond
return t
else:
return data | [
"def",
"prepare_timestamp_micros",
"(",
"data",
",",
"schema",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"datetime",
".",
"datetime",
")",
":",
"if",
"data",
".",
"tzinfo",
"is",
"not",
"None",
":",
"delta",
"=",
"(",
"data",
"-",
"epoch",
")",
"return",
"int",
"(",
"delta",
".",
"total_seconds",
"(",
")",
"*",
"MCS_PER_SECOND",
")",
"t",
"=",
"int",
"(",
"time",
".",
"mktime",
"(",
"data",
".",
"timetuple",
"(",
")",
")",
")",
"*",
"MCS_PER_SECOND",
"+",
"data",
".",
"microsecond",
"return",
"t",
"else",
":",
"return",
"data"
] | 38.818182 | 14 |
def remap_overlapping_column_names(table_op, root_table, data_columns):
"""Return an ``OrderedDict`` mapping possibly suffixed column names to
column names without suffixes.
Parameters
----------
table_op : TableNode
The ``TableNode`` we're selecting from.
root_table : TableNode
The root table of the expression we're selecting from.
data_columns : set or frozenset
The available columns to select from
Returns
-------
mapping : OrderedDict[str, str]
A map from possibly-suffixed column names to column names without
suffixes.
"""
if not isinstance(table_op, ops.Join):
return None
left_root, right_root = ops.distinct_roots(table_op.left, table_op.right)
suffixes = {
left_root: constants.LEFT_JOIN_SUFFIX,
right_root: constants.RIGHT_JOIN_SUFFIX,
}
column_names = [
({name, name + suffixes[root_table]} & data_columns, name)
for name in root_table.schema.names
]
mapping = OrderedDict(
(first(col_name), final_name)
for col_name, final_name in column_names
if col_name
)
return mapping | [
"def",
"remap_overlapping_column_names",
"(",
"table_op",
",",
"root_table",
",",
"data_columns",
")",
":",
"if",
"not",
"isinstance",
"(",
"table_op",
",",
"ops",
".",
"Join",
")",
":",
"return",
"None",
"left_root",
",",
"right_root",
"=",
"ops",
".",
"distinct_roots",
"(",
"table_op",
".",
"left",
",",
"table_op",
".",
"right",
")",
"suffixes",
"=",
"{",
"left_root",
":",
"constants",
".",
"LEFT_JOIN_SUFFIX",
",",
"right_root",
":",
"constants",
".",
"RIGHT_JOIN_SUFFIX",
",",
"}",
"column_names",
"=",
"[",
"(",
"{",
"name",
",",
"name",
"+",
"suffixes",
"[",
"root_table",
"]",
"}",
"&",
"data_columns",
",",
"name",
")",
"for",
"name",
"in",
"root_table",
".",
"schema",
".",
"names",
"]",
"mapping",
"=",
"OrderedDict",
"(",
"(",
"first",
"(",
"col_name",
")",
",",
"final_name",
")",
"for",
"col_name",
",",
"final_name",
"in",
"column_names",
"if",
"col_name",
")",
"return",
"mapping"
] | 30.783784 | 19.351351 |
def get_section_relations(Section):
"""Find every relationship between section and the item model."""
all_rels = (Section._meta.get_all_related_objects() +
Section._meta.get_all_related_many_to_many_objects())
return filter_item_rels(all_rels) | [
"def",
"get_section_relations",
"(",
"Section",
")",
":",
"all_rels",
"=",
"(",
"Section",
".",
"_meta",
".",
"get_all_related_objects",
"(",
")",
"+",
"Section",
".",
"_meta",
".",
"get_all_related_many_to_many_objects",
"(",
")",
")",
"return",
"filter_item_rels",
"(",
"all_rels",
")"
] | 53.4 | 10.8 |
def messageReceived(self, value, sender, target):
"""
An AMP-formatted message was received. Dispatch to the appropriate
command responder, i.e. a method on this object exposed with
L{commandMethod.expose}.
@see IMessageReceiver.messageReceived
"""
if value.type != AMP_MESSAGE_TYPE:
raise UnknownMessageType()
inputBox = self._boxFromData(value.data)
thunk = commandMethod.responderForName(self, inputBox[COMMAND])
placeholder = _ProtocolPlaceholder(sender, target)
arguments = thunk.command.parseArguments(inputBox, placeholder)
try:
result = thunk(**arguments)
except tuple(thunk.command.errors.keys()), knownError:
errorCode = thunk.command.errors[knownError.__class__]
raise RevertAndRespond(
Value(AMP_ANSWER_TYPE,
Box(_error_code=errorCode,
_error_description=str(knownError)).serialize()))
else:
response = thunk.command.makeResponse(result, None)
return Value(AMP_ANSWER_TYPE, response.serialize()) | [
"def",
"messageReceived",
"(",
"self",
",",
"value",
",",
"sender",
",",
"target",
")",
":",
"if",
"value",
".",
"type",
"!=",
"AMP_MESSAGE_TYPE",
":",
"raise",
"UnknownMessageType",
"(",
")",
"inputBox",
"=",
"self",
".",
"_boxFromData",
"(",
"value",
".",
"data",
")",
"thunk",
"=",
"commandMethod",
".",
"responderForName",
"(",
"self",
",",
"inputBox",
"[",
"COMMAND",
"]",
")",
"placeholder",
"=",
"_ProtocolPlaceholder",
"(",
"sender",
",",
"target",
")",
"arguments",
"=",
"thunk",
".",
"command",
".",
"parseArguments",
"(",
"inputBox",
",",
"placeholder",
")",
"try",
":",
"result",
"=",
"thunk",
"(",
"*",
"*",
"arguments",
")",
"except",
"tuple",
"(",
"thunk",
".",
"command",
".",
"errors",
".",
"keys",
"(",
")",
")",
",",
"knownError",
":",
"errorCode",
"=",
"thunk",
".",
"command",
".",
"errors",
"[",
"knownError",
".",
"__class__",
"]",
"raise",
"RevertAndRespond",
"(",
"Value",
"(",
"AMP_ANSWER_TYPE",
",",
"Box",
"(",
"_error_code",
"=",
"errorCode",
",",
"_error_description",
"=",
"str",
"(",
"knownError",
")",
")",
".",
"serialize",
"(",
")",
")",
")",
"else",
":",
"response",
"=",
"thunk",
".",
"command",
".",
"makeResponse",
"(",
"result",
",",
"None",
")",
"return",
"Value",
"(",
"AMP_ANSWER_TYPE",
",",
"response",
".",
"serialize",
"(",
")",
")"
] | 45.56 | 16.92 |
def expect_loop(self, searcher, timeout=-1, searchwindowsize=-1):
'''This is the common loop used inside expect. The 'searcher' should be
an instance of searcher_re or searcher_string, which describes how and
what to search for in the input.
See expect() for other arguments, return value and exceptions. '''
exp = Expecter(self, searcher, searchwindowsize)
return exp.expect_loop(timeout) | [
"def",
"expect_loop",
"(",
"self",
",",
"searcher",
",",
"timeout",
"=",
"-",
"1",
",",
"searchwindowsize",
"=",
"-",
"1",
")",
":",
"exp",
"=",
"Expecter",
"(",
"self",
",",
"searcher",
",",
"searchwindowsize",
")",
"return",
"exp",
".",
"expect_loop",
"(",
"timeout",
")"
] | 47.888889 | 25.888889 |
def unregister_checker(self, checker):
"""Unregister a checker instance."""
if checker in self._checkers:
self._checkers.remove(checker) | [
"def",
"unregister_checker",
"(",
"self",
",",
"checker",
")",
":",
"if",
"checker",
"in",
"self",
".",
"_checkers",
":",
"self",
".",
"_checkers",
".",
"remove",
"(",
"checker",
")"
] | 40.25 | 1.75 |
def delete(cls, session, record, endpoint_override=None, out_type=None):
"""Delete a record.
Args:
session (requests.sessions.Session): Authenticated session.
record (helpscout.BaseModel): The record to be deleted.
endpoint_override (str, optional): Override the default
endpoint using this.
out_type (helpscout.BaseModel, optional): The type of record to
output. This should be provided by child classes, by calling
super.
Returns:
NoneType: Nothing.
"""
cls._check_implements('delete')
return cls(
endpoint_override or '/%s/%s.json' % (
cls.__endpoint__, record.id,
),
request_type=RequestPaginator.DELETE,
singleton=True,
session=session,
out_type=out_type,
) | [
"def",
"delete",
"(",
"cls",
",",
"session",
",",
"record",
",",
"endpoint_override",
"=",
"None",
",",
"out_type",
"=",
"None",
")",
":",
"cls",
".",
"_check_implements",
"(",
"'delete'",
")",
"return",
"cls",
"(",
"endpoint_override",
"or",
"'/%s/%s.json'",
"%",
"(",
"cls",
".",
"__endpoint__",
",",
"record",
".",
"id",
",",
")",
",",
"request_type",
"=",
"RequestPaginator",
".",
"DELETE",
",",
"singleton",
"=",
"True",
",",
"session",
"=",
"session",
",",
"out_type",
"=",
"out_type",
",",
")"
] | 35.68 | 19.52 |
def reconfigure_bird(cmd):
"""Reconfigure BIRD daemon.
Arguments:
cmd (string): A command to trigger a reconfiguration of Bird daemon
Notes:
Runs 'birdc configure' to reconfigure BIRD. Some useful information on
how birdc tool works:
-- Returns a non-zero exit code only when it can't access BIRD
daemon via the control socket (/var/run/bird.ctl). This happens
when BIRD daemon is either down or when the caller of birdc
doesn't have access to the control socket.
-- Returns zero exit code when reconfigure fails due to invalid
configuration. Thus, we catch this case by looking at the output
and not at the exit code.
-- Returns zero exit code when reconfigure was successful.
-- Should never timeout, if it does then it is a bug.
"""
log = logging.getLogger(PROGRAM_NAME)
cmd = shlex.split(cmd)
log.info("reconfiguring BIRD by running %s", ' '.join(cmd))
try:
output = subprocess.check_output(
cmd,
timeout=2,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
except subprocess.TimeoutExpired:
log.error("reconfiguring bird timed out")
return
except subprocess.CalledProcessError as error:
# birdc returns 0 even when it fails due to invalid config,
# but it returns 1 when BIRD is down.
log.error("reconfiguring BIRD failed, either BIRD daemon is down or "
"we don't have privileges to reconfigure it (sudo problems?)"
":%s", error.output.strip())
return
except FileNotFoundError as error:
log.error("reconfiguring BIRD failed with: %s", error)
return
# 'Reconfigured' string will be in the output if and only if conf is valid.
pattern = re.compile('^Reconfigured$', re.MULTILINE)
if pattern.search(str(output)):
log.info('reconfigured BIRD daemon')
else:
# We will end up here only if we generated an invalid conf
# or someone broke bird.conf.
log.error("reconfiguring BIRD returned error, most likely we generated"
" an invalid configuration file or Bird configuration in is "
"broken:%s", output) | [
"def",
"reconfigure_bird",
"(",
"cmd",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"PROGRAM_NAME",
")",
"cmd",
"=",
"shlex",
".",
"split",
"(",
"cmd",
")",
"log",
".",
"info",
"(",
"\"reconfiguring BIRD by running %s\"",
",",
"' '",
".",
"join",
"(",
"cmd",
")",
")",
"try",
":",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"cmd",
",",
"timeout",
"=",
"2",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"universal_newlines",
"=",
"True",
",",
")",
"except",
"subprocess",
".",
"TimeoutExpired",
":",
"log",
".",
"error",
"(",
"\"reconfiguring bird timed out\"",
")",
"return",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"error",
":",
"# birdc returns 0 even when it fails due to invalid config,",
"# but it returns 1 when BIRD is down.",
"log",
".",
"error",
"(",
"\"reconfiguring BIRD failed, either BIRD daemon is down or \"",
"\"we don't have privileges to reconfigure it (sudo problems?)\"",
"\":%s\"",
",",
"error",
".",
"output",
".",
"strip",
"(",
")",
")",
"return",
"except",
"FileNotFoundError",
"as",
"error",
":",
"log",
".",
"error",
"(",
"\"reconfiguring BIRD failed with: %s\"",
",",
"error",
")",
"return",
"# 'Reconfigured' string will be in the output if and only if conf is valid.",
"pattern",
"=",
"re",
".",
"compile",
"(",
"'^Reconfigured$'",
",",
"re",
".",
"MULTILINE",
")",
"if",
"pattern",
".",
"search",
"(",
"str",
"(",
"output",
")",
")",
":",
"log",
".",
"info",
"(",
"'reconfigured BIRD daemon'",
")",
"else",
":",
"# We will end up here only if we generated an invalid conf",
"# or someone broke bird.conf.",
"log",
".",
"error",
"(",
"\"reconfiguring BIRD returned error, most likely we generated\"",
"\" an invalid configuration file or Bird configuration in is \"",
"\"broken:%s\"",
",",
"output",
")"
] | 42.574074 | 21.388889 |
def __extract_model_summary_value(model, value):
"""
Extract a model summary field value
"""
field_value = None
if isinstance(value, _precomputed_field):
field_value = value.field
else:
field_value = model._get(value)
if isinstance(field_value, float):
try:
field_value = round(field_value, 4)
except:
pass
return field_value | [
"def",
"__extract_model_summary_value",
"(",
"model",
",",
"value",
")",
":",
"field_value",
"=",
"None",
"if",
"isinstance",
"(",
"value",
",",
"_precomputed_field",
")",
":",
"field_value",
"=",
"value",
".",
"field",
"else",
":",
"field_value",
"=",
"model",
".",
"_get",
"(",
"value",
")",
"if",
"isinstance",
"(",
"field_value",
",",
"float",
")",
":",
"try",
":",
"field_value",
"=",
"round",
"(",
"field_value",
",",
"4",
")",
"except",
":",
"pass",
"return",
"field_value"
] | 26.6 | 11.666667 |
def is_repository(self, path):
"""
Check if there is a Repository in path.
:Parameters:
#. path (string): The real path of the directory where to check if there is a repository.
:Returns:
#. result (boolean): Whether its a repository or not.
"""
realPath = os.path.realpath( os.path.expanduser(path) )
if not os.path.isdir(realPath):
return False
if ".pyrepinfo" not in os.listdir(realPath):
return False
return True | [
"def",
"is_repository",
"(",
"self",
",",
"path",
")",
":",
"realPath",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"realPath",
")",
":",
"return",
"False",
"if",
"\".pyrepinfo\"",
"not",
"in",
"os",
".",
"listdir",
"(",
"realPath",
")",
":",
"return",
"False",
"return",
"True"
] | 32.6875 | 19.6875 |
def decompress_G1(z: G1Compressed) -> G1Uncompressed:
"""
Recovers x and y coordinates from the compressed point.
"""
# b_flag == 1 indicates the infinity point
b_flag = (z % POW_2_383) // POW_2_382
if b_flag == 1:
return Z1
x = z % POW_2_381
# Try solving y coordinate from the equation Y^2 = X^3 + b
# using quadratic residue
y = pow((x**3 + b.n) % q, (q + 1) // 4, q)
if pow(y, 2, q) != (x**3 + b.n) % q:
raise ValueError(
"The given point is not on G1: y**2 = x**3 + b"
)
# Choose the y whose leftmost bit is equal to the a_flag
a_flag = (z % POW_2_382) // POW_2_381
if (y * 2) // q != a_flag:
y = q - y
return (FQ(x), FQ(y), FQ(1)) | [
"def",
"decompress_G1",
"(",
"z",
":",
"G1Compressed",
")",
"->",
"G1Uncompressed",
":",
"# b_flag == 1 indicates the infinity point",
"b_flag",
"=",
"(",
"z",
"%",
"POW_2_383",
")",
"//",
"POW_2_382",
"if",
"b_flag",
"==",
"1",
":",
"return",
"Z1",
"x",
"=",
"z",
"%",
"POW_2_381",
"# Try solving y coordinate from the equation Y^2 = X^3 + b",
"# using quadratic residue",
"y",
"=",
"pow",
"(",
"(",
"x",
"**",
"3",
"+",
"b",
".",
"n",
")",
"%",
"q",
",",
"(",
"q",
"+",
"1",
")",
"//",
"4",
",",
"q",
")",
"if",
"pow",
"(",
"y",
",",
"2",
",",
"q",
")",
"!=",
"(",
"x",
"**",
"3",
"+",
"b",
".",
"n",
")",
"%",
"q",
":",
"raise",
"ValueError",
"(",
"\"The given point is not on G1: y**2 = x**3 + b\"",
")",
"# Choose the y whose leftmost bit is equal to the a_flag",
"a_flag",
"=",
"(",
"z",
"%",
"POW_2_382",
")",
"//",
"POW_2_381",
"if",
"(",
"y",
"*",
"2",
")",
"//",
"q",
"!=",
"a_flag",
":",
"y",
"=",
"q",
"-",
"y",
"return",
"(",
"FQ",
"(",
"x",
")",
",",
"FQ",
"(",
"y",
")",
",",
"FQ",
"(",
"1",
")",
")"
] | 31.304348 | 15.130435 |
def iter_bitstream(self, iter_duration_generator):
"""
iterate over self.iter_trigger() and
yield the bits
"""
assert self.half_sinus == False # Allways trigger full sinus cycle
# build min/max Hz values
bit_nul_min_hz = self.cfg.BIT_NUL_HZ - self.cfg.HZ_VARIATION
bit_nul_max_hz = self.cfg.BIT_NUL_HZ + self.cfg.HZ_VARIATION
bit_one_min_hz = self.cfg.BIT_ONE_HZ - self.cfg.HZ_VARIATION
bit_one_max_hz = self.cfg.BIT_ONE_HZ + self.cfg.HZ_VARIATION
bit_nul_max_duration = self._hz2duration(bit_nul_min_hz)
bit_nul_min_duration = self._hz2duration(bit_nul_max_hz)
bit_one_max_duration = self._hz2duration(bit_one_min_hz)
bit_one_min_duration = self._hz2duration(bit_one_max_hz)
log.info("bit-0 in %sHz - %sHz (duration: %s-%s) | bit-1 in %sHz - %sHz (duration: %s-%s)" % (
bit_nul_min_hz, bit_nul_max_hz, bit_nul_min_duration, bit_nul_max_duration,
bit_one_min_hz, bit_one_max_hz, bit_one_min_duration, bit_one_max_duration,
))
assert bit_nul_max_hz < bit_one_min_hz, "HZ_VARIATION value is %sHz too high!" % (
((bit_nul_max_hz - bit_one_min_hz) / 2) + 1
)
assert bit_one_max_duration < bit_nul_min_duration, "HZ_VARIATION value is too high!"
# for end statistics
bit_one_count = 0
one_hz_min = sys.maxint
one_hz_avg = None
one_hz_max = 0
bit_nul_count = 0
nul_hz_min = sys.maxint
nul_hz_avg = None
nul_hz_max = 0
for duration in iter_duration_generator:
if bit_one_min_duration < duration < bit_one_max_duration:
hz = self._duration2hz(duration)
log.log(5,
"bit 1 at %s in %sSamples = %sHz" % (
self.pformat_pos(), duration, hz
)
)
yield 1
bit_one_count += 1
if hz < one_hz_min:
one_hz_min = hz
if hz > one_hz_max:
one_hz_max = hz
one_hz_avg = average(one_hz_avg, hz, bit_one_count)
elif bit_nul_min_duration < duration < bit_nul_max_duration:
hz = self._duration2hz(duration)
log.log(5,
"bit 0 at %s in %sSamples = %sHz" % (
self.pformat_pos(), duration, hz
)
)
yield 0
bit_nul_count += 1
if hz < nul_hz_min:
nul_hz_min = hz
if hz > nul_hz_max:
nul_hz_max = hz
nul_hz_avg = average(nul_hz_avg, hz, bit_nul_count)
else:
hz = self._duration2hz(duration)
log.log(7,
"Skip signal at %s with %sHz (%sSamples) out of frequency range." % (
self.pformat_pos(), hz, duration
)
)
continue
bit_count = bit_one_count + bit_nul_count
if bit_count == 0:
print "ERROR: No information from wave to generate the bits"
print "trigger volume to high?"
sys.exit(-1)
log.info("\n%i Bits: %i positive bits and %i negative bits" % (
bit_count, bit_one_count, bit_nul_count
))
if bit_one_count > 0:
log.info("Bit 1: %sHz - %sHz avg: %.1fHz variation: %sHz" % (
one_hz_min, one_hz_max, one_hz_avg, one_hz_max - one_hz_min
))
if bit_nul_count > 0:
log.info("Bit 0: %sHz - %sHz avg: %.1fHz variation: %sHz" % (
nul_hz_min, nul_hz_max, nul_hz_avg, nul_hz_max - nul_hz_min
)) | [
"def",
"iter_bitstream",
"(",
"self",
",",
"iter_duration_generator",
")",
":",
"assert",
"self",
".",
"half_sinus",
"==",
"False",
"# Allways trigger full sinus cycle",
"# build min/max Hz values",
"bit_nul_min_hz",
"=",
"self",
".",
"cfg",
".",
"BIT_NUL_HZ",
"-",
"self",
".",
"cfg",
".",
"HZ_VARIATION",
"bit_nul_max_hz",
"=",
"self",
".",
"cfg",
".",
"BIT_NUL_HZ",
"+",
"self",
".",
"cfg",
".",
"HZ_VARIATION",
"bit_one_min_hz",
"=",
"self",
".",
"cfg",
".",
"BIT_ONE_HZ",
"-",
"self",
".",
"cfg",
".",
"HZ_VARIATION",
"bit_one_max_hz",
"=",
"self",
".",
"cfg",
".",
"BIT_ONE_HZ",
"+",
"self",
".",
"cfg",
".",
"HZ_VARIATION",
"bit_nul_max_duration",
"=",
"self",
".",
"_hz2duration",
"(",
"bit_nul_min_hz",
")",
"bit_nul_min_duration",
"=",
"self",
".",
"_hz2duration",
"(",
"bit_nul_max_hz",
")",
"bit_one_max_duration",
"=",
"self",
".",
"_hz2duration",
"(",
"bit_one_min_hz",
")",
"bit_one_min_duration",
"=",
"self",
".",
"_hz2duration",
"(",
"bit_one_max_hz",
")",
"log",
".",
"info",
"(",
"\"bit-0 in %sHz - %sHz (duration: %s-%s) | bit-1 in %sHz - %sHz (duration: %s-%s)\"",
"%",
"(",
"bit_nul_min_hz",
",",
"bit_nul_max_hz",
",",
"bit_nul_min_duration",
",",
"bit_nul_max_duration",
",",
"bit_one_min_hz",
",",
"bit_one_max_hz",
",",
"bit_one_min_duration",
",",
"bit_one_max_duration",
",",
")",
")",
"assert",
"bit_nul_max_hz",
"<",
"bit_one_min_hz",
",",
"\"HZ_VARIATION value is %sHz too high!\"",
"%",
"(",
"(",
"(",
"bit_nul_max_hz",
"-",
"bit_one_min_hz",
")",
"/",
"2",
")",
"+",
"1",
")",
"assert",
"bit_one_max_duration",
"<",
"bit_nul_min_duration",
",",
"\"HZ_VARIATION value is too high!\"",
"# for end statistics",
"bit_one_count",
"=",
"0",
"one_hz_min",
"=",
"sys",
".",
"maxint",
"one_hz_avg",
"=",
"None",
"one_hz_max",
"=",
"0",
"bit_nul_count",
"=",
"0",
"nul_hz_min",
"=",
"sys",
".",
"maxint",
"nul_hz_avg",
"=",
"None",
"nul_hz_max",
"=",
"0",
"for",
"duration",
"in",
"iter_duration_generator",
":",
"if",
"bit_one_min_duration",
"<",
"duration",
"<",
"bit_one_max_duration",
":",
"hz",
"=",
"self",
".",
"_duration2hz",
"(",
"duration",
")",
"log",
".",
"log",
"(",
"5",
",",
"\"bit 1 at %s in %sSamples = %sHz\"",
"%",
"(",
"self",
".",
"pformat_pos",
"(",
")",
",",
"duration",
",",
"hz",
")",
")",
"yield",
"1",
"bit_one_count",
"+=",
"1",
"if",
"hz",
"<",
"one_hz_min",
":",
"one_hz_min",
"=",
"hz",
"if",
"hz",
">",
"one_hz_max",
":",
"one_hz_max",
"=",
"hz",
"one_hz_avg",
"=",
"average",
"(",
"one_hz_avg",
",",
"hz",
",",
"bit_one_count",
")",
"elif",
"bit_nul_min_duration",
"<",
"duration",
"<",
"bit_nul_max_duration",
":",
"hz",
"=",
"self",
".",
"_duration2hz",
"(",
"duration",
")",
"log",
".",
"log",
"(",
"5",
",",
"\"bit 0 at %s in %sSamples = %sHz\"",
"%",
"(",
"self",
".",
"pformat_pos",
"(",
")",
",",
"duration",
",",
"hz",
")",
")",
"yield",
"0",
"bit_nul_count",
"+=",
"1",
"if",
"hz",
"<",
"nul_hz_min",
":",
"nul_hz_min",
"=",
"hz",
"if",
"hz",
">",
"nul_hz_max",
":",
"nul_hz_max",
"=",
"hz",
"nul_hz_avg",
"=",
"average",
"(",
"nul_hz_avg",
",",
"hz",
",",
"bit_nul_count",
")",
"else",
":",
"hz",
"=",
"self",
".",
"_duration2hz",
"(",
"duration",
")",
"log",
".",
"log",
"(",
"7",
",",
"\"Skip signal at %s with %sHz (%sSamples) out of frequency range.\"",
"%",
"(",
"self",
".",
"pformat_pos",
"(",
")",
",",
"hz",
",",
"duration",
")",
")",
"continue",
"bit_count",
"=",
"bit_one_count",
"+",
"bit_nul_count",
"if",
"bit_count",
"==",
"0",
":",
"print",
"\"ERROR: No information from wave to generate the bits\"",
"print",
"\"trigger volume to high?\"",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"log",
".",
"info",
"(",
"\"\\n%i Bits: %i positive bits and %i negative bits\"",
"%",
"(",
"bit_count",
",",
"bit_one_count",
",",
"bit_nul_count",
")",
")",
"if",
"bit_one_count",
">",
"0",
":",
"log",
".",
"info",
"(",
"\"Bit 1: %sHz - %sHz avg: %.1fHz variation: %sHz\"",
"%",
"(",
"one_hz_min",
",",
"one_hz_max",
",",
"one_hz_avg",
",",
"one_hz_max",
"-",
"one_hz_min",
")",
")",
"if",
"bit_nul_count",
">",
"0",
":",
"log",
".",
"info",
"(",
"\"Bit 0: %sHz - %sHz avg: %.1fHz variation: %sHz\"",
"%",
"(",
"nul_hz_min",
",",
"nul_hz_max",
",",
"nul_hz_avg",
",",
"nul_hz_max",
"-",
"nul_hz_min",
")",
")"
] | 38.916667 | 22 |
def _dihed_cos_low(a, b, c, deriv):
"""Similar to dihed_cos, but with relative vectors"""
a = Vector3(9, deriv, a, (0, 1, 2))
b = Vector3(9, deriv, b, (3, 4, 5))
c = Vector3(9, deriv, c, (6, 7, 8))
b /= b.norm()
tmp = b.copy()
tmp *= dot(a, b)
a -= tmp
tmp = b.copy()
tmp *= dot(c, b)
c -= tmp
a /= a.norm()
c /= c.norm()
return dot(a, c).results() | [
"def",
"_dihed_cos_low",
"(",
"a",
",",
"b",
",",
"c",
",",
"deriv",
")",
":",
"a",
"=",
"Vector3",
"(",
"9",
",",
"deriv",
",",
"a",
",",
"(",
"0",
",",
"1",
",",
"2",
")",
")",
"b",
"=",
"Vector3",
"(",
"9",
",",
"deriv",
",",
"b",
",",
"(",
"3",
",",
"4",
",",
"5",
")",
")",
"c",
"=",
"Vector3",
"(",
"9",
",",
"deriv",
",",
"c",
",",
"(",
"6",
",",
"7",
",",
"8",
")",
")",
"b",
"/=",
"b",
".",
"norm",
"(",
")",
"tmp",
"=",
"b",
".",
"copy",
"(",
")",
"tmp",
"*=",
"dot",
"(",
"a",
",",
"b",
")",
"a",
"-=",
"tmp",
"tmp",
"=",
"b",
".",
"copy",
"(",
")",
"tmp",
"*=",
"dot",
"(",
"c",
",",
"b",
")",
"c",
"-=",
"tmp",
"a",
"/=",
"a",
".",
"norm",
"(",
")",
"c",
"/=",
"c",
".",
"norm",
"(",
")",
"return",
"dot",
"(",
"a",
",",
"c",
")",
".",
"results",
"(",
")"
] | 26 | 15.133333 |
def main():
'''Main routine.'''
# validate command line arguments
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--name', '-n', required=True, action='store', help='Name')
arg_parser.add_argument('--rgname', '-g', required=True, action='store',
help='Resource Group Name')
arg_parser.add_argument('--location', '-l', required=True, action='store',
help='Location, e.g. eastus')
arg_parser.add_argument('--verbose', '-v', action='store_true', default=False,
help='Print operational details')
args = arg_parser.parse_args()
name = args.name
rgname = args.rgname
location = args.location
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
sys.exit('Error: Expecting azurermconfig.json in current folder')
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
# authenticate
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
# initialize haikunator
hkn = Haikunator()
# create NSG
nsg_name = name + 'nsg'
print('Creating NSG: ' + nsg_name)
rmreturn = azurerm.create_nsg(access_token, subscription_id, rgname, nsg_name, location)
nsg_id = rmreturn.json()['id']
print('nsg_id = ' + nsg_id)
# create NSG rule
nsg_rule = 'ssh'
print('Creating NSG rule: ' + nsg_rule)
rmreturn = azurerm.create_nsg_rule(access_token, subscription_id, rgname, nsg_name, nsg_rule,
description='ssh rule', destination_range='22')
print(rmreturn)
print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
# create VNET
vnetname = name + 'vnet'
print('Creating VNet: ' + vnetname)
rmreturn = azurerm.create_vnet(access_token, subscription_id, rgname, vnetname, location,
nsg_id=nsg_id)
print(rmreturn)
# print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
subnet_id = rmreturn.json()['properties']['subnets'][0]['id']
print('subnet_id = ' + subnet_id)
# create public IP address
public_ip_name = name + 'ip'
dns_label = name + 'ip'
print('Creating public IP address: ' + public_ip_name)
rmreturn = azurerm.create_public_ip(access_token, subscription_id, rgname, public_ip_name,
dns_label, location)
print(rmreturn)
ip_id = rmreturn.json()['id']
print('ip_id = ' + ip_id)
print('Waiting for IP provisioning..')
waiting = True
while waiting:
ipa = azurerm.get_public_ip(access_token, subscription_id, rgname, public_ip_name)
if ipa['properties']['provisioningState'] == 'Succeeded':
waiting = False
time.sleep(1)
# create NIC
nic_name = name + 'nic'
print('Creating NIC: ' + nic_name)
rmreturn = azurerm.create_nic(access_token, subscription_id, rgname, nic_name, ip_id,
subnet_id, location)
#print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
nic_id = rmreturn.json()['id']
print('Waiting for NIC provisioning..')
waiting = True
while waiting:
nic = azurerm.get_nic(access_token, subscription_id, rgname, nic_name)
if nic['properties']['provisioningState'] == 'Succeeded':
waiting = False
time.sleep(1)
# create VM
vm_name = name
vm_size = 'Standard_D1'
publisher = 'CoreOS'
offer = 'CoreOS'
sku = 'Stable'
version = 'latest'
username = 'azure'
password = hkn.haikunate(delimiter=',') # creates random password
print('password = ' + password)
print('Creating VM: ' + vm_name)
rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vm_size,
publisher, offer, sku, version, nic_id, location,
username=username, password=password)
print(rmreturn)
print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': '))) | [
"def",
"main",
"(",
")",
":",
"# validate command line arguments",
"arg_parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"arg_parser",
".",
"add_argument",
"(",
"'--name'",
",",
"'-n'",
",",
"required",
"=",
"True",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'Name'",
")",
"arg_parser",
".",
"add_argument",
"(",
"'--rgname'",
",",
"'-g'",
",",
"required",
"=",
"True",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'Resource Group Name'",
")",
"arg_parser",
".",
"add_argument",
"(",
"'--location'",
",",
"'-l'",
",",
"required",
"=",
"True",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'Location, e.g. eastus'",
")",
"arg_parser",
".",
"add_argument",
"(",
"'--verbose'",
",",
"'-v'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Print operational details'",
")",
"args",
"=",
"arg_parser",
".",
"parse_args",
"(",
")",
"name",
"=",
"args",
".",
"name",
"rgname",
"=",
"args",
".",
"rgname",
"location",
"=",
"args",
".",
"location",
"# Load Azure app defaults",
"try",
":",
"with",
"open",
"(",
"'azurermconfig.json'",
")",
"as",
"config_file",
":",
"config_data",
"=",
"json",
".",
"load",
"(",
"config_file",
")",
"except",
"FileNotFoundError",
":",
"sys",
".",
"exit",
"(",
"'Error: Expecting azurermconfig.json in current folder'",
")",
"tenant_id",
"=",
"config_data",
"[",
"'tenantId'",
"]",
"app_id",
"=",
"config_data",
"[",
"'appId'",
"]",
"app_secret",
"=",
"config_data",
"[",
"'appSecret'",
"]",
"subscription_id",
"=",
"config_data",
"[",
"'subscriptionId'",
"]",
"# authenticate",
"access_token",
"=",
"azurerm",
".",
"get_access_token",
"(",
"tenant_id",
",",
"app_id",
",",
"app_secret",
")",
"# initialize haikunator",
"hkn",
"=",
"Haikunator",
"(",
")",
"# create NSG",
"nsg_name",
"=",
"name",
"+",
"'nsg'",
"print",
"(",
"'Creating NSG: '",
"+",
"nsg_name",
")",
"rmreturn",
"=",
"azurerm",
".",
"create_nsg",
"(",
"access_token",
",",
"subscription_id",
",",
"rgname",
",",
"nsg_name",
",",
"location",
")",
"nsg_id",
"=",
"rmreturn",
".",
"json",
"(",
")",
"[",
"'id'",
"]",
"print",
"(",
"'nsg_id = '",
"+",
"nsg_id",
")",
"# create NSG rule",
"nsg_rule",
"=",
"'ssh'",
"print",
"(",
"'Creating NSG rule: '",
"+",
"nsg_rule",
")",
"rmreturn",
"=",
"azurerm",
".",
"create_nsg_rule",
"(",
"access_token",
",",
"subscription_id",
",",
"rgname",
",",
"nsg_name",
",",
"nsg_rule",
",",
"description",
"=",
"'ssh rule'",
",",
"destination_range",
"=",
"'22'",
")",
"print",
"(",
"rmreturn",
")",
"print",
"(",
"json",
".",
"dumps",
"(",
"rmreturn",
".",
"json",
"(",
")",
",",
"sort_keys",
"=",
"False",
",",
"indent",
"=",
"2",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
")",
"# create VNET",
"vnetname",
"=",
"name",
"+",
"'vnet'",
"print",
"(",
"'Creating VNet: '",
"+",
"vnetname",
")",
"rmreturn",
"=",
"azurerm",
".",
"create_vnet",
"(",
"access_token",
",",
"subscription_id",
",",
"rgname",
",",
"vnetname",
",",
"location",
",",
"nsg_id",
"=",
"nsg_id",
")",
"print",
"(",
"rmreturn",
")",
"# print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))",
"subnet_id",
"=",
"rmreturn",
".",
"json",
"(",
")",
"[",
"'properties'",
"]",
"[",
"'subnets'",
"]",
"[",
"0",
"]",
"[",
"'id'",
"]",
"print",
"(",
"'subnet_id = '",
"+",
"subnet_id",
")",
"# create public IP address",
"public_ip_name",
"=",
"name",
"+",
"'ip'",
"dns_label",
"=",
"name",
"+",
"'ip'",
"print",
"(",
"'Creating public IP address: '",
"+",
"public_ip_name",
")",
"rmreturn",
"=",
"azurerm",
".",
"create_public_ip",
"(",
"access_token",
",",
"subscription_id",
",",
"rgname",
",",
"public_ip_name",
",",
"dns_label",
",",
"location",
")",
"print",
"(",
"rmreturn",
")",
"ip_id",
"=",
"rmreturn",
".",
"json",
"(",
")",
"[",
"'id'",
"]",
"print",
"(",
"'ip_id = '",
"+",
"ip_id",
")",
"print",
"(",
"'Waiting for IP provisioning..'",
")",
"waiting",
"=",
"True",
"while",
"waiting",
":",
"ipa",
"=",
"azurerm",
".",
"get_public_ip",
"(",
"access_token",
",",
"subscription_id",
",",
"rgname",
",",
"public_ip_name",
")",
"if",
"ipa",
"[",
"'properties'",
"]",
"[",
"'provisioningState'",
"]",
"==",
"'Succeeded'",
":",
"waiting",
"=",
"False",
"time",
".",
"sleep",
"(",
"1",
")",
"# create NIC",
"nic_name",
"=",
"name",
"+",
"'nic'",
"print",
"(",
"'Creating NIC: '",
"+",
"nic_name",
")",
"rmreturn",
"=",
"azurerm",
".",
"create_nic",
"(",
"access_token",
",",
"subscription_id",
",",
"rgname",
",",
"nic_name",
",",
"ip_id",
",",
"subnet_id",
",",
"location",
")",
"#print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))",
"nic_id",
"=",
"rmreturn",
".",
"json",
"(",
")",
"[",
"'id'",
"]",
"print",
"(",
"'Waiting for NIC provisioning..'",
")",
"waiting",
"=",
"True",
"while",
"waiting",
":",
"nic",
"=",
"azurerm",
".",
"get_nic",
"(",
"access_token",
",",
"subscription_id",
",",
"rgname",
",",
"nic_name",
")",
"if",
"nic",
"[",
"'properties'",
"]",
"[",
"'provisioningState'",
"]",
"==",
"'Succeeded'",
":",
"waiting",
"=",
"False",
"time",
".",
"sleep",
"(",
"1",
")",
"# create VM",
"vm_name",
"=",
"name",
"vm_size",
"=",
"'Standard_D1'",
"publisher",
"=",
"'CoreOS'",
"offer",
"=",
"'CoreOS'",
"sku",
"=",
"'Stable'",
"version",
"=",
"'latest'",
"username",
"=",
"'azure'",
"password",
"=",
"hkn",
".",
"haikunate",
"(",
"delimiter",
"=",
"','",
")",
"# creates random password",
"print",
"(",
"'password = '",
"+",
"password",
")",
"print",
"(",
"'Creating VM: '",
"+",
"vm_name",
")",
"rmreturn",
"=",
"azurerm",
".",
"create_vm",
"(",
"access_token",
",",
"subscription_id",
",",
"rgname",
",",
"vm_name",
",",
"vm_size",
",",
"publisher",
",",
"offer",
",",
"sku",
",",
"version",
",",
"nic_id",
",",
"location",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
")",
"print",
"(",
"rmreturn",
")",
"print",
"(",
"json",
".",
"dumps",
"(",
"rmreturn",
".",
"json",
"(",
")",
",",
"sort_keys",
"=",
"False",
",",
"indent",
"=",
"2",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
")"
] | 37.9375 | 23.705357 |
def get_sample(self, res, resMode='abs', DL=None, method='sum', ind=None,
               compact=False):
    """ Return a linear sampling of the LOS

    The LOS is sampled into a series a points and segments lengths
    The resolution (segments length) is <= res
    The sampling can be done according to different methods
    It is possible to sample only a subset of the LOS

    Parameters
    ----------
    res: float
        Desired resolution
    resMode: str
        Flag indicating res should be understood as:
            - 'abs': an absolute distance in meters
            - 'rel': a relative distance (fraction of the LOS length)
    DL: None / iterable
        The fraction [L1;L2] of the LOS that should be sampled, where
        L1 and L2 are distances from the starting point of the LOS (LOS.D)
    method: str
        Flag indicating which to use for sampling:
            - 'sum': the LOS is sampled into N segments of equal length,
                where N is the smallest int such that:
                    * segment length <= resolution(res,resMode)
                The points returned are the center of each segment
            - 'simps': the LOS is sampled into N segments of equal length,
                where N is the smallest int such that:
                    * segment length <= resolution(res,resMode)
                    * N is even
                The points returned are the egdes of each segment
            - 'romb': the LOS is sampled into N segments of equal length,
                where N is the smallest int such that:
                    * segment length <= resolution(res,resMode)
                    * N = 2^k + 1
                The points returned are the egdes of each segment
    compact: bool
        If True, the sampled points of all selected LOS are concatenated
        into a single (3, NP) array and the cumulative per-LOS split
        indices are returned as an extra output.

    Returns
    -------
    pts: np.ndarray
        A (3,NP) array of NP points along the LOS in (X,Y,Z) coordinates
    k: np.ndarray
        A (NP,) array of the points distances from the LOS starting point
    reseff: float
        The effective resolution (<= res input), as an absolute distance
    """
    # Normalize the channel-index selection (None -> all channels).
    ind = self._check_indch(ind)
    # preload k
    kIn = self.kIn
    kOut = self.kOut
    # Preformat DL
    if DL is None:
        # Default: sample the full in/out span of each selected LOS.
        DL = np.array([kIn[ind], kOut[ind]])
    elif np.asarray(DL).size==2:
        # A single [L1, L2] pair is broadcast to all selected LOS.
        DL = np.tile(np.asarray(DL).ravel(),(len(ind),1)).T
    DL = np.ascontiguousarray(DL).astype(float)
    assert type(DL) is np.ndarray and DL.ndim==2
    assert DL.shape==(2,len(ind)), "Arg DL has wrong shape !"
    # Check consistency of limits
    # Clamp DL[0] (start) and DL[1] (end) into [kIn, kOut].
    # NOTE: `ii[:] = ...` deliberately reuses the same boolean buffer
    # in-place, so the four clamping steps below depend on this exact order.
    ii = DL[0,:] < kIn[ind]
    DL[0,ii] = kIn[ind][ii]
    ii[:] = DL[0,:] >= kOut[ind]
    DL[0,ii] = kOut[ind][ii]
    ii[:] = DL[1,:] > kOut[ind]
    DL[1,ii] = kOut[ind][ii]
    ii[:] = DL[1,:] <= kIn[ind]
    DL[1,ii] = kIn[ind][ii]
    # Preformat Ds, us
    # Ds: (3, n) starting points, us: (3, n) unit direction vectors.
    Ds, us = self.D[:,ind], self.u[:,ind]
    if len(ind)==1:
        # Keep a 2D (3,1) shape even for a single LOS.
        Ds, us = Ds.reshape((3,1)), us.reshape((3,1))
    Ds, us = np.ascontiguousarray(Ds), np.ascontiguousarray(us)
    # Launch # NB : find a way to exclude cases with DL[0,:]>=DL[1,:] !!
    # Todo : reverse in _GG : make compact default for faster computation !
    lpts, k, reseff = _GG.LOS_get_sample(Ds, us, res, DL,
                                         dLMode=resMode, method=method)
    if compact:
        # Concatenate per-LOS point arrays and record where each LOS ends.
        pts = np.concatenate(lpts, axis=1)
        ind = np.array([pt.shape[1] for pt in lpts], dtype=int)
        ind = np.cumsum(ind)[:-1]
        return pts, k, reseff, ind
    else:
        return lpts, k, reseff
"def",
"get_sample",
"(",
"self",
",",
"res",
",",
"resMode",
"=",
"'abs'",
",",
"DL",
"=",
"None",
",",
"method",
"=",
"'sum'",
",",
"ind",
"=",
"None",
",",
"compact",
"=",
"False",
")",
":",
"ind",
"=",
"self",
".",
"_check_indch",
"(",
"ind",
")",
"# preload k",
"kIn",
"=",
"self",
".",
"kIn",
"kOut",
"=",
"self",
".",
"kOut",
"# Preformat DL",
"if",
"DL",
"is",
"None",
":",
"DL",
"=",
"np",
".",
"array",
"(",
"[",
"kIn",
"[",
"ind",
"]",
",",
"kOut",
"[",
"ind",
"]",
"]",
")",
"elif",
"np",
".",
"asarray",
"(",
"DL",
")",
".",
"size",
"==",
"2",
":",
"DL",
"=",
"np",
".",
"tile",
"(",
"np",
".",
"asarray",
"(",
"DL",
")",
".",
"ravel",
"(",
")",
",",
"(",
"len",
"(",
"ind",
")",
",",
"1",
")",
")",
".",
"T",
"DL",
"=",
"np",
".",
"ascontiguousarray",
"(",
"DL",
")",
".",
"astype",
"(",
"float",
")",
"assert",
"type",
"(",
"DL",
")",
"is",
"np",
".",
"ndarray",
"and",
"DL",
".",
"ndim",
"==",
"2",
"assert",
"DL",
".",
"shape",
"==",
"(",
"2",
",",
"len",
"(",
"ind",
")",
")",
",",
"\"Arg DL has wrong shape !\"",
"# Check consistency of limits",
"ii",
"=",
"DL",
"[",
"0",
",",
":",
"]",
"<",
"kIn",
"[",
"ind",
"]",
"DL",
"[",
"0",
",",
"ii",
"]",
"=",
"kIn",
"[",
"ind",
"]",
"[",
"ii",
"]",
"ii",
"[",
":",
"]",
"=",
"DL",
"[",
"0",
",",
":",
"]",
">=",
"kOut",
"[",
"ind",
"]",
"DL",
"[",
"0",
",",
"ii",
"]",
"=",
"kOut",
"[",
"ind",
"]",
"[",
"ii",
"]",
"ii",
"[",
":",
"]",
"=",
"DL",
"[",
"1",
",",
":",
"]",
">",
"kOut",
"[",
"ind",
"]",
"DL",
"[",
"1",
",",
"ii",
"]",
"=",
"kOut",
"[",
"ind",
"]",
"[",
"ii",
"]",
"ii",
"[",
":",
"]",
"=",
"DL",
"[",
"1",
",",
":",
"]",
"<=",
"kIn",
"[",
"ind",
"]",
"DL",
"[",
"1",
",",
"ii",
"]",
"=",
"kIn",
"[",
"ind",
"]",
"[",
"ii",
"]",
"# Preformat Ds, us",
"Ds",
",",
"us",
"=",
"self",
".",
"D",
"[",
":",
",",
"ind",
"]",
",",
"self",
".",
"u",
"[",
":",
",",
"ind",
"]",
"if",
"len",
"(",
"ind",
")",
"==",
"1",
":",
"Ds",
",",
"us",
"=",
"Ds",
".",
"reshape",
"(",
"(",
"3",
",",
"1",
")",
")",
",",
"us",
".",
"reshape",
"(",
"(",
"3",
",",
"1",
")",
")",
"Ds",
",",
"us",
"=",
"np",
".",
"ascontiguousarray",
"(",
"Ds",
")",
",",
"np",
".",
"ascontiguousarray",
"(",
"us",
")",
"# Launch # NB : find a way to exclude cases with DL[0,:]>=DL[1,:] !!",
"# Todo : reverse in _GG : make compact default for faster computation !",
"lpts",
",",
"k",
",",
"reseff",
"=",
"_GG",
".",
"LOS_get_sample",
"(",
"Ds",
",",
"us",
",",
"res",
",",
"DL",
",",
"dLMode",
"=",
"resMode",
",",
"method",
"=",
"method",
")",
"if",
"compact",
":",
"pts",
"=",
"np",
".",
"concatenate",
"(",
"lpts",
",",
"axis",
"=",
"1",
")",
"ind",
"=",
"np",
".",
"array",
"(",
"[",
"pt",
".",
"shape",
"[",
"1",
"]",
"for",
"pt",
"in",
"lpts",
"]",
",",
"dtype",
"=",
"int",
")",
"ind",
"=",
"np",
".",
"cumsum",
"(",
"ind",
")",
"[",
":",
"-",
"1",
"]",
"return",
"pts",
",",
"k",
",",
"reseff",
",",
"ind",
"else",
":",
"return",
"lpts",
",",
"k",
",",
"reseff"
] | 43.125 | 21.443182 |
def _unfuse(self):
    """Unfuses the fused RNN in to a stack of rnn cells."""
    assert not self._projection_size, "_unfuse does not support projection layer yet!"
    assert not self._lstm_state_clip_min and not self._lstm_state_clip_max, \
        "_unfuse does not support state clipping yet!"
    hidden = self._hidden_size
    # Map each fused mode to a factory producing the equivalent unfused cell.
    # An unknown mode raises KeyError, matching the original dict lookup.
    cell_factories = {
        'rnn_relu': lambda **kw: rnn_cell.RNNCell(hidden, activation='relu', **kw),
        'rnn_tanh': lambda **kw: rnn_cell.RNNCell(hidden, activation='tanh', **kw),
        'lstm': lambda **kw: rnn_cell.LSTMCell(hidden, **kw),
        'gru': lambda **kw: rnn_cell.GRUCell(hidden, **kw),
    }
    get_cell = cell_factories[self._mode]

    stack = rnn_cell.HybridSequentialRNNCell(prefix=self.prefix, params=self.params)
    with stack.name_scope():
        in_size = self._input_size
        for layer in range(self._num_layers):
            # Shared keyword arguments for every cell in this layer.
            common = {'input_size': in_size,
                      'i2h_weight_initializer': self._i2h_weight_initializer,
                      'h2h_weight_initializer': self._h2h_weight_initializer,
                      'i2h_bias_initializer': self._i2h_bias_initializer,
                      'h2h_bias_initializer': self._h2h_bias_initializer}
            if self._dir == 2:
                # Bidirectional: pair a forward ('l') and reverse ('r') cell.
                stack.add(rnn_cell.BidirectionalCell(
                    get_cell(prefix='l%d_' % layer, **common),
                    get_cell(prefix='r%d_' % layer, **common)))
            else:
                stack.add(get_cell(prefix='l%d_' % layer, **common))
            # Dropout between layers only (not after the last one).
            if self._dropout > 0 and layer != self._num_layers - 1:
                stack.add(rnn_cell.DropoutCell(self._dropout))
            in_size = hidden * self._dir
    return stack
"def",
"_unfuse",
"(",
"self",
")",
":",
"assert",
"not",
"self",
".",
"_projection_size",
",",
"\"_unfuse does not support projection layer yet!\"",
"assert",
"not",
"self",
".",
"_lstm_state_clip_min",
"and",
"not",
"self",
".",
"_lstm_state_clip_max",
",",
"\"_unfuse does not support state clipping yet!\"",
"get_cell",
"=",
"{",
"'rnn_relu'",
":",
"lambda",
"*",
"*",
"kwargs",
":",
"rnn_cell",
".",
"RNNCell",
"(",
"self",
".",
"_hidden_size",
",",
"activation",
"=",
"'relu'",
",",
"*",
"*",
"kwargs",
")",
",",
"'rnn_tanh'",
":",
"lambda",
"*",
"*",
"kwargs",
":",
"rnn_cell",
".",
"RNNCell",
"(",
"self",
".",
"_hidden_size",
",",
"activation",
"=",
"'tanh'",
",",
"*",
"*",
"kwargs",
")",
",",
"'lstm'",
":",
"lambda",
"*",
"*",
"kwargs",
":",
"rnn_cell",
".",
"LSTMCell",
"(",
"self",
".",
"_hidden_size",
",",
"*",
"*",
"kwargs",
")",
",",
"'gru'",
":",
"lambda",
"*",
"*",
"kwargs",
":",
"rnn_cell",
".",
"GRUCell",
"(",
"self",
".",
"_hidden_size",
",",
"*",
"*",
"kwargs",
")",
"}",
"[",
"self",
".",
"_mode",
"]",
"stack",
"=",
"rnn_cell",
".",
"HybridSequentialRNNCell",
"(",
"prefix",
"=",
"self",
".",
"prefix",
",",
"params",
"=",
"self",
".",
"params",
")",
"with",
"stack",
".",
"name_scope",
"(",
")",
":",
"ni",
"=",
"self",
".",
"_input_size",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"_num_layers",
")",
":",
"kwargs",
"=",
"{",
"'input_size'",
":",
"ni",
",",
"'i2h_weight_initializer'",
":",
"self",
".",
"_i2h_weight_initializer",
",",
"'h2h_weight_initializer'",
":",
"self",
".",
"_h2h_weight_initializer",
",",
"'i2h_bias_initializer'",
":",
"self",
".",
"_i2h_bias_initializer",
",",
"'h2h_bias_initializer'",
":",
"self",
".",
"_h2h_bias_initializer",
"}",
"if",
"self",
".",
"_dir",
"==",
"2",
":",
"stack",
".",
"add",
"(",
"rnn_cell",
".",
"BidirectionalCell",
"(",
"get_cell",
"(",
"prefix",
"=",
"'l%d_'",
"%",
"i",
",",
"*",
"*",
"kwargs",
")",
",",
"get_cell",
"(",
"prefix",
"=",
"'r%d_'",
"%",
"i",
",",
"*",
"*",
"kwargs",
")",
")",
")",
"else",
":",
"stack",
".",
"add",
"(",
"get_cell",
"(",
"prefix",
"=",
"'l%d_'",
"%",
"i",
",",
"*",
"*",
"kwargs",
")",
")",
"if",
"self",
".",
"_dropout",
">",
"0",
"and",
"i",
"!=",
"self",
".",
"_num_layers",
"-",
"1",
":",
"stack",
".",
"add",
"(",
"rnn_cell",
".",
"DropoutCell",
"(",
"self",
".",
"_dropout",
")",
")",
"ni",
"=",
"self",
".",
"_hidden_size",
"*",
"self",
".",
"_dir",
"return",
"stack"
] | 57.289474 | 29.421053 |
def _get_signature_type_and_params(self, request):
    """Extracts parameters from query, headers and body. Signature type
    is set to the source in which parameters were found.

    :param request: incoming OAuth1 request wrapper (provides ``headers``,
        ``body`` and ``uri_query``).
    :returns: a ``(signature_type, params, oauth_params)`` tuple.
    :raises errors.InvalidRequestError: if oauth_* parameters are found in
        more than one location, or in none at all.
    """
    # Per RFC5849, only the Authorization header may contain the 'realm'
    # optional parameter.
    header_params = signature.collect_parameters(headers=request.headers,
        exclude_oauth_signature=False, with_realm=True)
    body_params = signature.collect_parameters(body=request.body,
        exclude_oauth_signature=False)
    query_params = signature.collect_parameters(uri_query=request.uri_query,
        exclude_oauth_signature=False)

    # The combined parameter list is shared by all candidates; only the
    # oauth_* subset (third element) differs per source.
    params = []
    params.extend(header_params)
    params.extend(body_params)
    params.extend(query_params)
    signature_types_with_oauth_params = list(filter(lambda s: s[2], (
        (SIGNATURE_TYPE_AUTH_HEADER, params,
            utils.filter_oauth_params(header_params)),
        (SIGNATURE_TYPE_BODY, params,
            utils.filter_oauth_params(body_params)),
        (SIGNATURE_TYPE_QUERY, params,
            utils.filter_oauth_params(query_params))
    )))

    if len(signature_types_with_oauth_params) > 1:
        found_types = [s[0] for s in signature_types_with_oauth_params]
        # BUGFIX: the original passed a (template, args) tuple as the
        # description, so the %s was never interpolated, and the implicit
        # string concatenation was missing a space ("signaturetype").
        raise errors.InvalidRequestError(
            description=('oauth_ params must come from only 1 signature '
                         'type but were found in %s'
                         % ', '.join(found_types)))

    try:
        signature_type, params, oauth_params = signature_types_with_oauth_params[
            0]
    except IndexError:
        raise errors.InvalidRequestError(
            description='Missing mandatory OAuth parameters.')

    return signature_type, params, oauth_params
"def",
"_get_signature_type_and_params",
"(",
"self",
",",
"request",
")",
":",
"# Per RFC5849, only the Authorization header may contain the 'realm'",
"# optional parameter.",
"header_params",
"=",
"signature",
".",
"collect_parameters",
"(",
"headers",
"=",
"request",
".",
"headers",
",",
"exclude_oauth_signature",
"=",
"False",
",",
"with_realm",
"=",
"True",
")",
"body_params",
"=",
"signature",
".",
"collect_parameters",
"(",
"body",
"=",
"request",
".",
"body",
",",
"exclude_oauth_signature",
"=",
"False",
")",
"query_params",
"=",
"signature",
".",
"collect_parameters",
"(",
"uri_query",
"=",
"request",
".",
"uri_query",
",",
"exclude_oauth_signature",
"=",
"False",
")",
"params",
"=",
"[",
"]",
"params",
".",
"extend",
"(",
"header_params",
")",
"params",
".",
"extend",
"(",
"body_params",
")",
"params",
".",
"extend",
"(",
"query_params",
")",
"signature_types_with_oauth_params",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"s",
":",
"s",
"[",
"2",
"]",
",",
"(",
"(",
"SIGNATURE_TYPE_AUTH_HEADER",
",",
"params",
",",
"utils",
".",
"filter_oauth_params",
"(",
"header_params",
")",
")",
",",
"(",
"SIGNATURE_TYPE_BODY",
",",
"params",
",",
"utils",
".",
"filter_oauth_params",
"(",
"body_params",
")",
")",
",",
"(",
"SIGNATURE_TYPE_QUERY",
",",
"params",
",",
"utils",
".",
"filter_oauth_params",
"(",
"query_params",
")",
")",
")",
")",
")",
"if",
"len",
"(",
"signature_types_with_oauth_params",
")",
">",
"1",
":",
"found_types",
"=",
"[",
"s",
"[",
"0",
"]",
"for",
"s",
"in",
"signature_types_with_oauth_params",
"]",
"raise",
"errors",
".",
"InvalidRequestError",
"(",
"description",
"=",
"(",
"'oauth_ params must come from only 1 signature'",
"'type but were found in %s'",
",",
"', '",
".",
"join",
"(",
"found_types",
")",
")",
")",
"try",
":",
"signature_type",
",",
"params",
",",
"oauth_params",
"=",
"signature_types_with_oauth_params",
"[",
"0",
"]",
"except",
"IndexError",
":",
"raise",
"errors",
".",
"InvalidRequestError",
"(",
"description",
"=",
"'Missing mandatory OAuth parameters.'",
")",
"return",
"signature_type",
",",
"params",
",",
"oauth_params"
] | 47.853659 | 22.341463 |
def mail_sent_content(self, text, part):
    """
    Test an email contains (assert text in) the given text in the relevant
    message part (accessible as an attribute on the email object).
    This step strictly applies whitespace.

    Syntax:
        I have sent an email with "`text`" in the `part`

    Example:
    .. code-block:: gherkin

        Then I have sent an email with "pandas" in the body
    """
    found = any(text in getattr(message, part) for message in mail.outbox)
    if not found:
        dump_emails(part)
        raise AssertionError(
            "No email contained expected text in the {0}.".format(part))
"def",
"mail_sent_content",
"(",
"self",
",",
"text",
",",
"part",
")",
":",
"if",
"not",
"any",
"(",
"text",
"in",
"getattr",
"(",
"email",
",",
"part",
")",
"for",
"email",
"in",
"mail",
".",
"outbox",
")",
":",
"dump_emails",
"(",
"part",
")",
"raise",
"AssertionError",
"(",
"\"No email contained expected text in the {0}.\"",
".",
"format",
"(",
"part",
")",
")"
] | 28.428571 | 23.571429 |
def generate_code(max_length, max_nest, ops):
    """Generates code samples.

    Args:
      max_length: int. max literal length.
      max_nest: int. max nesting level.
      ops: CodeOp. set of allowable operations.

    Returns:
      1. (str) output value.
      2. (str) Code operation.
    """
    stack = []

    def fetch_one():
        """Pop a previously built (value, code) pair, or make a fresh literal."""
        # Always use an existing nested value for one of the operands.
        if stack:
            return stack.pop()
        else:
            # Produce a numeral of max_length-digits.
            value = random.randint(10 ** (max_length - 1), 10 ** max_length - 1)
            code = str(value)
            return value, code

    def fetch(num_operands):
        """Fetch num_operands (value, code) pairs as two parallel tuples."""
        values, codes = zip(*[fetch_one() for _ in six.moves.range(num_operands)])
        return values, codes

    for _ in six.moves.range(max_nest):
        op = random.choice(ops)
        values, codes = fetch(op.num_operands)
        new_value = op.eval(values)
        new_code = op.get_code(codes)
        stack.append((new_value, "(" + new_code + ")"))
    # NOTE(review): relies on max_nest >= 1 so that `op` is bound here.
    final_value, final_code = stack.pop()
    # Strip the outermost parentheses added by the loop above.
    final_code = final_code[1:-1]
    # BUGFIX: the original also executed `final_code.strip("()")` without
    # using the result -- str.strip returns a new string (strings are
    # immutable), so the statement was a no-op and has been removed.
    if not op.is_memory:
        final_value = int(final_value) % 10 ** (max_length+1)
    return str(final_value), final_code
"def",
"generate_code",
"(",
"max_length",
",",
"max_nest",
",",
"ops",
")",
":",
"stack",
"=",
"[",
"]",
"def",
"fetch_one",
"(",
")",
":",
"# Always use an existing nested value for one of the operands.",
"if",
"stack",
":",
"return",
"stack",
".",
"pop",
"(",
")",
"else",
":",
"# Produce a numeral of max_length-digits.",
"value",
"=",
"random",
".",
"randint",
"(",
"10",
"**",
"(",
"max_length",
"-",
"1",
")",
",",
"10",
"**",
"max_length",
"-",
"1",
")",
"code",
"=",
"str",
"(",
"value",
")",
"return",
"value",
",",
"code",
"def",
"fetch",
"(",
"num_operands",
")",
":",
"values",
",",
"codes",
"=",
"zip",
"(",
"*",
"[",
"fetch_one",
"(",
")",
"for",
"_",
"in",
"six",
".",
"moves",
".",
"range",
"(",
"num_operands",
")",
"]",
")",
"return",
"values",
",",
"codes",
"for",
"_",
"in",
"six",
".",
"moves",
".",
"range",
"(",
"max_nest",
")",
":",
"op",
"=",
"random",
".",
"choice",
"(",
"ops",
")",
"values",
",",
"codes",
"=",
"fetch",
"(",
"op",
".",
"num_operands",
")",
"new_value",
"=",
"op",
".",
"eval",
"(",
"values",
")",
"new_code",
"=",
"op",
".",
"get_code",
"(",
"codes",
")",
"stack",
".",
"append",
"(",
"(",
"new_value",
",",
"\"(\"",
"+",
"new_code",
"+",
"\")\"",
")",
")",
"final_value",
",",
"final_code",
"=",
"stack",
".",
"pop",
"(",
")",
"final_code",
"=",
"final_code",
"[",
"1",
":",
"-",
"1",
"]",
"final_code",
".",
"strip",
"(",
"\"()\"",
")",
"if",
"not",
"op",
".",
"is_memory",
":",
"final_value",
"=",
"int",
"(",
"final_value",
")",
"%",
"10",
"**",
"(",
"max_length",
"+",
"1",
")",
"return",
"str",
"(",
"final_value",
")",
",",
"final_code"
] | 29.25641 | 17.025641 |
def _get_metadata(self, key='', default=None, builtin=True):
    """
    get_metadata([key, default, builtin])

    Retrieve metadata with nested keys separated by dots, e.g.
    ``'format.show-frame'``. Avoids repeatedly checking whether each
    intermediate dict exists, as the frontmatter might not have the keys
    that we expect.

    With ``builtin=True`` (the default), the result is converted to
    built-in Python types instead of :class:`.MetaValue` elements
    (e.g. a MetaBool becomes True|False).

    :param key: dotted key path; an empty string returns the entire
        metadata dict (default ``''``)
    :type key: ``str``
    :param default: value returned when the key is not found (default ``None``)
    :param builtin: if True, return built-in Python types (default ``True``)

    :Example:
        >>> doc.metadata['format']['show-frame'] = True
        >>> show_frame = doc.get_metadata('format.show-frame', False)
    """
    assert isinstance(key, str)
    # Walk down the metadata tree one dotted component at a time.
    node = self.metadata
    for part in (key.split('.') if key else []):
        if not (isinstance(node, MetaMap) and part in node.content):
            return default
        node = node[part]
    return meta2builtin(node) if builtin else node
"def",
"_get_metadata",
"(",
"self",
",",
"key",
"=",
"''",
",",
"default",
"=",
"None",
",",
"builtin",
"=",
"True",
")",
":",
"# Retrieve metadata",
"assert",
"isinstance",
"(",
"key",
",",
"str",
")",
"meta",
"=",
"self",
".",
"metadata",
"# Retrieve specific key",
"if",
"key",
":",
"for",
"k",
"in",
"key",
".",
"split",
"(",
"'.'",
")",
":",
"if",
"isinstance",
"(",
"meta",
",",
"MetaMap",
")",
"and",
"k",
"in",
"meta",
".",
"content",
":",
"meta",
"=",
"meta",
"[",
"k",
"]",
"else",
":",
"return",
"default",
"# Stringify contents",
"return",
"meta2builtin",
"(",
"meta",
")",
"if",
"builtin",
"else",
"meta"
] | 35.8 | 27.15 |
def encodeMessage(self, events):
    """Encode a list of Tensor events with protobuf"""
    # Only events tagged as 'riemann' are included in the wire message.
    riemann_events = [self.encodeEvent(ev) for ev in events
                      if ev._type == 'riemann']
    msg = proto_pb2.Msg(events=riemann_events)
    return msg.SerializeToString()
"def",
"encodeMessage",
"(",
"self",
",",
"events",
")",
":",
"message",
"=",
"proto_pb2",
".",
"Msg",
"(",
"events",
"=",
"[",
"self",
".",
"encodeEvent",
"(",
"e",
")",
"for",
"e",
"in",
"events",
"if",
"e",
".",
"_type",
"==",
"'riemann'",
"]",
")",
"return",
"message",
".",
"SerializeToString",
"(",
")"
] | 31.375 | 20.875 |
def pop(self, count):
    """Return a new context stack with the last *count* levels removed.

    If *count* is too large, log an error and fall back to a stack holding
    only the first level (or this stack itself when it has a single level).
    """
    depth = len(self._contexts)
    if count <= depth - 1:
        return ContextStack(self._contexts[:-count], self._data[:-count])
    _logger.error("#pop value is too big %d", depth)
    if depth > 1:
        return ContextStack(self._contexts[:1], self._data[:1])
    return self
"def",
"pop",
"(",
"self",
",",
"count",
")",
":",
"if",
"len",
"(",
"self",
".",
"_contexts",
")",
"-",
"1",
"<",
"count",
":",
"_logger",
".",
"error",
"(",
"\"#pop value is too big %d\"",
",",
"len",
"(",
"self",
".",
"_contexts",
")",
")",
"if",
"len",
"(",
"self",
".",
"_contexts",
")",
">",
"1",
":",
"return",
"ContextStack",
"(",
"self",
".",
"_contexts",
"[",
":",
"1",
"]",
",",
"self",
".",
"_data",
"[",
":",
"1",
"]",
")",
"else",
":",
"return",
"self",
"return",
"ContextStack",
"(",
"self",
".",
"_contexts",
"[",
":",
"-",
"count",
"]",
",",
"self",
".",
"_data",
"[",
":",
"-",
"count",
"]",
")"
] | 40.545455 | 17.909091 |
def serverUrl(self, value):
    """gets/sets the server url"""
    # Only update when the URL differs case-insensitively.
    changed = value.lower() != self._serverUrl.lower()
    if changed:
        self._serverUrl = value
"def",
"serverUrl",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
".",
"lower",
"(",
")",
"!=",
"self",
".",
"_serverUrl",
".",
"lower",
"(",
")",
":",
"self",
".",
"_serverUrl",
"=",
"value"
] | 38 | 7.5 |
def PauseHunt(hunt_id, reason=None):
    """Pauses a hunt with a given id."""
    hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
    # Only a hunt in the STARTED state may be paused.
    if hunt_obj.hunt_state != hunt_obj.HuntState.STARTED:
        raise OnlyStartedHuntCanBePausedError(hunt_obj)

    data_store.REL_DB.UpdateHuntObject(hunt_id,
                                       hunt_state=hunt_obj.HuntState.PAUSED,
                                       hunt_state_comment=reason)
    # Stop scheduling new clients for this hunt.
    data_store.REL_DB.RemoveForemanRule(hunt_id=hunt_obj.hunt_id)
    # Return the freshly updated hunt object.
    return data_store.REL_DB.ReadHuntObject(hunt_id)
"def",
"PauseHunt",
"(",
"hunt_id",
",",
"reason",
"=",
"None",
")",
":",
"hunt_obj",
"=",
"data_store",
".",
"REL_DB",
".",
"ReadHuntObject",
"(",
"hunt_id",
")",
"if",
"hunt_obj",
".",
"hunt_state",
"!=",
"hunt_obj",
".",
"HuntState",
".",
"STARTED",
":",
"raise",
"OnlyStartedHuntCanBePausedError",
"(",
"hunt_obj",
")",
"data_store",
".",
"REL_DB",
".",
"UpdateHuntObject",
"(",
"hunt_id",
",",
"hunt_state",
"=",
"hunt_obj",
".",
"HuntState",
".",
"PAUSED",
",",
"hunt_state_comment",
"=",
"reason",
")",
"data_store",
".",
"REL_DB",
".",
"RemoveForemanRule",
"(",
"hunt_id",
"=",
"hunt_obj",
".",
"hunt_id",
")",
"return",
"data_store",
".",
"REL_DB",
".",
"ReadHuntObject",
"(",
"hunt_id",
")"
] | 38.583333 | 19.916667 |
def _split_sched_block_instance(self, scheduling_block):
"""Split the scheduling block data into multiple names
before adding to the configuration database"""
# Initialise empty list
_scheduling_block_data = {}
_processing_block_data = {}
_processing_block_id = []
for block in scheduling_block:
values = scheduling_block[block]
if block != 'processing_blocks':
_scheduling_block_data[block] = values
else:
# Check if there is a processing block that already exits in
# the database
processing_block_id = self.get_processing_block_ids()
for value in values:
if value['id'] not in processing_block_id:
_processing_block_data = values
else:
raise Exception("Processing block already exits",
value['id'])
# Adding processing block id to the scheduling block list
for block_id in _processing_block_data:
_processing_block_id.append(block_id['id'])
_scheduling_block_data['processing_block_ids'] = _processing_block_id
return _scheduling_block_data, _processing_block_data | [
"def",
"_split_sched_block_instance",
"(",
"self",
",",
"scheduling_block",
")",
":",
"# Initialise empty list",
"_scheduling_block_data",
"=",
"{",
"}",
"_processing_block_data",
"=",
"{",
"}",
"_processing_block_id",
"=",
"[",
"]",
"for",
"block",
"in",
"scheduling_block",
":",
"values",
"=",
"scheduling_block",
"[",
"block",
"]",
"if",
"block",
"!=",
"'processing_blocks'",
":",
"_scheduling_block_data",
"[",
"block",
"]",
"=",
"values",
"else",
":",
"# Check if there is a processing block that already exits in",
"# the database",
"processing_block_id",
"=",
"self",
".",
"get_processing_block_ids",
"(",
")",
"for",
"value",
"in",
"values",
":",
"if",
"value",
"[",
"'id'",
"]",
"not",
"in",
"processing_block_id",
":",
"_processing_block_data",
"=",
"values",
"else",
":",
"raise",
"Exception",
"(",
"\"Processing block already exits\"",
",",
"value",
"[",
"'id'",
"]",
")",
"# Adding processing block id to the scheduling block list",
"for",
"block_id",
"in",
"_processing_block_data",
":",
"_processing_block_id",
".",
"append",
"(",
"block_id",
"[",
"'id'",
"]",
")",
"_scheduling_block_data",
"[",
"'processing_block_ids'",
"]",
"=",
"_processing_block_id",
"return",
"_scheduling_block_data",
",",
"_processing_block_data"
] | 40.1875 | 19.0625 |
def applications(self):
    """returns all the group applications to join"""
    url = self._url + "/applications"
    res = self._get(url=url,
                    param_dict={"f": "json"},
                    proxy_url=self._proxy_url,
                    proxy_port=self._proxy_port)
    # Wrap each returned application record in an Application object.
    apps = []
    for app in res.get("applications", []):
        app_url = "%s/%s" % (self._url, app['username'])
        apps.append(self.Application(url=app_url,
                                     securityHandler=self._securityHandler,
                                     proxy_url=self._proxy_url,
                                     proxy_port=self._proxy_port))
    return apps
"def",
"applications",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"_url",
"+",
"\"/applications\"",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
"}",
"res",
"=",
"self",
".",
"_get",
"(",
"url",
"=",
"url",
",",
"param_dict",
"=",
"params",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")",
"items",
"=",
"[",
"]",
"if",
"\"applications\"",
"in",
"res",
".",
"keys",
"(",
")",
":",
"for",
"apps",
"in",
"res",
"[",
"'applications'",
"]",
":",
"items",
".",
"append",
"(",
"self",
".",
"Application",
"(",
"url",
"=",
"\"%s/%s\"",
"%",
"(",
"self",
".",
"_url",
",",
"apps",
"[",
"'username'",
"]",
")",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")",
")",
"return",
"items"
] | 43.777778 | 15.111111 |
async def _on_progress_notification(self, progress):
"""Callback function called when a progress notification is received.
Args:
progress (dict): The received notification containing the progress information
"""
conn_string = progress.get('connection_string')
done = progress.get('done_count')
total = progress.get('total_count')
operation = progress.get('operation')
await self.notify_progress(conn_string, operation, done, total, wait=True) | [
"async",
"def",
"_on_progress_notification",
"(",
"self",
",",
"progress",
")",
":",
"conn_string",
"=",
"progress",
".",
"get",
"(",
"'connection_string'",
")",
"done",
"=",
"progress",
".",
"get",
"(",
"'done_count'",
")",
"total",
"=",
"progress",
".",
"get",
"(",
"'total_count'",
")",
"operation",
"=",
"progress",
".",
"get",
"(",
"'operation'",
")",
"await",
"self",
".",
"notify_progress",
"(",
"conn_string",
",",
"operation",
",",
"done",
",",
"total",
",",
"wait",
"=",
"True",
")"
] | 39.153846 | 21.153846 |
def delete_bams(job, bams, patient_id):
    """
    Delete the bams from the job Store once their purpose has been achieved (i.e. after all
    mutation calling steps). Will also delete the chimeric junction file from Star.

    :param job: The Toil job context (provides fileStore access).
    :param dict bams: Dict of bam and bai files; may also contain a nested
        'rna_genome' dict and an 'rnaChimeric.out.junction' entry.
    :param str patient_id: The ID of the patient for logging purposes.
    """
    # BUGFIX: the original rebound `bams` to the filtered dict before testing
    # for the 'rna_genome' / 'rnaChimeric.out.junction' keys, which made those
    # elif branches unreachable (the filter keeps only *.bam/*.bai keys).
    # Filter into a separate dict and test the special keys on the original.
    bam_files = {b: v for b, v in bams.items()
                 if (b.endswith('.bam') or b.endswith('.bai')) and v is not None}
    for key, val in bam_files.items():
        job.fileStore.logToMaster('Deleting "%s" for patient "%s".' % (key, patient_id))
        job.fileStore.deleteGlobalFile(val)
    if 'rna_genome' in bams:
        # Recurse into the nested genome-alignment dict.
        delete_bams(job, bams['rna_genome'], patient_id)
    if 'rnaChimeric.out.junction' in bams:
        job.fileStore.logToMaster('Deleting "rnaChimeric.out.junction" for patient "%s".' %
                                  patient_id)
        job.fileStore.deleteGlobalFile(bams['rnaChimeric.out.junction'])
"def",
"delete_bams",
"(",
"job",
",",
"bams",
",",
"patient_id",
")",
":",
"bams",
"=",
"{",
"b",
":",
"v",
"for",
"b",
",",
"v",
"in",
"bams",
".",
"items",
"(",
")",
"if",
"(",
"b",
".",
"endswith",
"(",
"'.bam'",
")",
"or",
"b",
".",
"endswith",
"(",
"'.bai'",
")",
")",
"and",
"v",
"is",
"not",
"None",
"}",
"if",
"bams",
":",
"for",
"key",
",",
"val",
"in",
"bams",
".",
"items",
"(",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Deleting \"%s\" for patient \"%s\".'",
"%",
"(",
"key",
",",
"patient_id",
")",
")",
"job",
".",
"fileStore",
".",
"deleteGlobalFile",
"(",
"val",
")",
"elif",
"'rna_genome'",
"in",
"bams",
":",
"delete_bams",
"(",
"job",
",",
"bams",
"[",
"'rna_genome'",
"]",
",",
"patient_id",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Deleting \"rna_transcriptome.bam\" for patient \"%s\".'",
"%",
"patient_id",
")",
"job",
".",
"fileStore",
".",
"deleteGlobalFile",
"(",
"bams",
"[",
"'rna_transcriptome.bam'",
"]",
")",
"elif",
"'rnaChimeric.out.junction'",
"in",
"bams",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Deleting \"rnaChimeric.out.junction\" for patient \"%s\".'",
"%",
"patient_id",
")",
"job",
".",
"fileStore",
".",
"deleteGlobalFile",
"(",
"bams",
"[",
"'rnaChimeric.out.junction'",
"]",
")",
"else",
":",
"pass"
] | 49.041667 | 23.541667 |
def pop(self, *args, **kwargs):
    """
    Removes and returns item at specified index (default= ``last``).
    Supports both ``list`` and ``dict`` semantics for ``pop()``: with no
    argument or an integer argument, tokens are popped from the list of
    parsed tokens; with a non-integer argument (most likely a string), the
    corresponding value is popped from any defined results names. A second
    default return value argument is supported, just as in ``dict.pop()``
    (positionally or as ``default=...``).
    """
    # No positional args: pop the last list element.
    if not args:
        args = [-1]
    # Accept only the 'default' keyword; fold it in as a second positional.
    for kw, value in kwargs.items():
        if kw != 'default':
            raise TypeError("pop() got an unexpected keyword argument '%s'" % kw)
        args = (args[0], value)
    # List semantics: integer index, a single argument, or an existing key.
    use_list_semantics = (isinstance(args[0], int)
                          or len(args) == 1
                          or args[0] in self)
    if not use_list_semantics:
        # Dict semantics with a missing key: return the supplied default.
        return args[1]
    index = args[0]
    popped = self[index]
    del self[index]
    return popped
"def",
"pop",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"args",
":",
"args",
"=",
"[",
"-",
"1",
"]",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"k",
"==",
"'default'",
":",
"args",
"=",
"(",
"args",
"[",
"0",
"]",
",",
"v",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"pop() got an unexpected keyword argument '%s'\"",
"%",
"k",
")",
"if",
"(",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"int",
")",
"or",
"len",
"(",
"args",
")",
"==",
"1",
"or",
"args",
"[",
"0",
"]",
"in",
"self",
")",
":",
"index",
"=",
"args",
"[",
"0",
"]",
"ret",
"=",
"self",
"[",
"index",
"]",
"del",
"self",
"[",
"index",
"]",
"return",
"ret",
"else",
":",
"defaultvalue",
"=",
"args",
"[",
"1",
"]",
"return",
"defaultvalue"
] | 37.611111 | 20.981481 |
def fastMean(img, f=10, inplace=False):
    '''
    Approximate a large-kernel mean blur: for bigger ksizes it is often
    faster to shrink the image by factor *f* and scale it back up than to
    blur it directly. With inplace=True the result is written into *img*.
    '''
    h, w = img.shape[:2]
    small_size = (int(round(w / f)), int(round(h / f)))
    # Downscale with area interpolation (averages the pixels).
    small = cv2.resize(img, small_size, interpolation=cv2.INTER_AREA)
    # Upscale back to the original size.
    resize_kwargs = {'interpolation': cv2.INTER_LINEAR}
    if inplace:
        resize_kwargs['dst'] = img
    return cv2.resize(small, (w, h), **resize_kwargs)
"def",
"fastMean",
"(",
"img",
",",
"f",
"=",
"10",
",",
"inplace",
"=",
"False",
")",
":",
"s0",
",",
"s1",
"=",
"img",
".",
"shape",
"[",
":",
"2",
"]",
"ss0",
"=",
"int",
"(",
"round",
"(",
"s0",
"/",
"f",
")",
")",
"ss1",
"=",
"int",
"(",
"round",
"(",
"s1",
"/",
"f",
")",
")",
"small",
"=",
"cv2",
".",
"resize",
"(",
"img",
",",
"(",
"ss1",
",",
"ss0",
")",
",",
"interpolation",
"=",
"cv2",
".",
"INTER_AREA",
")",
"#bigger\r",
"k",
"=",
"{",
"'interpolation'",
":",
"cv2",
".",
"INTER_LINEAR",
"}",
"if",
"inplace",
":",
"k",
"[",
"'dst'",
"]",
"=",
"img",
"return",
"cv2",
".",
"resize",
"(",
"small",
",",
"(",
"s1",
",",
"s0",
")",
",",
"*",
"*",
"k",
")"
] | 28.6 | 18.333333 |
def get_ec2_info(instance_id,
region,
access_key_id,
secret_access_key,
username):
""" queries EC2 for details about a particular instance_id
"""
conn = connect_to_ec2(region, access_key_id, secret_access_key)
instance = conn.get_only_instances(
filters={'instance_id': instance_id}
)[0]
data = {}
data['public_dns_name'] = instance.public_dns_name
data['id'] = instance.id
data['instance_type'] = instance.instance_type
data['ip_address'] = instance.ip_address
data['architecture'] = instance.architecture
data['state'] = instance.state
data['region'] = region
data['cloud_type'] = 'ec2'
data['username'] = username
# find out the distribution running on the instance
if username is not None:
wait_for_ssh(data['ip_address'])
with settings(host_string=username + '@' + data['ip_address']):
data['distribution'] = linux_distribution(username,
data['ip_address'])
data['os_release'] = os_release(username,
data['ip_address'])
try:
volume = conn.get_all_volumes(
filters={'attachment.instance-id': instance.id})[0].id
data['volume'] = volume
except:
data['volume'] = ''
return data | [
"def",
"get_ec2_info",
"(",
"instance_id",
",",
"region",
",",
"access_key_id",
",",
"secret_access_key",
",",
"username",
")",
":",
"conn",
"=",
"connect_to_ec2",
"(",
"region",
",",
"access_key_id",
",",
"secret_access_key",
")",
"instance",
"=",
"conn",
".",
"get_only_instances",
"(",
"filters",
"=",
"{",
"'instance_id'",
":",
"instance_id",
"}",
")",
"[",
"0",
"]",
"data",
"=",
"{",
"}",
"data",
"[",
"'public_dns_name'",
"]",
"=",
"instance",
".",
"public_dns_name",
"data",
"[",
"'id'",
"]",
"=",
"instance",
".",
"id",
"data",
"[",
"'instance_type'",
"]",
"=",
"instance",
".",
"instance_type",
"data",
"[",
"'ip_address'",
"]",
"=",
"instance",
".",
"ip_address",
"data",
"[",
"'architecture'",
"]",
"=",
"instance",
".",
"architecture",
"data",
"[",
"'state'",
"]",
"=",
"instance",
".",
"state",
"data",
"[",
"'region'",
"]",
"=",
"region",
"data",
"[",
"'cloud_type'",
"]",
"=",
"'ec2'",
"data",
"[",
"'username'",
"]",
"=",
"username",
"# find out the distribution running on the instance",
"if",
"username",
"is",
"not",
"None",
":",
"wait_for_ssh",
"(",
"data",
"[",
"'ip_address'",
"]",
")",
"with",
"settings",
"(",
"host_string",
"=",
"username",
"+",
"'@'",
"+",
"data",
"[",
"'ip_address'",
"]",
")",
":",
"data",
"[",
"'distribution'",
"]",
"=",
"linux_distribution",
"(",
"username",
",",
"data",
"[",
"'ip_address'",
"]",
")",
"data",
"[",
"'os_release'",
"]",
"=",
"os_release",
"(",
"username",
",",
"data",
"[",
"'ip_address'",
"]",
")",
"try",
":",
"volume",
"=",
"conn",
".",
"get_all_volumes",
"(",
"filters",
"=",
"{",
"'attachment.instance-id'",
":",
"instance",
".",
"id",
"}",
")",
"[",
"0",
"]",
".",
"id",
"data",
"[",
"'volume'",
"]",
"=",
"volume",
"except",
":",
"data",
"[",
"'volume'",
"]",
"=",
"''",
"return",
"data"
] | 35.333333 | 16.230769 |
def get(self, query, *parameters, **kwparameters):
"""Returns the first row returned for the given query."""
rows = self._query(query, parameters, kwparameters)
if not rows:
return None
elif not isinstance(rows, list):
raise MySQLError("Query is not a select query")
elif len(rows) > 1:
raise MySQLError("Multiple rows returned for Database.get() query")
else:
return rows[0] | [
"def",
"get",
"(",
"self",
",",
"query",
",",
"*",
"parameters",
",",
"*",
"*",
"kwparameters",
")",
":",
"rows",
"=",
"self",
".",
"_query",
"(",
"query",
",",
"parameters",
",",
"kwparameters",
")",
"if",
"not",
"rows",
":",
"return",
"None",
"elif",
"not",
"isinstance",
"(",
"rows",
",",
"list",
")",
":",
"raise",
"MySQLError",
"(",
"\"Query is not a select query\"",
")",
"elif",
"len",
"(",
"rows",
")",
">",
"1",
":",
"raise",
"MySQLError",
"(",
"\"Multiple rows returned for Database.get() query\"",
")",
"else",
":",
"return",
"rows",
"[",
"0",
"]"
] | 41.909091 | 16.181818 |
def present(name,
tablespace=None,
encoding=None,
lc_collate=None,
lc_ctype=None,
owner=None,
owner_recurse=False,
template=None,
user=None,
maintenance_db=None,
db_password=None,
db_host=None,
db_port=None,
db_user=None):
'''
Ensure that the named database is present with the specified properties.
For more information about all of these options see man createdb(1)
name
The name of the database to manage
tablespace
Default tablespace for the database
encoding
The character encoding scheme to be used in this database
lc_collate
The LC_COLLATE setting to be used in this database
lc_ctype
The LC_CTYPE setting to be used in this database
owner
The username of the database owner
owner_recurse
Recurse owner change to all relations in the database
template
The template database from which to build this database
user
System user all operations should be performed on behalf of
db_user
database username if different from config or default
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
.. versionadded:: 0.17.0
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is already present'.format(name)}
db_args = {
'maintenance_db': maintenance_db,
'runas': user,
'host': db_host,
'user': db_user,
'port': db_port,
'password': db_password,
}
dbs = __salt__['postgres.db_list'](**db_args)
db_params = dbs.get(name, {})
if name in dbs and all((
db_params.get('Tablespace') == tablespace if tablespace else True,
(
db_params.get('Encoding').lower() == encoding.lower()
if encoding else True
),
db_params.get('Collate') == lc_collate if lc_collate else True,
db_params.get('Ctype') == lc_ctype if lc_ctype else True,
db_params.get('Owner') == owner if owner else True
)):
return ret
elif name in dbs and any((
db_params.get('Encoding').lower() != encoding.lower() if encoding else False,
db_params.get('Collate') != lc_collate if lc_collate else False,
db_params.get('Ctype') != lc_ctype if lc_ctype else False
)):
ret['comment'] = 'Database {0} has wrong parameters ' \
'which couldn\'t be changed on fly.'.format(name)
ret['result'] = False
return ret
# The database is not present, make it!
if __opts__['test']:
ret['result'] = None
if name not in dbs:
ret['comment'] = 'Database {0} is set to be created'.format(name)
else:
ret['comment'] = 'Database {0} exists, but parameters ' \
'need to be changed'.format(name)
return ret
if (
name not in dbs and __salt__['postgres.db_create'](
name,
tablespace=tablespace,
encoding=encoding,
lc_collate=lc_collate,
lc_ctype=lc_ctype,
owner=owner,
template=template,
**db_args)
):
ret['comment'] = 'The database {0} has been created'.format(name)
ret['changes'][name] = 'Present'
elif (
name in dbs and __salt__['postgres.db_alter'](
name,
tablespace=tablespace,
owner=owner, owner_recurse=owner_recurse, **db_args)
):
ret['comment'] = ('Parameters for database {0} have been changed'
).format(name)
ret['changes'][name] = 'Parameters changed'
elif name in dbs:
ret['comment'] = ('Failed to change parameters for database {0}'
).format(name)
ret['result'] = False
else:
ret['comment'] = 'Failed to create database {0}'.format(name)
ret['result'] = False
return ret | [
"def",
"present",
"(",
"name",
",",
"tablespace",
"=",
"None",
",",
"encoding",
"=",
"None",
",",
"lc_collate",
"=",
"None",
",",
"lc_ctype",
"=",
"None",
",",
"owner",
"=",
"None",
",",
"owner_recurse",
"=",
"False",
",",
"template",
"=",
"None",
",",
"user",
"=",
"None",
",",
"maintenance_db",
"=",
"None",
",",
"db_password",
"=",
"None",
",",
"db_host",
"=",
"None",
",",
"db_port",
"=",
"None",
",",
"db_user",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"'Database {0} is already present'",
".",
"format",
"(",
"name",
")",
"}",
"db_args",
"=",
"{",
"'maintenance_db'",
":",
"maintenance_db",
",",
"'runas'",
":",
"user",
",",
"'host'",
":",
"db_host",
",",
"'user'",
":",
"db_user",
",",
"'port'",
":",
"db_port",
",",
"'password'",
":",
"db_password",
",",
"}",
"dbs",
"=",
"__salt__",
"[",
"'postgres.db_list'",
"]",
"(",
"*",
"*",
"db_args",
")",
"db_params",
"=",
"dbs",
".",
"get",
"(",
"name",
",",
"{",
"}",
")",
"if",
"name",
"in",
"dbs",
"and",
"all",
"(",
"(",
"db_params",
".",
"get",
"(",
"'Tablespace'",
")",
"==",
"tablespace",
"if",
"tablespace",
"else",
"True",
",",
"(",
"db_params",
".",
"get",
"(",
"'Encoding'",
")",
".",
"lower",
"(",
")",
"==",
"encoding",
".",
"lower",
"(",
")",
"if",
"encoding",
"else",
"True",
")",
",",
"db_params",
".",
"get",
"(",
"'Collate'",
")",
"==",
"lc_collate",
"if",
"lc_collate",
"else",
"True",
",",
"db_params",
".",
"get",
"(",
"'Ctype'",
")",
"==",
"lc_ctype",
"if",
"lc_ctype",
"else",
"True",
",",
"db_params",
".",
"get",
"(",
"'Owner'",
")",
"==",
"owner",
"if",
"owner",
"else",
"True",
")",
")",
":",
"return",
"ret",
"elif",
"name",
"in",
"dbs",
"and",
"any",
"(",
"(",
"db_params",
".",
"get",
"(",
"'Encoding'",
")",
".",
"lower",
"(",
")",
"!=",
"encoding",
".",
"lower",
"(",
")",
"if",
"encoding",
"else",
"False",
",",
"db_params",
".",
"get",
"(",
"'Collate'",
")",
"!=",
"lc_collate",
"if",
"lc_collate",
"else",
"False",
",",
"db_params",
".",
"get",
"(",
"'Ctype'",
")",
"!=",
"lc_ctype",
"if",
"lc_ctype",
"else",
"False",
")",
")",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Database {0} has wrong parameters '",
"'which couldn\\'t be changed on fly.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret",
"# The database is not present, make it!",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"if",
"name",
"not",
"in",
"dbs",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Database {0} is set to be created'",
".",
"format",
"(",
"name",
")",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Database {0} exists, but parameters '",
"'need to be changed'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"if",
"(",
"name",
"not",
"in",
"dbs",
"and",
"__salt__",
"[",
"'postgres.db_create'",
"]",
"(",
"name",
",",
"tablespace",
"=",
"tablespace",
",",
"encoding",
"=",
"encoding",
",",
"lc_collate",
"=",
"lc_collate",
",",
"lc_ctype",
"=",
"lc_ctype",
",",
"owner",
"=",
"owner",
",",
"template",
"=",
"template",
",",
"*",
"*",
"db_args",
")",
")",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'The database {0} has been created'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"name",
"]",
"=",
"'Present'",
"elif",
"(",
"name",
"in",
"dbs",
"and",
"__salt__",
"[",
"'postgres.db_alter'",
"]",
"(",
"name",
",",
"tablespace",
"=",
"tablespace",
",",
"owner",
"=",
"owner",
",",
"owner_recurse",
"=",
"owner_recurse",
",",
"*",
"*",
"db_args",
")",
")",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'Parameters for database {0} have been changed'",
")",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"name",
"]",
"=",
"'Parameters changed'",
"elif",
"name",
"in",
"dbs",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'Failed to change parameters for database {0}'",
")",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to create database {0}'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret"
] | 30.411765 | 22.735294 |
async def _retrieve_messages_around_strategy(self, retrieve):
"""Retrieve messages using around parameter."""
if self.around:
around = self.around.id if self.around else None
data = await self.logs_from(self.channel.id, retrieve, around=around)
self.around = None
return data
return [] | [
"async",
"def",
"_retrieve_messages_around_strategy",
"(",
"self",
",",
"retrieve",
")",
":",
"if",
"self",
".",
"around",
":",
"around",
"=",
"self",
".",
"around",
".",
"id",
"if",
"self",
".",
"around",
"else",
"None",
"data",
"=",
"await",
"self",
".",
"logs_from",
"(",
"self",
".",
"channel",
".",
"id",
",",
"retrieve",
",",
"around",
"=",
"around",
")",
"self",
".",
"around",
"=",
"None",
"return",
"data",
"return",
"[",
"]"
] | 43.75 | 18.625 |
def degree(self, vertex):
"""
Return the degree of a vertex
"""
try:
return len(self.vertices[vertex])
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,)) | [
"def",
"degree",
"(",
"self",
",",
"vertex",
")",
":",
"try",
":",
"return",
"len",
"(",
"self",
".",
"vertices",
"[",
"vertex",
"]",
")",
"except",
"KeyError",
":",
"raise",
"GraphInsertError",
"(",
"\"Vertex %s doesn't exist.\"",
"%",
"(",
"vertex",
",",
")",
")"
] | 30.75 | 12.25 |
def word_break(s, word_dict):
"""
:type s: str
:type word_dict: Set[str]
:rtype: bool
"""
dp = [False] * (len(s)+1)
dp[0] = True
for i in range(1, len(s)+1):
for j in range(0, i):
if dp[j] and s[j:i] in word_dict:
dp[i] = True
break
return dp[-1] | [
"def",
"word_break",
"(",
"s",
",",
"word_dict",
")",
":",
"dp",
"=",
"[",
"False",
"]",
"*",
"(",
"len",
"(",
"s",
")",
"+",
"1",
")",
"dp",
"[",
"0",
"]",
"=",
"True",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"s",
")",
"+",
"1",
")",
":",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"i",
")",
":",
"if",
"dp",
"[",
"j",
"]",
"and",
"s",
"[",
"j",
":",
"i",
"]",
"in",
"word_dict",
":",
"dp",
"[",
"i",
"]",
"=",
"True",
"break",
"return",
"dp",
"[",
"-",
"1",
"]"
] | 22.928571 | 13.071429 |
def keyReleaseEvent(self, event):
"""Override Qt method."""
if event.key() == Qt.Key_Alt:
self._alt_key_is_down = False
self.update() | [
"def",
"keyReleaseEvent",
"(",
"self",
",",
"event",
")",
":",
"if",
"event",
".",
"key",
"(",
")",
"==",
"Qt",
".",
"Key_Alt",
":",
"self",
".",
"_alt_key_is_down",
"=",
"False",
"self",
".",
"update",
"(",
")"
] | 33.8 | 5.2 |
def generate_type_docs(types):
"""Parse an object of types and generate RAML documentation for them.
Expects each type to be either a regular type or a list/array. If a type is a list,
it must specify what type to use for each item.
"""
output = StringIO()
indent = " " # 2
# loop through the basic types and add them to the RAML formatted output
for type_name in types:
if types[type_name].get("type") != None:
output.write(indent + type_name + ":\n")
indent += " " # 4
type_safe = types[type_name]["type"].replace(" ", "_")
# if we are dealing with a list, set type to array and specify type of items
if types[type_name].get("items") != None:
items_safe = types[type_name]["items"].replace(" ", "_")
# if items_safe == "base":
# items_safe = "object"
output.write(indent + "type: " + type_safe + "\n")
output.write(indent + "items: " + items_safe + "\n")
# otherwise, use the type per normal
else:
output.write(indent + "type: " + type_safe + "\n")
# add the description
if types[type_name].get("description") != None:
output.write(indent + "description: " + types[type_name]["description"] + "\n")
indent = indent[:-2] # 2
type_docs = output.getvalue()
output.close()
return type_docs | [
"def",
"generate_type_docs",
"(",
"types",
")",
":",
"output",
"=",
"StringIO",
"(",
")",
"indent",
"=",
"\" \"",
"# 2",
"# loop through the basic types and add them to the RAML formatted output",
"for",
"type_name",
"in",
"types",
":",
"if",
"types",
"[",
"type_name",
"]",
".",
"get",
"(",
"\"type\"",
")",
"!=",
"None",
":",
"output",
".",
"write",
"(",
"indent",
"+",
"type_name",
"+",
"\":\\n\"",
")",
"indent",
"+=",
"\" \"",
"# 4",
"type_safe",
"=",
"types",
"[",
"type_name",
"]",
"[",
"\"type\"",
"]",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
"# if we are dealing with a list, set type to array and specify type of items",
"if",
"types",
"[",
"type_name",
"]",
".",
"get",
"(",
"\"items\"",
")",
"!=",
"None",
":",
"items_safe",
"=",
"types",
"[",
"type_name",
"]",
"[",
"\"items\"",
"]",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
"# if items_safe == \"base\":",
"# items_safe = \"object\"",
"output",
".",
"write",
"(",
"indent",
"+",
"\"type: \"",
"+",
"type_safe",
"+",
"\"\\n\"",
")",
"output",
".",
"write",
"(",
"indent",
"+",
"\"items: \"",
"+",
"items_safe",
"+",
"\"\\n\"",
")",
"# otherwise, use the type per normal",
"else",
":",
"output",
".",
"write",
"(",
"indent",
"+",
"\"type: \"",
"+",
"type_safe",
"+",
"\"\\n\"",
")",
"# add the description",
"if",
"types",
"[",
"type_name",
"]",
".",
"get",
"(",
"\"description\"",
")",
"!=",
"None",
":",
"output",
".",
"write",
"(",
"indent",
"+",
"\"description: \"",
"+",
"types",
"[",
"type_name",
"]",
"[",
"\"description\"",
"]",
"+",
"\"\\n\"",
")",
"indent",
"=",
"indent",
"[",
":",
"-",
"2",
"]",
"# 2",
"type_docs",
"=",
"output",
".",
"getvalue",
"(",
")",
"output",
".",
"close",
"(",
")",
"return",
"type_docs"
] | 35.439024 | 24.073171 |
def build_database_sortmerna(fasta_path,
max_pos=None,
output_dir=None,
temp_dir=tempfile.gettempdir(),
HALT_EXEC=False):
""" Build sortmerna db from fasta_path; return db name
and list of files created
Parameters
----------
fasta_path : string
path to fasta file of sequences to build database.
max_pos : integer, optional
maximum positions to store per seed in index
[default: 10000].
output_dir : string, optional
directory where output should be written
[default: same directory as fasta_path]
HALT_EXEC : boolean, optional
halt just before running the indexdb_rna command
and print the command -- useful for debugging
[default: False].
Return
------
db_name : string
filepath to indexed database.
db_filepaths : list
output files by indexdb_rna
"""
if fasta_path is None:
raise ValueError("Error: path to fasta reference "
"sequences must exist.")
fasta_dir, fasta_filename = split(fasta_path)
if not output_dir:
output_dir = fasta_dir or '.'
# Will cd to this directory, so just pass the filename
# so the app is not confused by relative paths
fasta_path = fasta_filename
index_basename = splitext(fasta_filename)[0]
db_name = join(output_dir, index_basename)
# Instantiate the object
sdb = IndexDB(WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
# The parameter --ref STRING must follow the format where
# STRING = /path/to/ref.fasta,/path/to/ref.idx
sdb.Parameters['--ref'].on("%s,%s" % (fasta_path, db_name))
# Set temporary directory
sdb.Parameters['--tmpdir'].on(temp_dir)
# Override --max_pos parameter
if max_pos is not None:
sdb.Parameters['--max_pos'].on(max_pos)
# Run indexdb_rna
app_result = sdb()
# Return all output files (by indexdb_rna) as a list,
# first however remove the StdErr and StdOut filepaths
# as they files will be destroyed at the exit from
# this function (IndexDB is a local instance)
db_filepaths = [v.name for k, v in app_result.items()
if k not in {'StdErr', 'StdOut'} and hasattr(v, 'name')]
return db_name, db_filepaths | [
"def",
"build_database_sortmerna",
"(",
"fasta_path",
",",
"max_pos",
"=",
"None",
",",
"output_dir",
"=",
"None",
",",
"temp_dir",
"=",
"tempfile",
".",
"gettempdir",
"(",
")",
",",
"HALT_EXEC",
"=",
"False",
")",
":",
"if",
"fasta_path",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Error: path to fasta reference \"",
"\"sequences must exist.\"",
")",
"fasta_dir",
",",
"fasta_filename",
"=",
"split",
"(",
"fasta_path",
")",
"if",
"not",
"output_dir",
":",
"output_dir",
"=",
"fasta_dir",
"or",
"'.'",
"# Will cd to this directory, so just pass the filename",
"# so the app is not confused by relative paths",
"fasta_path",
"=",
"fasta_filename",
"index_basename",
"=",
"splitext",
"(",
"fasta_filename",
")",
"[",
"0",
"]",
"db_name",
"=",
"join",
"(",
"output_dir",
",",
"index_basename",
")",
"# Instantiate the object",
"sdb",
"=",
"IndexDB",
"(",
"WorkingDir",
"=",
"output_dir",
",",
"HALT_EXEC",
"=",
"HALT_EXEC",
")",
"# The parameter --ref STRING must follow the format where",
"# STRING = /path/to/ref.fasta,/path/to/ref.idx",
"sdb",
".",
"Parameters",
"[",
"'--ref'",
"]",
".",
"on",
"(",
"\"%s,%s\"",
"%",
"(",
"fasta_path",
",",
"db_name",
")",
")",
"# Set temporary directory",
"sdb",
".",
"Parameters",
"[",
"'--tmpdir'",
"]",
".",
"on",
"(",
"temp_dir",
")",
"# Override --max_pos parameter",
"if",
"max_pos",
"is",
"not",
"None",
":",
"sdb",
".",
"Parameters",
"[",
"'--max_pos'",
"]",
".",
"on",
"(",
"max_pos",
")",
"# Run indexdb_rna",
"app_result",
"=",
"sdb",
"(",
")",
"# Return all output files (by indexdb_rna) as a list,",
"# first however remove the StdErr and StdOut filepaths",
"# as they files will be destroyed at the exit from",
"# this function (IndexDB is a local instance)",
"db_filepaths",
"=",
"[",
"v",
".",
"name",
"for",
"k",
",",
"v",
"in",
"app_result",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"{",
"'StdErr'",
",",
"'StdOut'",
"}",
"and",
"hasattr",
"(",
"v",
",",
"'name'",
")",
"]",
"return",
"db_name",
",",
"db_filepaths"
] | 33.985915 | 17.464789 |
def mkconstraints():
"""
Make constraint list for binary constraint problem.
"""
constraints = []
for j in range(1, 10):
vars = ["%s%d" % (i, j) for i in uppercase[:9]]
constraints.extend((c, const_different) for c in combinations(vars, 2))
for i in uppercase[:9]:
vars = ["%s%d" % (i, j) for j in range(1, 10)]
constraints.extend((c, const_different) for c in combinations(vars, 2))
for b0 in ['ABC', 'DEF', 'GHI']:
for b1 in [[1, 2, 3], [4, 5, 6], [7, 8, 9]]:
vars = ["%s%d" % (i, j) for i in b0 for j in b1]
l = list((c, const_different) for c in combinations(vars, 2))
constraints.extend(l)
return constraints | [
"def",
"mkconstraints",
"(",
")",
":",
"constraints",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"10",
")",
":",
"vars",
"=",
"[",
"\"%s%d\"",
"%",
"(",
"i",
",",
"j",
")",
"for",
"i",
"in",
"uppercase",
"[",
":",
"9",
"]",
"]",
"constraints",
".",
"extend",
"(",
"(",
"c",
",",
"const_different",
")",
"for",
"c",
"in",
"combinations",
"(",
"vars",
",",
"2",
")",
")",
"for",
"i",
"in",
"uppercase",
"[",
":",
"9",
"]",
":",
"vars",
"=",
"[",
"\"%s%d\"",
"%",
"(",
"i",
",",
"j",
")",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"10",
")",
"]",
"constraints",
".",
"extend",
"(",
"(",
"c",
",",
"const_different",
")",
"for",
"c",
"in",
"combinations",
"(",
"vars",
",",
"2",
")",
")",
"for",
"b0",
"in",
"[",
"'ABC'",
",",
"'DEF'",
",",
"'GHI'",
"]",
":",
"for",
"b1",
"in",
"[",
"[",
"1",
",",
"2",
",",
"3",
"]",
",",
"[",
"4",
",",
"5",
",",
"6",
"]",
",",
"[",
"7",
",",
"8",
",",
"9",
"]",
"]",
":",
"vars",
"=",
"[",
"\"%s%d\"",
"%",
"(",
"i",
",",
"j",
")",
"for",
"i",
"in",
"b0",
"for",
"j",
"in",
"b1",
"]",
"l",
"=",
"list",
"(",
"(",
"c",
",",
"const_different",
")",
"for",
"c",
"in",
"combinations",
"(",
"vars",
",",
"2",
")",
")",
"constraints",
".",
"extend",
"(",
"l",
")",
"return",
"constraints"
] | 33.571429 | 21.095238 |
def add_path(self, nodes, **attr):
"""In replacement for Deprecated add_path method"""
if nx.__version__[0] == "1":
return super().add_path(nodes, **attr)
else:
return nx.add_path(self, nodes, **attr) | [
"def",
"add_path",
"(",
"self",
",",
"nodes",
",",
"*",
"*",
"attr",
")",
":",
"if",
"nx",
".",
"__version__",
"[",
"0",
"]",
"==",
"\"1\"",
":",
"return",
"super",
"(",
")",
".",
"add_path",
"(",
"nodes",
",",
"*",
"*",
"attr",
")",
"else",
":",
"return",
"nx",
".",
"add_path",
"(",
"self",
",",
"nodes",
",",
"*",
"*",
"attr",
")"
] | 40.5 | 9.666667 |
def delete_report(server, report_number, timeout=HQ_DEFAULT_TIMEOUT):
"""
Delete a specific crash report from the server.
:param report_number: Report Number
:return: server response
"""
try:
r = requests.post(server + "/reports/delete/%d" % report_number, timeout=timeout)
except Exception as e:
logging.error(e)
return False
return r | [
"def",
"delete_report",
"(",
"server",
",",
"report_number",
",",
"timeout",
"=",
"HQ_DEFAULT_TIMEOUT",
")",
":",
"try",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"server",
"+",
"\"/reports/delete/%d\"",
"%",
"report_number",
",",
"timeout",
"=",
"timeout",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"error",
"(",
"e",
")",
"return",
"False",
"return",
"r"
] | 29.230769 | 19.384615 |
async def set(self, *args, **kwargs):
"""
Set Secret
Set the secret associated with some key. If the secret already exists, it is
updated instead.
This method takes input: ``v1/secret.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["set"], *args, **kwargs) | [
"async",
"def",
"set",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"await",
"self",
".",
"_makeApiCall",
"(",
"self",
".",
"funcinfo",
"[",
"\"set\"",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 26.769231 | 23.230769 |
def _get_style_of_faulting_term(self, C, rup):
"""
Returns the style-of-faulting scaling term defined in equations 4 to 6
"""
if (rup.rake > 30.0) and (rup.rake < 150.):
frv = 1.0
fnm = 0.0
elif (rup.rake > -150.0) and (rup.rake < -30.0):
fnm = 1.0
frv = 0.0
else:
fnm = 0.0
frv = 0.0
fflt_f = (self.CONSTS["c8"] * frv) + (C["c9"] * fnm)
if rup.mag <= 4.5:
fflt_m = 0.0
elif rup.mag > 5.5:
fflt_m = 1.0
else:
fflt_m = rup.mag - 4.5
return fflt_f * fflt_m | [
"def",
"_get_style_of_faulting_term",
"(",
"self",
",",
"C",
",",
"rup",
")",
":",
"if",
"(",
"rup",
".",
"rake",
">",
"30.0",
")",
"and",
"(",
"rup",
".",
"rake",
"<",
"150.",
")",
":",
"frv",
"=",
"1.0",
"fnm",
"=",
"0.0",
"elif",
"(",
"rup",
".",
"rake",
">",
"-",
"150.0",
")",
"and",
"(",
"rup",
".",
"rake",
"<",
"-",
"30.0",
")",
":",
"fnm",
"=",
"1.0",
"frv",
"=",
"0.0",
"else",
":",
"fnm",
"=",
"0.0",
"frv",
"=",
"0.0",
"fflt_f",
"=",
"(",
"self",
".",
"CONSTS",
"[",
"\"c8\"",
"]",
"*",
"frv",
")",
"+",
"(",
"C",
"[",
"\"c9\"",
"]",
"*",
"fnm",
")",
"if",
"rup",
".",
"mag",
"<=",
"4.5",
":",
"fflt_m",
"=",
"0.0",
"elif",
"rup",
".",
"mag",
">",
"5.5",
":",
"fflt_m",
"=",
"1.0",
"else",
":",
"fflt_m",
"=",
"rup",
".",
"mag",
"-",
"4.5",
"return",
"fflt_f",
"*",
"fflt_m"
] | 28.636364 | 17 |
def _thread_loop(self):
"""Background thread used when Sender is in asynchronous/interval mode."""
last_check_time = time.time()
messages = []
while True:
# Get first message from queue, blocking until the next time we
# should be sending
time_since_last_check = time.time() - last_check_time
time_till_next_check = max(0, self.interval - time_since_last_check)
try:
message = self._queue.get(timeout=time_till_next_check)
except queue.Empty:
pass
else:
if message is None:
# None is the signal to stop this background thread
break
messages.append(message)
# Get any other messages currently on queue without blocking,
# paying attention to None ("stop thread" signal)
should_stop = False
while True:
try:
message = self._queue.get_nowait()
except queue.Empty:
break
if message is None:
should_stop = True
break
messages.append(message)
if should_stop:
break
# If it's time to send, send what we've collected
current_time = time.time()
if current_time - last_check_time >= self.interval:
last_check_time = current_time
for i in range(0, len(messages), self.batch_size):
batch = messages[i:i + self.batch_size]
self.send_socket(b''.join(batch))
messages = []
# Send any final messages before exiting thread
for i in range(0, len(messages), self.batch_size):
batch = messages[i:i + self.batch_size]
self.send_socket(b''.join(batch)) | [
"def",
"_thread_loop",
"(",
"self",
")",
":",
"last_check_time",
"=",
"time",
".",
"time",
"(",
")",
"messages",
"=",
"[",
"]",
"while",
"True",
":",
"# Get first message from queue, blocking until the next time we",
"# should be sending",
"time_since_last_check",
"=",
"time",
".",
"time",
"(",
")",
"-",
"last_check_time",
"time_till_next_check",
"=",
"max",
"(",
"0",
",",
"self",
".",
"interval",
"-",
"time_since_last_check",
")",
"try",
":",
"message",
"=",
"self",
".",
"_queue",
".",
"get",
"(",
"timeout",
"=",
"time_till_next_check",
")",
"except",
"queue",
".",
"Empty",
":",
"pass",
"else",
":",
"if",
"message",
"is",
"None",
":",
"# None is the signal to stop this background thread",
"break",
"messages",
".",
"append",
"(",
"message",
")",
"# Get any other messages currently on queue without blocking,",
"# paying attention to None (\"stop thread\" signal)",
"should_stop",
"=",
"False",
"while",
"True",
":",
"try",
":",
"message",
"=",
"self",
".",
"_queue",
".",
"get_nowait",
"(",
")",
"except",
"queue",
".",
"Empty",
":",
"break",
"if",
"message",
"is",
"None",
":",
"should_stop",
"=",
"True",
"break",
"messages",
".",
"append",
"(",
"message",
")",
"if",
"should_stop",
":",
"break",
"# If it's time to send, send what we've collected",
"current_time",
"=",
"time",
".",
"time",
"(",
")",
"if",
"current_time",
"-",
"last_check_time",
">=",
"self",
".",
"interval",
":",
"last_check_time",
"=",
"current_time",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"messages",
")",
",",
"self",
".",
"batch_size",
")",
":",
"batch",
"=",
"messages",
"[",
"i",
":",
"i",
"+",
"self",
".",
"batch_size",
"]",
"self",
".",
"send_socket",
"(",
"b''",
".",
"join",
"(",
"batch",
")",
")",
"messages",
"=",
"[",
"]",
"# Send any final messages before exiting thread",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"messages",
")",
",",
"self",
".",
"batch_size",
")",
":",
"batch",
"=",
"messages",
"[",
"i",
":",
"i",
"+",
"self",
".",
"batch_size",
"]",
"self",
".",
"send_socket",
"(",
"b''",
".",
"join",
"(",
"batch",
")",
")"
] | 41.382979 | 16.659574 |
def _print_SCALAR_TYPES(self, expr, *args, **kwargs):
"""Render scalars"""
adjoint = kwargs.get('adjoint', False)
if adjoint:
expr = expr.conjugate()
if isinstance(expr, SympyBasic):
self._sympy_printer._print_level = self._print_level + 1
res = self._sympy_printer.doprint(expr)
else: # numeric type
try:
if int(expr) == expr:
# In Python, objects that evaluate equal (e.g. 2.0 == 2)
# have the same hash. We want to normalize this, so that we
# get consistent results when printing with a cache
expr = int(expr)
except TypeError:
pass
if adjoint:
kwargs = {
key: val for (key, val) in kwargs.items()
if key != 'adjoint'}
res = self._print(expr, *args, **kwargs)
return res | [
"def",
"_print_SCALAR_TYPES",
"(",
"self",
",",
"expr",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"adjoint",
"=",
"kwargs",
".",
"get",
"(",
"'adjoint'",
",",
"False",
")",
"if",
"adjoint",
":",
"expr",
"=",
"expr",
".",
"conjugate",
"(",
")",
"if",
"isinstance",
"(",
"expr",
",",
"SympyBasic",
")",
":",
"self",
".",
"_sympy_printer",
".",
"_print_level",
"=",
"self",
".",
"_print_level",
"+",
"1",
"res",
"=",
"self",
".",
"_sympy_printer",
".",
"doprint",
"(",
"expr",
")",
"else",
":",
"# numeric type",
"try",
":",
"if",
"int",
"(",
"expr",
")",
"==",
"expr",
":",
"# In Python, objects that evaluate equal (e.g. 2.0 == 2)",
"# have the same hash. We want to normalize this, so that we",
"# get consistent results when printing with a cache",
"expr",
"=",
"int",
"(",
"expr",
")",
"except",
"TypeError",
":",
"pass",
"if",
"adjoint",
":",
"kwargs",
"=",
"{",
"key",
":",
"val",
"for",
"(",
"key",
",",
"val",
")",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"key",
"!=",
"'adjoint'",
"}",
"res",
"=",
"self",
".",
"_print",
"(",
"expr",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"res"
] | 41.434783 | 15.173913 |
def _cloglog_utility_transform(systematic_utilities,
alt_IDs,
rows_to_alts,
shape_params,
intercept_params,
intercept_ref_pos=None,
*args, **kwargs):
"""
Parameters
----------
systematic_utilities : 1D ndarray.
All elements should be ints, floats, or longs. Should contain the
systematic utilities of each observation per available alternative.
Note that this vector is formed by the dot product of the design matrix
with the vector of utility coefficients.
alt_IDs : 1D ndarray.
All elements should be ints. There should be one row per obervation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D scipy sparse matrix.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset. All
elements should be zeros or ones.
shape_params : None or 1D ndarray.
Should be None since the clog-log model has no shape parameters.
intercept_params : None or 1D ndarray.
If an array, each element should be an int, float, or long. If J is the
total number of possible alternatives for the dataset being modeled,
there should be J-1 elements in the array. Use None if no outside
intercepts are being estimated.
intercept_ref_pos : int, or None, optional.
Specifies the index of the alternative, in the ordered array of unique
alternatives, that is not having its intercept parameter estimated (in
order to ensure identifiability). Should only be None if
`intercept_params` is None.
Returns
-------
transformations : 2D ndarray.
Should have shape `(systematic_utilities.shape[0], 1)`. The returned
array contains the transformed utility values for this model. All
elements will be ints, longs, or floats.
"""
# Calculate the data dependent part of the transformation
# Also, along the way, guard against numeric underflow or overflow
exp_v = np.exp(systematic_utilities)
# exp_v[np.isposinf(exp_v)] = max_comp_value
exp_exp_v = np.exp(exp_v)
# exp_exp_v[np.isposinf(exp_exp_v)] = max_comp_value
# Calculate the transformed systematic utilities
transformations = np.log(exp_exp_v - 1)
# Guard against underflow
transformations[np.isneginf(transformations)] = -1 * max_comp_value
# Guard against overflow when systematic utilities are moderately large
too_big_idx = np.where(systematic_utilities >= 3.7)
transformations[too_big_idx] = np.exp(systematic_utilities[too_big_idx])
# Guard against overflow when systematic utilities are completely too big.
inf_idx = np.isposinf(transformations)
transformations[inf_idx] = max_comp_value
# Account for the outside intercept parameters if there are any.
if intercept_params is not None and intercept_ref_pos is not None:
# Get a list of all the indices (or row indices) corresponding to the
# alternatives whose intercept parameters are being estimated.
needed_idxs = range(rows_to_alts.shape[1])
needed_idxs.remove(intercept_ref_pos)
if len(intercept_params.shape) > 1 and intercept_params.shape[1] > 1:
# Get an array of zeros with shape
# (num_possible_alternatives, num_parameter_samples)
all_intercepts = np.zeros((rows_to_alts.shape[1],
intercept_params.shape[1]))
# For alternatives having their intercept estimated, replace the
# zeros with the current value of the estimated intercepts
all_intercepts[needed_idxs, :] = intercept_params
else:
# Get an array of zeros with shape (num_possible_alternatives,)
all_intercepts = np.zeros(rows_to_alts.shape[1])
# For alternatives having their intercept estimated, replace the
# zeros with the current value of the estimated intercepts
all_intercepts[needed_idxs] = intercept_params
# Add the intercept values to f(x, beta, c)
transformations += rows_to_alts.dot(all_intercepts)
# Be sure to return a 2D array since other functions will be expecting that
if len(transformations.shape) == 1:
transformations = transformations[:, None]
return transformations | [
"def",
"_cloglog_utility_transform",
"(",
"systematic_utilities",
",",
"alt_IDs",
",",
"rows_to_alts",
",",
"shape_params",
",",
"intercept_params",
",",
"intercept_ref_pos",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Calculate the data dependent part of the transformation",
"# Also, along the way, guard against numeric underflow or overflow",
"exp_v",
"=",
"np",
".",
"exp",
"(",
"systematic_utilities",
")",
"# exp_v[np.isposinf(exp_v)] = max_comp_value",
"exp_exp_v",
"=",
"np",
".",
"exp",
"(",
"exp_v",
")",
"# exp_exp_v[np.isposinf(exp_exp_v)] = max_comp_value",
"# Calculate the transformed systematic utilities",
"transformations",
"=",
"np",
".",
"log",
"(",
"exp_exp_v",
"-",
"1",
")",
"# Guard against underflow",
"transformations",
"[",
"np",
".",
"isneginf",
"(",
"transformations",
")",
"]",
"=",
"-",
"1",
"*",
"max_comp_value",
"# Guard against overflow when systematic utilities are moderately large",
"too_big_idx",
"=",
"np",
".",
"where",
"(",
"systematic_utilities",
">=",
"3.7",
")",
"transformations",
"[",
"too_big_idx",
"]",
"=",
"np",
".",
"exp",
"(",
"systematic_utilities",
"[",
"too_big_idx",
"]",
")",
"# Guard against overflow when systematic utilities are completely too big.",
"inf_idx",
"=",
"np",
".",
"isposinf",
"(",
"transformations",
")",
"transformations",
"[",
"inf_idx",
"]",
"=",
"max_comp_value",
"# Account for the outside intercept parameters if there are any.",
"if",
"intercept_params",
"is",
"not",
"None",
"and",
"intercept_ref_pos",
"is",
"not",
"None",
":",
"# Get a list of all the indices (or row indices) corresponding to the",
"# alternatives whose intercept parameters are being estimated.",
"needed_idxs",
"=",
"range",
"(",
"rows_to_alts",
".",
"shape",
"[",
"1",
"]",
")",
"needed_idxs",
".",
"remove",
"(",
"intercept_ref_pos",
")",
"if",
"len",
"(",
"intercept_params",
".",
"shape",
")",
">",
"1",
"and",
"intercept_params",
".",
"shape",
"[",
"1",
"]",
">",
"1",
":",
"# Get an array of zeros with shape",
"# (num_possible_alternatives, num_parameter_samples)",
"all_intercepts",
"=",
"np",
".",
"zeros",
"(",
"(",
"rows_to_alts",
".",
"shape",
"[",
"1",
"]",
",",
"intercept_params",
".",
"shape",
"[",
"1",
"]",
")",
")",
"# For alternatives having their intercept estimated, replace the",
"# zeros with the current value of the estimated intercepts",
"all_intercepts",
"[",
"needed_idxs",
",",
":",
"]",
"=",
"intercept_params",
"else",
":",
"# Get an array of zeros with shape (num_possible_alternatives,)",
"all_intercepts",
"=",
"np",
".",
"zeros",
"(",
"rows_to_alts",
".",
"shape",
"[",
"1",
"]",
")",
"# For alternatives having their intercept estimated, replace the",
"# zeros with the current value of the estimated intercepts",
"all_intercepts",
"[",
"needed_idxs",
"]",
"=",
"intercept_params",
"# Add the intercept values to f(x, beta, c)",
"transformations",
"+=",
"rows_to_alts",
".",
"dot",
"(",
"all_intercepts",
")",
"# Be sure to return a 2D array since other functions will be expecting that",
"if",
"len",
"(",
"transformations",
".",
"shape",
")",
"==",
"1",
":",
"transformations",
"=",
"transformations",
"[",
":",
",",
"None",
"]",
"return",
"transformations"
] | 49.946237 | 21.989247 |
def get(ctx):
"""Get build job.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon build -b 1 get
```
\b
```bash
$ polyaxon build --build=1 --project=project_name get
```
"""
user, project_name, _build = get_build_or_local(ctx.obj.get('project'), ctx.obj.get('build'))
try:
response = PolyaxonClient().build_job.get_build(user, project_name, _build)
cache.cache(config_manager=BuildJobManager, response=response)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get build job `{}`.'.format(_build))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
get_build_details(response) | [
"def",
"get",
"(",
"ctx",
")",
":",
"user",
",",
"project_name",
",",
"_build",
"=",
"get_build_or_local",
"(",
"ctx",
".",
"obj",
".",
"get",
"(",
"'project'",
")",
",",
"ctx",
".",
"obj",
".",
"get",
"(",
"'build'",
")",
")",
"try",
":",
"response",
"=",
"PolyaxonClient",
"(",
")",
".",
"build_job",
".",
"get_build",
"(",
"user",
",",
"project_name",
",",
"_build",
")",
"cache",
".",
"cache",
"(",
"config_manager",
"=",
"BuildJobManager",
",",
"response",
"=",
"response",
")",
"except",
"(",
"PolyaxonHTTPError",
",",
"PolyaxonShouldExitError",
",",
"PolyaxonClientException",
")",
"as",
"e",
":",
"Printer",
".",
"print_error",
"(",
"'Could not get build job `{}`.'",
".",
"format",
"(",
"_build",
")",
")",
"Printer",
".",
"print_error",
"(",
"'Error message `{}`.'",
".",
"format",
"(",
"e",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"get_build_details",
"(",
"response",
")"
] | 28.518519 | 28.888889 |
def list_installed_files(self):
"""
Iterates over the ``installed-files.txt`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: a list of (path, hash, size)
"""
def _md5(path):
f = open(path, 'rb')
try:
content = f.read()
finally:
f.close()
return hashlib.md5(content).hexdigest()
def _size(path):
return os.stat(path).st_size
record_path = os.path.join(self.path, 'installed-files.txt')
result = []
if os.path.exists(record_path):
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
p = os.path.normpath(os.path.join(self.path, line))
# "./" is present as a marker between installed files
# and installation metadata files
if not os.path.exists(p):
logger.warning('Non-existent file: %s', p)
if p.endswith(('.pyc', '.pyo')):
continue
#otherwise fall through and fail
if not os.path.isdir(p):
result.append((p, _md5(p), _size(p)))
result.append((record_path, None, None))
return result | [
"def",
"list_installed_files",
"(",
"self",
")",
":",
"def",
"_md5",
"(",
"path",
")",
":",
"f",
"=",
"open",
"(",
"path",
",",
"'rb'",
")",
"try",
":",
"content",
"=",
"f",
".",
"read",
"(",
")",
"finally",
":",
"f",
".",
"close",
"(",
")",
"return",
"hashlib",
".",
"md5",
"(",
"content",
")",
".",
"hexdigest",
"(",
")",
"def",
"_size",
"(",
"path",
")",
":",
"return",
"os",
".",
"stat",
"(",
"path",
")",
".",
"st_size",
"record_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"'installed-files.txt'",
")",
"result",
"=",
"[",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"record_path",
")",
":",
"with",
"codecs",
".",
"open",
"(",
"record_path",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"p",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"line",
")",
")",
"# \"./\" is present as a marker between installed files",
"# and installation metadata files",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"p",
")",
":",
"logger",
".",
"warning",
"(",
"'Non-existent file: %s'",
",",
"p",
")",
"if",
"p",
".",
"endswith",
"(",
"(",
"'.pyc'",
",",
"'.pyo'",
")",
")",
":",
"continue",
"#otherwise fall through and fail",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"p",
")",
":",
"result",
".",
"append",
"(",
"(",
"p",
",",
"_md5",
"(",
"p",
")",
",",
"_size",
"(",
"p",
")",
")",
")",
"result",
".",
"append",
"(",
"(",
"record_path",
",",
"None",
",",
"None",
")",
")",
"return",
"result"
] | 37.432432 | 16.891892 |
def get_reference_types(self):
"""Return the reference types.
Zotero.item_types() caches data after the first API call.
"""
item_types = self._zotero_lib.item_types()
return sorted([x["itemType"] for x in item_types]) | [
"def",
"get_reference_types",
"(",
"self",
")",
":",
"item_types",
"=",
"self",
".",
"_zotero_lib",
".",
"item_types",
"(",
")",
"return",
"sorted",
"(",
"[",
"x",
"[",
"\"itemType\"",
"]",
"for",
"x",
"in",
"item_types",
"]",
")"
] | 36 | 14.714286 |
def disassemble(self, data, address, architecture_mode=None):
"""Disassemble the data into an instruction.
"""
# TODO: Improve this code!
if architecture_mode is None:
if self._arch_mode is None:
architecture_mode = ARCH_ARM_MODE_THUMB
else:
architecture_mode = self._arch_mode
self._disassembler = self._available_disassemblers[architecture_mode]
disasm = self._cs_disassemble_one(data, address)
instr = self._cs_translate_insn(disasm)
if instr:
instr.address = address
instr.size = disasm.size
instr.bytes = data[0:disasm.size]
else:
raise DisassemblerError()
return instr | [
"def",
"disassemble",
"(",
"self",
",",
"data",
",",
"address",
",",
"architecture_mode",
"=",
"None",
")",
":",
"# TODO: Improve this code!",
"if",
"architecture_mode",
"is",
"None",
":",
"if",
"self",
".",
"_arch_mode",
"is",
"None",
":",
"architecture_mode",
"=",
"ARCH_ARM_MODE_THUMB",
"else",
":",
"architecture_mode",
"=",
"self",
".",
"_arch_mode",
"self",
".",
"_disassembler",
"=",
"self",
".",
"_available_disassemblers",
"[",
"architecture_mode",
"]",
"disasm",
"=",
"self",
".",
"_cs_disassemble_one",
"(",
"data",
",",
"address",
")",
"instr",
"=",
"self",
".",
"_cs_translate_insn",
"(",
"disasm",
")",
"if",
"instr",
":",
"instr",
".",
"address",
"=",
"address",
"instr",
".",
"size",
"=",
"disasm",
".",
"size",
"instr",
".",
"bytes",
"=",
"data",
"[",
"0",
":",
"disasm",
".",
"size",
"]",
"else",
":",
"raise",
"DisassemblerError",
"(",
")",
"return",
"instr"
] | 30.833333 | 17.791667 |
def setup_default_permissions(session, instance):
"""Setup default permissions on newly created entities according to.
:attr:`Entity.__default_permissions__`.
"""
if instance not in session.new or not isinstance(instance, Entity):
return
if not current_app:
# working outside app_context. Raw object manipulation
return
_setup_default_permissions(instance) | [
"def",
"setup_default_permissions",
"(",
"session",
",",
"instance",
")",
":",
"if",
"instance",
"not",
"in",
"session",
".",
"new",
"or",
"not",
"isinstance",
"(",
"instance",
",",
"Entity",
")",
":",
"return",
"if",
"not",
"current_app",
":",
"# working outside app_context. Raw object manipulation",
"return",
"_setup_default_permissions",
"(",
"instance",
")"
] | 30.384615 | 19.538462 |
def field(self, type, field, default=None):
'''convenient function for returning an arbitrary MAVLink
field with a default'''
if not type in self.messages:
return default
return getattr(self.messages[type], field, default) | [
"def",
"field",
"(",
"self",
",",
"type",
",",
"field",
",",
"default",
"=",
"None",
")",
":",
"if",
"not",
"type",
"in",
"self",
".",
"messages",
":",
"return",
"default",
"return",
"getattr",
"(",
"self",
".",
"messages",
"[",
"type",
"]",
",",
"field",
",",
"default",
")"
] | 44 | 11.666667 |
def print_loop(self, sf, sftag, f=sys.stdout, file_format="nmrstar", tw=3):
"""Print loop into a file or stdout.
:param str sf: Saveframe name.
:param str sftag: Saveframe tag, i.e. field name.
:param io.StringIO f: writable file-like stream.
:param str file_format: Format to use: `nmrstar` or `json`.
:param int tw: Tab width.
:return: None
:rtype: :py:obj:`None`
"""
if file_format == "nmrstar":
# First print the fields
for field in self[sf][sftag][0]:
print(u"{}_{}".format(tw * u" ", field), file=f)
print(u"", file=f) # new line between fields and values
# Then print the values
for valuesdict in self[sf][sftag][1]:
# need to escape value with quotes (i.e. u"'{}'".format()) if value consists of two or more words
print(u"{}{}".format(tw * u" ", u" ".join([u"'{}'".format(value) if len(value.split()) > 1 else value for value
in valuesdict.values()])), file=f)
elif file_format == "json":
print(json.dumps(self[sf][sftag], sort_keys=False, indent=4), file=f) | [
"def",
"print_loop",
"(",
"self",
",",
"sf",
",",
"sftag",
",",
"f",
"=",
"sys",
".",
"stdout",
",",
"file_format",
"=",
"\"nmrstar\"",
",",
"tw",
"=",
"3",
")",
":",
"if",
"file_format",
"==",
"\"nmrstar\"",
":",
"# First print the fields",
"for",
"field",
"in",
"self",
"[",
"sf",
"]",
"[",
"sftag",
"]",
"[",
"0",
"]",
":",
"print",
"(",
"u\"{}_{}\"",
".",
"format",
"(",
"tw",
"*",
"u\" \"",
",",
"field",
")",
",",
"file",
"=",
"f",
")",
"print",
"(",
"u\"\"",
",",
"file",
"=",
"f",
")",
"# new line between fields and values",
"# Then print the values",
"for",
"valuesdict",
"in",
"self",
"[",
"sf",
"]",
"[",
"sftag",
"]",
"[",
"1",
"]",
":",
"# need to escape value with quotes (i.e. u\"'{}'\".format()) if value consists of two or more words",
"print",
"(",
"u\"{}{}\"",
".",
"format",
"(",
"tw",
"*",
"u\" \"",
",",
"u\" \"",
".",
"join",
"(",
"[",
"u\"'{}'\"",
".",
"format",
"(",
"value",
")",
"if",
"len",
"(",
"value",
".",
"split",
"(",
")",
")",
">",
"1",
"else",
"value",
"for",
"value",
"in",
"valuesdict",
".",
"values",
"(",
")",
"]",
")",
")",
",",
"file",
"=",
"f",
")",
"elif",
"file_format",
"==",
"\"json\"",
":",
"print",
"(",
"json",
".",
"dumps",
"(",
"self",
"[",
"sf",
"]",
"[",
"sftag",
"]",
",",
"sort_keys",
"=",
"False",
",",
"indent",
"=",
"4",
")",
",",
"file",
"=",
"f",
")"
] | 48.52 | 23.6 |
def on_mouse_event(self, event):
'''handle mouse events'''
pos = event.GetPosition()
if event.RightDown() and self.popup_menu is not None:
self.show_popup_menu(pos)
return
if event.Leaving():
self.mouse_pos = None
else:
self.mouse_pos = pos
if event.LeftDown():
self.mouse_down = self.image_coordinates(pos)
if event.Dragging() and event.ButtonIsDown(wx.MOUSE_BTN_LEFT):
self.on_drag_event(event) | [
"def",
"on_mouse_event",
"(",
"self",
",",
"event",
")",
":",
"pos",
"=",
"event",
".",
"GetPosition",
"(",
")",
"if",
"event",
".",
"RightDown",
"(",
")",
"and",
"self",
".",
"popup_menu",
"is",
"not",
"None",
":",
"self",
".",
"show_popup_menu",
"(",
"pos",
")",
"return",
"if",
"event",
".",
"Leaving",
"(",
")",
":",
"self",
".",
"mouse_pos",
"=",
"None",
"else",
":",
"self",
".",
"mouse_pos",
"=",
"pos",
"if",
"event",
".",
"LeftDown",
"(",
")",
":",
"self",
".",
"mouse_down",
"=",
"self",
".",
"image_coordinates",
"(",
"pos",
")",
"if",
"event",
".",
"Dragging",
"(",
")",
"and",
"event",
".",
"ButtonIsDown",
"(",
"wx",
".",
"MOUSE_BTN_LEFT",
")",
":",
"self",
".",
"on_drag_event",
"(",
"event",
")"
] | 34.066667 | 15 |
def format(self, record):
"""
The formatting function
:param record: The record object
:return: The string representation of the record
"""
try:
n = record.n
except AttributeError:
n = 'default'
try:
message = record.message
except AttributeError:
message = record.msg
senml = OrderedDict(
uid="hyperstream",
bt=datetime.utcfromtimestamp(record.created).isoformat()[:-3] + 'Z',
e=[OrderedDict(n=n, v=message)]
)
formatted_json = json.dumps(senml)
return formatted_json | [
"def",
"format",
"(",
"self",
",",
"record",
")",
":",
"try",
":",
"n",
"=",
"record",
".",
"n",
"except",
"AttributeError",
":",
"n",
"=",
"'default'",
"try",
":",
"message",
"=",
"record",
".",
"message",
"except",
"AttributeError",
":",
"message",
"=",
"record",
".",
"msg",
"senml",
"=",
"OrderedDict",
"(",
"uid",
"=",
"\"hyperstream\"",
",",
"bt",
"=",
"datetime",
".",
"utcfromtimestamp",
"(",
"record",
".",
"created",
")",
".",
"isoformat",
"(",
")",
"[",
":",
"-",
"3",
"]",
"+",
"'Z'",
",",
"e",
"=",
"[",
"OrderedDict",
"(",
"n",
"=",
"n",
",",
"v",
"=",
"message",
")",
"]",
")",
"formatted_json",
"=",
"json",
".",
"dumps",
"(",
"senml",
")",
"return",
"formatted_json"
] | 24.461538 | 18 |
def open(self, **params):
"""Open telnet connection
Args:
params (dict), must contain two parameters "ip" - ip address or hostname and "port" - port number
Example:
params = {'port': 23, 'ip': 'localhost'}
"""
logger.info('opening telnet')
self.port = params['port']
self.ip = params['ip']
self.tn = None
self._init() | [
"def",
"open",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"logger",
".",
"info",
"(",
"'opening telnet'",
")",
"self",
".",
"port",
"=",
"params",
"[",
"'port'",
"]",
"self",
".",
"ip",
"=",
"params",
"[",
"'ip'",
"]",
"self",
".",
"tn",
"=",
"None",
"self",
".",
"_init",
"(",
")"
] | 28.714286 | 20.285714 |
def _pretty_format(self, statement, result):
""" Format the return value of a query for humans """
if result is None:
return "Success"
ret = result
if statement.action in ("SELECT", "SCAN"):
if statement.save_file:
filename = statement.save_file[0]
if filename[0] in ['"', "'"]:
filename = unwrap(filename)
ret = "Saved %d record%s to %s" % (result, plural(result), filename)
elif isinstance(result, int):
if result == result.scanned_count:
ret = "%d" % result
else:
ret = "%d (scanned count: %d)" % (result, result.scanned_count)
elif statement.action == "UPDATE":
if isinstance(result, int):
ret = "Updated %d item%s" % (result, plural(result))
elif statement.action == "DELETE":
ret = "Deleted %d item%s" % (result, plural(result))
elif statement.action == "CREATE":
if result:
ret = "Created table %r" % statement.table
else:
ret = "Table %r already exists" % statement.table
elif statement.action == "INSERT":
ret = "Inserted %d item%s" % (result, plural(result))
elif statement.action == "DROP":
if result:
ret = "Dropped table %r" % statement.table
else:
ret = "Table %r does not exist" % statement.table
elif statement.action == "ANALYZE":
ret = self._pretty_format(statement[1], result)
elif statement.action == "LOAD":
ret = "Loaded %d item%s" % (result, plural(result))
return ret | [
"def",
"_pretty_format",
"(",
"self",
",",
"statement",
",",
"result",
")",
":",
"if",
"result",
"is",
"None",
":",
"return",
"\"Success\"",
"ret",
"=",
"result",
"if",
"statement",
".",
"action",
"in",
"(",
"\"SELECT\"",
",",
"\"SCAN\"",
")",
":",
"if",
"statement",
".",
"save_file",
":",
"filename",
"=",
"statement",
".",
"save_file",
"[",
"0",
"]",
"if",
"filename",
"[",
"0",
"]",
"in",
"[",
"'\"'",
",",
"\"'\"",
"]",
":",
"filename",
"=",
"unwrap",
"(",
"filename",
")",
"ret",
"=",
"\"Saved %d record%s to %s\"",
"%",
"(",
"result",
",",
"plural",
"(",
"result",
")",
",",
"filename",
")",
"elif",
"isinstance",
"(",
"result",
",",
"int",
")",
":",
"if",
"result",
"==",
"result",
".",
"scanned_count",
":",
"ret",
"=",
"\"%d\"",
"%",
"result",
"else",
":",
"ret",
"=",
"\"%d (scanned count: %d)\"",
"%",
"(",
"result",
",",
"result",
".",
"scanned_count",
")",
"elif",
"statement",
".",
"action",
"==",
"\"UPDATE\"",
":",
"if",
"isinstance",
"(",
"result",
",",
"int",
")",
":",
"ret",
"=",
"\"Updated %d item%s\"",
"%",
"(",
"result",
",",
"plural",
"(",
"result",
")",
")",
"elif",
"statement",
".",
"action",
"==",
"\"DELETE\"",
":",
"ret",
"=",
"\"Deleted %d item%s\"",
"%",
"(",
"result",
",",
"plural",
"(",
"result",
")",
")",
"elif",
"statement",
".",
"action",
"==",
"\"CREATE\"",
":",
"if",
"result",
":",
"ret",
"=",
"\"Created table %r\"",
"%",
"statement",
".",
"table",
"else",
":",
"ret",
"=",
"\"Table %r already exists\"",
"%",
"statement",
".",
"table",
"elif",
"statement",
".",
"action",
"==",
"\"INSERT\"",
":",
"ret",
"=",
"\"Inserted %d item%s\"",
"%",
"(",
"result",
",",
"plural",
"(",
"result",
")",
")",
"elif",
"statement",
".",
"action",
"==",
"\"DROP\"",
":",
"if",
"result",
":",
"ret",
"=",
"\"Dropped table %r\"",
"%",
"statement",
".",
"table",
"else",
":",
"ret",
"=",
"\"Table %r does not exist\"",
"%",
"statement",
".",
"table",
"elif",
"statement",
".",
"action",
"==",
"\"ANALYZE\"",
":",
"ret",
"=",
"self",
".",
"_pretty_format",
"(",
"statement",
"[",
"1",
"]",
",",
"result",
")",
"elif",
"statement",
".",
"action",
"==",
"\"LOAD\"",
":",
"ret",
"=",
"\"Loaded %d item%s\"",
"%",
"(",
"result",
",",
"plural",
"(",
"result",
")",
")",
"return",
"ret"
] | 45.105263 | 13.815789 |
def load_cufflinks_dict(*args, **kwargs):
"""
Returns dictionary mapping feature identifier (either transcript or gene ID)
to a DataFrame row with fields:
id : str
novel : bool
fpkm : float
chr : str
start : int
end : int
gene_names : str list
"""
return {
row.id: row
for (_, row)
in load_cufflinks_dataframe(*args, **kwargs).iterrows()
} | [
"def",
"load_cufflinks_dict",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"{",
"row",
".",
"id",
":",
"row",
"for",
"(",
"_",
",",
"row",
")",
"in",
"load_cufflinks_dataframe",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
".",
"iterrows",
"(",
")",
"}"
] | 25.117647 | 18.529412 |
def folderitem(self, obj, item, index):
"""Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
"""
title = obj.Title()
description = obj.Description()
url = obj.absolute_url()
item["replace"]["Title"] = get_link(url, value=title)
item["Description"] = description
sample_types = obj.getSampleTypes()
if sample_types:
links = map(
lambda st: get_link(st.absolute_url(),
value=st.Title(),
css_class="link"),
sample_types)
item["replace"]["SampleTypes"] = ", ".join(links)
else:
item["SampleTypes"] = ""
parent = obj.aq_parent
if parent.portal_type == "Client":
item["Owner"] = parent.aq_parent.Title()
item["replace"]["Owner"] = get_link(
parent.absolute_url(), value=parent.getName())
else:
item["Owner"] = self.context.bika_setup.laboratory.Title()
return item | [
"def",
"folderitem",
"(",
"self",
",",
"obj",
",",
"item",
",",
"index",
")",
":",
"title",
"=",
"obj",
".",
"Title",
"(",
")",
"description",
"=",
"obj",
".",
"Description",
"(",
")",
"url",
"=",
"obj",
".",
"absolute_url",
"(",
")",
"item",
"[",
"\"replace\"",
"]",
"[",
"\"Title\"",
"]",
"=",
"get_link",
"(",
"url",
",",
"value",
"=",
"title",
")",
"item",
"[",
"\"Description\"",
"]",
"=",
"description",
"sample_types",
"=",
"obj",
".",
"getSampleTypes",
"(",
")",
"if",
"sample_types",
":",
"links",
"=",
"map",
"(",
"lambda",
"st",
":",
"get_link",
"(",
"st",
".",
"absolute_url",
"(",
")",
",",
"value",
"=",
"st",
".",
"Title",
"(",
")",
",",
"css_class",
"=",
"\"link\"",
")",
",",
"sample_types",
")",
"item",
"[",
"\"replace\"",
"]",
"[",
"\"SampleTypes\"",
"]",
"=",
"\", \"",
".",
"join",
"(",
"links",
")",
"else",
":",
"item",
"[",
"\"SampleTypes\"",
"]",
"=",
"\"\"",
"parent",
"=",
"obj",
".",
"aq_parent",
"if",
"parent",
".",
"portal_type",
"==",
"\"Client\"",
":",
"item",
"[",
"\"Owner\"",
"]",
"=",
"parent",
".",
"aq_parent",
".",
"Title",
"(",
")",
"item",
"[",
"\"replace\"",
"]",
"[",
"\"Owner\"",
"]",
"=",
"get_link",
"(",
"parent",
".",
"absolute_url",
"(",
")",
",",
"value",
"=",
"parent",
".",
"getName",
"(",
")",
")",
"else",
":",
"item",
"[",
"\"Owner\"",
"]",
"=",
"self",
".",
"context",
".",
"bika_setup",
".",
"laboratory",
".",
"Title",
"(",
")",
"return",
"item"
] | 34.657895 | 18.263158 |
def save(self, filename, binary=True):
"""
Writes a rectilinear grid to disk.
Parameters
----------
filename : str
Filename of grid to be written. The file extension will select the
type of writer to use. ".vtk" will use the legacy writer, while
".vtr" will select the VTK XML writer.
binary : bool, optional
Writes as a binary file by default. Set to False to write ASCII.
Notes
-----
Binary files write much faster than ASCII, but binary files written on
one system may not be readable on other systems. Binary can be used
only with the legacy writer.
"""
filename = os.path.abspath(os.path.expanduser(filename))
# Use legacy writer if vtk is in filename
if '.vtk' in filename:
writer = vtk.vtkRectilinearGridWriter()
legacy = True
elif '.vtr' in filename:
writer = vtk.vtkXMLRectilinearGridWriter()
legacy = False
else:
raise Exception('Extension should be either ".vtr" (xml) or' +
'".vtk" (legacy)')
# Write
writer.SetFileName(filename)
writer.SetInputData(self)
if binary and legacy:
writer.SetFileTypeToBinary()
writer.Write() | [
"def",
"save",
"(",
"self",
",",
"filename",
",",
"binary",
"=",
"True",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"filename",
")",
")",
"# Use legacy writer if vtk is in filename",
"if",
"'.vtk'",
"in",
"filename",
":",
"writer",
"=",
"vtk",
".",
"vtkRectilinearGridWriter",
"(",
")",
"legacy",
"=",
"True",
"elif",
"'.vtr'",
"in",
"filename",
":",
"writer",
"=",
"vtk",
".",
"vtkXMLRectilinearGridWriter",
"(",
")",
"legacy",
"=",
"False",
"else",
":",
"raise",
"Exception",
"(",
"'Extension should be either \".vtr\" (xml) or'",
"+",
"'\".vtk\" (legacy)'",
")",
"# Write",
"writer",
".",
"SetFileName",
"(",
"filename",
")",
"writer",
".",
"SetInputData",
"(",
"self",
")",
"if",
"binary",
"and",
"legacy",
":",
"writer",
".",
"SetFileTypeToBinary",
"(",
")",
"writer",
".",
"Write",
"(",
")"
] | 34.051282 | 19.641026 |
def fromimage(im, flatten=False, mode=None):
"""
Return a copy of a PIL image as a numpy array.
Parameters
----------
im : PIL image
Input image.
flatten : bool
If true, convert the output to grey-scale.
mode : str, optional
Mode to convert image to, e.g. ``'RGB'``. See the Notes of the
`imread` docstring for more details.
Returns
-------
fromimage : ndarray
The different colour bands/channels are stored in the
third dimension, such that a grey-image is MxN, an
RGB-image MxNx3 and an RGBA-image MxNx4.
"""
if not Image.isImageType(im):
raise TypeError("Input is not a PIL image.")
if mode is not None:
if mode != im.mode:
im = im.convert(mode)
elif im.mode == 'P':
# Mode 'P' means there is an indexed "palette". If we leave the mode
# as 'P', then when we do `a = array(im)` below, `a` will be a 2-D
# containing the indices into the palette, and not a 3-D array
# containing the RGB or RGBA values.
if 'transparency' in im.info:
im = im.convert('RGBA')
else:
im = im.convert('RGB')
if flatten:
im = im.convert('F')
elif im.mode == '1':
# Workaround for crash in PIL. When im is 1-bit, the call array(im)
# can cause a seg. fault, or generate garbage. See
# https://github.com/scipy/scipy/issues/2138 and
# https://github.com/python-pillow/Pillow/issues/350.
#
# This converts im from a 1-bit image to an 8-bit image.
im = im.convert('L')
a = array(im)
return a | [
"def",
"fromimage",
"(",
"im",
",",
"flatten",
"=",
"False",
",",
"mode",
"=",
"None",
")",
":",
"if",
"not",
"Image",
".",
"isImageType",
"(",
"im",
")",
":",
"raise",
"TypeError",
"(",
"\"Input is not a PIL image.\"",
")",
"if",
"mode",
"is",
"not",
"None",
":",
"if",
"mode",
"!=",
"im",
".",
"mode",
":",
"im",
"=",
"im",
".",
"convert",
"(",
"mode",
")",
"elif",
"im",
".",
"mode",
"==",
"'P'",
":",
"# Mode 'P' means there is an indexed \"palette\". If we leave the mode",
"# as 'P', then when we do `a = array(im)` below, `a` will be a 2-D",
"# containing the indices into the palette, and not a 3-D array",
"# containing the RGB or RGBA values.",
"if",
"'transparency'",
"in",
"im",
".",
"info",
":",
"im",
"=",
"im",
".",
"convert",
"(",
"'RGBA'",
")",
"else",
":",
"im",
"=",
"im",
".",
"convert",
"(",
"'RGB'",
")",
"if",
"flatten",
":",
"im",
"=",
"im",
".",
"convert",
"(",
"'F'",
")",
"elif",
"im",
".",
"mode",
"==",
"'1'",
":",
"# Workaround for crash in PIL. When im is 1-bit, the call array(im)",
"# can cause a seg. fault, or generate garbage. See",
"# https://github.com/scipy/scipy/issues/2138 and",
"# https://github.com/python-pillow/Pillow/issues/350.",
"#",
"# This converts im from a 1-bit image to an 8-bit image.",
"im",
"=",
"im",
".",
"convert",
"(",
"'L'",
")",
"a",
"=",
"array",
"(",
"im",
")",
"return",
"a"
] | 31.705882 | 20.215686 |
def cf_to_proj(var):
    r"""Convert a Variable with projection information to a Proj.4 Projection instance.
    The attributes of this Variable must conform to the Climate and Forecasting (CF)
    netCDF conventions.

    Parameters
    ----------
    var : Variable
        The projection variable with appropriate attributes.

    Returns
    -------
    pyproj.Proj
        Projection built from the CF ``grid_mapping_name`` of ``var``
        (lambert_conformal_conic, polar_stereographic, or mercator).
    """
    import pyproj
    # Spherical earth: both semi-axes set to the CF earth_radius attribute.
    kwargs = {'lat_0': var.latitude_of_projection_origin, 'a': var.earth_radius,
              'b': var.earth_radius}
    if var.grid_mapping_name == 'lambert_conformal_conic':
        kwargs['proj'] = 'lcc'
        kwargs['lon_0'] = var.longitude_of_central_meridian
        # Single standard parallel duplicated into both lcc parallels.
        kwargs['lat_1'] = var.standard_parallel
        kwargs['lat_2'] = var.standard_parallel
    elif var.grid_mapping_name == 'polar_stereographic':
        kwargs['proj'] = 'stere'
        kwargs['lon_0'] = var.straight_vertical_longitude_from_pole
        kwargs['lat_0'] = var.latitude_of_projection_origin
        kwargs['lat_ts'] = var.standard_parallel
        # Was the boolean False; 0 is the numerically identical, correct
        # spelling of "no false easting/northing".
        kwargs['x_0'] = 0  # Easting
        kwargs['y_0'] = 0  # Northing
    elif var.grid_mapping_name == 'mercator':
        kwargs['proj'] = 'merc'
        kwargs['lon_0'] = var.longitude_of_projection_origin
        kwargs['lat_ts'] = var.standard_parallel
        kwargs['x_0'] = 0  # Easting
        kwargs['y_0'] = 0  # Northing
    return pyproj.Proj(**kwargs)
"def",
"cf_to_proj",
"(",
"var",
")",
":",
"import",
"pyproj",
"kwargs",
"=",
"{",
"'lat_0'",
":",
"var",
".",
"latitude_of_projection_origin",
",",
"'a'",
":",
"var",
".",
"earth_radius",
",",
"'b'",
":",
"var",
".",
"earth_radius",
"}",
"if",
"var",
".",
"grid_mapping_name",
"==",
"'lambert_conformal_conic'",
":",
"kwargs",
"[",
"'proj'",
"]",
"=",
"'lcc'",
"kwargs",
"[",
"'lon_0'",
"]",
"=",
"var",
".",
"longitude_of_central_meridian",
"kwargs",
"[",
"'lat_1'",
"]",
"=",
"var",
".",
"standard_parallel",
"kwargs",
"[",
"'lat_2'",
"]",
"=",
"var",
".",
"standard_parallel",
"elif",
"var",
".",
"grid_mapping_name",
"==",
"'polar_stereographic'",
":",
"kwargs",
"[",
"'proj'",
"]",
"=",
"'stere'",
"kwargs",
"[",
"'lon_0'",
"]",
"=",
"var",
".",
"straight_vertical_longitude_from_pole",
"kwargs",
"[",
"'lat_0'",
"]",
"=",
"var",
".",
"latitude_of_projection_origin",
"kwargs",
"[",
"'lat_ts'",
"]",
"=",
"var",
".",
"standard_parallel",
"kwargs",
"[",
"'x_0'",
"]",
"=",
"False",
"# Easting",
"kwargs",
"[",
"'y_0'",
"]",
"=",
"False",
"# Northing",
"elif",
"var",
".",
"grid_mapping_name",
"==",
"'mercator'",
":",
"kwargs",
"[",
"'proj'",
"]",
"=",
"'merc'",
"kwargs",
"[",
"'lon_0'",
"]",
"=",
"var",
".",
"longitude_of_projection_origin",
"kwargs",
"[",
"'lat_ts'",
"]",
"=",
"var",
".",
"standard_parallel",
"kwargs",
"[",
"'x_0'",
"]",
"=",
"False",
"# Easting",
"kwargs",
"[",
"'y_0'",
"]",
"=",
"False",
"# Northing",
"return",
"pyproj",
".",
"Proj",
"(",
"*",
"*",
"kwargs",
")"
] | 38.314286 | 16.942857 |
def chinese_remainder(n, a):
    """
    Solves CRT for moduli and remainders

    :param n: iterable of pairwise coprime moduli
    :param a: iterable of remainders, one per modulus
    :return: the unique solution modulo prod(n)
    """
    # `total` used to be named `sum`, shadowing the builtin; the lambda
    # parameters used to be `a, b`, shadowing this function's own arguments.
    total = 0
    prod = reduce(lambda x, y: x * y, n)
    for n_i, a_i in zip(n, a):
        p = prod // n_i
        # Standard CRT term: a_i * (prod/n_i)^{-1} mod n_i * (prod/n_i)
        total += a_i * DlogFprint.mul_inv(p, n_i) * p
    return total % prod
"def",
"chinese_remainder",
"(",
"n",
",",
"a",
")",
":",
"sum",
"=",
"0",
"prod",
"=",
"reduce",
"(",
"lambda",
"a",
",",
"b",
":",
"a",
"*",
"b",
",",
"n",
")",
"for",
"n_i",
",",
"a_i",
"in",
"zip",
"(",
"n",
",",
"a",
")",
":",
"p",
"=",
"prod",
"//",
"n_i",
"sum",
"+=",
"a_i",
"*",
"DlogFprint",
".",
"mul_inv",
"(",
"p",
",",
"n_i",
")",
"*",
"p",
"return",
"sum",
"%",
"prod"
] | 24.571429 | 14.571429 |
def get_configurable_by_name(self, name):
    """
    Returns the registered configurable with the specified name or ``None`` if no
    such configurator exists.

    :param name: the ``name`` attribute to match among ``self.configurables``.
    :return: the first matching configurable, or ``None`` when there is no match.
    """
    # next() stops at the first hit instead of building a full match list
    # (the old variable was also named `l`, which PEP 8 discourages);
    # the default preserves the documented "or None" contract.
    return next((c for c in self.configurables if c.name == name), None)
"def",
"get_configurable_by_name",
"(",
"self",
",",
"name",
")",
":",
"l",
"=",
"[",
"c",
"for",
"c",
"in",
"self",
".",
"configurables",
"if",
"c",
".",
"name",
"==",
"name",
"]",
"if",
"l",
":",
"return",
"l",
"[",
"0",
"]"
] | 34.75 | 14.75 |
def unique(lst):
    """Unique with keeping sort/order

    ["a", "c", "b", "c", "c", ["d", "e"]]
    Results in
    ["a", "c", "b", "d", "e"]

    Note: a sub-list element is flattened one level via ``extend``; its
    contents are added without a second duplicate check, matching the
    historic behaviour.
    """
    # Rewritten from a side-effect-only list comprehension (anti-pattern)
    # to an explicit loop with identical semantics.
    result = []
    for item in lst:
        if item in result:
            continue
        # Deliberate exact type check (not isinstance) so that list
        # subclasses follow the append path, as before.
        if type(item) is list:
            result.extend(item)
        else:
            result.append(item)
    return result
return nl | [
"def",
"unique",
"(",
"lst",
")",
":",
"nl",
"=",
"[",
"]",
"[",
"(",
"nl",
".",
"append",
"(",
"e",
")",
"if",
"type",
"(",
"e",
")",
"is",
"not",
"list",
"else",
"nl",
".",
"extend",
"(",
"e",
")",
")",
"for",
"e",
"in",
"lst",
"if",
"e",
"not",
"in",
"nl",
"]",
"return",
"nl"
] | 24.636364 | 17 |
async def unsubscribe(self, topic: str):
    """Unsubscribe from a topic

    :param topic: required

    :returns: None

    Sample ws response

    .. code-block:: python

        {
            "id": "1545910840805",
            "type": "ack"
        }

    """
    payload = dict(type='unsubscribe', topic=topic, response=True)
    await self._conn.send_message(payload)
"async",
"def",
"unsubscribe",
"(",
"self",
",",
"topic",
":",
"str",
")",
":",
"req_msg",
"=",
"{",
"'type'",
":",
"'unsubscribe'",
",",
"'topic'",
":",
"topic",
",",
"'response'",
":",
"True",
"}",
"await",
"self",
".",
"_conn",
".",
"send_message",
"(",
"req_msg",
")"
] | 18 | 21.12 |
def get_activity(self, name):
    """
    Retrieve an activity given its name.

    :param name: The name of the activity.
    :returns: The first activity whose ``name`` matches (raises IndexError
        when no activity has that name).
    """
    matches = [candidate for candidate in self.activities if candidate.name == name]
    return matches[0]
"def",
"get_activity",
"(",
"self",
",",
"name",
")",
":",
"return",
"[",
"a",
"for",
"a",
"in",
"self",
".",
"activities",
"if",
"a",
".",
"name",
"==",
"name",
"]",
"[",
"0",
"]"
] | 23.6 | 17.4 |
def run(self):
    """
    Start monitoring operation policy files.

    Performs a single scan when live monitoring is disabled; otherwise
    polls once per second until the halt trigger is set.
    """
    self.initialize_tracking_structures()
    # One-shot mode: scan once and return.
    if not self.live_monitoring:
        self.scan_policies()
        return
    self.logger.info("Starting up the operation policy file monitor.")
    while not self.halt_trigger.is_set():
        time.sleep(1)
        self.scan_policies()
    self.logger.info("Stopping the operation policy file monitor.")
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"initialize_tracking_structures",
"(",
")",
"if",
"self",
".",
"live_monitoring",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Starting up the operation policy file monitor.\"",
")",
"while",
"not",
"self",
".",
"halt_trigger",
".",
"is_set",
"(",
")",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"self",
".",
"scan_policies",
"(",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Stopping the operation policy file monitor.\"",
")",
"else",
":",
"self",
".",
"scan_policies",
"(",
")"
] | 33.785714 | 15.642857 |
def patch_compat(config):
    """
    Support older config values.

    Renames the legacy ``web_host``/``web_port`` keys, in place, to the
    current ``host``/``port`` names.
    """
    for legacy_key, current_key in (('web_host', 'host'), ('web_port', 'port')):
        if legacy_key in config:
            config[current_key] = config.pop(legacy_key)
"def",
"patch_compat",
"(",
"config",
")",
":",
"if",
"'web_host'",
"in",
"config",
":",
"config",
"[",
"'host'",
"]",
"=",
"config",
".",
"pop",
"(",
"'web_host'",
")",
"if",
"'web_port'",
"in",
"config",
":",
"config",
"[",
"'port'",
"]",
"=",
"config",
".",
"pop",
"(",
"'web_port'",
")"
] | 24.25 | 7.25 |
def stationarystate(self, k):
    """See docs for `Model` abstract base class."""
    # k must index one of the ncats category models.
    assert 0 <= k < self.ncats
    selected = self._models[k]
    return selected.stationarystate
"def",
"stationarystate",
"(",
"self",
",",
"k",
")",
":",
"assert",
"0",
"<=",
"k",
"<",
"self",
".",
"ncats",
"return",
"self",
".",
"_models",
"[",
"k",
"]",
".",
"stationarystate"
] | 41 | 5.75 |
def main():
    """
    %prog database.fa query.fa [options]
    Wrapper for NCBI BLAST+.
    """
    # NOTE: the docstring above doubles as the CLI usage string
    # (OptionParser(main.__doc__)) -- edit it with care.
    # ---- option definitions -------------------------------------------
    p = OptionParser(main.__doc__)
    p.add_option("--format", default=" \'6 qseqid sseqid pident length " \
            "mismatch gapopen qstart qend sstart send evalue bitscore\' ",
            help="0-11, learn more with \"blastp -help\". [default: %default]")
    p.add_option("--path", dest="blast_path", default=None,
            help="specify BLAST+ path including the program name")
    p.add_option("--prog", dest="blast_program", default="blastp",
            help="specify BLAST+ program to use. See complete list here: " \
            "http://www.ncbi.nlm.nih.gov/books/NBK52640/#chapter1.Installation"
            " [default: %default]")
    p.set_align(evalue=.01)
    p.add_option("--best", default=1, type="int",
            help="Only look for best N hits [default: %default]")
    p.set_cpus()
    # `nprocs` splits the query into chunks run as separate processes;
    # each process itself uses `cpus` BLAST threads (-num_threads).
    p.add_option("--nprocs", default=1, type="int",
            help="number of BLAST processes to run in parallel. " + \
            "split query.fa into `nprocs` chunks, " + \
            "each chunk uses -num_threads=`cpus`")
    p.set_params()
    p.set_outfile()
    # ---- argument validation ------------------------------------------
    opts, args = p.parse_args()
    if len(args) != 2 or opts.blast_program is None:
        sys.exit(not p.print_help())
    # First positional argument is the database FASTA, second the query.
    bfasta_fn, afasta_fn = args
    for fn in (afasta_fn, bfasta_fn):
        assert op.exists(fn)
    afasta_fn = op.abspath(afasta_fn)
    bfasta_fn = op.abspath(bfasta_fn)
    out_fh = must_open(opts.outfile, "w")
    extra = opts.extra
    # ---- locate the BLAST+ executable ---------------------------------
    blast_path = opts.blast_path
    blast_program = opts.blast_program
    blast_bin = blast_path or blast_program
    if op.basename(blast_bin) != blast_program:
        # --path gave a directory; append the program name to it.
        blast_bin = op.join(blast_bin, blast_program)
    # ---- optionally split the query for multi-process dispatch --------
    nprocs, cpus = opts.nprocs, opts.cpus
    if nprocs > 1:
        logging.debug("Dispatch job to %d processes" % nprocs)
        outdir = "outdir"
        fs = split([afasta_fn, outdir, str(nprocs)])
        queries = fs.names
    else:
        queries = [afasta_fn]
    # ---- (re)build the BLAST database index if needed -----------------
    # Protein programs (blastp/blastx) use .pin indexes; nucleotide ones
    # use .nin (or .00.nin when the database was split into volumes).
    dbtype = "prot" if op.basename(blast_bin) in ("blastp", "blastx") \
        else "nucl"
    db = bfasta_fn
    if dbtype == "prot":
        nin = db + ".pin"
    else:
        nin = db + ".nin"
        nin00 = db + ".00.nin"
        nin = nin00 if op.exists(nin00) else (db + ".nin")
    run_formatdb(infile=db, outfile=nin, dbtype=dbtype)
    # ---- assemble the command line and run ----------------------------
    lock = Lock()  # serialises writes to the shared output file handle
    blastplus_template = "{0} -db {1} -outfmt {2}"
    blast_cmd = blastplus_template.format(blast_bin, bfasta_fn, opts.format)
    blast_cmd += " -evalue {0} -max_target_seqs {1}".\
            format(opts.evalue, opts.best)
    blast_cmd += " -num_threads {0}".format(cpus)
    if extra:
        blast_cmd += " " + extra.strip()
    args = [(out_fh, blast_cmd, query, lock) for query in queries]
    g = Jobs(target=blastplus, args=args)
    g.run()
g.run() | [
"def",
"main",
"(",
")",
":",
"p",
"=",
"OptionParser",
"(",
"main",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--format\"",
",",
"default",
"=",
"\" \\'6 qseqid sseqid pident length \"",
"\"mismatch gapopen qstart qend sstart send evalue bitscore\\' \"",
",",
"help",
"=",
"\"0-11, learn more with \\\"blastp -help\\\". [default: %default]\"",
")",
"p",
".",
"add_option",
"(",
"\"--path\"",
",",
"dest",
"=",
"\"blast_path\"",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"specify BLAST+ path including the program name\"",
")",
"p",
".",
"add_option",
"(",
"\"--prog\"",
",",
"dest",
"=",
"\"blast_program\"",
",",
"default",
"=",
"\"blastp\"",
",",
"help",
"=",
"\"specify BLAST+ program to use. See complete list here: \"",
"\"http://www.ncbi.nlm.nih.gov/books/NBK52640/#chapter1.Installation\"",
"\" [default: %default]\"",
")",
"p",
".",
"set_align",
"(",
"evalue",
"=",
".01",
")",
"p",
".",
"add_option",
"(",
"\"--best\"",
",",
"default",
"=",
"1",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Only look for best N hits [default: %default]\"",
")",
"p",
".",
"set_cpus",
"(",
")",
"p",
".",
"add_option",
"(",
"\"--nprocs\"",
",",
"default",
"=",
"1",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"number of BLAST processes to run in parallel. \"",
"+",
"\"split query.fa into `nprocs` chunks, \"",
"+",
"\"each chunk uses -num_threads=`cpus`\"",
")",
"p",
".",
"set_params",
"(",
")",
"p",
".",
"set_outfile",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
"or",
"opts",
".",
"blast_program",
"is",
"None",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"bfasta_fn",
",",
"afasta_fn",
"=",
"args",
"for",
"fn",
"in",
"(",
"afasta_fn",
",",
"bfasta_fn",
")",
":",
"assert",
"op",
".",
"exists",
"(",
"fn",
")",
"afasta_fn",
"=",
"op",
".",
"abspath",
"(",
"afasta_fn",
")",
"bfasta_fn",
"=",
"op",
".",
"abspath",
"(",
"bfasta_fn",
")",
"out_fh",
"=",
"must_open",
"(",
"opts",
".",
"outfile",
",",
"\"w\"",
")",
"extra",
"=",
"opts",
".",
"extra",
"blast_path",
"=",
"opts",
".",
"blast_path",
"blast_program",
"=",
"opts",
".",
"blast_program",
"blast_bin",
"=",
"blast_path",
"or",
"blast_program",
"if",
"op",
".",
"basename",
"(",
"blast_bin",
")",
"!=",
"blast_program",
":",
"blast_bin",
"=",
"op",
".",
"join",
"(",
"blast_bin",
",",
"blast_program",
")",
"nprocs",
",",
"cpus",
"=",
"opts",
".",
"nprocs",
",",
"opts",
".",
"cpus",
"if",
"nprocs",
">",
"1",
":",
"logging",
".",
"debug",
"(",
"\"Dispatch job to %d processes\"",
"%",
"nprocs",
")",
"outdir",
"=",
"\"outdir\"",
"fs",
"=",
"split",
"(",
"[",
"afasta_fn",
",",
"outdir",
",",
"str",
"(",
"nprocs",
")",
"]",
")",
"queries",
"=",
"fs",
".",
"names",
"else",
":",
"queries",
"=",
"[",
"afasta_fn",
"]",
"dbtype",
"=",
"\"prot\"",
"if",
"op",
".",
"basename",
"(",
"blast_bin",
")",
"in",
"(",
"\"blastp\"",
",",
"\"blastx\"",
")",
"else",
"\"nucl\"",
"db",
"=",
"bfasta_fn",
"if",
"dbtype",
"==",
"\"prot\"",
":",
"nin",
"=",
"db",
"+",
"\".pin\"",
"else",
":",
"nin",
"=",
"db",
"+",
"\".nin\"",
"nin00",
"=",
"db",
"+",
"\".00.nin\"",
"nin",
"=",
"nin00",
"if",
"op",
".",
"exists",
"(",
"nin00",
")",
"else",
"(",
"db",
"+",
"\".nin\"",
")",
"run_formatdb",
"(",
"infile",
"=",
"db",
",",
"outfile",
"=",
"nin",
",",
"dbtype",
"=",
"dbtype",
")",
"lock",
"=",
"Lock",
"(",
")",
"blastplus_template",
"=",
"\"{0} -db {1} -outfmt {2}\"",
"blast_cmd",
"=",
"blastplus_template",
".",
"format",
"(",
"blast_bin",
",",
"bfasta_fn",
",",
"opts",
".",
"format",
")",
"blast_cmd",
"+=",
"\" -evalue {0} -max_target_seqs {1}\"",
".",
"format",
"(",
"opts",
".",
"evalue",
",",
"opts",
".",
"best",
")",
"blast_cmd",
"+=",
"\" -num_threads {0}\"",
".",
"format",
"(",
"cpus",
")",
"if",
"extra",
":",
"blast_cmd",
"+=",
"\" \"",
"+",
"extra",
".",
"strip",
"(",
")",
"args",
"=",
"[",
"(",
"out_fh",
",",
"blast_cmd",
",",
"query",
",",
"lock",
")",
"for",
"query",
"in",
"queries",
"]",
"g",
"=",
"Jobs",
"(",
"target",
"=",
"blastplus",
",",
"args",
"=",
"args",
")",
"g",
".",
"run",
"(",
")"
] | 33.373494 | 19.879518 |
def registerEditor(self,
                   name,
                   op,
                   cls=None,
                   defaultValue=None,
                   flags=0):
    """
    Registers an editor for the given operator as the given name.  If no
    editor class is supplied, no editor widget will be created for the
    operator unless you overload the createEditor method and create your own

    :param name | <str>
           op | <Query.Op>
           cls | <subclass of QWidget> || None
           defaultValue | <variant>
    """
    entry = XEditorRegistry(op=op,
                            cls=cls,
                            defaultValue=defaultValue,
                            flags=flags)
    key = nativestring(name)
    self._operatorMap[key] = entry
"def",
"registerEditor",
"(",
"self",
",",
"name",
",",
"op",
",",
"cls",
"=",
"None",
",",
"defaultValue",
"=",
"None",
",",
"flags",
"=",
"0",
")",
":",
"registry",
"=",
"XEditorRegistry",
"(",
"op",
"=",
"op",
",",
"cls",
"=",
"cls",
",",
"defaultValue",
"=",
"defaultValue",
",",
"flags",
"=",
"flags",
")",
"self",
".",
"_operatorMap",
"[",
"nativestring",
"(",
"name",
")",
"]",
"=",
"registry"
] | 41.5 | 14.5 |
def _prop_name(self):
"""
Calculate property name from tag name, e.g. a:schemeClr -> schemeClr.
"""
if ':' in self._nsptagname:
start = self._nsptagname.index(':') + 1
else:
start = 0
return self._nsptagname[start:] | [
"def",
"_prop_name",
"(",
"self",
")",
":",
"if",
"':'",
"in",
"self",
".",
"_nsptagname",
":",
"start",
"=",
"self",
".",
"_nsptagname",
".",
"index",
"(",
"':'",
")",
"+",
"1",
"else",
":",
"start",
"=",
"0",
"return",
"self",
".",
"_nsptagname",
"[",
"start",
":",
"]"
] | 31 | 13.222222 |
def construct_request(self, request_args=None, **kwargs):
    """
    The method where everything is setup for sending the request.
    The request information is gathered and the where and how of sending the
    request is decided.

    :param request_args: Initial request arguments
    :param kwargs: Extra keyword arguments
    :return: A dictionary with the keys 'url' and possibly 'body', 'kwargs',
        'request' and 'ht_args'.
    """
    effective_args = {} if request_args is None else request_args
    return self.construct(effective_args, **kwargs)
"def",
"construct_request",
"(",
"self",
",",
"request_args",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"request_args",
"is",
"None",
":",
"request_args",
"=",
"{",
"}",
"# remove arguments that should not be included in the request",
"# _args = dict(",
"# [(k, v) for k, v in kwargs.items() if v and k not in SPECIAL_ARGS])",
"return",
"self",
".",
"construct",
"(",
"request_args",
",",
"*",
"*",
"kwargs",
")"
] | 39.842105 | 21.105263 |
def refresh_all_state_machines(self):
    """ Refreshes all state machine tabs
    """
    manager = self.model.state_machine_manager
    all_ids = list(manager.state_machines.keys())
    self.refresh_state_machines(all_ids)
"def",
"refresh_all_state_machines",
"(",
"self",
")",
":",
"self",
".",
"refresh_state_machines",
"(",
"list",
"(",
"self",
".",
"model",
".",
"state_machine_manager",
".",
"state_machines",
".",
"keys",
"(",
")",
")",
")"
] | 47.25 | 15 |
def threadpooled(
    func: None = None,
    *,
    loop_getter: typing.Union[None, typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop] = None,
    loop_getter_need_context: bool = False,
) -> ThreadPooled:
    """Overload: No function."""
    # NOTE(review): body is intentionally empty -- this signature exists
    # only for static type checkers (presumably decorated with
    # @typing.overload above this view; confirm).  It covers the
    # decorator-factory call form: threadpooled(loop_getter=...)(func).
"def",
"threadpooled",
"(",
"func",
":",
"None",
"=",
"None",
",",
"*",
",",
"loop_getter",
":",
"typing",
".",
"Union",
"[",
"None",
",",
"typing",
".",
"Callable",
"[",
"...",
",",
"asyncio",
".",
"AbstractEventLoop",
"]",
",",
"asyncio",
".",
"AbstractEventLoop",
"]",
"=",
"None",
",",
"loop_getter_need_context",
":",
"bool",
"=",
"False",
",",
")",
"->",
"ThreadPooled",
":"
] | 36.714286 | 25.571429 |
def register_parameters(self, *parameters):
    """
    Register parameters.

    Accumulates each parameter's element count into ``parameter_count``
    and appends the parameters themselves to ``self.parameters``.
    """
    for new_param in parameters:
        value_shape = new_param.get_value().shape
        self.parameter_count += np.prod(value_shape)
    self.parameters.extend(parameters)
"def",
"register_parameters",
"(",
"self",
",",
"*",
"parameters",
")",
":",
"for",
"param",
"in",
"parameters",
":",
"self",
".",
"parameter_count",
"+=",
"np",
".",
"prod",
"(",
"param",
".",
"get_value",
"(",
")",
".",
"shape",
")",
"self",
".",
"parameters",
".",
"extend",
"(",
"parameters",
")"
] | 33.571429 | 7.571429 |
def cache(self, CachableItem):
    """Updates cache area with latest information

    Maps ``CachableItem`` to its cached representation and merges it into
    the SQL session.  Returns the merged cache object when an item was
    created or modified; returns ``False`` when the cached copy is already
    up to date.
    """
    _cachedItem = self.get(CachableItem)
    if not _cachedItem:
        # Not cached yet: map the raw item and insert it into the session.
        _dirtyCachedItem = self.mapper.get(CachableItem)
        logger.debug("new cachable item added to sql cache area {id: %s, type: %s}", str(_dirtyCachedItem.getId()), str(_dirtyCachedItem.__class__))
        cached_item = self.session.merge(_dirtyCachedItem)
        notify(CacheObjectCreatedEvent(cached_item, self))
        return cached_item
    else:
        # Already cached: re-map and compare to detect modification.
        _newCacheItem = self.mapper.get(CachableItem)
        if _cachedItem != _newCacheItem:
            logger.debug("Cachable item modified in sql cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__))
            cached_item = self.session.merge(_newCacheItem)
            notify(CacheObjectModifiedEvent(cached_item, self))
            return cached_item
        # Unchanged -- signal "nothing done" with False (not None).
        return False
"def",
"cache",
"(",
"self",
",",
"CachableItem",
")",
":",
"_cachedItem",
"=",
"self",
".",
"get",
"(",
"CachableItem",
")",
"if",
"not",
"_cachedItem",
":",
"_dirtyCachedItem",
"=",
"self",
".",
"mapper",
".",
"get",
"(",
"CachableItem",
")",
"logger",
".",
"debug",
"(",
"\"new cachable item added to sql cache area {id: %s, type: %s}\"",
",",
"str",
"(",
"_dirtyCachedItem",
".",
"getId",
"(",
")",
")",
",",
"str",
"(",
"_dirtyCachedItem",
".",
"__class__",
")",
")",
"cached_item",
"=",
"self",
".",
"session",
".",
"merge",
"(",
"_dirtyCachedItem",
")",
"notify",
"(",
"CacheObjectCreatedEvent",
"(",
"cached_item",
",",
"self",
")",
")",
"return",
"cached_item",
"else",
":",
"_newCacheItem",
"=",
"self",
".",
"mapper",
".",
"get",
"(",
"CachableItem",
")",
"if",
"_cachedItem",
"!=",
"_newCacheItem",
":",
"logger",
".",
"debug",
"(",
"\"Cachable item modified in sql cache area {id: %s, type: %s}\"",
",",
"str",
"(",
"_newCacheItem",
".",
"getId",
"(",
")",
")",
",",
"str",
"(",
"_newCacheItem",
".",
"__class__",
")",
")",
"cached_item",
"=",
"self",
".",
"session",
".",
"merge",
"(",
"_newCacheItem",
")",
"notify",
"(",
"CacheObjectModifiedEvent",
"(",
"cached_item",
",",
"self",
")",
")",
"return",
"cached_item",
"return",
"False"
] | 54.333333 | 24.777778 |
def disassemble(self, lineno_width=3,
                mark_as_current=False,
                asm_format=False,
                show_bytes=False):
    """Format instruction details for inclusion in disassembly output
    *lineno_width* sets the width of the line number field (0 omits it)
    *mark_as_current* inserts a '-->' marker arrow as part of the line
    *asm_format* emits assembler-style labels (``L<offset>:``) and symbolic
    jump targets instead of the tabular dis-style layout
    *show_bytes* additionally prints the raw opcode/operand bytes in hex
    """
    fields = []
    if asm_format:
        indexed_operand = set(['name', 'local', 'compare', 'free'])
    # Column: Source code line number
    if lineno_width:
        if self.starts_line is not None:
            if asm_format:
                lineno_fmt = "%%%dd:\n" % lineno_width
                fields.append(lineno_fmt % self.starts_line)
                fields.append(' ' * (lineno_width))
                if self.is_jump_target:
                    fields.append(' ' * (lineno_width-1))
            else:
                lineno_fmt = "%%%dd:" % lineno_width
                fields.append(lineno_fmt % self.starts_line)
        else:
            fields.append(' ' * (lineno_width+1))
    # Column: Current instruction indicator
    if mark_as_current and not asm_format:
        fields.append('-->')
    else:
        fields.append('   ')
    # Column: Jump target marker
    if self.is_jump_target:
        if not asm_format:
            fields.append('>>')
        else:
            # Assembler mode: prepend a label line for this jump target.
            fields = ["L%d:\n" % self.offset] + fields
            if not self.starts_line:
                fields.append('  ')
    else:
        fields.append('  ')
    # Column: Instruction offset from start of code sequence
    if not asm_format:
        fields.append(repr(self.offset).rjust(4))
    if show_bytes:
        hex_bytecode = "|%02x" % self.opcode
        if self.inst_size == 1:
            # Not 3.6 or later
            hex_bytecode += ' ' * (2*3)
        if self.inst_size == 2:
            # Must by Python 3.6 or later
            if self.has_arg:
                hex_bytecode += " %02x" % (self.arg % 256)
            else :
                hex_bytecode += ' 00'
        elif self.inst_size == 3:
            # Not 3.6 or later
            hex_bytecode += " %02x %02x" % (
                (self.arg >> 8, self.arg % 256))
        fields.append(hex_bytecode + '|')
    # Column: Opcode name
    fields.append(self.opname.ljust(20))
    # Column: Opcode argument
    if self.arg is not None:
        argrepr = self.argrepr
        if asm_format:
            # Symbolic forms for assembler output: absolute/relative jumps
            # become L<target> labels; indexed and simple const operands
            # are emitted parenthesised (and suppress the detail column).
            if self.optype == 'jabs':
                fields.append('L' + str(self.arg))
            elif self.optype == 'jrel':
                argval = self.offset + self.arg + self.inst_size
                fields.append('L' + str(argval))
            elif self.optype in indexed_operand:
                fields.append('(%s)' % argrepr)
                argrepr = None
            elif (self.optype == 'const'
                  and not re.search('\s', argrepr)):
                fields.append('(%s)' % argrepr)
                argrepr = None
            else:
                fields.append(repr(self.arg))
        elif not (show_bytes and argrepr):
            fields.append(repr(self.arg).rjust(6))
        # Column: Opcode argument details
        if argrepr:
            fields.append('(%s)' % argrepr)
            pass
        pass
    return ' '.join(fields).rstrip()
"def",
"disassemble",
"(",
"self",
",",
"lineno_width",
"=",
"3",
",",
"mark_as_current",
"=",
"False",
",",
"asm_format",
"=",
"False",
",",
"show_bytes",
"=",
"False",
")",
":",
"fields",
"=",
"[",
"]",
"if",
"asm_format",
":",
"indexed_operand",
"=",
"set",
"(",
"[",
"'name'",
",",
"'local'",
",",
"'compare'",
",",
"'free'",
"]",
")",
"# Column: Source code line number",
"if",
"lineno_width",
":",
"if",
"self",
".",
"starts_line",
"is",
"not",
"None",
":",
"if",
"asm_format",
":",
"lineno_fmt",
"=",
"\"%%%dd:\\n\"",
"%",
"lineno_width",
"fields",
".",
"append",
"(",
"lineno_fmt",
"%",
"self",
".",
"starts_line",
")",
"fields",
".",
"append",
"(",
"' '",
"*",
"(",
"lineno_width",
")",
")",
"if",
"self",
".",
"is_jump_target",
":",
"fields",
".",
"append",
"(",
"' '",
"*",
"(",
"lineno_width",
"-",
"1",
")",
")",
"else",
":",
"lineno_fmt",
"=",
"\"%%%dd:\"",
"%",
"lineno_width",
"fields",
".",
"append",
"(",
"lineno_fmt",
"%",
"self",
".",
"starts_line",
")",
"else",
":",
"fields",
".",
"append",
"(",
"' '",
"*",
"(",
"lineno_width",
"+",
"1",
")",
")",
"# Column: Current instruction indicator",
"if",
"mark_as_current",
"and",
"not",
"asm_format",
":",
"fields",
".",
"append",
"(",
"'-->'",
")",
"else",
":",
"fields",
".",
"append",
"(",
"' '",
")",
"# Column: Jump target marker",
"if",
"self",
".",
"is_jump_target",
":",
"if",
"not",
"asm_format",
":",
"fields",
".",
"append",
"(",
"'>>'",
")",
"else",
":",
"fields",
"=",
"[",
"\"L%d:\\n\"",
"%",
"self",
".",
"offset",
"]",
"+",
"fields",
"if",
"not",
"self",
".",
"starts_line",
":",
"fields",
".",
"append",
"(",
"' '",
")",
"else",
":",
"fields",
".",
"append",
"(",
"' '",
")",
"# Column: Instruction offset from start of code sequence",
"if",
"not",
"asm_format",
":",
"fields",
".",
"append",
"(",
"repr",
"(",
"self",
".",
"offset",
")",
".",
"rjust",
"(",
"4",
")",
")",
"if",
"show_bytes",
":",
"hex_bytecode",
"=",
"\"|%02x\"",
"%",
"self",
".",
"opcode",
"if",
"self",
".",
"inst_size",
"==",
"1",
":",
"# Not 3.6 or later",
"hex_bytecode",
"+=",
"' '",
"*",
"(",
"2",
"*",
"3",
")",
"if",
"self",
".",
"inst_size",
"==",
"2",
":",
"# Must by Python 3.6 or later",
"if",
"self",
".",
"has_arg",
":",
"hex_bytecode",
"+=",
"\" %02x\"",
"%",
"(",
"self",
".",
"arg",
"%",
"256",
")",
"else",
":",
"hex_bytecode",
"+=",
"' 00'",
"elif",
"self",
".",
"inst_size",
"==",
"3",
":",
"# Not 3.6 or later",
"hex_bytecode",
"+=",
"\" %02x %02x\"",
"%",
"(",
"(",
"self",
".",
"arg",
">>",
"8",
",",
"self",
".",
"arg",
"%",
"256",
")",
")",
"fields",
".",
"append",
"(",
"hex_bytecode",
"+",
"'|'",
")",
"# Column: Opcode name",
"fields",
".",
"append",
"(",
"self",
".",
"opname",
".",
"ljust",
"(",
"20",
")",
")",
"# Column: Opcode argument",
"if",
"self",
".",
"arg",
"is",
"not",
"None",
":",
"argrepr",
"=",
"self",
".",
"argrepr",
"if",
"asm_format",
":",
"if",
"self",
".",
"optype",
"==",
"'jabs'",
":",
"fields",
".",
"append",
"(",
"'L'",
"+",
"str",
"(",
"self",
".",
"arg",
")",
")",
"elif",
"self",
".",
"optype",
"==",
"'jrel'",
":",
"argval",
"=",
"self",
".",
"offset",
"+",
"self",
".",
"arg",
"+",
"self",
".",
"inst_size",
"fields",
".",
"append",
"(",
"'L'",
"+",
"str",
"(",
"argval",
")",
")",
"elif",
"self",
".",
"optype",
"in",
"indexed_operand",
":",
"fields",
".",
"append",
"(",
"'(%s)'",
"%",
"argrepr",
")",
"argrepr",
"=",
"None",
"elif",
"(",
"self",
".",
"optype",
"==",
"'const'",
"and",
"not",
"re",
".",
"search",
"(",
"'\\s'",
",",
"argrepr",
")",
")",
":",
"fields",
".",
"append",
"(",
"'(%s)'",
"%",
"argrepr",
")",
"argrepr",
"=",
"None",
"else",
":",
"fields",
".",
"append",
"(",
"repr",
"(",
"self",
".",
"arg",
")",
")",
"elif",
"not",
"(",
"show_bytes",
"and",
"argrepr",
")",
":",
"fields",
".",
"append",
"(",
"repr",
"(",
"self",
".",
"arg",
")",
".",
"rjust",
"(",
"6",
")",
")",
"# Column: Opcode argument details",
"if",
"argrepr",
":",
"fields",
".",
"append",
"(",
"'(%s)'",
"%",
"argrepr",
")",
"pass",
"pass",
"return",
"' '",
".",
"join",
"(",
"fields",
")",
".",
"rstrip",
"(",
")"
] | 38.543478 | 13.021739 |
def main_hrun():
    """ parse command line options and run commands."""
    parser = argparse.ArgumentParser(description="Tools for http(s) test. Base on rtsf.")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--log-file',
        help="Write logs to specified file path.")
    parser.add_argument(
        'case_file',
        help="yaml testcase file")
    color_print("httpdriver {}".format(__version__), "GREEN")
    parsed = parser.parse_args()
    logger.setup_logger(parsed.log_level, parsed.log_file)
    # Run the yaml test case with the HTTP driver, then emit the report path.
    finished_runner = TestRunner(runner=HttpDriver).run(parsed.case_file)
    report_path = finished_runner.gen_html_report()
    color_print("report: {}".format(report_path))
"def",
"main_hrun",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Tools for http(s) test. Base on rtsf.\"",
")",
"parser",
".",
"add_argument",
"(",
"'--log-level'",
",",
"default",
"=",
"'INFO'",
",",
"help",
"=",
"\"Specify logging level, default is INFO.\"",
")",
"parser",
".",
"add_argument",
"(",
"'--log-file'",
",",
"help",
"=",
"\"Write logs to specified file path.\"",
")",
"parser",
".",
"add_argument",
"(",
"'case_file'",
",",
"help",
"=",
"\"yaml testcase file\"",
")",
"color_print",
"(",
"\"httpdriver {}\"",
".",
"format",
"(",
"__version__",
")",
",",
"\"GREEN\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"logger",
".",
"setup_logger",
"(",
"args",
".",
"log_level",
",",
"args",
".",
"log_file",
")",
"runner",
"=",
"TestRunner",
"(",
"runner",
"=",
"HttpDriver",
")",
".",
"run",
"(",
"args",
".",
"case_file",
")",
"html_report",
"=",
"runner",
".",
"gen_html_report",
"(",
")",
"color_print",
"(",
"\"report: {}\"",
".",
"format",
"(",
"html_report",
")",
")"
] | 33.75 | 19.833333 |
def contents(self):
    """Get svg string

    Assembles the final ``<svg>`` document from the stored header,
    accumulated elements and the sizing attributes of this object.
    """
    c = self._header[:]
    c.append(' font-weight="{}"'.format(self.font_weight))
    c.append(' font-family="{}"'.format(self.font_family))
    c.append(' width="{}" height="{}"'.format(*self.screen_size))
    # Scale the drawing, then fit it into a square viewBox sized by the
    # longer side so the aspect ratio is preserved and the image centred.
    sclw = self.original_size[0] * self.scale_factor
    sclh = self.original_size[1] * self.scale_factor
    longside = max([sclw, sclh])
    width = round(longside + self.margin * 2, 2)
    height = round(longside + self.margin * 2, 2)
    xleft = round(-self.margin - (longside - sclw) / 2, 2)
    ytop = round(-self.margin - (longside - sclh) / 2, 2)
    c.append(' viewBox="{} {} {} {}">\n'.format(
        xleft, ytop, width, height))
    # Optional background rectangle covering the whole viewBox.
    # NOTE(review): the comma in '<rect x="{}", y="{}"' looks like invalid
    # XML attribute syntax -- confirm downstream renderers tolerate it.
    if self.bgcolor is not None:
        c.append('<rect x="{}", y="{}" width="{}" height="{}" fill="{}" \
            />\n'.format(xleft, ytop, width, height, self.bgcolor))
    c.extend(self._elems)
    c.append("</svg>")
    return "".join(c)
"def",
"contents",
"(",
"self",
")",
":",
"c",
"=",
"self",
".",
"_header",
"[",
":",
"]",
"c",
".",
"append",
"(",
"' font-weight=\"{}\"'",
".",
"format",
"(",
"self",
".",
"font_weight",
")",
")",
"c",
".",
"append",
"(",
"' font-family=\"{}\"'",
".",
"format",
"(",
"self",
".",
"font_family",
")",
")",
"c",
".",
"append",
"(",
"' width=\"{}\" height=\"{}\"'",
".",
"format",
"(",
"*",
"self",
".",
"screen_size",
")",
")",
"sclw",
"=",
"self",
".",
"original_size",
"[",
"0",
"]",
"*",
"self",
".",
"scale_factor",
"sclh",
"=",
"self",
".",
"original_size",
"[",
"1",
"]",
"*",
"self",
".",
"scale_factor",
"longside",
"=",
"max",
"(",
"[",
"sclw",
",",
"sclh",
"]",
")",
"width",
"=",
"round",
"(",
"longside",
"+",
"self",
".",
"margin",
"*",
"2",
",",
"2",
")",
"height",
"=",
"round",
"(",
"longside",
"+",
"self",
".",
"margin",
"*",
"2",
",",
"2",
")",
"xleft",
"=",
"round",
"(",
"-",
"self",
".",
"margin",
"-",
"(",
"longside",
"-",
"sclw",
")",
"/",
"2",
",",
"2",
")",
"ytop",
"=",
"round",
"(",
"-",
"self",
".",
"margin",
"-",
"(",
"longside",
"-",
"sclh",
")",
"/",
"2",
",",
"2",
")",
"c",
".",
"append",
"(",
"' viewBox=\"{} {} {} {}\">\\n'",
".",
"format",
"(",
"xleft",
",",
"ytop",
",",
"width",
",",
"height",
")",
")",
"if",
"self",
".",
"bgcolor",
"is",
"not",
"None",
":",
"c",
".",
"append",
"(",
"'<rect x=\"{}\", y=\"{}\" width=\"{}\" height=\"{}\" fill=\"{}\" \\\n />\\n'",
".",
"format",
"(",
"xleft",
",",
"ytop",
",",
"width",
",",
"height",
",",
"self",
".",
"bgcolor",
")",
")",
"c",
".",
"extend",
"(",
"self",
".",
"_elems",
")",
"c",
".",
"append",
"(",
"\"</svg>\"",
")",
"return",
"\"\"",
".",
"join",
"(",
"c",
")"
] | 45.772727 | 15.227273 |
def from_directory_as_inmemory_db(cls, gtfs_directory):
    """
    Instantiate a GTFS object by computing

    Parameters
    ----------
    gtfs_directory: str
        path to the directory for importing the database
    """
    # Imported lazily to avoid a circular import between this module
    # and gtfspy.import_gtfs.
    from gtfspy.import_gtfs import import_gtfs
    memory_conn = sqlite3.connect(":memory:")
    import_gtfs(gtfs_directory, memory_conn,
                preserve_connection=True, print_progress=False)
    return cls(memory_conn)
"def",
"from_directory_as_inmemory_db",
"(",
"cls",
",",
"gtfs_directory",
")",
":",
"# this import is here to avoid circular imports (which turned out to be a problem)",
"from",
"gtfspy",
".",
"import_gtfs",
"import",
"import_gtfs",
"conn",
"=",
"sqlite3",
".",
"connect",
"(",
"\":memory:\"",
")",
"import_gtfs",
"(",
"gtfs_directory",
",",
"conn",
",",
"preserve_connection",
"=",
"True",
",",
"print_progress",
"=",
"False",
")",
"return",
"cls",
"(",
"conn",
")"
] | 35.176471 | 14.235294 |
def p_new_expr_nobf(self, p):
    # NOTE: the docstring below is NOT documentation -- PLY (yacc) reads it
    # as the grammar production for this rule.  Do not edit its text.
    """new_expr_nobf : member_expr_nobf
                     | NEW new_expr
    """
    # len(p) == 2 -> matched the bare member_expr_nobf alternative;
    # propagate the child node unchanged.
    if len(p) == 2:
        p[0] = p[1]
    else:
        # Matched "NEW new_expr": wrap the operand in a NewExpr AST node
        # and record the source position for later error reporting.
        p[0] = self.asttypes.NewExpr(p[2])
        p[0].setpos(p)
p[0].setpos(p) | [
"def",
"p_new_expr_nobf",
"(",
"self",
",",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"2",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"self",
".",
"asttypes",
".",
"NewExpr",
"(",
"p",
"[",
"2",
"]",
")",
"p",
"[",
"0",
"]",
".",
"setpos",
"(",
"p",
")"
] | 28.111111 | 10.333333 |
def to_seconds(value, time_unit):
"""
:param value: (Number), value to be translated to seconds
:param time_unit: Time duration in seconds
:return: Value of the value in seconds
"""
if isinstance(value, bool):
# bool is a subclass of int. Don't let bool and float multiplication.
raise TypeError
return float(value) * time_unit | [
"def",
"to_seconds",
"(",
"value",
",",
"time_unit",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"# bool is a subclass of int. Don't let bool and float multiplication.",
"raise",
"TypeError",
"return",
"float",
"(",
"value",
")",
"*",
"time_unit"
] | 39.8 | 10.8 |
def __calculate_weight(self, stimulus1, stimulus2):
"""!
@brief Calculate weight between neurons that have external stimulus1 and stimulus2.
@param[in] stimulus1 (list): External stimulus of the first neuron.
@param[in] stimulus2 (list): External stimulus of the second neuron.
@return (double) Weight between neurons that are under specified stimulus.
"""
distance = euclidean_distance_square(stimulus1, stimulus2)
return math.exp(-distance / (2.0 * self.__average_distance)) | [
"def",
"__calculate_weight",
"(",
"self",
",",
"stimulus1",
",",
"stimulus2",
")",
":",
"distance",
"=",
"euclidean_distance_square",
"(",
"stimulus1",
",",
"stimulus2",
")",
"return",
"math",
".",
"exp",
"(",
"-",
"distance",
"/",
"(",
"2.0",
"*",
"self",
".",
"__average_distance",
")",
")"
] | 44.307692 | 27.615385 |
def clean(dry_run='n'):
"""Wipes compiled and cached python files. To simulate: pynt clean[dry_run=y]"""
file_patterns = ['*.pyc', '*.pyo', '*~']
dir_patterns = ['__pycache__']
recursive_pattern_delete(project_paths.root, file_patterns, dir_patterns, dry_run=bool(dry_run.lower() == 'y')) | [
"def",
"clean",
"(",
"dry_run",
"=",
"'n'",
")",
":",
"file_patterns",
"=",
"[",
"'*.pyc'",
",",
"'*.pyo'",
",",
"'*~'",
"]",
"dir_patterns",
"=",
"[",
"'__pycache__'",
"]",
"recursive_pattern_delete",
"(",
"project_paths",
".",
"root",
",",
"file_patterns",
",",
"dir_patterns",
",",
"dry_run",
"=",
"bool",
"(",
"dry_run",
".",
"lower",
"(",
")",
"==",
"'y'",
")",
")"
] | 60 | 20.4 |
def create_routes(routes: Tuple[HTTPMethod], handle_http: Callable,
default_base_handler_class: Any) -> List[Tuple[str, Any]]:
"""Creates handler routes from the provided routes.
:param routes: A tuple containing the route and another tuple with
all http methods allowed for the route.
:param handle_http: The HTTP handler function that should be
used to wrap the logic functions.
:param default_base_handler_class: The default base handler class that
should be used.
:returns: A list of tuples containing the route and generated handler.
"""
created_routes = []
all_handler_names = []
for r in routes:
handler = None
if r.base_handler_class is not None:
base_handler_class = r.base_handler_class
else:
base_handler_class = default_base_handler_class
# Define the handler name. To prevent issues where auto-generated
# handler names conflict with existing, appending a number to the
# end of the hanlder name if it already exists.
handler_name = get_handler_name(r, r.methods[0].logic)
if handler_name in all_handler_names:
handler_name = '{}{}'.format(
handler_name, len(all_handler_names))
all_handler_names.append(handler_name)
for method in r.methods:
logic = method.logic
http_method = method.method
http_func = create_http_method(logic, http_method, handle_http,
before=r.before, after=r.after)
handler_methods_and_properties = {
'__name__': handler_name,
'_doctor_heading': r.heading,
'methods': set([http_method.upper()]),
http_method: http_func,
}
if handler is None:
handler = type(
handler_name, (base_handler_class,),
handler_methods_and_properties)
else:
setattr(handler, http_method, http_func)
# This is specific to Flask. Its MethodView class
# initializes the methods attribute in __new__ so we
# need to add all the other http methods we are defining
# on the handler after it gets created by type.
if hasattr(handler, 'methods'):
handler.methods.add(http_method.upper())
created_routes.append((r.route, handler))
return created_routes | [
"def",
"create_routes",
"(",
"routes",
":",
"Tuple",
"[",
"HTTPMethod",
"]",
",",
"handle_http",
":",
"Callable",
",",
"default_base_handler_class",
":",
"Any",
")",
"->",
"List",
"[",
"Tuple",
"[",
"str",
",",
"Any",
"]",
"]",
":",
"created_routes",
"=",
"[",
"]",
"all_handler_names",
"=",
"[",
"]",
"for",
"r",
"in",
"routes",
":",
"handler",
"=",
"None",
"if",
"r",
".",
"base_handler_class",
"is",
"not",
"None",
":",
"base_handler_class",
"=",
"r",
".",
"base_handler_class",
"else",
":",
"base_handler_class",
"=",
"default_base_handler_class",
"# Define the handler name. To prevent issues where auto-generated",
"# handler names conflict with existing, appending a number to the",
"# end of the hanlder name if it already exists.",
"handler_name",
"=",
"get_handler_name",
"(",
"r",
",",
"r",
".",
"methods",
"[",
"0",
"]",
".",
"logic",
")",
"if",
"handler_name",
"in",
"all_handler_names",
":",
"handler_name",
"=",
"'{}{}'",
".",
"format",
"(",
"handler_name",
",",
"len",
"(",
"all_handler_names",
")",
")",
"all_handler_names",
".",
"append",
"(",
"handler_name",
")",
"for",
"method",
"in",
"r",
".",
"methods",
":",
"logic",
"=",
"method",
".",
"logic",
"http_method",
"=",
"method",
".",
"method",
"http_func",
"=",
"create_http_method",
"(",
"logic",
",",
"http_method",
",",
"handle_http",
",",
"before",
"=",
"r",
".",
"before",
",",
"after",
"=",
"r",
".",
"after",
")",
"handler_methods_and_properties",
"=",
"{",
"'__name__'",
":",
"handler_name",
",",
"'_doctor_heading'",
":",
"r",
".",
"heading",
",",
"'methods'",
":",
"set",
"(",
"[",
"http_method",
".",
"upper",
"(",
")",
"]",
")",
",",
"http_method",
":",
"http_func",
",",
"}",
"if",
"handler",
"is",
"None",
":",
"handler",
"=",
"type",
"(",
"handler_name",
",",
"(",
"base_handler_class",
",",
")",
",",
"handler_methods_and_properties",
")",
"else",
":",
"setattr",
"(",
"handler",
",",
"http_method",
",",
"http_func",
")",
"# This is specific to Flask. Its MethodView class",
"# initializes the methods attribute in __new__ so we",
"# need to add all the other http methods we are defining",
"# on the handler after it gets created by type.",
"if",
"hasattr",
"(",
"handler",
",",
"'methods'",
")",
":",
"handler",
".",
"methods",
".",
"add",
"(",
"http_method",
".",
"upper",
"(",
")",
")",
"created_routes",
".",
"append",
"(",
"(",
"r",
".",
"route",
",",
"handler",
")",
")",
"return",
"created_routes"
] | 44.446429 | 18.125 |
def wait_for_host(port, interval=1, timeout=30, to_start=True, queue=None,
ssl_pymongo_options=None):
"""
Ping server and wait for response.
Ping a mongod or mongos every `interval` seconds until it responds, or
`timeout` seconds have passed. If `to_start` is set to False, will wait for
the node to shut down instead. This function can be called as a separate
thread.
If queue is provided, it will place the results in the message queue and
return, otherwise it will just return the result directly.
"""
host = 'localhost:%i' % port
start_time = time.time()
while True:
if (time.time() - start_time) > timeout:
if queue:
queue.put_nowait((port, False))
return False
try:
# make connection and ping host
con = MongoConnection(host, **(ssl_pymongo_options or {}))
con.admin.command('ping')
if to_start:
if queue:
queue.put_nowait((port, True))
return True
else:
time.sleep(interval)
except Exception:
if to_start:
time.sleep(interval)
else:
if queue:
queue.put_nowait((port, True))
return True | [
"def",
"wait_for_host",
"(",
"port",
",",
"interval",
"=",
"1",
",",
"timeout",
"=",
"30",
",",
"to_start",
"=",
"True",
",",
"queue",
"=",
"None",
",",
"ssl_pymongo_options",
"=",
"None",
")",
":",
"host",
"=",
"'localhost:%i'",
"%",
"port",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"while",
"True",
":",
"if",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
">",
"timeout",
":",
"if",
"queue",
":",
"queue",
".",
"put_nowait",
"(",
"(",
"port",
",",
"False",
")",
")",
"return",
"False",
"try",
":",
"# make connection and ping host",
"con",
"=",
"MongoConnection",
"(",
"host",
",",
"*",
"*",
"(",
"ssl_pymongo_options",
"or",
"{",
"}",
")",
")",
"con",
".",
"admin",
".",
"command",
"(",
"'ping'",
")",
"if",
"to_start",
":",
"if",
"queue",
":",
"queue",
".",
"put_nowait",
"(",
"(",
"port",
",",
"True",
")",
")",
"return",
"True",
"else",
":",
"time",
".",
"sleep",
"(",
"interval",
")",
"except",
"Exception",
":",
"if",
"to_start",
":",
"time",
".",
"sleep",
"(",
"interval",
")",
"else",
":",
"if",
"queue",
":",
"queue",
".",
"put_nowait",
"(",
"(",
"port",
",",
"True",
")",
")",
"return",
"True"
] | 34.421053 | 18.210526 |
def param_to_array(*param):
"""
Convert an arbitrary number of parameters to :class:ndarray class objects.
This is for converting parameter objects to numpy arrays, when using
scipy.weave.inline routine. In scipy.weave.blitz there is no automatic
array detection (even when the array inherits from :class:ndarray)
"""
import warnings
warnings.warn("Please use param.values, as this function will be deprecated in the next release.", DeprecationWarning)
assert len(param) > 0, "At least one parameter needed"
if len(param) == 1:
return param[0].view(np.ndarray)
return [x.view(np.ndarray) for x in param] | [
"def",
"param_to_array",
"(",
"*",
"param",
")",
":",
"import",
"warnings",
"warnings",
".",
"warn",
"(",
"\"Please use param.values, as this function will be deprecated in the next release.\"",
",",
"DeprecationWarning",
")",
"assert",
"len",
"(",
"param",
")",
">",
"0",
",",
"\"At least one parameter needed\"",
"if",
"len",
"(",
"param",
")",
"==",
"1",
":",
"return",
"param",
"[",
"0",
"]",
".",
"view",
"(",
"np",
".",
"ndarray",
")",
"return",
"[",
"x",
".",
"view",
"(",
"np",
".",
"ndarray",
")",
"for",
"x",
"in",
"param",
"]"
] | 49.538462 | 22.461538 |
def also_restrict_to(self, restriction):
"""
Works like restict_to but offers an additional restriction. Playbooks use this
to implement serial behavior.
"""
if type(restriction) != list:
restriction = [ restriction ]
self._also_restriction = restriction | [
"def",
"also_restrict_to",
"(",
"self",
",",
"restriction",
")",
":",
"if",
"type",
"(",
"restriction",
")",
"!=",
"list",
":",
"restriction",
"=",
"[",
"restriction",
"]",
"self",
".",
"_also_restriction",
"=",
"restriction"
] | 38.5 | 7.25 |
def deblur(input_seqs, mean_error=0.005,
error_dist=None,
indel_prob=0.01, indel_max=3):
"""Deblur the reads
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format. The label
should include the sequence count in the 'size=X' format.
mean_error : float, optional
The mean illumina error, used for original sequence estimate.
Default: 0.005
error_dist : list of float, optional
A list of error probabilities. The length of the list determines the
amount of hamming distances taken into account. Default: None, use
the default error profile (from get_default_error_profile() )
indel_prob : float, optional
Indel probability (same for N indels). Default: 0.01
indel_max : int, optional
The maximal number of indels expected by errors. Default: 3
Results
-------
list of Sequence
The deblurred sequences
Notes
-----
mean_error is used only for normalizing the peak height before deblurring.
The array 'error_dist' represents the error distribution, where
Xi = max frequency of error hamming. The length of this array - 1 limits
the hamming distance taken into account, i.e. if the length if `error_dist`
is 10, sequences up to 10 - 1 = 9 hamming distance will be taken into
account
"""
logger = logging.getLogger(__name__)
if error_dist is None:
error_dist = get_default_error_profile()
logger.debug('Using error profile %s' % error_dist)
# Get the sequences
seqs = get_sequences(input_seqs)
if seqs is None:
logger.warn('no sequences deblurred')
return None
logger.info('deblurring %d sequences' % len(seqs))
# fix the original frequencies of each read error using the
# mean error profile
mod_factor = pow((1 - mean_error), seqs[0].unaligned_length)
error_dist = np.array(error_dist) / mod_factor
max_h_dist = len(error_dist) - 1
for seq_i in seqs:
# no need to remove neighbors if freq. is <=0
if seq_i.frequency <= 0:
continue
# Correct for the fact that many reads are expected to be mutated
num_err = error_dist * seq_i.frequency
# if it's low level, just continue
if num_err[1] < 0.1:
continue
# Compare to all other sequences and calculate hamming dist
seq_i_len = len(seq_i.sequence.rstrip('-'))
for seq_j in seqs:
# Ignore current sequence
if seq_i == seq_j:
continue
# Calculate the hamming distance
h_dist = np.count_nonzero(np.not_equal(seq_i.np_sequence,
seq_j.np_sequence))
# If far away, don't need to correct
if h_dist > max_h_dist:
continue
# Close, so lets calculate exact distance
# We stop checking in the shortest sequence after removing trailing
# indels. We need to do this in order to avoid double counting
# the insertions/deletions
length = min(seq_i_len, len(seq_j.sequence.rstrip('-')))
sub_seq_i = seq_i.np_sequence[:length]
sub_seq_j = seq_j.np_sequence[:length]
mask = (sub_seq_i != sub_seq_j)
# find all indels
mut_is_indel = np.logical_or(sub_seq_i[mask] == 4,
sub_seq_j[mask] == 4)
num_indels = mut_is_indel.sum()
if num_indels > 0:
# need to account for indel in one sequence not solved in the other
# (so we have '-' at the end. Need to ignore it in the total count)
h_dist = np.count_nonzero(np.not_equal(seq_i.np_sequence[:length],
seq_j.np_sequence[:length]))
num_substitutions = h_dist - num_indels
correction_value = num_err[num_substitutions]
if num_indels > indel_max:
correction_value = 0
elif num_indels > 0:
# remove errors due to (PCR?) indels (saw in 22 mock mixture)
correction_value = correction_value * indel_prob
# met all the criteria - so correct the frequency of the neighbor
seq_j.frequency -= correction_value
result = [s for s in seqs if round(s.frequency) > 0]
logger.info('%d unique sequences left following deblurring' % len(result))
return result | [
"def",
"deblur",
"(",
"input_seqs",
",",
"mean_error",
"=",
"0.005",
",",
"error_dist",
"=",
"None",
",",
"indel_prob",
"=",
"0.01",
",",
"indel_max",
"=",
"3",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"if",
"error_dist",
"is",
"None",
":",
"error_dist",
"=",
"get_default_error_profile",
"(",
")",
"logger",
".",
"debug",
"(",
"'Using error profile %s'",
"%",
"error_dist",
")",
"# Get the sequences",
"seqs",
"=",
"get_sequences",
"(",
"input_seqs",
")",
"if",
"seqs",
"is",
"None",
":",
"logger",
".",
"warn",
"(",
"'no sequences deblurred'",
")",
"return",
"None",
"logger",
".",
"info",
"(",
"'deblurring %d sequences'",
"%",
"len",
"(",
"seqs",
")",
")",
"# fix the original frequencies of each read error using the",
"# mean error profile",
"mod_factor",
"=",
"pow",
"(",
"(",
"1",
"-",
"mean_error",
")",
",",
"seqs",
"[",
"0",
"]",
".",
"unaligned_length",
")",
"error_dist",
"=",
"np",
".",
"array",
"(",
"error_dist",
")",
"/",
"mod_factor",
"max_h_dist",
"=",
"len",
"(",
"error_dist",
")",
"-",
"1",
"for",
"seq_i",
"in",
"seqs",
":",
"# no need to remove neighbors if freq. is <=0",
"if",
"seq_i",
".",
"frequency",
"<=",
"0",
":",
"continue",
"# Correct for the fact that many reads are expected to be mutated",
"num_err",
"=",
"error_dist",
"*",
"seq_i",
".",
"frequency",
"# if it's low level, just continue",
"if",
"num_err",
"[",
"1",
"]",
"<",
"0.1",
":",
"continue",
"# Compare to all other sequences and calculate hamming dist",
"seq_i_len",
"=",
"len",
"(",
"seq_i",
".",
"sequence",
".",
"rstrip",
"(",
"'-'",
")",
")",
"for",
"seq_j",
"in",
"seqs",
":",
"# Ignore current sequence",
"if",
"seq_i",
"==",
"seq_j",
":",
"continue",
"# Calculate the hamming distance",
"h_dist",
"=",
"np",
".",
"count_nonzero",
"(",
"np",
".",
"not_equal",
"(",
"seq_i",
".",
"np_sequence",
",",
"seq_j",
".",
"np_sequence",
")",
")",
"# If far away, don't need to correct",
"if",
"h_dist",
">",
"max_h_dist",
":",
"continue",
"# Close, so lets calculate exact distance",
"# We stop checking in the shortest sequence after removing trailing",
"# indels. We need to do this in order to avoid double counting",
"# the insertions/deletions",
"length",
"=",
"min",
"(",
"seq_i_len",
",",
"len",
"(",
"seq_j",
".",
"sequence",
".",
"rstrip",
"(",
"'-'",
")",
")",
")",
"sub_seq_i",
"=",
"seq_i",
".",
"np_sequence",
"[",
":",
"length",
"]",
"sub_seq_j",
"=",
"seq_j",
".",
"np_sequence",
"[",
":",
"length",
"]",
"mask",
"=",
"(",
"sub_seq_i",
"!=",
"sub_seq_j",
")",
"# find all indels",
"mut_is_indel",
"=",
"np",
".",
"logical_or",
"(",
"sub_seq_i",
"[",
"mask",
"]",
"==",
"4",
",",
"sub_seq_j",
"[",
"mask",
"]",
"==",
"4",
")",
"num_indels",
"=",
"mut_is_indel",
".",
"sum",
"(",
")",
"if",
"num_indels",
">",
"0",
":",
"# need to account for indel in one sequence not solved in the other",
"# (so we have '-' at the end. Need to ignore it in the total count)",
"h_dist",
"=",
"np",
".",
"count_nonzero",
"(",
"np",
".",
"not_equal",
"(",
"seq_i",
".",
"np_sequence",
"[",
":",
"length",
"]",
",",
"seq_j",
".",
"np_sequence",
"[",
":",
"length",
"]",
")",
")",
"num_substitutions",
"=",
"h_dist",
"-",
"num_indels",
"correction_value",
"=",
"num_err",
"[",
"num_substitutions",
"]",
"if",
"num_indels",
">",
"indel_max",
":",
"correction_value",
"=",
"0",
"elif",
"num_indels",
">",
"0",
":",
"# remove errors due to (PCR?) indels (saw in 22 mock mixture)",
"correction_value",
"=",
"correction_value",
"*",
"indel_prob",
"# met all the criteria - so correct the frequency of the neighbor",
"seq_j",
".",
"frequency",
"-=",
"correction_value",
"result",
"=",
"[",
"s",
"for",
"s",
"in",
"seqs",
"if",
"round",
"(",
"s",
".",
"frequency",
")",
">",
"0",
"]",
"logger",
".",
"info",
"(",
"'%d unique sequences left following deblurring'",
"%",
"len",
"(",
"result",
")",
")",
"return",
"result"
] | 37.773109 | 22.176471 |
def write_file(fname, *lines):
'write lines to a file'
yield 'touch {}'.format(fname)
for line in lines:
yield "echo {} >> {}".format(line, fname) | [
"def",
"write_file",
"(",
"fname",
",",
"*",
"lines",
")",
":",
"yield",
"'touch {}'",
".",
"format",
"(",
"fname",
")",
"for",
"line",
"in",
"lines",
":",
"yield",
"\"echo {} >> {}\"",
".",
"format",
"(",
"line",
",",
"fname",
")"
] | 32.4 | 11.2 |
def validateStr(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None):
"""Raises ValidationException if value is not a string. This function
is identical to the built-in input() function, but also offers the
PySimpleValidate features of not allowing blank values by default,
automatically stripping whitespace, and having allowlist/blocklist
regular expressions.
Returns value, so it can be used inline in an expression:
print('Hello, ' + validateStr(your_name))
* value (str): The value being validated as a string.
* blank (bool): If True, a blank string will be accepted. Defaults to False. Defaults to False.
* strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
* allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
* blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
* excMsg (str): A custom message to use in the raised ValidationException.
>>> import pysimplevalidate as pysv
>>> pysv.validateStr('hello')
'hello'
>>> pysv.validateStr('')
Traceback (most recent call last):
...
pysimplevalidate.ValidationException: Blank values are not allowed.
>>> pysv.validateStr('', blank=True)
''
>>> pysv.validateStr(' hello ')
'hello'
>>> pysv.validateStr('hello', blocklistRegexes=['hello'])
Traceback (most recent call last):
...
pysimplevalidate.ValidationException: This response is invalid.
>>> pysv.validateStr('hello', blocklistRegexes=[('hello', 'Hello is not allowed')])
Traceback (most recent call last):
...
pysimplevalidate.ValidationException: Hello is not allowed
>>> pysv.validateStr('hello', allowlistRegexes=['hello'], blocklistRegexes=['llo'])
'hello'
"""
# Validate parameters.
_validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=None, blocklistRegexes=blocklistRegexes)
returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
return value | [
"def",
"validateStr",
"(",
"value",
",",
"blank",
"=",
"False",
",",
"strip",
"=",
"None",
",",
"allowlistRegexes",
"=",
"None",
",",
"blocklistRegexes",
"=",
"None",
",",
"excMsg",
"=",
"None",
")",
":",
"# Validate parameters.",
"_validateGenericParameters",
"(",
"blank",
"=",
"blank",
",",
"strip",
"=",
"strip",
",",
"allowlistRegexes",
"=",
"None",
",",
"blocklistRegexes",
"=",
"blocklistRegexes",
")",
"returnNow",
",",
"value",
"=",
"_prevalidationCheck",
"(",
"value",
",",
"blank",
",",
"strip",
",",
"allowlistRegexes",
",",
"blocklistRegexes",
",",
"excMsg",
")",
"return",
"value"
] | 49.326087 | 32.456522 |
def delete(instance, disconnect=True):
'''
Delete an *instance* from its metaclass instance pool and optionally
*disconnect* it from any links it might be connected to.
'''
if not isinstance(instance, Class):
raise DeleteException("the provided argument is not an xtuml instance")
return get_metaclass(instance).delete(instance, disconnect) | [
"def",
"delete",
"(",
"instance",
",",
"disconnect",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"instance",
",",
"Class",
")",
":",
"raise",
"DeleteException",
"(",
"\"the provided argument is not an xtuml instance\"",
")",
"return",
"get_metaclass",
"(",
"instance",
")",
".",
"delete",
"(",
"instance",
",",
"disconnect",
")"
] | 41.888889 | 23.444444 |
def p_statement_namespace(self, p):
""" statement : css_namespace t_ws word css_string t_semicolon
"""
p[0] = Statement(list(p)[1:], p.lineno(1))
p[0].parse(None) | [
"def",
"p_statement_namespace",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"Statement",
"(",
"list",
"(",
"p",
")",
"[",
"1",
":",
"]",
",",
"p",
".",
"lineno",
"(",
"1",
")",
")",
"p",
"[",
"0",
"]",
".",
"parse",
"(",
"None",
")"
] | 40.2 | 6.2 |
def parse_text(infile, xpath=None, filter_words=None, attributes=None):
"""Filter text using XPath, regex keywords, and tag attributes.
Keyword arguments:
infile -- HTML or text content to parse (list)
xpath -- an XPath expression (str)
filter_words -- regex keywords (list)
attributes -- HTML tag attributes (list)
Return a list of strings of text.
"""
infiles = []
text = []
if xpath is not None:
infile = parse_html(infile, xpath)
if isinstance(infile, list):
if isinstance(infile[0], lh.HtmlElement):
infiles = list(infile)
else:
text = [line + '\n' for line in infile]
elif isinstance(infile, lh.HtmlElement):
infiles = [infile]
else:
text = [infile]
else:
infiles = [infile]
if attributes is not None:
attributes = [clean_attr(x) for x in attributes]
attributes = [x for x in attributes if x]
else:
attributes = ['text()']
if not text:
text_xpath = '//*[not(self::script) and not(self::style)]'
for attr in attributes:
for infile in infiles:
if isinstance(infile, lh.HtmlElement):
new_text = infile.xpath('{0}/{1}'.format(text_xpath, attr))
else:
# re.split preserves delimiters place in the list
new_text = [x for x in re.split('(\n)', infile) if x]
text += new_text
if filter_words is not None:
text = re_filter(text, filter_words)
return [''.join(x for x in line if x in string.printable)
for line in remove_whitespace(text) if line] | [
"def",
"parse_text",
"(",
"infile",
",",
"xpath",
"=",
"None",
",",
"filter_words",
"=",
"None",
",",
"attributes",
"=",
"None",
")",
":",
"infiles",
"=",
"[",
"]",
"text",
"=",
"[",
"]",
"if",
"xpath",
"is",
"not",
"None",
":",
"infile",
"=",
"parse_html",
"(",
"infile",
",",
"xpath",
")",
"if",
"isinstance",
"(",
"infile",
",",
"list",
")",
":",
"if",
"isinstance",
"(",
"infile",
"[",
"0",
"]",
",",
"lh",
".",
"HtmlElement",
")",
":",
"infiles",
"=",
"list",
"(",
"infile",
")",
"else",
":",
"text",
"=",
"[",
"line",
"+",
"'\\n'",
"for",
"line",
"in",
"infile",
"]",
"elif",
"isinstance",
"(",
"infile",
",",
"lh",
".",
"HtmlElement",
")",
":",
"infiles",
"=",
"[",
"infile",
"]",
"else",
":",
"text",
"=",
"[",
"infile",
"]",
"else",
":",
"infiles",
"=",
"[",
"infile",
"]",
"if",
"attributes",
"is",
"not",
"None",
":",
"attributes",
"=",
"[",
"clean_attr",
"(",
"x",
")",
"for",
"x",
"in",
"attributes",
"]",
"attributes",
"=",
"[",
"x",
"for",
"x",
"in",
"attributes",
"if",
"x",
"]",
"else",
":",
"attributes",
"=",
"[",
"'text()'",
"]",
"if",
"not",
"text",
":",
"text_xpath",
"=",
"'//*[not(self::script) and not(self::style)]'",
"for",
"attr",
"in",
"attributes",
":",
"for",
"infile",
"in",
"infiles",
":",
"if",
"isinstance",
"(",
"infile",
",",
"lh",
".",
"HtmlElement",
")",
":",
"new_text",
"=",
"infile",
".",
"xpath",
"(",
"'{0}/{1}'",
".",
"format",
"(",
"text_xpath",
",",
"attr",
")",
")",
"else",
":",
"# re.split preserves delimiters place in the list",
"new_text",
"=",
"[",
"x",
"for",
"x",
"in",
"re",
".",
"split",
"(",
"'(\\n)'",
",",
"infile",
")",
"if",
"x",
"]",
"text",
"+=",
"new_text",
"if",
"filter_words",
"is",
"not",
"None",
":",
"text",
"=",
"re_filter",
"(",
"text",
",",
"filter_words",
")",
"return",
"[",
"''",
".",
"join",
"(",
"x",
"for",
"x",
"in",
"line",
"if",
"x",
"in",
"string",
".",
"printable",
")",
"for",
"line",
"in",
"remove_whitespace",
"(",
"text",
")",
"if",
"line",
"]"
] | 34.75 | 17.25 |
def validate_arg(f,
arg_name,
*validation_func, # type: ValidationFuncs
**kwargs
):
# type: (...) -> Callable
"""
A decorator to apply function input validation for the given argument name, with the provided base validation
function(s). You may use several such decorators on a given function as long as they are stacked on top of each
other (no external decorator in the middle)
:param arg_name:
:param validation_func: the base validation function or list of base validation functions to use. A callable, a
tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists
are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit
`_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead
of callables, they will be transformed to functions automatically.
:param error_type: a subclass of ValidationError to raise in case of validation failure. By default a
ValidationError will be raised with the provided help_msg
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various
possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_VALIDATE`.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used
to format the help message
:return: a function decorator, able to transform a function into a function that will perform input validation
before executing the function's code everytime it is executed.
"""
return decorate_with_validation(f, arg_name, *validation_func, **kwargs) | [
"def",
"validate_arg",
"(",
"f",
",",
"arg_name",
",",
"*",
"validation_func",
",",
"# type: ValidationFuncs",
"*",
"*",
"kwargs",
")",
":",
"# type: (...) -> Callable",
"return",
"decorate_with_validation",
"(",
"f",
",",
"arg_name",
",",
"*",
"validation_func",
",",
"*",
"*",
"kwargs",
")"
] | 67.464286 | 40.392857 |
def senses(self, bestonly=False):
"""Returns a list of all predicted senses"""
l = []
for word_id, senses,distance in self:
for sense, confidence in senses:
if not sense in l: l.append(sense)
if bestonly:
break
return l | [
"def",
"senses",
"(",
"self",
",",
"bestonly",
"=",
"False",
")",
":",
"l",
"=",
"[",
"]",
"for",
"word_id",
",",
"senses",
",",
"distance",
"in",
"self",
":",
"for",
"sense",
",",
"confidence",
"in",
"senses",
":",
"if",
"not",
"sense",
"in",
"l",
":",
"l",
".",
"append",
"(",
"sense",
")",
"if",
"bestonly",
":",
"break",
"return",
"l"
] | 34.111111 | 11.444444 |
def stream(self):
        """Iterate over the messages from the computation's output.

        Runs as a generator. Control messages (stream/job start, end of
        channel, abort, errors) are intercepted to update this Computation
        object's state; data, event, progress and metadata messages are
        yielded back to the caller. Data messages sharing the same logical
        timestamp are accumulated into one batch before being yielded.

        Raises:
            errors.ComputationAborted: if the channel is aborted.
            errors.ComputationFailed: if the computation reports errors.
        """
        iterator = iter(self._stream)
        # Keep pulling messages until the computation reaches a terminal state.
        while self._state < Computation.STATE_COMPLETED:
            try:
                message = next(iterator)
            except StopIteration:
                # Underlying stream ended but the computation isn't done:
                # re-execute to obtain a fresh stream and keep going.
                if self._state < Computation.STATE_COMPLETED:
                    self._stream = self._execute()
                    iterator = iter(self._stream)
                continue
            if isinstance(message, messages.StreamStartMessage):
                self._state = Computation.STATE_STREAM_STARTED
                continue
            if isinstance(message, messages.JobStartMessage):
                # Job started: remember its handle as this computation's id.
                self._state = Computation.STATE_COMPUTATION_STARTED
                self._id = message.handle
                yield message
                continue
            if isinstance(message, messages.JobProgressMessage):
                yield message
                continue
            if isinstance(message, messages.ChannelAbortMessage):
                self._state = Computation.STATE_ABORTED
                raise errors.ComputationAborted(message.abort_info)
            if isinstance(message, messages.EndOfChannelMessage):
                self._state = Computation.STATE_COMPLETED
                continue
            # Intercept metadata messages to accumulate received metadata...
            if isinstance(message, messages.MetadataMessage):
                self._metadata[message.tsid] = message.properties
                yield message
                continue
            # ...as well as expired-tsid messages to clean it up.
            if isinstance(message, messages.ExpiredTsIdMessage):
                if message.tsid in self._metadata:
                    del self._metadata[message.tsid]
                yield message
                continue
            if isinstance(message, messages.InfoMessage):
                # An info message ends a logical timestamp's run of batches,
                # so the expected batch count is now known; flush any batch
                # accumulated so far.
                self._process_info_message(message.message)
                self._batch_count_detected = True
                if self._current_batch_message:
                    yield self._get_batch_to_yield()
                continue
            # Accumulate data messages and release them when we have received
            # all batches for the same logical timestamp.
            if isinstance(message, messages.DataMessage):
                self._state = Computation.STATE_DATA_RECEIVED
                if not self._batch_count_detected:
                    # Still learning how many batches make up one timestamp.
                    self._expected_batches += 1
                if not self._current_batch_message:
                    self._current_batch_message = message
                    self._current_batch_count = 1
                elif (message.logical_timestamp_ms ==
                        self._current_batch_message.logical_timestamp_ms):
                    # Same logical timestamp: merge into the current batch.
                    self._current_batch_message.add_data(message.data)
                    self._current_batch_count += 1
                else:
                    # Timestamp changed: the previous count was a full batch.
                    self._batch_count_detected = True
                if (self._batch_count_detected and
                        self._current_batch_count == self._expected_batches):
                    yield self._get_batch_to_yield()
                continue
            if isinstance(message, messages.EventMessage):
                yield message
                continue
            if isinstance(message, messages.ErrorMessage):
                raise errors.ComputationFailed(message.errors)
        # Yield last batch, even if potentially incomplete.
        if self._current_batch_message:
            yield self._get_batch_to_yield()
"def",
"stream",
"(",
"self",
")",
":",
"iterator",
"=",
"iter",
"(",
"self",
".",
"_stream",
")",
"while",
"self",
".",
"_state",
"<",
"Computation",
".",
"STATE_COMPLETED",
":",
"try",
":",
"message",
"=",
"next",
"(",
"iterator",
")",
"except",
"StopIteration",
":",
"if",
"self",
".",
"_state",
"<",
"Computation",
".",
"STATE_COMPLETED",
":",
"self",
".",
"_stream",
"=",
"self",
".",
"_execute",
"(",
")",
"iterator",
"=",
"iter",
"(",
"self",
".",
"_stream",
")",
"continue",
"if",
"isinstance",
"(",
"message",
",",
"messages",
".",
"StreamStartMessage",
")",
":",
"self",
".",
"_state",
"=",
"Computation",
".",
"STATE_STREAM_STARTED",
"continue",
"if",
"isinstance",
"(",
"message",
",",
"messages",
".",
"JobStartMessage",
")",
":",
"self",
".",
"_state",
"=",
"Computation",
".",
"STATE_COMPUTATION_STARTED",
"self",
".",
"_id",
"=",
"message",
".",
"handle",
"yield",
"message",
"continue",
"if",
"isinstance",
"(",
"message",
",",
"messages",
".",
"JobProgressMessage",
")",
":",
"yield",
"message",
"continue",
"if",
"isinstance",
"(",
"message",
",",
"messages",
".",
"ChannelAbortMessage",
")",
":",
"self",
".",
"_state",
"=",
"Computation",
".",
"STATE_ABORTED",
"raise",
"errors",
".",
"ComputationAborted",
"(",
"message",
".",
"abort_info",
")",
"if",
"isinstance",
"(",
"message",
",",
"messages",
".",
"EndOfChannelMessage",
")",
":",
"self",
".",
"_state",
"=",
"Computation",
".",
"STATE_COMPLETED",
"continue",
"# Intercept metadata messages to accumulate received metadata...",
"if",
"isinstance",
"(",
"message",
",",
"messages",
".",
"MetadataMessage",
")",
":",
"self",
".",
"_metadata",
"[",
"message",
".",
"tsid",
"]",
"=",
"message",
".",
"properties",
"yield",
"message",
"continue",
"# ...as well as expired-tsid messages to clean it up.",
"if",
"isinstance",
"(",
"message",
",",
"messages",
".",
"ExpiredTsIdMessage",
")",
":",
"if",
"message",
".",
"tsid",
"in",
"self",
".",
"_metadata",
":",
"del",
"self",
".",
"_metadata",
"[",
"message",
".",
"tsid",
"]",
"yield",
"message",
"continue",
"if",
"isinstance",
"(",
"message",
",",
"messages",
".",
"InfoMessage",
")",
":",
"self",
".",
"_process_info_message",
"(",
"message",
".",
"message",
")",
"self",
".",
"_batch_count_detected",
"=",
"True",
"if",
"self",
".",
"_current_batch_message",
":",
"yield",
"self",
".",
"_get_batch_to_yield",
"(",
")",
"continue",
"# Accumulate data messages and release them when we have received",
"# all batches for the same logical timestamp.",
"if",
"isinstance",
"(",
"message",
",",
"messages",
".",
"DataMessage",
")",
":",
"self",
".",
"_state",
"=",
"Computation",
".",
"STATE_DATA_RECEIVED",
"if",
"not",
"self",
".",
"_batch_count_detected",
":",
"self",
".",
"_expected_batches",
"+=",
"1",
"if",
"not",
"self",
".",
"_current_batch_message",
":",
"self",
".",
"_current_batch_message",
"=",
"message",
"self",
".",
"_current_batch_count",
"=",
"1",
"elif",
"(",
"message",
".",
"logical_timestamp_ms",
"==",
"self",
".",
"_current_batch_message",
".",
"logical_timestamp_ms",
")",
":",
"self",
".",
"_current_batch_message",
".",
"add_data",
"(",
"message",
".",
"data",
")",
"self",
".",
"_current_batch_count",
"+=",
"1",
"else",
":",
"self",
".",
"_batch_count_detected",
"=",
"True",
"if",
"(",
"self",
".",
"_batch_count_detected",
"and",
"self",
".",
"_current_batch_count",
"==",
"self",
".",
"_expected_batches",
")",
":",
"yield",
"self",
".",
"_get_batch_to_yield",
"(",
")",
"continue",
"if",
"isinstance",
"(",
"message",
",",
"messages",
".",
"EventMessage",
")",
":",
"yield",
"message",
"continue",
"if",
"isinstance",
"(",
"message",
",",
"messages",
".",
"ErrorMessage",
")",
":",
"raise",
"errors",
".",
"ComputationFailed",
"(",
"message",
".",
"errors",
")",
"# Yield last batch, even if potentially incomplete.",
"if",
"self",
".",
"_current_batch_message",
":",
"yield",
"self",
".",
"_get_batch_to_yield",
"(",
")"
] | 39.946809 | 21.031915 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.