text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def destiny_addr_mode(pkt):
    """Return the size of the compressed destination ("destiny") address.

    The size is selected from the packet's M (multicast), DAC (destination
    address compression) and DAM (destination address mode) header bits.
    Reserved M/DAC/DAM combinations raise ``Exception('reserved')``.

    Keyword arguments:
    pkt -- packet object instance
    """
    multicast, stateful = pkt.m, pkt.dac
    if multicast == 0 and stateful == 0:
        # Stateless unicast: full, partial or elided inline address.
        return {0x0: 16, 0x1: 8, 0x2: 2}.get(pkt.dam, 0)
    if multicast == 0 and stateful == 1:
        # Context-based unicast: DAM 0x0 is reserved by the spec.
        if pkt.dam == 0x0:
            raise Exception('reserved')
        return {0x1: 8, 0x2: 2}.get(pkt.dam, 0)
    if multicast == 1 and stateful == 0:
        # Stateless multicast.
        return {0x0: 16, 0x1: 6, 0x2: 4, 0x3: 1}.get(pkt.dam)
    if multicast == 1 and stateful == 1:
        # Context-based multicast: only DAM 0x0 is defined.
        if pkt.dam == 0x0:
            return 6
        if pkt.dam in (0x1, 0x2, 0x3):
            raise Exception('reserved')
"def",
"destiny_addr_mode",
"(",
"pkt",
")",
":",
"if",
"pkt",
".",
"m",
"==",
"0",
"and",
"pkt",
".",
"dac",
"==",
"0",
":",
"if",
"pkt",
".",
"dam",
"==",
"0x0",
":",
"return",
"16",
"elif",
"pkt",
".",
"dam",
"==",
"0x1",
":",
"return",
"8",
"elif",
"pkt",
".",
"dam",
"==",
"0x2",
":",
"return",
"2",
"else",
":",
"return",
"0",
"elif",
"pkt",
".",
"m",
"==",
"0",
"and",
"pkt",
".",
"dac",
"==",
"1",
":",
"if",
"pkt",
".",
"dam",
"==",
"0x0",
":",
"raise",
"Exception",
"(",
"'reserved'",
")",
"elif",
"pkt",
".",
"dam",
"==",
"0x1",
":",
"return",
"8",
"elif",
"pkt",
".",
"dam",
"==",
"0x2",
":",
"return",
"2",
"else",
":",
"return",
"0",
"elif",
"pkt",
".",
"m",
"==",
"1",
"and",
"pkt",
".",
"dac",
"==",
"0",
":",
"if",
"pkt",
".",
"dam",
"==",
"0x0",
":",
"return",
"16",
"elif",
"pkt",
".",
"dam",
"==",
"0x1",
":",
"return",
"6",
"elif",
"pkt",
".",
"dam",
"==",
"0x2",
":",
"return",
"4",
"elif",
"pkt",
".",
"dam",
"==",
"0x3",
":",
"return",
"1",
"elif",
"pkt",
".",
"m",
"==",
"1",
"and",
"pkt",
".",
"dac",
"==",
"1",
":",
"if",
"pkt",
".",
"dam",
"==",
"0x0",
":",
"return",
"6",
"elif",
"pkt",
".",
"dam",
"==",
"0x1",
":",
"raise",
"Exception",
"(",
"'reserved'",
")",
"elif",
"pkt",
".",
"dam",
"==",
"0x2",
":",
"raise",
"Exception",
"(",
"'reserved'",
")",
"elif",
"pkt",
".",
"dam",
"==",
"0x3",
":",
"raise",
"Exception",
"(",
"'reserved'",
")"
] | 26.133333 | 14.422222 |
def reload(self, env=None, silent=None):  # pragma: no cover
    """Clean the current settings and re-run every loader.

    :param env: environment name forwarded to the loaders
    :param silent: silence-errors flag forwarded to the loaders
    """
    self.clean()
    self.execute_loaders(env, silent)
"def",
"reload",
"(",
"self",
",",
"env",
"=",
"None",
",",
"silent",
"=",
"None",
")",
":",
"# pragma: no cover",
"self",
".",
"clean",
"(",
")",
"self",
".",
"execute_loaders",
"(",
"env",
",",
"silent",
")"
] | 41 | 10.25 |
def _log_file_processing_stats(self, known_file_paths):
    """
    Log a table summarizing how DAG files are being processed.

    :param known_file_paths: a list of file paths that may contain Airflow
        DAG definitions
    :type known_file_paths: list[unicode]
    :return: None
    """
    # Columns reported for every known file:
    #   File Path    -- path to the file containing the DAG definition
    #   PID          -- pid of the processor handling the file (may be empty)
    #   Runtime      -- how long the current processor has been running (s)
    #   Last Runtime -- duration of the previous processing run (s)
    #   Last Run     -- when the file finished processing in the previous run
    headers = ["File Path", "PID", "Runtime", "Last Runtime", "Last Run"]
    rows = []
    for file_path in known_file_paths:
        last_runtime = self.get_last_runtime(file_path)
        # Derive a dotted metric name from the file name (path separators
        # are not valid in a stats key).
        base_name = os.path.basename(file_path)
        file_name = os.path.splitext(base_name)[0].replace(os.sep, '.')
        if last_runtime:
            Stats.gauge(
                'dag_processing.last_runtime.{}'.format(file_name),
                last_runtime
            )
        processor_pid = self.get_pid(file_path)
        processor_start_time = self.get_start_time(file_path)
        if processor_start_time:
            runtime = (timezone.utcnow() - processor_start_time).total_seconds()
        else:
            runtime = None
        last_run = self.get_last_finish_time(file_path)
        if last_run:
            Stats.gauge(
                'dag_processing.last_run.seconds_ago.{}'.format(file_name),
                (timezone.utcnow() - last_run).total_seconds()
            )
        rows.append((file_path, processor_pid, runtime, last_runtime, last_run))
    # Longest previous runtime last; None is coerced to 0.0 because python3
    # cannot order None against floats.
    rows.sort(key=lambda row: row[3] or 0.0)
    formatted_rows = [
        (file_path,
         pid,
         "{:.2f}s".format(runtime) if runtime else None,
         "{:.2f}s".format(last_runtime) if last_runtime else None,
         last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None)
        for file_path, pid, runtime, last_runtime, last_run in rows
    ]
    log_str = ("\n" +
               "=" * 80 +
               "\n" +
               "DAG File Processing Stats\n\n" +
               tabulate(formatted_rows, headers=headers) +
               "\n" +
               "=" * 80)
    self.log.info(log_str)
"def",
"_log_file_processing_stats",
"(",
"self",
",",
"known_file_paths",
")",
":",
"# File Path: Path to the file containing the DAG definition",
"# PID: PID associated with the process that's processing the file. May",
"# be empty.",
"# Runtime: If the process is currently running, how long it's been",
"# running for in seconds.",
"# Last Runtime: If the process ran before, how long did it take to",
"# finish in seconds",
"# Last Run: When the file finished processing in the previous run.",
"headers",
"=",
"[",
"\"File Path\"",
",",
"\"PID\"",
",",
"\"Runtime\"",
",",
"\"Last Runtime\"",
",",
"\"Last Run\"",
"]",
"rows",
"=",
"[",
"]",
"for",
"file_path",
"in",
"known_file_paths",
":",
"last_runtime",
"=",
"self",
".",
"get_last_runtime",
"(",
"file_path",
")",
"file_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"file_path",
")",
"file_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_name",
")",
"[",
"0",
"]",
".",
"replace",
"(",
"os",
".",
"sep",
",",
"'.'",
")",
"if",
"last_runtime",
":",
"Stats",
".",
"gauge",
"(",
"'dag_processing.last_runtime.{}'",
".",
"format",
"(",
"file_name",
")",
",",
"last_runtime",
")",
"processor_pid",
"=",
"self",
".",
"get_pid",
"(",
"file_path",
")",
"processor_start_time",
"=",
"self",
".",
"get_start_time",
"(",
"file_path",
")",
"runtime",
"=",
"(",
"(",
"timezone",
".",
"utcnow",
"(",
")",
"-",
"processor_start_time",
")",
".",
"total_seconds",
"(",
")",
"if",
"processor_start_time",
"else",
"None",
")",
"last_run",
"=",
"self",
".",
"get_last_finish_time",
"(",
"file_path",
")",
"if",
"last_run",
":",
"seconds_ago",
"=",
"(",
"timezone",
".",
"utcnow",
"(",
")",
"-",
"last_run",
")",
".",
"total_seconds",
"(",
")",
"Stats",
".",
"gauge",
"(",
"'dag_processing.last_run.seconds_ago.{}'",
".",
"format",
"(",
"file_name",
")",
",",
"seconds_ago",
")",
"rows",
".",
"append",
"(",
"(",
"file_path",
",",
"processor_pid",
",",
"runtime",
",",
"last_runtime",
",",
"last_run",
")",
")",
"# Sort by longest last runtime. (Can't sort None values in python3)",
"rows",
"=",
"sorted",
"(",
"rows",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"3",
"]",
"or",
"0.0",
")",
"formatted_rows",
"=",
"[",
"]",
"for",
"file_path",
",",
"pid",
",",
"runtime",
",",
"last_runtime",
",",
"last_run",
"in",
"rows",
":",
"formatted_rows",
".",
"append",
"(",
"(",
"file_path",
",",
"pid",
",",
"\"{:.2f}s\"",
".",
"format",
"(",
"runtime",
")",
"if",
"runtime",
"else",
"None",
",",
"\"{:.2f}s\"",
".",
"format",
"(",
"last_runtime",
")",
"if",
"last_runtime",
"else",
"None",
",",
"last_run",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%S\"",
")",
"if",
"last_run",
"else",
"None",
")",
")",
"log_str",
"=",
"(",
"\"\\n\"",
"+",
"\"=\"",
"*",
"80",
"+",
"\"\\n\"",
"+",
"\"DAG File Processing Stats\\n\\n\"",
"+",
"tabulate",
"(",
"formatted_rows",
",",
"headers",
"=",
"headers",
")",
"+",
"\"\\n\"",
"+",
"\"=\"",
"*",
"80",
")",
"self",
".",
"log",
".",
"info",
"(",
"log_str",
")"
] | 40.4 | 19.68 |
def create_session_config(log_device_placement=False,
                          enable_graph_rewriter=False,
                          gpu_mem_fraction=0.95,
                          use_tpu=False,
                          xla_jit_level=tf.OptimizerOptions.OFF,
                          inter_op_parallelism_threads=0,
                          intra_op_parallelism_threads=0):
  """Build the tf.ConfigProto used for TensorFlow sessions.

  Args:
    log_device_placement: whether to log op/device assignments.
    enable_graph_rewriter: turn on the layout-optimizer graph rewriter.
    gpu_mem_fraction: per-process GPU memory fraction to reserve.
    use_tpu: if True, use a plain GraphOptions with no optimizer tweaks.
    xla_jit_level: global XLA JIT level for the optimizer options.
    inter_op_parallelism_threads: inter-op thread pool size (0 = default).
    intra_op_parallelism_threads: intra-op thread pool size (0 = default).

  Returns:
    A configured tf.ConfigProto.
  """
  if use_tpu:
    # TPU sessions get an untweaked graph configuration.
    graph_options = tf.GraphOptions()
  elif enable_graph_rewriter:
    rewrite_options = rewriter_config_pb2.RewriterConfig()
    rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
    graph_options = tf.GraphOptions(rewrite_options=rewrite_options)
  else:
    graph_options = tf.GraphOptions(
        optimizer_options=tf.OptimizerOptions(
            opt_level=tf.OptimizerOptions.L1,
            do_function_inlining=False,
            global_jit_level=xla_jit_level))
  gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction)
  return tf.ConfigProto(
      allow_soft_placement=True,
      graph_options=graph_options,
      gpu_options=gpu_options,
      log_device_placement=log_device_placement,
      inter_op_parallelism_threads=inter_op_parallelism_threads,
      intra_op_parallelism_threads=intra_op_parallelism_threads,
      isolate_session_state=True)
"def",
"create_session_config",
"(",
"log_device_placement",
"=",
"False",
",",
"enable_graph_rewriter",
"=",
"False",
",",
"gpu_mem_fraction",
"=",
"0.95",
",",
"use_tpu",
"=",
"False",
",",
"xla_jit_level",
"=",
"tf",
".",
"OptimizerOptions",
".",
"OFF",
",",
"inter_op_parallelism_threads",
"=",
"0",
",",
"intra_op_parallelism_threads",
"=",
"0",
")",
":",
"if",
"use_tpu",
":",
"graph_options",
"=",
"tf",
".",
"GraphOptions",
"(",
")",
"else",
":",
"if",
"enable_graph_rewriter",
":",
"rewrite_options",
"=",
"rewriter_config_pb2",
".",
"RewriterConfig",
"(",
")",
"rewrite_options",
".",
"layout_optimizer",
"=",
"rewriter_config_pb2",
".",
"RewriterConfig",
".",
"ON",
"graph_options",
"=",
"tf",
".",
"GraphOptions",
"(",
"rewrite_options",
"=",
"rewrite_options",
")",
"else",
":",
"graph_options",
"=",
"tf",
".",
"GraphOptions",
"(",
"optimizer_options",
"=",
"tf",
".",
"OptimizerOptions",
"(",
"opt_level",
"=",
"tf",
".",
"OptimizerOptions",
".",
"L1",
",",
"do_function_inlining",
"=",
"False",
",",
"global_jit_level",
"=",
"xla_jit_level",
")",
")",
"gpu_options",
"=",
"tf",
".",
"GPUOptions",
"(",
"per_process_gpu_memory_fraction",
"=",
"gpu_mem_fraction",
")",
"config",
"=",
"tf",
".",
"ConfigProto",
"(",
"allow_soft_placement",
"=",
"True",
",",
"graph_options",
"=",
"graph_options",
",",
"gpu_options",
"=",
"gpu_options",
",",
"log_device_placement",
"=",
"log_device_placement",
",",
"inter_op_parallelism_threads",
"=",
"inter_op_parallelism_threads",
",",
"intra_op_parallelism_threads",
"=",
"intra_op_parallelism_threads",
",",
"isolate_session_state",
"=",
"True",
")",
"return",
"config"
] | 41.424242 | 16.848485 |
def cep(numero):
    """Validate a CEP (Brazilian postal code) number.

    The number must be given as a string of exactly 8 numeric digits;
    :exc:`NumeroCEPError` is raised when it is invalid.

    .. warning::
        Any string containing 8 digits is considered a valid CEP, as long
        as the digits are not all identical.
    """
    so_digitos = digitos(numero)
    if len(so_digitos) != 8 or len(numero) != 8:
        raise NumeroCEPError('CEP "%s" nao possui 8 digitos' % numero)
    if so_digitos == so_digitos[0] * 8:
        # All eight digits identical -> rejected as an obvious dummy value.
        raise NumeroCEPError('CEP "%s" considerado invalido' % numero)
"def",
"cep",
"(",
"numero",
")",
":",
"_digitos",
"=",
"digitos",
"(",
"numero",
")",
"if",
"len",
"(",
"_digitos",
")",
"!=",
"8",
"or",
"len",
"(",
"numero",
")",
"!=",
"8",
":",
"raise",
"NumeroCEPError",
"(",
"'CEP \"%s\" nao possui 8 digitos'",
"%",
"numero",
")",
"elif",
"_digitos",
"[",
"0",
"]",
"*",
"8",
"==",
"_digitos",
":",
"raise",
"NumeroCEPError",
"(",
"'CEP \"%s\" considerado invalido'",
"%",
"numero",
")"
] | 34.5 | 23.277778 |
def pomodoro(self):
    """
    Build the status-bar response for the pomodoro timer.

    Computes the remaining time, renders the configured format string and
    decorates the response with urgency and colour information.
    """
    if not self._initialized:
        self._init()
    cached_until = self.py3.time_in(0)
    if self._running:
        # Refresh the countdown from the wall clock while the timer runs.
        self._time_left = ceil(self._end_time - time())
    time_left = ceil(self._time_left)
    vals = {"ss": int(time_left), "mm": int(ceil(time_left / 60))}
    if self.py3.format_contains(self.format, "mmss"):
        hours, rest = divmod(time_left, 3600)
        mins, seconds = divmod(rest, 60)
        if hours:
            vals["mmss"] = u"%d%s%02d%s%02d" % (
                hours,
                self.format_separator,
                mins,
                self.format_separator,
                seconds,
            )
        else:
            vals["mmss"] = u"%d%s%02d" % (mins, self.format_separator, seconds)
    if self.py3.format_contains(self.format, "bar"):
        vals["bar"] = self._setup_bar()
    formatted = self.format.format(**vals)
    if self._running:
        fmt = self.format_active if self._active else self.format_break
    else:
        fmt = self.format_stopped if self._active else self.format_break_stopped
        # Nothing changes while stopped, so cache indefinitely.
        cached_until = self.py3.CACHE_FOREVER
    response = {
        "full_text": fmt.format(
            breakno=self._break_number, format=formatted, **vals
        ),
        "cached_until": cached_until,
    }
    if self._alert:
        # One-shot urgency flag, cleared once reported.
        response["urgent"] = True
        self._alert = False
    if self._running:
        if self._active:
            response["color"] = self.py3.COLOR_GOOD
        else:
            response["color"] = self.py3.COLOR_DEGRADED
    else:
        response["color"] = self.py3.COLOR_BAD
    return response
"def",
"pomodoro",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_initialized",
":",
"self",
".",
"_init",
"(",
")",
"cached_until",
"=",
"self",
".",
"py3",
".",
"time_in",
"(",
"0",
")",
"if",
"self",
".",
"_running",
":",
"self",
".",
"_time_left",
"=",
"ceil",
"(",
"self",
".",
"_end_time",
"-",
"time",
"(",
")",
")",
"time_left",
"=",
"ceil",
"(",
"self",
".",
"_time_left",
")",
"else",
":",
"time_left",
"=",
"ceil",
"(",
"self",
".",
"_time_left",
")",
"vals",
"=",
"{",
"\"ss\"",
":",
"int",
"(",
"time_left",
")",
",",
"\"mm\"",
":",
"int",
"(",
"ceil",
"(",
"time_left",
"/",
"60",
")",
")",
"}",
"if",
"self",
".",
"py3",
".",
"format_contains",
"(",
"self",
".",
"format",
",",
"\"mmss\"",
")",
":",
"hours",
",",
"rest",
"=",
"divmod",
"(",
"time_left",
",",
"3600",
")",
"mins",
",",
"seconds",
"=",
"divmod",
"(",
"rest",
",",
"60",
")",
"if",
"hours",
":",
"vals",
"[",
"\"mmss\"",
"]",
"=",
"u\"%d%s%02d%s%02d\"",
"%",
"(",
"hours",
",",
"self",
".",
"format_separator",
",",
"mins",
",",
"self",
".",
"format_separator",
",",
"seconds",
",",
")",
"else",
":",
"vals",
"[",
"\"mmss\"",
"]",
"=",
"u\"%d%s%02d\"",
"%",
"(",
"mins",
",",
"self",
".",
"format_separator",
",",
"seconds",
")",
"if",
"self",
".",
"py3",
".",
"format_contains",
"(",
"self",
".",
"format",
",",
"\"bar\"",
")",
":",
"vals",
"[",
"\"bar\"",
"]",
"=",
"self",
".",
"_setup_bar",
"(",
")",
"formatted",
"=",
"self",
".",
"format",
".",
"format",
"(",
"*",
"*",
"vals",
")",
"if",
"self",
".",
"_running",
":",
"if",
"self",
".",
"_active",
":",
"format",
"=",
"self",
".",
"format_active",
"else",
":",
"format",
"=",
"self",
".",
"format_break",
"else",
":",
"if",
"self",
".",
"_active",
":",
"format",
"=",
"self",
".",
"format_stopped",
"else",
":",
"format",
"=",
"self",
".",
"format_break_stopped",
"cached_until",
"=",
"self",
".",
"py3",
".",
"CACHE_FOREVER",
"response",
"=",
"{",
"\"full_text\"",
":",
"format",
".",
"format",
"(",
"breakno",
"=",
"self",
".",
"_break_number",
",",
"format",
"=",
"formatted",
",",
"*",
"*",
"vals",
")",
",",
"\"cached_until\"",
":",
"cached_until",
",",
"}",
"if",
"self",
".",
"_alert",
":",
"response",
"[",
"\"urgent\"",
"]",
"=",
"True",
"self",
".",
"_alert",
"=",
"False",
"if",
"not",
"self",
".",
"_running",
":",
"response",
"[",
"\"color\"",
"]",
"=",
"self",
".",
"py3",
".",
"COLOR_BAD",
"else",
":",
"if",
"self",
".",
"_active",
":",
"response",
"[",
"\"color\"",
"]",
"=",
"self",
".",
"py3",
".",
"COLOR_GOOD",
"else",
":",
"response",
"[",
"\"color\"",
"]",
"=",
"self",
".",
"py3",
".",
"COLOR_DEGRADED",
"return",
"response"
] | 29.485294 | 18.014706 |
def save_model(self, file_name='model.cx'):
    """Save the assembled CX network to a file.

    Parameters
    ----------
    file_name : Optional[str]
        The name of the file to write the CX network to. Default: model.cx
    """
    with open(file_name, 'wt') as out:
        out.write(self.print_cx())
"def",
"save_model",
"(",
"self",
",",
"file_name",
"=",
"'model.cx'",
")",
":",
"with",
"open",
"(",
"file_name",
",",
"'wt'",
")",
"as",
"fh",
":",
"cx_str",
"=",
"self",
".",
"print_cx",
"(",
")",
"fh",
".",
"write",
"(",
"cx_str",
")"
] | 32.363636 | 13.454545 |
def insert(self, _values=None, **values):
    """
    Insert one or more new records into the database.

    :param _values: The record (dict) or records (list of dicts) to insert
    :type _values: dict or list
    :param values: Record values given as keyword arguments
    :type values: dict
    :return: The result
    :rtype: bool
    """
    if not _values and not values:
        # Nothing to insert is treated as a trivially successful insert.
        return True
    if isinstance(_values, list):
        records = _values
    else:
        if _values is not None:
            values.update(_values)
        records = [values]
    # Normalize column order so every record binds in the same sequence.
    for index, record in enumerate(records):
        records[index] = OrderedDict(sorted(record.items()))
    bindings = [value for record in records for value in record.values()]
    sql = self._grammar.compile_insert(self, records)
    return self._connection.insert(sql, self._clean_bindings(bindings))
"def",
"insert",
"(",
"self",
",",
"_values",
"=",
"None",
",",
"*",
"*",
"values",
")",
":",
"if",
"not",
"values",
"and",
"not",
"_values",
":",
"return",
"True",
"if",
"not",
"isinstance",
"(",
"_values",
",",
"list",
")",
":",
"if",
"_values",
"is",
"not",
"None",
":",
"values",
".",
"update",
"(",
"_values",
")",
"values",
"=",
"[",
"values",
"]",
"else",
":",
"values",
"=",
"_values",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"values",
")",
":",
"values",
"[",
"i",
"]",
"=",
"OrderedDict",
"(",
"sorted",
"(",
"value",
".",
"items",
"(",
")",
")",
")",
"bindings",
"=",
"[",
"]",
"for",
"record",
"in",
"values",
":",
"for",
"value",
"in",
"record",
".",
"values",
"(",
")",
":",
"bindings",
".",
"append",
"(",
"value",
")",
"sql",
"=",
"self",
".",
"_grammar",
".",
"compile_insert",
"(",
"self",
",",
"values",
")",
"bindings",
"=",
"self",
".",
"_clean_bindings",
"(",
"bindings",
")",
"return",
"self",
".",
"_connection",
".",
"insert",
"(",
"sql",
",",
"bindings",
")"
] | 26.108108 | 17.945946 |
def attrsignal(descriptor, signal_name, *, defer=False):
    """
    Connect the decorated (coroutine) method to the named signal on a
    descriptor.

    :param descriptor: The descriptor to connect to.
    :type descriptor: :class:`Descriptor` subclass.
    :param signal_name: Attribute name of the signal to connect to
    :type signal_name: :class:`str`
    :param defer: Whether deferred execution of the decorated method is
        desired.
    :type defer: :class:`bool`

    The signal is looked up as attribute `signal_name` on the
    :attr:`~Descriptor.value_type` of `descriptor`; at service
    instantiation the descriptor's value provides the signal and the
    decorated object is connected to it.

    For a :class:`.callbacks.Signal`, `defer` false connects in the default
    :attr:`~.callbacks.AdHocSignal.STRONG` mode; `defer` true uses
    :attr:`~.callbacks.AdHocSignal.SPAWN_WITH_LOOP` for coroutine functions
    and :attr:`~.callbacks.AdHocSignal.ASYNC_WITH_LOOP` otherwise, with the
    default asyncio event loop.

    For a :class:`.callbacks.SyncSignal`, `defer` must be false and the
    decorated object must be a coroutine function.

    .. versionadded:: 0.9
    """
    def wrap(f):
        add_handler_spec(f, _attrsignal_spec(descriptor, signal_name, f, defer))
        return f
    return wrap
"def",
"attrsignal",
"(",
"descriptor",
",",
"signal_name",
",",
"*",
",",
"defer",
"=",
"False",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"add_handler_spec",
"(",
"f",
",",
"_attrsignal_spec",
"(",
"descriptor",
",",
"signal_name",
",",
"f",
",",
"defer",
")",
")",
"return",
"f",
"return",
"decorator"
] | 40.333333 | 25.571429 |
def set_runtime_value_int(self, ihcid: int, value: int) -> bool:
    """Set an integer runtime value, re-authenticating once on failure."""
    if not self.client.set_runtime_value_int(ihcid, value):
        # First attempt failed -- refresh the session and retry once.
        self.re_authenticate()
        return self.client.set_runtime_value_int(ihcid, value)
    return True
"def",
"set_runtime_value_int",
"(",
"self",
",",
"ihcid",
":",
"int",
",",
"value",
":",
"int",
")",
"->",
"bool",
":",
"if",
"self",
".",
"client",
".",
"set_runtime_value_int",
"(",
"ihcid",
",",
"value",
")",
":",
"return",
"True",
"self",
".",
"re_authenticate",
"(",
")",
"return",
"self",
".",
"client",
".",
"set_runtime_value_int",
"(",
"ihcid",
",",
"value",
")"
] | 51.5 | 15.333333 |
def shuffle_song(
    self, song, *, num_songs=100, only_library=False, recently_played=None
):
    """Get a listing of song shuffle/mix songs.

    Parameters:
        song (dict): A song dict.
        num_songs (int, Optional): Maximum number of songs to return from
            the station. Default: ``100``
        only_library (bool, Optional): Only return content from library.
            Default: False
        recently_played (list, Optional): Dicts of the form
            {'id': '', 'type'} where ``id`` is a song ID and ``type`` is
            0 for a library song and 1 for a store song.

    Returns:
        list: List of artist shuffle/mix songs.
    """
    # Store tracks and library tracks seed the station differently.
    if 'storeId' in song:
        seed = {
            'trackId': song['storeId'],
            'seedType': StationSeedType.store_track.value
        }
    else:
        seed = {
            'trackLockerId': song['id'],
            'seedType': StationSeedType.library_track.value
        }
    station_info = {
        'num_entries': num_songs,
        'library_content_only': only_library,
        'seed': seed
    }
    if recently_played is not None:
        station_info['recently_played'] = recently_played
    response = self._call(mc_calls.RadioStationFeed, station_infos=[station_info])
    stations = response.body.get('data', {}).get('stations', [])
    station = stations[0] if stations else {}
    return station.get('tracks', [])
"def",
"shuffle_song",
"(",
"self",
",",
"song",
",",
"*",
",",
"num_songs",
"=",
"100",
",",
"only_library",
"=",
"False",
",",
"recently_played",
"=",
"None",
")",
":",
"station_info",
"=",
"{",
"'num_entries'",
":",
"num_songs",
",",
"'library_content_only'",
":",
"only_library",
"}",
"if",
"'storeId'",
"in",
"song",
":",
"station_info",
"[",
"'seed'",
"]",
"=",
"{",
"'trackId'",
":",
"song",
"[",
"'storeId'",
"]",
",",
"'seedType'",
":",
"StationSeedType",
".",
"store_track",
".",
"value",
"}",
"else",
":",
"station_info",
"[",
"'seed'",
"]",
"=",
"{",
"'trackLockerId'",
":",
"song",
"[",
"'id'",
"]",
",",
"'seedType'",
":",
"StationSeedType",
".",
"library_track",
".",
"value",
"}",
"if",
"recently_played",
"is",
"not",
"None",
":",
"station_info",
"[",
"'recently_played'",
"]",
"=",
"recently_played",
"response",
"=",
"self",
".",
"_call",
"(",
"mc_calls",
".",
"RadioStationFeed",
",",
"station_infos",
"=",
"[",
"station_info",
"]",
")",
"station_feed",
"=",
"response",
".",
"body",
".",
"get",
"(",
"'data'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'stations'",
",",
"[",
"]",
")",
"try",
":",
"station",
"=",
"station_feed",
"[",
"0",
"]",
"except",
"IndexError",
":",
"station",
"=",
"{",
"}",
"return",
"station",
".",
"get",
"(",
"'tracks'",
",",
"[",
"]",
")"
] | 27.782609 | 24.608696 |
def RGB_to_XYZ(cobj, target_illuminant=None, *args, **kwargs):
    """
    Convert an RGB color object to XYZ.

    Based off of: http://www.brucelindbloom.com/index.html?Eqn_RGB_to_XYZ.html
    """
    # Undo the gamma companding to obtain linear RGB channels.
    linear = {}
    if isinstance(cobj, sRGBColor):
        # sRGB uses a piecewise companding function.
        for chan in ('r', 'g', 'b'):
            val = getattr(cobj, 'rgb_' + chan)
            if val <= 0.04045:
                linear[chan] = val / 12.92
            else:
                linear[chan] = math.pow((val + 0.055) / 1.055, 2.4)
    elif isinstance(cobj, BT2020Color):
        # BT.2020 constants differ between 10- and 12-bit systems.
        if kwargs.get('is_12_bits_system'):
            alpha, beta, cutoff = 1.0993, 0.0181, 0.081697877417347
        else:
            alpha, beta, cutoff = 1.099, 0.018, 0.08124794403514049
        for chan in ('r', 'g', 'b'):
            val = getattr(cobj, 'rgb_' + chan)
            if val <= cutoff:
                linear[chan] = val / 4.5
            else:
                linear[chan] = math.pow((val + (alpha - 1)) / alpha, 1 / 0.45)
    else:
        # Any other RGB space: plain power-law gamma.
        gamma = cobj.rgb_gamma
        for chan in ('r', 'g', 'b'):
            linear[chan] = math.pow(getattr(cobj, 'rgb_' + chan), gamma)
    # Matrix-multiply the linear channels into XYZ for this working space.
    xyz_x, xyz_y, xyz_z = apply_RGB_matrix(
        linear['r'], linear['g'], linear['b'],
        rgb_type=cobj, convtype="rgb_to_xyz")
    if target_illuminant is None:
        target_illuminant = cobj.native_illuminant
    # Start from the RGB space's native illuminant; apply_adaptation then
    # converts to the requested target illuminant if they differ.
    xyzcolor = XYZColor(xyz_x, xyz_y, xyz_z, illuminant=cobj.native_illuminant)
    xyzcolor.apply_adaptation(target_illuminant)
    return xyzcolor
"def",
"RGB_to_XYZ",
"(",
"cobj",
",",
"target_illuminant",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Will contain linearized RGB channels (removed the gamma func).",
"linear_channels",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"cobj",
",",
"sRGBColor",
")",
":",
"for",
"channel",
"in",
"[",
"'r'",
",",
"'g'",
",",
"'b'",
"]",
":",
"V",
"=",
"getattr",
"(",
"cobj",
",",
"'rgb_'",
"+",
"channel",
")",
"if",
"V",
"<=",
"0.04045",
":",
"linear_channels",
"[",
"channel",
"]",
"=",
"V",
"/",
"12.92",
"else",
":",
"linear_channels",
"[",
"channel",
"]",
"=",
"math",
".",
"pow",
"(",
"(",
"V",
"+",
"0.055",
")",
"/",
"1.055",
",",
"2.4",
")",
"elif",
"isinstance",
"(",
"cobj",
",",
"BT2020Color",
")",
":",
"if",
"kwargs",
".",
"get",
"(",
"'is_12_bits_system'",
")",
":",
"a",
",",
"b",
",",
"c",
"=",
"1.0993",
",",
"0.0181",
",",
"0.081697877417347",
"else",
":",
"a",
",",
"b",
",",
"c",
"=",
"1.099",
",",
"0.018",
",",
"0.08124794403514049",
"for",
"channel",
"in",
"[",
"'r'",
",",
"'g'",
",",
"'b'",
"]",
":",
"V",
"=",
"getattr",
"(",
"cobj",
",",
"'rgb_'",
"+",
"channel",
")",
"if",
"V",
"<=",
"c",
":",
"linear_channels",
"[",
"channel",
"]",
"=",
"V",
"/",
"4.5",
"else",
":",
"linear_channels",
"[",
"channel",
"]",
"=",
"math",
".",
"pow",
"(",
"(",
"V",
"+",
"(",
"a",
"-",
"1",
")",
")",
"/",
"a",
",",
"1",
"/",
"0.45",
")",
"else",
":",
"# If it's not sRGB...",
"gamma",
"=",
"cobj",
".",
"rgb_gamma",
"for",
"channel",
"in",
"[",
"'r'",
",",
"'g'",
",",
"'b'",
"]",
":",
"V",
"=",
"getattr",
"(",
"cobj",
",",
"'rgb_'",
"+",
"channel",
")",
"linear_channels",
"[",
"channel",
"]",
"=",
"math",
".",
"pow",
"(",
"V",
",",
"gamma",
")",
"# Apply an RGB working space matrix to the XYZ values (matrix mul).",
"xyz_x",
",",
"xyz_y",
",",
"xyz_z",
"=",
"apply_RGB_matrix",
"(",
"linear_channels",
"[",
"'r'",
"]",
",",
"linear_channels",
"[",
"'g'",
"]",
",",
"linear_channels",
"[",
"'b'",
"]",
",",
"rgb_type",
"=",
"cobj",
",",
"convtype",
"=",
"\"rgb_to_xyz\"",
")",
"if",
"target_illuminant",
"is",
"None",
":",
"target_illuminant",
"=",
"cobj",
".",
"native_illuminant",
"# The illuminant of the original RGB object. This will always match",
"# the RGB colorspace's native illuminant.",
"illuminant",
"=",
"cobj",
".",
"native_illuminant",
"xyzcolor",
"=",
"XYZColor",
"(",
"xyz_x",
",",
"xyz_y",
",",
"xyz_z",
",",
"illuminant",
"=",
"illuminant",
")",
"# This will take care of any illuminant changes for us (if source",
"# illuminant != target illuminant).",
"xyzcolor",
".",
"apply_adaptation",
"(",
"target_illuminant",
")",
"return",
"xyzcolor"
] | 38 | 18.115385 |
def delete(self):
    """
    Delete this document (and all of its bundles) from ProvStore.

    .. warning::
        This operation cannot be undone.

    :return: True on success
    """
    if self.abstract:
        # An abstract document has no server-side identity to delete.
        raise AbstractDocumentException()
    self._api.delete_document(self.id)
    self._id = None
    return True
"def",
"delete",
"(",
"self",
")",
":",
"if",
"self",
".",
"abstract",
":",
"raise",
"AbstractDocumentException",
"(",
")",
"self",
".",
"_api",
".",
"delete_document",
"(",
"self",
".",
"id",
")",
"self",
".",
"_id",
"=",
"None",
"return",
"True"
] | 21.928571 | 18.642857 |
def set_taker(self, resource_id):
    """Sets the resource who will be taking this assessment.

    arg:    resource_id (osid.id.Id): the resource Id
    raise:  InvalidArgument - ``resource_id`` is invalid
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Mirrors osid.resource.ResourceForm.set_avatar_template.
    metadata = self.get_taker_metadata()
    if metadata.is_read_only():
        raise errors.NoAccess()
    if not self._is_valid_id(resource_id):
        raise errors.InvalidArgument()
    self._my_map['takerId'] = str(resource_id)
"def",
"set_taker",
"(",
"self",
",",
"resource_id",
")",
":",
"# Implemented from template for osid.resource.ResourceForm.set_avatar_template",
"if",
"self",
".",
"get_taker_metadata",
"(",
")",
".",
"is_read_only",
"(",
")",
":",
"raise",
"errors",
".",
"NoAccess",
"(",
")",
"if",
"not",
"self",
".",
"_is_valid_id",
"(",
"resource_id",
")",
":",
"raise",
"errors",
".",
"InvalidArgument",
"(",
")",
"self",
".",
"_my_map",
"[",
"'takerId'",
"]",
"=",
"str",
"(",
"resource_id",
")"
] | 44.466667 | 17.066667 |
def process_view(self, request, view_func, view_args, view_kwargs):
    """
    Collect profiling data on class-based views.
    """
    # Purge data cached from the previous request.  Python 3's keys() is a
    # live view, so snapshot it before deleting entries.
    for key in list(VIEW_METHOD_DATA.keys()):
        del VIEW_METHOD_DATA[key]
    self.view_data = {}
    klass = getattr(view_func, 'view_class', False)
    if not klass:
        # Plain function-based view: nothing further to collect.
        return
    self.view_data['cbv'] = True
    self.view_data['bases'] = [base.__name__ for base in inspect.getmro(klass)]
    # Wrap the interesting view methods with the data-collecting decorator.
    # NOTE(review): VIEW_METHOD_WHITEIST looks like a typo of "WHITELIST",
    # but the name is defined elsewhere in this module, so it is kept.
    for name, _member in inspect.getmembers(klass):
        # Only capture whitelisted methods, and never decorate one twice.
        if name in VIEW_METHOD_WHITEIST and name not in PATCHED_METHODS[klass]:
            decorate_method(klass, name)
            PATCHED_METHODS[klass].append(name)
"def",
"process_view",
"(",
"self",
",",
"request",
",",
"view_func",
",",
"view_args",
",",
"view_kwargs",
")",
":",
"# Purge data in view method cache",
"# Python 3's keys() method returns an iterator, so force evaluation before iterating.",
"view_keys",
"=",
"list",
"(",
"VIEW_METHOD_DATA",
".",
"keys",
"(",
")",
")",
"for",
"key",
"in",
"view_keys",
":",
"del",
"VIEW_METHOD_DATA",
"[",
"key",
"]",
"self",
".",
"view_data",
"=",
"{",
"}",
"try",
":",
"cbv",
"=",
"view_func",
".",
"view_class",
"except",
"AttributeError",
":",
"cbv",
"=",
"False",
"if",
"cbv",
":",
"self",
".",
"view_data",
"[",
"'cbv'",
"]",
"=",
"True",
"klass",
"=",
"view_func",
".",
"view_class",
"self",
".",
"view_data",
"[",
"'bases'",
"]",
"=",
"[",
"base",
".",
"__name__",
"for",
"base",
"in",
"inspect",
".",
"getmro",
"(",
"klass",
")",
"]",
"# Inject with drugz",
"for",
"member",
"in",
"inspect",
".",
"getmembers",
"(",
"view_func",
".",
"view_class",
")",
":",
"# Check that we are interested in capturing data for this method",
"# and ensure that a decorated method is not decorated multiple times.",
"if",
"member",
"[",
"0",
"]",
"in",
"VIEW_METHOD_WHITEIST",
"and",
"member",
"[",
"0",
"]",
"not",
"in",
"PATCHED_METHODS",
"[",
"klass",
"]",
":",
"decorate_method",
"(",
"klass",
",",
"member",
"[",
"0",
"]",
")",
"PATCHED_METHODS",
"[",
"klass",
"]",
".",
"append",
"(",
"member",
"[",
"0",
"]",
")"
] | 37.548387 | 22.516129 |
def insert(self, index, key, value):
    """Inserts the key, value pair before the item with the given index."""
    if key in self.keyOrder:
        existing = self.keyOrder.index(key)
        self.keyOrder.pop(existing)
        # Removing an earlier entry shifts the target position left by one.
        if existing < index:
            index -= 1
    self.keyOrder.insert(index, key)
    super(SortedDict, self).__setitem__(key, value)
"def",
"insert",
"(",
"self",
",",
"index",
",",
"key",
",",
"value",
")",
":",
"if",
"key",
"in",
"self",
".",
"keyOrder",
":",
"n",
"=",
"self",
".",
"keyOrder",
".",
"index",
"(",
"key",
")",
"del",
"self",
".",
"keyOrder",
"[",
"n",
"]",
"if",
"n",
"<",
"index",
":",
"index",
"-=",
"1",
"self",
".",
"keyOrder",
".",
"insert",
"(",
"index",
",",
"key",
")",
"super",
"(",
"SortedDict",
",",
"self",
")",
".",
"__setitem__",
"(",
"key",
",",
"value",
")"
] | 40.555556 | 7.111111 |
def largestNativeClique(self, max_chain_length=None):
"""Returns the largest native clique embedding we can find on the
processor, with the shortest chainlength possible (for that clique
size). If possible, returns a uniform choice among all largest
cliques.
INPUTS:
max_chain_length (int): longest chain length to consider or ``None`` if chain
lengths are allowed to be unbounded. (default: ``None``)
OUTPUT:
clique (list): a list containing lists of qubits, each associated to a
chain. These lists of qubits are carefully arranged so that
>>> [zip(chain,chain[1:]) for chain in clique]
is a list of valid couplers.
Note: this fails to return a uniform choice if there are broken
intra-cell couplers between working qubits. (the choice is
uniform on a particular subprocessor)
"""
def f(x):
return x.largestNativeClique(max_chain_length=max_chain_length)
objective = self._objective_bestscore
return self._translate(self._map_to_processors(f, objective)) | [
"def",
"largestNativeClique",
"(",
"self",
",",
"max_chain_length",
"=",
"None",
")",
":",
"def",
"f",
"(",
"x",
")",
":",
"return",
"x",
".",
"largestNativeClique",
"(",
"max_chain_length",
"=",
"max_chain_length",
")",
"objective",
"=",
"self",
".",
"_objective_bestscore",
"return",
"self",
".",
"_translate",
"(",
"self",
".",
"_map_to_processors",
"(",
"f",
",",
"objective",
")",
")"
] | 43.307692 | 26 |
def lx4dec(string, first):
"""
Scan a string from a specified starting position for the
end of a decimal number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lx4dec_c.html
:param string: Any character string.
:type string: str
:param first: First character to scan from in string.
:type first: int
:return: last and nchar
:rtype: tuple
"""
string = stypes.stringToCharP(string)
first = ctypes.c_int(first)
last = ctypes.c_int()
nchar = ctypes.c_int()
libspice.lx4dec_c(string, first, ctypes.byref(last), ctypes.byref(nchar))
return last.value, nchar.value | [
"def",
"lx4dec",
"(",
"string",
",",
"first",
")",
":",
"string",
"=",
"stypes",
".",
"stringToCharP",
"(",
"string",
")",
"first",
"=",
"ctypes",
".",
"c_int",
"(",
"first",
")",
"last",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"nchar",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"libspice",
".",
"lx4dec_c",
"(",
"string",
",",
"first",
",",
"ctypes",
".",
"byref",
"(",
"last",
")",
",",
"ctypes",
".",
"byref",
"(",
"nchar",
")",
")",
"return",
"last",
".",
"value",
",",
"nchar",
".",
"value"
] | 30.85 | 16.65 |
def asnum(number, limit=None, return_format=None):
"""Returns a summary of the information our database holds for a
particular ASNUM (similar to /asdetailsascii.html) with return limit.
:param limit: number of records to be returned (max 2000)
"""
uri = 'asnum/{number}'.format(number=number)
if limit:
uri = '/'.join([uri, str(limit)])
return _get(uri, return_format) | [
"def",
"asnum",
"(",
"number",
",",
"limit",
"=",
"None",
",",
"return_format",
"=",
"None",
")",
":",
"uri",
"=",
"'asnum/{number}'",
".",
"format",
"(",
"number",
"=",
"number",
")",
"if",
"limit",
":",
"uri",
"=",
"'/'",
".",
"join",
"(",
"[",
"uri",
",",
"str",
"(",
"limit",
")",
"]",
")",
"return",
"_get",
"(",
"uri",
",",
"return_format",
")"
] | 39.6 | 14.5 |
def _initialize_client_from_environment():
''' Initialize a KeenClient instance using environment variables. '''
global _client, project_id, write_key, read_key, master_key, base_url
if _client is None:
# check environment for project ID and keys
project_id = project_id or os.environ.get("KEEN_PROJECT_ID")
write_key = write_key or os.environ.get("KEEN_WRITE_KEY")
read_key = read_key or os.environ.get("KEEN_READ_KEY")
master_key = master_key or os.environ.get("KEEN_MASTER_KEY")
base_url = base_url or os.environ.get("KEEN_BASE_URL")
if not project_id:
raise InvalidEnvironmentError("Please set the KEEN_PROJECT_ID environment variable or set keen.project_id!")
_client = KeenClient(project_id,
write_key=write_key,
read_key=read_key,
master_key=master_key,
base_url=base_url) | [
"def",
"_initialize_client_from_environment",
"(",
")",
":",
"global",
"_client",
",",
"project_id",
",",
"write_key",
",",
"read_key",
",",
"master_key",
",",
"base_url",
"if",
"_client",
"is",
"None",
":",
"# check environment for project ID and keys",
"project_id",
"=",
"project_id",
"or",
"os",
".",
"environ",
".",
"get",
"(",
"\"KEEN_PROJECT_ID\"",
")",
"write_key",
"=",
"write_key",
"or",
"os",
".",
"environ",
".",
"get",
"(",
"\"KEEN_WRITE_KEY\"",
")",
"read_key",
"=",
"read_key",
"or",
"os",
".",
"environ",
".",
"get",
"(",
"\"KEEN_READ_KEY\"",
")",
"master_key",
"=",
"master_key",
"or",
"os",
".",
"environ",
".",
"get",
"(",
"\"KEEN_MASTER_KEY\"",
")",
"base_url",
"=",
"base_url",
"or",
"os",
".",
"environ",
".",
"get",
"(",
"\"KEEN_BASE_URL\"",
")",
"if",
"not",
"project_id",
":",
"raise",
"InvalidEnvironmentError",
"(",
"\"Please set the KEEN_PROJECT_ID environment variable or set keen.project_id!\"",
")",
"_client",
"=",
"KeenClient",
"(",
"project_id",
",",
"write_key",
"=",
"write_key",
",",
"read_key",
"=",
"read_key",
",",
"master_key",
"=",
"master_key",
",",
"base_url",
"=",
"base_url",
")"
] | 48.35 | 23.45 |
def _get_zone_id_from_name(self, name):
"""Return zone ID based on a zone."""
results = self.client['Account'].getDomains(
filter={"domains": {"name": utils.query_filter(name)}})
return [x['id'] for x in results] | [
"def",
"_get_zone_id_from_name",
"(",
"self",
",",
"name",
")",
":",
"results",
"=",
"self",
".",
"client",
"[",
"'Account'",
"]",
".",
"getDomains",
"(",
"filter",
"=",
"{",
"\"domains\"",
":",
"{",
"\"name\"",
":",
"utils",
".",
"query_filter",
"(",
"name",
")",
"}",
"}",
")",
"return",
"[",
"x",
"[",
"'id'",
"]",
"for",
"x",
"in",
"results",
"]"
] | 48.8 | 8.2 |
def init():
""" Setup Mocha in the current directory """
mochapyfile = os.path.join(os.path.join(CWD, "brew.py"))
header("Initializing Mocha ...")
if os.path.isfile(mochapyfile):
print("WARNING: It seems like Mocha is already setup!")
print("*" * 80)
else:
print("")
print("Copying files to the current directory...")
copy_resource_dir(SKELETON_DIR + "/create/", CWD)
print("")
_npm_install_static()
print("")
print("----- Your Mocha is ready! ----")
print("")
print("> What's next?")
print("- Edit the config [ application/config.py ] ")
print("- If necessary setup your model database [ mocha :initdb ]")
print("- Launch app on development mode, run [ mocha :serve ]")
print("")
print("*" * 80) | [
"def",
"init",
"(",
")",
":",
"mochapyfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"join",
"(",
"CWD",
",",
"\"brew.py\"",
")",
")",
"header",
"(",
"\"Initializing Mocha ...\"",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"mochapyfile",
")",
":",
"print",
"(",
"\"WARNING: It seems like Mocha is already setup!\"",
")",
"print",
"(",
"\"*\"",
"*",
"80",
")",
"else",
":",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"Copying files to the current directory...\"",
")",
"copy_resource_dir",
"(",
"SKELETON_DIR",
"+",
"\"/create/\"",
",",
"CWD",
")",
"print",
"(",
"\"\"",
")",
"_npm_install_static",
"(",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"----- Your Mocha is ready! ----\"",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"> What's next?\"",
")",
"print",
"(",
"\"- Edit the config [ application/config.py ] \"",
")",
"print",
"(",
"\"- If necessary setup your model database [ mocha :initdb ]\"",
")",
"print",
"(",
"\"- Launch app on development mode, run [ mocha :serve ]\"",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"*\"",
"*",
"80",
")"
] | 32.96 | 21.24 |
def sinkhorn(w1, w2, M, reg, k):
"""Sinkhorn algorithm with fixed number of iteration (autograd)
"""
K = np.exp(-M / reg)
ui = np.ones((M.shape[0],))
vi = np.ones((M.shape[1],))
for i in range(k):
vi = w2 / (np.dot(K.T, ui))
ui = w1 / (np.dot(K, vi))
G = ui.reshape((M.shape[0], 1)) * K * vi.reshape((1, M.shape[1]))
return G | [
"def",
"sinkhorn",
"(",
"w1",
",",
"w2",
",",
"M",
",",
"reg",
",",
"k",
")",
":",
"K",
"=",
"np",
".",
"exp",
"(",
"-",
"M",
"/",
"reg",
")",
"ui",
"=",
"np",
".",
"ones",
"(",
"(",
"M",
".",
"shape",
"[",
"0",
"]",
",",
")",
")",
"vi",
"=",
"np",
".",
"ones",
"(",
"(",
"M",
".",
"shape",
"[",
"1",
"]",
",",
")",
")",
"for",
"i",
"in",
"range",
"(",
"k",
")",
":",
"vi",
"=",
"w2",
"/",
"(",
"np",
".",
"dot",
"(",
"K",
".",
"T",
",",
"ui",
")",
")",
"ui",
"=",
"w1",
"/",
"(",
"np",
".",
"dot",
"(",
"K",
",",
"vi",
")",
")",
"G",
"=",
"ui",
".",
"reshape",
"(",
"(",
"M",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
"*",
"K",
"*",
"vi",
".",
"reshape",
"(",
"(",
"1",
",",
"M",
".",
"shape",
"[",
"1",
"]",
")",
")",
"return",
"G"
] | 33 | 11.727273 |
def oneday_weather_forecast(
location='Portland, OR',
inputs=('Min Temperature', 'Mean Temperature', 'Max Temperature', 'Max Humidity', 'Mean Humidity', 'Min Humidity', 'Max Sea Level Pressure', 'Mean Sea Level Pressure', 'Min Sea Level Pressure', 'Wind Direction'),
outputs=('Min Temperature', 'Mean Temperature', 'Max Temperature', 'Max Humidity'),
date=None,
epochs=200,
delays=(1, 2, 3, 4),
num_years=4,
use_cache=False,
verbosity=1,
):
""" Provide a weather forecast for tomorrow based on historical weather at that location """
date = make_date(date or datetime.datetime.now().date())
num_years = int(num_years or 10)
years = range(date.year - num_years, date.year + 1)
df = weather.daily(location, years=years, use_cache=use_cache, verbosity=verbosity).sort()
# because up-to-date weather history was cached above, can use that cache, regardless of use_cache kwarg
trainer, df = train_weather_predictor(
location,
years=years,
delays=delays,
inputs=inputs,
outputs=outputs,
epochs=epochs,
verbosity=verbosity,
use_cache=True,
)
nn = trainer.module
forecast = {'trainer': trainer}
yesterday = dict(zip(outputs, nn.activate(trainer.ds['input'][-2])))
forecast['yesterday'] = update_dict(yesterday, {'date': df.index[-2].date()})
today = dict(zip(outputs, nn.activate(trainer.ds['input'][-1])))
forecast['today'] = update_dict(today, {'date': df.index[-1].date()})
ds = util.input_dataset_from_dataframe(df[-max(delays):], delays=delays, inputs=inputs, normalize=False, verbosity=0)
tomorrow = dict(zip(outputs, nn.activate(ds['input'][-1])))
forecast['tomorrow'] = update_dict(tomorrow, {'date': (df.index[-1] + datetime.timedelta(1)).date()})
return forecast | [
"def",
"oneday_weather_forecast",
"(",
"location",
"=",
"'Portland, OR'",
",",
"inputs",
"=",
"(",
"'Min Temperature'",
",",
"'Mean Temperature'",
",",
"'Max Temperature'",
",",
"'Max Humidity'",
",",
"'Mean Humidity'",
",",
"'Min Humidity'",
",",
"'Max Sea Level Pressure'",
",",
"'Mean Sea Level Pressure'",
",",
"'Min Sea Level Pressure'",
",",
"'Wind Direction'",
")",
",",
"outputs",
"=",
"(",
"'Min Temperature'",
",",
"'Mean Temperature'",
",",
"'Max Temperature'",
",",
"'Max Humidity'",
")",
",",
"date",
"=",
"None",
",",
"epochs",
"=",
"200",
",",
"delays",
"=",
"(",
"1",
",",
"2",
",",
"3",
",",
"4",
")",
",",
"num_years",
"=",
"4",
",",
"use_cache",
"=",
"False",
",",
"verbosity",
"=",
"1",
",",
")",
":",
"date",
"=",
"make_date",
"(",
"date",
"or",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"date",
"(",
")",
")",
"num_years",
"=",
"int",
"(",
"num_years",
"or",
"10",
")",
"years",
"=",
"range",
"(",
"date",
".",
"year",
"-",
"num_years",
",",
"date",
".",
"year",
"+",
"1",
")",
"df",
"=",
"weather",
".",
"daily",
"(",
"location",
",",
"years",
"=",
"years",
",",
"use_cache",
"=",
"use_cache",
",",
"verbosity",
"=",
"verbosity",
")",
".",
"sort",
"(",
")",
"# because up-to-date weather history was cached above, can use that cache, regardless of use_cache kwarg",
"trainer",
",",
"df",
"=",
"train_weather_predictor",
"(",
"location",
",",
"years",
"=",
"years",
",",
"delays",
"=",
"delays",
",",
"inputs",
"=",
"inputs",
",",
"outputs",
"=",
"outputs",
",",
"epochs",
"=",
"epochs",
",",
"verbosity",
"=",
"verbosity",
",",
"use_cache",
"=",
"True",
",",
")",
"nn",
"=",
"trainer",
".",
"module",
"forecast",
"=",
"{",
"'trainer'",
":",
"trainer",
"}",
"yesterday",
"=",
"dict",
"(",
"zip",
"(",
"outputs",
",",
"nn",
".",
"activate",
"(",
"trainer",
".",
"ds",
"[",
"'input'",
"]",
"[",
"-",
"2",
"]",
")",
")",
")",
"forecast",
"[",
"'yesterday'",
"]",
"=",
"update_dict",
"(",
"yesterday",
",",
"{",
"'date'",
":",
"df",
".",
"index",
"[",
"-",
"2",
"]",
".",
"date",
"(",
")",
"}",
")",
"today",
"=",
"dict",
"(",
"zip",
"(",
"outputs",
",",
"nn",
".",
"activate",
"(",
"trainer",
".",
"ds",
"[",
"'input'",
"]",
"[",
"-",
"1",
"]",
")",
")",
")",
"forecast",
"[",
"'today'",
"]",
"=",
"update_dict",
"(",
"today",
",",
"{",
"'date'",
":",
"df",
".",
"index",
"[",
"-",
"1",
"]",
".",
"date",
"(",
")",
"}",
")",
"ds",
"=",
"util",
".",
"input_dataset_from_dataframe",
"(",
"df",
"[",
"-",
"max",
"(",
"delays",
")",
":",
"]",
",",
"delays",
"=",
"delays",
",",
"inputs",
"=",
"inputs",
",",
"normalize",
"=",
"False",
",",
"verbosity",
"=",
"0",
")",
"tomorrow",
"=",
"dict",
"(",
"zip",
"(",
"outputs",
",",
"nn",
".",
"activate",
"(",
"ds",
"[",
"'input'",
"]",
"[",
"-",
"1",
"]",
")",
")",
")",
"forecast",
"[",
"'tomorrow'",
"]",
"=",
"update_dict",
"(",
"tomorrow",
",",
"{",
"'date'",
":",
"(",
"df",
".",
"index",
"[",
"-",
"1",
"]",
"+",
"datetime",
".",
"timedelta",
"(",
"1",
")",
")",
".",
"date",
"(",
")",
"}",
")",
"return",
"forecast"
] | 45.073171 | 30.097561 |
def create(self, object_type, under=None, attributes=None, **kwattrs):
"""Create a new automation object.
Arguments:
object_type -- Type of object to create.
under -- Handle of the parent of the new object.
attributes -- Dictionary of attributes (name-value pairs).
kwattrs -- Optional keyword attributes (name=value pairs).
Return:
Handle of newly created object.
"""
data = self.createx(object_type, under, attributes, **kwattrs)
return data['handle'] | [
"def",
"create",
"(",
"self",
",",
"object_type",
",",
"under",
"=",
"None",
",",
"attributes",
"=",
"None",
",",
"*",
"*",
"kwattrs",
")",
":",
"data",
"=",
"self",
".",
"createx",
"(",
"object_type",
",",
"under",
",",
"attributes",
",",
"*",
"*",
"kwattrs",
")",
"return",
"data",
"[",
"'handle'",
"]"
] | 36.066667 | 21.733333 |
def phmmer(query, db, type, out, threads = '4', evalue = '0.01'):
"""
run phmmer
"""
if os.path.exists(out) is False:
print('# ... running phmmer with %s as query and %s as database' % (query, db))
os.system('phmmer -o %s.ph1 --tblout %s.ph2 --acc --noali --notextw -E %s --cpu %s %s %s' % (out, out, evalue, threads, query, db))
else:
print('# ... phmmer output found for %s as query and %s as database' % (query, db))
phmmer2blast('%s.ph2' % out, out) | [
"def",
"phmmer",
"(",
"query",
",",
"db",
",",
"type",
",",
"out",
",",
"threads",
"=",
"'4'",
",",
"evalue",
"=",
"'0.01'",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"out",
")",
"is",
"False",
":",
"print",
"(",
"'# ... running phmmer with %s as query and %s as database'",
"%",
"(",
"query",
",",
"db",
")",
")",
"os",
".",
"system",
"(",
"'phmmer -o %s.ph1 --tblout %s.ph2 --acc --noali --notextw -E %s --cpu %s %s %s'",
"%",
"(",
"out",
",",
"out",
",",
"evalue",
",",
"threads",
",",
"query",
",",
"db",
")",
")",
"else",
":",
"print",
"(",
"'# ... phmmer output found for %s as query and %s as database'",
"%",
"(",
"query",
",",
"db",
")",
")",
"phmmer2blast",
"(",
"'%s.ph2'",
"%",
"out",
",",
"out",
")"
] | 49.2 | 28.6 |
def _makeButtons(self):
"""Makes buttons and wires them up.
"""
self.button = button = urwid.Button(u"OK")
urwid.connect_signal(button, "click", self._completed)
return [self.button] | [
"def",
"_makeButtons",
"(",
"self",
")",
":",
"self",
".",
"button",
"=",
"button",
"=",
"urwid",
".",
"Button",
"(",
"u\"OK\"",
")",
"urwid",
".",
"connect_signal",
"(",
"button",
",",
"\"click\"",
",",
"self",
".",
"_completed",
")",
"return",
"[",
"self",
".",
"button",
"]"
] | 31 | 14.428571 |
def select_update_method(self, force_interactive, force_change_set):
"""Select the correct update method when updating a stack.
Args:
force_interactive (str): Whether or not to force interactive mode
no matter what mode the provider is in.
force_change_set (bool): Whether or not to force change set use.
Returns:
function: The correct object method to use when updating.
"""
if self.interactive or force_interactive:
return self.interactive_update_stack
elif force_change_set:
return self.noninteractive_changeset_update
else:
return self.default_update_stack | [
"def",
"select_update_method",
"(",
"self",
",",
"force_interactive",
",",
"force_change_set",
")",
":",
"if",
"self",
".",
"interactive",
"or",
"force_interactive",
":",
"return",
"self",
".",
"interactive_update_stack",
"elif",
"force_change_set",
":",
"return",
"self",
".",
"noninteractive_changeset_update",
"else",
":",
"return",
"self",
".",
"default_update_stack"
] | 40.588235 | 20.529412 |
def copy_file(self):
share_name = self._create_share()
directory_name = self._create_directory(share_name)
source_file_name = self._get_file_reference()
self.service.create_file(share_name, directory_name, source_file_name, 512)
# Basic
# Copy the file from the directory to the root of the share
source = self.service.make_file_url(share_name, directory_name, source_file_name)
copy = self.service.copy_file(share_name, None, 'file1copy', source)
# Poll for copy completion
while copy.status != 'success':
count = count + 1
if count > 5:
print('Timed out waiting for async copy to complete.')
time.sleep(30)
copy = self.service.get_file_properties(share_name, dir_name, 'file1copy').properties.copy
# With SAS from a remote account to local file
# Commented out as remote share, directory, file, and sas would need to be created
'''
source_file_url = self.service.make_file_url(
remote_share_name,
remote_directory_name,
remote_file_name,
sas_token=remote_sas_token,
)
copy = self.service.copy_file(destination_sharename,
destination_directory_name,
destination_file_name,
source_file_url)
'''
# Abort copy
# Commented out as this involves timing the abort to be sent while the copy is still running
# Abort copy is useful to do along with polling
# self.service.abort_copy_file(share_name, dir_name, file_name, copy.id)
self.service.delete_share(share_name) | [
"def",
"copy_file",
"(",
"self",
")",
":",
"share_name",
"=",
"self",
".",
"_create_share",
"(",
")",
"directory_name",
"=",
"self",
".",
"_create_directory",
"(",
"share_name",
")",
"source_file_name",
"=",
"self",
".",
"_get_file_reference",
"(",
")",
"self",
".",
"service",
".",
"create_file",
"(",
"share_name",
",",
"directory_name",
",",
"source_file_name",
",",
"512",
")",
"# Basic",
"# Copy the file from the directory to the root of the share",
"source",
"=",
"self",
".",
"service",
".",
"make_file_url",
"(",
"share_name",
",",
"directory_name",
",",
"source_file_name",
")",
"copy",
"=",
"self",
".",
"service",
".",
"copy_file",
"(",
"share_name",
",",
"None",
",",
"'file1copy'",
",",
"source",
")",
"# Poll for copy completion",
"while",
"copy",
".",
"status",
"!=",
"'success'",
":",
"count",
"=",
"count",
"+",
"1",
"if",
"count",
">",
"5",
":",
"print",
"(",
"'Timed out waiting for async copy to complete.'",
")",
"time",
".",
"sleep",
"(",
"30",
")",
"copy",
"=",
"self",
".",
"service",
".",
"get_file_properties",
"(",
"share_name",
",",
"dir_name",
",",
"'file1copy'",
")",
".",
"properties",
".",
"copy",
"# With SAS from a remote account to local file",
"# Commented out as remote share, directory, file, and sas would need to be created",
"# Abort copy",
"# Commented out as this involves timing the abort to be sent while the copy is still running",
"# Abort copy is useful to do along with polling",
"# self.service.abort_copy_file(share_name, dir_name, file_name, copy.id)",
"self",
".",
"service",
".",
"delete_share",
"(",
"share_name",
")"
] | 42.875 | 24.325 |
def children_bp(self, feature, child_featuretype='exon', merge=False,
ignore_strand=False):
"""
Total bp of all children of a featuretype.
Useful for getting the exonic bp of an mRNA.
Parameters
----------
feature : str or Feature instance
child_featuretype : str
Which featuretype to consider. For example, to get exonic bp of an
mRNA, use `child_featuretype='exon'`.
merge : bool
Whether or not to merge child features together before summing
them.
ignore_strand : bool
If True, then overlapping features on different strands will be
merged together; otherwise, merging features with different strands
will result in a ValueError.
Returns
-------
Integer representing the total number of bp.
"""
children = self.children(feature, featuretype=child_featuretype,
order_by='start')
if merge:
children = self.merge(children, ignore_strand=ignore_strand)
total = 0
for child in children:
total += len(child)
return total | [
"def",
"children_bp",
"(",
"self",
",",
"feature",
",",
"child_featuretype",
"=",
"'exon'",
",",
"merge",
"=",
"False",
",",
"ignore_strand",
"=",
"False",
")",
":",
"children",
"=",
"self",
".",
"children",
"(",
"feature",
",",
"featuretype",
"=",
"child_featuretype",
",",
"order_by",
"=",
"'start'",
")",
"if",
"merge",
":",
"children",
"=",
"self",
".",
"merge",
"(",
"children",
",",
"ignore_strand",
"=",
"ignore_strand",
")",
"total",
"=",
"0",
"for",
"child",
"in",
"children",
":",
"total",
"+=",
"len",
"(",
"child",
")",
"return",
"total"
] | 30.615385 | 23.025641 |
def __insert(self):
"""Insert rows to table
"""
if len(self.__buffer) > 0:
# Insert data
statement = self.__table.insert()
if self.__autoincrement:
statement = statement.returning(
getattr(self.__table.c, self.__autoincrement))
statement = statement.values(self.__buffer)
res = statement.execute()
for id, in res:
row = self.__buffer.pop(0)
yield WrittenRow(row, False, id)
else:
statement.execute(self.__buffer)
for row in self.__buffer:
yield WrittenRow(row, False, None)
# Clean memory
self.__buffer = [] | [
"def",
"__insert",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"__buffer",
")",
">",
"0",
":",
"# Insert data",
"statement",
"=",
"self",
".",
"__table",
".",
"insert",
"(",
")",
"if",
"self",
".",
"__autoincrement",
":",
"statement",
"=",
"statement",
".",
"returning",
"(",
"getattr",
"(",
"self",
".",
"__table",
".",
"c",
",",
"self",
".",
"__autoincrement",
")",
")",
"statement",
"=",
"statement",
".",
"values",
"(",
"self",
".",
"__buffer",
")",
"res",
"=",
"statement",
".",
"execute",
"(",
")",
"for",
"id",
",",
"in",
"res",
":",
"row",
"=",
"self",
".",
"__buffer",
".",
"pop",
"(",
"0",
")",
"yield",
"WrittenRow",
"(",
"row",
",",
"False",
",",
"id",
")",
"else",
":",
"statement",
".",
"execute",
"(",
"self",
".",
"__buffer",
")",
"for",
"row",
"in",
"self",
".",
"__buffer",
":",
"yield",
"WrittenRow",
"(",
"row",
",",
"False",
",",
"None",
")",
"# Clean memory",
"self",
".",
"__buffer",
"=",
"[",
"]"
] | 38 | 10.1 |
def imresize_single_image(image, sizes, interpolation=None):
"""
Resizes a single image.
dtype support::
See :func:`imgaug.imgaug.imresize_many_images`.
Parameters
----------
image : (H,W,C) ndarray or (H,W) ndarray
Array of the image to resize.
Usually recommended to be of dtype uint8.
sizes : float or iterable of int or iterable of float
See :func:`imgaug.imgaug.imresize_many_images`.
interpolation : None or str or int, optional
See :func:`imgaug.imgaug.imresize_many_images`.
Returns
-------
out : (H',W',C) ndarray or (H',W') ndarray
The resized image.
"""
grayscale = False
if image.ndim == 2:
grayscale = True
image = image[:, :, np.newaxis]
do_assert(len(image.shape) == 3, image.shape)
rs = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
if grayscale:
return np.squeeze(rs[0, :, :, 0])
else:
return rs[0, ...] | [
"def",
"imresize_single_image",
"(",
"image",
",",
"sizes",
",",
"interpolation",
"=",
"None",
")",
":",
"grayscale",
"=",
"False",
"if",
"image",
".",
"ndim",
"==",
"2",
":",
"grayscale",
"=",
"True",
"image",
"=",
"image",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"do_assert",
"(",
"len",
"(",
"image",
".",
"shape",
")",
"==",
"3",
",",
"image",
".",
"shape",
")",
"rs",
"=",
"imresize_many_images",
"(",
"image",
"[",
"np",
".",
"newaxis",
",",
":",
",",
":",
",",
":",
"]",
",",
"sizes",
",",
"interpolation",
"=",
"interpolation",
")",
"if",
"grayscale",
":",
"return",
"np",
".",
"squeeze",
"(",
"rs",
"[",
"0",
",",
":",
",",
":",
",",
"0",
"]",
")",
"else",
":",
"return",
"rs",
"[",
"0",
",",
"...",
"]"
] | 26.567568 | 20.945946 |
def add_binding(self, binding: Binding):
"""Stores binding"""
binding.add_error_info = lambda error: error.add_view_info(self._xml_node.view_info)
self._bindings.append(binding) | [
"def",
"add_binding",
"(",
"self",
",",
"binding",
":",
"Binding",
")",
":",
"binding",
".",
"add_error_info",
"=",
"lambda",
"error",
":",
"error",
".",
"add_view_info",
"(",
"self",
".",
"_xml_node",
".",
"view_info",
")",
"self",
".",
"_bindings",
".",
"append",
"(",
"binding",
")"
] | 49.5 | 13.5 |
def get_callback_url(self, provider):
"""Return the callback url for this provider."""
info = self.model._meta.app_label, self.model._meta.model_name
return reverse('admin:%s_%s_callback' % info, kwargs={'provider': provider.id}) | [
"def",
"get_callback_url",
"(",
"self",
",",
"provider",
")",
":",
"info",
"=",
"self",
".",
"model",
".",
"_meta",
".",
"app_label",
",",
"self",
".",
"model",
".",
"_meta",
".",
"model_name",
"return",
"reverse",
"(",
"'admin:%s_%s_callback'",
"%",
"info",
",",
"kwargs",
"=",
"{",
"'provider'",
":",
"provider",
".",
"id",
"}",
")"
] | 62.5 | 20 |
def resolve_tag(name, **kwargs):
'''
.. versionadded:: 2017.7.2
.. versionchanged:: 2018.3.0
Instead of matching against pulled tags using
:py:func:`docker.list_tags <salt.modules.dockermod.list_tags>`, this
function now simply inspects the passed image name using
:py:func:`docker.inspect_image <salt.modules.dockermod.inspect_image>`
and returns the first matching tag. If no matching tags are found, it
is assumed that the passed image is an untagged image ID, and the full
ID is returned.
Inspects the specified image name and returns the first matching tag in the
inspect results. If the specified image is not pulled locally, this
function will return ``False``.
name
Image name to resolve. If the image is found but there are no tags,
this means that the image name passed was an untagged image. In this
case the image ID will be returned.
all : False
If ``True``, a list of all matching tags will be returned. If the image
is found but there are no tags, then a list will still be returned, but
it will simply contain the image ID.
.. versionadded:: 2018.3.0
tags
.. deprecated:: 2018.3.0
CLI Examples:
.. code-block:: bash
salt myminion docker.resolve_tag busybox
salt myminion docker.resolve_tag centos:7 all=True
salt myminion docker.resolve_tag c9f378ac27d9
'''
kwargs = __utils__['args.clean_kwargs'](**kwargs)
all_ = kwargs.pop('all', False)
if kwargs:
__utils__['args.invalid_kwargs'](kwargs)
try:
inspect_result = inspect_image(name)
tags = inspect_result['RepoTags']
if all_:
if tags:
return tags
# If the image is untagged, don't return an empty list, return
# back the resolved ID at he end of this function.
else:
return tags[0]
except CommandExecutionError:
# No matching image pulled locally, or inspect_image otherwise failed
return False
except KeyError:
log.error(
'Inspecting docker image \'%s\' returned an unexpected data '
'structure: %s', name, inspect_result
)
except IndexError:
# The image passed is an untagged image ID
pass
return [inspect_result['Id']] if all_ else inspect_result['Id'] | [
"def",
"resolve_tag",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"__utils__",
"[",
"'args.clean_kwargs'",
"]",
"(",
"*",
"*",
"kwargs",
")",
"all_",
"=",
"kwargs",
".",
"pop",
"(",
"'all'",
",",
"False",
")",
"if",
"kwargs",
":",
"__utils__",
"[",
"'args.invalid_kwargs'",
"]",
"(",
"kwargs",
")",
"try",
":",
"inspect_result",
"=",
"inspect_image",
"(",
"name",
")",
"tags",
"=",
"inspect_result",
"[",
"'RepoTags'",
"]",
"if",
"all_",
":",
"if",
"tags",
":",
"return",
"tags",
"# If the image is untagged, don't return an empty list, return",
"# back the resolved ID at he end of this function.",
"else",
":",
"return",
"tags",
"[",
"0",
"]",
"except",
"CommandExecutionError",
":",
"# No matching image pulled locally, or inspect_image otherwise failed",
"return",
"False",
"except",
"KeyError",
":",
"log",
".",
"error",
"(",
"'Inspecting docker image \\'%s\\' returned an unexpected data '",
"'structure: %s'",
",",
"name",
",",
"inspect_result",
")",
"except",
"IndexError",
":",
"# The image passed is an untagged image ID",
"pass",
"return",
"[",
"inspect_result",
"[",
"'Id'",
"]",
"]",
"if",
"all_",
"else",
"inspect_result",
"[",
"'Id'",
"]"
] | 35.969697 | 23.939394 |
def _get_function_name(self, fn, default="None"):
""" Return name of function, using default value if function not defined
"""
if fn is None:
fn_name = default
else:
fn_name = fn.__name__
return fn_name | [
"def",
"_get_function_name",
"(",
"self",
",",
"fn",
",",
"default",
"=",
"\"None\"",
")",
":",
"if",
"fn",
"is",
"None",
":",
"fn_name",
"=",
"default",
"else",
":",
"fn_name",
"=",
"fn",
".",
"__name__",
"return",
"fn_name"
] | 32.375 | 11.25 |
def read_text(forfn, nrows=None, verbose=True):
r""" Read all the lines (up to nrows) from a text file or txt.gz file
>>> fn = os.path.join(DATA_PATH, 'mavis-batey-greetings.txt')
>>> len(read_text(fn, nrows=3))
3
"""
tqdm_prog = tqdm if verbose else no_tqdm
nrows = wc(forfn, nrows=nrows) # not necessary when nrows==None
lines = np.empty(dtype=object, shape=nrows)
with ensure_open(forfn) as f:
for i, line in enumerate(tqdm_prog(f, total=nrows)):
if i >= len(lines):
break
lines[i] = ensure_str(line).rstrip('\n').rstrip('\r')
if all('\t' in line for line in lines):
num_tabs = [sum([1 for c in line if c == '\t']) for line in lines]
del lines
if all(i == num_tabs[0] for i in num_tabs):
f.seek(0)
return read_csv(f, sep='\t', header=None, nrows=nrows)
elif sum((1 for line in lines if any((tag.lower() in line.lower() for tag in HTML_TAGS)))
) / float(len(lines)) > .05:
return np.array(html2text(EOL.join(lines)).split(EOL))
return lines | [
"def",
"read_text",
"(",
"forfn",
",",
"nrows",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"tqdm_prog",
"=",
"tqdm",
"if",
"verbose",
"else",
"no_tqdm",
"nrows",
"=",
"wc",
"(",
"forfn",
",",
"nrows",
"=",
"nrows",
")",
"# not necessary when nrows==None",
"lines",
"=",
"np",
".",
"empty",
"(",
"dtype",
"=",
"object",
",",
"shape",
"=",
"nrows",
")",
"with",
"ensure_open",
"(",
"forfn",
")",
"as",
"f",
":",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"tqdm_prog",
"(",
"f",
",",
"total",
"=",
"nrows",
")",
")",
":",
"if",
"i",
">=",
"len",
"(",
"lines",
")",
":",
"break",
"lines",
"[",
"i",
"]",
"=",
"ensure_str",
"(",
"line",
")",
".",
"rstrip",
"(",
"'\\n'",
")",
".",
"rstrip",
"(",
"'\\r'",
")",
"if",
"all",
"(",
"'\\t'",
"in",
"line",
"for",
"line",
"in",
"lines",
")",
":",
"num_tabs",
"=",
"[",
"sum",
"(",
"[",
"1",
"for",
"c",
"in",
"line",
"if",
"c",
"==",
"'\\t'",
"]",
")",
"for",
"line",
"in",
"lines",
"]",
"del",
"lines",
"if",
"all",
"(",
"i",
"==",
"num_tabs",
"[",
"0",
"]",
"for",
"i",
"in",
"num_tabs",
")",
":",
"f",
".",
"seek",
"(",
"0",
")",
"return",
"read_csv",
"(",
"f",
",",
"sep",
"=",
"'\\t'",
",",
"header",
"=",
"None",
",",
"nrows",
"=",
"nrows",
")",
"elif",
"sum",
"(",
"(",
"1",
"for",
"line",
"in",
"lines",
"if",
"any",
"(",
"(",
"tag",
".",
"lower",
"(",
")",
"in",
"line",
".",
"lower",
"(",
")",
"for",
"tag",
"in",
"HTML_TAGS",
")",
")",
")",
")",
"/",
"float",
"(",
"len",
"(",
"lines",
")",
")",
">",
".05",
":",
"return",
"np",
".",
"array",
"(",
"html2text",
"(",
"EOL",
".",
"join",
"(",
"lines",
")",
")",
".",
"split",
"(",
"EOL",
")",
")",
"return",
"lines"
] | 44.8 | 18.64 |
def _clean_directory(self, name):
"""Clean a directory if exists and not in dry run"""
if not os.path.exists(name):
return
self.announce(
"removing directory '{}' and all its contents".format(name)
)
if not self.dry_run:
rmtree(name, True) | [
"def",
"_clean_directory",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"name",
")",
":",
"return",
"self",
".",
"announce",
"(",
"\"removing directory '{}' and all its contents\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"not",
"self",
".",
"dry_run",
":",
"rmtree",
"(",
"name",
",",
"True",
")"
] | 34.111111 | 15 |
def spherical(coordinates):
"""No error is propagated"""
c = coordinates
r = N.linalg.norm(c,axis=0)
theta = N.arccos(c[2]/r)
phi = N.arctan2(c[1],c[0])
return N.column_stack((r,theta,phi)) | [
"def",
"spherical",
"(",
"coordinates",
")",
":",
"c",
"=",
"coordinates",
"r",
"=",
"N",
".",
"linalg",
".",
"norm",
"(",
"c",
",",
"axis",
"=",
"0",
")",
"theta",
"=",
"N",
".",
"arccos",
"(",
"c",
"[",
"2",
"]",
"/",
"r",
")",
"phi",
"=",
"N",
".",
"arctan2",
"(",
"c",
"[",
"1",
"]",
",",
"c",
"[",
"0",
"]",
")",
"return",
"N",
".",
"column_stack",
"(",
"(",
"r",
",",
"theta",
",",
"phi",
")",
")"
] | 29.571429 | 9.285714 |
def pack_metadata(self) -> List[Tuple[str, Any]]:
"""Packs the log fields and the invocation metadata into a new metadata
The log fields are added in the new metadata with the key
`LOG_FIELDS_KEY_META`.
"""
metadata = [(k, v) for k, v in self._invocation_metadata.items()
if k != LOG_FIELDS_KEY_META]
metadata.append((LOG_FIELDS_KEY_META, self._log_fields.dumps()))
return metadata | [
"def",
"pack_metadata",
"(",
"self",
")",
"->",
"List",
"[",
"Tuple",
"[",
"str",
",",
"Any",
"]",
"]",
":",
"metadata",
"=",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_invocation_metadata",
".",
"items",
"(",
")",
"if",
"k",
"!=",
"LOG_FIELDS_KEY_META",
"]",
"metadata",
".",
"append",
"(",
"(",
"LOG_FIELDS_KEY_META",
",",
"self",
".",
"_log_fields",
".",
"dumps",
"(",
")",
")",
")",
"return",
"metadata"
] | 40.818182 | 19.363636 |
def cache(self):
"""Call a user defined query and cache the results"""
if not self._bucket_width or self._untrusted_time is None:
raise ValueError('QueryCompute must be initialized with a bucket_width '
'and an untrusted_time in order to write to the cache.')
now = datetime.datetime.now()
untrusted_time = now - datetime.timedelta(seconds=self._untrusted_time)
list(self._query_cache.compute_and_cache_missing_buckets(
self._start_time,
self._end_time,
untrusted_time)) | [
"def",
"cache",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_bucket_width",
"or",
"self",
".",
"_untrusted_time",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'QueryCompute must be initialized with a bucket_width '",
"'and an untrusted_time in order to write to the cache.'",
")",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"untrusted_time",
"=",
"now",
"-",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"self",
".",
"_untrusted_time",
")",
"list",
"(",
"self",
".",
"_query_cache",
".",
"compute_and_cache_missing_buckets",
"(",
"self",
".",
"_start_time",
",",
"self",
".",
"_end_time",
",",
"untrusted_time",
")",
")"
] | 44.416667 | 22.833333 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_label') and self.document_label is not None:
_dict['document_label'] = self.document_label
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
if hasattr(self, 'attributes') and self.attributes is not None:
_dict['attributes'] = [x._to_dict() for x in self.attributes]
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'document_label'",
")",
"and",
"self",
".",
"document_label",
"is",
"not",
"None",
":",
"_dict",
"[",
"'document_label'",
"]",
"=",
"self",
".",
"document_label",
"if",
"hasattr",
"(",
"self",
",",
"'location'",
")",
"and",
"self",
".",
"location",
"is",
"not",
"None",
":",
"_dict",
"[",
"'location'",
"]",
"=",
"self",
".",
"location",
".",
"_to_dict",
"(",
")",
"if",
"hasattr",
"(",
"self",
",",
"'text'",
")",
"and",
"self",
".",
"text",
"is",
"not",
"None",
":",
"_dict",
"[",
"'text'",
"]",
"=",
"self",
".",
"text",
"if",
"hasattr",
"(",
"self",
",",
"'types'",
")",
"and",
"self",
".",
"types",
"is",
"not",
"None",
":",
"_dict",
"[",
"'types'",
"]",
"=",
"[",
"x",
".",
"_to_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"types",
"]",
"if",
"hasattr",
"(",
"self",
",",
"'categories'",
")",
"and",
"self",
".",
"categories",
"is",
"not",
"None",
":",
"_dict",
"[",
"'categories'",
"]",
"=",
"[",
"x",
".",
"_to_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"categories",
"]",
"if",
"hasattr",
"(",
"self",
",",
"'attributes'",
")",
"and",
"self",
".",
"attributes",
"is",
"not",
"None",
":",
"_dict",
"[",
"'attributes'",
"]",
"=",
"[",
"x",
".",
"_to_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"attributes",
"]",
"return",
"_dict"
] | 55.4375 | 22.25 |
def get_bytes(self, n):
"""
Return the next C{n} bytes of the Message, without decomposing into
an int, string, etc. Just the raw bytes are returned.
@return: a string of the next C{n} bytes of the Message, or a string
of C{n} zero bytes, if there aren't C{n} bytes remaining.
@rtype: string
"""
b = self.packet.read(n)
if len(b) < n:
return b + '\x00' * (n - len(b))
return b | [
"def",
"get_bytes",
"(",
"self",
",",
"n",
")",
":",
"b",
"=",
"self",
".",
"packet",
".",
"read",
"(",
"n",
")",
"if",
"len",
"(",
"b",
")",
"<",
"n",
":",
"return",
"b",
"+",
"'\\x00'",
"*",
"(",
"n",
"-",
"len",
"(",
"b",
")",
")",
"return",
"b"
] | 35.538462 | 19.384615 |
def delete_dag(dag_id):
"""
Delete all DB records related to the specified Dag.
"""
try:
count = delete.delete_dag(dag_id)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
return jsonify(message="Removed {} record(s)".format(count), count=count) | [
"def",
"delete_dag",
"(",
"dag_id",
")",
":",
"try",
":",
"count",
"=",
"delete",
".",
"delete_dag",
"(",
"dag_id",
")",
"except",
"AirflowException",
"as",
"err",
":",
"_log",
".",
"error",
"(",
"err",
")",
"response",
"=",
"jsonify",
"(",
"error",
"=",
"\"{}\"",
".",
"format",
"(",
"err",
")",
")",
"response",
".",
"status_code",
"=",
"err",
".",
"status_code",
"return",
"response",
"return",
"jsonify",
"(",
"message",
"=",
"\"Removed {} record(s)\"",
".",
"format",
"(",
"count",
")",
",",
"count",
"=",
"count",
")"
] | 32.916667 | 13.083333 |
def do_some_expensive_things(number):
"""
Perform one expensive computation cooperatively with any
other iterator passed into twisted's cooperate, then
use it's result to pass into the second computation.
:param number:
:return:
"""
result = yield batch_accumulate(1000, expensive(number))
total = reduce(add, result, 0)
log.msg("first for {}: {}".format(number, total))
result = yield batch_accumulate(1000, expensive(int(total/1e9)))
total = reduce(add, result, 0)
log.msg("second for {}: {}".format(number, total))
defer.returnValue(total) | [
"def",
"do_some_expensive_things",
"(",
"number",
")",
":",
"result",
"=",
"yield",
"batch_accumulate",
"(",
"1000",
",",
"expensive",
"(",
"number",
")",
")",
"total",
"=",
"reduce",
"(",
"add",
",",
"result",
",",
"0",
")",
"log",
".",
"msg",
"(",
"\"first for {}: {}\"",
".",
"format",
"(",
"number",
",",
"total",
")",
")",
"result",
"=",
"yield",
"batch_accumulate",
"(",
"1000",
",",
"expensive",
"(",
"int",
"(",
"total",
"/",
"1e9",
")",
")",
")",
"total",
"=",
"reduce",
"(",
"add",
",",
"result",
",",
"0",
")",
"log",
".",
"msg",
"(",
"\"second for {}: {}\"",
".",
"format",
"(",
"number",
",",
"total",
")",
")",
"defer",
".",
"returnValue",
"(",
"total",
")"
] | 34.470588 | 16.823529 |
def orient_graph(self, df_data, graph, nb_runs=6, printout=None, **kwargs):
"""Orient an undirected graph using the pairwise method defined by the subclass.
The pairwise method is ran on every undirected edge.
Args:
df_data (pandas.DataFrame): Data
umg (networkx.Graph): Graph to orient
nb_runs (int): number of times to rerun for each pair (bootstrap)
printout (str): (optional) Path to file where to save temporary results
Returns:
networkx.DiGraph: a directed graph, which might contain cycles
.. warning:
Requirement : Name of the nodes in the graph correspond to name of
the variables in df_data
"""
if type(graph) == nx.DiGraph:
edges = [a for a in list(graph.edges()) if (a[1], a[0]) in list(graph.edges())]
oriented_edges = [a for a in list(graph.edges()) if (a[1], a[0]) not in list(graph.edges())]
for a in edges:
if (a[1], a[0]) in list(graph.edges()):
edges.remove(a)
output = nx.DiGraph()
for i in oriented_edges:
output.add_edge(*i)
elif type(graph) == nx.Graph:
edges = list(graph.edges())
output = nx.DiGraph()
else:
raise TypeError("Data type not understood.")
res = []
for idx, (a, b) in enumerate(edges):
weight = self.predict_proba(
df_data[a].values.reshape((-1, 1)), df_data[b].values.reshape((-1, 1)), idx=idx,
nb_runs=nb_runs, **kwargs)
if weight > 0: # a causes b
output.add_edge(a, b, weight=weight)
else:
output.add_edge(b, a, weight=abs(weight))
if printout is not None:
res.append([str(a) + '-' + str(b), weight])
DataFrame(res, columns=['SampleID', 'Predictions']).to_csv(
printout, index=False)
for node in list(df_data.columns.values):
if node not in output.nodes():
output.add_node(node)
return output | [
"def",
"orient_graph",
"(",
"self",
",",
"df_data",
",",
"graph",
",",
"nb_runs",
"=",
"6",
",",
"printout",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"type",
"(",
"graph",
")",
"==",
"nx",
".",
"DiGraph",
":",
"edges",
"=",
"[",
"a",
"for",
"a",
"in",
"list",
"(",
"graph",
".",
"edges",
"(",
")",
")",
"if",
"(",
"a",
"[",
"1",
"]",
",",
"a",
"[",
"0",
"]",
")",
"in",
"list",
"(",
"graph",
".",
"edges",
"(",
")",
")",
"]",
"oriented_edges",
"=",
"[",
"a",
"for",
"a",
"in",
"list",
"(",
"graph",
".",
"edges",
"(",
")",
")",
"if",
"(",
"a",
"[",
"1",
"]",
",",
"a",
"[",
"0",
"]",
")",
"not",
"in",
"list",
"(",
"graph",
".",
"edges",
"(",
")",
")",
"]",
"for",
"a",
"in",
"edges",
":",
"if",
"(",
"a",
"[",
"1",
"]",
",",
"a",
"[",
"0",
"]",
")",
"in",
"list",
"(",
"graph",
".",
"edges",
"(",
")",
")",
":",
"edges",
".",
"remove",
"(",
"a",
")",
"output",
"=",
"nx",
".",
"DiGraph",
"(",
")",
"for",
"i",
"in",
"oriented_edges",
":",
"output",
".",
"add_edge",
"(",
"*",
"i",
")",
"elif",
"type",
"(",
"graph",
")",
"==",
"nx",
".",
"Graph",
":",
"edges",
"=",
"list",
"(",
"graph",
".",
"edges",
"(",
")",
")",
"output",
"=",
"nx",
".",
"DiGraph",
"(",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Data type not understood.\"",
")",
"res",
"=",
"[",
"]",
"for",
"idx",
",",
"(",
"a",
",",
"b",
")",
"in",
"enumerate",
"(",
"edges",
")",
":",
"weight",
"=",
"self",
".",
"predict_proba",
"(",
"df_data",
"[",
"a",
"]",
".",
"values",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
",",
"df_data",
"[",
"b",
"]",
".",
"values",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
",",
"idx",
"=",
"idx",
",",
"nb_runs",
"=",
"nb_runs",
",",
"*",
"*",
"kwargs",
")",
"if",
"weight",
">",
"0",
":",
"# a causes b",
"output",
".",
"add_edge",
"(",
"a",
",",
"b",
",",
"weight",
"=",
"weight",
")",
"else",
":",
"output",
".",
"add_edge",
"(",
"b",
",",
"a",
",",
"weight",
"=",
"abs",
"(",
"weight",
")",
")",
"if",
"printout",
"is",
"not",
"None",
":",
"res",
".",
"append",
"(",
"[",
"str",
"(",
"a",
")",
"+",
"'-'",
"+",
"str",
"(",
"b",
")",
",",
"weight",
"]",
")",
"DataFrame",
"(",
"res",
",",
"columns",
"=",
"[",
"'SampleID'",
",",
"'Predictions'",
"]",
")",
".",
"to_csv",
"(",
"printout",
",",
"index",
"=",
"False",
")",
"for",
"node",
"in",
"list",
"(",
"df_data",
".",
"columns",
".",
"values",
")",
":",
"if",
"node",
"not",
"in",
"output",
".",
"nodes",
"(",
")",
":",
"output",
".",
"add_node",
"(",
"node",
")",
"return",
"output"
] | 38.490909 | 20.872727 |
def get_request_authorization(method, resource, key, params, headers):
""" :return bytes (PY2) or string (PY2) """
if not key:
return six.b('')
content = method + "\n"
if 'Content-MD5' in headers:
content += headers['Content-MD5']
content += '\n'
if 'Content-Type' in headers:
content += headers['Content-Type']
content += "\n"
content += headers['Date'] + "\n"
content += Util.canonicalized_log_headers(headers)
content += Util.canonicalized_resource(resource, params)
return Util.hmac_sha1(content, key) | [
"def",
"get_request_authorization",
"(",
"method",
",",
"resource",
",",
"key",
",",
"params",
",",
"headers",
")",
":",
"if",
"not",
"key",
":",
"return",
"six",
".",
"b",
"(",
"''",
")",
"content",
"=",
"method",
"+",
"\"\\n\"",
"if",
"'Content-MD5'",
"in",
"headers",
":",
"content",
"+=",
"headers",
"[",
"'Content-MD5'",
"]",
"content",
"+=",
"'\\n'",
"if",
"'Content-Type'",
"in",
"headers",
":",
"content",
"+=",
"headers",
"[",
"'Content-Type'",
"]",
"content",
"+=",
"\"\\n\"",
"content",
"+=",
"headers",
"[",
"'Date'",
"]",
"+",
"\"\\n\"",
"content",
"+=",
"Util",
".",
"canonicalized_log_headers",
"(",
"headers",
")",
"content",
"+=",
"Util",
".",
"canonicalized_resource",
"(",
"resource",
",",
"params",
")",
"return",
"Util",
".",
"hmac_sha1",
"(",
"content",
",",
"key",
")"
] | 41.933333 | 11.266667 |
def run_script(self, container, instance=None, map_name=None, **kwargs):
"""
Runs a script or single command in the context of a container. By the default implementation this means creating
the container along with all of its dependencies, mounting the script path, and running the script. The result
is recorded in a dictionary per client, before the container is removed. Dependencies are not removed. For
details, see :meth:`dockermap.map.runner.script.ScriptMixin.run_script`.
:param container: Container configuration name.
:type container: unicode | str
:param map_name: Container map name.
:type map_name: unicode | str
:param instance: Instance name. Optional, if not specified runs the default instance.
:type instance: unicode | str
:param kwargs: Keyword arguments to the script runner function.
:return: Return values of the script actions with their log output and exit codes.
:return: A dictionary of client names with their log output and exit codes.
:rtype: list[dockermap.map.runner.ActionOutput]
"""
return self.run_actions('script', container, instances=instance, map_name=map_name, **kwargs) | [
"def",
"run_script",
"(",
"self",
",",
"container",
",",
"instance",
"=",
"None",
",",
"map_name",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"run_actions",
"(",
"'script'",
",",
"container",
",",
"instances",
"=",
"instance",
",",
"map_name",
"=",
"map_name",
",",
"*",
"*",
"kwargs",
")"
] | 64.736842 | 32.842105 |
def authority(self, column=None, value=None, **kwargs):
"""Provides codes and associated authorizing statutes."""
return self._resolve_call('GIC_AUTHORITY', column, value, **kwargs) | [
"def",
"authority",
"(",
"self",
",",
"column",
"=",
"None",
",",
"value",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_resolve_call",
"(",
"'GIC_AUTHORITY'",
",",
"column",
",",
"value",
",",
"*",
"*",
"kwargs",
")"
] | 65 | 16.666667 |
def parse_synset(self, offset=None, debug=False):
"""Parses Synset from file
"""
if False:
pass
else:
# WORD_INSTANCE
def _word_instance():
_synset(True)
# WORD_MEANING
def _synset(pn=False):
if not pn:
self.synset = Synset()
self.pn = False
else:
self.synset = WordInstance()
self.pn = True
if self.DRN:
self.synset.number = self.DRN
self.targetType = None
def _variants():
self.synset.variants = Variants()
def _literal():
a = Variant()
self.synset.variants.append(a)
self.synset.variants[-1].literal = self.fieldValue
def _target_literal():
self.target_synset.variants.append(Variant())
self.target_synset.variants[-1].literal = self.fieldValue
def _sense():
self.synset.variants[-1].sense = self.fieldValue
def _status():
self.noQuotes = True
try:
self.synset.variants[-1].status = as_unicode(self.fieldValue)
except:
self.synset.variants[-1].status = as_unicode(str(self.fieldValue))
self.noQuotes = False
def _target_sense():
self.target_synset.variants[-1].sense = self.fieldValue
if self.targetType == 'internal':
self.synset.internalLinks[
-1].target_concept = self.target_synset
elif self.targetType == 'ili':
self.synset.eqLinks[-1].target_concept = self.target_synset
elif self.targetType == 'pv':
self.synset.propertyValues[-1].value = self.target_synset
else:
print ('BOOOOOOOOO!!') # Error TODO
def _gloss():
self.synset.variants[-1].gloss = self.fieldValue
self.synset.definition = self.fieldValue # ADDED BY KOM
def _translations():
self.synset.variants[-1].translations = Translations()
def _translation():
self.synset.variants[-1].translations.append(
Translation(
language=self.fieldValue.split(':')[0],
translation_value = self.fieldValue.split(':')[1])
)
def _examples():
self.synset.variants[-1].examples = Examples()
def _usage_labels():
self.synset.variants[-1].usage_labels = Usage_Labels()
def _external_info():
self.synset.variants[-1].externalInfo = External_Info()
def _example():
self.synset.variants[-1].examples.append(
Example(self.fieldValue)
)
def _usage_label():
self.synset.variants[
-1].usage_labels.append(
Usage_Label(name=self.fieldValue)
)
def _usage_label_value():
self.synset.variants[
-1].usage_labels[-1].usage_label_value = self.fieldValue
def _source_id():
if self.targetType == 'internal':
self.synset.internalLinks[-1].source_id = self.fieldValue
# self.synset.internalLinks[-1].source_ids.append(
# Relation_Source_Id(number=self.fieldValue))
elif self.targetType == 'ili':
self.synset.eqLinks[-1].source_id = self.fieldValue
# self.synset.eqLinks[-1].source_ids.append(
# Relation_Source_Id(number=self.fieldValue))
else:
if self.synset.variants[-1].external_info:
self.synset.variants[
-1].external_info.source_ids.append(
Source_Id(number=self.fieldValue)
)
else:
self.synset.variants[-1].external_info = External_Info()
self.synset.variants[
-1].external_info.source_ids.append(
Source_Id(number=self.fieldValue)
)
def _corpus_id():
if self.targetType == 'internal': # not needed
self.synset.internalLinks[-1].corpus_ids.append(
Relation_Corpus_Id(number=self.fieldValue))
else:
if self.synset.variants[-1].external_info:
self.synset.variants[
-1].external_info.corpus_ids.append(
Corpus_Id(number=self.fieldValue)
)
else:
self.synset.variants[-1].external_info = External_Info()
self.synset.variants[
-1].external_info.corpus_ids.append(
Corpus_Id(number=self.fieldValue)
)
def _frequency():
self.synset.variants[
-1].external_info.corpus_ids[-1].frequency = self.fieldValue
def _text_key():
self.synset.variants[
-1].external_info.source_ids[-1].text_key = self.fieldValue
def _number_key():
self.synset.variants[
-1].external_info.source_ids[
-1].number_key = self.fieldValue
def _pos():
self.synset.pos = self.fieldValue
# INTERNAL_LINKS
def _target_concept():
self.target_synset = Synset()
self.target_synset.variants = Variants()
if self.levelNumber == 3: # and self.fieldValue:
self.target_synset.number = int(self.fieldValue or 0)
def _target_pos():
self.target_synset.pos = self.fieldValue
def _internal_links():
self.synset.internalLinks = InternalLinks()
self.targetType = 'internal'
def _relation():
if self.targetType == 'internal':
self.synset.internalLinks.append(Relation())
self.synset.internalLinks[-1].name = self.fieldValue
elif self.targetType == 'ili':
self.synset.eqLinks.append(EqLink())
self.synset.eqLinks[-1].name = self.fieldValue
else:
print ('BOOOOOOOOO!!') # Error TODO
def _features():
if self.targetType == 'internal':
self.synset.internalLinks[-1].features = Features()
else:
self.synset.variants[-1].features = Features()
self.synset.variants[-1].features.append(Feature())
def _feature():
self.synset.variants[-1].features[-1].name = self.fieldValue
def _feature_value():
self.synset.variants[
-1].features[-1].featureValue = self.fieldValue
def _reversed():
self.synset.internalLinks[-1].features.append(Feature())
self.synset.internalLinks[-1].features[-1].name = self.fieldTag
self.synset.internalLinks[-1].features[-1].featureValue = True
def _variant_to_variant():
self.synset.internalLinks[-1].features.append(Feature())
self.synset.internalLinks[-1].features[-1].name = self.fieldTag
def _source_variant():
self.variant_to_variant_source = self.fieldValue
def _target_variant():
self.variant_to_variant_target = self.fieldValue
self.synset.internalLinks[
-1].features[-1].featureValue = (
self.variant_to_variant_source,
self.variant_to_variant_target)
# EQ_LINKS
def _eq_links():
self.synset.eqLinks = EqLinks()
self.targetType = 'ili'
def _wn_offset():
self.target_synset.wordnet_offset = self.fieldValue
self.synset.eqLinks[-1].target_concept = self.target_synset
def _add_on_id():
self.target_synset.add_on_id = self.fieldValue
self.synset.eqLinks[-1].target_concept = self.target_synset
# PROPERTIES
def _properties():
self.synset.properties = Properties()
def _name():
if self.pn:
self.synset.propertyValues.append(
PropertyValue(name=self.fieldValue))
else:
self.synset.properties.append(Property(self.fieldValue))
# PROPERTY_VALUES
def _property_values():
self.synset.propertyValues = PropertyValues()
def _property_value():
self.synset.propertyValues[-1].value = self.fieldValue
self.targetType = 'pv'
def _property_wm():
pass
rulez = {
(0,'WORD_MEANING'): _synset,
(0,'WORD_INSTANCE'): _word_instance,
(1,'PART_OF_SPEECH'): _pos,
(1,'VARIANTS'): _variants,
(2,'LITERAL'): _literal,
(3,'SENSE'): _sense,
(3,'STATUS'): _status,
(3,'DEFINITION'): _gloss,
(3,'EXAMPLES'): _examples,
(3,'USAGE_LABELS'): _usage_labels,
(4,'USAGE_LABEL'): _usage_label,
(5,'USAGE_LABEL_VALUE'): _usage_label_value,
(4,'EXAMPLE'): _example,
(3,'TRANSLATIONS'): _translations,
(4,'TRANSLATION'): _translation,
(3,'EXTERNAL_INFO'): _external_info,
(4,'SOURCE_ID'): _source_id,
(4,'CORPUS_ID'): _corpus_id,
(5,'FREQUENCY'): _frequency,
(5,'TEXT_KEY'): _text_key,
(5,'NUMBER_KEY'): _number_key,
(1,'INTERNAL_LINKS'): _internal_links,
(2,'RELATION'): _relation,
(3,'TARGET_CONCEPT'): _target_concept,
(4,'PART_OF_SPEECH'): _target_pos,
(4,'LITERAL'): _target_literal,
(5,'SENSE'): _target_sense,
(3,'FEATURES'): _features,
(4,'FEATURE'): _feature,
(5,'FEATURE_VALUE'): _feature_value,
(4,'REVERSED'): _reversed,
(4,'VARIANT_TO_VARIANT'): _variant_to_variant,
(5,'SOURCE_VARIANT'): _source_variant,
(5,'TARGET_VARIANT'): _target_variant,
(3,'SOURCE_ID'): _source_id,
(1,'EQ_LINKS'): _eq_links,
(2,'EQ_RELATION'): _relation,
(3,'TARGET_ILI'): _target_concept,
(4,'WORDNET_OFFSET'): _wn_offset,
(4,'ADD_ON_ID'): _add_on_id,
(1,'PROPERTIES'): _properties,
(1,'PROPERTY_VALUES'): _property_values,
(2,'NAME'): _name,
(3,'VALUE'): _property_value,
(3,'VALUE_AS_TEXT'): _property_value,
(3,'VALUE_AS_WORD_MEANING'): _target_concept,
}
if not offset:
offset = self.milestone
else:
self.milestone=offset
if self.file:
self.file.seek(offset,0)
line = 'X'
ili = False
var = False
while line.strip():
offset = self.file.tell()
self.file.seek(offset,0)
line = as_unicode(self.file.readline(), self.encoding).strip()
if debug:
print (line.encode('utf-8'))
self.parse_line(line)
self.noQuotes = None
select = (self.levelNumber,self.fieldTag)
if select in rulez.keys():
rulez[select]()
else:
if line:
print (self.synset.polarisText)
raise ParseError("No parsing rule for '%s'" % line)
return self.synset | [
"def",
"parse_synset",
"(",
"self",
",",
"offset",
"=",
"None",
",",
"debug",
"=",
"False",
")",
":",
"if",
"False",
":",
"pass",
"else",
":",
"# WORD_INSTANCE",
"def",
"_word_instance",
"(",
")",
":",
"_synset",
"(",
"True",
")",
"# WORD_MEANING",
"def",
"_synset",
"(",
"pn",
"=",
"False",
")",
":",
"if",
"not",
"pn",
":",
"self",
".",
"synset",
"=",
"Synset",
"(",
")",
"self",
".",
"pn",
"=",
"False",
"else",
":",
"self",
".",
"synset",
"=",
"WordInstance",
"(",
")",
"self",
".",
"pn",
"=",
"True",
"if",
"self",
".",
"DRN",
":",
"self",
".",
"synset",
".",
"number",
"=",
"self",
".",
"DRN",
"self",
".",
"targetType",
"=",
"None",
"def",
"_variants",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"=",
"Variants",
"(",
")",
"def",
"_literal",
"(",
")",
":",
"a",
"=",
"Variant",
"(",
")",
"self",
".",
"synset",
".",
"variants",
".",
"append",
"(",
"a",
")",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"literal",
"=",
"self",
".",
"fieldValue",
"def",
"_target_literal",
"(",
")",
":",
"self",
".",
"target_synset",
".",
"variants",
".",
"append",
"(",
"Variant",
"(",
")",
")",
"self",
".",
"target_synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"literal",
"=",
"self",
".",
"fieldValue",
"def",
"_sense",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"sense",
"=",
"self",
".",
"fieldValue",
"def",
"_status",
"(",
")",
":",
"self",
".",
"noQuotes",
"=",
"True",
"try",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"status",
"=",
"as_unicode",
"(",
"self",
".",
"fieldValue",
")",
"except",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"status",
"=",
"as_unicode",
"(",
"str",
"(",
"self",
".",
"fieldValue",
")",
")",
"self",
".",
"noQuotes",
"=",
"False",
"def",
"_target_sense",
"(",
")",
":",
"self",
".",
"target_synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"sense",
"=",
"self",
".",
"fieldValue",
"if",
"self",
".",
"targetType",
"==",
"'internal'",
":",
"self",
".",
"synset",
".",
"internalLinks",
"[",
"-",
"1",
"]",
".",
"target_concept",
"=",
"self",
".",
"target_synset",
"elif",
"self",
".",
"targetType",
"==",
"'ili'",
":",
"self",
".",
"synset",
".",
"eqLinks",
"[",
"-",
"1",
"]",
".",
"target_concept",
"=",
"self",
".",
"target_synset",
"elif",
"self",
".",
"targetType",
"==",
"'pv'",
":",
"self",
".",
"synset",
".",
"propertyValues",
"[",
"-",
"1",
"]",
".",
"value",
"=",
"self",
".",
"target_synset",
"else",
":",
"print",
"(",
"'BOOOOOOOOO!!'",
")",
"# Error TODO",
"def",
"_gloss",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"gloss",
"=",
"self",
".",
"fieldValue",
"self",
".",
"synset",
".",
"definition",
"=",
"self",
".",
"fieldValue",
"# ADDED BY KOM",
"def",
"_translations",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"translations",
"=",
"Translations",
"(",
")",
"def",
"_translation",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"translations",
".",
"append",
"(",
"Translation",
"(",
"language",
"=",
"self",
".",
"fieldValue",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
",",
"translation_value",
"=",
"self",
".",
"fieldValue",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
")",
")",
"def",
"_examples",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"examples",
"=",
"Examples",
"(",
")",
"def",
"_usage_labels",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"usage_labels",
"=",
"Usage_Labels",
"(",
")",
"def",
"_external_info",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"externalInfo",
"=",
"External_Info",
"(",
")",
"def",
"_example",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"examples",
".",
"append",
"(",
"Example",
"(",
"self",
".",
"fieldValue",
")",
")",
"def",
"_usage_label",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"usage_labels",
".",
"append",
"(",
"Usage_Label",
"(",
"name",
"=",
"self",
".",
"fieldValue",
")",
")",
"def",
"_usage_label_value",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"usage_labels",
"[",
"-",
"1",
"]",
".",
"usage_label_value",
"=",
"self",
".",
"fieldValue",
"def",
"_source_id",
"(",
")",
":",
"if",
"self",
".",
"targetType",
"==",
"'internal'",
":",
"self",
".",
"synset",
".",
"internalLinks",
"[",
"-",
"1",
"]",
".",
"source_id",
"=",
"self",
".",
"fieldValue",
"# self.synset.internalLinks[-1].source_ids.append(",
"# Relation_Source_Id(number=self.fieldValue))",
"elif",
"self",
".",
"targetType",
"==",
"'ili'",
":",
"self",
".",
"synset",
".",
"eqLinks",
"[",
"-",
"1",
"]",
".",
"source_id",
"=",
"self",
".",
"fieldValue",
"# self.synset.eqLinks[-1].source_ids.append(",
"# Relation_Source_Id(number=self.fieldValue))",
"else",
":",
"if",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"external_info",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"external_info",
".",
"source_ids",
".",
"append",
"(",
"Source_Id",
"(",
"number",
"=",
"self",
".",
"fieldValue",
")",
")",
"else",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"external_info",
"=",
"External_Info",
"(",
")",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"external_info",
".",
"source_ids",
".",
"append",
"(",
"Source_Id",
"(",
"number",
"=",
"self",
".",
"fieldValue",
")",
")",
"def",
"_corpus_id",
"(",
")",
":",
"if",
"self",
".",
"targetType",
"==",
"'internal'",
":",
"# not needed",
"self",
".",
"synset",
".",
"internalLinks",
"[",
"-",
"1",
"]",
".",
"corpus_ids",
".",
"append",
"(",
"Relation_Corpus_Id",
"(",
"number",
"=",
"self",
".",
"fieldValue",
")",
")",
"else",
":",
"if",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"external_info",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"external_info",
".",
"corpus_ids",
".",
"append",
"(",
"Corpus_Id",
"(",
"number",
"=",
"self",
".",
"fieldValue",
")",
")",
"else",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"external_info",
"=",
"External_Info",
"(",
")",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"external_info",
".",
"corpus_ids",
".",
"append",
"(",
"Corpus_Id",
"(",
"number",
"=",
"self",
".",
"fieldValue",
")",
")",
"def",
"_frequency",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"external_info",
".",
"corpus_ids",
"[",
"-",
"1",
"]",
".",
"frequency",
"=",
"self",
".",
"fieldValue",
"def",
"_text_key",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"external_info",
".",
"source_ids",
"[",
"-",
"1",
"]",
".",
"text_key",
"=",
"self",
".",
"fieldValue",
"def",
"_number_key",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"external_info",
".",
"source_ids",
"[",
"-",
"1",
"]",
".",
"number_key",
"=",
"self",
".",
"fieldValue",
"def",
"_pos",
"(",
")",
":",
"self",
".",
"synset",
".",
"pos",
"=",
"self",
".",
"fieldValue",
"# INTERNAL_LINKS",
"def",
"_target_concept",
"(",
")",
":",
"self",
".",
"target_synset",
"=",
"Synset",
"(",
")",
"self",
".",
"target_synset",
".",
"variants",
"=",
"Variants",
"(",
")",
"if",
"self",
".",
"levelNumber",
"==",
"3",
":",
"# and self.fieldValue:",
"self",
".",
"target_synset",
".",
"number",
"=",
"int",
"(",
"self",
".",
"fieldValue",
"or",
"0",
")",
"def",
"_target_pos",
"(",
")",
":",
"self",
".",
"target_synset",
".",
"pos",
"=",
"self",
".",
"fieldValue",
"def",
"_internal_links",
"(",
")",
":",
"self",
".",
"synset",
".",
"internalLinks",
"=",
"InternalLinks",
"(",
")",
"self",
".",
"targetType",
"=",
"'internal'",
"def",
"_relation",
"(",
")",
":",
"if",
"self",
".",
"targetType",
"==",
"'internal'",
":",
"self",
".",
"synset",
".",
"internalLinks",
".",
"append",
"(",
"Relation",
"(",
")",
")",
"self",
".",
"synset",
".",
"internalLinks",
"[",
"-",
"1",
"]",
".",
"name",
"=",
"self",
".",
"fieldValue",
"elif",
"self",
".",
"targetType",
"==",
"'ili'",
":",
"self",
".",
"synset",
".",
"eqLinks",
".",
"append",
"(",
"EqLink",
"(",
")",
")",
"self",
".",
"synset",
".",
"eqLinks",
"[",
"-",
"1",
"]",
".",
"name",
"=",
"self",
".",
"fieldValue",
"else",
":",
"print",
"(",
"'BOOOOOOOOO!!'",
")",
"# Error TODO",
"def",
"_features",
"(",
")",
":",
"if",
"self",
".",
"targetType",
"==",
"'internal'",
":",
"self",
".",
"synset",
".",
"internalLinks",
"[",
"-",
"1",
"]",
".",
"features",
"=",
"Features",
"(",
")",
"else",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"features",
"=",
"Features",
"(",
")",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"features",
".",
"append",
"(",
"Feature",
"(",
")",
")",
"def",
"_feature",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"features",
"[",
"-",
"1",
"]",
".",
"name",
"=",
"self",
".",
"fieldValue",
"def",
"_feature_value",
"(",
")",
":",
"self",
".",
"synset",
".",
"variants",
"[",
"-",
"1",
"]",
".",
"features",
"[",
"-",
"1",
"]",
".",
"featureValue",
"=",
"self",
".",
"fieldValue",
"def",
"_reversed",
"(",
")",
":",
"self",
".",
"synset",
".",
"internalLinks",
"[",
"-",
"1",
"]",
".",
"features",
".",
"append",
"(",
"Feature",
"(",
")",
")",
"self",
".",
"synset",
".",
"internalLinks",
"[",
"-",
"1",
"]",
".",
"features",
"[",
"-",
"1",
"]",
".",
"name",
"=",
"self",
".",
"fieldTag",
"self",
".",
"synset",
".",
"internalLinks",
"[",
"-",
"1",
"]",
".",
"features",
"[",
"-",
"1",
"]",
".",
"featureValue",
"=",
"True",
"def",
"_variant_to_variant",
"(",
")",
":",
"self",
".",
"synset",
".",
"internalLinks",
"[",
"-",
"1",
"]",
".",
"features",
".",
"append",
"(",
"Feature",
"(",
")",
")",
"self",
".",
"synset",
".",
"internalLinks",
"[",
"-",
"1",
"]",
".",
"features",
"[",
"-",
"1",
"]",
".",
"name",
"=",
"self",
".",
"fieldTag",
"def",
"_source_variant",
"(",
")",
":",
"self",
".",
"variant_to_variant_source",
"=",
"self",
".",
"fieldValue",
"def",
"_target_variant",
"(",
")",
":",
"self",
".",
"variant_to_variant_target",
"=",
"self",
".",
"fieldValue",
"self",
".",
"synset",
".",
"internalLinks",
"[",
"-",
"1",
"]",
".",
"features",
"[",
"-",
"1",
"]",
".",
"featureValue",
"=",
"(",
"self",
".",
"variant_to_variant_source",
",",
"self",
".",
"variant_to_variant_target",
")",
"# EQ_LINKS",
"def",
"_eq_links",
"(",
")",
":",
"self",
".",
"synset",
".",
"eqLinks",
"=",
"EqLinks",
"(",
")",
"self",
".",
"targetType",
"=",
"'ili'",
"def",
"_wn_offset",
"(",
")",
":",
"self",
".",
"target_synset",
".",
"wordnet_offset",
"=",
"self",
".",
"fieldValue",
"self",
".",
"synset",
".",
"eqLinks",
"[",
"-",
"1",
"]",
".",
"target_concept",
"=",
"self",
".",
"target_synset",
"def",
"_add_on_id",
"(",
")",
":",
"self",
".",
"target_synset",
".",
"add_on_id",
"=",
"self",
".",
"fieldValue",
"self",
".",
"synset",
".",
"eqLinks",
"[",
"-",
"1",
"]",
".",
"target_concept",
"=",
"self",
".",
"target_synset",
"# PROPERTIES",
"def",
"_properties",
"(",
")",
":",
"self",
".",
"synset",
".",
"properties",
"=",
"Properties",
"(",
")",
"def",
"_name",
"(",
")",
":",
"if",
"self",
".",
"pn",
":",
"self",
".",
"synset",
".",
"propertyValues",
".",
"append",
"(",
"PropertyValue",
"(",
"name",
"=",
"self",
".",
"fieldValue",
")",
")",
"else",
":",
"self",
".",
"synset",
".",
"properties",
".",
"append",
"(",
"Property",
"(",
"self",
".",
"fieldValue",
")",
")",
"# PROPERTY_VALUES",
"def",
"_property_values",
"(",
")",
":",
"self",
".",
"synset",
".",
"propertyValues",
"=",
"PropertyValues",
"(",
")",
"def",
"_property_value",
"(",
")",
":",
"self",
".",
"synset",
".",
"propertyValues",
"[",
"-",
"1",
"]",
".",
"value",
"=",
"self",
".",
"fieldValue",
"self",
".",
"targetType",
"=",
"'pv'",
"def",
"_property_wm",
"(",
")",
":",
"pass",
"rulez",
"=",
"{",
"(",
"0",
",",
"'WORD_MEANING'",
")",
":",
"_synset",
",",
"(",
"0",
",",
"'WORD_INSTANCE'",
")",
":",
"_word_instance",
",",
"(",
"1",
",",
"'PART_OF_SPEECH'",
")",
":",
"_pos",
",",
"(",
"1",
",",
"'VARIANTS'",
")",
":",
"_variants",
",",
"(",
"2",
",",
"'LITERAL'",
")",
":",
"_literal",
",",
"(",
"3",
",",
"'SENSE'",
")",
":",
"_sense",
",",
"(",
"3",
",",
"'STATUS'",
")",
":",
"_status",
",",
"(",
"3",
",",
"'DEFINITION'",
")",
":",
"_gloss",
",",
"(",
"3",
",",
"'EXAMPLES'",
")",
":",
"_examples",
",",
"(",
"3",
",",
"'USAGE_LABELS'",
")",
":",
"_usage_labels",
",",
"(",
"4",
",",
"'USAGE_LABEL'",
")",
":",
"_usage_label",
",",
"(",
"5",
",",
"'USAGE_LABEL_VALUE'",
")",
":",
"_usage_label_value",
",",
"(",
"4",
",",
"'EXAMPLE'",
")",
":",
"_example",
",",
"(",
"3",
",",
"'TRANSLATIONS'",
")",
":",
"_translations",
",",
"(",
"4",
",",
"'TRANSLATION'",
")",
":",
"_translation",
",",
"(",
"3",
",",
"'EXTERNAL_INFO'",
")",
":",
"_external_info",
",",
"(",
"4",
",",
"'SOURCE_ID'",
")",
":",
"_source_id",
",",
"(",
"4",
",",
"'CORPUS_ID'",
")",
":",
"_corpus_id",
",",
"(",
"5",
",",
"'FREQUENCY'",
")",
":",
"_frequency",
",",
"(",
"5",
",",
"'TEXT_KEY'",
")",
":",
"_text_key",
",",
"(",
"5",
",",
"'NUMBER_KEY'",
")",
":",
"_number_key",
",",
"(",
"1",
",",
"'INTERNAL_LINKS'",
")",
":",
"_internal_links",
",",
"(",
"2",
",",
"'RELATION'",
")",
":",
"_relation",
",",
"(",
"3",
",",
"'TARGET_CONCEPT'",
")",
":",
"_target_concept",
",",
"(",
"4",
",",
"'PART_OF_SPEECH'",
")",
":",
"_target_pos",
",",
"(",
"4",
",",
"'LITERAL'",
")",
":",
"_target_literal",
",",
"(",
"5",
",",
"'SENSE'",
")",
":",
"_target_sense",
",",
"(",
"3",
",",
"'FEATURES'",
")",
":",
"_features",
",",
"(",
"4",
",",
"'FEATURE'",
")",
":",
"_feature",
",",
"(",
"5",
",",
"'FEATURE_VALUE'",
")",
":",
"_feature_value",
",",
"(",
"4",
",",
"'REVERSED'",
")",
":",
"_reversed",
",",
"(",
"4",
",",
"'VARIANT_TO_VARIANT'",
")",
":",
"_variant_to_variant",
",",
"(",
"5",
",",
"'SOURCE_VARIANT'",
")",
":",
"_source_variant",
",",
"(",
"5",
",",
"'TARGET_VARIANT'",
")",
":",
"_target_variant",
",",
"(",
"3",
",",
"'SOURCE_ID'",
")",
":",
"_source_id",
",",
"(",
"1",
",",
"'EQ_LINKS'",
")",
":",
"_eq_links",
",",
"(",
"2",
",",
"'EQ_RELATION'",
")",
":",
"_relation",
",",
"(",
"3",
",",
"'TARGET_ILI'",
")",
":",
"_target_concept",
",",
"(",
"4",
",",
"'WORDNET_OFFSET'",
")",
":",
"_wn_offset",
",",
"(",
"4",
",",
"'ADD_ON_ID'",
")",
":",
"_add_on_id",
",",
"(",
"1",
",",
"'PROPERTIES'",
")",
":",
"_properties",
",",
"(",
"1",
",",
"'PROPERTY_VALUES'",
")",
":",
"_property_values",
",",
"(",
"2",
",",
"'NAME'",
")",
":",
"_name",
",",
"(",
"3",
",",
"'VALUE'",
")",
":",
"_property_value",
",",
"(",
"3",
",",
"'VALUE_AS_TEXT'",
")",
":",
"_property_value",
",",
"(",
"3",
",",
"'VALUE_AS_WORD_MEANING'",
")",
":",
"_target_concept",
",",
"}",
"if",
"not",
"offset",
":",
"offset",
"=",
"self",
".",
"milestone",
"else",
":",
"self",
".",
"milestone",
"=",
"offset",
"if",
"self",
".",
"file",
":",
"self",
".",
"file",
".",
"seek",
"(",
"offset",
",",
"0",
")",
"line",
"=",
"'X'",
"ili",
"=",
"False",
"var",
"=",
"False",
"while",
"line",
".",
"strip",
"(",
")",
":",
"offset",
"=",
"self",
".",
"file",
".",
"tell",
"(",
")",
"self",
".",
"file",
".",
"seek",
"(",
"offset",
",",
"0",
")",
"line",
"=",
"as_unicode",
"(",
"self",
".",
"file",
".",
"readline",
"(",
")",
",",
"self",
".",
"encoding",
")",
".",
"strip",
"(",
")",
"if",
"debug",
":",
"print",
"(",
"line",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"self",
".",
"parse_line",
"(",
"line",
")",
"self",
".",
"noQuotes",
"=",
"None",
"select",
"=",
"(",
"self",
".",
"levelNumber",
",",
"self",
".",
"fieldTag",
")",
"if",
"select",
"in",
"rulez",
".",
"keys",
"(",
")",
":",
"rulez",
"[",
"select",
"]",
"(",
")",
"else",
":",
"if",
"line",
":",
"print",
"(",
"self",
".",
"synset",
".",
"polarisText",
")",
"raise",
"ParseError",
"(",
"\"No parsing rule for '%s'\"",
"%",
"line",
")",
"return",
"self",
".",
"synset"
] | 38.751553 | 18.742236 |
def handles(self, src, path):
"""Must return a list of files that this handler will produce after
successfully processing `path`. If the current handler does not operate
on `path`, None should be returned. If instead `path` should not
produce output by itself (but should be compiled nonetheless), then an
empty list should be returned. This function will be called every time
the file identified by `path` changes (only the modification time is
taken into account; `src` is provided for convenience; it allows direct
access to the file's contents.."""
if not pathtools.patterns.match_path(path, self.patterns,
self.ignore_patterns):
return None
return self._outputs(src, path) | [
"def",
"handles",
"(",
"self",
",",
"src",
",",
"path",
")",
":",
"if",
"not",
"pathtools",
".",
"patterns",
".",
"match_path",
"(",
"path",
",",
"self",
".",
"patterns",
",",
"self",
".",
"ignore_patterns",
")",
":",
"return",
"None",
"return",
"self",
".",
"_outputs",
"(",
"src",
",",
"path",
")"
] | 57.285714 | 24.5 |
def append(self, observation, action, reward, terminal, training=True):
"""Append a reward to the memory
# Argument
observation (dict): Observation returned by environment
action (int): Action taken to obtain this observation
reward (float): Reward obtained by taking this action
terminal (boolean): Is the state terminal
"""
super(EpisodeParameterMemory, self).append(observation, action, reward, terminal, training=training)
if training:
self.intermediate_rewards.append(reward) | [
"def",
"append",
"(",
"self",
",",
"observation",
",",
"action",
",",
"reward",
",",
"terminal",
",",
"training",
"=",
"True",
")",
":",
"super",
"(",
"EpisodeParameterMemory",
",",
"self",
")",
".",
"append",
"(",
"observation",
",",
"action",
",",
"reward",
",",
"terminal",
",",
"training",
"=",
"training",
")",
"if",
"training",
":",
"self",
".",
"intermediate_rewards",
".",
"append",
"(",
"reward",
")"
] | 47.5 | 23.583333 |
def credit_note(request, note_id, access_code=None):
''' Displays a credit note.
If ``request`` is a ``POST`` request, forms for applying or refunding
a credit note will be processed.
This view requires a login, and the logged in user must be staff.
Arguments:
note_id (castable to int): The ID of the credit note to view.
Returns:
render or redirect:
If the "apply to invoice" form is correctly processed, redirect to
that invoice, otherwise, render ``registration/credit_note.html``
with the following data::
{
"credit_note": models.commerce.CreditNote(),
"apply_form": form, # A form for applying credit note
# to an invoice.
"refund_form": form, # A form for applying a *manual*
# refund of the credit note.
"cancellation_fee_form" : form, # A form for generating an
# invoice with a
# cancellation fee
}
'''
note_id = int(note_id)
current_note = CreditNoteController.for_id_or_404(note_id)
apply_form = forms.ApplyCreditNoteForm(
current_note.credit_note.invoice.user,
request.POST or None,
prefix="apply_note"
)
refund_form = forms.ManualCreditNoteRefundForm(
request.POST or None,
prefix="refund_note"
)
cancellation_fee_form = forms.CancellationFeeForm(
request.POST or None,
prefix="cancellation_fee"
)
if request.POST and apply_form.is_valid():
inv_id = apply_form.cleaned_data["invoice"]
invoice = commerce.Invoice.objects.get(pk=inv_id)
current_note.apply_to_invoice(invoice)
messages.success(
request,
"Applied credit note %d to invoice." % note_id,
)
return redirect("invoice", invoice.id)
elif request.POST and refund_form.is_valid():
refund_form.instance.entered_by = request.user
refund_form.instance.parent = current_note.credit_note
refund_form.save()
messages.success(
request,
"Applied manual refund to credit note."
)
refund_form = forms.ManualCreditNoteRefundForm(
prefix="refund_note",
)
elif request.POST and cancellation_fee_form.is_valid():
percentage = cancellation_fee_form.cleaned_data["percentage"]
invoice = current_note.cancellation_fee(percentage)
messages.success(
request,
"Generated cancellation fee for credit note %d." % note_id,
)
return redirect("invoice", invoice.invoice.id)
data = {
"credit_note": current_note.credit_note,
"apply_form": apply_form,
"refund_form": refund_form,
"cancellation_fee_form": cancellation_fee_form,
}
return render(request, "registrasion/credit_note.html", data) | [
"def",
"credit_note",
"(",
"request",
",",
"note_id",
",",
"access_code",
"=",
"None",
")",
":",
"note_id",
"=",
"int",
"(",
"note_id",
")",
"current_note",
"=",
"CreditNoteController",
".",
"for_id_or_404",
"(",
"note_id",
")",
"apply_form",
"=",
"forms",
".",
"ApplyCreditNoteForm",
"(",
"current_note",
".",
"credit_note",
".",
"invoice",
".",
"user",
",",
"request",
".",
"POST",
"or",
"None",
",",
"prefix",
"=",
"\"apply_note\"",
")",
"refund_form",
"=",
"forms",
".",
"ManualCreditNoteRefundForm",
"(",
"request",
".",
"POST",
"or",
"None",
",",
"prefix",
"=",
"\"refund_note\"",
")",
"cancellation_fee_form",
"=",
"forms",
".",
"CancellationFeeForm",
"(",
"request",
".",
"POST",
"or",
"None",
",",
"prefix",
"=",
"\"cancellation_fee\"",
")",
"if",
"request",
".",
"POST",
"and",
"apply_form",
".",
"is_valid",
"(",
")",
":",
"inv_id",
"=",
"apply_form",
".",
"cleaned_data",
"[",
"\"invoice\"",
"]",
"invoice",
"=",
"commerce",
".",
"Invoice",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"inv_id",
")",
"current_note",
".",
"apply_to_invoice",
"(",
"invoice",
")",
"messages",
".",
"success",
"(",
"request",
",",
"\"Applied credit note %d to invoice.\"",
"%",
"note_id",
",",
")",
"return",
"redirect",
"(",
"\"invoice\"",
",",
"invoice",
".",
"id",
")",
"elif",
"request",
".",
"POST",
"and",
"refund_form",
".",
"is_valid",
"(",
")",
":",
"refund_form",
".",
"instance",
".",
"entered_by",
"=",
"request",
".",
"user",
"refund_form",
".",
"instance",
".",
"parent",
"=",
"current_note",
".",
"credit_note",
"refund_form",
".",
"save",
"(",
")",
"messages",
".",
"success",
"(",
"request",
",",
"\"Applied manual refund to credit note.\"",
")",
"refund_form",
"=",
"forms",
".",
"ManualCreditNoteRefundForm",
"(",
"prefix",
"=",
"\"refund_note\"",
",",
")",
"elif",
"request",
".",
"POST",
"and",
"cancellation_fee_form",
".",
"is_valid",
"(",
")",
":",
"percentage",
"=",
"cancellation_fee_form",
".",
"cleaned_data",
"[",
"\"percentage\"",
"]",
"invoice",
"=",
"current_note",
".",
"cancellation_fee",
"(",
"percentage",
")",
"messages",
".",
"success",
"(",
"request",
",",
"\"Generated cancellation fee for credit note %d.\"",
"%",
"note_id",
",",
")",
"return",
"redirect",
"(",
"\"invoice\"",
",",
"invoice",
".",
"invoice",
".",
"id",
")",
"data",
"=",
"{",
"\"credit_note\"",
":",
"current_note",
".",
"credit_note",
",",
"\"apply_form\"",
":",
"apply_form",
",",
"\"refund_form\"",
":",
"refund_form",
",",
"\"cancellation_fee_form\"",
":",
"cancellation_fee_form",
",",
"}",
"return",
"render",
"(",
"request",
",",
"\"registrasion/credit_note.html\"",
",",
"data",
")"
] | 34.227273 | 23.113636 |
def pluck(obj, selector, default=None, skipmissing=True):
"""Alternative implementation of `plucks` that accepts more complex
selectors. It's a wrapper around `pluckable`, so a `selector` can be any
valid Python expression comprising attribute getters (``.attr``) and item
getters (``[1, 4:8, "key"]``).
Example:
pluck(obj, "users[2:5, 10:15].name.first")
equal to:
pluckable(obj).users[2:5, 10:15].name.first.value
"""
if not selector:
return obj
if selector[0] != '[':
selector = '.%s' % selector
wrapped_obj = pluckable(obj, default=default, skipmissing=skipmissing, inplace=True)
return eval("wrapped_obj%s.value" % selector) | [
"def",
"pluck",
"(",
"obj",
",",
"selector",
",",
"default",
"=",
"None",
",",
"skipmissing",
"=",
"True",
")",
":",
"if",
"not",
"selector",
":",
"return",
"obj",
"if",
"selector",
"[",
"0",
"]",
"!=",
"'['",
":",
"selector",
"=",
"'.%s'",
"%",
"selector",
"wrapped_obj",
"=",
"pluckable",
"(",
"obj",
",",
"default",
"=",
"default",
",",
"skipmissing",
"=",
"skipmissing",
",",
"inplace",
"=",
"True",
")",
"return",
"eval",
"(",
"\"wrapped_obj%s.value\"",
"%",
"selector",
")"
] | 32.857143 | 23.619048 |
def K(self, parm):
""" Returns the Gram Matrix
Parameters
----------
parm : np.ndarray
Parameters for the Gram Matrix
Returns
----------
- Gram Matrix (np.ndarray)
"""
return ARD_K_matrix(self.X, parm) + np.identity(self.X.shape[0])*(10**-10) | [
"def",
"K",
"(",
"self",
",",
"parm",
")",
":",
"return",
"ARD_K_matrix",
"(",
"self",
".",
"X",
",",
"parm",
")",
"+",
"np",
".",
"identity",
"(",
"self",
".",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"*",
"(",
"10",
"**",
"-",
"10",
")"
] | 24.307692 | 19.846154 |
def signal_stop(self, test_id=None):
"""
Set ts_end for the analysis represented by test_id
:param test_id: integer that represents the analysis
:return: test_id
"""
if test_id is None:
test_id = self._default_test_id
if self._analyses[test_id].ts_end:
return CONSTANTS.OK
self._analyses[test_id].ts_end = naarad.utils.get_standardized_timestamp('now', None)
return CONSTANTS.OK | [
"def",
"signal_stop",
"(",
"self",
",",
"test_id",
"=",
"None",
")",
":",
"if",
"test_id",
"is",
"None",
":",
"test_id",
"=",
"self",
".",
"_default_test_id",
"if",
"self",
".",
"_analyses",
"[",
"test_id",
"]",
".",
"ts_end",
":",
"return",
"CONSTANTS",
".",
"OK",
"self",
".",
"_analyses",
"[",
"test_id",
"]",
".",
"ts_end",
"=",
"naarad",
".",
"utils",
".",
"get_standardized_timestamp",
"(",
"'now'",
",",
"None",
")",
"return",
"CONSTANTS",
".",
"OK"
] | 34.583333 | 13.083333 |
def from_text(text):
"""Convert the text form of a TTL to an integer.
The BIND 8 units syntax for TTLs (e.g. '1w6d4h3m10s') is supported.
@param text: the textual TTL
@type text: string
@raises dns.ttl.BadTTL: the TTL is not well-formed
@rtype: int
"""
if text.isdigit():
total = long(text)
else:
if not text[0].isdigit():
raise BadTTL
total = 0L
current = 0L
for c in text:
if c.isdigit():
current *= 10
current += long(c)
else:
c = c.lower()
if c == 'w':
total += current * 604800L
elif c == 'd':
total += current * 86400L
elif c == 'h':
total += current * 3600L
elif c == 'm':
total += current * 60L
elif c == 's':
total += current
else:
raise BadTTL("unknown unit '%s'" % c)
current = 0
if not current == 0:
raise BadTTL("trailing integer")
if total < 0L or total > 2147483647L:
raise BadTTL("TTL should be between 0 and 2^31 - 1 (inclusive)")
return total | [
"def",
"from_text",
"(",
"text",
")",
":",
"if",
"text",
".",
"isdigit",
"(",
")",
":",
"total",
"=",
"long",
"(",
"text",
")",
"else",
":",
"if",
"not",
"text",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"raise",
"BadTTL",
"total",
"=",
"0L",
"current",
"=",
"0L",
"for",
"c",
"in",
"text",
":",
"if",
"c",
".",
"isdigit",
"(",
")",
":",
"current",
"*=",
"10",
"current",
"+=",
"long",
"(",
"c",
")",
"else",
":",
"c",
"=",
"c",
".",
"lower",
"(",
")",
"if",
"c",
"==",
"'w'",
":",
"total",
"+=",
"current",
"*",
"604800L",
"elif",
"c",
"==",
"'d'",
":",
"total",
"+=",
"current",
"*",
"86400L",
"elif",
"c",
"==",
"'h'",
":",
"total",
"+=",
"current",
"*",
"3600L",
"elif",
"c",
"==",
"'m'",
":",
"total",
"+=",
"current",
"*",
"60L",
"elif",
"c",
"==",
"'s'",
":",
"total",
"+=",
"current",
"else",
":",
"raise",
"BadTTL",
"(",
"\"unknown unit '%s'\"",
"%",
"c",
")",
"current",
"=",
"0",
"if",
"not",
"current",
"==",
"0",
":",
"raise",
"BadTTL",
"(",
"\"trailing integer\"",
")",
"if",
"total",
"<",
"0L",
"or",
"total",
">",
"2147483647L",
":",
"raise",
"BadTTL",
"(",
"\"TTL should be between 0 and 2^31 - 1 (inclusive)\"",
")",
"return",
"total"
] | 29.761905 | 15.261905 |
def wait_until_complete(job_list):
"""
Args: Accepts a list of GPJob objects
This method will not return until all GPJob objects in the list have
finished running. That us, they are either complete and have resulted in
an error state.
This method will occasionally query each job to see if it is finished.
"""
complete = [False] * len(job_list)
wait = 1
while not all(complete):
time.sleep(wait)
for i, job in enumerate(job_list):
if not complete[i]:
complete[i] = job.is_finished()
if not complete[i]:
break
wait = min(wait * 2, 10) | [
"def",
"wait_until_complete",
"(",
"job_list",
")",
":",
"complete",
"=",
"[",
"False",
"]",
"*",
"len",
"(",
"job_list",
")",
"wait",
"=",
"1",
"while",
"not",
"all",
"(",
"complete",
")",
":",
"time",
".",
"sleep",
"(",
"wait",
")",
"for",
"i",
",",
"job",
"in",
"enumerate",
"(",
"job_list",
")",
":",
"if",
"not",
"complete",
"[",
"i",
"]",
":",
"complete",
"[",
"i",
"]",
"=",
"job",
".",
"is_finished",
"(",
")",
"if",
"not",
"complete",
"[",
"i",
"]",
":",
"break",
"wait",
"=",
"min",
"(",
"wait",
"*",
"2",
",",
"10",
")"
] | 35.6 | 15.3 |
def feed(self, data):
"""
Feed data to the parser.
"""
assert isinstance(data, binary_type)
for b in iterbytes(data):
self._parser.send(int2byte(b)) | [
"def",
"feed",
"(",
"self",
",",
"data",
")",
":",
"assert",
"isinstance",
"(",
"data",
",",
"binary_type",
")",
"for",
"b",
"in",
"iterbytes",
"(",
"data",
")",
":",
"self",
".",
"_parser",
".",
"send",
"(",
"int2byte",
"(",
"b",
")",
")"
] | 27.714286 | 5.714286 |
def _parse_handler_result(self, result):
"""Parses the item(s) returned by your handler implementation.
Handlers may return a single item (payload), or a tuple that gets
passed to the Response class __init__ method of your HTTP layer.
_parse_handler_result separates the payload from the rest the tuple,
as well as providing the tuple so that it can be re-composed after
the payload has been run through the `_returns` Resource's renderer.
"""
if isinstance(result, (list, tuple)):
payload = result[0]
list_result = list(result)
else:
payload = result
list_result = [""]
return payload, list_result | [
"def",
"_parse_handler_result",
"(",
"self",
",",
"result",
")",
":",
"if",
"isinstance",
"(",
"result",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"payload",
"=",
"result",
"[",
"0",
"]",
"list_result",
"=",
"list",
"(",
"result",
")",
"else",
":",
"payload",
"=",
"result",
"list_result",
"=",
"[",
"\"\"",
"]",
"return",
"payload",
",",
"list_result"
] | 41.882353 | 18.882353 |
def cacheback(lifetime=None, fetch_on_miss=None, cache_alias=None,
job_class=None, task_options=None, **job_class_kwargs):
"""
Decorate function to cache its return value.
:lifetime: How long to cache items for
:fetch_on_miss: Whether to perform a synchronous fetch when no cached
result is found
:cache_alias: The Django cache alias to store the result into.
:job_class: The class to use for running the cache refresh job. Defaults
using the FunctionJob.
:job_class_kwargs: Any extra kwargs to pass to job_class constructor.
Useful with custom job_class implementations.
"""
if job_class is None:
job_class = FunctionJob
job = job_class(lifetime=lifetime, fetch_on_miss=fetch_on_miss,
cache_alias=cache_alias, task_options=task_options,
**job_class_kwargs)
def _wrapper(fn):
# using available_attrs to work around http://bugs.python.org/issue3445
@wraps(fn, assigned=available_attrs(fn))
def __wrapper(*args, **kwargs):
return job.get(fn, *args, **kwargs)
# Assign reference to unwrapped function so that we can access it
# later without descending into infinite regress.
__wrapper.fn = fn
# Assign reference to job so we can use the full Job API
__wrapper.job = job
return __wrapper
return _wrapper | [
"def",
"cacheback",
"(",
"lifetime",
"=",
"None",
",",
"fetch_on_miss",
"=",
"None",
",",
"cache_alias",
"=",
"None",
",",
"job_class",
"=",
"None",
",",
"task_options",
"=",
"None",
",",
"*",
"*",
"job_class_kwargs",
")",
":",
"if",
"job_class",
"is",
"None",
":",
"job_class",
"=",
"FunctionJob",
"job",
"=",
"job_class",
"(",
"lifetime",
"=",
"lifetime",
",",
"fetch_on_miss",
"=",
"fetch_on_miss",
",",
"cache_alias",
"=",
"cache_alias",
",",
"task_options",
"=",
"task_options",
",",
"*",
"*",
"job_class_kwargs",
")",
"def",
"_wrapper",
"(",
"fn",
")",
":",
"# using available_attrs to work around http://bugs.python.org/issue3445",
"@",
"wraps",
"(",
"fn",
",",
"assigned",
"=",
"available_attrs",
"(",
"fn",
")",
")",
"def",
"__wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"job",
".",
"get",
"(",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Assign reference to unwrapped function so that we can access it",
"# later without descending into infinite regress.",
"__wrapper",
".",
"fn",
"=",
"fn",
"# Assign reference to job so we can use the full Job API",
"__wrapper",
".",
"job",
"=",
"job",
"return",
"__wrapper",
"return",
"_wrapper"
] | 43.181818 | 19.545455 |
def send_message(self, message, callback):
""" send a message over the wire; callback=None indicates a safe=False call where we write and forget about it"""
if self.__callback is not None:
raise ProgrammingError('connection already in use')
if callback:
err_callback = functools.partial(callback, None)
else:
err_callback = None
# Go and update err_callback for async jobs in queue if any
for job in self.__job_queue:
# this is a dirty hack and I hate it, but there is no way of setting the correct
# err_callback during the connection time
if isinstance(job, asyncjobs.AsyncJob):
job.update_err_callback(err_callback)
if not self.__alive:
if self.__autoreconnect:
self.__connect(err_callback)
else:
raise InterfaceError('connection invalid. autoreconnect=False')
# Put the current message on the bottom of the queue
self._put_job(asyncjobs.AsyncMessage(self, message, callback), 0)
self._next_job() | [
"def",
"send_message",
"(",
"self",
",",
"message",
",",
"callback",
")",
":",
"if",
"self",
".",
"__callback",
"is",
"not",
"None",
":",
"raise",
"ProgrammingError",
"(",
"'connection already in use'",
")",
"if",
"callback",
":",
"err_callback",
"=",
"functools",
".",
"partial",
"(",
"callback",
",",
"None",
")",
"else",
":",
"err_callback",
"=",
"None",
"# Go and update err_callback for async jobs in queue if any",
"for",
"job",
"in",
"self",
".",
"__job_queue",
":",
"# this is a dirty hack and I hate it, but there is no way of setting the correct",
"# err_callback during the connection time",
"if",
"isinstance",
"(",
"job",
",",
"asyncjobs",
".",
"AsyncJob",
")",
":",
"job",
".",
"update_err_callback",
"(",
"err_callback",
")",
"if",
"not",
"self",
".",
"__alive",
":",
"if",
"self",
".",
"__autoreconnect",
":",
"self",
".",
"__connect",
"(",
"err_callback",
")",
"else",
":",
"raise",
"InterfaceError",
"(",
"'connection invalid. autoreconnect=False'",
")",
"# Put the current message on the bottom of the queue",
"self",
".",
"_put_job",
"(",
"asyncjobs",
".",
"AsyncMessage",
"(",
"self",
",",
"message",
",",
"callback",
")",
",",
"0",
")",
"self",
".",
"_next_job",
"(",
")"
] | 41.407407 | 20.62963 |
def numRegisteredForRole(self, role, includeTemporaryRegs=False):
'''
Accepts a DanceRole object and returns the number of registrations of that role.
'''
count = self.eventregistration_set.filter(cancelled=False,dropIn=False,role=role).count()
if includeTemporaryRegs:
count += self.temporaryeventregistration_set.filter(dropIn=False,role=role).exclude(
registration__expirationDate__lte=timezone.now()).count()
return count | [
"def",
"numRegisteredForRole",
"(",
"self",
",",
"role",
",",
"includeTemporaryRegs",
"=",
"False",
")",
":",
"count",
"=",
"self",
".",
"eventregistration_set",
".",
"filter",
"(",
"cancelled",
"=",
"False",
",",
"dropIn",
"=",
"False",
",",
"role",
"=",
"role",
")",
".",
"count",
"(",
")",
"if",
"includeTemporaryRegs",
":",
"count",
"+=",
"self",
".",
"temporaryeventregistration_set",
".",
"filter",
"(",
"dropIn",
"=",
"False",
",",
"role",
"=",
"role",
")",
".",
"exclude",
"(",
"registration__expirationDate__lte",
"=",
"timezone",
".",
"now",
"(",
")",
")",
".",
"count",
"(",
")",
"return",
"count"
] | 54.777778 | 33.888889 |
def mahalanobis_norm(self, dx):
"""compute the Mahalanobis norm that is induced by the adapted
sample distribution, covariance matrix ``C`` times ``sigma**2``,
including ``sigma_vec``. The expected Mahalanobis distance to
the sample mean is about ``sqrt(dimension)``.
Argument
--------
A *genotype* difference `dx`.
Example
-------
>>> import cma, numpy
>>> es = cma.CMAEvolutionStrategy(numpy.ones(10), 1)
>>> xx = numpy.random.randn(2, 10)
>>> d = es.mahalanobis_norm(es.gp.geno(xx[0]-xx[1]))
`d` is the distance "in" the true sample distribution,
sampled points have a typical distance of ``sqrt(2*es.N)``,
where ``es.N`` is the dimension, and an expected distance of
close to ``sqrt(N)`` to the sample mean. In the example,
`d` is the Euclidean distance, because C = I and sigma = 1.
"""
return sqrt(sum((self.D**-1. * np.dot(self.B.T, dx / self.sigma_vec))**2)) / self.sigma | [
"def",
"mahalanobis_norm",
"(",
"self",
",",
"dx",
")",
":",
"return",
"sqrt",
"(",
"sum",
"(",
"(",
"self",
".",
"D",
"**",
"-",
"1.",
"*",
"np",
".",
"dot",
"(",
"self",
".",
"B",
".",
"T",
",",
"dx",
"/",
"self",
".",
"sigma_vec",
")",
")",
"**",
"2",
")",
")",
"/",
"self",
".",
"sigma"
] | 40.76 | 23.2 |
def random_card(cards, remove=False):
"""
Returns a random card from the Stack. If ``remove=True``, it will
also remove the card from the deck.
:arg bool remove:
Whether or not to remove the card from the deck.
:returns:
A random Card object, from the Stack.
"""
if not remove:
return random.choice(cards)
else:
i = random.randrange(len(cards))
card = cards[i]
del cards[i]
return card | [
"def",
"random_card",
"(",
"cards",
",",
"remove",
"=",
"False",
")",
":",
"if",
"not",
"remove",
":",
"return",
"random",
".",
"choice",
"(",
"cards",
")",
"else",
":",
"i",
"=",
"random",
".",
"randrange",
"(",
"len",
"(",
"cards",
")",
")",
"card",
"=",
"cards",
"[",
"i",
"]",
"del",
"cards",
"[",
"i",
"]",
"return",
"card"
] | 24.105263 | 17.684211 |
def make_runserver(app_factory, hostname='localhost', port=5000,
use_reloader=False, use_debugger=False, use_evalex=True,
threaded=False, processes=1, static_files=None,
extra_files=None, ssl_context=None):
"""Returns an action callback that spawns a new development server.
.. versionadded:: 0.5
`static_files` and `extra_files` was added.
..versionadded:: 0.6.1
`ssl_context` was added.
:param app_factory: a function that returns a new WSGI application.
:param hostname: the default hostname the server should listen on.
:param port: the default port of the server.
:param use_reloader: the default setting for the reloader.
:param use_evalex: the default setting for the evalex flag of the debugger.
:param threaded: the default threading setting.
:param processes: the default number of processes to start.
:param static_files: optional dict of static files.
:param extra_files: optional list of extra files to track for reloading.
:param ssl_context: optional SSL context for running server in HTTPS mode.
"""
def action(hostname=('h', hostname), port=('p', port),
reloader=use_reloader, debugger=use_debugger,
evalex=use_evalex, threaded=threaded, processes=processes):
"""Start a new development server."""
from werkzeug.serving import run_simple
app = app_factory()
run_simple(hostname, port, app, reloader, debugger, evalex,
extra_files, 1, threaded, processes,
static_files=static_files, ssl_context=ssl_context)
return action | [
"def",
"make_runserver",
"(",
"app_factory",
",",
"hostname",
"=",
"'localhost'",
",",
"port",
"=",
"5000",
",",
"use_reloader",
"=",
"False",
",",
"use_debugger",
"=",
"False",
",",
"use_evalex",
"=",
"True",
",",
"threaded",
"=",
"False",
",",
"processes",
"=",
"1",
",",
"static_files",
"=",
"None",
",",
"extra_files",
"=",
"None",
",",
"ssl_context",
"=",
"None",
")",
":",
"def",
"action",
"(",
"hostname",
"=",
"(",
"'h'",
",",
"hostname",
")",
",",
"port",
"=",
"(",
"'p'",
",",
"port",
")",
",",
"reloader",
"=",
"use_reloader",
",",
"debugger",
"=",
"use_debugger",
",",
"evalex",
"=",
"use_evalex",
",",
"threaded",
"=",
"threaded",
",",
"processes",
"=",
"processes",
")",
":",
"\"\"\"Start a new development server.\"\"\"",
"from",
"werkzeug",
".",
"serving",
"import",
"run_simple",
"app",
"=",
"app_factory",
"(",
")",
"run_simple",
"(",
"hostname",
",",
"port",
",",
"app",
",",
"reloader",
",",
"debugger",
",",
"evalex",
",",
"extra_files",
",",
"1",
",",
"threaded",
",",
"processes",
",",
"static_files",
"=",
"static_files",
",",
"ssl_context",
"=",
"ssl_context",
")",
"return",
"action"
] | 49.787879 | 21.454545 |
def _run(self):
"""Run the receiver.
"""
port = broadcast_port
nameservers = []
if self._multicast_enabled:
recv = MulticastReceiver(port).settimeout(2.)
while True:
try:
recv = MulticastReceiver(port).settimeout(2.)
LOGGER.info("Receiver initialized.")
break
except IOError as err:
if err.errno == errno.ENODEV:
LOGGER.error("Receiver initialization failed "
"(no such device). "
"Trying again in %d s",
10)
time.sleep(10)
else:
raise
else:
recv = _SimpleReceiver(port)
nameservers = ["localhost"]
self._is_running = True
with Publish("address_receiver", self._port, ["addresses"],
nameservers=nameservers) as pub:
try:
while self._do_run:
try:
data, fromaddr = recv()
LOGGER.debug("data %s", data)
del fromaddr
except SocketTimeout:
if self._multicast_enabled:
LOGGER.debug("Multicast socket timed out on recv!")
continue
finally:
self._check_age(pub, min_interval=self._max_age / 20)
if self._do_heartbeat:
pub.heartbeat(min_interval=29)
msg = Message.decode(data)
name = msg.subject.split("/")[1]
if(msg.type == 'info' and
msg.subject.lower().startswith(self._subject)):
addr = msg.data["URI"]
msg.data['status'] = True
metadata = copy.copy(msg.data)
metadata["name"] = name
LOGGER.debug('receiving address %s %s %s', str(addr),
str(name), str(metadata))
if addr not in self._addresses:
LOGGER.info("nameserver: publish add '%s'",
str(msg))
pub.send(msg.encode())
self._add(addr, metadata)
finally:
self._is_running = False
recv.close() | [
"def",
"_run",
"(",
"self",
")",
":",
"port",
"=",
"broadcast_port",
"nameservers",
"=",
"[",
"]",
"if",
"self",
".",
"_multicast_enabled",
":",
"recv",
"=",
"MulticastReceiver",
"(",
"port",
")",
".",
"settimeout",
"(",
"2.",
")",
"while",
"True",
":",
"try",
":",
"recv",
"=",
"MulticastReceiver",
"(",
"port",
")",
".",
"settimeout",
"(",
"2.",
")",
"LOGGER",
".",
"info",
"(",
"\"Receiver initialized.\"",
")",
"break",
"except",
"IOError",
"as",
"err",
":",
"if",
"err",
".",
"errno",
"==",
"errno",
".",
"ENODEV",
":",
"LOGGER",
".",
"error",
"(",
"\"Receiver initialization failed \"",
"\"(no such device). \"",
"\"Trying again in %d s\"",
",",
"10",
")",
"time",
".",
"sleep",
"(",
"10",
")",
"else",
":",
"raise",
"else",
":",
"recv",
"=",
"_SimpleReceiver",
"(",
"port",
")",
"nameservers",
"=",
"[",
"\"localhost\"",
"]",
"self",
".",
"_is_running",
"=",
"True",
"with",
"Publish",
"(",
"\"address_receiver\"",
",",
"self",
".",
"_port",
",",
"[",
"\"addresses\"",
"]",
",",
"nameservers",
"=",
"nameservers",
")",
"as",
"pub",
":",
"try",
":",
"while",
"self",
".",
"_do_run",
":",
"try",
":",
"data",
",",
"fromaddr",
"=",
"recv",
"(",
")",
"LOGGER",
".",
"debug",
"(",
"\"data %s\"",
",",
"data",
")",
"del",
"fromaddr",
"except",
"SocketTimeout",
":",
"if",
"self",
".",
"_multicast_enabled",
":",
"LOGGER",
".",
"debug",
"(",
"\"Multicast socket timed out on recv!\"",
")",
"continue",
"finally",
":",
"self",
".",
"_check_age",
"(",
"pub",
",",
"min_interval",
"=",
"self",
".",
"_max_age",
"/",
"20",
")",
"if",
"self",
".",
"_do_heartbeat",
":",
"pub",
".",
"heartbeat",
"(",
"min_interval",
"=",
"29",
")",
"msg",
"=",
"Message",
".",
"decode",
"(",
"data",
")",
"name",
"=",
"msg",
".",
"subject",
".",
"split",
"(",
"\"/\"",
")",
"[",
"1",
"]",
"if",
"(",
"msg",
".",
"type",
"==",
"'info'",
"and",
"msg",
".",
"subject",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"self",
".",
"_subject",
")",
")",
":",
"addr",
"=",
"msg",
".",
"data",
"[",
"\"URI\"",
"]",
"msg",
".",
"data",
"[",
"'status'",
"]",
"=",
"True",
"metadata",
"=",
"copy",
".",
"copy",
"(",
"msg",
".",
"data",
")",
"metadata",
"[",
"\"name\"",
"]",
"=",
"name",
"LOGGER",
".",
"debug",
"(",
"'receiving address %s %s %s'",
",",
"str",
"(",
"addr",
")",
",",
"str",
"(",
"name",
")",
",",
"str",
"(",
"metadata",
")",
")",
"if",
"addr",
"not",
"in",
"self",
".",
"_addresses",
":",
"LOGGER",
".",
"info",
"(",
"\"nameserver: publish add '%s'\"",
",",
"str",
"(",
"msg",
")",
")",
"pub",
".",
"send",
"(",
"msg",
".",
"encode",
"(",
")",
")",
"self",
".",
"_add",
"(",
"addr",
",",
"metadata",
")",
"finally",
":",
"self",
".",
"_is_running",
"=",
"False",
"recv",
".",
"close",
"(",
")"
] | 42.196721 | 14.442623 |
def add_local(self, field_name, field):
"""Add a local variable in the current scope
:field_name: The field's name
:field: The field
:returns: None
"""
self._dlog("adding local '{}'".format(field_name))
field._pfp__name = field_name
# TODO do we allow clobbering of locals???
self._curr_scope["vars"][field_name] = field | [
"def",
"add_local",
"(",
"self",
",",
"field_name",
",",
"field",
")",
":",
"self",
".",
"_dlog",
"(",
"\"adding local '{}'\"",
".",
"format",
"(",
"field_name",
")",
")",
"field",
".",
"_pfp__name",
"=",
"field_name",
"# TODO do we allow clobbering of locals???",
"self",
".",
"_curr_scope",
"[",
"\"vars\"",
"]",
"[",
"field_name",
"]",
"=",
"field"
] | 31.916667 | 13.333333 |
def get_acquaintance_size(obj: Union[circuits.Circuit, ops.Operation]) -> int:
"""The maximum number of qubits to be acquainted with each other."""
if isinstance(obj, circuits.Circuit):
if not is_acquaintance_strategy(obj):
raise TypeError('not is_acquaintance_strategy(circuit)')
return max(tuple(get_acquaintance_size(op)
for op in obj.all_operations()) or (0,))
if not isinstance(obj, ops.Operation):
raise TypeError('not isinstance(obj, (Circuit, Operation))')
if not isinstance(obj, ops.GateOperation):
return 0
if isinstance(obj.gate, AcquaintanceOpportunityGate):
return len(obj.qubits)
if isinstance(obj.gate, BipartiteSwapNetworkGate):
return 2
if isinstance(obj.gate, ShiftSwapNetworkGate):
return obj.gate.acquaintance_size()
if isinstance(obj.gate, SwapNetworkGate):
if obj.gate.acquaintance_size is None:
return sum(sorted(obj.gate.part_lens)[-2:])
if (obj.gate.acquaintance_size - 1) in obj.gate.part_lens:
return obj.gate.acquaintance_size
sizer = getattr(obj.gate, '_acquaintance_size_', None)
return 0 if sizer is None else sizer(len(obj.qubits)) | [
"def",
"get_acquaintance_size",
"(",
"obj",
":",
"Union",
"[",
"circuits",
".",
"Circuit",
",",
"ops",
".",
"Operation",
"]",
")",
"->",
"int",
":",
"if",
"isinstance",
"(",
"obj",
",",
"circuits",
".",
"Circuit",
")",
":",
"if",
"not",
"is_acquaintance_strategy",
"(",
"obj",
")",
":",
"raise",
"TypeError",
"(",
"'not is_acquaintance_strategy(circuit)'",
")",
"return",
"max",
"(",
"tuple",
"(",
"get_acquaintance_size",
"(",
"op",
")",
"for",
"op",
"in",
"obj",
".",
"all_operations",
"(",
")",
")",
"or",
"(",
"0",
",",
")",
")",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"ops",
".",
"Operation",
")",
":",
"raise",
"TypeError",
"(",
"'not isinstance(obj, (Circuit, Operation))'",
")",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"ops",
".",
"GateOperation",
")",
":",
"return",
"0",
"if",
"isinstance",
"(",
"obj",
".",
"gate",
",",
"AcquaintanceOpportunityGate",
")",
":",
"return",
"len",
"(",
"obj",
".",
"qubits",
")",
"if",
"isinstance",
"(",
"obj",
".",
"gate",
",",
"BipartiteSwapNetworkGate",
")",
":",
"return",
"2",
"if",
"isinstance",
"(",
"obj",
".",
"gate",
",",
"ShiftSwapNetworkGate",
")",
":",
"return",
"obj",
".",
"gate",
".",
"acquaintance_size",
"(",
")",
"if",
"isinstance",
"(",
"obj",
".",
"gate",
",",
"SwapNetworkGate",
")",
":",
"if",
"obj",
".",
"gate",
".",
"acquaintance_size",
"is",
"None",
":",
"return",
"sum",
"(",
"sorted",
"(",
"obj",
".",
"gate",
".",
"part_lens",
")",
"[",
"-",
"2",
":",
"]",
")",
"if",
"(",
"obj",
".",
"gate",
".",
"acquaintance_size",
"-",
"1",
")",
"in",
"obj",
".",
"gate",
".",
"part_lens",
":",
"return",
"obj",
".",
"gate",
".",
"acquaintance_size",
"sizer",
"=",
"getattr",
"(",
"obj",
".",
"gate",
",",
"'_acquaintance_size_'",
",",
"None",
")",
"return",
"0",
"if",
"sizer",
"is",
"None",
"else",
"sizer",
"(",
"len",
"(",
"obj",
".",
"qubits",
")",
")"
] | 50.541667 | 14.041667 |
def process_response(self, result):
""" process a response from the API. We check the API version against
the client's to see if it's old, and give them a warning (once)
Parameters
==========
result: the result from the API
"""
if len(result) == 3:
data = result[0]
headers = result[2]
if self.HEADER_API_VERSION in headers:
api_version = headers[self.HEADER_API_VERSION]
if (not self.already_printed_version_warning
and not self.is_up_to_date(api_version)):
print('Warning: Looks like you\'re using an outdated API '
'Version, please consider updating (server ' +
api_version + ' / client ' + self.__version__ + ')')
self.already_printed_version_warning = True
return data
return result | [
"def",
"process_response",
"(",
"self",
",",
"result",
")",
":",
"if",
"len",
"(",
"result",
")",
"==",
"3",
":",
"data",
"=",
"result",
"[",
"0",
"]",
"headers",
"=",
"result",
"[",
"2",
"]",
"if",
"self",
".",
"HEADER_API_VERSION",
"in",
"headers",
":",
"api_version",
"=",
"headers",
"[",
"self",
".",
"HEADER_API_VERSION",
"]",
"if",
"(",
"not",
"self",
".",
"already_printed_version_warning",
"and",
"not",
"self",
".",
"is_up_to_date",
"(",
"api_version",
")",
")",
":",
"print",
"(",
"'Warning: Looks like you\\'re using an outdated API '",
"'Version, please consider updating (server '",
"+",
"api_version",
"+",
"' / client '",
"+",
"self",
".",
"__version__",
"+",
"')'",
")",
"self",
".",
"already_printed_version_warning",
"=",
"True",
"return",
"data",
"return",
"result"
] | 44.952381 | 18.857143 |
def update(self):
"""
Reload the keys if necessary
This is a forced update, will happen even if cache time has not elapsed.
Replaced keys will be marked as inactive and not removed.
"""
res = True # An update was successful
if self.source:
_keys = self._keys # just in case
# reread everything
self._keys = []
try:
if self.remote is False:
if self.fileformat in ["jwks", "jwk"]:
self.do_local_jwk(self.source)
elif self.fileformat == "der":
self.do_local_der(self.source, self.keytype,
self.keyusage)
else:
res = self.do_remote()
except Exception as err:
logger.error('Key bundle update failed: {}'.format(err))
self._keys = _keys # restore
return False
now = time.time()
for _key in _keys:
if _key not in self._keys:
if not _key.inactive_since: # If already marked don't mess
_key.inactive_since = now
self._keys.append(_key)
return res | [
"def",
"update",
"(",
"self",
")",
":",
"res",
"=",
"True",
"# An update was successful",
"if",
"self",
".",
"source",
":",
"_keys",
"=",
"self",
".",
"_keys",
"# just in case",
"# reread everything",
"self",
".",
"_keys",
"=",
"[",
"]",
"try",
":",
"if",
"self",
".",
"remote",
"is",
"False",
":",
"if",
"self",
".",
"fileformat",
"in",
"[",
"\"jwks\"",
",",
"\"jwk\"",
"]",
":",
"self",
".",
"do_local_jwk",
"(",
"self",
".",
"source",
")",
"elif",
"self",
".",
"fileformat",
"==",
"\"der\"",
":",
"self",
".",
"do_local_der",
"(",
"self",
".",
"source",
",",
"self",
".",
"keytype",
",",
"self",
".",
"keyusage",
")",
"else",
":",
"res",
"=",
"self",
".",
"do_remote",
"(",
")",
"except",
"Exception",
"as",
"err",
":",
"logger",
".",
"error",
"(",
"'Key bundle update failed: {}'",
".",
"format",
"(",
"err",
")",
")",
"self",
".",
"_keys",
"=",
"_keys",
"# restore",
"return",
"False",
"now",
"=",
"time",
".",
"time",
"(",
")",
"for",
"_key",
"in",
"_keys",
":",
"if",
"_key",
"not",
"in",
"self",
".",
"_keys",
":",
"if",
"not",
"_key",
".",
"inactive_since",
":",
"# If already marked don't mess",
"_key",
".",
"inactive_since",
"=",
"now",
"self",
".",
"_keys",
".",
"append",
"(",
"_key",
")",
"return",
"res"
] | 35.472222 | 17.527778 |
def Registry(address='https://index.docker.io', **kwargs):
"""
:return:
"""
registry = None
try:
try:
registry = V1(address, **kwargs)
registry.ping()
except RegistryException:
registry = V2(address, **kwargs)
registry.ping()
except OSError:
logger.warning(
'Was unable to verify certs for a registry @ {0}. '
'Will not be able to interact with it for any operations until the certs can be validated.'.format(address)
)
return registry | [
"def",
"Registry",
"(",
"address",
"=",
"'https://index.docker.io'",
",",
"*",
"*",
"kwargs",
")",
":",
"registry",
"=",
"None",
"try",
":",
"try",
":",
"registry",
"=",
"V1",
"(",
"address",
",",
"*",
"*",
"kwargs",
")",
"registry",
".",
"ping",
"(",
")",
"except",
"RegistryException",
":",
"registry",
"=",
"V2",
"(",
"address",
",",
"*",
"*",
"kwargs",
")",
"registry",
".",
"ping",
"(",
")",
"except",
"OSError",
":",
"logger",
".",
"warning",
"(",
"'Was unable to verify certs for a registry @ {0}. '",
"'Will not be able to interact with it for any operations until the certs can be validated.'",
".",
"format",
"(",
"address",
")",
")",
"return",
"registry"
] | 28.947368 | 21.052632 |
def get_commit_from_tag(self, tag: str) -> Commit:
"""
Obtain the tagged commit.
:param str tag: the tag
:return: Commit commit: the commit the tag referred to
"""
try:
selected_tag = self.repo.tags[tag]
return self.get_commit(selected_tag.commit.hexsha)
except (IndexError, AttributeError):
logger.debug('Tag %s not found', tag)
raise | [
"def",
"get_commit_from_tag",
"(",
"self",
",",
"tag",
":",
"str",
")",
"->",
"Commit",
":",
"try",
":",
"selected_tag",
"=",
"self",
".",
"repo",
".",
"tags",
"[",
"tag",
"]",
"return",
"self",
".",
"get_commit",
"(",
"selected_tag",
".",
"commit",
".",
"hexsha",
")",
"except",
"(",
"IndexError",
",",
"AttributeError",
")",
":",
"logger",
".",
"debug",
"(",
"'Tag %s not found'",
",",
"tag",
")",
"raise"
] | 32.923077 | 13.846154 |
def enter(self, path):
"""
Enters the given node. Creates it if it does not exist.
Returns the node.
"""
self.current.append(self.add(path))
return self.current[-1] | [
"def",
"enter",
"(",
"self",
",",
"path",
")",
":",
"self",
".",
"current",
".",
"append",
"(",
"self",
".",
"add",
"(",
"path",
")",
")",
"return",
"self",
".",
"current",
"[",
"-",
"1",
"]"
] | 29.428571 | 9.714286 |
def entry(self):
"""
Connects to Youtube Api and retrieves the video entry object
Return:
gdata.youtube.YouTubeVideoEntry
"""
api = Api()
api.authenticate()
return api.fetch_video(self.video_id) | [
"def",
"entry",
"(",
"self",
")",
":",
"api",
"=",
"Api",
"(",
")",
"api",
".",
"authenticate",
"(",
")",
"return",
"api",
".",
"fetch_video",
"(",
"self",
".",
"video_id",
")"
] | 25.4 | 16 |
def sign_decorated(self, data):
"""Sign a bytes-like object and return the decorated signature.
Sign a bytes-like object by signing the data using the signing
(private) key, and return a decorated signature, which includes the
last four bytes of the public key as a signature hint to go along with
the signature as an XDR DecoratedSignature object.
:param bytes data: A sequence of bytes to sign, typically a
transaction.
"""
signature = self.sign(data)
hint = self.signature_hint()
return Xdr.types.DecoratedSignature(hint, signature) | [
"def",
"sign_decorated",
"(",
"self",
",",
"data",
")",
":",
"signature",
"=",
"self",
".",
"sign",
"(",
"data",
")",
"hint",
"=",
"self",
".",
"signature_hint",
"(",
")",
"return",
"Xdr",
".",
"types",
".",
"DecoratedSignature",
"(",
"hint",
",",
"signature",
")"
] | 41.066667 | 21.466667 |
def getClassAlias(self, klass):
"""
Gets a class alias based on the supplied C{klass}. If one is not found
in the global context, one is created locally.
If you supply a string alias and the class is not registered,
L{pyamf.UnknownClassAlias} will be raised.
@param klass: A class object or string alias.
@return: The L{pyamf.ClassAlias} instance that describes C{klass}
"""
try:
return self._class_aliases[klass]
except KeyError:
pass
try:
alias = self._class_aliases[klass] = pyamf.get_class_alias(klass)
except pyamf.UnknownClassAlias:
if isinstance(klass, python.str_types):
raise
# no alias has been found yet .. check subclasses
alias = util.get_class_alias(klass) or pyamf.ClassAlias
meta = util.get_class_meta(klass)
alias = alias(klass, defer=True, **meta)
self._class_aliases[klass] = alias
return alias | [
"def",
"getClassAlias",
"(",
"self",
",",
"klass",
")",
":",
"try",
":",
"return",
"self",
".",
"_class_aliases",
"[",
"klass",
"]",
"except",
"KeyError",
":",
"pass",
"try",
":",
"alias",
"=",
"self",
".",
"_class_aliases",
"[",
"klass",
"]",
"=",
"pyamf",
".",
"get_class_alias",
"(",
"klass",
")",
"except",
"pyamf",
".",
"UnknownClassAlias",
":",
"if",
"isinstance",
"(",
"klass",
",",
"python",
".",
"str_types",
")",
":",
"raise",
"# no alias has been found yet .. check subclasses",
"alias",
"=",
"util",
".",
"get_class_alias",
"(",
"klass",
")",
"or",
"pyamf",
".",
"ClassAlias",
"meta",
"=",
"util",
".",
"get_class_meta",
"(",
"klass",
")",
"alias",
"=",
"alias",
"(",
"klass",
",",
"defer",
"=",
"True",
",",
"*",
"*",
"meta",
")",
"self",
".",
"_class_aliases",
"[",
"klass",
"]",
"=",
"alias",
"return",
"alias"
] | 33.933333 | 21.533333 |
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!') | [
"def",
"login",
"(",
"self",
")",
":",
"auth_data",
"=",
"dict",
"(",
")",
"auth_data",
"[",
"'apikey'",
"]",
"=",
"self",
".",
"api_key",
"auth_data",
"[",
"'username'",
"]",
"=",
"self",
".",
"username",
"auth_data",
"[",
"'userkey'",
"]",
"=",
"self",
".",
"account_identifier",
"auth_resp",
"=",
"requests_util",
".",
"run_request",
"(",
"'post'",
",",
"self",
".",
"API_BASE_URL",
"+",
"'/login'",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"auth_data",
")",
",",
"headers",
"=",
"self",
".",
"__get_header",
"(",
")",
")",
"if",
"auth_resp",
".",
"status_code",
"==",
"200",
":",
"auth_resp_data",
"=",
"self",
".",
"parse_raw_response",
"(",
"auth_resp",
")",
"self",
".",
"__token",
"=",
"auth_resp_data",
"[",
"'token'",
"]",
"self",
".",
"__auth_time",
"=",
"datetime",
".",
"now",
"(",
")",
"self",
".",
"is_authenticated",
"=",
"True",
"else",
":",
"raise",
"AuthenticationFailedException",
"(",
"'Authentication failed!'",
")"
] | 39.857143 | 22.047619 |
def return_multiple_convert_numpy(self, start_id, end_id, converter, add_args=None):
"""
Converts several objects, with ids in the range (start_id, end_id)
into a 2d numpy array and returns the array, the conversion is done by the 'converter' function
Parameters
----------
start_id : the id of the first object to be converted
end_id : the id of the last object to be converted, if equal to -1, will convert all data points in range
(start_id, <id of last element in database>)
converter : function, which takes the path of a data point and *args as parameters and returns a numpy array
add_args : optional arguments for the converter (list/dictionary/tuple/whatever). if None, the
converter should take only one input argument - the file path. default value: None
Returns
-------
result : 2-dimensional ndarray
"""
if end_id == -1:
end_id = self.points_amt
return return_multiple_convert_numpy_base(self.dbpath, self.path_to_set, self._set_object, start_id, end_id,
converter, add_args) | [
"def",
"return_multiple_convert_numpy",
"(",
"self",
",",
"start_id",
",",
"end_id",
",",
"converter",
",",
"add_args",
"=",
"None",
")",
":",
"if",
"end_id",
"==",
"-",
"1",
":",
"end_id",
"=",
"self",
".",
"points_amt",
"return",
"return_multiple_convert_numpy_base",
"(",
"self",
".",
"dbpath",
",",
"self",
".",
"path_to_set",
",",
"self",
".",
"_set_object",
",",
"start_id",
",",
"end_id",
",",
"converter",
",",
"add_args",
")"
] | 53.045455 | 33.5 |
def find_old_vidyo_rooms(max_room_event_age):
"""Finds all Vidyo rooms that are:
- linked to no events
- linked only to events whose start date precedes today - max_room_event_age days
"""
recently_used = (db.session.query(VCRoom.id)
.filter(VCRoom.type == 'vidyo',
Event.end_dt > (now_utc() - timedelta(days=max_room_event_age)))
.join(VCRoom.events)
.join(VCRoomEventAssociation.event)
.group_by(VCRoom.id))
# non-deleted rooms with no recent associations
return VCRoom.find_all(VCRoom.status != VCRoomStatus.deleted, ~VCRoom.id.in_(recently_used)) | [
"def",
"find_old_vidyo_rooms",
"(",
"max_room_event_age",
")",
":",
"recently_used",
"=",
"(",
"db",
".",
"session",
".",
"query",
"(",
"VCRoom",
".",
"id",
")",
".",
"filter",
"(",
"VCRoom",
".",
"type",
"==",
"'vidyo'",
",",
"Event",
".",
"end_dt",
">",
"(",
"now_utc",
"(",
")",
"-",
"timedelta",
"(",
"days",
"=",
"max_room_event_age",
")",
")",
")",
".",
"join",
"(",
"VCRoom",
".",
"events",
")",
".",
"join",
"(",
"VCRoomEventAssociation",
".",
"event",
")",
".",
"group_by",
"(",
"VCRoom",
".",
"id",
")",
")",
"# non-deleted rooms with no recent associations",
"return",
"VCRoom",
".",
"find_all",
"(",
"VCRoom",
".",
"status",
"!=",
"VCRoomStatus",
".",
"deleted",
",",
"~",
"VCRoom",
".",
"id",
".",
"in_",
"(",
"recently_used",
")",
")"
] | 48.928571 | 18.857143 |
def _set_name(self):
"""Set device name."""
try:
self._name = pretty(self.machine_name)
self._serial = self.serial_number
except AttributeError:
self._name = None
self._serial = None | [
"def",
"_set_name",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_name",
"=",
"pretty",
"(",
"self",
".",
"machine_name",
")",
"self",
".",
"_serial",
"=",
"self",
".",
"serial_number",
"except",
"AttributeError",
":",
"self",
".",
"_name",
"=",
"None",
"self",
".",
"_serial",
"=",
"None"
] | 30.875 | 11.625 |
def cartogram(df, projection=None,
scale=None, limits=(0.2, 1), scale_func=None, trace=True, trace_kwargs=None,
hue=None, categorical=False, scheme=None, k=5, cmap='viridis', vmin=None, vmax=None,
legend=False, legend_values=None, legend_labels=None, legend_kwargs=None, legend_var="scale",
extent=None,
figsize=(8, 6), ax=None,
**kwargs):
"""
Self-scaling area plot.
Parameters
----------
df : GeoDataFrame
The data being plotted.
projection : geoplot.crs object instance, optional
A geographic projection. For more information refer to `the tutorial page on projections
<https://nbviewer.jupyter.org/github/ResidentMario/geoplot/blob/master/notebooks/tutorials/Projections.ipynb>`_.
scale : str or iterable, optional
Applies scaling to the output points. Defaults to None (no scaling).
limits : (min, max) tuple, optional
The minimum and maximum scale limits. Ignored if ``scale`` is left specified.
scale_func : ufunc, optional
The function used to scale point sizes. Defaults to a linear scale. For more information see `the Gallery demo
<examples/usa-city-elevations.html>`_.
trace : boolean, optional
Whether or not to include a trace of the polygon's original outline in the plot result.
trace_kwargs : dict, optional
If ``trace`` is set to ``True``, this parameter can be used to adjust the properties of the trace outline. This
parameter is ignored if trace is ``False``.
hue : None, Series, GeoSeries, iterable, or str, optional
Applies a colormap to the output points.
categorical : boolean, optional
Set to ``True`` if ``hue`` references a categorical variable, and ``False`` (the default) otherwise. Ignored
if ``hue`` is left unspecified.
scheme : None or {"quantiles"|"equal_interval"|"fisher_Jenks"}, optional
Controls how the colormap bin edges are determined. Ignored if ``hue`` is left unspecified.
k : int or None, optional
Ignored if ``hue`` is left unspecified. Otherwise, if ``categorical`` is False, controls how many colors to
use (5 is the default). If set to ``None``, a continuous colormap will be used.
cmap : matplotlib color, optional
The `matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_ to be used.
Ignored if ``hue`` is left unspecified.
vmin : float, optional
Values below this level will be colored the same threshold value. Defaults to the dataset minimum. Ignored
if ``hue`` is left unspecified.
vmax : float, optional
Values above this level will be colored the same threshold value. Defaults to the dataset maximum. Ignored
if ``hue`` is left unspecified.
legend : boolean, optional
Whether or not to include a legend. Ignored if neither a ``hue`` nor a ``scale`` is specified.
legend_values : list, optional
The values to use in the legend. Defaults to equal intervals. For more information see `the Gallery demo
<https://residentmario.github.io/geoplot/examples/largest-cities-usa.html>`_.
legend_labels : list, optional
The names to use in the legend. Defaults to the variable values. For more information see `the Gallery demo
<https://residentmario.github.io/geoplot/examples/largest-cities-usa.html>`_.
legend_kwargs : dict, optional
Keyword arguments to be passed to `the underlying legend <http://matplotlib.org/users/legend_guide.html>`_.
extent : None or (minx, maxx, miny, maxy), optional
Used to control plot x-axis and y-axis limits manually.
figsize : tuple, optional
An (x, y) tuple passed to ``matplotlib.figure`` which sets the size, in inches, of the resultant plot.
ax : AxesSubplot or GeoAxesSubplot instance, optional
A ``matplotlib.axes.AxesSubplot`` or ``cartopy.mpl.geoaxes.GeoAxesSubplot`` instance. Defaults to a new axis.
kwargs: dict, optional
Keyword arguments to be passed to the underlying ``matplotlib`` `Polygon patches
<http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_.
Returns
-------
``AxesSubplot`` or ``GeoAxesSubplot``
The plot axis
Examples
--------
A cartogram is a plot type which ingests a series of enclosed ``Polygon`` or ``MultiPolygon`` entities and spits
out a view of these shapes in which area is distorted according to the size of some parameter of interest.
A basic cartogram specifies data, a projection, and a ``scale`` parameter.
.. code-block:: python
import geoplot as gplt
import geoplot.crs as gcrs
gplt.cartogram(boroughs, scale='Population Density', projection=gcrs.AlbersEqualArea())
.. image:: ../figures/cartogram/cartogram-initial.png
The gray outline can be turned off by specifying ``trace``, and a legend can be added by specifying ``legend``.
.. code-block:: python
gplt.cartogram(boroughs, scale='Population Density', projection=gcrs.AlbersEqualArea(),
trace=False, legend=True)
.. image:: ../figures/cartogram/cartogram-trace-legend.png
Keyword arguments can be passed to the legend using the ``legend_kwargs`` argument. These arguments will be
passed to the underlying ``matplotlib.legend.Legend`` instance (`ref
<http://matplotlib.org/api/legend_api.html#matplotlib.legend.Legend>`_). The ``loc`` and ``bbox_to_anchor``
parameters are particularly useful for positioning the legend. Other additional arguments will be passed to the
underlying ``matplotlib`` `scatter plot <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter>`_.
.. code-block:: python
gplt.cartogram(boroughs, scale='Population Density', projection=gcrs.AlbersEqualArea(),
trace=False, legend=True, legend_kwargs={'loc': 'upper left'})
.. image:: ../figures/cartogram/cartogram-legend-kwargs.png
Additional arguments to ``cartogram`` will be interpreted as keyword arguments for the scaled polygons,
using `matplotlib Polygon patch
<http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_ rules.
.. code-block:: python
gplt.cartogram(boroughs, scale='Population Density', projection=gcrs.AlbersEqualArea(),
edgecolor='darkgreen')
.. image:: ../figures/cartogram/cartogram-kwargs.png
Manipulate the outlines use the ``trace_kwargs`` argument, which accepts the same `matplotlib Polygon patch
<http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_ parameters.
.. code-block:: python
gplt.cartogram(boroughs, scale='Population Density', projection=gcrs.AlbersEqualArea(),
trace_kwargs={'edgecolor': 'lightgreen'})
.. image:: ../figures/cartogram/cartogram-trace-kwargs.png
Adjust the level of scaling to apply using the ``limits`` parameter.
.. code-block:: python
gplt.cartogram(boroughs, scale='Population Density', projection=gcrs.AlbersEqualArea(),
limits=(0.5, 1))
.. image:: ../figures/cartogram/cartogram-limits.png
The default scaling function is linear: an observations at the midpoint of two others will be exactly midway
between them in size. To specify an alternative scaling function, use the ``scale_func`` parameter. This should
be a factory function of two variables which, when given the maximum and minimum of the dataset,
returns a scaling function which will be applied to the rest of the data. A demo is available in
the `example gallery <examples/usa-city-elevations.html>`_.
.. code-block:: python
def trivial_scale(minval, maxval): return lambda v: 2
gplt.cartogram(boroughs, scale='Population Density', projection=gcrs.AlbersEqualArea(),
limits=(0.5, 1), scale_func=trivial_scale)
.. image:: ../figures/cartogram/cartogram-scale-func.png
``cartogram`` also provides the same ``hue`` visual variable parameters provided by e.g. ``pointplot``. For more
information on ``hue``-related arguments, see the related sections in the ``pointplot`` `documentation
<./pointplot.html>`_.
.. code-block:: python
gplt.cartogram(boroughs, scale='Population Density', projection=gcrs.AlbersEqualArea(),
hue='Population Density', k=None, cmap='Blues')
.. image:: ../figures/cartogram/cartogram-hue.png
"""
# Initialize the figure.
fig = _init_figure(ax, figsize)
# Load the projection.
if projection:
projection = projection.load(df, {
'central_longitude': lambda df: np.mean(np.array([p.x for p in df.geometry.centroid])),
'central_latitude': lambda df: np.mean(np.array([p.y for p in df.geometry.centroid]))
})
# Set up the axis.
if not ax:
ax = plt.subplot(111, projection=projection)
# Clean up patches.
else:
if not ax:
ax = plt.gca()
# Clean up patches.
_lay_out_axes(ax, projection)
# Immediately return if input geometry is empty.
if len(df.geometry) == 0:
return ax
# Set extent.
extrema = _get_envelopes_min_maxes(df.geometry.envelope.exterior)
_set_extent(ax, projection, extent, extrema)
# Check that the ``scale`` parameter is filled, and use it to fill a ``values`` name.
if not scale:
raise ValueError("No scale parameter provided.")
elif isinstance(scale, str):
values = df[scale]
else:
values = scale
# Compute a scale function.
dmin, dmax = np.min(values), np.max(values)
if not scale_func:
dslope = (limits[1] - limits[0]) / (dmax - dmin)
dscale = lambda dval: limits[0] + dslope * (dval - dmin)
else:
dscale = scale_func(dmin, dmax)
# Create a legend, if appropriate.
if legend:
_paint_carto_legend(ax, values, legend_values, legend_labels, dscale, legend_kwargs)
# Validate hue input.
hue = _validate_hue(df, hue)
# Generate the coloring information, if needed. Follows one of two schemes, categorical or continuous,
# based on whether or not ``k`` is specified (``hue`` must be specified for either to work).
if k is not None and hue is not None:
# Categorical colormap code path.
categorical, k, scheme = _validate_buckets(categorical, k, scheme)
if hue is not None:
cmap, categories, hue_values = _discrete_colorize(categorical, hue, scheme, k, cmap, vmin, vmax)
colors = [cmap.to_rgba(v) for v in hue_values]
# Add a legend, if appropriate.
if legend and (legend_var != "scale" or scale is None):
_paint_hue_legend(ax, categories, cmap, legend_labels, legend_kwargs)
else:
colors = ['None']*len(df)
elif k is None and hue is not None:
# Continuous colormap code path.
hue_values = hue
cmap = _continuous_colormap(hue_values, cmap, vmin, vmax)
colors = [cmap.to_rgba(v) for v in hue_values]
# Add a legend, if appropriate.
if legend and (legend_var != "scale" or scale is None):
_paint_colorbar_legend(ax, hue_values, cmap, legend_kwargs)
elif 'facecolor' in kwargs:
colors = [kwargs.pop('facecolor')]*len(df)
else:
colors = ['None']*len(df)
# Manipulate trace_kwargs.
if trace:
if trace_kwargs is None:
trace_kwargs = dict()
if 'edgecolor' not in trace_kwargs.keys():
trace_kwargs['edgecolor'] = 'lightgray'
if 'facecolor' not in trace_kwargs.keys():
trace_kwargs['facecolor'] = 'None'
# Draw traces first, if appropriate.
if trace:
if projection:
for polygon in df.geometry:
features = ShapelyFeature([polygon], ccrs.PlateCarree())
ax.add_feature(features, **trace_kwargs)
else:
for polygon in df.geometry:
try: # Duck test for MultiPolygon.
for subgeom in polygon:
feature = descartes.PolygonPatch(subgeom, **trace_kwargs)
ax.add_patch(feature)
except (TypeError, AssertionError): # Shapely Polygon.
feature = descartes.PolygonPatch(polygon, **trace_kwargs)
ax.add_patch(feature)
# Finally, draw the scaled geometries.
for value, color, polygon in zip(values, colors, df.geometry):
scale_factor = dscale(value)
scaled_polygon = shapely.affinity.scale(polygon, xfact=scale_factor, yfact=scale_factor)
if projection:
features = ShapelyFeature([scaled_polygon], ccrs.PlateCarree())
ax.add_feature(features, facecolor=color, **kwargs)
else:
try: # Duck test for MultiPolygon.
for subgeom in scaled_polygon:
feature = descartes.PolygonPatch(subgeom, facecolor=color, **kwargs)
ax.add_patch(feature)
except (TypeError, AssertionError): # Shapely Polygon.
feature = descartes.PolygonPatch(scaled_polygon, facecolor=color, **kwargs)
ax.add_patch(feature)
return ax | [
"def",
"cartogram",
"(",
"df",
",",
"projection",
"=",
"None",
",",
"scale",
"=",
"None",
",",
"limits",
"=",
"(",
"0.2",
",",
"1",
")",
",",
"scale_func",
"=",
"None",
",",
"trace",
"=",
"True",
",",
"trace_kwargs",
"=",
"None",
",",
"hue",
"=",
"None",
",",
"categorical",
"=",
"False",
",",
"scheme",
"=",
"None",
",",
"k",
"=",
"5",
",",
"cmap",
"=",
"'viridis'",
",",
"vmin",
"=",
"None",
",",
"vmax",
"=",
"None",
",",
"legend",
"=",
"False",
",",
"legend_values",
"=",
"None",
",",
"legend_labels",
"=",
"None",
",",
"legend_kwargs",
"=",
"None",
",",
"legend_var",
"=",
"\"scale\"",
",",
"extent",
"=",
"None",
",",
"figsize",
"=",
"(",
"8",
",",
"6",
")",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Initialize the figure.",
"fig",
"=",
"_init_figure",
"(",
"ax",
",",
"figsize",
")",
"# Load the projection.",
"if",
"projection",
":",
"projection",
"=",
"projection",
".",
"load",
"(",
"df",
",",
"{",
"'central_longitude'",
":",
"lambda",
"df",
":",
"np",
".",
"mean",
"(",
"np",
".",
"array",
"(",
"[",
"p",
".",
"x",
"for",
"p",
"in",
"df",
".",
"geometry",
".",
"centroid",
"]",
")",
")",
",",
"'central_latitude'",
":",
"lambda",
"df",
":",
"np",
".",
"mean",
"(",
"np",
".",
"array",
"(",
"[",
"p",
".",
"y",
"for",
"p",
"in",
"df",
".",
"geometry",
".",
"centroid",
"]",
")",
")",
"}",
")",
"# Set up the axis.",
"if",
"not",
"ax",
":",
"ax",
"=",
"plt",
".",
"subplot",
"(",
"111",
",",
"projection",
"=",
"projection",
")",
"# Clean up patches.",
"else",
":",
"if",
"not",
"ax",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"# Clean up patches.",
"_lay_out_axes",
"(",
"ax",
",",
"projection",
")",
"# Immediately return if input geometry is empty.",
"if",
"len",
"(",
"df",
".",
"geometry",
")",
"==",
"0",
":",
"return",
"ax",
"# Set extent.",
"extrema",
"=",
"_get_envelopes_min_maxes",
"(",
"df",
".",
"geometry",
".",
"envelope",
".",
"exterior",
")",
"_set_extent",
"(",
"ax",
",",
"projection",
",",
"extent",
",",
"extrema",
")",
"# Check that the ``scale`` parameter is filled, and use it to fill a ``values`` name.",
"if",
"not",
"scale",
":",
"raise",
"ValueError",
"(",
"\"No scale parameter provided.\"",
")",
"elif",
"isinstance",
"(",
"scale",
",",
"str",
")",
":",
"values",
"=",
"df",
"[",
"scale",
"]",
"else",
":",
"values",
"=",
"scale",
"# Compute a scale function.",
"dmin",
",",
"dmax",
"=",
"np",
".",
"min",
"(",
"values",
")",
",",
"np",
".",
"max",
"(",
"values",
")",
"if",
"not",
"scale_func",
":",
"dslope",
"=",
"(",
"limits",
"[",
"1",
"]",
"-",
"limits",
"[",
"0",
"]",
")",
"/",
"(",
"dmax",
"-",
"dmin",
")",
"dscale",
"=",
"lambda",
"dval",
":",
"limits",
"[",
"0",
"]",
"+",
"dslope",
"*",
"(",
"dval",
"-",
"dmin",
")",
"else",
":",
"dscale",
"=",
"scale_func",
"(",
"dmin",
",",
"dmax",
")",
"# Create a legend, if appropriate.",
"if",
"legend",
":",
"_paint_carto_legend",
"(",
"ax",
",",
"values",
",",
"legend_values",
",",
"legend_labels",
",",
"dscale",
",",
"legend_kwargs",
")",
"# Validate hue input.",
"hue",
"=",
"_validate_hue",
"(",
"df",
",",
"hue",
")",
"# Generate the coloring information, if needed. Follows one of two schemes, categorical or continuous,",
"# based on whether or not ``k`` is specified (``hue`` must be specified for either to work).",
"if",
"k",
"is",
"not",
"None",
"and",
"hue",
"is",
"not",
"None",
":",
"# Categorical colormap code path.",
"categorical",
",",
"k",
",",
"scheme",
"=",
"_validate_buckets",
"(",
"categorical",
",",
"k",
",",
"scheme",
")",
"if",
"hue",
"is",
"not",
"None",
":",
"cmap",
",",
"categories",
",",
"hue_values",
"=",
"_discrete_colorize",
"(",
"categorical",
",",
"hue",
",",
"scheme",
",",
"k",
",",
"cmap",
",",
"vmin",
",",
"vmax",
")",
"colors",
"=",
"[",
"cmap",
".",
"to_rgba",
"(",
"v",
")",
"for",
"v",
"in",
"hue_values",
"]",
"# Add a legend, if appropriate.",
"if",
"legend",
"and",
"(",
"legend_var",
"!=",
"\"scale\"",
"or",
"scale",
"is",
"None",
")",
":",
"_paint_hue_legend",
"(",
"ax",
",",
"categories",
",",
"cmap",
",",
"legend_labels",
",",
"legend_kwargs",
")",
"else",
":",
"colors",
"=",
"[",
"'None'",
"]",
"*",
"len",
"(",
"df",
")",
"elif",
"k",
"is",
"None",
"and",
"hue",
"is",
"not",
"None",
":",
"# Continuous colormap code path.",
"hue_values",
"=",
"hue",
"cmap",
"=",
"_continuous_colormap",
"(",
"hue_values",
",",
"cmap",
",",
"vmin",
",",
"vmax",
")",
"colors",
"=",
"[",
"cmap",
".",
"to_rgba",
"(",
"v",
")",
"for",
"v",
"in",
"hue_values",
"]",
"# Add a legend, if appropriate.",
"if",
"legend",
"and",
"(",
"legend_var",
"!=",
"\"scale\"",
"or",
"scale",
"is",
"None",
")",
":",
"_paint_colorbar_legend",
"(",
"ax",
",",
"hue_values",
",",
"cmap",
",",
"legend_kwargs",
")",
"elif",
"'facecolor'",
"in",
"kwargs",
":",
"colors",
"=",
"[",
"kwargs",
".",
"pop",
"(",
"'facecolor'",
")",
"]",
"*",
"len",
"(",
"df",
")",
"else",
":",
"colors",
"=",
"[",
"'None'",
"]",
"*",
"len",
"(",
"df",
")",
"# Manipulate trace_kwargs.",
"if",
"trace",
":",
"if",
"trace_kwargs",
"is",
"None",
":",
"trace_kwargs",
"=",
"dict",
"(",
")",
"if",
"'edgecolor'",
"not",
"in",
"trace_kwargs",
".",
"keys",
"(",
")",
":",
"trace_kwargs",
"[",
"'edgecolor'",
"]",
"=",
"'lightgray'",
"if",
"'facecolor'",
"not",
"in",
"trace_kwargs",
".",
"keys",
"(",
")",
":",
"trace_kwargs",
"[",
"'facecolor'",
"]",
"=",
"'None'",
"# Draw traces first, if appropriate.",
"if",
"trace",
":",
"if",
"projection",
":",
"for",
"polygon",
"in",
"df",
".",
"geometry",
":",
"features",
"=",
"ShapelyFeature",
"(",
"[",
"polygon",
"]",
",",
"ccrs",
".",
"PlateCarree",
"(",
")",
")",
"ax",
".",
"add_feature",
"(",
"features",
",",
"*",
"*",
"trace_kwargs",
")",
"else",
":",
"for",
"polygon",
"in",
"df",
".",
"geometry",
":",
"try",
":",
"# Duck test for MultiPolygon.",
"for",
"subgeom",
"in",
"polygon",
":",
"feature",
"=",
"descartes",
".",
"PolygonPatch",
"(",
"subgeom",
",",
"*",
"*",
"trace_kwargs",
")",
"ax",
".",
"add_patch",
"(",
"feature",
")",
"except",
"(",
"TypeError",
",",
"AssertionError",
")",
":",
"# Shapely Polygon.",
"feature",
"=",
"descartes",
".",
"PolygonPatch",
"(",
"polygon",
",",
"*",
"*",
"trace_kwargs",
")",
"ax",
".",
"add_patch",
"(",
"feature",
")",
"# Finally, draw the scaled geometries.",
"for",
"value",
",",
"color",
",",
"polygon",
"in",
"zip",
"(",
"values",
",",
"colors",
",",
"df",
".",
"geometry",
")",
":",
"scale_factor",
"=",
"dscale",
"(",
"value",
")",
"scaled_polygon",
"=",
"shapely",
".",
"affinity",
".",
"scale",
"(",
"polygon",
",",
"xfact",
"=",
"scale_factor",
",",
"yfact",
"=",
"scale_factor",
")",
"if",
"projection",
":",
"features",
"=",
"ShapelyFeature",
"(",
"[",
"scaled_polygon",
"]",
",",
"ccrs",
".",
"PlateCarree",
"(",
")",
")",
"ax",
".",
"add_feature",
"(",
"features",
",",
"facecolor",
"=",
"color",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"try",
":",
"# Duck test for MultiPolygon.",
"for",
"subgeom",
"in",
"scaled_polygon",
":",
"feature",
"=",
"descartes",
".",
"PolygonPatch",
"(",
"subgeom",
",",
"facecolor",
"=",
"color",
",",
"*",
"*",
"kwargs",
")",
"ax",
".",
"add_patch",
"(",
"feature",
")",
"except",
"(",
"TypeError",
",",
"AssertionError",
")",
":",
"# Shapely Polygon.",
"feature",
"=",
"descartes",
".",
"PolygonPatch",
"(",
"scaled_polygon",
",",
"facecolor",
"=",
"color",
",",
"*",
"*",
"kwargs",
")",
"ax",
".",
"add_patch",
"(",
"feature",
")",
"return",
"ax"
] | 45.574394 | 29.768166 |
def draw(self):
"""Draws the Text in the window."""
if not self.visible:
return
# If this input text has focus, draw an outline around the text image
if self.focus:
pygame.draw.rect(self.window, self.focusColor, self.focusedImageRect, 1)
# Blit in the image of text (set earlier in _updateImage)
self.window.blit(self.textImage, self.loc)
# If this field has focus, see if it is time to blink the cursor
if self.focus:
self.cursorMsCounter = self.cursorMsCounter + self.clock.get_time()
if self.cursorMsCounter >= self.cursorSwitchMs:
self.cursorMsCounter = self.cursorMsCounter % self.cursorSwitchMs
self.cursorVisible = not self.cursorVisible
if self.cursorVisible:
cursorOffset = self.font.size(self.text[:self.cursorPosition])[0]
if self.cursorPosition > 0: # Try to get between characters
cursorOffset = cursorOffset - 1
if cursorOffset < self.width: # if the loc is within the text area, draw it
self.cursorLoc[0] = self.loc[0] + cursorOffset
self.window.blit(self.cursorSurface, self.cursorLoc)
self.clock.tick() | [
"def",
"draw",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"visible",
":",
"return",
"# If this input text has focus, draw an outline around the text image\r",
"if",
"self",
".",
"focus",
":",
"pygame",
".",
"draw",
".",
"rect",
"(",
"self",
".",
"window",
",",
"self",
".",
"focusColor",
",",
"self",
".",
"focusedImageRect",
",",
"1",
")",
"# Blit in the image of text (set earlier in _updateImage)\r",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"textImage",
",",
"self",
".",
"loc",
")",
"# If this field has focus, see if it is time to blink the cursor\r",
"if",
"self",
".",
"focus",
":",
"self",
".",
"cursorMsCounter",
"=",
"self",
".",
"cursorMsCounter",
"+",
"self",
".",
"clock",
".",
"get_time",
"(",
")",
"if",
"self",
".",
"cursorMsCounter",
">=",
"self",
".",
"cursorSwitchMs",
":",
"self",
".",
"cursorMsCounter",
"=",
"self",
".",
"cursorMsCounter",
"%",
"self",
".",
"cursorSwitchMs",
"self",
".",
"cursorVisible",
"=",
"not",
"self",
".",
"cursorVisible",
"if",
"self",
".",
"cursorVisible",
":",
"cursorOffset",
"=",
"self",
".",
"font",
".",
"size",
"(",
"self",
".",
"text",
"[",
":",
"self",
".",
"cursorPosition",
"]",
")",
"[",
"0",
"]",
"if",
"self",
".",
"cursorPosition",
">",
"0",
":",
"# Try to get between characters\r",
"cursorOffset",
"=",
"cursorOffset",
"-",
"1",
"if",
"cursorOffset",
"<",
"self",
".",
"width",
":",
"# if the loc is within the text area, draw it\r",
"self",
".",
"cursorLoc",
"[",
"0",
"]",
"=",
"self",
".",
"loc",
"[",
"0",
"]",
"+",
"cursorOffset",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"cursorSurface",
",",
"self",
".",
"cursorLoc",
")",
"self",
".",
"clock",
".",
"tick",
"(",
")"
] | 46.464286 | 27.821429 |
def reload(self):
"""
Reload the program.
:return:
None.
"""
# Get reload mode
reload_mode = self._reload_mode
# If reload mode is `exec`
if self._reload_mode == self.RELOAD_MODE_V_EXEC:
# Call `reload_using_exec`
self.reload_using_exec()
# If reload mode is `spawn_exit`
elif self._reload_mode == self.RELOAD_MODE_V_SPAWN_EXIT:
# Call `reload_using_spawn_exit`
self.reload_using_spawn_exit()
# If reload mode is `spawn_wait`
elif self._reload_mode == self.RELOAD_MODE_V_SPAWN_WAIT:
# Call `reload_using_spawn_wait`
self.reload_using_spawn_wait()
# If reload mode is none of above
else:
# Get error message
error_msg = 'Invalid reload mode: {}.'.format(repr(reload_mode))
# Raise error
raise ValueError(error_msg) | [
"def",
"reload",
"(",
"self",
")",
":",
"# Get reload mode",
"reload_mode",
"=",
"self",
".",
"_reload_mode",
"# If reload mode is `exec`",
"if",
"self",
".",
"_reload_mode",
"==",
"self",
".",
"RELOAD_MODE_V_EXEC",
":",
"# Call `reload_using_exec`",
"self",
".",
"reload_using_exec",
"(",
")",
"# If reload mode is `spawn_exit`",
"elif",
"self",
".",
"_reload_mode",
"==",
"self",
".",
"RELOAD_MODE_V_SPAWN_EXIT",
":",
"# Call `reload_using_spawn_exit`",
"self",
".",
"reload_using_spawn_exit",
"(",
")",
"# If reload mode is `spawn_wait`",
"elif",
"self",
".",
"_reload_mode",
"==",
"self",
".",
"RELOAD_MODE_V_SPAWN_WAIT",
":",
"# Call `reload_using_spawn_wait`",
"self",
".",
"reload_using_spawn_wait",
"(",
")",
"# If reload mode is none of above",
"else",
":",
"# Get error message",
"error_msg",
"=",
"'Invalid reload mode: {}.'",
".",
"format",
"(",
"repr",
"(",
"reload_mode",
")",
")",
"# Raise error",
"raise",
"ValueError",
"(",
"error_msg",
")"
] | 29.125 | 16.125 |
def copy(self):
"Return a copy of the drop target (to avoid wx problems on rebuild)"
return ToolBoxDropTarget(self.dv, self.root,
self.designer, self.inspector) | [
"def",
"copy",
"(",
"self",
")",
":",
"return",
"ToolBoxDropTarget",
"(",
"self",
".",
"dv",
",",
"self",
".",
"root",
",",
"self",
".",
"designer",
",",
"self",
".",
"inspector",
")"
] | 51.75 | 24.25 |
def parse_sv_frequencies(variant):
"""Parsing of some custom sv frequencies
These are very specific at the moment, this will hopefully get better over time when the
field of structural variants is more developed.
Args:
variant(cyvcf2.Variant)
Returns:
sv_frequencies(dict)
"""
frequency_keys = [
'clingen_cgh_benignAF',
'clingen_cgh_benign',
'clingen_cgh_pathogenicAF',
'clingen_cgh_pathogenic',
'clingen_ngi',
'clingen_ngiAF',
'swegen',
'swegenAF',
'decipherAF',
'decipher'
]
sv_frequencies = {}
for key in frequency_keys:
value = variant.INFO.get(key, 0)
if 'AF' in key:
value = float(value)
else:
value = int(value)
if value > 0:
sv_frequencies[key] = value
return sv_frequencies | [
"def",
"parse_sv_frequencies",
"(",
"variant",
")",
":",
"frequency_keys",
"=",
"[",
"'clingen_cgh_benignAF'",
",",
"'clingen_cgh_benign'",
",",
"'clingen_cgh_pathogenicAF'",
",",
"'clingen_cgh_pathogenic'",
",",
"'clingen_ngi'",
",",
"'clingen_ngiAF'",
",",
"'swegen'",
",",
"'swegenAF'",
",",
"'decipherAF'",
",",
"'decipher'",
"]",
"sv_frequencies",
"=",
"{",
"}",
"for",
"key",
"in",
"frequency_keys",
":",
"value",
"=",
"variant",
".",
"INFO",
".",
"get",
"(",
"key",
",",
"0",
")",
"if",
"'AF'",
"in",
"key",
":",
"value",
"=",
"float",
"(",
"value",
")",
"else",
":",
"value",
"=",
"int",
"(",
"value",
")",
"if",
"value",
">",
"0",
":",
"sv_frequencies",
"[",
"key",
"]",
"=",
"value",
"return",
"sv_frequencies"
] | 24.685714 | 18.085714 |
def import_project_modules(module_name):
"""Imports modules from registered apps using given module name
and returns them as a list.
:param str module_name:
:rtype: list
"""
from django.conf import settings
submodules = []
for app in settings.INSTALLED_APPS:
module = import_app_module(app, module_name)
if module is not None:
submodules.append(module)
return submodules | [
"def",
"import_project_modules",
"(",
"module_name",
")",
":",
"from",
"django",
".",
"conf",
"import",
"settings",
"submodules",
"=",
"[",
"]",
"for",
"app",
"in",
"settings",
".",
"INSTALLED_APPS",
":",
"module",
"=",
"import_app_module",
"(",
"app",
",",
"module_name",
")",
"if",
"module",
"is",
"not",
"None",
":",
"submodules",
".",
"append",
"(",
"module",
")",
"return",
"submodules"
] | 24.823529 | 16.235294 |
def map(self, func, value_shape=None, dtype=None):
"""
Apply an array -> array function on each subarray.
The function can change the shape of the subarray, but only along
dimensions that are not chunked.
Parameters
----------
func : function
Function of a single subarray to apply
value_shape:
Known shape of chunking plan after the map
dtype: numpy.dtype, optional, default=None
Known dtype of values resulting from operation
Returns
-------
ChunkedArray
"""
if value_shape is None or dtype is None:
# try to compute the size of each mapped element by applying func to a random array
try:
mapped = func(random.randn(*self.plan).astype(self.dtype))
except Exception:
first = self._rdd.first()
if first:
# eval func on the first element
mapped = func(first[1])
if value_shape is None:
value_shape = mapped.shape
if dtype is None:
dtype = mapped.dtype
chunked_dims = where(self.plan != self.vshape)[0]
unchunked_dims = where(self.plan == self.vshape)[0]
# check that no dimensions are dropped
if len(value_shape) != len(self.plan):
raise NotImplementedError('map on ChunkedArray cannot drop dimensions')
# check that chunked dimensions did not change shape
if any([value_shape[i] != self.plan[i] for i in chunked_dims]):
raise ValueError('map cannot change the sizes of chunked dimensions')
def check_and_apply(v):
new = func(v)
if len(unchunked_dims) > 0:
if any([new.shape[i] != value_shape[i] for i in unchunked_dims]):
raise Exception("Map operation did not produce values of uniform shape.")
if len(chunked_dims) > 0:
if any([v.shape[i] != new.shape[i] for i in chunked_dims]):
raise Exception("Map operation changed the size of a chunked dimension")
return new
rdd = self._rdd.mapValues(check_and_apply)
vshape = [value_shape[i] if i in unchunked_dims else self.vshape[i] for i in range(len(self.vshape))]
newshape = r_[self.kshape, vshape].astype(int).tolist()
return self._constructor(rdd, shape=tuple(newshape), dtype=dtype,
plan=asarray(value_shape)).__finalize__(self) | [
"def",
"map",
"(",
"self",
",",
"func",
",",
"value_shape",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"if",
"value_shape",
"is",
"None",
"or",
"dtype",
"is",
"None",
":",
"# try to compute the size of each mapped element by applying func to a random array",
"try",
":",
"mapped",
"=",
"func",
"(",
"random",
".",
"randn",
"(",
"*",
"self",
".",
"plan",
")",
".",
"astype",
"(",
"self",
".",
"dtype",
")",
")",
"except",
"Exception",
":",
"first",
"=",
"self",
".",
"_rdd",
".",
"first",
"(",
")",
"if",
"first",
":",
"# eval func on the first element",
"mapped",
"=",
"func",
"(",
"first",
"[",
"1",
"]",
")",
"if",
"value_shape",
"is",
"None",
":",
"value_shape",
"=",
"mapped",
".",
"shape",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"mapped",
".",
"dtype",
"chunked_dims",
"=",
"where",
"(",
"self",
".",
"plan",
"!=",
"self",
".",
"vshape",
")",
"[",
"0",
"]",
"unchunked_dims",
"=",
"where",
"(",
"self",
".",
"plan",
"==",
"self",
".",
"vshape",
")",
"[",
"0",
"]",
"# check that no dimensions are dropped",
"if",
"len",
"(",
"value_shape",
")",
"!=",
"len",
"(",
"self",
".",
"plan",
")",
":",
"raise",
"NotImplementedError",
"(",
"'map on ChunkedArray cannot drop dimensions'",
")",
"# check that chunked dimensions did not change shape",
"if",
"any",
"(",
"[",
"value_shape",
"[",
"i",
"]",
"!=",
"self",
".",
"plan",
"[",
"i",
"]",
"for",
"i",
"in",
"chunked_dims",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'map cannot change the sizes of chunked dimensions'",
")",
"def",
"check_and_apply",
"(",
"v",
")",
":",
"new",
"=",
"func",
"(",
"v",
")",
"if",
"len",
"(",
"unchunked_dims",
")",
">",
"0",
":",
"if",
"any",
"(",
"[",
"new",
".",
"shape",
"[",
"i",
"]",
"!=",
"value_shape",
"[",
"i",
"]",
"for",
"i",
"in",
"unchunked_dims",
"]",
")",
":",
"raise",
"Exception",
"(",
"\"Map operation did not produce values of uniform shape.\"",
")",
"if",
"len",
"(",
"chunked_dims",
")",
">",
"0",
":",
"if",
"any",
"(",
"[",
"v",
".",
"shape",
"[",
"i",
"]",
"!=",
"new",
".",
"shape",
"[",
"i",
"]",
"for",
"i",
"in",
"chunked_dims",
"]",
")",
":",
"raise",
"Exception",
"(",
"\"Map operation changed the size of a chunked dimension\"",
")",
"return",
"new",
"rdd",
"=",
"self",
".",
"_rdd",
".",
"mapValues",
"(",
"check_and_apply",
")",
"vshape",
"=",
"[",
"value_shape",
"[",
"i",
"]",
"if",
"i",
"in",
"unchunked_dims",
"else",
"self",
".",
"vshape",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"vshape",
")",
")",
"]",
"newshape",
"=",
"r_",
"[",
"self",
".",
"kshape",
",",
"vshape",
"]",
".",
"astype",
"(",
"int",
")",
".",
"tolist",
"(",
")",
"return",
"self",
".",
"_constructor",
"(",
"rdd",
",",
"shape",
"=",
"tuple",
"(",
"newshape",
")",
",",
"dtype",
"=",
"dtype",
",",
"plan",
"=",
"asarray",
"(",
"value_shape",
")",
")",
".",
"__finalize__",
"(",
"self",
")"
] | 38.769231 | 23.876923 |
def update_args(self, override_args):
"""Update the argument used to invoke the application
Note that this will also update the dictionary of input and output files.
Parameters
-----------
override_args : dict
Dictionary of arguments to override the current values
"""
self.args = extract_arguments(override_args, self.args)
self._latch_file_info()
scratch_dir = self.args.get('scratch', None)
if is_not_null(scratch_dir):
self._file_stage = FileStageManager(scratch_dir, '.') | [
"def",
"update_args",
"(",
"self",
",",
"override_args",
")",
":",
"self",
".",
"args",
"=",
"extract_arguments",
"(",
"override_args",
",",
"self",
".",
"args",
")",
"self",
".",
"_latch_file_info",
"(",
")",
"scratch_dir",
"=",
"self",
".",
"args",
".",
"get",
"(",
"'scratch'",
",",
"None",
")",
"if",
"is_not_null",
"(",
"scratch_dir",
")",
":",
"self",
".",
"_file_stage",
"=",
"FileStageManager",
"(",
"scratch_dir",
",",
"'.'",
")"
] | 35.5 | 19.875 |
def setup(self, settings):
'''
Does the actual setup of the middleware
'''
# set up the default sc logger
my_level = settings.get('SC_LOG_LEVEL', 'INFO')
my_name = settings.get('SClogger_NAME', 'sc-logger')
my_output = settings.get('SC_LOG_STDOUT', True)
my_json = settings.get('SC_LOG_JSON', False)
my_dir = settings.get('SC_LOG_DIR', 'logs')
my_bytes = settings.get('SC_LOG_MAX_BYTES', '10MB')
my_file = settings.get('SC_LOG_FILE', 'main.log')
my_backups = settings.get('SC_LOG_BACKUPS', 5)
self.logger = LogFactory.get_instance(json=my_json,
name=my_name,
stdout=my_output,
level=my_level,
dir=my_dir,
file=my_file,
bytes=my_bytes,
backups=my_backups)
self.settings = settings
self.stats_dict = {}
# set up redis
self.redis_conn = redis.Redis(host=settings.get('REDIS_HOST'),
port=settings.get('REDIS_PORT'),
db=settings.get('REDIS_DB'))
try:
self.redis_conn.info()
self.logger.debug("Connected to Redis in ScraperHandler")
except ConnectionError:
self.logger.error("Failed to connect to Redis in Stats Middleware")
# plugin is essential to functionality
sys.exit(1) | [
"def",
"setup",
"(",
"self",
",",
"settings",
")",
":",
"# set up the default sc logger",
"my_level",
"=",
"settings",
".",
"get",
"(",
"'SC_LOG_LEVEL'",
",",
"'INFO'",
")",
"my_name",
"=",
"settings",
".",
"get",
"(",
"'SClogger_NAME'",
",",
"'sc-logger'",
")",
"my_output",
"=",
"settings",
".",
"get",
"(",
"'SC_LOG_STDOUT'",
",",
"True",
")",
"my_json",
"=",
"settings",
".",
"get",
"(",
"'SC_LOG_JSON'",
",",
"False",
")",
"my_dir",
"=",
"settings",
".",
"get",
"(",
"'SC_LOG_DIR'",
",",
"'logs'",
")",
"my_bytes",
"=",
"settings",
".",
"get",
"(",
"'SC_LOG_MAX_BYTES'",
",",
"'10MB'",
")",
"my_file",
"=",
"settings",
".",
"get",
"(",
"'SC_LOG_FILE'",
",",
"'main.log'",
")",
"my_backups",
"=",
"settings",
".",
"get",
"(",
"'SC_LOG_BACKUPS'",
",",
"5",
")",
"self",
".",
"logger",
"=",
"LogFactory",
".",
"get_instance",
"(",
"json",
"=",
"my_json",
",",
"name",
"=",
"my_name",
",",
"stdout",
"=",
"my_output",
",",
"level",
"=",
"my_level",
",",
"dir",
"=",
"my_dir",
",",
"file",
"=",
"my_file",
",",
"bytes",
"=",
"my_bytes",
",",
"backups",
"=",
"my_backups",
")",
"self",
".",
"settings",
"=",
"settings",
"self",
".",
"stats_dict",
"=",
"{",
"}",
"# set up redis",
"self",
".",
"redis_conn",
"=",
"redis",
".",
"Redis",
"(",
"host",
"=",
"settings",
".",
"get",
"(",
"'REDIS_HOST'",
")",
",",
"port",
"=",
"settings",
".",
"get",
"(",
"'REDIS_PORT'",
")",
",",
"db",
"=",
"settings",
".",
"get",
"(",
"'REDIS_DB'",
")",
")",
"try",
":",
"self",
".",
"redis_conn",
".",
"info",
"(",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Connected to Redis in ScraperHandler\"",
")",
"except",
"ConnectionError",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"Failed to connect to Redis in Stats Middleware\"",
")",
"# plugin is essential to functionality",
"sys",
".",
"exit",
"(",
"1",
")"
] | 41.026316 | 18.5 |
def load(self, entity_class, entity):
"""
Load the given repository entity into the session and return a
clone. If it was already loaded before, look up the loaded entity
and return it.
All entities referenced by the loaded entity will also be loaded
(and cloned) recursively.
:raises ValueError: When an attempt is made to load an entity that
has no ID
"""
if self.__needs_flushing:
self.flush()
if entity.id is None:
raise ValueError('Can not load entity without an ID.')
cache = self.__get_cache(entity_class)
sess_ent = cache.get_by_id(entity.id)
if sess_ent is None:
if self.__clone_on_load:
sess_ent = self.__clone(entity, cache)
else: # Only needed by the nosql backend pragma: no cover
cache.add(entity)
sess_ent = entity
self.__unit_of_work.register_clean(entity_class, sess_ent)
return sess_ent | [
"def",
"load",
"(",
"self",
",",
"entity_class",
",",
"entity",
")",
":",
"if",
"self",
".",
"__needs_flushing",
":",
"self",
".",
"flush",
"(",
")",
"if",
"entity",
".",
"id",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Can not load entity without an ID.'",
")",
"cache",
"=",
"self",
".",
"__get_cache",
"(",
"entity_class",
")",
"sess_ent",
"=",
"cache",
".",
"get_by_id",
"(",
"entity",
".",
"id",
")",
"if",
"sess_ent",
"is",
"None",
":",
"if",
"self",
".",
"__clone_on_load",
":",
"sess_ent",
"=",
"self",
".",
"__clone",
"(",
"entity",
",",
"cache",
")",
"else",
":",
"# Only needed by the nosql backend pragma: no cover",
"cache",
".",
"add",
"(",
"entity",
")",
"sess_ent",
"=",
"entity",
"self",
".",
"__unit_of_work",
".",
"register_clean",
"(",
"entity_class",
",",
"sess_ent",
")",
"return",
"sess_ent"
] | 38.884615 | 17.269231 |
def license_file_is_valid(license_filename, data_filename,
dirpath='.', verbose=False):
"""Check that XML license file for given filename_to_verify is valid.
Input:
license_filename: XML license file (must be an absolute path name)
data_filename: The data filename that is being audited
dir_path: Where the files live
verbose: Optional verbosity
Check for each datafile listed that
* Datafile tags are there and match the one specified
* Fields are non empty (except IP_info which can be left blank)
* Datafile exists
* Checksum is correct
* Datafile is flagged as publishable
If anything is violated an appropriate exception is raised.
If everything is honky dory the function will return True.
"""
if verbose:
print 'Parsing', license_filename
doc = xml2object(license_filename)
# Check that file is valid (e.g. all elements there)
if not doc.has_key('ga_license_file'):
msg = 'License file %s must have two elements' %license_filename
msg += ' at the root level. They are\n'
msg += ' <?xml version="1.0" encoding="iso-8859-1"?>\n'
msg += ' <ga_license_file>\n'
msg += 'The second element was found to be %s' %doc.keys()
raise WrongTags, msg
# Validate elements: metadata, datafile, datafile, ...
# FIXME (Ole): I'd like this to verified by the parser
# using a proper DTD template one day....
# For not, let's check the main ones.
elements = doc['ga_license_file']
if not elements.has_key('metadata'):
msg = 'Tag %s must have the element "metadata"'\
%doc.keys()[0]
msg += 'The element found was %s' %elements[0].nodeName
raise WrongTags, msg
if not elements.has_key('datafile'):
msg = 'Tag %s must have the element "datafile"'\
%doc.keys()[0]
msg += 'The element found was %s' %elements[0].nodeName
raise WrongTags, msg
for key in elements.keys():
msg = 'Invalid tag: %s' %key
if not key in ['metadata', 'datafile']:
raise WrongTags, msg
# Extract information for metadata section
if verbose: print
metadata = elements['metadata']
author = metadata['author']
if verbose: print 'Author: ', author
if author == '':
msg = 'Missing author'
raise Exception, msg
#svn_keywords = metadata['svn_keywords']
#if verbose: print 'SVN keywords: ', svn_keywords
# Extract information for datafile sections
datafile = elements['datafile']
if isinstance(datafile, XML_element):
datafile = [datafile]
# Check that filename to verify is listed in license file
found = False
for data in datafile:
if data['filename'] == data_filename:
found = True
break
if not found:
msg = 'Specified filename to verify %s ' %data_filename
msg += 'did not appear in license file %s' %license_filename
raise FilenameMismatch, msg
# Check contents for selected data_filename
#for data in datafile:
# if verbose: print
# Filename
if data['filename'] == '':
msg = 'Missing filename'
raise FilenameMismatch, msg
else:
filename = join(dirpath, data['filename'])
if verbose: print 'Filename: "%s"' %filename
try:
fid = open(filename, 'r')
except:
msg = 'Specified filename %s could not be opened'\
%filename
raise FilenameMismatch, msg
# CRC
reported_crc = data['checksum']
if verbose: print 'Checksum: "%s"' %reported_crc
file_crc = str(compute_checksum(filename))
if reported_crc != file_crc:
msg = 'Bad checksum (CRC).\n'
msg += ' The CRC reported in license file "%s" is "%s"\n'\
%(license_filename, reported_crc)
msg += ' The CRC computed from file "%s" is "%s"'\
%(filename, file_crc)
raise CRCMismatch, msg
# Accountable
accountable = data['accountable']
if verbose: print 'Accountable: "%s"' %accountable
if accountable == '':
msg = 'No accountable person specified'
raise Empty, msg
# Source
source = data['source']
if verbose: print 'Source: "%s"' %source
if source == '':
msg = 'No source specified'
raise Empty, msg
# IP owner
ip_owner = data['IP_owner']
if verbose: print 'IP owner: "%s"' %ip_owner
if ip_owner == '':
msg = 'No IP owner specified'
raise Empty, msg
# IP info
ip_info = data['IP_info']
if verbose: print 'IP info: "%s"' %ip_info
#if ip_info == '':
# msg = 'No IP info specified'
# raise Empty, msg
# Publishable
publishable = data['publishable']
if verbose: print 'Publishable: "%s"' %publishable
if publishable == '':
msg = 'No publishable value specified'
raise NotPublishable, msg
if publishable.upper() != 'YES':
msg = 'Data file %s is not flagged as publishable'\
%fid.name
raise NotPublishable, msg
# If we get this far, the license file is OK
return True | [
"def",
"license_file_is_valid",
"(",
"license_filename",
",",
"data_filename",
",",
"dirpath",
"=",
"'.'",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"verbose",
":",
"print",
"'Parsing'",
",",
"license_filename",
"doc",
"=",
"xml2object",
"(",
"license_filename",
")",
"# Check that file is valid (e.g. all elements there)",
"if",
"not",
"doc",
".",
"has_key",
"(",
"'ga_license_file'",
")",
":",
"msg",
"=",
"'License file %s must have two elements'",
"%",
"license_filename",
"msg",
"+=",
"' at the root level. They are\\n'",
"msg",
"+=",
"' <?xml version=\"1.0\" encoding=\"iso-8859-1\"?>\\n'",
"msg",
"+=",
"' <ga_license_file>\\n'",
"msg",
"+=",
"'The second element was found to be %s'",
"%",
"doc",
".",
"keys",
"(",
")",
"raise",
"WrongTags",
",",
"msg",
"# Validate elements: metadata, datafile, datafile, ...",
"# FIXME (Ole): I'd like this to verified by the parser",
"# using a proper DTD template one day....",
"# For not, let's check the main ones.",
"elements",
"=",
"doc",
"[",
"'ga_license_file'",
"]",
"if",
"not",
"elements",
".",
"has_key",
"(",
"'metadata'",
")",
":",
"msg",
"=",
"'Tag %s must have the element \"metadata\"'",
"%",
"doc",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"msg",
"+=",
"'The element found was %s'",
"%",
"elements",
"[",
"0",
"]",
".",
"nodeName",
"raise",
"WrongTags",
",",
"msg",
"if",
"not",
"elements",
".",
"has_key",
"(",
"'datafile'",
")",
":",
"msg",
"=",
"'Tag %s must have the element \"datafile\"'",
"%",
"doc",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"msg",
"+=",
"'The element found was %s'",
"%",
"elements",
"[",
"0",
"]",
".",
"nodeName",
"raise",
"WrongTags",
",",
"msg",
"for",
"key",
"in",
"elements",
".",
"keys",
"(",
")",
":",
"msg",
"=",
"'Invalid tag: %s'",
"%",
"key",
"if",
"not",
"key",
"in",
"[",
"'metadata'",
",",
"'datafile'",
"]",
":",
"raise",
"WrongTags",
",",
"msg",
"# Extract information for metadata section",
"if",
"verbose",
":",
"print",
"metadata",
"=",
"elements",
"[",
"'metadata'",
"]",
"author",
"=",
"metadata",
"[",
"'author'",
"]",
"if",
"verbose",
":",
"print",
"'Author: '",
",",
"author",
"if",
"author",
"==",
"''",
":",
"msg",
"=",
"'Missing author'",
"raise",
"Exception",
",",
"msg",
"#svn_keywords = metadata['svn_keywords']",
"#if verbose: print 'SVN keywords: ', svn_keywords",
"# Extract information for datafile sections",
"datafile",
"=",
"elements",
"[",
"'datafile'",
"]",
"if",
"isinstance",
"(",
"datafile",
",",
"XML_element",
")",
":",
"datafile",
"=",
"[",
"datafile",
"]",
"# Check that filename to verify is listed in license file",
"found",
"=",
"False",
"for",
"data",
"in",
"datafile",
":",
"if",
"data",
"[",
"'filename'",
"]",
"==",
"data_filename",
":",
"found",
"=",
"True",
"break",
"if",
"not",
"found",
":",
"msg",
"=",
"'Specified filename to verify %s '",
"%",
"data_filename",
"msg",
"+=",
"'did not appear in license file %s'",
"%",
"license_filename",
"raise",
"FilenameMismatch",
",",
"msg",
"# Check contents for selected data_filename",
"#for data in datafile:",
"# if verbose: print",
"# Filename",
"if",
"data",
"[",
"'filename'",
"]",
"==",
"''",
":",
"msg",
"=",
"'Missing filename'",
"raise",
"FilenameMismatch",
",",
"msg",
"else",
":",
"filename",
"=",
"join",
"(",
"dirpath",
",",
"data",
"[",
"'filename'",
"]",
")",
"if",
"verbose",
":",
"print",
"'Filename: \"%s\"'",
"%",
"filename",
"try",
":",
"fid",
"=",
"open",
"(",
"filename",
",",
"'r'",
")",
"except",
":",
"msg",
"=",
"'Specified filename %s could not be opened'",
"%",
"filename",
"raise",
"FilenameMismatch",
",",
"msg",
"# CRC",
"reported_crc",
"=",
"data",
"[",
"'checksum'",
"]",
"if",
"verbose",
":",
"print",
"'Checksum: \"%s\"'",
"%",
"reported_crc",
"file_crc",
"=",
"str",
"(",
"compute_checksum",
"(",
"filename",
")",
")",
"if",
"reported_crc",
"!=",
"file_crc",
":",
"msg",
"=",
"'Bad checksum (CRC).\\n'",
"msg",
"+=",
"' The CRC reported in license file \"%s\" is \"%s\"\\n'",
"%",
"(",
"license_filename",
",",
"reported_crc",
")",
"msg",
"+=",
"' The CRC computed from file \"%s\" is \"%s\"'",
"%",
"(",
"filename",
",",
"file_crc",
")",
"raise",
"CRCMismatch",
",",
"msg",
"# Accountable",
"accountable",
"=",
"data",
"[",
"'accountable'",
"]",
"if",
"verbose",
":",
"print",
"'Accountable: \"%s\"'",
"%",
"accountable",
"if",
"accountable",
"==",
"''",
":",
"msg",
"=",
"'No accountable person specified'",
"raise",
"Empty",
",",
"msg",
"# Source",
"source",
"=",
"data",
"[",
"'source'",
"]",
"if",
"verbose",
":",
"print",
"'Source: \"%s\"'",
"%",
"source",
"if",
"source",
"==",
"''",
":",
"msg",
"=",
"'No source specified'",
"raise",
"Empty",
",",
"msg",
"# IP owner",
"ip_owner",
"=",
"data",
"[",
"'IP_owner'",
"]",
"if",
"verbose",
":",
"print",
"'IP owner: \"%s\"'",
"%",
"ip_owner",
"if",
"ip_owner",
"==",
"''",
":",
"msg",
"=",
"'No IP owner specified'",
"raise",
"Empty",
",",
"msg",
"# IP info",
"ip_info",
"=",
"data",
"[",
"'IP_info'",
"]",
"if",
"verbose",
":",
"print",
"'IP info: \"%s\"'",
"%",
"ip_info",
"#if ip_info == '':",
"# msg = 'No IP info specified'",
"# raise Empty, msg",
"# Publishable",
"publishable",
"=",
"data",
"[",
"'publishable'",
"]",
"if",
"verbose",
":",
"print",
"'Publishable: \"%s\"'",
"%",
"publishable",
"if",
"publishable",
"==",
"''",
":",
"msg",
"=",
"'No publishable value specified'",
"raise",
"NotPublishable",
",",
"msg",
"if",
"publishable",
".",
"upper",
"(",
")",
"!=",
"'YES'",
":",
"msg",
"=",
"'Data file %s is not flagged as publishable'",
"%",
"fid",
".",
"name",
"raise",
"NotPublishable",
",",
"msg",
"# If we get this far, the license file is OK",
"return",
"True"
] | 30.100592 | 18.573964 |
def delete_comment(self, comment_id):
    """Deletes a ``Comment``.

    arg: comment_id (osid.id.Id): the ``Id`` of the ``Comment``
        to remove
    raise: NotFound - ``comment_id`` not found
    raise: NullArgument - ``comment_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceAdminSession.delete_resource_template
    collection = JSONClientValidated('commenting',
                                     collection='Comment',
                                     runtime=self._runtime)
    if not isinstance(comment_id, ABCId):
        raise errors.InvalidArgument('the argument is not a valid OSID Id')

    # Look up the record under the current view filter before removing it.
    id_filter = {'_id': ObjectId(comment_id.get_identifier())}
    comment_map = collection.find_one(dict(id_filter, **self._view_filter()))

    # Let the object run its own cleanup hooks, then drop the record.
    comment = objects.Comment(osid_object_map=comment_map,
                              runtime=self._runtime,
                              proxy=self._proxy)
    comment._delete()
    collection.delete_one(id_filter)
"def",
"delete_comment",
"(",
"self",
",",
"comment_id",
")",
":",
"# Implemented from template for",
"# osid.resource.ResourceAdminSession.delete_resource_template",
"collection",
"=",
"JSONClientValidated",
"(",
"'commenting'",
",",
"collection",
"=",
"'Comment'",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"if",
"not",
"isinstance",
"(",
"comment_id",
",",
"ABCId",
")",
":",
"raise",
"errors",
".",
"InvalidArgument",
"(",
"'the argument is not a valid OSID Id'",
")",
"comment_map",
"=",
"collection",
".",
"find_one",
"(",
"dict",
"(",
"{",
"'_id'",
":",
"ObjectId",
"(",
"comment_id",
".",
"get_identifier",
"(",
")",
")",
"}",
",",
"*",
"*",
"self",
".",
"_view_filter",
"(",
")",
")",
")",
"objects",
".",
"Comment",
"(",
"osid_object_map",
"=",
"comment_map",
",",
"runtime",
"=",
"self",
".",
"_runtime",
",",
"proxy",
"=",
"self",
".",
"_proxy",
")",
".",
"_delete",
"(",
")",
"collection",
".",
"delete_one",
"(",
"{",
"'_id'",
":",
"ObjectId",
"(",
"comment_id",
".",
"get_identifier",
"(",
")",
")",
"}",
")"
] | 48.2 | 20.76 |
def k8s_events_handle_experiment_job_statuses(self: 'celery_app.task', payload: Dict) -> None:
    """Experiment jobs statuses"""
    details = payload['details']
    job_uuid = details['labels']['job_uuid']
    status = payload['status']
    logger.debug('handling events status for job_uuid: %s, status: %s',
                 job_uuid, status)

    # Resolve the job; bail out quietly if it was deleted in the meantime.
    try:
        job = ExperimentJob.objects.get(uuid=job_uuid)
    except ExperimentJob.DoesNotExist:
        logger.debug('Job uuid`%s` does not exist', job_uuid)
        return

    # Touching the relation raises when the parent experiment is gone.
    try:
        job.experiment
    except Experiment.DoesNotExist:
        logger.debug('Experiment for job `%s` does not exist anymore', job_uuid)
        return

    # Give the initial status a chance to land before processing updates.
    if job.last_status is None and self.request.retries < 2:
        self.retry(countdown=1)

    # Set the new status
    try:
        set_node_scheduling(job, details['node_name'])
        job.set_status(status=status,
                       message=payload['message'],
                       created_at=payload.get('created_at'),
                       traceback=payload.get('traceback'),
                       details=details)
        logger.debug('status %s is set for job %s %s', status, job_uuid, job.id)
    except IntegrityError:
        # Due to concurrency this could happen, we just retry it
        logger.info('Retry job status %s handling %s', status, job_uuid)
        self.retry(countdown=Intervals.EXPERIMENTS_SCHEDULER)
"def",
"k8s_events_handle_experiment_job_statuses",
"(",
"self",
":",
"'celery_app.task'",
",",
"payload",
":",
"Dict",
")",
"->",
"None",
":",
"details",
"=",
"payload",
"[",
"'details'",
"]",
"job_uuid",
"=",
"details",
"[",
"'labels'",
"]",
"[",
"'job_uuid'",
"]",
"logger",
".",
"debug",
"(",
"'handling events status for job_uuid: %s, status: %s'",
",",
"job_uuid",
",",
"payload",
"[",
"'status'",
"]",
")",
"try",
":",
"job",
"=",
"ExperimentJob",
".",
"objects",
".",
"get",
"(",
"uuid",
"=",
"job_uuid",
")",
"except",
"ExperimentJob",
".",
"DoesNotExist",
":",
"logger",
".",
"debug",
"(",
"'Job uuid`%s` does not exist'",
",",
"job_uuid",
")",
"return",
"try",
":",
"job",
".",
"experiment",
"except",
"Experiment",
".",
"DoesNotExist",
":",
"logger",
".",
"debug",
"(",
"'Experiment for job `%s` does not exist anymore'",
",",
"job_uuid",
")",
"return",
"if",
"job",
".",
"last_status",
"is",
"None",
"and",
"self",
".",
"request",
".",
"retries",
"<",
"2",
":",
"self",
".",
"retry",
"(",
"countdown",
"=",
"1",
")",
"# Set the new status",
"try",
":",
"set_node_scheduling",
"(",
"job",
",",
"details",
"[",
"'node_name'",
"]",
")",
"job",
".",
"set_status",
"(",
"status",
"=",
"payload",
"[",
"'status'",
"]",
",",
"message",
"=",
"payload",
"[",
"'message'",
"]",
",",
"created_at",
"=",
"payload",
".",
"get",
"(",
"'created_at'",
")",
",",
"traceback",
"=",
"payload",
".",
"get",
"(",
"'traceback'",
")",
",",
"details",
"=",
"details",
")",
"logger",
".",
"debug",
"(",
"'status %s is set for job %s %s'",
",",
"payload",
"[",
"'status'",
"]",
",",
"job_uuid",
",",
"job",
".",
"id",
")",
"except",
"IntegrityError",
":",
"# Due to concurrency this could happen, we just retry it",
"logger",
".",
"info",
"(",
"'Retry job status %s handling %s'",
",",
"payload",
"[",
"'status'",
"]",
",",
"job_uuid",
")",
"self",
".",
"retry",
"(",
"countdown",
"=",
"Intervals",
".",
"EXPERIMENTS_SCHEDULER",
")"
] | 40.314286 | 22.257143 |
def insert(self, collection_name, instance):
    """ inserts a unit of work into MongoDB. """
    # Only SiteStatistics documents are supported by this writer.
    assert isinstance(instance, SiteStatistics)
    target = self.ds.connection(collection_name)
    result = target.insert_one(instance.document)
    return result.inserted_id
"def",
"insert",
"(",
"self",
",",
"collection_name",
",",
"instance",
")",
":",
"assert",
"isinstance",
"(",
"instance",
",",
"SiteStatistics",
")",
"collection",
"=",
"self",
".",
"ds",
".",
"connection",
"(",
"collection_name",
")",
"return",
"collection",
".",
"insert_one",
"(",
"instance",
".",
"document",
")",
".",
"inserted_id"
] | 54 | 11.6 |
def get_all_incomings(self, params=None):
    """
    Get all incomings
    This will iterate over all pages until it gets all elements.
    So if the rate limit exceeded it will throw an Exception and you will get nothing
    :param params: search params
    :return: list
    """
    # Treat a missing/falsy params argument as an empty search filter.
    search_params = params or {}
    return self._iterate_through_pages(self.get_incomings_per_page,
                                       resource=INCOMINGS,
                                       params=search_params)
"def",
"get_all_incomings",
"(",
"self",
",",
"params",
"=",
"None",
")",
":",
"if",
"not",
"params",
":",
"params",
"=",
"{",
"}",
"return",
"self",
".",
"_iterate_through_pages",
"(",
"self",
".",
"get_incomings_per_page",
",",
"resource",
"=",
"INCOMINGS",
",",
"*",
"*",
"{",
"'params'",
":",
"params",
"}",
")"
] | 38.333333 | 22 |
def hours(self, local=False):
    """ Returns the number of hours of difference
    """
    # Convert the timedelta from self.delta() into fractional hours.
    seconds = self.delta(local).total_seconds()
    return seconds / 3600
"def",
"hours",
"(",
"self",
",",
"local",
"=",
"False",
")",
":",
"delta",
"=",
"self",
".",
"delta",
"(",
"local",
")",
"return",
"delta",
".",
"total_seconds",
"(",
")",
"/",
"3600"
] | 33.2 | 4 |
def dep(self, source_name):
    """Return a bundle dependency from the sources list

    Resolves the URL of the source entry named ``source_name`` to a
    partition, downloading it from a remote and localizing it if needed.
    If the reference cannot be resolved as a partition at all, it is
    resolved as a bundle instead.

    :param source_name: Source name. The URL field must be a bundle or partition reference
    :return: the resolved (local) partition, or a bundle when the
        reference is not a partition
    :raises ValueError: if the source entry has an empty URL
    """
    from ambry.orm.exc import NotFoundError
    from ambry.dbexceptions import ConfigurationError
    source = self.source(source_name)
    ref = source.url
    if not ref:
        raise ValueError("Got an empty ref for source '{}' ".format(source.name))
    try:
        try:
            p = self.library.partition(ref)
        except NotFoundError:
            # Not in the local library: try to find the bundle on a
            # remote, check it in, then retry the partition lookup.
            self.warn("Partition reference {} not found, try to download it".format(ref))
            remote, vname = self.library.find_remote_bundle(ref, try_harder=True)
            if remote:
                self.warn("Installing {} from {}".format(remote, vname))
                self.library.checkin_remote_bundle(vname, remote)
                p = self.library.partition(ref)
            else:
                # No remote has it either; re-raise so the outer handler
                # can fall back to resolving the ref as a bundle.
                raise
        if not p.is_local:
            # Partition is known but its data isn't on disk yet; pull it
            # down with progress reporting.
            with self.progress.start('test', 0, message='localizing') as ps:
                p.localize(ps)
        return p
    except NotFoundError:
        # The ref does not resolve to a partition anywhere; treat it as
        # a bundle reference instead.
        return self.library.bundle(ref)
"def",
"dep",
"(",
"self",
",",
"source_name",
")",
":",
"from",
"ambry",
".",
"orm",
".",
"exc",
"import",
"NotFoundError",
"from",
"ambry",
".",
"dbexceptions",
"import",
"ConfigurationError",
"source",
"=",
"self",
".",
"source",
"(",
"source_name",
")",
"ref",
"=",
"source",
".",
"url",
"if",
"not",
"ref",
":",
"raise",
"ValueError",
"(",
"\"Got an empty ref for source '{}' \"",
".",
"format",
"(",
"source",
".",
"name",
")",
")",
"try",
":",
"try",
":",
"p",
"=",
"self",
".",
"library",
".",
"partition",
"(",
"ref",
")",
"except",
"NotFoundError",
":",
"self",
".",
"warn",
"(",
"\"Partition reference {} not found, try to download it\"",
".",
"format",
"(",
"ref",
")",
")",
"remote",
",",
"vname",
"=",
"self",
".",
"library",
".",
"find_remote_bundle",
"(",
"ref",
",",
"try_harder",
"=",
"True",
")",
"if",
"remote",
":",
"self",
".",
"warn",
"(",
"\"Installing {} from {}\"",
".",
"format",
"(",
"remote",
",",
"vname",
")",
")",
"self",
".",
"library",
".",
"checkin_remote_bundle",
"(",
"vname",
",",
"remote",
")",
"p",
"=",
"self",
".",
"library",
".",
"partition",
"(",
"ref",
")",
"else",
":",
"raise",
"if",
"not",
"p",
".",
"is_local",
":",
"with",
"self",
".",
"progress",
".",
"start",
"(",
"'test'",
",",
"0",
",",
"message",
"=",
"'localizing'",
")",
"as",
"ps",
":",
"p",
".",
"localize",
"(",
"ps",
")",
"return",
"p",
"except",
"NotFoundError",
":",
"return",
"self",
".",
"library",
".",
"bundle",
"(",
"ref",
")"
] | 32.564103 | 25.025641 |
async def _receive_data_chunk(self, chunk):
"""
Handle a DATA chunk.
"""
self._sack_needed = True
# mark as received
if self._mark_received(chunk.tsn):
return
# find stream
inbound_stream = self._get_inbound_stream(chunk.stream_id)
# defragment data
inbound_stream.add_chunk(chunk)
self._advertised_rwnd -= len(chunk.user_data)
for message in inbound_stream.pop_messages():
self._advertised_rwnd += len(message[2])
await self._receive(*message) | [
"async",
"def",
"_receive_data_chunk",
"(",
"self",
",",
"chunk",
")",
":",
"self",
".",
"_sack_needed",
"=",
"True",
"# mark as received",
"if",
"self",
".",
"_mark_received",
"(",
"chunk",
".",
"tsn",
")",
":",
"return",
"# find stream",
"inbound_stream",
"=",
"self",
".",
"_get_inbound_stream",
"(",
"chunk",
".",
"stream_id",
")",
"# defragment data",
"inbound_stream",
".",
"add_chunk",
"(",
"chunk",
")",
"self",
".",
"_advertised_rwnd",
"-=",
"len",
"(",
"chunk",
".",
"user_data",
")",
"for",
"message",
"in",
"inbound_stream",
".",
"pop_messages",
"(",
")",
":",
"self",
".",
"_advertised_rwnd",
"+=",
"len",
"(",
"message",
"[",
"2",
"]",
")",
"await",
"self",
".",
"_receive",
"(",
"*",
"message",
")"
] | 29.526316 | 14.789474 |
def _resolve_placeholders(self):
"""Resolve objects that have been imported from elsewhere."""
modules = {}
for module in self.paths.values():
children = {child["name"]: child for child in module["children"]}
modules[module["name"]] = (module, children)
resolved = set()
for module_name in modules:
visit_path = collections.OrderedDict()
_resolve_module_placeholders(modules, module_name, visit_path, resolved) | [
"def",
"_resolve_placeholders",
"(",
"self",
")",
":",
"modules",
"=",
"{",
"}",
"for",
"module",
"in",
"self",
".",
"paths",
".",
"values",
"(",
")",
":",
"children",
"=",
"{",
"child",
"[",
"\"name\"",
"]",
":",
"child",
"for",
"child",
"in",
"module",
"[",
"\"children\"",
"]",
"}",
"modules",
"[",
"module",
"[",
"\"name\"",
"]",
"]",
"=",
"(",
"module",
",",
"children",
")",
"resolved",
"=",
"set",
"(",
")",
"for",
"module_name",
"in",
"modules",
":",
"visit_path",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"_resolve_module_placeholders",
"(",
"modules",
",",
"module_name",
",",
"visit_path",
",",
"resolved",
")"
] | 44.454545 | 18 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.