text
stringlengths 89
104k
| code_tokens
list | avg_line_len
float64 7.91
980
| score
float64 0
630
|
|---|---|---|---|
def camelcase_underscore(name):
    """Convert a CamelCase identifier to snake_case.

    Applies two boundary-splitting passes (acronym/word, then
    lower-to-upper transitions) and lower-cases the result.
    """
    boundary_patterns = ('(.)([A-Z][a-z]+)', '([a-z0-9])([A-Z])')
    result = name
    for pattern in boundary_patterns:
        result = re.sub(pattern, r'\1_\2', result)
    return result.lower()
|
[
"def",
"camelcase_underscore",
"(",
"name",
")",
":",
"s1",
"=",
"re",
".",
"sub",
"(",
"'(.)([A-Z][a-z]+)'",
",",
"r'\\1_\\2'",
",",
"name",
")",
"return",
"re",
".",
"sub",
"(",
"'([a-z0-9])([A-Z])'",
",",
"r'\\1_\\2'",
",",
"s1",
")",
".",
"lower",
"(",
")"
] | 47.75
| 10
|
def nextprefix(self):
    """
    Get the next available prefix. This means a prefix starting with 'ns'
    with a number appended as (ns0, ns1, ..) that is not already defined
    on the wsdl document.
    """
    # Collect every prefix already claimed, locally and on the wsdl root.
    taken = {ns[0] for ns in self.prefixes}
    taken.update(ns[0] for ns in self.wsdl.root.nsprefixes.items())
    for index in range(0, 1024):
        candidate = 'ns%d' % index
        if candidate not in taken:
            return candidate
    raise Exception('prefixes exhausted')
|
[
"def",
"nextprefix",
"(",
"self",
")",
":",
"used",
"=",
"[",
"ns",
"[",
"0",
"]",
"for",
"ns",
"in",
"self",
".",
"prefixes",
"]",
"used",
"+=",
"[",
"ns",
"[",
"0",
"]",
"for",
"ns",
"in",
"self",
".",
"wsdl",
".",
"root",
".",
"nsprefixes",
".",
"items",
"(",
")",
"]",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"1024",
")",
":",
"p",
"=",
"'ns%d'",
"%",
"n",
"if",
"p",
"not",
"in",
"used",
":",
"return",
"p",
"raise",
"Exception",
"(",
"'prefixes exhausted'",
")"
] | 38.076923
| 14.692308
|
def main(unused_argv):
    """Run an agent.

    Builds the player list and agent classes from command-line FLAGS, then
    runs FLAGS.parallel games: (parallel - 1) in background threads and one
    in the main thread (only the main-thread game may render).

    Args:
        unused_argv: leftover argv from the flags parser; ignored.
    """
    # Tracing implies profiling instrumentation is enabled.
    stopwatch.sw.enabled = FLAGS.profile or FLAGS.trace
    stopwatch.sw.trace = FLAGS.trace
    map_inst = maps.get(FLAGS.map)
    agent_classes = []
    players = []
    # Resolve the agent class from its dotted path, e.g. "pkg.module.AgentCls".
    agent_module, agent_name = FLAGS.agent.rsplit(".", 1)
    agent_cls = getattr(importlib.import_module(agent_module), agent_name)
    agent_classes.append(agent_cls)
    players.append(sc2_env.Agent(sc2_env.Race[FLAGS.agent_race],
                                 FLAGS.agent_name or agent_name))
    # Multi-player maps need a second participant: either a built-in bot or a
    # second scripted agent, chosen by FLAGS.agent2.
    if map_inst.players >= 2:
        if FLAGS.agent2 == "Bot":
            players.append(sc2_env.Bot(sc2_env.Race[FLAGS.agent2_race],
                                       sc2_env.Difficulty[FLAGS.difficulty]))
        else:
            agent_module, agent_name = FLAGS.agent2.rsplit(".", 1)
            agent_cls = getattr(importlib.import_module(agent_module), agent_name)
            agent_classes.append(agent_cls)
            players.append(sc2_env.Agent(sc2_env.Race[FLAGS.agent2_race],
                                         FLAGS.agent2_name or agent_name))
    threads = []
    # Extra games run on worker threads with rendering forced off.
    for _ in range(FLAGS.parallel - 1):
        t = threading.Thread(target=run_thread,
                             args=(agent_classes, players, FLAGS.map, False))
        threads.append(t)
        t.start()
    # The main thread runs its own game (possibly rendered), then waits for
    # the workers to finish.
    run_thread(agent_classes, players, FLAGS.map, FLAGS.render)
    for t in threads:
        t.join()
    if FLAGS.profile:
        print(stopwatch.sw)
|
[
"def",
"main",
"(",
"unused_argv",
")",
":",
"stopwatch",
".",
"sw",
".",
"enabled",
"=",
"FLAGS",
".",
"profile",
"or",
"FLAGS",
".",
"trace",
"stopwatch",
".",
"sw",
".",
"trace",
"=",
"FLAGS",
".",
"trace",
"map_inst",
"=",
"maps",
".",
"get",
"(",
"FLAGS",
".",
"map",
")",
"agent_classes",
"=",
"[",
"]",
"players",
"=",
"[",
"]",
"agent_module",
",",
"agent_name",
"=",
"FLAGS",
".",
"agent",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"agent_cls",
"=",
"getattr",
"(",
"importlib",
".",
"import_module",
"(",
"agent_module",
")",
",",
"agent_name",
")",
"agent_classes",
".",
"append",
"(",
"agent_cls",
")",
"players",
".",
"append",
"(",
"sc2_env",
".",
"Agent",
"(",
"sc2_env",
".",
"Race",
"[",
"FLAGS",
".",
"agent_race",
"]",
",",
"FLAGS",
".",
"agent_name",
"or",
"agent_name",
")",
")",
"if",
"map_inst",
".",
"players",
">=",
"2",
":",
"if",
"FLAGS",
".",
"agent2",
"==",
"\"Bot\"",
":",
"players",
".",
"append",
"(",
"sc2_env",
".",
"Bot",
"(",
"sc2_env",
".",
"Race",
"[",
"FLAGS",
".",
"agent2_race",
"]",
",",
"sc2_env",
".",
"Difficulty",
"[",
"FLAGS",
".",
"difficulty",
"]",
")",
")",
"else",
":",
"agent_module",
",",
"agent_name",
"=",
"FLAGS",
".",
"agent2",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"agent_cls",
"=",
"getattr",
"(",
"importlib",
".",
"import_module",
"(",
"agent_module",
")",
",",
"agent_name",
")",
"agent_classes",
".",
"append",
"(",
"agent_cls",
")",
"players",
".",
"append",
"(",
"sc2_env",
".",
"Agent",
"(",
"sc2_env",
".",
"Race",
"[",
"FLAGS",
".",
"agent2_race",
"]",
",",
"FLAGS",
".",
"agent2_name",
"or",
"agent_name",
")",
")",
"threads",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"FLAGS",
".",
"parallel",
"-",
"1",
")",
":",
"t",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"run_thread",
",",
"args",
"=",
"(",
"agent_classes",
",",
"players",
",",
"FLAGS",
".",
"map",
",",
"False",
")",
")",
"threads",
".",
"append",
"(",
"t",
")",
"t",
".",
"start",
"(",
")",
"run_thread",
"(",
"agent_classes",
",",
"players",
",",
"FLAGS",
".",
"map",
",",
"FLAGS",
".",
"render",
")",
"for",
"t",
"in",
"threads",
":",
"t",
".",
"join",
"(",
")",
"if",
"FLAGS",
".",
"profile",
":",
"print",
"(",
"stopwatch",
".",
"sw",
")"
] | 32.317073
| 23.268293
|
def train_local(self, closest_point, label_vector_description=None, N=None,
                pivot=True, **kwargs):
    """
    Train the model in a Cannon-like fashion using the grid points as labels
    and the intensities as normalised rest-frame fluxes within some local
    regime around *closest_point*.

    :param closest_point:
        Grid coordinates around which the local model is trained.
    :param label_vector_description:
        Optional label vector description; when None the cached
        ``self._cannon_label_vector`` is used.
    :param N:
        Number of nearest grid points to train on.  Values in (0, 1] are
        treated as a fraction of the grid and scaled to a point count.
        Defaults to the "grid_subset" configuration setting (10%).
    :param pivot:
        Passed through to ``_build_label_vector_array``.
    :param kwargs:
        Forwarded to ``self._train``.
    :return: the result of ``self._train``.
    """
    lv = self._cannon_label_vector if label_vector_description is None else\
        self._interpret_label_vector(label_vector_description)
    # By default we will train to the nearest 10% of the grid.
    # If grid subset is a fraction, scale it to real numbers.
    if N is None:
        N = self._configuration.get("settings", {}).get("grid_subset",
            0.10)
    # NOTE(review): N == 1 is interpreted as "100% of the grid", so training
    # on exactly one point cannot be requested via N=1 — confirm intended.
    if 1 >= N > 0:
        N = int(np.round(N * self.grid_points.size))
    logger.debug("Using {} nearest points for local Cannon model".format(N))
    # Use closest N points.
    # View the structured grid array as a plain (n_points, n_labels) float
    # matrix so vectorised distance arithmetic works.
    dtype = [(name, '<f8') for name in self.grid_points.dtype.names]
    grid_points \
        = self.grid_points.astype(dtype).view(float).reshape(-1, len(dtype))
    # Range-normalised L1 distance: each label axis is scaled by its
    # peak-to-peak spread so no single label dominates neighbour selection.
    distance = np.sum(np.abs(grid_points - np.array(closest_point))/
        np.ptp(grid_points, axis=0), axis=1)
    grid_indices = np.argsort(distance)[:N]
    lv_array, _, offsets = _build_label_vector_array(
        self.grid_points[grid_indices], lv, pivot=pivot)
    return self._train(lv_array, grid_indices, offsets, lv, **kwargs)
|
[
"def",
"train_local",
"(",
"self",
",",
"closest_point",
",",
"label_vector_description",
"=",
"None",
",",
"N",
"=",
"None",
",",
"pivot",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"lv",
"=",
"self",
".",
"_cannon_label_vector",
"if",
"label_vector_description",
"is",
"None",
"else",
"self",
".",
"_interpret_label_vector",
"(",
"label_vector_description",
")",
"# By default we will train to the nearest 10% of the grid.",
"# If grid subset is a fraction, scale it to real numbers.",
"if",
"N",
"is",
"None",
":",
"N",
"=",
"self",
".",
"_configuration",
".",
"get",
"(",
"\"settings\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"grid_subset\"",
",",
"0.10",
")",
"if",
"1",
">=",
"N",
">",
"0",
":",
"N",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"N",
"*",
"self",
".",
"grid_points",
".",
"size",
")",
")",
"logger",
".",
"debug",
"(",
"\"Using {} nearest points for local Cannon model\"",
".",
"format",
"(",
"N",
")",
")",
"# Use closest N points.",
"dtype",
"=",
"[",
"(",
"name",
",",
"'<f8'",
")",
"for",
"name",
"in",
"self",
".",
"grid_points",
".",
"dtype",
".",
"names",
"]",
"grid_points",
"=",
"self",
".",
"grid_points",
".",
"astype",
"(",
"dtype",
")",
".",
"view",
"(",
"float",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"len",
"(",
"dtype",
")",
")",
"distance",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"abs",
"(",
"grid_points",
"-",
"np",
".",
"array",
"(",
"closest_point",
")",
")",
"/",
"np",
".",
"ptp",
"(",
"grid_points",
",",
"axis",
"=",
"0",
")",
",",
"axis",
"=",
"1",
")",
"grid_indices",
"=",
"np",
".",
"argsort",
"(",
"distance",
")",
"[",
":",
"N",
"]",
"lv_array",
",",
"_",
",",
"offsets",
"=",
"_build_label_vector_array",
"(",
"self",
".",
"grid_points",
"[",
"grid_indices",
"]",
",",
"lv",
",",
"pivot",
"=",
"pivot",
")",
"return",
"self",
".",
"_train",
"(",
"lv_array",
",",
"grid_indices",
",",
"offsets",
",",
"lv",
",",
"*",
"*",
"kwargs",
")"
] | 43.242424
| 26.030303
|
def phone_numbers(self):
    """
    Access the phone_numbers

    :returns: twilio.rest.proxy.v1.service.phone_number.PhoneNumberList
    :rtype: twilio.rest.proxy.v1.service.phone_number.PhoneNumberList
    """
    # Lazily build the list resource on first access, then reuse it.
    cached = self._phone_numbers
    if cached is None:
        cached = PhoneNumberList(self._version, service_sid=self._solution['sid'], )
        self._phone_numbers = cached
    return cached
|
[
"def",
"phone_numbers",
"(",
"self",
")",
":",
"if",
"self",
".",
"_phone_numbers",
"is",
"None",
":",
"self",
".",
"_phone_numbers",
"=",
"PhoneNumberList",
"(",
"self",
".",
"_version",
",",
"service_sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
")",
"return",
"self",
".",
"_phone_numbers"
] | 40
| 20
|
def calculate_avg_score(res):
    """
    private function
    Calculate the average star score of the reviews present in res,
    rounded to the closest 0.5 and capped at 5.

    :param res: tuple of tuple returned from query_retrieve_comments_or_remarks
    :return: a float of the average score rounded to the closest 0.5
    """
    # Column index of the star score within each review row.
    star_column = 6
    scores = [row[star_column] for row in res if row[star_column] > 0]
    if not scores:
        return 0.0
    mean = sum(scores) / len(scores)
    whole = math.floor(mean)
    fraction = mean - whole
    # Snap the fractional part to 0, 0.5 or 1 (nearest half-star).
    if fraction < 0.25:
        rounded = whole
    elif fraction > 0.75:
        rounded = whole + 1
    else:
        rounded = whole + 0.5
    # Cap at the 5-star maximum.
    return 5.0 if rounded > 5 else rounded
|
[
"def",
"calculate_avg_score",
"(",
"res",
")",
":",
"c_star_score",
"=",
"6",
"avg_score",
"=",
"0.0",
"nb_reviews",
"=",
"0",
"for",
"comment",
"in",
"res",
":",
"if",
"comment",
"[",
"c_star_score",
"]",
">",
"0",
":",
"avg_score",
"+=",
"comment",
"[",
"c_star_score",
"]",
"nb_reviews",
"+=",
"1",
"if",
"nb_reviews",
"==",
"0",
":",
"return",
"0.0",
"avg_score",
"=",
"avg_score",
"/",
"nb_reviews",
"avg_score_unit",
"=",
"avg_score",
"-",
"math",
".",
"floor",
"(",
"avg_score",
")",
"if",
"avg_score_unit",
"<",
"0.25",
":",
"avg_score",
"=",
"math",
".",
"floor",
"(",
"avg_score",
")",
"elif",
"avg_score_unit",
">",
"0.75",
":",
"avg_score",
"=",
"math",
".",
"floor",
"(",
"avg_score",
")",
"+",
"1",
"else",
":",
"avg_score",
"=",
"math",
".",
"floor",
"(",
"avg_score",
")",
"+",
"0.5",
"if",
"avg_score",
">",
"5",
":",
"avg_score",
"=",
"5.0",
"return",
"avg_score"
] | 31.555556
| 14.37037
|
def as_property_description(self):
    """
    Get the property description.

    Returns a dictionary describing the property.  The instance metadata is
    deep-copied so callers may mutate the result freely.
    """
    description = deepcopy(self.metadata)
    # Ensure a links list exists, then register this property's canonical URL.
    links = description.setdefault('links', [])
    links.append(
        {
            'rel': 'property',
            'href': self.href_prefix + self.href,
        }
    )
    return description
|
[
"def",
"as_property_description",
"(",
"self",
")",
":",
"description",
"=",
"deepcopy",
"(",
"self",
".",
"metadata",
")",
"if",
"'links'",
"not",
"in",
"description",
":",
"description",
"[",
"'links'",
"]",
"=",
"[",
"]",
"description",
"[",
"'links'",
"]",
".",
"append",
"(",
"{",
"'rel'",
":",
"'property'",
",",
"'href'",
":",
"self",
".",
"href_prefix",
"+",
"self",
".",
"href",
",",
"}",
")",
"return",
"description"
] | 25
| 15.222222
|
def geometry_point(lat, lon, elev):
    """
    GeoJSON point. Latitude and Longitude only have one value each

    :param list lat: Latitude values
    :param list lon: Longitude values
    :param float elev: Elevation value
    :return dict:
    """
    logger_excel.info("enter geometry_point")
    coordinates = []
    # Interleave values as lon/lat pairs, matching GeoJSON ordering.
    for position, _ in enumerate(lat):
        try:
            coordinates.append(lon[position])
            coordinates.append(lat[position])
        except IndexError as e:
            print("Error: Invalid geo coordinates")
            logger_excel.debug("geometry_point: IndexError: lat: {}, lon: {}, {}".format(lat, lon, e))
    # Elevation is appended once, after all lon/lat pairs.
    coordinates.append(elev)
    point_dict = OrderedDict()
    point_dict['type'] = 'Point'
    point_dict['coordinates'] = coordinates
    logger_excel.info("exit geometry_point")
    return point_dict
|
[
"def",
"geometry_point",
"(",
"lat",
",",
"lon",
",",
"elev",
")",
":",
"logger_excel",
".",
"info",
"(",
"\"enter geometry_point\"",
")",
"coordinates",
"=",
"[",
"]",
"point_dict",
"=",
"OrderedDict",
"(",
")",
"for",
"idx",
",",
"val",
"in",
"enumerate",
"(",
"lat",
")",
":",
"try",
":",
"coordinates",
".",
"append",
"(",
"lon",
"[",
"idx",
"]",
")",
"coordinates",
".",
"append",
"(",
"lat",
"[",
"idx",
"]",
")",
"except",
"IndexError",
"as",
"e",
":",
"print",
"(",
"\"Error: Invalid geo coordinates\"",
")",
"logger_excel",
".",
"debug",
"(",
"\"geometry_point: IndexError: lat: {}, lon: {}, {}\"",
".",
"format",
"(",
"lat",
",",
"lon",
",",
"e",
")",
")",
"coordinates",
".",
"append",
"(",
"elev",
")",
"point_dict",
"[",
"'type'",
"]",
"=",
"'Point'",
"point_dict",
"[",
"'coordinates'",
"]",
"=",
"coordinates",
"logger_excel",
".",
"info",
"(",
"\"exit geometry_point\"",
")",
"return",
"point_dict"
] | 34.041667
| 12.458333
|
def isdir(path, **kwargs):
    """Return True when *path* refers to an existing directory.

    Thin wrapper around ``os.path.isdir``; extra keyword arguments are
    forwarded unchanged.
    """
    from os.path import isdir as _isdir
    return _isdir(path, **kwargs)
|
[
"def",
"isdir",
"(",
"path",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"os",
".",
"path",
"return",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
",",
"*",
"*",
"kwargs",
")"
] | 31
| 9
|
def write(self, data):
    """Write string data to current process input stream.

    Encodes *data* as UTF-8 and hands it to the Win32 ``WriteFile`` call on
    the process's console input pipe.

    :param data: text to send to the child process's input.
    :return: tuple ``(success, bytes_written)`` where ``success`` is the raw
        ``WriteFile`` result and ``bytes_written`` the reported byte count.
    """
    data = data.encode('utf-8')
    # ctypes buffer WriteFile can read from.
    data_p = ctypes.create_string_buffer(data)
    # Out-parameter that receives the number of bytes actually written.
    num_bytes = PLARGE_INTEGER(LARGE_INTEGER(0))
    bytes_to_write = len(data)
    success = WriteFile(self.conin_pipe, data_p,
                        bytes_to_write, num_bytes, None)
    # num_bytes[0] dereferences the pointer to read the written byte count.
    return success, num_bytes[0]
|
[
"def",
"write",
"(",
"self",
",",
"data",
")",
":",
"data",
"=",
"data",
".",
"encode",
"(",
"'utf-8'",
")",
"data_p",
"=",
"ctypes",
".",
"create_string_buffer",
"(",
"data",
")",
"num_bytes",
"=",
"PLARGE_INTEGER",
"(",
"LARGE_INTEGER",
"(",
"0",
")",
")",
"bytes_to_write",
"=",
"len",
"(",
"data",
")",
"success",
"=",
"WriteFile",
"(",
"self",
".",
"conin_pipe",
",",
"data_p",
",",
"bytes_to_write",
",",
"num_bytes",
",",
"None",
")",
"return",
"success",
",",
"num_bytes",
"[",
"0",
"]"
] | 45
| 9.666667
|
def export():
    r'''
    Restores the trained variables into a simpler graph that will be exported
    for serving: a frozen protobuf by default, or a TF Lite model when
    FLAGS.export_tflite is set.  Runtime errors during export are logged,
    not raised.
    '''
    log_info('Exporting the model...')
    from tensorflow.python.framework.ops import Tensor, Operation
    # Build the inference graph sized for export (batch size / unrolled steps).
    inputs, outputs, _ = create_inference_graph(batch_size=FLAGS.export_batch_size, n_steps=FLAGS.n_steps, tflite=FLAGS.export_tflite)
    # Node names to keep when freezing; outputs may hold Tensors or Operations.
    output_names_tensors = [tensor.op.name for tensor in outputs.values() if isinstance(tensor, Tensor)]
    output_names_ops = [op.name for op in outputs.values() if isinstance(op, Operation)]
    output_names = ",".join(output_names_tensors + output_names_ops)
    if not FLAGS.export_tflite:
        # Keep the streaming-state variables out of the saver mapping; they
        # are fed at runtime rather than baked into the frozen graph.
        mapping = {v.op.name: v for v in tf.global_variables() if not v.op.name.startswith('previous_state_')}
    else:
        # Create a saver using variables from the above newly created graph
        def fixup(name):
            # The TF Lite graph renames the LSTM scope to the fused-cell name.
            if name.startswith('rnn/lstm_cell/'):
                return name.replace('rnn/lstm_cell/', 'lstm_fused_cell/')
            return name
        mapping = {fixup(v.op.name): v for v in tf.global_variables()}
    saver = tf.train.Saver(mapping)
    # Restore variables from training checkpoint
    checkpoint = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    checkpoint_path = checkpoint.model_checkpoint_path
    output_filename = 'output_graph.pb'
    if FLAGS.remove_export:
        if os.path.isdir(FLAGS.export_dir):
            log_info('Removing old export')
            shutil.rmtree(FLAGS.export_dir)
    try:
        output_graph_path = os.path.join(FLAGS.export_dir, output_filename)
        if not os.path.isdir(FLAGS.export_dir):
            os.makedirs(FLAGS.export_dir)
        def do_graph_freeze(output_file=None, output_node_names=None, variables_blacklist=None):
            # Freeze from the in-memory graph/saver defs instead of files.
            return freeze_graph.freeze_graph_with_def_protos(
                input_graph_def=tf.get_default_graph().as_graph_def(),
                input_saver_def=saver.as_saver_def(),
                input_checkpoint=checkpoint_path,
                output_node_names=output_node_names,
                restore_op_name=None,
                filename_tensor_name=None,
                output_graph=output_file,
                clear_devices=False,
                variable_names_blacklist=variables_blacklist,
                initializer_nodes='')
        if not FLAGS.export_tflite:
            frozen_graph = do_graph_freeze(output_node_names=output_names, variables_blacklist='previous_state_c,previous_state_h')
            frozen_graph.version = int(file_relative_read('GRAPH_VERSION').strip())
            # Add a no-op node to the graph with metadata information to be loaded by the native client
            metadata = frozen_graph.node.add()
            metadata.name = 'model_metadata'
            metadata.op = 'NoOp'
            metadata.attr['sample_rate'].i = FLAGS.audio_sample_rate
            metadata.attr['feature_win_len'].i = FLAGS.feature_win_len
            metadata.attr['feature_win_step'].i = FLAGS.feature_win_step
            if FLAGS.export_language:
                metadata.attr['language'].s = FLAGS.export_language.encode('ascii')
            with open(output_graph_path, 'wb') as fout:
                fout.write(frozen_graph.SerializeToString())
        else:
            frozen_graph = do_graph_freeze(output_node_names=output_names, variables_blacklist='')
            output_tflite_path = os.path.join(FLAGS.export_dir, output_filename.replace('.pb', '.tflite'))
            converter = tf.lite.TFLiteConverter(frozen_graph, input_tensors=inputs.values(), output_tensors=outputs.values())
            converter.post_training_quantize = True
            # AudioSpectrogram and Mfcc ops are custom but have built-in kernels in TFLite
            converter.allow_custom_ops = True
            tflite_model = converter.convert()
            with open(output_tflite_path, 'wb') as fout:
                fout.write(tflite_model)
            log_info('Exported model for TF Lite engine as {}'.format(os.path.basename(output_tflite_path)))
        log_info('Models exported at %s' % (FLAGS.export_dir))
    except RuntimeError as e:
        log_error(str(e))
|
[
"def",
"export",
"(",
")",
":",
"log_info",
"(",
"'Exporting the model...'",
")",
"from",
"tensorflow",
".",
"python",
".",
"framework",
".",
"ops",
"import",
"Tensor",
",",
"Operation",
"inputs",
",",
"outputs",
",",
"_",
"=",
"create_inference_graph",
"(",
"batch_size",
"=",
"FLAGS",
".",
"export_batch_size",
",",
"n_steps",
"=",
"FLAGS",
".",
"n_steps",
",",
"tflite",
"=",
"FLAGS",
".",
"export_tflite",
")",
"output_names_tensors",
"=",
"[",
"tensor",
".",
"op",
".",
"name",
"for",
"tensor",
"in",
"outputs",
".",
"values",
"(",
")",
"if",
"isinstance",
"(",
"tensor",
",",
"Tensor",
")",
"]",
"output_names_ops",
"=",
"[",
"op",
".",
"name",
"for",
"op",
"in",
"outputs",
".",
"values",
"(",
")",
"if",
"isinstance",
"(",
"op",
",",
"Operation",
")",
"]",
"output_names",
"=",
"\",\"",
".",
"join",
"(",
"output_names_tensors",
"+",
"output_names_ops",
")",
"if",
"not",
"FLAGS",
".",
"export_tflite",
":",
"mapping",
"=",
"{",
"v",
".",
"op",
".",
"name",
":",
"v",
"for",
"v",
"in",
"tf",
".",
"global_variables",
"(",
")",
"if",
"not",
"v",
".",
"op",
".",
"name",
".",
"startswith",
"(",
"'previous_state_'",
")",
"}",
"else",
":",
"# Create a saver using variables from the above newly created graph",
"def",
"fixup",
"(",
"name",
")",
":",
"if",
"name",
".",
"startswith",
"(",
"'rnn/lstm_cell/'",
")",
":",
"return",
"name",
".",
"replace",
"(",
"'rnn/lstm_cell/'",
",",
"'lstm_fused_cell/'",
")",
"return",
"name",
"mapping",
"=",
"{",
"fixup",
"(",
"v",
".",
"op",
".",
"name",
")",
":",
"v",
"for",
"v",
"in",
"tf",
".",
"global_variables",
"(",
")",
"}",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
"mapping",
")",
"# Restore variables from training checkpoint",
"checkpoint",
"=",
"tf",
".",
"train",
".",
"get_checkpoint_state",
"(",
"FLAGS",
".",
"checkpoint_dir",
")",
"checkpoint_path",
"=",
"checkpoint",
".",
"model_checkpoint_path",
"output_filename",
"=",
"'output_graph.pb'",
"if",
"FLAGS",
".",
"remove_export",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"FLAGS",
".",
"export_dir",
")",
":",
"log_info",
"(",
"'Removing old export'",
")",
"shutil",
".",
"rmtree",
"(",
"FLAGS",
".",
"export_dir",
")",
"try",
":",
"output_graph_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"FLAGS",
".",
"export_dir",
",",
"output_filename",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"FLAGS",
".",
"export_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"FLAGS",
".",
"export_dir",
")",
"def",
"do_graph_freeze",
"(",
"output_file",
"=",
"None",
",",
"output_node_names",
"=",
"None",
",",
"variables_blacklist",
"=",
"None",
")",
":",
"return",
"freeze_graph",
".",
"freeze_graph_with_def_protos",
"(",
"input_graph_def",
"=",
"tf",
".",
"get_default_graph",
"(",
")",
".",
"as_graph_def",
"(",
")",
",",
"input_saver_def",
"=",
"saver",
".",
"as_saver_def",
"(",
")",
",",
"input_checkpoint",
"=",
"checkpoint_path",
",",
"output_node_names",
"=",
"output_node_names",
",",
"restore_op_name",
"=",
"None",
",",
"filename_tensor_name",
"=",
"None",
",",
"output_graph",
"=",
"output_file",
",",
"clear_devices",
"=",
"False",
",",
"variable_names_blacklist",
"=",
"variables_blacklist",
",",
"initializer_nodes",
"=",
"''",
")",
"if",
"not",
"FLAGS",
".",
"export_tflite",
":",
"frozen_graph",
"=",
"do_graph_freeze",
"(",
"output_node_names",
"=",
"output_names",
",",
"variables_blacklist",
"=",
"'previous_state_c,previous_state_h'",
")",
"frozen_graph",
".",
"version",
"=",
"int",
"(",
"file_relative_read",
"(",
"'GRAPH_VERSION'",
")",
".",
"strip",
"(",
")",
")",
"# Add a no-op node to the graph with metadata information to be loaded by the native client",
"metadata",
"=",
"frozen_graph",
".",
"node",
".",
"add",
"(",
")",
"metadata",
".",
"name",
"=",
"'model_metadata'",
"metadata",
".",
"op",
"=",
"'NoOp'",
"metadata",
".",
"attr",
"[",
"'sample_rate'",
"]",
".",
"i",
"=",
"FLAGS",
".",
"audio_sample_rate",
"metadata",
".",
"attr",
"[",
"'feature_win_len'",
"]",
".",
"i",
"=",
"FLAGS",
".",
"feature_win_len",
"metadata",
".",
"attr",
"[",
"'feature_win_step'",
"]",
".",
"i",
"=",
"FLAGS",
".",
"feature_win_step",
"if",
"FLAGS",
".",
"export_language",
":",
"metadata",
".",
"attr",
"[",
"'language'",
"]",
".",
"s",
"=",
"FLAGS",
".",
"export_language",
".",
"encode",
"(",
"'ascii'",
")",
"with",
"open",
"(",
"output_graph_path",
",",
"'wb'",
")",
"as",
"fout",
":",
"fout",
".",
"write",
"(",
"frozen_graph",
".",
"SerializeToString",
"(",
")",
")",
"else",
":",
"frozen_graph",
"=",
"do_graph_freeze",
"(",
"output_node_names",
"=",
"output_names",
",",
"variables_blacklist",
"=",
"''",
")",
"output_tflite_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"FLAGS",
".",
"export_dir",
",",
"output_filename",
".",
"replace",
"(",
"'.pb'",
",",
"'.tflite'",
")",
")",
"converter",
"=",
"tf",
".",
"lite",
".",
"TFLiteConverter",
"(",
"frozen_graph",
",",
"input_tensors",
"=",
"inputs",
".",
"values",
"(",
")",
",",
"output_tensors",
"=",
"outputs",
".",
"values",
"(",
")",
")",
"converter",
".",
"post_training_quantize",
"=",
"True",
"# AudioSpectrogram and Mfcc ops are custom but have built-in kernels in TFLite",
"converter",
".",
"allow_custom_ops",
"=",
"True",
"tflite_model",
"=",
"converter",
".",
"convert",
"(",
")",
"with",
"open",
"(",
"output_tflite_path",
",",
"'wb'",
")",
"as",
"fout",
":",
"fout",
".",
"write",
"(",
"tflite_model",
")",
"log_info",
"(",
"'Exported model for TF Lite engine as {}'",
".",
"format",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"output_tflite_path",
")",
")",
")",
"log_info",
"(",
"'Models exported at %s'",
"%",
"(",
"FLAGS",
".",
"export_dir",
")",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"log_error",
"(",
"str",
"(",
"e",
")",
")"
] | 47.103448
| 27.724138
|
def configure(self, args):
    """Configure the set of plugins with the given args.

    After configuration, disabled plugins are removed from the plugins list.
    """
    for plugin in self._plugins:
        option = "plugin_%s" % self.plugin_name(plugin)
        plugin.enabled = getattr(args, option, False)
        # Give each enabled plugin a chance to read its own options.
        hook = getattr(plugin, "configure", None)
        if plugin.enabled and hook and callable(hook):
            hook(args)
    LOG.debug("Available plugins: %s", self._plugins)
    # Keep only the plugins that ended up enabled.
    self.plugins = [candidate for candidate in self._plugins
                    if getattr(candidate, "enabled", False)]
    LOG.debug("Enabled plugins: %s", self.plugins)
|
[
"def",
"configure",
"(",
"self",
",",
"args",
")",
":",
"for",
"plug",
"in",
"self",
".",
"_plugins",
":",
"plug_name",
"=",
"self",
".",
"plugin_name",
"(",
"plug",
")",
"plug",
".",
"enabled",
"=",
"getattr",
"(",
"args",
",",
"\"plugin_%s\"",
"%",
"plug_name",
",",
"False",
")",
"if",
"plug",
".",
"enabled",
"and",
"getattr",
"(",
"plug",
",",
"\"configure\"",
",",
"None",
")",
":",
"if",
"callable",
"(",
"getattr",
"(",
"plug",
",",
"\"configure\"",
",",
"None",
")",
")",
":",
"plug",
".",
"configure",
"(",
"args",
")",
"LOG",
".",
"debug",
"(",
"\"Available plugins: %s\"",
",",
"self",
".",
"_plugins",
")",
"self",
".",
"plugins",
"=",
"[",
"plugin",
"for",
"plugin",
"in",
"self",
".",
"_plugins",
"if",
"getattr",
"(",
"plugin",
",",
"\"enabled\"",
",",
"False",
")",
"]",
"LOG",
".",
"debug",
"(",
"\"Enabled plugins: %s\"",
",",
"self",
".",
"plugins",
")"
] | 50.214286
| 19.428571
|
def list_permissions(self, group_name=None, resource=None):
    """List permission sets associated filtering by group and/or resource.

    Args:
        group_name (string): Name of group.
        resource (intern.resource.boss.Resource): Identifies which data model object to operate on.

    Returns:
        (list): List of permissions.

    Raises:
        requests.HTTPError on failure.
    """
    # Pure delegation: forward the filters plus the connection context.
    svc = self.service
    return svc.list_permissions(group_name, resource, self.url_prefix,
                                self.auth, self.session,
                                self.session_send_opts)
|
[
"def",
"list_permissions",
"(",
"self",
",",
"group_name",
"=",
"None",
",",
"resource",
"=",
"None",
")",
":",
"return",
"self",
".",
"service",
".",
"list_permissions",
"(",
"group_name",
",",
"resource",
",",
"self",
".",
"url_prefix",
",",
"self",
".",
"auth",
",",
"self",
".",
"session",
",",
"self",
".",
"session_send_opts",
")"
] | 40
| 25.533333
|
def received(self, limit=None):
    """
    Returns all the events that have been received (excluding sent events),
    until a limit if defined

    Args:
        limit (int, optional): the max length of the events to return
            (Default value = None)

    Returns:
        list: a list of received events
    """
    # Keep entries whose event was not sent, take at most `limit` of them,
    # then reverse so the most recent entry comes first.
    unsent = (entry for entry in self.store if not entry[1].sent)
    return list(itertools.islice(unsent, limit))[::-1]
|
[
"def",
"received",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"return",
"list",
"(",
"itertools",
".",
"islice",
"(",
"(",
"itertools",
".",
"filterfalse",
"(",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
".",
"sent",
",",
"self",
".",
"store",
")",
")",
",",
"limit",
")",
")",
"[",
":",
":",
"-",
"1",
"]"
] | 35.75
| 30.583333
|
def _add_genes_to_bed(in_file, gene_file, fai_file, out_file, data, max_distance=10000):
    """Re-usable subcomponent that annotates BED file genes from another BED

    Runs a shell pipeline that attaches the closest gene (within
    *max_distance* bases) from *gene_file* to each record of *in_file* and
    writes the merged, annotated result to *out_file*.

    :param in_file: input BED (optionally gzipped)
    :param gene_file: BED of gene/transcript regions used for annotation
    :param fai_file: genome .fai index used by gsort/bedtools closest
    :param out_file: destination BED path
    :param data: bcbio sample dictionary (used to resolve programs/genome)
    :param max_distance: genes farther than this many bases are blanked to '.'
    """
    try:
        input_rec = next(iter(pybedtools.BedTool(in_file)))
    except StopIteration:  # empty file
        utils.copy_plus(in_file, out_file)
        return
    # keep everything after standard chrom/start/end, 1-based
    extra_fields = list(range(4, len(input_rec.fields) + 1))
    # keep the new gene annotation
    gene_index = len(input_rec.fields) + 4
    extra_fields.append(gene_index)
    columns = ",".join([str(x) for x in extra_fields])
    max_column = max(extra_fields) + 1
    ops = ",".join(["distinct"] * len(extra_fields))
    # swap over gene name to '.' if beyond maximum distance
    # cut removes the last distance column which can cause issues
    # with bedtools merge: 'ERROR: illegal character '.' found in integer conversion of string'
    distance_filter = (r"""awk -F$'\t' -v OFS='\t' '{if ($NF > %s || $NF < -%s) $%s = "."} {print}'""" %
                       (max_distance, max_distance, gene_index))
    # NOTE(review): sort_cmd is computed but never referenced in the command
    # string below — confirm whether it is still needed.
    sort_cmd = bedutils.get_sort_cmd(os.path.dirname(out_file))
    cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
    # Ensure gene transcripts match reference genome
    ready_gene_file = os.path.join(os.path.dirname(out_file), "%s-genomeonly.bed" %
                                   (utils.splitext_plus(os.path.basename(gene_file))[0]))
    ready_gene_file = bedutils.subset_to_genome(gene_file, ready_gene_file, data)
    exports = "export TMPDIR=%s && %s" % (os.path.dirname(out_file), utils.local_path_export())
    bcbio_py = sys.executable
    gsort = config_utils.get_program("gsort", data)
    # Pipeline: strip headers -> sanitize records -> genome-sort -> closest
    # gene -> blank distant genes -> drop distance column -> merge duplicates.
    # The command is filled via format(**locals()), so the local variable
    # names above must match the {placeholders} exactly.
    cmd = ("{exports}{cat_cmd} {in_file} | grep -v ^track | grep -v ^browser | grep -v ^# | "
           "{bcbio_py} -c 'from bcbio.variation import bedutils; bedutils.remove_bad()' | "
           "{gsort} - {fai_file} | "
           "bedtools closest -g {fai_file} "
           "-D ref -t first -a - -b <({gsort} {ready_gene_file} {fai_file}) | "
           "{distance_filter} | cut -f 1-{max_column} | "
           "bedtools merge -i - -c {columns} -o {ops} -delim ',' -d -10 > {out_file}")
    do.run(cmd.format(**locals()), "Annotate BED file with gene info")
|
[
"def",
"_add_genes_to_bed",
"(",
"in_file",
",",
"gene_file",
",",
"fai_file",
",",
"out_file",
",",
"data",
",",
"max_distance",
"=",
"10000",
")",
":",
"try",
":",
"input_rec",
"=",
"next",
"(",
"iter",
"(",
"pybedtools",
".",
"BedTool",
"(",
"in_file",
")",
")",
")",
"except",
"StopIteration",
":",
"# empty file",
"utils",
".",
"copy_plus",
"(",
"in_file",
",",
"out_file",
")",
"return",
"# keep everything after standard chrom/start/end, 1-based",
"extra_fields",
"=",
"list",
"(",
"range",
"(",
"4",
",",
"len",
"(",
"input_rec",
".",
"fields",
")",
"+",
"1",
")",
")",
"# keep the new gene annotation",
"gene_index",
"=",
"len",
"(",
"input_rec",
".",
"fields",
")",
"+",
"4",
"extra_fields",
".",
"append",
"(",
"gene_index",
")",
"columns",
"=",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"extra_fields",
"]",
")",
"max_column",
"=",
"max",
"(",
"extra_fields",
")",
"+",
"1",
"ops",
"=",
"\",\"",
".",
"join",
"(",
"[",
"\"distinct\"",
"]",
"*",
"len",
"(",
"extra_fields",
")",
")",
"# swap over gene name to '.' if beyond maximum distance",
"# cut removes the last distance column which can cause issues",
"# with bedtools merge: 'ERROR: illegal character '.' found in integer conversion of string'",
"distance_filter",
"=",
"(",
"r\"\"\"awk -F$'\\t' -v OFS='\\t' '{if ($NF > %s || $NF < -%s) $%s = \".\"} {print}'\"\"\"",
"%",
"(",
"max_distance",
",",
"max_distance",
",",
"gene_index",
")",
")",
"sort_cmd",
"=",
"bedutils",
".",
"get_sort_cmd",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"out_file",
")",
")",
"cat_cmd",
"=",
"\"zcat\"",
"if",
"in_file",
".",
"endswith",
"(",
"\".gz\"",
")",
"else",
"\"cat\"",
"# Ensure gene transcripts match reference genome",
"ready_gene_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"out_file",
")",
",",
"\"%s-genomeonly.bed\"",
"%",
"(",
"utils",
".",
"splitext_plus",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"gene_file",
")",
")",
"[",
"0",
"]",
")",
")",
"ready_gene_file",
"=",
"bedutils",
".",
"subset_to_genome",
"(",
"gene_file",
",",
"ready_gene_file",
",",
"data",
")",
"exports",
"=",
"\"export TMPDIR=%s && %s\"",
"%",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"out_file",
")",
",",
"utils",
".",
"local_path_export",
"(",
")",
")",
"bcbio_py",
"=",
"sys",
".",
"executable",
"gsort",
"=",
"config_utils",
".",
"get_program",
"(",
"\"gsort\"",
",",
"data",
")",
"cmd",
"=",
"(",
"\"{exports}{cat_cmd} {in_file} | grep -v ^track | grep -v ^browser | grep -v ^# | \"",
"\"{bcbio_py} -c 'from bcbio.variation import bedutils; bedutils.remove_bad()' | \"",
"\"{gsort} - {fai_file} | \"",
"\"bedtools closest -g {fai_file} \"",
"\"-D ref -t first -a - -b <({gsort} {ready_gene_file} {fai_file}) | \"",
"\"{distance_filter} | cut -f 1-{max_column} | \"",
"\"bedtools merge -i - -c {columns} -o {ops} -delim ',' -d -10 > {out_file}\"",
")",
"do",
".",
"run",
"(",
"cmd",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
",",
"\"Annotate BED file with gene info\"",
")"
] | 59.394737
| 22.210526
|
def p_print_list_expr(p):
    """ print_elem : expr
        | print_at
        | print_tab
        | attr
        | BOLD expr
        | ITALIC expr
    """
    # PLY grammar action: the docstring above IS the grammar rule — do not
    # edit it without regenerating the parser tables.
    # For the BOLD/ITALIC alternatives p[1] is the keyword token's value, so
    # build a BOLD_TMP/ITALIC_TMP sentence with the expression cast to ubyte.
    if p[1] in ('BOLD', 'ITALIC'):
        p[0] = make_sentence(p[1] + '_TMP',
                             make_typecast(TYPE.ubyte, p[2], p.lineno(1)))
    else:
        # Single-symbol alternatives pass straight through unchanged.
        p[0] = p[1]
|
[
"def",
"p_print_list_expr",
"(",
"p",
")",
":",
"if",
"p",
"[",
"1",
"]",
"in",
"(",
"'BOLD'",
",",
"'ITALIC'",
")",
":",
"p",
"[",
"0",
"]",
"=",
"make_sentence",
"(",
"p",
"[",
"1",
"]",
"+",
"'_TMP'",
",",
"make_typecast",
"(",
"TYPE",
".",
"ubyte",
",",
"p",
"[",
"2",
"]",
",",
"p",
".",
"lineno",
"(",
"1",
")",
")",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]"
] | 29.384615
| 12.615385
|
def main():
    """Main entry point

    Parses the command-line options, builds one Window per input (each file
    argument, plus stdin when it is not a tty) and runs the Gtk main loop.
    Note: this code is Python 2 (uses the print statement and lazy map()).
    """
    # Quit when interrupted
    import signal
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    # Arguments:
    # --separator STRING/REGEX - how to split a row into cells (only relevant for CSV parser)
    # --flatten - flatten item hashes. {'a':{'b':'c'}} --> {'a_b':'c'}
    import argparse
    parser = argparse.ArgumentParser(description='View tabulated data via GUI')
    parser.add_argument('-p','--parser',type=str, default='autosplit',help='Type of parser to use') #TODO add possible parsers
    parser.add_argument('--headers',type=str, help='Headers are this comma-delimited names instead of ones supplied in file. Use colons to specify types, like "colname:int"')
    parser.add_argument('--filter',type=str, help='Pre-populate filter box')
    parser.add_argument('-s', '--separator', help='How to seperate columns. Applies only to some parsers')
    parser.add_argument('files', nargs='*', help='Files to show. Each file opens a new window')
    args = parser.parse_args()
    GObject.threads_init()
    # Per-window configuration derived from the CLI options; the same dict
    # object is shared by every window.
    default_config = {'format': args.parser}
    if args.filter:
        default_config['filter']=args.filter
    if args.separator:
        default_config['separator']=args.separator
    if args.headers:
        default_config['headers']=map(lambda s: s.strip(),args.headers.split(','))
    inputs = [ (open(f,'r'),default_config) for f in args.files ]
    # Add stdin as input, if it's not a tty
    if not sys.stdin.isatty():
        inputs.append((sys.stdin, default_config))
    global windows
    windows = [Window(i[0],i[1]) for i in inputs]
    for win in windows:
        win.start_read()
        win.connect("destroy",window_closed)
    if windows:
        Gtk.main()
    else:
        print 'No input supplied so no windows are created'
|
[
"def",
"main",
"(",
")",
":",
"# Quit when interrupted",
"import",
"signal",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"SIG_DFL",
")",
"# Arguments:",
"# --separator STRING/REGEX - how to split a row into cells (only relevant for CSV parser)",
"# --flatten - flatten item hashes. {'a':{'b':'c'}} --> {'a_b':'c'}",
"import",
"argparse",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'View tabulated data via GUI'",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"'--parser'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'autosplit'",
",",
"help",
"=",
"'Type of parser to use'",
")",
"#TODO add possible parsers",
"parser",
".",
"add_argument",
"(",
"'--headers'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Headers are this comma-delimited names instead of ones supplied in file. Use colons to specify types, like \"colname:int\"'",
")",
"parser",
".",
"add_argument",
"(",
"'--filter'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Pre-populate filter box'",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"'--separator'",
",",
"help",
"=",
"'How to seperate columns. Applies only to some parsers'",
")",
"parser",
".",
"add_argument",
"(",
"'files'",
",",
"nargs",
"=",
"'*'",
",",
"help",
"=",
"'Files to show. Each file opens a new window'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"GObject",
".",
"threads_init",
"(",
")",
"default_config",
"=",
"{",
"'format'",
":",
"args",
".",
"parser",
"}",
"if",
"args",
".",
"filter",
":",
"default_config",
"[",
"'filter'",
"]",
"=",
"args",
".",
"filter",
"if",
"args",
".",
"separator",
":",
"default_config",
"[",
"'separator'",
"]",
"=",
"args",
".",
"separator",
"if",
"args",
".",
"headers",
":",
"default_config",
"[",
"'headers'",
"]",
"=",
"map",
"(",
"lambda",
"s",
":",
"s",
".",
"strip",
"(",
")",
",",
"args",
".",
"headers",
".",
"split",
"(",
"','",
")",
")",
"inputs",
"=",
"[",
"(",
"open",
"(",
"f",
",",
"'r'",
")",
",",
"default_config",
")",
"for",
"f",
"in",
"args",
".",
"files",
"]",
"# Add stdin as input, if it's not a tty",
"if",
"not",
"sys",
".",
"stdin",
".",
"isatty",
"(",
")",
":",
"inputs",
".",
"append",
"(",
"(",
"sys",
".",
"stdin",
",",
"default_config",
")",
")",
"global",
"windows",
"windows",
"=",
"[",
"Window",
"(",
"i",
"[",
"0",
"]",
",",
"i",
"[",
"1",
"]",
")",
"for",
"i",
"in",
"inputs",
"]",
"for",
"win",
"in",
"windows",
":",
"win",
".",
"start_read",
"(",
")",
"win",
".",
"connect",
"(",
"\"destroy\"",
",",
"window_closed",
")",
"if",
"windows",
":",
"Gtk",
".",
"main",
"(",
")",
"else",
":",
"print",
"'No input supplied so no windows are created'"
] | 41.97619
| 27.928571
|
def print(self, *args, **kwargs):
    '''
    Drop-in replacement for the builtin ``print`` that only produces
    output when verbose mode was requested on the last parsed arguments.
    '''
    last_args = self._last_args
    if last_args and last_args.verbose:
        # Delegate to the builtin print (global lookup, not this method).
        print(*args, **kwargs)
|
[
"def",
"print",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_last_args",
"and",
"self",
".",
"_last_args",
".",
"verbose",
":",
"print",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 34.857143
| 19.428571
|
def run_pod(self, pod, startup_timeout=120, get_logs=True):
    # type: (Pod, int, bool) -> Tuple[State, Optional[str]]
    """
    Launch the pod and block until it has completed.

    Args:
        pod (Pod): pod specification to run
        startup_timeout (int): seconds to wait for the pod to leave the
            pending phase before the task is considered failed
        get_logs (bool): forwarded to the monitoring step
    """
    response = self.run_pod_async(pod)
    launch_time = dt.now()
    if response.status.start_time is None:
        # No start time recorded yet: poll until the pod starts or the
        # startup budget is exhausted.
        while self.pod_not_started(pod):
            elapsed = dt.now() - launch_time
            if elapsed.seconds >= startup_timeout:
                raise AirflowException("Pod took too long to start")
            time.sleep(1)
            self.log.debug('Pod not yet started')
    return self._monitor_pod(pod, get_logs)
|
[
"def",
"run_pod",
"(",
"self",
",",
"pod",
",",
"startup_timeout",
"=",
"120",
",",
"get_logs",
"=",
"True",
")",
":",
"# type: (Pod, int, bool) -> Tuple[State, Optional[str]]",
"resp",
"=",
"self",
".",
"run_pod_async",
"(",
"pod",
")",
"curr_time",
"=",
"dt",
".",
"now",
"(",
")",
"if",
"resp",
".",
"status",
".",
"start_time",
"is",
"None",
":",
"while",
"self",
".",
"pod_not_started",
"(",
"pod",
")",
":",
"delta",
"=",
"dt",
".",
"now",
"(",
")",
"-",
"curr_time",
"if",
"delta",
".",
"seconds",
">=",
"startup_timeout",
":",
"raise",
"AirflowException",
"(",
"\"Pod took too long to start\"",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'Pod not yet started'",
")",
"return",
"self",
".",
"_monitor_pod",
"(",
"pod",
",",
"get_logs",
")"
] | 41.15
| 15.05
|
def get(self, sid):
    """
    Constructs a CredentialListContext
    :param sid: Fetch by unique credential list Sid
    :returns: twilio.rest.api.v2010.account.sip.credential_list.CredentialListContext
    :rtype: twilio.rest.api.v2010.account.sip.credential_list.CredentialListContext
    """
    account_sid = self._solution['account_sid']
    return CredentialListContext(self._version, account_sid=account_sid, sid=sid)
|
[
"def",
"get",
"(",
"self",
",",
"sid",
")",
":",
"return",
"CredentialListContext",
"(",
"self",
".",
"_version",
",",
"account_sid",
"=",
"self",
".",
"_solution",
"[",
"'account_sid'",
"]",
",",
"sid",
"=",
"sid",
",",
")"
] | 41.9
| 27.9
|
def grant_db_access(conn, schema, table, role):
    r"""Gives access to database users/ groups
    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    role : str
        database role that access is granted to
    """
    # NOTE(review): schema/table/role are interpolated directly into the SQL
    # text. Identifiers cannot be bound as query parameters, so callers must
    # ensure these values are trusted (never user-supplied input).
    grant_str = """GRANT ALL ON TABLE {schema}.{table}
                   TO {role} WITH GRANT OPTION;""".format(schema=schema, table=table,
                                                          role=role)
    conn.execute(grant_str)
|
[
"def",
"grant_db_access",
"(",
"conn",
",",
"schema",
",",
"table",
",",
"role",
")",
":",
"grant_str",
"=",
"\"\"\"GRANT ALL ON TABLE {schema}.{table}\n TO {role} WITH GRANT OPTION;\"\"\"",
".",
"format",
"(",
"schema",
"=",
"schema",
",",
"table",
"=",
"table",
",",
"role",
"=",
"role",
")",
"conn",
".",
"execute",
"(",
"grant_str",
")"
] | 27.8
| 15.75
|
def get(self, collection_id, content=None, **kwargs):
    """Syntactic sugar around to make it easier to get fine-grained access
    to the parts of a file without composing a PhyloSchema object.
    Possible invocations include:
        w.get('pg_10')
        w.get('pg_10', 'trees')
        w.get('pg_10', 'trees', format='nexus')
        w.get('pg_10', tree_id='tree3')
    see:

    Note: ``content`` and ``**kwargs`` are accepted for call compatibility
    but are not consulted here; the whole collection's data is returned.
    """
    assert COLLECTION_ID_PATTERN.match(collection_id)
    response = self.get_collection(collection_id)
    if isinstance(response, dict) and ('data' in response):
        return response['data']
    return response
|
[
"def",
"get",
"(",
"self",
",",
"collection_id",
",",
"content",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"COLLECTION_ID_PATTERN",
".",
"match",
"(",
"collection_id",
")",
"r",
"=",
"self",
".",
"get_collection",
"(",
"collection_id",
")",
"if",
"isinstance",
"(",
"r",
",",
"dict",
")",
"and",
"(",
"'data'",
"in",
"r",
")",
":",
"return",
"r",
"[",
"'data'",
"]",
"return",
"r"
] | 40.8
| 11.666667
|
def clone(self, forced_version_date=None, in_bulk=False):
    """
    Clones a Versionable and returns a fresh copy of the original object.
    Original source: ClonableMixin snippet
    (http://djangosnippets.org/snippets/1271), with the pk/id change
    suggested in the comments
    :param forced_version_date: a timestamp including tzinfo; this value
        is usually set only internally!
    :param in_bulk: whether not to write this objects to the database
        already, if not necessary; this value is usually set only
        internally for performance optimization
    :return: returns a fresh clone of the original object
        (with adjusted relations)
    """
    if not self.pk:
        raise ValueError('Instance must be saved before it can be cloned')
    if self.version_end_date:
        raise ValueError(
            'This is a historical item and can not be cloned.')
    if forced_version_date:
        # A caller-supplied clone date must lie within this version's
        # lifetime; otherwise default to "now".
        if not self.version_start_date <= forced_version_date <= \
                get_utc_now():
            raise ValueError(
                'The clone date must be between the version start date '
                'and now.')
    else:
        forced_version_date = get_utc_now()
    if self.get_deferred_fields():
        # It would be necessary to fetch the record from the database
        # again for this to succeed.
        # Alternatively, perhaps it would be possible to create a copy of
        # the object after fetching the missing fields.
        # Doing so may be unexpected by the calling code, so raise an
        # exception: the calling code should be adapted if necessary.
        raise ValueError(
            'Can not clone a model instance that has deferred fields')
    # ``later_version`` is a shallow copy of self; ``earlier_version`` IS
    # self. The two are then closed/opened symmetrically below.
    earlier_version = self
    later_version = copy.copy(earlier_version)
    later_version.version_end_date = None
    later_version.version_start_date = forced_version_date
    # set earlier_version's ID to a new UUID so the clone (later_version)
    # can get the old one -- this allows 'head' to always have the original
    # id allowing us to get at all historic foreign key relationships
    earlier_version.id = self.uuid()
    earlier_version.version_end_date = forced_version_date
    if not in_bulk:
        # This condition might save us a lot of database queries if we are
        # being called from a loop like in .clone_relations
        earlier_version.save()
        later_version.save()
    else:
        # Deferred write: mark that the earlier version row does not exist
        # in the database yet so the bulk caller can create it later.
        earlier_version._not_created = True
    # re-create ManyToMany relations
    for field_name in self.get_all_m2m_field_names():
        earlier_version.clone_relations(later_version, field_name,
                                        forced_version_date)
    return later_version
|
[
"def",
"clone",
"(",
"self",
",",
"forced_version_date",
"=",
"None",
",",
"in_bulk",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"pk",
":",
"raise",
"ValueError",
"(",
"'Instance must be saved before it can be cloned'",
")",
"if",
"self",
".",
"version_end_date",
":",
"raise",
"ValueError",
"(",
"'This is a historical item and can not be cloned.'",
")",
"if",
"forced_version_date",
":",
"if",
"not",
"self",
".",
"version_start_date",
"<=",
"forced_version_date",
"<=",
"get_utc_now",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'The clone date must be between the version start date '",
"'and now.'",
")",
"else",
":",
"forced_version_date",
"=",
"get_utc_now",
"(",
")",
"if",
"self",
".",
"get_deferred_fields",
"(",
")",
":",
"# It would be necessary to fetch the record from the database",
"# again for this to succeed.",
"# Alternatively, perhaps it would be possible to create a copy of",
"# the object after fetching the missing fields.",
"# Doing so may be unexpected by the calling code, so raise an",
"# exception: the calling code should be adapted if necessary.",
"raise",
"ValueError",
"(",
"'Can not clone a model instance that has deferred fields'",
")",
"earlier_version",
"=",
"self",
"later_version",
"=",
"copy",
".",
"copy",
"(",
"earlier_version",
")",
"later_version",
".",
"version_end_date",
"=",
"None",
"later_version",
".",
"version_start_date",
"=",
"forced_version_date",
"# set earlier_version's ID to a new UUID so the clone (later_version)",
"# can get the old one -- this allows 'head' to always have the original",
"# id allowing us to get at all historic foreign key relationships",
"earlier_version",
".",
"id",
"=",
"self",
".",
"uuid",
"(",
")",
"earlier_version",
".",
"version_end_date",
"=",
"forced_version_date",
"if",
"not",
"in_bulk",
":",
"# This condition might save us a lot of database queries if we are",
"# being called from a loop like in .clone_relations",
"earlier_version",
".",
"save",
"(",
")",
"later_version",
".",
"save",
"(",
")",
"else",
":",
"earlier_version",
".",
"_not_created",
"=",
"True",
"# re-create ManyToMany relations",
"for",
"field_name",
"in",
"self",
".",
"get_all_m2m_field_names",
"(",
")",
":",
"earlier_version",
".",
"clone_relations",
"(",
"later_version",
",",
"field_name",
",",
"forced_version_date",
")",
"return",
"later_version"
] | 43.059701
| 21.716418
|
def _set_attribute(self, attribute, name, value):
"""Device attribute setter"""
try:
if attribute is None:
attribute = self._attribute_file_open( name )
else:
attribute.seek(0)
if isinstance(value, str):
value = value.encode()
attribute.write(value)
attribute.flush()
except Exception as ex:
self._raise_friendly_access_error(ex, name)
return attribute
|
[
"def",
"_set_attribute",
"(",
"self",
",",
"attribute",
",",
"name",
",",
"value",
")",
":",
"try",
":",
"if",
"attribute",
"is",
"None",
":",
"attribute",
"=",
"self",
".",
"_attribute_file_open",
"(",
"name",
")",
"else",
":",
"attribute",
".",
"seek",
"(",
"0",
")",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"value",
"=",
"value",
".",
"encode",
"(",
")",
"attribute",
".",
"write",
"(",
"value",
")",
"attribute",
".",
"flush",
"(",
")",
"except",
"Exception",
"as",
"ex",
":",
"self",
".",
"_raise_friendly_access_error",
"(",
"ex",
",",
"name",
")",
"return",
"attribute"
] | 32.733333
| 13.066667
|
def make_success_response(self, result):
    """
    Build the dict for a successful JSON response.

    Starts from the base response with the success status and attaches
    *result* as the payload under the result key.
    """
    payload = self.make_response(constants.RESPONSE_STATUS_SUCCESS)
    payload[constants.RESPONSE_KEY_RESULT] = result
    return payload
|
[
"def",
"make_success_response",
"(",
"self",
",",
"result",
")",
":",
"response",
"=",
"self",
".",
"make_response",
"(",
"constants",
".",
"RESPONSE_STATUS_SUCCESS",
")",
"response",
"[",
"constants",
".",
"RESPONSE_KEY_RESULT",
"]",
"=",
"result",
"return",
"response"
] | 34.7
| 9.7
|
def funcFindPrfMltpPrdXVal(idxPrc,
                           aryFuncChnkTrn,
                           aryFuncChnkTst,
                           aryPrfMdlsTrnConv,
                           aryPrfMdlsTstConv,
                           aryMdls,
                           queOut):
    """
    Function for finding best pRF model for voxel time course.
    This function should be used if there are several predictors.

    Parameters (inferred from usage -- confirm against caller):
    idxPrc : int
        Index of this parallel process; only process 0 prints progress.
    aryFuncChnkTrn, aryFuncChnkTst : np.ndarray
        Functional data chunks; axis 0 is voxels, axis 1 is indexed per
        cross-validation fold, axis 2 is volumes.
    aryPrfMdlsTrnConv, aryPrfMdlsTstConv : np.ndarray
        Convolved pRF model time courses, indexed [model, :, fold, :];
        axis 2 is the cross-validation fold.
    aryMdls : sequence
        Per-model (x position, y position, SD) parameter triples.
    queOut : multiprocessing.Queue
        Receives [idxPrc, vecBstXpos, vecBstYpos, vecBstSd] when done.
    """
    # Number of voxels to be fitted in this chunk:
    varNumVoxChnk = aryFuncChnkTrn.shape[0]
    # Number of volumes:
    varNumVolTrn = aryFuncChnkTrn.shape[2]
    varNumVolTst = aryFuncChnkTst.shape[2]
    # get number of cross validations
    varNumXval = aryPrfMdlsTrnConv.shape[2]
    # Vectors for pRF finding results [number-of-voxels times one]:
    vecBstXpos = np.zeros(varNumVoxChnk)
    vecBstYpos = np.zeros(varNumVoxChnk)
    vecBstSd = np.zeros(varNumVoxChnk)
    # vecBstR2 = np.zeros(varNumVoxChnk)
    # Vector for temporary residuals values that are obtained during
    # the different loops of cross validation
    vecTmpResXVal = np.empty((varNumVoxChnk, varNumXval), dtype='float32')
    # Vector for best residual values.
    # Initialised to a large sentinel so the first model always wins:
    vecBstRes = np.add(np.zeros(varNumVoxChnk),
                       100000.0)
    # Constant term for the model:
    vecConstTrn = np.ones((varNumVolTrn), dtype=np.float32)
    vecConstTst = np.ones((varNumVolTst), dtype=np.float32)
    # Change type to float 32:
    aryPrfMdlsTrnConv = aryPrfMdlsTrnConv.astype(np.float32)
    aryPrfMdlsTstConv = aryPrfMdlsTstConv.astype(np.float32)
    # Number of pRF models to fit:
    varNumMdls = len(aryMdls)
    # Prepare status indicator if this is the first of the parallel processes:
    if idxPrc == 0:
        # We create a status indicator for the time consuming pRF model finding
        # algorithm. Number of steps of the status indicator:
        varStsStpSze = 20
        # Vector with pRF values at which to give status feedback:
        vecStatPrf = np.linspace(0,
                                 varNumMdls,
                                 num=(varStsStpSze+1),
                                 endpoint=True)
        vecStatPrf = np.ceil(vecStatPrf)
        vecStatPrf = vecStatPrf.astype(int)
        # Vector with corresponding percentage values at which to give status
        # feedback:
        vecStatPrc = np.linspace(0,
                                 100,
                                 num=(varStsStpSze+1),
                                 endpoint=True)
        vecStatPrc = np.ceil(vecStatPrc)
        vecStatPrc = vecStatPrc.astype(int)
        # Counter for status indicator:
        varCntSts01 = 0
        varCntSts02 = 0
    # Loop through pRF models:
    for idxMdls in range(0, varNumMdls):
        # Status indicator (only used in the first of the parallel
        # processes):
        if idxPrc == 0:
            # Status indicator:
            if varCntSts02 == vecStatPrf[varCntSts01]:
                # Prepare status message:
                strStsMsg = ('---------Progress: ' +
                             str(vecStatPrc[varCntSts01]) +
                             ' % --- ' +
                             str(vecStatPrf[varCntSts01]) +
                             ' pRF models out of ' +
                             str(varNumMdls))
                print(strStsMsg)
                # Only increment counter if the last value has not been
                # reached yet:
                if varCntSts01 < varStsStpSze:
                    varCntSts01 = varCntSts01 + int(1)
        # Loop through different cross validations
        for idxXval in range(0, varNumXval):
            # Current pRF time course model:
            vecMdlTrn = aryPrfMdlsTrnConv[idxMdls, :, idxXval, :]
            vecMdlTst = aryPrfMdlsTstConv[idxMdls, :, idxXval, :]
            # We create a design matrix including the current pRF time
            # course model, and a constant term:
            aryDsgnTrn = np.vstack([vecMdlTrn,
                                    vecConstTrn]).T
            aryDsgnTst = np.vstack([vecMdlTst,
                                    vecConstTst]).T
            # Calculate the least-squares solution for all voxels
            # and get parameter estimates from the training fit
            aryTmpPrmEst = np.linalg.lstsq(aryDsgnTrn,
                                           aryFuncChnkTrn[:, idxXval, :].T)[0]
            # calculate predicted model fit based on training data
            aryTmpMdlTc = np.dot(aryDsgnTst, aryTmpPrmEst)
            # calculate residual sum of squares between test data and
            # predicted model fit based on training data
            vecTmpResXVal[:, idxXval] = np.sum(
                (np.subtract(aryFuncChnkTst[:, idxXval, :].T,
                             aryTmpMdlTc))**2, axis=0)
        # Average the residuals over the cross-validation folds:
        vecTmpRes = np.mean(vecTmpResXVal, axis=1)
        # Check whether current residuals are lower than previously
        # calculated ones:
        vecLgcTmpRes = np.less(vecTmpRes, vecBstRes)
        # Replace best x and y position values, and SD values.
        vecBstXpos[vecLgcTmpRes] = aryMdls[idxMdls][0]
        vecBstYpos[vecLgcTmpRes] = aryMdls[idxMdls][1]
        vecBstSd[vecLgcTmpRes] = aryMdls[idxMdls][2]
        # Replace best residual values:
        vecBstRes[vecLgcTmpRes] = vecTmpRes[vecLgcTmpRes]
        # Status indicator (only used in the first of the parallel
        # processes):
        if idxPrc == 0:
            # Increment status indicator counter:
            varCntSts02 = varCntSts02 + 1
    # Output list:
    lstOut = [idxPrc,
              vecBstXpos,
              vecBstYpos,
              vecBstSd,
              ]
    queOut.put(lstOut)
|
[
"def",
"funcFindPrfMltpPrdXVal",
"(",
"idxPrc",
",",
"aryFuncChnkTrn",
",",
"aryFuncChnkTst",
",",
"aryPrfMdlsTrnConv",
",",
"aryPrfMdlsTstConv",
",",
"aryMdls",
",",
"queOut",
")",
":",
"# Number of voxels to be fitted in this chunk:",
"varNumVoxChnk",
"=",
"aryFuncChnkTrn",
".",
"shape",
"[",
"0",
"]",
"# Number of volumes:",
"varNumVolTrn",
"=",
"aryFuncChnkTrn",
".",
"shape",
"[",
"2",
"]",
"varNumVolTst",
"=",
"aryFuncChnkTst",
".",
"shape",
"[",
"2",
"]",
"# get number of cross validations",
"varNumXval",
"=",
"aryPrfMdlsTrnConv",
".",
"shape",
"[",
"2",
"]",
"# Vectors for pRF finding results [number-of-voxels times one]:",
"vecBstXpos",
"=",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
")",
"vecBstYpos",
"=",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
")",
"vecBstSd",
"=",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
")",
"# vecBstR2 = np.zeros(varNumVoxChnk)",
"# Vector for temporary residuals values that are obtained during",
"# the different loops of cross validation",
"vecTmpResXVal",
"=",
"np",
".",
"empty",
"(",
"(",
"varNumVoxChnk",
",",
"varNumXval",
")",
",",
"dtype",
"=",
"'float32'",
")",
"# Vector for best residual values.",
"vecBstRes",
"=",
"np",
".",
"add",
"(",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
")",
",",
"100000.0",
")",
"# Constant term for the model:",
"vecConstTrn",
"=",
"np",
".",
"ones",
"(",
"(",
"varNumVolTrn",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"vecConstTst",
"=",
"np",
".",
"ones",
"(",
"(",
"varNumVolTst",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# Change type to float 32:",
"aryPrfMdlsTrnConv",
"=",
"aryPrfMdlsTrnConv",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"aryPrfMdlsTstConv",
"=",
"aryPrfMdlsTstConv",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"# Number of pRF models to fit:",
"varNumMdls",
"=",
"len",
"(",
"aryMdls",
")",
"# Prepare status indicator if this is the first of the parallel processes:",
"if",
"idxPrc",
"==",
"0",
":",
"# We create a status indicator for the time consuming pRF model finding",
"# algorithm. Number of steps of the status indicator:",
"varStsStpSze",
"=",
"20",
"# Vector with pRF values at which to give status feedback:",
"vecStatPrf",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"varNumMdls",
",",
"num",
"=",
"(",
"varStsStpSze",
"+",
"1",
")",
",",
"endpoint",
"=",
"True",
")",
"vecStatPrf",
"=",
"np",
".",
"ceil",
"(",
"vecStatPrf",
")",
"vecStatPrf",
"=",
"vecStatPrf",
".",
"astype",
"(",
"int",
")",
"# Vector with corresponding percentage values at which to give status",
"# feedback:",
"vecStatPrc",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"100",
",",
"num",
"=",
"(",
"varStsStpSze",
"+",
"1",
")",
",",
"endpoint",
"=",
"True",
")",
"vecStatPrc",
"=",
"np",
".",
"ceil",
"(",
"vecStatPrc",
")",
"vecStatPrc",
"=",
"vecStatPrc",
".",
"astype",
"(",
"int",
")",
"# Counter for status indicator:",
"varCntSts01",
"=",
"0",
"varCntSts02",
"=",
"0",
"# Loop through pRF models:",
"for",
"idxMdls",
"in",
"range",
"(",
"0",
",",
"varNumMdls",
")",
":",
"# Status indicator (only used in the first of the parallel",
"# processes):",
"if",
"idxPrc",
"==",
"0",
":",
"# Status indicator:",
"if",
"varCntSts02",
"==",
"vecStatPrf",
"[",
"varCntSts01",
"]",
":",
"# Prepare status message:",
"strStsMsg",
"=",
"(",
"'---------Progress: '",
"+",
"str",
"(",
"vecStatPrc",
"[",
"varCntSts01",
"]",
")",
"+",
"' % --- '",
"+",
"str",
"(",
"vecStatPrf",
"[",
"varCntSts01",
"]",
")",
"+",
"' pRF models out of '",
"+",
"str",
"(",
"varNumMdls",
")",
")",
"print",
"(",
"strStsMsg",
")",
"# Only increment counter if the last value has not been",
"# reached yet:",
"if",
"varCntSts01",
"<",
"varStsStpSze",
":",
"varCntSts01",
"=",
"varCntSts01",
"+",
"int",
"(",
"1",
")",
"# Loop through different cross validations",
"for",
"idxXval",
"in",
"range",
"(",
"0",
",",
"varNumXval",
")",
":",
"# Current pRF time course model:",
"vecMdlTrn",
"=",
"aryPrfMdlsTrnConv",
"[",
"idxMdls",
",",
":",
",",
"idxXval",
",",
":",
"]",
"vecMdlTst",
"=",
"aryPrfMdlsTstConv",
"[",
"idxMdls",
",",
":",
",",
"idxXval",
",",
":",
"]",
"# We create a design matrix including the current pRF time",
"# course model, and a constant term:",
"aryDsgnTrn",
"=",
"np",
".",
"vstack",
"(",
"[",
"vecMdlTrn",
",",
"vecConstTrn",
"]",
")",
".",
"T",
"aryDsgnTst",
"=",
"np",
".",
"vstack",
"(",
"[",
"vecMdlTst",
",",
"vecConstTst",
"]",
")",
".",
"T",
"# Calculate the least-squares solution for all voxels",
"# and get parameter estimates from the training fit",
"aryTmpPrmEst",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"aryDsgnTrn",
",",
"aryFuncChnkTrn",
"[",
":",
",",
"idxXval",
",",
":",
"]",
".",
"T",
")",
"[",
"0",
"]",
"# calculate predicted model fit based on training data",
"aryTmpMdlTc",
"=",
"np",
".",
"dot",
"(",
"aryDsgnTst",
",",
"aryTmpPrmEst",
")",
"# calculate residual sum of squares between test data and",
"# predicted model fit based on training data",
"vecTmpResXVal",
"[",
":",
",",
"idxXval",
"]",
"=",
"np",
".",
"sum",
"(",
"(",
"np",
".",
"subtract",
"(",
"aryFuncChnkTst",
"[",
":",
",",
"idxXval",
",",
":",
"]",
".",
"T",
",",
"aryTmpMdlTc",
")",
")",
"**",
"2",
",",
"axis",
"=",
"0",
")",
"vecTmpRes",
"=",
"np",
".",
"mean",
"(",
"vecTmpResXVal",
",",
"axis",
"=",
"1",
")",
"# Check whether current residuals are lower than previously",
"# calculated ones:",
"vecLgcTmpRes",
"=",
"np",
".",
"less",
"(",
"vecTmpRes",
",",
"vecBstRes",
")",
"# Replace best x and y position values, and SD values.",
"vecBstXpos",
"[",
"vecLgcTmpRes",
"]",
"=",
"aryMdls",
"[",
"idxMdls",
"]",
"[",
"0",
"]",
"vecBstYpos",
"[",
"vecLgcTmpRes",
"]",
"=",
"aryMdls",
"[",
"idxMdls",
"]",
"[",
"1",
"]",
"vecBstSd",
"[",
"vecLgcTmpRes",
"]",
"=",
"aryMdls",
"[",
"idxMdls",
"]",
"[",
"2",
"]",
"# Replace best residual values:",
"vecBstRes",
"[",
"vecLgcTmpRes",
"]",
"=",
"vecTmpRes",
"[",
"vecLgcTmpRes",
"]",
"# Status indicator (only used in the first of the parallel",
"# processes):",
"if",
"idxPrc",
"==",
"0",
":",
"# Increment status indicator counter:",
"varCntSts02",
"=",
"varCntSts02",
"+",
"1",
"# Output list:",
"lstOut",
"=",
"[",
"idxPrc",
",",
"vecBstXpos",
",",
"vecBstYpos",
",",
"vecBstSd",
",",
"]",
"queOut",
".",
"put",
"(",
"lstOut",
")"
] | 36.24026
| 18.487013
|
def standard(target, mol_weight='pore.molecular_weight',
             density='pore.density'):
    r"""
    Calculates the molar density as mass density divided by molecular weight.

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated. This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.
    mol_weight : string
        The dictionary key containing the molecular weight in kg/mol
    density : string
        The dictionary key containing the density in kg/m3
    """
    return target[density] / target[mol_weight]
|
[
"def",
"standard",
"(",
"target",
",",
"mol_weight",
"=",
"'pore.molecular_weight'",
",",
"density",
"=",
"'pore.density'",
")",
":",
"MW",
"=",
"target",
"[",
"mol_weight",
"]",
"rho",
"=",
"target",
"[",
"density",
"]",
"value",
"=",
"rho",
"/",
"MW",
"return",
"value"
] | 31.590909
| 21.181818
|
def autocorrelation(x, lag):
    """
    Calculates the autocorrelation of the specified lag, according to the formula [1]
    .. math::
        \\frac{1}{(n-l)\\sigma^{2}} \\sum_{t=1}^{n-l}(X_{t}-\\mu )(X_{t+l}-\\mu)
    where :math:`n` is the length of the time series :math:`X_i`, :math:`\\sigma^2` its variance and :math:`\\mu` its
    mean. `l` denotes the lag.
    .. rubric:: References
    [1] https://en.wikipedia.org/wiki/Autocorrelation#Estimation
    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param lag: the lag
    :type lag: int
    :return: the value of this feature (NaN when the estimator is undefined)
    :return type: float
    """
    # This is important: If a series is passed, the product below is calculated
    # based on the index, which corresponds to squaring the series.
    if isinstance(x, pd.Series):
        x = x.values
    # Fix: use <= instead of <. With len(x) == lag there are no lagged pairs
    # and the denominator (len(x) - lag) * v below is zero, so the original
    # code produced a 0/0 RuntimeWarning before yielding NaN anyway.
    if len(x) <= lag:
        return np.nan
    # Slice the relevant subseries based on the lag
    y1 = x[:(len(x) - lag)]
    y2 = x[lag:]
    # Subtract the mean of the whole series x
    x_mean = np.mean(x)
    # The result is sometimes referred to as "covariation"
    sum_product = np.sum((y1 - x_mean) * (y2 - x_mean))
    # Return the normalized unbiased covariance; a (near-)constant series has
    # zero variance and the autocorrelation is undefined.
    v = np.var(x)
    if np.isclose(v, 0):
        # Fix: np.nan instead of np.NaN -- the NaN alias was removed in
        # NumPy 2.0.
        return np.nan
    else:
        return sum_product / ((len(x) - lag) * v)
|
[
"def",
"autocorrelation",
"(",
"x",
",",
"lag",
")",
":",
"# This is important: If a series is passed, the product below is calculated",
"# based on the index, which corresponds to squaring the series.",
"if",
"type",
"(",
"x",
")",
"is",
"pd",
".",
"Series",
":",
"x",
"=",
"x",
".",
"values",
"if",
"len",
"(",
"x",
")",
"<",
"lag",
":",
"return",
"np",
".",
"nan",
"# Slice the relevant subseries based on the lag",
"y1",
"=",
"x",
"[",
":",
"(",
"len",
"(",
"x",
")",
"-",
"lag",
")",
"]",
"y2",
"=",
"x",
"[",
"lag",
":",
"]",
"# Subtract the mean of the whole series x",
"x_mean",
"=",
"np",
".",
"mean",
"(",
"x",
")",
"# The result is sometimes referred to as \"covariation\"",
"sum_product",
"=",
"np",
".",
"sum",
"(",
"(",
"y1",
"-",
"x_mean",
")",
"*",
"(",
"y2",
"-",
"x_mean",
")",
")",
"# Return the normalized unbiased covariance",
"v",
"=",
"np",
".",
"var",
"(",
"x",
")",
"if",
"np",
".",
"isclose",
"(",
"v",
",",
"0",
")",
":",
"return",
"np",
".",
"NaN",
"else",
":",
"return",
"sum_product",
"/",
"(",
"(",
"len",
"(",
"x",
")",
"-",
"lag",
")",
"*",
"v",
")"
] | 32.04878
| 22.487805
|
def run_decider_state(self, decider_state, child_errors, final_outcomes_dict):
    """ Runs the decider state of the barrier concurrency state. The decider state decides on which outcome the
    barrier concurrency is left.
    :param decider_state: the decider state of the barrier concurrency state
    :param child_errors: error of the concurrent branches
    :param final_outcomes_dict: dictionary of all outcomes of the concurrent branches
    :return: the decider state's 'error' output if it finished on outcome -1,
        otherwise None
    """
    decider_state.state_execution_status = StateExecutionStatus.ACTIVE
    # forward the decider specific data
    decider_state.child_errors = child_errors
    decider_state.final_outcomes_dict = final_outcomes_dict
    # standard state execution
    decider_state.input_data = self.get_inputs_for_state(decider_state)
    decider_state.output_data = self.create_output_dictionary_for_state(decider_state)
    # Run the decider state in its own thread and wait for it to finish;
    # decider states are never executed backwards.
    decider_state.start(self.execution_history, backward_execution=False)
    decider_state.join()
    decider_state_error = None
    # Outcome id -1 -- presumably the "aborted" outcome (confirm against the
    # outcome constants) -- carries an optional error in the output data.
    if decider_state.final_outcome.outcome_id == -1:
        if 'error' in decider_state.output_data:
            decider_state_error = decider_state.output_data['error']
    # standard output data processing
    self.add_state_execution_output_to_scoped_data(decider_state.output_data, decider_state)
    self.update_scoped_variables_with_output_dictionary(decider_state.output_data, decider_state)
    return decider_state_error
|
[
"def",
"run_decider_state",
"(",
"self",
",",
"decider_state",
",",
"child_errors",
",",
"final_outcomes_dict",
")",
":",
"decider_state",
".",
"state_execution_status",
"=",
"StateExecutionStatus",
".",
"ACTIVE",
"# forward the decider specific data",
"decider_state",
".",
"child_errors",
"=",
"child_errors",
"decider_state",
".",
"final_outcomes_dict",
"=",
"final_outcomes_dict",
"# standard state execution",
"decider_state",
".",
"input_data",
"=",
"self",
".",
"get_inputs_for_state",
"(",
"decider_state",
")",
"decider_state",
".",
"output_data",
"=",
"self",
".",
"create_output_dictionary_for_state",
"(",
"decider_state",
")",
"decider_state",
".",
"start",
"(",
"self",
".",
"execution_history",
",",
"backward_execution",
"=",
"False",
")",
"decider_state",
".",
"join",
"(",
")",
"decider_state_error",
"=",
"None",
"if",
"decider_state",
".",
"final_outcome",
".",
"outcome_id",
"==",
"-",
"1",
":",
"if",
"'error'",
"in",
"decider_state",
".",
"output_data",
":",
"decider_state_error",
"=",
"decider_state",
".",
"output_data",
"[",
"'error'",
"]",
"# standard output data processing",
"self",
".",
"add_state_execution_output_to_scoped_data",
"(",
"decider_state",
".",
"output_data",
",",
"decider_state",
")",
"self",
".",
"update_scoped_variables_with_output_dictionary",
"(",
"decider_state",
".",
"output_data",
",",
"decider_state",
")",
"return",
"decider_state_error"
] | 57.884615
| 23.653846
|
def build_ml_phyml(alignment, outfile, work_dir=".", **kwargs):
    """
    Build a maximum-likelihood tree of DNA seqs with PhyML.

    Parameters
    ----------
    alignment : MultipleSeqAlignment
        Aligned DNA sequences the tree is built from.
    outfile : str
        Path the resulting Newick tree is copied to.
    work_dir : str
        Working directory; the PHYLIP alignment is written to
        ``work_dir/work/aln.phy``.
    kwargs
        Extra options forwarded to ``PhymlCommandline``.

    Returns
    -------
    tuple or None
        ``(outfile, phy_file)`` on success, ``None`` if PhyML produced
        no tree file.
    """
    phy_file = op.join(work_dir, "work", "aln.phy")
    # BUG FIX: the Py2-only `file()` builtin does not exist in Python 3
    # (this function already uses Py3-style print). Use open() in a
    # context manager so the handle is also closed promptly.
    with open(phy_file, "w") as fw:
        AlignIO.write(alignment, fw, "phylip-relaxed")
    phyml_cl = PhymlCommandline(cmd=PHYML_BIN("phyml"), input=phy_file, **kwargs)
    logging.debug("Building ML tree using PhyML: %s" % phyml_cl)
    stdout, stderr = phyml_cl()
    # PhyML writes its output next to the input alignment.
    tree_file = phy_file + "_phyml_tree.txt"
    if not op.exists(tree_file):
        print("***PhyML failed.", file=sys.stderr)
        return None
    sh("cp {0} {1}".format(tree_file, outfile), log=False)
    logging.debug("ML tree printed to %s" % outfile)
    return outfile, phy_file
|
[
"def",
"build_ml_phyml",
"(",
"alignment",
",",
"outfile",
",",
"work_dir",
"=",
"\".\"",
",",
"*",
"*",
"kwargs",
")",
":",
"phy_file",
"=",
"op",
".",
"join",
"(",
"work_dir",
",",
"\"work\"",
",",
"\"aln.phy\"",
")",
"AlignIO",
".",
"write",
"(",
"alignment",
",",
"file",
"(",
"phy_file",
",",
"\"w\"",
")",
",",
"\"phylip-relaxed\"",
")",
"phyml_cl",
"=",
"PhymlCommandline",
"(",
"cmd",
"=",
"PHYML_BIN",
"(",
"\"phyml\"",
")",
",",
"input",
"=",
"phy_file",
",",
"*",
"*",
"kwargs",
")",
"logging",
".",
"debug",
"(",
"\"Building ML tree using PhyML: %s\"",
"%",
"phyml_cl",
")",
"stdout",
",",
"stderr",
"=",
"phyml_cl",
"(",
")",
"tree_file",
"=",
"phy_file",
"+",
"\"_phyml_tree.txt\"",
"if",
"not",
"op",
".",
"exists",
"(",
"tree_file",
")",
":",
"print",
"(",
"\"***PhyML failed.\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return",
"None",
"sh",
"(",
"\"cp {0} {1}\"",
".",
"format",
"(",
"tree_file",
",",
"outfile",
")",
",",
"log",
"=",
"False",
")",
"logging",
".",
"debug",
"(",
"\"ML tree printed to %s\"",
"%",
"outfile",
")",
"return",
"outfile",
",",
"phy_file"
] | 35.5
| 19.8
|
def search_engine(query, top=5, service="google", license=None,
                  cache=os.path.join(DEFAULT_CACHE, "google")):
    """
    Return a color aggregate from colors and ranges parsed from the web.
    T. De Smedt, http://nodebox.net/code/index.php/Prism

    :param query: search context used to rank hues and shades.
    :param top: number of most relevant hues to keep.
    :param service: either "google" or "yahoo".
    :param license: optional Yahoo license key.
    :param cache: directory used to cache the resulting theme.
    """
    # Check if we have cached information first.
    try:
        return theme(query, cache=cache)
    except Exception:
        # No usable cached theme -- build one from web statistics below.
        # (Was a bare `except:`, which would also have swallowed
        # KeyboardInterrupt/SystemExit.)
        pass
    if service == "google":
        from web import google
        search_engine = google
    elif service == "yahoo":
        from web import yahoo
        search_engine = yahoo
        if license:
            yahoo.license_key = license
    else:
        # Previously an unknown service fell through to a NameError on
        # `search_engine`; fail with a diagnosable error instead.
        raise ValueError("unknown service: %r" % service)
    # Sort all the primary hues (plus black and white) for q.
    sorted_colors = search_engine.sort(
        [h for h in primary_hues] + ["black", "white"],
        context=query, strict=True, cached=True
    )
    # Sort all the shades (bright, hard, ...) for q.
    sorted_shades = search_engine.sort(
        [str(s) for s in shades],
        context=query, strict=True, cached=True
    )
    # Reforms '"black death"' to 'black'.
    f = lambda x: x.strip("\"").split()[0]
    # Take the top most relevant hues, normalizing weights to sum to 1.
    n2 = sum([w for h, w in sorted_colors[:top]])
    sorted_colors = [(color(f(h)), w / n2) for h, w in sorted_colors[:top]]
    # Take the three most relevant shades, normalized likewise.
    n2 = sum([w for s, w in sorted_shades[:3]])
    sorted_shades = [(shade(f(s)), w / n2) for s, w in sorted_shades[:3]]
    a = theme(cache=cache)
    a.name = query
    # Combine every (hue, shade) pair, weighting by both relevances.
    for clr, w1 in sorted_colors:
        for rng, w2 in sorted_shades:
            a.add_range(rng, clr, w1 * w2)
    a._save()
    return a
|
[
"def",
"search_engine",
"(",
"query",
",",
"top",
"=",
"5",
",",
"service",
"=",
"\"google\"",
",",
"license",
"=",
"None",
",",
"cache",
"=",
"os",
".",
"path",
".",
"join",
"(",
"DEFAULT_CACHE",
",",
"\"google\"",
")",
")",
":",
"# Check if we have cached information first.",
"try",
":",
"a",
"=",
"theme",
"(",
"query",
",",
"cache",
"=",
"cache",
")",
"return",
"a",
"except",
":",
"pass",
"if",
"service",
"==",
"\"google\"",
":",
"from",
"web",
"import",
"google",
"search_engine",
"=",
"google",
"if",
"service",
"==",
"\"yahoo\"",
":",
"from",
"web",
"import",
"yahoo",
"search_engine",
"=",
"yahoo",
"if",
"license",
":",
"yahoo",
".",
"license_key",
"=",
"license",
"# Sort all the primary hues (plus black and white) for q.",
"sorted_colors",
"=",
"search_engine",
".",
"sort",
"(",
"[",
"h",
"for",
"h",
"in",
"primary_hues",
"]",
"+",
"[",
"\"black\"",
",",
"\"white\"",
"]",
",",
"context",
"=",
"query",
",",
"strict",
"=",
"True",
",",
"cached",
"=",
"True",
")",
"# Sort all the shades (bright, hard, ...) for q.",
"sorted_shades",
"=",
"search_engine",
".",
"sort",
"(",
"[",
"str",
"(",
"s",
")",
"for",
"s",
"in",
"shades",
"]",
",",
"context",
"=",
"query",
",",
"strict",
"=",
"True",
",",
"cached",
"=",
"True",
")",
"# Reforms '\"black death\"' to 'black'.",
"f",
"=",
"lambda",
"x",
":",
"x",
".",
"strip",
"(",
"\"\\\"\"",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
"# Take the top most relevant hues.",
"n2",
"=",
"sum",
"(",
"[",
"w",
"for",
"h",
",",
"w",
"in",
"sorted_colors",
"[",
":",
"top",
"]",
"]",
")",
"sorted_colors",
"=",
"[",
"(",
"color",
"(",
"f",
"(",
"h",
")",
")",
",",
"w",
"/",
"n2",
")",
"for",
"h",
",",
"w",
"in",
"sorted_colors",
"[",
":",
"top",
"]",
"]",
"# Take the three most relevant shades.",
"n2",
"=",
"sum",
"(",
"[",
"w",
"for",
"s",
",",
"w",
"in",
"sorted_shades",
"[",
":",
"3",
"]",
"]",
")",
"sorted_shades",
"=",
"[",
"(",
"shade",
"(",
"f",
"(",
"s",
")",
")",
",",
"w",
"/",
"n2",
")",
"for",
"s",
",",
"w",
"in",
"sorted_shades",
"[",
":",
"3",
"]",
"]",
"a",
"=",
"theme",
"(",
"cache",
"=",
"cache",
")",
"a",
".",
"name",
"=",
"query",
"for",
"clr",
",",
"w1",
"in",
"sorted_colors",
":",
"for",
"rng",
",",
"w2",
"in",
"sorted_shades",
":",
"a",
".",
"add_range",
"(",
"rng",
",",
"clr",
",",
"w1",
"*",
"w2",
")",
"a",
".",
"_save",
"(",
")",
"return",
"a"
] | 30.188679
| 18.188679
|
def getPaths(roots, ignores=None):
    """
    Recursively walk a set of paths and return a listing of contained files.
    :param roots: Relative or absolute paths to files or folders.
    :type roots: :class:`~__builtins__.list` of :class:`~__builtins__.str`
    :param ignores: A list of :py:mod:`fnmatch` globs to avoid walking and
        omit from results
    :type ignores: :class:`~__builtins__.list` of :class:`~__builtins__.str`
    :returns: Absolute paths to only files.
    :rtype: :class:`~__builtins__.list` of :class:`~__builtins__.str`
    .. todo:: Try to optimize the ignores matching. Running a regex on every
        filename is a fairly significant percentage of the time taken according
        to the profiler.
    """
    paths, count, ignores = [], 0, ignores or []
    # Prepare the ignores list for most efficient use
    ignore_re = multiglob_compile(ignores, prefix=False)
    for root in roots:
        # For safety, only use absolute, real paths.
        root = os.path.realpath(root)
        # Handle directly-referenced filenames properly
        # (And override ignores to "do as I mean, not as I say")
        if os.path.isfile(root):
            paths.append(root)
            continue
        for fldr in os.walk(root):
            out.write("Gathering file paths to compare... (%d files examined)"
                      % count)
            # Don't even descend into IGNOREd directories.
            # BUG FIX: the old code called fldr[1].remove() while iterating
            # fldr[1], which skips the entry following each removal (so one
            # of two consecutive ignored siblings was still walked).
            # Rebuild the list in place instead; os.walk honours in-place
            # mutation of the dirnames list for pruning.
            fldr[1][:] = [subdir for subdir in fldr[1]
                          if not ignore_re.match(os.path.join(fldr[0], subdir))]
            for filename in fldr[2]:
                filepath = os.path.join(fldr[0], filename)
                if ignore_re.match(filepath):
                    continue  # Skip IGNOREd files.
                paths.append(filepath)
                count += 1
    out.write("Found %s files to be compared for duplication." % (len(paths)),
              newline=True)
    return paths
|
[
"def",
"getPaths",
"(",
"roots",
",",
"ignores",
"=",
"None",
")",
":",
"paths",
",",
"count",
",",
"ignores",
"=",
"[",
"]",
",",
"0",
",",
"ignores",
"or",
"[",
"]",
"# Prepare the ignores list for most efficient use",
"ignore_re",
"=",
"multiglob_compile",
"(",
"ignores",
",",
"prefix",
"=",
"False",
")",
"for",
"root",
"in",
"roots",
":",
"# For safety, only use absolute, real paths.",
"root",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"root",
")",
"# Handle directly-referenced filenames properly",
"# (And override ignores to \"do as I mean, not as I say\")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"root",
")",
":",
"paths",
".",
"append",
"(",
"root",
")",
"continue",
"for",
"fldr",
"in",
"os",
".",
"walk",
"(",
"root",
")",
":",
"out",
".",
"write",
"(",
"\"Gathering file paths to compare... (%d files examined)\"",
"%",
"count",
")",
"# Don't even descend into IGNOREd directories.",
"for",
"subdir",
"in",
"fldr",
"[",
"1",
"]",
":",
"dirpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"fldr",
"[",
"0",
"]",
",",
"subdir",
")",
"if",
"ignore_re",
".",
"match",
"(",
"dirpath",
")",
":",
"fldr",
"[",
"1",
"]",
".",
"remove",
"(",
"subdir",
")",
"for",
"filename",
"in",
"fldr",
"[",
"2",
"]",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"fldr",
"[",
"0",
"]",
",",
"filename",
")",
"if",
"ignore_re",
".",
"match",
"(",
"filepath",
")",
":",
"continue",
"# Skip IGNOREd files.",
"paths",
".",
"append",
"(",
"filepath",
")",
"count",
"+=",
"1",
"out",
".",
"write",
"(",
"\"Found %s files to be compared for duplication.\"",
"%",
"(",
"len",
"(",
"paths",
")",
")",
",",
"newline",
"=",
"True",
")",
"return",
"paths"
] | 36.037037
| 21.555556
|
def errorReceived(self, merr):
    """
    Called when an error message is received.

    Looks up the pending call matching the error's reply serial,
    cancels its timeout (if one is armed), and fires the deferred's
    errback with a RemoteError built from the message body.
    """
    serial = merr.reply_serial
    deferred, delayed_call = self._pendingCalls.get(serial, (None, None))
    if delayed_call:
        delayed_call.cancel()
    if not deferred:
        # No caller is waiting on this serial; nothing to do.
        return
    del self._pendingCalls[serial]
    remote_err = error.RemoteError(merr.error_name)
    remote_err.message = ''
    remote_err.values = []
    if merr.body:
        first_item = merr.body[0]
        # A leading string in the body is treated as the human-readable message.
        if isinstance(first_item, six.string_types):
            remote_err.message = first_item
        remote_err.values = merr.body
    deferred.errback(remote_err)
|
[
"def",
"errorReceived",
"(",
"self",
",",
"merr",
")",
":",
"d",
",",
"timeout",
"=",
"self",
".",
"_pendingCalls",
".",
"get",
"(",
"merr",
".",
"reply_serial",
",",
"(",
"None",
",",
"None",
")",
")",
"if",
"timeout",
":",
"timeout",
".",
"cancel",
"(",
")",
"if",
"d",
":",
"del",
"self",
".",
"_pendingCalls",
"[",
"merr",
".",
"reply_serial",
"]",
"e",
"=",
"error",
".",
"RemoteError",
"(",
"merr",
".",
"error_name",
")",
"e",
".",
"message",
"=",
"''",
"e",
".",
"values",
"=",
"[",
"]",
"if",
"merr",
".",
"body",
":",
"if",
"isinstance",
"(",
"merr",
".",
"body",
"[",
"0",
"]",
",",
"six",
".",
"string_types",
")",
":",
"e",
".",
"message",
"=",
"merr",
".",
"body",
"[",
"0",
"]",
"e",
".",
"values",
"=",
"merr",
".",
"body",
"d",
".",
"errback",
"(",
"e",
")"
] | 34.176471
| 13.352941
|
def get_runnable_effects(self) -> List[Effect]:
    """
    Return all runnable effects in the project.

    :return: List of all effects whose ``runnable`` flag is set
    """
    # Only the effect objects are needed, so iterate values() directly
    # instead of unpacking and discarding the keys from items().
    return [effect for effect in self._effects.values() if effect.runnable]
|
[
"def",
"get_runnable_effects",
"(",
"self",
")",
"->",
"List",
"[",
"Effect",
"]",
":",
"return",
"[",
"effect",
"for",
"name",
",",
"effect",
"in",
"self",
".",
"_effects",
".",
"items",
"(",
")",
"if",
"effect",
".",
"runnable",
"]"
] | 36.571429
| 15.714286
|
def _GetUtf8Contents(self, file_name):
    """Check for errors in file_name and return a string for csv reader."""
    raw = self._FileContents(file_name)
    if not raw:
        # Missing file: nothing to validate or return.
        return
    # csv.reader cannot cope with UTF-16 input; detect a UTF-16 BOM,
    # report it, then transcode to UTF-8 so validation can continue.
    utf16_boms = (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)
    if len(raw) >= 2 and raw[0:2] in utf16_boms:
        self._problems.FileFormat("appears to be encoded in utf-16", (file_name, ))
        raw = codecs.getdecoder('utf-16')(raw)[0].encode('utf-8')
    null_index = raw.find('\0')
    if null_index != -1:
        # It is easier to show some surrounding text than to compute the
        # exact row number of the embedded NUL.
        context_match = re.search(r'.{,20}\0.{,20}', raw, re.DOTALL)
        self._problems.FileFormat(
            "contains a null in text \"%s\" at byte %d" %
            (codecs.getencoder('string_escape')(context_match.group()),
             null_index + 1),
            (file_name, ))
        return
    # Strip a UTF-8 byte-order marker so it is not treated as part of
    # the first column name (which would cause a mis-parse).
    return raw.lstrip(codecs.BOM_UTF8)
|
[
"def",
"_GetUtf8Contents",
"(",
"self",
",",
"file_name",
")",
":",
"contents",
"=",
"self",
".",
"_FileContents",
"(",
"file_name",
")",
"if",
"not",
"contents",
":",
"# Missing file",
"return",
"# Check for errors that will prevent csv.reader from working",
"if",
"len",
"(",
"contents",
")",
">=",
"2",
"and",
"contents",
"[",
"0",
":",
"2",
"]",
"in",
"(",
"codecs",
".",
"BOM_UTF16_BE",
",",
"codecs",
".",
"BOM_UTF16_LE",
")",
":",
"self",
".",
"_problems",
".",
"FileFormat",
"(",
"\"appears to be encoded in utf-16\"",
",",
"(",
"file_name",
",",
")",
")",
"# Convert and continue, so we can find more errors",
"contents",
"=",
"codecs",
".",
"getdecoder",
"(",
"'utf-16'",
")",
"(",
"contents",
")",
"[",
"0",
"]",
".",
"encode",
"(",
"'utf-8'",
")",
"null_index",
"=",
"contents",
".",
"find",
"(",
"'\\0'",
")",
"if",
"null_index",
"!=",
"-",
"1",
":",
"# It is easier to get some surrounding text than calculate the exact",
"# row_num",
"m",
"=",
"re",
".",
"search",
"(",
"r'.{,20}\\0.{,20}'",
",",
"contents",
",",
"re",
".",
"DOTALL",
")",
"self",
".",
"_problems",
".",
"FileFormat",
"(",
"\"contains a null in text \\\"%s\\\" at byte %d\"",
"%",
"(",
"codecs",
".",
"getencoder",
"(",
"'string_escape'",
")",
"(",
"m",
".",
"group",
"(",
")",
")",
",",
"null_index",
"+",
"1",
")",
",",
"(",
"file_name",
",",
")",
")",
"return",
"# strip out any UTF-8 Byte Order Marker (otherwise it'll be",
"# treated as part of the first column name, causing a mis-parse)",
"contents",
"=",
"contents",
".",
"lstrip",
"(",
"codecs",
".",
"BOM_UTF8",
")",
"return",
"contents"
] | 42.071429
| 21.035714
|
def patch_namespaced_resource_quota(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update the specified ResourceQuota.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request that returns the request thread instead of the result.
    >>> thread = api.patch_namespaced_resource_quota(name, namespace, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the ResourceQuota (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param UNKNOWN_BASE_TYPE body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted.
    :return: V1ResourceQuota
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this wrapper always want the deserialized data only.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.patch_namespaced_resource_quota_with_http_info(
            name, namespace, body, **kwargs)  # noqa: E501
    data = self.patch_namespaced_resource_quota_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
    return data
|
[
"def",
"patch_namespaced_resource_quota",
"(",
"self",
",",
"name",
",",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"patch_namespaced_resource_quota_with_http_info",
"(",
"name",
",",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"patch_namespaced_resource_quota_with_http_info",
"(",
"name",
",",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | 62.32
| 35.24
|
def env(self):
    """
    Dict of all environment variables that will be run with this command.

    Starts from a copy of the current process environment, overlays the
    explicitly configured variables, prepends the configured search paths
    to PATH, and finally removes any dropped variables.
    """
    merged = os.environ.copy()
    merged.update(self._env)
    # The original one-liner relied on ternary precedence; spelled out:
    # configured paths always come first, and the pre-existing PATH
    # (if any) is appended after them.
    if "PATH" in merged:
        path_parts = self._paths + [merged["PATH"]]
    else:
        path_parts = list(self._paths)
    merged["PATH"] = ":".join(path_parts)
    for dropped in self._env_drop:
        if dropped in merged:
            del merged[dropped]
    return merged
|
[
"def",
"env",
"(",
"self",
")",
":",
"env_vars",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"env_vars",
".",
"update",
"(",
"self",
".",
"_env",
")",
"new_path",
"=",
"\":\"",
".",
"join",
"(",
"self",
".",
"_paths",
"+",
"[",
"env_vars",
"[",
"\"PATH\"",
"]",
"]",
"if",
"\"PATH\"",
"in",
"env_vars",
"else",
"[",
"]",
"+",
"self",
".",
"_paths",
")",
"env_vars",
"[",
"\"PATH\"",
"]",
"=",
"new_path",
"for",
"env_var",
"in",
"self",
".",
"_env_drop",
":",
"if",
"env_var",
"in",
"env_vars",
":",
"del",
"env_vars",
"[",
"env_var",
"]",
"return",
"env_vars"
] | 34
| 14
|
def derivatives(self, x, y, Rs, theta_Rs, e1, e2, center_x=0, center_y=0):
    """
    returns df/dx and df/dy of the function (integral of NFW)

    Coordinates are rotated into the ellipse frame and stretched by the
    ellipticity, the spherical NFW deflection is evaluated there, and
    the resulting angles are rotated back to the original frame.
    """
    phi_G, q = param_util.ellipticity2phi_q(e1, e2)
    dx = x - center_x
    dy = y - center_y
    cos_phi = np.cos(phi_G)
    sin_phi = np.sin(phi_G)
    # Cap the ellipticity just below 1 to keep the sqrt factors finite.
    e = min(abs(1. - q), 0.99)
    xt1 = (cos_phi * dx + sin_phi * dy) * np.sqrt(1 - e)
    xt2 = (-sin_phi * dx + cos_phi * dy) * np.sqrt(1 + e)
    R_ = np.sqrt(xt1 ** 2 + xt2 ** 2)
    rho0_input = self.nfw._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs)
    # Guard against a vanishing scale radius.
    if Rs < 0.0000001:
        Rs = 0.0000001
    f_x_prim, f_y_prim = self.nfw.nfwAlpha(R_, Rs, rho0_input, xt1, xt2)
    f_x_prim *= np.sqrt(1 - e)
    f_y_prim *= np.sqrt(1 + e)
    # Rotate the deflection back out of the ellipse frame.
    f_x = cos_phi * f_x_prim - sin_phi * f_y_prim
    f_y = sin_phi * f_x_prim + cos_phi * f_y_prim
    return f_x, f_y
|
[
"def",
"derivatives",
"(",
"self",
",",
"x",
",",
"y",
",",
"Rs",
",",
"theta_Rs",
",",
"e1",
",",
"e2",
",",
"center_x",
"=",
"0",
",",
"center_y",
"=",
"0",
")",
":",
"phi_G",
",",
"q",
"=",
"param_util",
".",
"ellipticity2phi_q",
"(",
"e1",
",",
"e2",
")",
"x_shift",
"=",
"x",
"-",
"center_x",
"y_shift",
"=",
"y",
"-",
"center_y",
"cos_phi",
"=",
"np",
".",
"cos",
"(",
"phi_G",
")",
"sin_phi",
"=",
"np",
".",
"sin",
"(",
"phi_G",
")",
"e",
"=",
"min",
"(",
"abs",
"(",
"1.",
"-",
"q",
")",
",",
"0.99",
")",
"xt1",
"=",
"(",
"cos_phi",
"*",
"x_shift",
"+",
"sin_phi",
"*",
"y_shift",
")",
"*",
"np",
".",
"sqrt",
"(",
"1",
"-",
"e",
")",
"xt2",
"=",
"(",
"-",
"sin_phi",
"*",
"x_shift",
"+",
"cos_phi",
"*",
"y_shift",
")",
"*",
"np",
".",
"sqrt",
"(",
"1",
"+",
"e",
")",
"R_",
"=",
"np",
".",
"sqrt",
"(",
"xt1",
"**",
"2",
"+",
"xt2",
"**",
"2",
")",
"rho0_input",
"=",
"self",
".",
"nfw",
".",
"_alpha2rho0",
"(",
"theta_Rs",
"=",
"theta_Rs",
",",
"Rs",
"=",
"Rs",
")",
"if",
"Rs",
"<",
"0.0000001",
":",
"Rs",
"=",
"0.0000001",
"f_x_prim",
",",
"f_y_prim",
"=",
"self",
".",
"nfw",
".",
"nfwAlpha",
"(",
"R_",
",",
"Rs",
",",
"rho0_input",
",",
"xt1",
",",
"xt2",
")",
"f_x_prim",
"*=",
"np",
".",
"sqrt",
"(",
"1",
"-",
"e",
")",
"f_y_prim",
"*=",
"np",
".",
"sqrt",
"(",
"1",
"+",
"e",
")",
"f_x",
"=",
"cos_phi",
"*",
"f_x_prim",
"-",
"sin_phi",
"*",
"f_y_prim",
"f_y",
"=",
"sin_phi",
"*",
"f_x_prim",
"+",
"cos_phi",
"*",
"f_y_prim",
"return",
"f_x",
",",
"f_y"
] | 41.545455
| 13.636364
|
def UpsertUserDefinedFunction(self, collection_link, udf, options=None):
    """Upserts a user defined function in a collection.
    :param str collection_link:
        The link to the collection.
    :param str udf:
    :param dict options:
        The request options for the request.
    :return:
        The upserted UDF.
    :rtype:
        dict
    """
    request_options = {} if options is None else options
    collection_id, path, udf = self._GetContainerIdWithPathForUDF(
        collection_link, udf)
    return self.Upsert(udf, path, 'udfs', collection_id, None, request_options)
|
[
"def",
"UpsertUserDefinedFunction",
"(",
"self",
",",
"collection_link",
",",
"udf",
",",
"options",
"=",
"None",
")",
":",
"if",
"options",
"is",
"None",
":",
"options",
"=",
"{",
"}",
"collection_id",
",",
"path",
",",
"udf",
"=",
"self",
".",
"_GetContainerIdWithPathForUDF",
"(",
"collection_link",
",",
"udf",
")",
"return",
"self",
".",
"Upsert",
"(",
"udf",
",",
"path",
",",
"'udfs'",
",",
"collection_id",
",",
"None",
",",
"options",
")"
] | 29.52
| 17.44
|
def search(self, **kwargs):
    """
    Query using ElasticSearch, returning an elasticsearch queryset.
    :param kwargs: keyword arguments (optional)
        * query : ES Query spec
        * tags : content tags
        * types : content types
        * feature_types : featured types
        * published : date range
    """
    qs = super(ContentManager, self).search()
    if "query" in kwargs:
        qs = qs.query("match", _all=kwargs.get("query"))
    else:
        # No free-text query: order by recency instead of relevance.
        qs = qs.sort('-published', '-last_modified')
    # Right now we have "Before", "After" (datetimes),
    # and "published" (a boolean). Should simplify this in the future.
    if "before" in kwargs or "after" in kwargs:
        qs = qs.filter(Published(before=kwargs.get("before"),
                                 after=kwargs.get("after")))
    elif kwargs.get("published", True) and "status" not in kwargs:
        # TODO: kill this "published" param. it sucks
        qs = qs.filter(Published())
    if "status" in kwargs:
        qs = qs.filter(Status(kwargs["status"]))
    if "excluded_ids" in kwargs:
        qs = qs.filter(~es_filter.Ids(values=kwargs.get("excluded_ids", [])))
    # Tag/author/feature-type filters are always applied (empty lists
    # are handled by the filter classes themselves).
    qs = qs.filter(Tags(kwargs.get("tags", [])))
    qs = qs.filter(Authors(kwargs.get("authors", [])))
    qs = qs.filter(FeatureTypes(kwargs.get("feature_types", [])))
    # Is this good enough? Are we even using this feature at all?
    doc_types = kwargs.pop("types", [])
    if doc_types:
        qs._doc_type = doc_types
    return qs
|
[
"def",
"search",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"search_query",
"=",
"super",
"(",
"ContentManager",
",",
"self",
")",
".",
"search",
"(",
")",
"if",
"\"query\"",
"in",
"kwargs",
":",
"search_query",
"=",
"search_query",
".",
"query",
"(",
"\"match\"",
",",
"_all",
"=",
"kwargs",
".",
"get",
"(",
"\"query\"",
")",
")",
"else",
":",
"search_query",
"=",
"search_query",
".",
"sort",
"(",
"'-published'",
",",
"'-last_modified'",
")",
"# Right now we have \"Before\", \"After\" (datetimes),",
"# and \"published\" (a boolean). Should simplify this in the future.",
"if",
"\"before\"",
"in",
"kwargs",
"or",
"\"after\"",
"in",
"kwargs",
":",
"published_filter",
"=",
"Published",
"(",
"before",
"=",
"kwargs",
".",
"get",
"(",
"\"before\"",
")",
",",
"after",
"=",
"kwargs",
".",
"get",
"(",
"\"after\"",
")",
")",
"search_query",
"=",
"search_query",
".",
"filter",
"(",
"published_filter",
")",
"else",
":",
"# TODO: kill this \"published\" param. it sucks",
"if",
"kwargs",
".",
"get",
"(",
"\"published\"",
",",
"True",
")",
"and",
"\"status\"",
"not",
"in",
"kwargs",
":",
"published_filter",
"=",
"Published",
"(",
")",
"search_query",
"=",
"search_query",
".",
"filter",
"(",
"published_filter",
")",
"if",
"\"status\"",
"in",
"kwargs",
":",
"search_query",
"=",
"search_query",
".",
"filter",
"(",
"Status",
"(",
"kwargs",
"[",
"\"status\"",
"]",
")",
")",
"if",
"\"excluded_ids\"",
"in",
"kwargs",
":",
"exclusion_filter",
"=",
"~",
"es_filter",
".",
"Ids",
"(",
"values",
"=",
"kwargs",
".",
"get",
"(",
"\"excluded_ids\"",
",",
"[",
"]",
")",
")",
"search_query",
"=",
"search_query",
".",
"filter",
"(",
"exclusion_filter",
")",
"tag_filter",
"=",
"Tags",
"(",
"kwargs",
".",
"get",
"(",
"\"tags\"",
",",
"[",
"]",
")",
")",
"search_query",
"=",
"search_query",
".",
"filter",
"(",
"tag_filter",
")",
"author_filter",
"=",
"Authors",
"(",
"kwargs",
".",
"get",
"(",
"\"authors\"",
",",
"[",
"]",
")",
")",
"search_query",
"=",
"search_query",
".",
"filter",
"(",
"author_filter",
")",
"feature_type_filter",
"=",
"FeatureTypes",
"(",
"kwargs",
".",
"get",
"(",
"\"feature_types\"",
",",
"[",
"]",
")",
")",
"search_query",
"=",
"search_query",
".",
"filter",
"(",
"feature_type_filter",
")",
"# Is this good enough? Are we even using this feature at all?",
"types",
"=",
"kwargs",
".",
"pop",
"(",
"\"types\"",
",",
"[",
"]",
")",
"if",
"types",
":",
"search_query",
".",
"_doc_type",
"=",
"types",
"return",
"search_query"
] | 40.62
| 22.66
|
def convert_sum(node, **kwargs):
    """Map MXNet's sum operator attributes to onnx's ReduceSum operator
    and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)
    mx_axis = attrs.get("axis", None)
    axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None
    keepdims = get_boolean_attribute_value(attrs, "keepdims")
    # An absent `axes` attribute means "reduce over all axes" in ONNX,
    # so only pass it along when MXNet actually supplied one.
    node_kwargs = dict(inputs=input_nodes, outputs=[name],
                       keepdims=keepdims, name=name)
    if axes:
        node_kwargs["axes"] = axes
    sum_node = onnx.helper.make_node('ReduceSum', **node_kwargs)
    return [sum_node]
|
[
"def",
"convert_sum",
"(",
"node",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
",",
"input_nodes",
",",
"attrs",
"=",
"get_inputs",
"(",
"node",
",",
"kwargs",
")",
"mx_axis",
"=",
"attrs",
".",
"get",
"(",
"\"axis\"",
",",
"None",
")",
"axes",
"=",
"convert_string_to_list",
"(",
"str",
"(",
"mx_axis",
")",
")",
"if",
"mx_axis",
"is",
"not",
"None",
"else",
"None",
"keepdims",
"=",
"get_boolean_attribute_value",
"(",
"attrs",
",",
"\"keepdims\"",
")",
"if",
"axes",
":",
"node",
"=",
"onnx",
".",
"helper",
".",
"make_node",
"(",
"'ReduceSum'",
",",
"inputs",
"=",
"input_nodes",
",",
"outputs",
"=",
"[",
"name",
"]",
",",
"axes",
"=",
"axes",
",",
"keepdims",
"=",
"keepdims",
",",
"name",
"=",
"name",
")",
"else",
":",
"node",
"=",
"onnx",
".",
"helper",
".",
"make_node",
"(",
"'ReduceSum'",
",",
"inputs",
"=",
"input_nodes",
",",
"outputs",
"=",
"[",
"name",
"]",
",",
"keepdims",
"=",
"keepdims",
",",
"name",
"=",
"name",
")",
"return",
"[",
"node",
"]"
] | 27.344828
| 17.827586
|
def first(self):
    """
    Return the first element.

    In 'local' mode this indexes the backing sequence directly; in
    'spark' mode it pulls the first record and densifies it. Any other
    mode falls through and returns None.
    """
    mode = self.mode
    if mode == 'local':
        return self.values[0]
    if mode == 'spark':
        return self.values.first().toarray()
|
[
"def",
"first",
"(",
"self",
")",
":",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"return",
"self",
".",
"values",
"[",
"0",
"]",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"return",
"self",
".",
"values",
".",
"first",
"(",
")",
".",
"toarray",
"(",
")"
] | 24
| 11.333333
|
def update(self, symbol, data, metadata=None, upsert=True, as_of=None, **kwargs):
    """ Append 'data' under the specified 'symbol' name to this library.
    Parameters
    ----------
    symbol : `str`
        symbol name for the item
    data : `pd.DataFrame`
        to be persisted
    metadata : `dict`
        An optional dictionary of metadata to persist along with the symbol. If None and there are existing
        metadata, current metadata will be maintained
    upsert : `bool`
        Write 'data' if no previous version exists.
    as_of : `datetime.datetime`
        The "insert time". Default to datetime.now()
    """
    local_tz = mktz()
    # Default the observation time to "now" and make it timezone-aware
    # (a naive datetime is assumed to be in the local timezone).
    observe_dt = as_of or dt.now()
    if observe_dt.tzinfo is None:
        observe_dt = observe_dt.replace(tzinfo=local_tz)
    data = self._add_observe_dt_index(data, observe_dt)
    if upsert and not self._store.has_symbol(symbol):
        # First write for this symbol: persist the new data as-is.
        combined = data
    else:
        existing_item = self._store.read(symbol, **kwargs)
        if metadata is None:
            metadata = existing_item.metadata
        # mergesort is stable, preserving insertion order among equal keys.
        combined = existing_item.data.append(data).sort_index(kind='mergesort')
    self._store.write(symbol, combined, metadata=metadata, prune_previous_version=True)
|
[
"def",
"update",
"(",
"self",
",",
"symbol",
",",
"data",
",",
"metadata",
"=",
"None",
",",
"upsert",
"=",
"True",
",",
"as_of",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"local_tz",
"=",
"mktz",
"(",
")",
"if",
"not",
"as_of",
":",
"as_of",
"=",
"dt",
".",
"now",
"(",
")",
"if",
"as_of",
".",
"tzinfo",
"is",
"None",
":",
"as_of",
"=",
"as_of",
".",
"replace",
"(",
"tzinfo",
"=",
"local_tz",
")",
"data",
"=",
"self",
".",
"_add_observe_dt_index",
"(",
"data",
",",
"as_of",
")",
"if",
"upsert",
"and",
"not",
"self",
".",
"_store",
".",
"has_symbol",
"(",
"symbol",
")",
":",
"df",
"=",
"data",
"else",
":",
"existing_item",
"=",
"self",
".",
"_store",
".",
"read",
"(",
"symbol",
",",
"*",
"*",
"kwargs",
")",
"if",
"metadata",
"is",
"None",
":",
"metadata",
"=",
"existing_item",
".",
"metadata",
"df",
"=",
"existing_item",
".",
"data",
".",
"append",
"(",
"data",
")",
".",
"sort_index",
"(",
"kind",
"=",
"'mergesort'",
")",
"self",
".",
"_store",
".",
"write",
"(",
"symbol",
",",
"df",
",",
"metadata",
"=",
"metadata",
",",
"prune_previous_version",
"=",
"True",
")"
] | 41.483871
| 19
|
def close(self):
    """Set some objects to None to hopefully free up some memory.

    Drops the references to the error collections and closes each
    alignment-error object before releasing the list itself.
    """
    self._target_context_errors = None
    self._query_context_errors = None
    self._general_errors = None
    for alignment_error in self._alignment_errors:
        alignment_error.close()
    self._alignment_errors = None
|
[
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"_target_context_errors",
"=",
"None",
"self",
".",
"_query_context_errors",
"=",
"None",
"self",
".",
"_general_errors",
"=",
"None",
"for",
"ae",
"in",
"self",
".",
"_alignment_errors",
":",
"ae",
".",
"close",
"(",
")",
"self",
".",
"_alignment_errors",
"=",
"None"
] | 34.5
| 9
|
def build(self, root, schema):
    """ Build the syntax tree for kubectl command line

    Recursively attaches one child node per subcommand, records the
    node's help text and flags, and adds a leaf node per positional arg.
    """
    # Recurse into subcommands first, hanging each subtree off `root`.
    subcommands = schema.get("subcommands")
    if subcommands:
        for subcmd, child_schema in subcommands.items():
            root.children.append(self.build(CommandTree(node=subcmd),
                                            child_schema))
    # Schema shape: {args: {}, options: {}, help: ""}
    root.help = schema.get("help")
    for opt_name, opt_desc in schema.get("options").items():
        if root.node == "kubectl":
            # Flags on the root command apply globally.
            self.globalFlags.append(Option(opt_name, opt_desc["help"]))
        root.localFlags.append(Option(opt_name, opt_desc["help"]))
    for arg in schema.get("args"):
        root.children.append(CommandTree(node=arg))
    return root
|
[
"def",
"build",
"(",
"self",
",",
"root",
",",
"schema",
")",
":",
"if",
"schema",
".",
"get",
"(",
"\"subcommands\"",
")",
"and",
"schema",
"[",
"\"subcommands\"",
"]",
":",
"for",
"subcmd",
",",
"childSchema",
"in",
"schema",
"[",
"\"subcommands\"",
"]",
".",
"items",
"(",
")",
":",
"child",
"=",
"CommandTree",
"(",
"node",
"=",
"subcmd",
")",
"child",
"=",
"self",
".",
"build",
"(",
"child",
",",
"childSchema",
")",
"root",
".",
"children",
".",
"append",
"(",
"child",
")",
"# {args: {}, options: {}, help: \"\"}",
"root",
".",
"help",
"=",
"schema",
".",
"get",
"(",
"\"help\"",
")",
"for",
"name",
",",
"desc",
"in",
"schema",
".",
"get",
"(",
"\"options\"",
")",
".",
"items",
"(",
")",
":",
"if",
"root",
".",
"node",
"==",
"\"kubectl\"",
":",
"# register global flags",
"self",
".",
"globalFlags",
".",
"append",
"(",
"Option",
"(",
"name",
",",
"desc",
"[",
"\"help\"",
"]",
")",
")",
"root",
".",
"localFlags",
".",
"append",
"(",
"Option",
"(",
"name",
",",
"desc",
"[",
"\"help\"",
"]",
")",
")",
"for",
"arg",
"in",
"schema",
".",
"get",
"(",
"\"args\"",
")",
":",
"node",
"=",
"CommandTree",
"(",
"node",
"=",
"arg",
")",
"root",
".",
"children",
".",
"append",
"(",
"node",
")",
"return",
"root"
] | 49
| 12.058824
|
def web_agent(self, reactor, socks_endpoint, pool=None):
    """
    :param socks_endpoint: create one with
        :meth:`txtorcon.TorConfig.create_socks_endpoint`. Can be a
        Deferred.
    :param pool: passed on to the Agent (as ``pool=``)
    """
    # Imported lazily: Agent support is missing on some platforms we
    # support (pypy [??] and old-twisted), so only raise an import
    # error when this feature is actually used there.
    from txtorcon import web
    return web.tor_agent(reactor, socks_endpoint, circuit=self, pool=pool)
|
[
"def",
"web_agent",
"(",
"self",
",",
"reactor",
",",
"socks_endpoint",
",",
"pool",
"=",
"None",
")",
":",
"# local import because there isn't Agent stuff on some",
"# platforms we support, so this will only error if you try",
"# this on the wrong platform (pypy [??] and old-twisted)",
"from",
"txtorcon",
"import",
"web",
"return",
"web",
".",
"tor_agent",
"(",
"reactor",
",",
"socks_endpoint",
",",
"circuit",
"=",
"self",
",",
"pool",
"=",
"pool",
",",
")"
] | 34.944444
| 17.611111
|
def insert_weave_option_group(parser):
    """
    Adds the options used to specify weave options.

    Parameters
    ----------
    parser : object
        OptionParser instance
    """
    # All weave-related flags live in their own argument group so they are
    # listed together in --help output.
    weave_group = parser.add_argument_group("Options for controlling weave")
    weave_group.add_argument(
        "--per-process-weave-cache",
        action="store_true",
        default=False,
        help="""If given, each process will use a separate directory
            for weave compilation. This is slower, but safer if
            several instances may be starting on the same machine at
            the same time.""")
    weave_group.add_argument(
        "--clear-weave-cache-at-start",
        action="store_true",
        default=False,
        help="If given, delete the contents of the weave cache "
             "when the process starts")
    weave_group.add_argument(
        "--clear-weave-cache-at-end",
        action="store_true",
        default=False,
        help="If given, delete the contents of the weave cache "
             "when the process exits")
    weave_group.add_argument(
        "--fixed-weave-cache",
        action="store_true",
        default=False,
        help="If given, use fixed directory PWD/pycbc_inspiral for "
             " the weave cache")
|
[
"def",
"insert_weave_option_group",
"(",
"parser",
")",
":",
"optimization_group",
"=",
"parser",
".",
"add_argument_group",
"(",
"\"Options for controlling \"",
"\"weave\"",
")",
"optimization_group",
".",
"add_argument",
"(",
"\"--per-process-weave-cache\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"\"\"If given, each process will use a separate directory\n for weave compilation. This is slower, but safer if\n several instances may be starting on the same machine at\n the same time.\"\"\"",
")",
"optimization_group",
".",
"add_argument",
"(",
"\"--clear-weave-cache-at-start\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"If given, delete the contents of the weave cache \"",
"\"when the process starts\"",
")",
"optimization_group",
".",
"add_argument",
"(",
"\"--clear-weave-cache-at-end\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"If given, delete the contents of the weave cache \"",
"\"when the process exits\"",
")",
"optimization_group",
".",
"add_argument",
"(",
"\"--fixed-weave-cache\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"If given, use fixed directory PWD/pycbc_inspiral for \"",
"\" the weave cache\"",
")"
] | 40.72973
| 18.108108
|
def conjugate(self):
    """Quaternion conjugate, encapsulated in a new instance.

    For a unit quaternion this coincides with the inverse.

    Returns:
        A new Quaternion object with the same scalar part and the
        vector part negated.
    """
    cls = self.__class__
    return cls(scalar=self.scalar, vector=-self.vector)
|
[
"def",
"conjugate",
"(",
"self",
")",
":",
"return",
"self",
".",
"__class__",
"(",
"scalar",
"=",
"self",
".",
"scalar",
",",
"vector",
"=",
"-",
"self",
".",
"vector",
")"
] | 35
| 23.111111
|
def parse_dimension(self, node):
    """
    Parses <Dimension>

    @param node: Node containing the <Dimension> element
    @type node: xml.etree.Element
    @raise ParseError: When the name is not a string or if the
    dimension is not a signed integer.
    """
    try:
        name = node.lattrib['name']
    except KeyError:
        # Was a bare `except:`, which also swallowed SystemExit,
        # KeyboardInterrupt and unrelated bugs; only the missing
        # 'name' attribute should trigger this error path.
        self.raise_error('<Dimension> must specify a name')
    description = node.lattrib.get('description', '')
    # Collect the seven base-dimension exponents; an absent attribute
    # means an exponent of 0.
    dim = dict()
    for d in ['l', 'm', 't', 'i', 'k', 'c', 'n']:
        dim[d] = int(node.lattrib.get(d, 0))
    self.model.add_dimension(Dimension(name, description, **dim))
|
[
"def",
"parse_dimension",
"(",
"self",
",",
"node",
")",
":",
"try",
":",
"name",
"=",
"node",
".",
"lattrib",
"[",
"'name'",
"]",
"except",
":",
"self",
".",
"raise_error",
"(",
"'<Dimension> must specify a name'",
")",
"description",
"=",
"node",
".",
"lattrib",
".",
"get",
"(",
"'description'",
",",
"''",
")",
"dim",
"=",
"dict",
"(",
")",
"for",
"d",
"in",
"[",
"'l'",
",",
"'m'",
",",
"'t'",
",",
"'i'",
",",
"'k'",
",",
"'c'",
",",
"'n'",
"]",
":",
"dim",
"[",
"d",
"]",
"=",
"int",
"(",
"node",
".",
"lattrib",
".",
"get",
"(",
"d",
",",
"0",
")",
")",
"self",
".",
"model",
".",
"add_dimension",
"(",
"Dimension",
"(",
"name",
",",
"description",
",",
"*",
"*",
"dim",
")",
")"
] | 28.73913
| 20.73913
|
def export_tour(tour_steps, name=None, filename="my_tour.js", url=None):
    """ Exports a tour as a JS file.
    It will include necessary resources as well, such as jQuery.
    You'll be able to copy the tour directly into the Console of
    any web browser to play the tour outside of SeleniumBase runs.

    tour_steps: dict mapping a tour name to its list of step snippets;
        the first step string encodes which tour library produced it.
    name: which tour in tour_steps to export (defaults to "default").
    filename: output file name; must end in ".js".
    url: page URL the exported tour navigates to first (defaults to
        the empty "data:," page). """
    if not name:
        name = "default"
    if name not in tour_steps:
        raise Exception("Tour {%s} does not exist!" % name)
    if not filename.endswith('.js'):
        raise Exception('Tour file must end in ".js"!')
    if not url:
        url = "data:,"
    # Detect the tour library from a marker embedded in the first step.
    tour_type = None
    if "Bootstrap" in tour_steps[name][0]:
        tour_type = "bootstrap"
    elif "Hopscotch" in tour_steps[name][0]:
        tour_type = "hopscotch"
    elif "IntroJS" in tour_steps[name][0]:
        tour_type = "introjs"
    elif "Shepherd" in tour_steps[name][0]:
        tour_type = "shepherd"
    else:
        raise Exception('Unknown tour type!')
    # JS preamble: navigate to the start page and define helper functions
    # (injectCSS/injectJS/injectStyle) used to pull in library resources.
    instructions = (
        '''//////// Load Tour Start Page (if not there now) ////////\n\n'''
        '''if (window.location.href != "%s") {\n'''
        '''    window.location.href="%s";\n'''
        '''}\n\n'''
        '''//////// Resources ////////\n\n'''
        '''function injectCSS(css_link) {'''
        '''var head = document.getElementsByTagName("head")[0];'''
        '''var link = document.createElement("link");'''
        '''link.rel = "stylesheet";'''
        '''link.type = "text/css";'''
        '''link.href = css_link;'''
        '''link.crossorigin = "anonymous";'''
        '''head.appendChild(link);'''
        '''};\n'''
        '''function injectJS(js_link) {'''
        '''var head = document.getElementsByTagName("head")[0];'''
        '''var script = document.createElement("script");'''
        '''script.src = js_link;'''
        '''script.defer;'''
        '''script.type="text/javascript";'''
        '''script.crossorigin = "anonymous";'''
        '''script.onload = function() { null };'''
        '''head.appendChild(script);'''
        '''};\n'''
        '''function injectStyle(css) {'''
        '''var head = document.getElementsByTagName("head")[0];'''
        '''var style = document.createElement("style");'''
        '''style.type = "text/css";'''
        '''style.appendChild(document.createTextNode(css));'''
        '''head.appendChild(style);'''
        '''};\n''' % (url, url))
    # Inject the CSS/JS resources required by the detected tour library.
    if tour_type == "bootstrap":
        jquery_js = constants.JQuery.MIN_JS
        bootstrap_tour_css = constants.BootstrapTour.MIN_CSS
        bootstrap_tour_js = constants.BootstrapTour.MIN_JS
        backdrop_style = style_sheet.bt_backdrop_style
        backdrop_style = backdrop_style.replace('\n', '')
        backdrop_style = js_utils.escape_quotes_if_needed(backdrop_style)
        # jQuery must load before Bootstrap Tour, hence the two phases.
        instructions += 'injectJS("%s");' % jquery_js
        instructions += '\n\n//////// Resources - Load 2 ////////\n\n'
        instructions += 'injectCSS("%s");\n' % bootstrap_tour_css
        instructions += 'injectStyle("%s");\n' % backdrop_style
        instructions += 'injectJS("%s");' % bootstrap_tour_js
    elif tour_type == "hopscotch":
        hopscotch_css = constants.Hopscotch.MIN_CSS
        hopscotch_js = constants.Hopscotch.MIN_JS
        backdrop_style = style_sheet.hops_backdrop_style
        backdrop_style = backdrop_style.replace('\n', '')
        backdrop_style = js_utils.escape_quotes_if_needed(backdrop_style)
        instructions += 'injectCSS("%s");\n' % hopscotch_css
        instructions += 'injectStyle("%s");\n' % backdrop_style
        instructions += 'injectJS("%s");' % hopscotch_js
    elif tour_type == "introjs":
        intro_css = constants.IntroJS.MIN_CSS
        intro_js = constants.IntroJS.MIN_JS
        instructions += 'injectCSS("%s");\n' % intro_css
        instructions += 'injectJS("%s");' % intro_js
    elif tour_type == "shepherd":
        jquery_js = constants.JQuery.MIN_JS
        shepherd_js = constants.Shepherd.MIN_JS
        sh_theme_arrows_css = constants.Shepherd.THEME_ARROWS_CSS
        sh_theme_arrows_fix_css = constants.Shepherd.THEME_ARR_FIX_CSS
        sh_theme_default_css = constants.Shepherd.THEME_DEFAULT_CSS
        sh_theme_dark_css = constants.Shepherd.THEME_DARK_CSS
        sh_theme_sq_css = constants.Shepherd.THEME_SQ_CSS
        sh_theme_sq_dark_css = constants.Shepherd.THEME_SQ_DK_CSS
        tether_js = constants.Tether.MIN_JS
        spinner_css = constants.Messenger.SPINNER_CSS
        backdrop_style = style_sheet.sh_backdrop_style
        backdrop_style = backdrop_style.replace('\n', '')
        backdrop_style = js_utils.escape_quotes_if_needed(backdrop_style)
        # jQuery and Tether must load before Shepherd itself.
        instructions += 'injectCSS("%s");\n' % spinner_css
        instructions += 'injectJS("%s");\n' % jquery_js
        instructions += 'injectJS("%s");' % tether_js
        instructions += '\n\n//////// Resources - Load 2 ////////\n\n'
        instructions += 'injectCSS("%s");' % sh_theme_arrows_css
        instructions += 'injectCSS("%s");' % sh_theme_arrows_fix_css
        instructions += 'injectCSS("%s");' % sh_theme_default_css
        instructions += 'injectCSS("%s");' % sh_theme_dark_css
        instructions += 'injectCSS("%s");' % sh_theme_sq_css
        instructions += 'injectCSS("%s");\n' % sh_theme_sq_dark_css
        instructions += 'injectStyle("%s");\n' % backdrop_style
        instructions += 'injectJS("%s");' % shepherd_js
    # Append the tour steps themselves, then the library-specific footer
    # that starts the tour.
    instructions += '\n\n//////// Tour Code ////////\n\n'
    for tour_step in tour_steps[name]:
        instructions += tour_step
    if tour_type == "bootstrap":
        instructions += (
            """]);
            // Initialize the tour
            tour.init();
            // Start the tour
            tour.start();
            $tour = tour;
            $tour.restart();\n""")
    elif tour_type == "hopscotch":
        instructions += (
            """]
            };
            // Start the tour!
            hopscotch.startTour(tour);
            $tour = hopscotch;\n""")
    elif tour_type == "introjs":
        instructions += (
            """]
            });
            intro.setOption("disableInteraction", true);
            intro.setOption("overlayOpacity", .29);
            intro.setOption("scrollToElement", true);
            intro.setOption("keyboardNavigation", true);
            intro.setOption("exitOnEsc", false);
            intro.setOption("exitOnOverlayClick", false);
            intro.setOption("showStepNumbers", false);
            intro.setOption("showProgress", false);
            intro.start();
            $tour = intro;
            };
            startIntro();\n""")
    elif tour_type == "shepherd":
        instructions += (
            """
            tour.start();
            $tour = tour;\n""")
    else:
        pass
    # Make sure the export directory exists (best-effort; creation races
    # with other processes are tolerated).
    exported_tours_folder = EXPORTED_TOURS_FOLDER
    if exported_tours_folder.endswith("/"):
        exported_tours_folder = exported_tours_folder[:-1]
    if not os.path.exists(exported_tours_folder):
        try:
            os.makedirs(exported_tours_folder)
        except Exception:
            pass
    import codecs
    file_path = exported_tours_folder + "/" + filename
    out_file = codecs.open(file_path, "w+")
    out_file.writelines(instructions)
    out_file.close()
    print('\n>>> [%s] was saved!\n' % file_path)
|
[
"def",
"export_tour",
"(",
"tour_steps",
",",
"name",
"=",
"None",
",",
"filename",
"=",
"\"my_tour.js\"",
",",
"url",
"=",
"None",
")",
":",
"if",
"not",
"name",
":",
"name",
"=",
"\"default\"",
"if",
"name",
"not",
"in",
"tour_steps",
":",
"raise",
"Exception",
"(",
"\"Tour {%s} does not exist!\"",
"%",
"name",
")",
"if",
"not",
"filename",
".",
"endswith",
"(",
"'.js'",
")",
":",
"raise",
"Exception",
"(",
"'Tour file must end in \".js\"!'",
")",
"if",
"not",
"url",
":",
"url",
"=",
"\"data:,\"",
"tour_type",
"=",
"None",
"if",
"\"Bootstrap\"",
"in",
"tour_steps",
"[",
"name",
"]",
"[",
"0",
"]",
":",
"tour_type",
"=",
"\"bootstrap\"",
"elif",
"\"Hopscotch\"",
"in",
"tour_steps",
"[",
"name",
"]",
"[",
"0",
"]",
":",
"tour_type",
"=",
"\"hopscotch\"",
"elif",
"\"IntroJS\"",
"in",
"tour_steps",
"[",
"name",
"]",
"[",
"0",
"]",
":",
"tour_type",
"=",
"\"introjs\"",
"elif",
"\"Shepherd\"",
"in",
"tour_steps",
"[",
"name",
"]",
"[",
"0",
"]",
":",
"tour_type",
"=",
"\"shepherd\"",
"else",
":",
"raise",
"Exception",
"(",
"'Unknown tour type!'",
")",
"instructions",
"=",
"(",
"'''//////// Load Tour Start Page (if not there now) ////////\\n\\n'''",
"'''if (window.location.href != \"%s\") {\\n'''",
"''' window.location.href=\"%s\";\\n'''",
"'''}\\n\\n'''",
"'''//////// Resources ////////\\n\\n'''",
"'''function injectCSS(css_link) {'''",
"'''var head = document.getElementsByTagName(\"head\")[0];'''",
"'''var link = document.createElement(\"link\");'''",
"'''link.rel = \"stylesheet\";'''",
"'''link.type = \"text/css\";'''",
"'''link.href = css_link;'''",
"'''link.crossorigin = \"anonymous\";'''",
"'''head.appendChild(link);'''",
"'''};\\n'''",
"'''function injectJS(js_link) {'''",
"'''var head = document.getElementsByTagName(\"head\")[0];'''",
"'''var script = document.createElement(\"script\");'''",
"'''script.src = js_link;'''",
"'''script.defer;'''",
"'''script.type=\"text/javascript\";'''",
"'''script.crossorigin = \"anonymous\";'''",
"'''script.onload = function() { null };'''",
"'''head.appendChild(script);'''",
"'''};\\n'''",
"'''function injectStyle(css) {'''",
"'''var head = document.getElementsByTagName(\"head\")[0];'''",
"'''var style = document.createElement(\"style\");'''",
"'''style.type = \"text/css\";'''",
"'''style.appendChild(document.createTextNode(css));'''",
"'''head.appendChild(style);'''",
"'''};\\n'''",
"%",
"(",
"url",
",",
"url",
")",
")",
"if",
"tour_type",
"==",
"\"bootstrap\"",
":",
"jquery_js",
"=",
"constants",
".",
"JQuery",
".",
"MIN_JS",
"bootstrap_tour_css",
"=",
"constants",
".",
"BootstrapTour",
".",
"MIN_CSS",
"bootstrap_tour_js",
"=",
"constants",
".",
"BootstrapTour",
".",
"MIN_JS",
"backdrop_style",
"=",
"style_sheet",
".",
"bt_backdrop_style",
"backdrop_style",
"=",
"backdrop_style",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
"backdrop_style",
"=",
"js_utils",
".",
"escape_quotes_if_needed",
"(",
"backdrop_style",
")",
"instructions",
"+=",
"'injectJS(\"%s\");'",
"%",
"jquery_js",
"instructions",
"+=",
"'\\n\\n//////// Resources - Load 2 ////////\\n\\n'",
"instructions",
"+=",
"'injectCSS(\"%s\");\\n'",
"%",
"bootstrap_tour_css",
"instructions",
"+=",
"'injectStyle(\"%s\");\\n'",
"%",
"backdrop_style",
"instructions",
"+=",
"'injectJS(\"%s\");'",
"%",
"bootstrap_tour_js",
"elif",
"tour_type",
"==",
"\"hopscotch\"",
":",
"hopscotch_css",
"=",
"constants",
".",
"Hopscotch",
".",
"MIN_CSS",
"hopscotch_js",
"=",
"constants",
".",
"Hopscotch",
".",
"MIN_JS",
"backdrop_style",
"=",
"style_sheet",
".",
"hops_backdrop_style",
"backdrop_style",
"=",
"backdrop_style",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
"backdrop_style",
"=",
"js_utils",
".",
"escape_quotes_if_needed",
"(",
"backdrop_style",
")",
"instructions",
"+=",
"'injectCSS(\"%s\");\\n'",
"%",
"hopscotch_css",
"instructions",
"+=",
"'injectStyle(\"%s\");\\n'",
"%",
"backdrop_style",
"instructions",
"+=",
"'injectJS(\"%s\");'",
"%",
"hopscotch_js",
"elif",
"tour_type",
"==",
"\"introjs\"",
":",
"intro_css",
"=",
"constants",
".",
"IntroJS",
".",
"MIN_CSS",
"intro_js",
"=",
"constants",
".",
"IntroJS",
".",
"MIN_JS",
"instructions",
"+=",
"'injectCSS(\"%s\");\\n'",
"%",
"intro_css",
"instructions",
"+=",
"'injectJS(\"%s\");'",
"%",
"intro_js",
"elif",
"tour_type",
"==",
"\"shepherd\"",
":",
"jquery_js",
"=",
"constants",
".",
"JQuery",
".",
"MIN_JS",
"shepherd_js",
"=",
"constants",
".",
"Shepherd",
".",
"MIN_JS",
"sh_theme_arrows_css",
"=",
"constants",
".",
"Shepherd",
".",
"THEME_ARROWS_CSS",
"sh_theme_arrows_fix_css",
"=",
"constants",
".",
"Shepherd",
".",
"THEME_ARR_FIX_CSS",
"sh_theme_default_css",
"=",
"constants",
".",
"Shepherd",
".",
"THEME_DEFAULT_CSS",
"sh_theme_dark_css",
"=",
"constants",
".",
"Shepherd",
".",
"THEME_DARK_CSS",
"sh_theme_sq_css",
"=",
"constants",
".",
"Shepherd",
".",
"THEME_SQ_CSS",
"sh_theme_sq_dark_css",
"=",
"constants",
".",
"Shepherd",
".",
"THEME_SQ_DK_CSS",
"tether_js",
"=",
"constants",
".",
"Tether",
".",
"MIN_JS",
"spinner_css",
"=",
"constants",
".",
"Messenger",
".",
"SPINNER_CSS",
"backdrop_style",
"=",
"style_sheet",
".",
"sh_backdrop_style",
"backdrop_style",
"=",
"backdrop_style",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
"backdrop_style",
"=",
"js_utils",
".",
"escape_quotes_if_needed",
"(",
"backdrop_style",
")",
"instructions",
"+=",
"'injectCSS(\"%s\");\\n'",
"%",
"spinner_css",
"instructions",
"+=",
"'injectJS(\"%s\");\\n'",
"%",
"jquery_js",
"instructions",
"+=",
"'injectJS(\"%s\");'",
"%",
"tether_js",
"instructions",
"+=",
"'\\n\\n//////// Resources - Load 2 ////////\\n\\n'",
"instructions",
"+=",
"'injectCSS(\"%s\");'",
"%",
"sh_theme_arrows_css",
"instructions",
"+=",
"'injectCSS(\"%s\");'",
"%",
"sh_theme_arrows_fix_css",
"instructions",
"+=",
"'injectCSS(\"%s\");'",
"%",
"sh_theme_default_css",
"instructions",
"+=",
"'injectCSS(\"%s\");'",
"%",
"sh_theme_dark_css",
"instructions",
"+=",
"'injectCSS(\"%s\");'",
"%",
"sh_theme_sq_css",
"instructions",
"+=",
"'injectCSS(\"%s\");\\n'",
"%",
"sh_theme_sq_dark_css",
"instructions",
"+=",
"'injectStyle(\"%s\");\\n'",
"%",
"backdrop_style",
"instructions",
"+=",
"'injectJS(\"%s\");'",
"%",
"shepherd_js",
"instructions",
"+=",
"'\\n\\n//////// Tour Code ////////\\n\\n'",
"for",
"tour_step",
"in",
"tour_steps",
"[",
"name",
"]",
":",
"instructions",
"+=",
"tour_step",
"if",
"tour_type",
"==",
"\"bootstrap\"",
":",
"instructions",
"+=",
"(",
"\"\"\"]);\n // Initialize the tour\n tour.init();\n // Start the tour\n tour.start();\n $tour = tour;\n $tour.restart();\\n\"\"\"",
")",
"elif",
"tour_type",
"==",
"\"hopscotch\"",
":",
"instructions",
"+=",
"(",
"\"\"\"]\n };\n // Start the tour!\n hopscotch.startTour(tour);\n $tour = hopscotch;\\n\"\"\"",
")",
"elif",
"tour_type",
"==",
"\"introjs\"",
":",
"instructions",
"+=",
"(",
"\"\"\"]\n });\n intro.setOption(\"disableInteraction\", true);\n intro.setOption(\"overlayOpacity\", .29);\n intro.setOption(\"scrollToElement\", true);\n intro.setOption(\"keyboardNavigation\", true);\n intro.setOption(\"exitOnEsc\", false);\n intro.setOption(\"exitOnOverlayClick\", false);\n intro.setOption(\"showStepNumbers\", false);\n intro.setOption(\"showProgress\", false);\n intro.start();\n $tour = intro;\n };\n startIntro();\\n\"\"\"",
")",
"elif",
"tour_type",
"==",
"\"shepherd\"",
":",
"instructions",
"+=",
"(",
"\"\"\"\n tour.start();\n $tour = tour;\\n\"\"\"",
")",
"else",
":",
"pass",
"exported_tours_folder",
"=",
"EXPORTED_TOURS_FOLDER",
"if",
"exported_tours_folder",
".",
"endswith",
"(",
"\"/\"",
")",
":",
"exported_tours_folder",
"=",
"exported_tours_folder",
"[",
":",
"-",
"1",
"]",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"exported_tours_folder",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"exported_tours_folder",
")",
"except",
"Exception",
":",
"pass",
"import",
"codecs",
"file_path",
"=",
"exported_tours_folder",
"+",
"\"/\"",
"+",
"filename",
"out_file",
"=",
"codecs",
".",
"open",
"(",
"file_path",
",",
"\"w+\"",
")",
"out_file",
".",
"writelines",
"(",
"instructions",
")",
"out_file",
".",
"close",
"(",
")",
"print",
"(",
"'\\n>>> [%s] was saved!\\n'",
"%",
"file_path",
")"
] | 41.242775
| 15.306358
|
def _save_potentials(self, directory):
"""save potentials to a directory
"""
print('saving potentials')
digits = int(np.ceil(np.log10(self.configs.configs.shape[0])))
for i in range(0, self.configs.configs.shape[0]):
pot_data = self.get_potential(i)
filename_raw = 'pot{0:0' + '{0}'.format(digits) + '}.dat'
filename = directory + os.sep + filename_raw.format(i + 1)
nodes = self.grid.nodes['sorted'][:, 1:3]
all_data = np.hstack((
nodes,
pot_data[0][:, np.newaxis],
pot_data[1][:, np.newaxis],
))
with open(filename, 'wb') as fid:
np.savetxt(fid, all_data)
|
[
"def",
"_save_potentials",
"(",
"self",
",",
"directory",
")",
":",
"print",
"(",
"'saving potentials'",
")",
"digits",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"np",
".",
"log10",
"(",
"self",
".",
"configs",
".",
"configs",
".",
"shape",
"[",
"0",
"]",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"configs",
".",
"configs",
".",
"shape",
"[",
"0",
"]",
")",
":",
"pot_data",
"=",
"self",
".",
"get_potential",
"(",
"i",
")",
"filename_raw",
"=",
"'pot{0:0'",
"+",
"'{0}'",
".",
"format",
"(",
"digits",
")",
"+",
"'}.dat'",
"filename",
"=",
"directory",
"+",
"os",
".",
"sep",
"+",
"filename_raw",
".",
"format",
"(",
"i",
"+",
"1",
")",
"nodes",
"=",
"self",
".",
"grid",
".",
"nodes",
"[",
"'sorted'",
"]",
"[",
":",
",",
"1",
":",
"3",
"]",
"all_data",
"=",
"np",
".",
"hstack",
"(",
"(",
"nodes",
",",
"pot_data",
"[",
"0",
"]",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"pot_data",
"[",
"1",
"]",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
")",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"fid",
":",
"np",
".",
"savetxt",
"(",
"fid",
",",
"all_data",
")"
] | 40.5
| 12.944444
|
def _try_weakref(arg, remove_callback):
"""Return a weak reference to arg if possible, or arg itself if not."""
try:
arg = weakref.ref(arg, remove_callback)
except TypeError:
# Not all types can have a weakref. That includes strings
# and floats and such, so just pass them through directly.
pass
return arg
|
[
"def",
"_try_weakref",
"(",
"arg",
",",
"remove_callback",
")",
":",
"try",
":",
"arg",
"=",
"weakref",
".",
"ref",
"(",
"arg",
",",
"remove_callback",
")",
"except",
"TypeError",
":",
"# Not all types can have a weakref. That includes strings",
"# and floats and such, so just pass them through directly.",
"pass",
"return",
"arg"
] | 38.555556
| 18.222222
|
def Input_setIgnoreInputEvents(self, ignore):
    """
    Function path: Input.setIgnoreInputEvents
    Domain: Input
    Method name: setIgnoreInputEvents

    Parameters:
        Required arguments:
            'ignore' (type: boolean) -> Ignores input events processing when set to true.
    No return value.

    Description: Ignores input events (useful while auditing page).
    """
    # Validate the argument type up front so the protocol layer never
    # sees a non-boolean value.
    assert isinstance(ignore, (bool,)), (
        "Argument 'ignore' must be of type '['bool']'. Received type: '%s'"
        % type(ignore))
    return self.synchronous_command('Input.setIgnoreInputEvents',
                                    ignore=ignore)
|
[
"def",
"Input_setIgnoreInputEvents",
"(",
"self",
",",
"ignore",
")",
":",
"assert",
"isinstance",
"(",
"ignore",
",",
"(",
"bool",
",",
")",
")",
",",
"\"Argument 'ignore' must be of type '['bool']'. Received type: '%s'\"",
"%",
"type",
"(",
"ignore",
")",
"subdom_funcs",
"=",
"self",
".",
"synchronous_command",
"(",
"'Input.setIgnoreInputEvents'",
",",
"ignore",
"=",
"ignore",
")",
"return",
"subdom_funcs"
] | 31.684211
| 20.526316
|
def usnjrnl_timeline(self):
    """Iterates over the changes occurred within the filesystem.

    Yields UsnJrnlEvent namedtuples containing:
        file_reference_number: known in Unix FS as inode.
        path: full path of the file.
        size: size of the file in bytes if recoverable.
        allocated: whether the file exists or it has been deleted.
        timestamp: timespamp of the change.
        changes: list of changes applied to the file.
        attributes: list of file attributes.
    """
    content_by_inode = defaultdict(list)
    self.logger.debug("Extracting Update Sequence Number journal.")
    journal = self._read_journal()
    # Group directory entries by inode so the journal records can be
    # matched back to their files.
    for dirent in self._visit_filesystem():
        content_by_inode[dirent.inode].append(dirent)
    self.logger.debug("Generating timeline.")
    yield from generate_timeline(journal, content_by_inode)
|
[
"def",
"usnjrnl_timeline",
"(",
"self",
")",
":",
"filesystem_content",
"=",
"defaultdict",
"(",
"list",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Extracting Update Sequence Number journal.\"",
")",
"journal",
"=",
"self",
".",
"_read_journal",
"(",
")",
"for",
"dirent",
"in",
"self",
".",
"_visit_filesystem",
"(",
")",
":",
"filesystem_content",
"[",
"dirent",
".",
"inode",
"]",
".",
"append",
"(",
"dirent",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Generating timeline.\"",
")",
"yield",
"from",
"generate_timeline",
"(",
"journal",
",",
"filesystem_content",
")"
] | 36.56
| 20.2
|
def register(self, job):
    """Takes a job (unencoded) and adorns it with a unique key; this makes
    an entry in the database without any further specification."""
    with self.lock:
        sql = ('insert into "jobs" ("name", "session", "status") '
               'values (?, ?, ?)')
        self.cur.execute(sql, (job.name, self.session, Status.INACTIVE))
        # the rowid assigned by SQLite becomes the job's unique key
        key = self.cur.lastrowid
        self.jobs[key] = job
        return JobMessage(key, job.node)
|
[
"def",
"register",
"(",
"self",
",",
"job",
")",
":",
"with",
"self",
".",
"lock",
":",
"self",
".",
"cur",
".",
"execute",
"(",
"'insert into \"jobs\" (\"name\", \"session\", \"status\") '",
"'values (?, ?, ?)'",
",",
"(",
"job",
".",
"name",
",",
"self",
".",
"session",
",",
"Status",
".",
"INACTIVE",
")",
")",
"self",
".",
"jobs",
"[",
"self",
".",
"cur",
".",
"lastrowid",
"]",
"=",
"job",
"return",
"JobMessage",
"(",
"self",
".",
"cur",
".",
"lastrowid",
",",
"job",
".",
"node",
")"
] | 52.777778
| 15
|
def reject(self, *, requeue=True):
    """
    Reject the message.

    :keyword bool requeue: if true, the broker will attempt to requeue the
        message and deliver it to an alternate consumer.
    """
    sender = self.sender
    sender.send_BasicReject(self.delivery_tag, requeue)
|
[
"def",
"reject",
"(",
"self",
",",
"*",
",",
"requeue",
"=",
"True",
")",
":",
"self",
".",
"sender",
".",
"send_BasicReject",
"(",
"self",
".",
"delivery_tag",
",",
"requeue",
")"
] | 35.625
| 17.625
|
def _iter_frequencies(q, frange, mismatch, dur):
    """Iterate over the frequencies of this 'QPlane'

    Parameters
    ----------
    q:
        q value
    frange: 'list'
        upper and lower bounds of frequency range
    mismatch:
        percentage of desired fractional mismatch
    dur:
        duration of timeseries in seconds

    Yields
    ------
    float
        the next Q-Tile frequency, quantized to a multiple of 1/dur
    """
    # work out how many frequencies we need
    minf, maxf = frange
    fcum_mismatch = log(float(maxf) / minf) * (2 + q**2)**(1/2.) / 2.
    nfreq = int(max(1, ceil(fcum_mismatch / deltam_f(mismatch))))
    fstep = fcum_mismatch / nfreq
    fstepmin = 1. / dur
    # for each frequency, yield a QTile
    # (`range` replaces Py2-only `xrange` so this runs on Python 3)
    for i in range(nfreq):
        yield (float(minf) *
               exp(2 / (2 + q**2)**(1/2.) * (i + .5) * fstep) //
               fstepmin * fstepmin)
    # The original ended with `raise StopIteration()`: under PEP 479
    # (Python 3.7+) that is converted to a RuntimeError inside a
    # generator. Falling off the end terminates the generator correctly.
|
[
"def",
"_iter_frequencies",
"(",
"q",
",",
"frange",
",",
"mismatch",
",",
"dur",
")",
":",
"# work out how many frequencies we need",
"minf",
",",
"maxf",
"=",
"frange",
"fcum_mismatch",
"=",
"log",
"(",
"float",
"(",
"maxf",
")",
"/",
"minf",
")",
"*",
"(",
"2",
"+",
"q",
"**",
"2",
")",
"**",
"(",
"1",
"/",
"2.",
")",
"/",
"2.",
"nfreq",
"=",
"int",
"(",
"max",
"(",
"1",
",",
"ceil",
"(",
"fcum_mismatch",
"/",
"deltam_f",
"(",
"mismatch",
")",
")",
")",
")",
"fstep",
"=",
"fcum_mismatch",
"/",
"nfreq",
"fstepmin",
"=",
"1.",
"/",
"dur",
"# for each frequency, yield a QTile",
"for",
"i",
"in",
"xrange",
"(",
"nfreq",
")",
":",
"yield",
"(",
"float",
"(",
"minf",
")",
"*",
"exp",
"(",
"2",
"/",
"(",
"2",
"+",
"q",
"**",
"2",
")",
"**",
"(",
"1",
"/",
"2.",
")",
"*",
"(",
"i",
"+",
".5",
")",
"*",
"fstep",
")",
"//",
"fstepmin",
"*",
"fstepmin",
")",
"raise",
"StopIteration",
"(",
")"
] | 28.064516
| 18.225806
|
def subplot(self, index_x, index_y):
    """
    Sets the active subplot.

    Parameters
    ----------
    index_x : int
        Index of the subplot to activate in the x direction.
    index_y : int
        Index of the subplot to activate in the y direction.
    """
    loc = (index_x, index_y)
    self._active_renderer_index = self.loc_to_index(loc)
|
[
"def",
"subplot",
"(",
"self",
",",
"index_x",
",",
"index_y",
")",
":",
"self",
".",
"_active_renderer_index",
"=",
"self",
".",
"loc_to_index",
"(",
"(",
"index_x",
",",
"index_y",
")",
")"
] | 26.5
| 21.214286
|
def create_tmp_rootdir(self):
    """
    create a rootdir that we can destroy later again

    Returns
    -------
    str or None
        path of the new temporary rootdir, or None when this instance
        has no rootdir configured
    """
    # No persistent rootdir configured -> nothing temporary to create.
    if not self.rootdir:
        return None
    tmp_rootdir = tempfile.mkdtemp(prefix='bcolz-')
    # remember the directory so it can be cleaned up later
    self._dir_clean_list.append(tmp_rootdir)
    return tmp_rootdir
|
[
"def",
"create_tmp_rootdir",
"(",
"self",
")",
":",
"if",
"self",
".",
"rootdir",
":",
"tmp_rootdir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"prefix",
"=",
"'bcolz-'",
")",
"self",
".",
"_dir_clean_list",
".",
"append",
"(",
"tmp_rootdir",
")",
"else",
":",
"tmp_rootdir",
"=",
"None",
"return",
"tmp_rootdir"
] | 24.357143
| 18.214286
|
def preserve_channel_dim(func):
    """Preserve dummy channel dim.

    Decorator: if the input image had shape (H, W, 1) and the wrapped
    function returned a 2-D result, re-append the length-1 channel axis.
    """
    @wraps(func)
    def wrapped_function(img, *args, **kwargs):
        in_shape = img.shape
        result = func(img, *args, **kwargs)
        lost_channel = (
            len(in_shape) == 3
            and in_shape[-1] == 1
            and len(result.shape) == 2
        )
        if lost_channel:
            result = np.expand_dims(result, axis=-1)
        return result
    return wrapped_function
|
[
"def",
"preserve_channel_dim",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped_function",
"(",
"img",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"shape",
"=",
"img",
".",
"shape",
"result",
"=",
"func",
"(",
"img",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"len",
"(",
"shape",
")",
"==",
"3",
"and",
"shape",
"[",
"-",
"1",
"]",
"==",
"1",
"and",
"len",
"(",
"result",
".",
"shape",
")",
"==",
"2",
":",
"result",
"=",
"np",
".",
"expand_dims",
"(",
"result",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"result",
"return",
"wrapped_function"
] | 33.818182
| 15.909091
|
def _pack3(obj, fp, **options):
    """
    Serialize a Python object into MessagePack bytes.

    Args:
        obj: a Python object
        fp: a .write()-supporting file-like object

    Kwargs:
        ext_handlers (dict): dictionary of Ext handlers, mapping a custom type
                             to a callable that packs an instance of the type
                             into an Ext object
        force_float_precision (str): "single" to force packing floats as
                                     IEEE-754 single-precision floats,
                                     "double" to force packing floats as
                                     IEEE-754 double-precision floats.

    Returns:
        None.

    Raises:
        UnsupportedType(PackException):
            Object type not supported for packing.

    Example:
    >>> f = open('test.bin', 'wb')
    >>> umsgpack.pack({u"compact": True, u"schema": 0}, f)
    >>>
    """
    global compatibility
    ext_handlers = options.get("ext_handlers")
    # NOTE: the order of this dispatch chain is load-bearing.
    if obj is None:
        _pack_nil(obj, fp, options)
    elif ext_handlers and obj.__class__ in ext_handlers:
        # An exact-class ext handler takes precedence over the built-ins.
        _pack_ext(ext_handlers[obj.__class__](obj), fp, options)
    elif isinstance(obj, bool):
        # bool must be tested before int: bool is an int subclass.
        _pack_boolean(obj, fp, options)
    elif isinstance(obj, int):
        _pack_integer(obj, fp, options)
    elif isinstance(obj, float):
        _pack_float(obj, fp, options)
    elif compatibility and isinstance(obj, str):
        # Compatibility mode packs strings/bytes with the old "raw" type,
        # so these branches must precede the normal str/bytes ones.
        _pack_oldspec_raw(obj.encode('utf-8'), fp, options)
    elif compatibility and isinstance(obj, bytes):
        _pack_oldspec_raw(obj, fp, options)
    elif isinstance(obj, str):
        _pack_string(obj, fp, options)
    elif isinstance(obj, bytes):
        _pack_binary(obj, fp, options)
    elif isinstance(obj, (list, tuple)):
        _pack_array(obj, fp, options)
    elif isinstance(obj, dict):
        _pack_map(obj, fp, options)
    elif isinstance(obj, datetime.datetime):
        _pack_ext_timestamp(obj, fp, options)
    elif isinstance(obj, Ext):
        _pack_ext(obj, fp, options)
    elif ext_handlers:
        # Linear search for superclass
        t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)
        if t:
            _pack_ext(ext_handlers[t](obj), fp, options)
        else:
            raise UnsupportedTypeException(
                "unsupported type: %s" % str(type(obj)))
    else:
        raise UnsupportedTypeException(
            "unsupported type: %s" % str(type(obj)))
|
[
"def",
"_pack3",
"(",
"obj",
",",
"fp",
",",
"*",
"*",
"options",
")",
":",
"global",
"compatibility",
"ext_handlers",
"=",
"options",
".",
"get",
"(",
"\"ext_handlers\"",
")",
"if",
"obj",
"is",
"None",
":",
"_pack_nil",
"(",
"obj",
",",
"fp",
",",
"options",
")",
"elif",
"ext_handlers",
"and",
"obj",
".",
"__class__",
"in",
"ext_handlers",
":",
"_pack_ext",
"(",
"ext_handlers",
"[",
"obj",
".",
"__class__",
"]",
"(",
"obj",
")",
",",
"fp",
",",
"options",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"bool",
")",
":",
"_pack_boolean",
"(",
"obj",
",",
"fp",
",",
"options",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"int",
")",
":",
"_pack_integer",
"(",
"obj",
",",
"fp",
",",
"options",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"float",
")",
":",
"_pack_float",
"(",
"obj",
",",
"fp",
",",
"options",
")",
"elif",
"compatibility",
"and",
"isinstance",
"(",
"obj",
",",
"str",
")",
":",
"_pack_oldspec_raw",
"(",
"obj",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"fp",
",",
"options",
")",
"elif",
"compatibility",
"and",
"isinstance",
"(",
"obj",
",",
"bytes",
")",
":",
"_pack_oldspec_raw",
"(",
"obj",
",",
"fp",
",",
"options",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"str",
")",
":",
"_pack_string",
"(",
"obj",
",",
"fp",
",",
"options",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"bytes",
")",
":",
"_pack_binary",
"(",
"obj",
",",
"fp",
",",
"options",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"_pack_array",
"(",
"obj",
",",
"fp",
",",
"options",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"_pack_map",
"(",
"obj",
",",
"fp",
",",
"options",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"datetime",
".",
"datetime",
")",
":",
"_pack_ext_timestamp",
"(",
"obj",
",",
"fp",
",",
"options",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"Ext",
")",
":",
"_pack_ext",
"(",
"obj",
",",
"fp",
",",
"options",
")",
"elif",
"ext_handlers",
":",
"# Linear search for superclass",
"t",
"=",
"next",
"(",
"(",
"t",
"for",
"t",
"in",
"ext_handlers",
".",
"keys",
"(",
")",
"if",
"isinstance",
"(",
"obj",
",",
"t",
")",
")",
",",
"None",
")",
"if",
"t",
":",
"_pack_ext",
"(",
"ext_handlers",
"[",
"t",
"]",
"(",
"obj",
")",
",",
"fp",
",",
"options",
")",
"else",
":",
"raise",
"UnsupportedTypeException",
"(",
"\"unsupported type: %s\"",
"%",
"str",
"(",
"type",
"(",
"obj",
")",
")",
")",
"else",
":",
"raise",
"UnsupportedTypeException",
"(",
"\"unsupported type: %s\"",
"%",
"str",
"(",
"type",
"(",
"obj",
")",
")",
")"
] | 34.628571
| 16.914286
|
def session_exists(self, username):
    """Check whether the backing store holds a cached session for a user.

    :param username: name of the user whose session is looked up
    :type username: str
    :return: the store's answer — truthy when a session exists
             (exact type depends on ``containsSession``; verify against store)
    :rtype: bool
    """
    logger.debug("session_exists(%s)?" % username)
    found = self._store.containsSession(username, 1)
    return found
|
[
"def",
"session_exists",
"(",
"self",
",",
"username",
")",
":",
"logger",
".",
"debug",
"(",
"\"session_exists(%s)?\"",
"%",
"username",
")",
"return",
"self",
".",
"_store",
".",
"containsSession",
"(",
"username",
",",
"1",
")"
] | 27.555556
| 12.444444
|
async def query(
    self,
    q: AnyStr,
    *,
    epoch: str = 'ns',
    chunked: bool = False,
    chunk_size: Optional[int] = None,
    db: Optional[str] = None,
    use_cache: bool = False,
) -> Union[AsyncGenerator[ResultType, None], ResultType]:
    """Sends a query to InfluxDB.
    Please refer to the InfluxDB documentation for all the possible queries:
    https://docs.influxdata.com/influxdb/latest/query_language/

    :param q: Raw query string
    :param db: Database to be queried. Defaults to `self.db`.
    :param epoch: Precision level of response timestamps.
        Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``.
    :param chunked: If ``True``, makes InfluxDB return results in streamed batches
        rather than as a single response.
        Returns an AsyncGenerator which yields responses
        in the same format as non-chunked queries.
    :param chunk_size: Max number of points for each chunk. By default, InfluxDB chunks
        responses by series or by every 10,000 points, whichever occurs first.
    :param use_cache: If ``True`` and a Redis connection is configured, look the
        raw response up in (and write it back to) the cache. Incompatible with
        ``chunked``.
    :return: Response in the format specified by the combination of
        :attr:`.InfluxDBClient.output` and ``chunked``
    """
    async def _chunked_generator(url, data):
        # Stream the HTTP response body and yield one parsed JSON chunk
        # per line as InfluxDB emits it.
        async with self._session.post(url, data=data) as resp:
            logger.debug(f'{resp.status} (CHUNKED): {q}')
            # Hack to avoid aiohttp raising ValueError('Line is too long')
            # The number 16 is arbitrary (may be too large/small).
            resp.content._high_water *= 16
            async for chunk in resp.content:
                chunk = json.loads(chunk)
                self._check_error(chunk)
                yield chunk
    # Lazily create the HTTP session on first use.
    if not self._session:
        await self.create_session()
    # InfluxDB documentation is wrong regarding `/query` parameters
    # See https://github.com/influxdata/docs.influxdata.com/issues/1807
    if not isinstance(chunked, bool):
        raise ValueError("'chunked' must be a boolean")
    data = dict(q=q, db=db or self.db, chunked=str(chunked).lower(), epoch=epoch)
    if chunked and chunk_size:
        data['chunk_size'] = chunk_size
    url = self.url.format(endpoint='query')
    if chunked:
        # Chunked mode can only be streamed: it is incompatible with the
        # Redis cache, with non-async client modes, and with non-JSON output.
        if use_cache:
            raise ValueError("Can't use cache w/ chunked queries")
        if self.mode != 'async':
            raise ValueError("Can't use 'chunked' with non-async mode")
        if self.output == 'json':
            return _chunked_generator(url, data)
        raise ValueError(f"Chunked queries are not support with {self.output!r} output")
    # Non-chunked path: optionally served from the Redis cache, keyed by
    # the raw query string.
    key = f'aioinflux:{q}'
    if use_cache and self._redis and await self._redis.exists(key):
        logger.debug(f'Cache HIT: {q}')
        data = lz4.decompress(await self._redis.get(key))
    else:
        async with self._session.post(url, data=data) as resp:
            data = await resp.read()
            if use_cache and self._redis:
                logger.debug(f'Cache MISS ({resp.status}): {q}')
                # Only successful responses are cached; entries expire
                # after ``self.cache_expiry`` seconds.
                if resp.status == 200:
                    await self._redis.set(key, lz4.compress(data))
                    await self._redis.expire(key, self.cache_expiry)
            else:
                logger.debug(f'{resp.status}: {q}')
    # ``data`` is raw bytes here, whether it came from cache or HTTP.
    data = json.loads(data)
    self._check_error(data)
    if self.output == 'json':
        return data
    elif self.output == 'dataframe':
        return serialization.dataframe.parse(data)
    else:
        raise ValueError('Invalid output format')
|
[
"async",
"def",
"query",
"(",
"self",
",",
"q",
":",
"AnyStr",
",",
"*",
",",
"epoch",
":",
"str",
"=",
"'ns'",
",",
"chunked",
":",
"bool",
"=",
"False",
",",
"chunk_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"db",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"use_cache",
":",
"bool",
"=",
"False",
",",
")",
"->",
"Union",
"[",
"AsyncGenerator",
"[",
"ResultType",
",",
"None",
"]",
",",
"ResultType",
"]",
":",
"async",
"def",
"_chunked_generator",
"(",
"url",
",",
"data",
")",
":",
"async",
"with",
"self",
".",
"_session",
".",
"post",
"(",
"url",
",",
"data",
"=",
"data",
")",
"as",
"resp",
":",
"logger",
".",
"debug",
"(",
"f'{resp.status} (CHUNKED): {q}'",
")",
"# Hack to avoid aiohttp raising ValueError('Line is too long')",
"# The number 16 is arbitrary (may be too large/small).",
"resp",
".",
"content",
".",
"_high_water",
"*=",
"16",
"async",
"for",
"chunk",
"in",
"resp",
".",
"content",
":",
"chunk",
"=",
"json",
".",
"loads",
"(",
"chunk",
")",
"self",
".",
"_check_error",
"(",
"chunk",
")",
"yield",
"chunk",
"if",
"not",
"self",
".",
"_session",
":",
"await",
"self",
".",
"create_session",
"(",
")",
"# InfluxDB documentation is wrong regarding `/query` parameters",
"# See https://github.com/influxdata/docs.influxdata.com/issues/1807",
"if",
"not",
"isinstance",
"(",
"chunked",
",",
"bool",
")",
":",
"raise",
"ValueError",
"(",
"\"'chunked' must be a boolean\"",
")",
"data",
"=",
"dict",
"(",
"q",
"=",
"q",
",",
"db",
"=",
"db",
"or",
"self",
".",
"db",
",",
"chunked",
"=",
"str",
"(",
"chunked",
")",
".",
"lower",
"(",
")",
",",
"epoch",
"=",
"epoch",
")",
"if",
"chunked",
"and",
"chunk_size",
":",
"data",
"[",
"'chunk_size'",
"]",
"=",
"chunk_size",
"url",
"=",
"self",
".",
"url",
".",
"format",
"(",
"endpoint",
"=",
"'query'",
")",
"if",
"chunked",
":",
"if",
"use_cache",
":",
"raise",
"ValueError",
"(",
"\"Can't use cache w/ chunked queries\"",
")",
"if",
"self",
".",
"mode",
"!=",
"'async'",
":",
"raise",
"ValueError",
"(",
"\"Can't use 'chunked' with non-async mode\"",
")",
"if",
"self",
".",
"output",
"==",
"'json'",
":",
"return",
"_chunked_generator",
"(",
"url",
",",
"data",
")",
"raise",
"ValueError",
"(",
"f\"Chunked queries are not support with {self.output!r} output\"",
")",
"key",
"=",
"f'aioinflux:{q}'",
"if",
"use_cache",
"and",
"self",
".",
"_redis",
"and",
"await",
"self",
".",
"_redis",
".",
"exists",
"(",
"key",
")",
":",
"logger",
".",
"debug",
"(",
"f'Cache HIT: {q}'",
")",
"data",
"=",
"lz4",
".",
"decompress",
"(",
"await",
"self",
".",
"_redis",
".",
"get",
"(",
"key",
")",
")",
"else",
":",
"async",
"with",
"self",
".",
"_session",
".",
"post",
"(",
"url",
",",
"data",
"=",
"data",
")",
"as",
"resp",
":",
"data",
"=",
"await",
"resp",
".",
"read",
"(",
")",
"if",
"use_cache",
"and",
"self",
".",
"_redis",
":",
"logger",
".",
"debug",
"(",
"f'Cache MISS ({resp.status}): {q}'",
")",
"if",
"resp",
".",
"status",
"==",
"200",
":",
"await",
"self",
".",
"_redis",
".",
"set",
"(",
"key",
",",
"lz4",
".",
"compress",
"(",
"data",
")",
")",
"await",
"self",
".",
"_redis",
".",
"expire",
"(",
"key",
",",
"self",
".",
"cache_expiry",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"f'{resp.status}: {q}'",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"data",
")",
"self",
".",
"_check_error",
"(",
"data",
")",
"if",
"self",
".",
"output",
"==",
"'json'",
":",
"return",
"data",
"elif",
"self",
".",
"output",
"==",
"'dataframe'",
":",
"return",
"serialization",
".",
"dataframe",
".",
"parse",
"(",
"data",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid output format'",
")"
] | 44.22619
| 19.797619
|
def write_metadata(self, handler):
    """Forward this column's metadata to *handler*, if any is set."""
    if self.metadata is None:
        return
    handler.write_metadata(self.cname, self.metadata)
|
[
"def",
"write_metadata",
"(",
"self",
",",
"handler",
")",
":",
"if",
"self",
".",
"metadata",
"is",
"not",
"None",
":",
"handler",
".",
"write_metadata",
"(",
"self",
".",
"cname",
",",
"self",
".",
"metadata",
")"
] | 41.25
| 7.5
|
def assign_default_log_values(self, fpath, line, formatter):
    '''Build the skeleton record for one log line with every default field.

    Fields and defaults: id (None), file (*fpath*), host (this collector's
    HOST), formatter (*formatter*), event ('event'), data ({}), raw (the
    original *line*), timestamp (current UTC time, ISO format),
    type ('log'), level ('debug'), error (False), error_tb ('').
    '''
    record = dict(
        id=None,
        file=fpath,
        host=self.HOST,
        formatter=formatter,
        event='event',
        data={},
        raw=line,
        timestamp=datetime.datetime.utcnow().isoformat(),
        type='log',
        level='debug',
        error=False,
        error_tb='',
    )
    return record
|
[
"def",
"assign_default_log_values",
"(",
"self",
",",
"fpath",
",",
"line",
",",
"formatter",
")",
":",
"return",
"dict",
"(",
"id",
"=",
"None",
",",
"file",
"=",
"fpath",
",",
"host",
"=",
"self",
".",
"HOST",
",",
"formatter",
"=",
"formatter",
",",
"event",
"=",
"'event'",
",",
"data",
"=",
"{",
"}",
",",
"raw",
"=",
"line",
",",
"timestamp",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"isoformat",
"(",
")",
",",
"type",
"=",
"'log'",
",",
"level",
"=",
"'debug'",
",",
"error",
"=",
"False",
",",
"error_tb",
"=",
"''",
",",
")"
] | 31.526316
| 18.526316
|
def find_unused_port(start_port, end_port, host="127.0.0.1", socket_type="TCP", ignore_ports=None):
    """
    Finds an unused port in a range.

    :param start_port: first port in the range
    :param end_port: last port in the range (inclusive)
    :param host: host/address for bind()
    :param socket_type: TCP (default) or UDP
    :param ignore_ports: list of ports to ignore within the range
    :raises HTTPConflict: if the range is invalid or no free port was found
    """
    if end_port < start_port:
        raise HTTPConflict(text="Invalid port range {}-{}".format(start_port, end_port))
    last_exception = None
    for port in range(start_port, end_port + 1):
        # NOTE(review): BANNED_PORTS is only consulted when ignore_ports is
        # truthy — preserved as-is, but confirm this is intentional.
        if ignore_ports and (port in ignore_ports or port in BANNED_PORTS):
            continue
        try:
            PortManager._check_port(host, port, socket_type)
            # Also make sure the port is free on the wildcard address, so a
            # later bind to 0.0.0.0 cannot collide with the chosen port.
            if host != "0.0.0.0":
                PortManager._check_port("0.0.0.0", port, socket_type)
            return port
        except OSError as e:
            last_exception = e
    # Bug fix: the old code broke out when ``port + 1 == end_port``, so the
    # final port of the range was never actually tried after a failure.
    raise HTTPConflict(text="Could not find a free port between {} and {} on host {}, last exception: {}".format(start_port,
                                                                                                                end_port,
                                                                                                                host,
                                                                                                                last_exception))
|
[
"def",
"find_unused_port",
"(",
"start_port",
",",
"end_port",
",",
"host",
"=",
"\"127.0.0.1\"",
",",
"socket_type",
"=",
"\"TCP\"",
",",
"ignore_ports",
"=",
"None",
")",
":",
"if",
"end_port",
"<",
"start_port",
":",
"raise",
"HTTPConflict",
"(",
"text",
"=",
"\"Invalid port range {}-{}\"",
".",
"format",
"(",
"start_port",
",",
"end_port",
")",
")",
"last_exception",
"=",
"None",
"for",
"port",
"in",
"range",
"(",
"start_port",
",",
"end_port",
"+",
"1",
")",
":",
"if",
"ignore_ports",
"and",
"(",
"port",
"in",
"ignore_ports",
"or",
"port",
"in",
"BANNED_PORTS",
")",
":",
"continue",
"try",
":",
"PortManager",
".",
"_check_port",
"(",
"host",
",",
"port",
",",
"socket_type",
")",
"if",
"host",
"!=",
"\"0.0.0.0\"",
":",
"PortManager",
".",
"_check_port",
"(",
"\"0.0.0.0\"",
",",
"port",
",",
"socket_type",
")",
"return",
"port",
"except",
"OSError",
"as",
"e",
":",
"last_exception",
"=",
"e",
"if",
"port",
"+",
"1",
"==",
"end_port",
":",
"break",
"else",
":",
"continue",
"raise",
"HTTPConflict",
"(",
"text",
"=",
"\"Could not find a free port between {} and {} on host {}, last exception: {}\"",
".",
"format",
"(",
"start_port",
",",
"end_port",
",",
"host",
",",
"last_exception",
")",
")"
] | 46.657143
| 27.4
|
def estimate_augmented_markov_model(dtrajs, ftrajs, lag, m, sigmas,
                                    count_mode='sliding', connectivity='largest',
                                    dt_traj='1 step', maxiter=1000000, eps=0.05, maxcache=3000):
    r""" Estimates an Augmented Markov model from discrete trajectories and experimental data

    Returns a :class:`AugmentedMarkovModel` that
    contains the estimated transition matrix and allows to compute a
    large number of quantities related to Markov models.

    Parameters
    ----------
    dtrajs : list containing ndarrays(dtype=int) or ndarray(n, dtype=int)
        discrete trajectories, stored as integer ndarrays (arbitrary size)
        or a single ndarray for only one trajectory.
    ftrajs : list of trajectories of microscopic observables. Has to have
        the same shape (number of trajectories and timesteps) as dtrajs.
        Each timestep in each trajectory should match the shape of m and sigma, k.
    lag : int
        lag time at which transitions are counted and the transition matrix is
        estimated.
    m : ndarray(k)
        Experimental averages.
    sigmas : ndarray(k)
        Standard error for each experimental observable.
    count_mode : str, optional, default='sliding'
        mode to obtain count matrices from discrete trajectories. Should be
        one of:

        * 'sliding' : A trajectory of length T will have :math:`T-\tau` counts
          at time indexes

          .. math::

             (0 \rightarrow \tau), (1 \rightarrow \tau+1), ..., (T-\tau-1 \rightarrow T-1)

        * 'effective' : Uses an estimate of the transition counts that are
          statistically uncorrelated. Recommended when used with a
          Bayesian MSM.
        * 'sample' : A trajectory of length T will have :math:`T/\tau` counts
          at time indexes

          .. math::

             (0 \rightarrow \tau), (\tau \rightarrow 2 \tau), ..., (((T/\tau)-1) \tau \rightarrow T)

    connectivity : str, optional
        Connectivity mode. Three methods are intended (currently only
        'largest' is implemented)

        * 'largest' : The active set is the largest reversibly
          connected set. All estimation will be done on this subset
          and all quantities (transition matrix, stationary
          distribution, etc) are only defined on this subset and are
          correspondingly smaller than the full set of states
        * 'all' : The active set is the full set of states. Estimation
          will be conducted on each reversibly connected set
          separately. That means the transition matrix will decompose
          into disconnected submatrices, the stationary vector is only
          defined within subsets, etc. Currently not implemented.
        * 'none' : The active set is the full set of
          states. Estimation will be conducted on the full set of
          states without ensuring connectivity. This only permits
          nonreversible estimation. Currently not implemented.
    dt_traj : str, optional
        Description of the physical time corresponding to the lag. May
        be used by analysis algorithms such as plotting tools to
        pretty-print the axes. By default '1 step', i.e. there is no
        physical time unit. Specify by a number, whitespace and
        unit. Permitted units are (* is an arbitrary string):

        * 'fs', 'femtosecond*'
        * 'ps', 'picosecond*'
        * 'ns', 'nanosecond*'
        * 'us', 'microsecond*'
        * 'ms', 'millisecond*'
        * 's', 'second*'
    maxiter : int, optional
        Optional parameter with specifies the maximum number of
        updates for Lagrange multiplier estimation.
    eps : float, optional
        Additional convergence criterion used when some experimental data
        are outside the support of the simulation. The value of the eps
        parameter is the threshold of the relative change in the predicted
        observables as a function of fixed-point iteration:

        $$ \mathrm{eps} > \frac{\mid o_{\mathrm{pred}}^{(i+1)}-o_{\mathrm{pred}}^{(i)}\mid }{\sigma}. $$
    maxcache : int, optional
        Parameter which specifies the maximum size of cache used
        when performing estimation of AMM, in megabytes.

    Returns
    -------
    amm : :class:`AugmentedMarkovModel <pyemma.msm.AugmentedMarkovModel>`
        Estimator object containing the AMM and estimation information.

    See also
    --------
    AugmentedMarkovModel
        An AMM object that has been estimated from data

    .. autoclass:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
        :members:
        :undoc-members:

        .. rubric:: Methods

        .. autoautosummary:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
            :methods:

        .. rubric:: Attributes

        .. autoautosummary:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel
            :attributes:

    References
    ----------
    .. [1] Olsson S, Wu H, Paul F, Clementi C, Noe F "Combining experimental and simulation data
        of molecular processes via augmented Markov models" PNAS (2017), 114(31), pp. 8265-8270
        doi: 10.1073/pnas.1704803114
    """
    # check input
    if _np.all(sigmas>0):
        # Convert standard errors into the weights w_k = 1 / (2 sigma_k^2)
        # used by the AMM estimator.
        _w = 1./(2*sigmas**2.)
    else:
        raise ValueError('Zero or negative standard errors supplied. Please revise input')
    if ftrajs[0].ndim < 2:
        raise ValueError("Supplied feature trajectories have inappropriate dimensions (%d) should be atleast 2."%ftrajs[0].ndim)
    if len(dtrajs) != len(ftrajs):
        raise ValueError("A different number of dtrajs and ftrajs were supplied as input. They must have exactly a one-to-one correspondence.")
    elif not _np.all([len(dt)==len(ft) for dt,ft in zip(dtrajs, ftrajs)]):
        raise ValueError("One or more supplied dtraj-ftraj pairs do not have the same length.")
    else:
        # MAKE E matrix
        # E[i, :] is the average feature vector over all frames assigned to
        # Markov state i (rows ordered by iteration over set(dta)).
        dta = _np.concatenate(dtrajs)
        fta = _np.concatenate(ftrajs)
        all_markov_states = set(dta)
        _E = _np.zeros((len(all_markov_states), fta.shape[1]))
        for i, s in enumerate(all_markov_states):
            _E[i, :] = fta[_np.where(dta == s)].mean(axis = 0)
        # transition matrix estimator
        # NOTE(review): the documented ``eps`` parameter is not forwarded to
        # _ML_AMM here — confirm whether it should be passed through.
        mlamm = _ML_AMM(lag=lag, count_mode=count_mode,
                        connectivity=connectivity,
                        dt_traj=dt_traj, maxiter=maxiter, max_cache=maxcache,
                        E=_E, w=_w, m=m)
        # estimate and return
        return mlamm.estimate(dtrajs)
|
[
"def",
"estimate_augmented_markov_model",
"(",
"dtrajs",
",",
"ftrajs",
",",
"lag",
",",
"m",
",",
"sigmas",
",",
"count_mode",
"=",
"'sliding'",
",",
"connectivity",
"=",
"'largest'",
",",
"dt_traj",
"=",
"'1 step'",
",",
"maxiter",
"=",
"1000000",
",",
"eps",
"=",
"0.05",
",",
"maxcache",
"=",
"3000",
")",
":",
"# check input",
"if",
"_np",
".",
"all",
"(",
"sigmas",
">",
"0",
")",
":",
"_w",
"=",
"1.",
"/",
"(",
"2",
"*",
"sigmas",
"**",
"2.",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Zero or negative standard errors supplied. Please revise input'",
")",
"if",
"ftrajs",
"[",
"0",
"]",
".",
"ndim",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"\"Supplied feature trajectories have inappropriate dimensions (%d) should be atleast 2.\"",
"%",
"ftrajs",
"[",
"0",
"]",
".",
"ndim",
")",
"if",
"len",
"(",
"dtrajs",
")",
"!=",
"len",
"(",
"ftrajs",
")",
":",
"raise",
"ValueError",
"(",
"\"A different number of dtrajs and ftrajs were supplied as input. They must have exactly a one-to-one correspondence.\"",
")",
"elif",
"not",
"_np",
".",
"all",
"(",
"[",
"len",
"(",
"dt",
")",
"==",
"len",
"(",
"ft",
")",
"for",
"dt",
",",
"ft",
"in",
"zip",
"(",
"dtrajs",
",",
"ftrajs",
")",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"One or more supplied dtraj-ftraj pairs do not have the same length.\"",
")",
"else",
":",
"# MAKE E matrix",
"dta",
"=",
"_np",
".",
"concatenate",
"(",
"dtrajs",
")",
"fta",
"=",
"_np",
".",
"concatenate",
"(",
"ftrajs",
")",
"all_markov_states",
"=",
"set",
"(",
"dta",
")",
"_E",
"=",
"_np",
".",
"zeros",
"(",
"(",
"len",
"(",
"all_markov_states",
")",
",",
"fta",
".",
"shape",
"[",
"1",
"]",
")",
")",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"all_markov_states",
")",
":",
"_E",
"[",
"i",
",",
":",
"]",
"=",
"fta",
"[",
"_np",
".",
"where",
"(",
"dta",
"==",
"s",
")",
"]",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
"# transition matrix estimator",
"mlamm",
"=",
"_ML_AMM",
"(",
"lag",
"=",
"lag",
",",
"count_mode",
"=",
"count_mode",
",",
"connectivity",
"=",
"connectivity",
",",
"dt_traj",
"=",
"dt_traj",
",",
"maxiter",
"=",
"maxiter",
",",
"max_cache",
"=",
"maxcache",
",",
"E",
"=",
"_E",
",",
"w",
"=",
"_w",
",",
"m",
"=",
"m",
")",
"# estimate and return",
"return",
"mlamm",
".",
"estimate",
"(",
"dtrajs",
")"
] | 42.453947
| 27.006579
|
def run(self):
    """ Append version number to lsqfit/__init__.py """
    version_line = "\n__version__ = '%s'\n" % LSQFIT_VERSION
    with open('src/lsqfit/__init__.py', 'a') as lsfile:
        lsfile.write(version_line)
    _build_py.run(self)
|
[
"def",
"run",
"(",
"self",
")",
":",
"with",
"open",
"(",
"'src/lsqfit/__init__.py'",
",",
"'a'",
")",
"as",
"lsfile",
":",
"lsfile",
".",
"write",
"(",
"\"\\n__version__ = '%s'\\n\"",
"%",
"LSQFIT_VERSION",
")",
"_build_py",
".",
"run",
"(",
"self",
")"
] | 45.2
| 17
|
def _get_build_type(fnames, samples, caller):
    """Confirm we should build a gemini database: need gemini in tools_on.

    Checks for valid conditions for running a database and gemini or gemini_orig
    configured in tools on. Returns the set of database types to build
    (possibly empty).
    """
    build_type = set()
    has_variants = any(vcfutils.vcf_has_variants(f) for f in fnames)
    if not has_variants or caller in NO_DB_CALLERS:
        logger.info("Not running gemini, no samples with variants found: %s" %
                    (", ".join([dd.get_sample_name(d) for d in samples])))
        return build_type
    gemini_flags = ["gemini", "gemini_orig", "gemini_allvariants", "vcf2db_expand"]
    for data in samples:
        tools_on = dd.get_tools_on(data)
        if not any(flag in tools_on for flag in gemini_flags):
            logger.info("Not running gemini, not configured in tools_on: %s" % dd.get_sample_name(data))
        elif vcfanno.annotate_gemini(data):
            build_type.add("gemini_orig" if "gemini_orig" in tools_on else "gemini")
        else:
            logger.info("Not running gemini, input data not found: %s" % dd.get_sample_name(data))
    return build_type
|
[
"def",
"_get_build_type",
"(",
"fnames",
",",
"samples",
",",
"caller",
")",
":",
"build_type",
"=",
"set",
"(",
")",
"if",
"any",
"(",
"vcfutils",
".",
"vcf_has_variants",
"(",
"f",
")",
"for",
"f",
"in",
"fnames",
")",
"and",
"caller",
"not",
"in",
"NO_DB_CALLERS",
":",
"for",
"data",
"in",
"samples",
":",
"if",
"any",
"(",
"[",
"x",
"in",
"dd",
".",
"get_tools_on",
"(",
"data",
")",
"for",
"x",
"in",
"[",
"\"gemini\"",
",",
"\"gemini_orig\"",
",",
"\"gemini_allvariants\"",
",",
"\"vcf2db_expand\"",
"]",
"]",
")",
":",
"if",
"vcfanno",
".",
"annotate_gemini",
"(",
"data",
")",
":",
"build_type",
".",
"add",
"(",
"\"gemini_orig\"",
"if",
"\"gemini_orig\"",
"in",
"dd",
".",
"get_tools_on",
"(",
"data",
")",
"else",
"\"gemini\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Not running gemini, input data not found: %s\"",
"%",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Not running gemini, not configured in tools_on: %s\"",
"%",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Not running gemini, no samples with variants found: %s\"",
"%",
"(",
"\", \"",
".",
"join",
"(",
"[",
"dd",
".",
"get_sample_name",
"(",
"d",
")",
"for",
"d",
"in",
"samples",
"]",
")",
")",
")",
"return",
"build_type"
] | 52.47619
| 29.095238
|
def apply(self):
    """Apply the rules of the context to its occurrences.

    Runs every function listed in ``self.tasks``, in order, passing the
    Context object itself as the only argument. A deep copy of the
    occurrences is taken before any task runs and restored afterwards, so
    the original Occurrence instances are left unmodified; contextualized
    results are expected to be stored in ``Context.contextualized``.
    """
    pristine = copy.deepcopy(self.occurrences)
    for current_task in self.tasks:
        current_task(self)
    self.occurrences = pristine
|
[
"def",
"apply",
"(",
"self",
")",
":",
"raw_operations",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"occurrences",
")",
"for",
"task",
"in",
"self",
".",
"tasks",
":",
"task",
"(",
"self",
")",
"self",
".",
"occurrences",
"=",
"raw_operations"
] | 33.555556
| 18.444444
|
def _add_dot_key(self, section, key=None):
"""
:param str section: Config section
:param str key: Config key
"""
if key:
self._dot_keys[self._to_dot_key(section, key)] = (section, key)
else:
self._dot_keys[self._to_dot_key(section)] = section
|
[
"def",
"_add_dot_key",
"(",
"self",
",",
"section",
",",
"key",
"=",
"None",
")",
":",
"if",
"key",
":",
"self",
".",
"_dot_keys",
"[",
"self",
".",
"_to_dot_key",
"(",
"section",
",",
"key",
")",
"]",
"=",
"(",
"section",
",",
"key",
")",
"else",
":",
"self",
".",
"_dot_keys",
"[",
"self",
".",
"_to_dot_key",
"(",
"section",
")",
"]",
"=",
"section"
] | 34
| 13.333333
|
def triangle_areas(p1, p2, p3):
    """Compute an array of triangle areas given three arrays of triangle pts

    p1,p2,p3 - three Nx2 arrays of points

    Returns an ndarray of N non-negative areas computed as the 2-D cross
    product |v1 x v2| / 2 with v1 = p2 - p1 and v2 = p3 - p1. Areas below
    float32 epsilon are clamped to 0 to absorb round-off error.
    """
    # Bug fix: ``np.float`` was removed in NumPy 1.24; use the explicit
    # float64 dtype instead.
    v1 = (p2 - p1).astype(np.float64)
    v2 = (p3 - p1).astype(np.float64)
    # Memory-reduced form of: a = (v1[:,1]*v2[:,0] - v2[:,1]*v1[:,0]) / 2
    # (in-place ops avoid extra N-length temporaries).
    cross1 = v1[:, 1]
    cross1 *= v2[:, 0]
    cross2 = v2[:, 1]
    cross2 *= v1[:, 0]
    a = cross1
    a -= cross2
    a /= 2.0
    del v1, v2, cross1, cross2
    a = a.copy()  # a is a view on v1; shed one dimension.
    a = np.abs(a)
    #
    # Handle small round-off errors
    #
    a[a < np.finfo(np.float32).eps] = 0
    return a
|
[
"def",
"triangle_areas",
"(",
"p1",
",",
"p2",
",",
"p3",
")",
":",
"v1",
"=",
"(",
"p2",
"-",
"p1",
")",
".",
"astype",
"(",
"np",
".",
"float",
")",
"v2",
"=",
"(",
"p3",
"-",
"p1",
")",
".",
"astype",
"(",
"np",
".",
"float",
")",
"# Original:",
"# cross1 = v1[:,1] * v2[:,0]",
"# cross2 = v2[:,1] * v1[:,0]",
"# a = (cross1-cross2) / 2",
"# Memory reduced:",
"cross1",
"=",
"v1",
"[",
":",
",",
"1",
"]",
"cross1",
"*=",
"v2",
"[",
":",
",",
"0",
"]",
"cross2",
"=",
"v2",
"[",
":",
",",
"1",
"]",
"cross2",
"*=",
"v1",
"[",
":",
",",
"0",
"]",
"a",
"=",
"cross1",
"a",
"-=",
"cross2",
"a",
"/=",
"2.0",
"del",
"v1",
",",
"v2",
",",
"cross1",
",",
"cross2",
"a",
"=",
"a",
".",
"copy",
"(",
")",
"# a is a view on v1; shed one dimension.",
"a",
"=",
"np",
".",
"abs",
"(",
"a",
")",
"#",
"# Handle small round-off errors",
"#",
"a",
"[",
"a",
"<",
"np",
".",
"finfo",
"(",
"np",
".",
"float32",
")",
".",
"eps",
"]",
"=",
"0",
"return",
"a"
] | 25.481481
| 16.037037
|
def get_all_groups(path_prefix='/', region=None, key=None, keyid=None,
                   profile=None):
    '''
    Get and return all IAM group details, starting at the optional path.
    Follows the pagination marker until every page has been fetched.

    .. versionadded:: 2016.3.0

    CLI Example:

        salt-call boto_iam.get_all_groups
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if not conn:
        return None
    first_page = conn.get_all_groups(path_prefix=path_prefix)
    result = first_page.list_groups_response.list_groups_result
    groups = result.groups
    marker = getattr(result, 'marker', None)
    while marker:
        page = conn.get_all_groups(path_prefix=path_prefix, marker=marker)
        result = page.list_groups_response.list_groups_result
        groups = groups + result.groups
        marker = getattr(result, 'marker', None)
    return groups
|
[
"def",
"get_all_groups",
"(",
"path_prefix",
"=",
"'/'",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"not",
"conn",
":",
"return",
"None",
"_groups",
"=",
"conn",
".",
"get_all_groups",
"(",
"path_prefix",
"=",
"path_prefix",
")",
"groups",
"=",
"_groups",
".",
"list_groups_response",
".",
"list_groups_result",
".",
"groups",
"marker",
"=",
"getattr",
"(",
"_groups",
".",
"list_groups_response",
".",
"list_groups_result",
",",
"'marker'",
",",
"None",
")",
"while",
"marker",
":",
"_groups",
"=",
"conn",
".",
"get_all_groups",
"(",
"path_prefix",
"=",
"path_prefix",
",",
"marker",
"=",
"marker",
")",
"groups",
"=",
"groups",
"+",
"_groups",
".",
"list_groups_response",
".",
"list_groups_result",
".",
"groups",
"marker",
"=",
"getattr",
"(",
"_groups",
".",
"list_groups_response",
".",
"list_groups_result",
",",
"'marker'",
",",
"None",
")",
"return",
"groups"
] | 34.807692
| 27.115385
|
def fire_event(self, event_name, wait=False, *args, **kwargs):
    """
    Fire an event to plugins.

    PluginManager schedules @asyncio.coroutine calls for each plugin on the
    method named "on_" + event_name. For example, on_connect will be called
    on event 'connect'. Method calls are scheduled in the async loop; set
    ``wait=True`` to wait until all methods have completed.

    :param event_name: event suffix; the plugin hook is ``on_<event_name>``
    :param args: positional arguments forwarded to each hook
    :param kwargs: keyword arguments forwarded to each hook
    :param wait: indicates if fire_event should wait for plugin calls completion (True), or not
    :return:
    """
    tasks = []
    event_method_name = "on_" + event_name
    for plugin in self._plugins:
        event_method = getattr(plugin.object, event_method_name, None)
        if event_method:
            try:
                task = self._schedule_coro(event_method(*args, **kwargs))
                tasks.append(task)

                # Bug fix: bind ``task`` as a default argument. The old code
                # let the closure late-bind the loop variable, so every
                # done-callback removed only the task from the final
                # iteration instead of its own task.
                def clean_fired_events(future, task=task):
                    try:
                        self._fired_events.remove(task)
                    except (KeyError, ValueError):
                        pass
                task.add_done_callback(clean_fired_events)
            except AssertionError:
                self.logger.error("Method '%s' on plugin '%s' is not a coroutine" %
                                  (event_method_name, plugin.name))

    self._fired_events.extend(tasks)
    if wait:
        if tasks:
            yield from asyncio.wait(tasks, loop=self._loop)
|
[
"def",
"fire_event",
"(",
"self",
",",
"event_name",
",",
"wait",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"tasks",
"=",
"[",
"]",
"event_method_name",
"=",
"\"on_\"",
"+",
"event_name",
"for",
"plugin",
"in",
"self",
".",
"_plugins",
":",
"event_method",
"=",
"getattr",
"(",
"plugin",
".",
"object",
",",
"event_method_name",
",",
"None",
")",
"if",
"event_method",
":",
"try",
":",
"task",
"=",
"self",
".",
"_schedule_coro",
"(",
"event_method",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"tasks",
".",
"append",
"(",
"task",
")",
"def",
"clean_fired_events",
"(",
"future",
")",
":",
"try",
":",
"self",
".",
"_fired_events",
".",
"remove",
"(",
"task",
")",
"except",
"(",
"KeyError",
",",
"ValueError",
")",
":",
"pass",
"task",
".",
"add_done_callback",
"(",
"clean_fired_events",
")",
"except",
"AssertionError",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"Method '%s' on plugin '%s' is not a coroutine\"",
"%",
"(",
"event_method_name",
",",
"plugin",
".",
"name",
")",
")",
"self",
".",
"_fired_events",
".",
"extend",
"(",
"tasks",
")",
"if",
"wait",
":",
"if",
"tasks",
":",
"yield",
"from",
"asyncio",
".",
"wait",
"(",
"tasks",
",",
"loop",
"=",
"self",
".",
"_loop",
")"
] | 42.27027
| 22.108108
|
def from_definition(self, table: Table, version: int):
    """Register *table* and add every column it gained in *version*.

    Returns self to allow fluent chaining.
    """
    self.table(table)
    new_columns = table.columns.get_with_version(version)
    self.add_columns(*new_columns)
    return self
|
[
"def",
"from_definition",
"(",
"self",
",",
"table",
":",
"Table",
",",
"version",
":",
"int",
")",
":",
"self",
".",
"table",
"(",
"table",
")",
"self",
".",
"add_columns",
"(",
"*",
"table",
".",
"columns",
".",
"get_with_version",
"(",
"version",
")",
")",
"return",
"self"
] | 47.8
| 15.2
|
def restbase(self, endpoint, title):
    """
    Returns RESTBase query string for the given endpoint and title.
    Raises ValueError when endpoint is empty.
    """
    if not endpoint:
        raise ValueError("invalid endpoint: %s" % endpoint)
    if title and endpoint != '/page/':
        route = endpoint + safequote_restbase(title)
    else:
        route = endpoint
    self.set_status('restbase', route)
    return "%s/api/rest_v1/%s" % (self.uri, route[1:])
|
[
"def",
"restbase",
"(",
"self",
",",
"endpoint",
",",
"title",
")",
":",
"if",
"not",
"endpoint",
":",
"raise",
"ValueError",
"(",
"\"invalid endpoint: %s\"",
"%",
"endpoint",
")",
"route",
"=",
"endpoint",
"if",
"title",
"and",
"endpoint",
"!=",
"'/page/'",
":",
"route",
"=",
"endpoint",
"+",
"safequote_restbase",
"(",
"title",
")",
"self",
".",
"set_status",
"(",
"'restbase'",
",",
"route",
")",
"return",
"\"%s/api/rest_v1/%s\"",
"%",
"(",
"self",
".",
"uri",
",",
"route",
"[",
"1",
":",
"]",
")"
] | 28.857143
| 15.714286
|
def validate_event(payload: bytes, *, signature: str, secret: str) -> None:
    """Validate the signature of a webhook event."""
    # https://developer.github.com/webhooks/securing/#validating-payloads-from-github
    expected_prefix = "sha1="
    if not signature.startswith(expected_prefix):
        raise ValidationFailure("signature does not start with "
                                f"{repr(expected_prefix)}")
    digest = hmac.new(secret.encode("UTF-8"), msg=payload,
                      digestmod="sha1").hexdigest()
    if not hmac.compare_digest(signature, expected_prefix + digest):
        raise ValidationFailure("payload's signature does not align "
                                "with the secret")
|
[
"def",
"validate_event",
"(",
"payload",
":",
"bytes",
",",
"*",
",",
"signature",
":",
"str",
",",
"secret",
":",
"str",
")",
"->",
"None",
":",
"# https://developer.github.com/webhooks/securing/#validating-payloads-from-github",
"signature_prefix",
"=",
"\"sha1=\"",
"if",
"not",
"signature",
".",
"startswith",
"(",
"signature_prefix",
")",
":",
"raise",
"ValidationFailure",
"(",
"\"signature does not start with \"",
"f\"{repr(signature_prefix)}\"",
")",
"hmac_",
"=",
"hmac",
".",
"new",
"(",
"secret",
".",
"encode",
"(",
"\"UTF-8\"",
")",
",",
"msg",
"=",
"payload",
",",
"digestmod",
"=",
"\"sha1\"",
")",
"calculated_sig",
"=",
"signature_prefix",
"+",
"hmac_",
".",
"hexdigest",
"(",
")",
"if",
"not",
"hmac",
".",
"compare_digest",
"(",
"signature",
",",
"calculated_sig",
")",
":",
"raise",
"ValidationFailure",
"(",
"\"payload's signature does not align \"",
"\"with the secret\"",
")"
] | 62.25
| 22.916667
|
def format_title(self, format='html5', deparagraph=True, mathjax=False,
                 smart=True, extra_args=None):
    """Convert the document title into the requested markup format.

    Parameters
    ----------
    format : `str`, optional
        Output format (such as ``'html5'`` or ``'plain'``).
    deparagraph : `bool`, optional
        Remove the paragraph tags from single paragraph content.
    mathjax : `bool`, optional
        Allow pandoc to use MathJax math markup.
    smart : `True`, optional
        Allow pandoc to create "smart" unicode punctuation.
    extra_args : `list`, optional
        Additional command line flags to pass to Pandoc. See
        `lsstprojectmeta.pandoc.convert.convert_text`.

    Returns
    -------
    output_text : `str`
        Converted content or `None` if the title is not available in
        the document.
    """
    # Nothing to convert when the document has no title.
    if self.title is None:
        return None
    return convert_lsstdoc_tex(
        self.title, format,
        deparagraph=deparagraph,
        mathjax=mathjax,
        smart=smart,
        extra_args=extra_args)
|
[
"def",
"format_title",
"(",
"self",
",",
"format",
"=",
"'html5'",
",",
"deparagraph",
"=",
"True",
",",
"mathjax",
"=",
"False",
",",
"smart",
"=",
"True",
",",
"extra_args",
"=",
"None",
")",
":",
"if",
"self",
".",
"title",
"is",
"None",
":",
"return",
"None",
"output_text",
"=",
"convert_lsstdoc_tex",
"(",
"self",
".",
"title",
",",
"format",
",",
"deparagraph",
"=",
"deparagraph",
",",
"mathjax",
"=",
"mathjax",
",",
"smart",
"=",
"smart",
",",
"extra_args",
"=",
"extra_args",
")",
"return",
"output_text"
] | 35.352941
| 16.470588
|
def is_training_name(name):
    """
    **Guess** if this variable is only used in training.
    Only used internally to avoid too many logging. Do not use it.
    """
    # TODO: maybe simply check against TRAINABLE_VARIABLES and MODEL_VARIABLES?
    # TODO or use get_slot_names()
    name = get_op_tensor_name(name)[0]
    # Optimizer slot variables created by the common TF optimizers.
    # str.endswith accepts a tuple, replacing the previous chain of
    # eight separate endswith() checks with a single call.
    if name.endswith(('/Adam', '/Adam_1', '/Momentum',
                      '/Adadelta', '/Adadelta_1',
                      '/RMSProp', '/RMSProp_1', '/Adagrad')):
        return True
    if name.startswith('EMA/') or '/EMA/' in name:  # all the moving average summaries
        return True
    if name.startswith('AccumGrad') or name.endswith('/AccumGrad'):
        return True
    if name.startswith('apply_gradients'):
        return True
    return False
|
[
"def",
"is_training_name",
"(",
"name",
")",
":",
"# TODO: maybe simply check against TRAINABLE_VARIABLES and MODEL_VARIABLES?",
"# TODO or use get_slot_names()",
"name",
"=",
"get_op_tensor_name",
"(",
"name",
")",
"[",
"0",
"]",
"if",
"name",
".",
"endswith",
"(",
"'/Adam'",
")",
"or",
"name",
".",
"endswith",
"(",
"'/Adam_1'",
")",
":",
"return",
"True",
"if",
"name",
".",
"endswith",
"(",
"'/Momentum'",
")",
":",
"return",
"True",
"if",
"name",
".",
"endswith",
"(",
"'/Adadelta'",
")",
"or",
"name",
".",
"endswith",
"(",
"'/Adadelta_1'",
")",
":",
"return",
"True",
"if",
"name",
".",
"endswith",
"(",
"'/RMSProp'",
")",
"or",
"name",
".",
"endswith",
"(",
"'/RMSProp_1'",
")",
":",
"return",
"True",
"if",
"name",
".",
"endswith",
"(",
"'/Adagrad'",
")",
":",
"return",
"True",
"if",
"name",
".",
"startswith",
"(",
"'EMA/'",
")",
"or",
"'/EMA/'",
"in",
"name",
":",
"# all the moving average summaries",
"return",
"True",
"if",
"name",
".",
"startswith",
"(",
"'AccumGrad'",
")",
"or",
"name",
".",
"endswith",
"(",
"'/AccumGrad'",
")",
":",
"return",
"True",
"if",
"name",
".",
"startswith",
"(",
"'apply_gradients'",
")",
":",
"return",
"True",
"return",
"False"
] | 37.28
| 18
|
def sync_required(func):
    """Decorate methods when synchronizing repository is required."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # When synchronization is disabled, simply delegate.
        if not self._keepSynchronized:
            return func(self, *args, **kwargs)
        state = self._load_state()
        #print("-----------> ",state, self.state)
        # Proceed when no persisted state exists or it matches ours.
        if state is None or state == self.state:
            return func(self, *args, **kwargs)
        warnings.warn("Repository at '%s' is out of date. Need to load it again to avoid conflict."%self.path)
        return None
    return wrapper
|
[
"def",
"sync_required",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"_keepSynchronized",
":",
"r",
"=",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"state",
"=",
"self",
".",
"_load_state",
"(",
")",
"#print(\"-----------> \",state, self.state)",
"if",
"state",
"is",
"None",
":",
"r",
"=",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"state",
"==",
"self",
".",
"state",
":",
"r",
"=",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"Repository at '%s' is out of date. Need to load it again to avoid conflict.\"",
"%",
"self",
".",
"path",
")",
"r",
"=",
"None",
"return",
"r",
"return",
"wrapper"
] | 38.166667
| 15.555556
|
def nodes_with_tag(tag):
    """Sets a list of nodes that have the given tag assigned and calls node()"""
    tagged = lib.get_nodes_with_tag(tag, env.chef_environment,
                                    littlechef.include_guests)
    names = [entry['name'] for entry in tagged]
    # Bail out cleanly when the tag matched nothing.
    if not len(names):
        print("No nodes found with tag '{0}'".format(tag))
        sys.exit(0)
    return node(*names)
|
[
"def",
"nodes_with_tag",
"(",
"tag",
")",
":",
"nodes",
"=",
"lib",
".",
"get_nodes_with_tag",
"(",
"tag",
",",
"env",
".",
"chef_environment",
",",
"littlechef",
".",
"include_guests",
")",
"nodes",
"=",
"[",
"n",
"[",
"'name'",
"]",
"for",
"n",
"in",
"nodes",
"]",
"if",
"not",
"len",
"(",
"nodes",
")",
":",
"print",
"(",
"\"No nodes found with tag '{0}'\"",
".",
"format",
"(",
"tag",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"return",
"node",
"(",
"*",
"nodes",
")"
] | 42.888889
| 14.888889
|
def distribute_libs(self, arch, src_dirs, wildcard='*', dest_dir="libs"):
    '''Copy existing arch libs from build dirs to current dist dir.'''
    info('Copying libs')
    target = join(dest_dir, arch.arch)
    ensure_dir(target)
    # Copy every matching lib from each source dir, preserving attributes (-a).
    for directory in src_dirs:
        for lib in glob.glob(join(directory, wildcard)):
            shprint(sh.cp, '-a', lib, target)
|
[
"def",
"distribute_libs",
"(",
"self",
",",
"arch",
",",
"src_dirs",
",",
"wildcard",
"=",
"'*'",
",",
"dest_dir",
"=",
"\"libs\"",
")",
":",
"info",
"(",
"'Copying libs'",
")",
"tgt_dir",
"=",
"join",
"(",
"dest_dir",
",",
"arch",
".",
"arch",
")",
"ensure_dir",
"(",
"tgt_dir",
")",
"for",
"src_dir",
"in",
"src_dirs",
":",
"for",
"lib",
"in",
"glob",
".",
"glob",
"(",
"join",
"(",
"src_dir",
",",
"wildcard",
")",
")",
":",
"shprint",
"(",
"sh",
".",
"cp",
",",
"'-a'",
",",
"lib",
",",
"tgt_dir",
")"
] | 48.125
| 16.375
|
def alias(name, class_object):
    """Create a *registered* alias of ``class_object``.

    A plain assignment (``class_b = class_a``) creates an alias, but when
    ``class_a`` is registered by its metaclass the new name is not.
    ``alias('class_b', class_a)`` is equivalent to::

        class_b = class_a
        Registry['class_b'] = class_a
    """
    owner_module = inspect.getmodule(class_object)
    owner_module.__dict__[name] = class_object
    # Only register the alias when the object participates in the Registry.
    if isinstance(class_object, Registry):
        Registry[name] = class_object
|
[
"def",
"alias",
"(",
"name",
",",
"class_object",
")",
":",
"module",
"=",
"inspect",
".",
"getmodule",
"(",
"class_object",
")",
"module",
".",
"__dict__",
"[",
"name",
"]",
"=",
"class_object",
"if",
"isinstance",
"(",
"class_object",
",",
"Registry",
")",
":",
"Registry",
"[",
"name",
"]",
"=",
"class_object"
] | 25.125
| 14.625
|
def part_channel(self, channel, reason=None, tags=None):
    """Part the given channel."""
    # An optional part reason is appended after the channel name.
    args = [channel]
    if reason:
        args.append(reason)
    self.send('PART', params=args, tags=tags)
|
[
"def",
"part_channel",
"(",
"self",
",",
"channel",
",",
"reason",
"=",
"None",
",",
"tags",
"=",
"None",
")",
":",
"params",
"=",
"[",
"channel",
"]",
"if",
"reason",
":",
"params",
".",
"append",
"(",
"reason",
")",
"self",
".",
"send",
"(",
"'PART'",
",",
"params",
"=",
"params",
",",
"tags",
"=",
"tags",
")"
] | 36.833333
| 11.666667
|
def envars_to_markdown(envars, title="Environment"):
    '''generate a markdown list of a list of environment variable tuples

    Parameters
    ==========
    title: A title for the section (defaults to "Environment"
    envars: a list of tuples for the environment, e.g.:
         [('TERM', 'xterm-256color'),
          ('SHELL', '/bin/bash'),
          ('USER', 'vanessa'),
          ('LD_LIBRARY_PATH', ':/usr/local/pulse')]
    '''
    markdown = ''
    # NOTE: the membership test (rather than plain truthiness) is kept so
    # that unusual falsy inputs behave exactly as before.
    if envars not in [None, '', []]:
        pieces = ['\n## %s\n' % title]
        for envar in envars:
            pieces.append(' - **%s**: %s\n' % (envar[0], envar[1]))
        markdown = ''.join(pieces)
    return markdown
|
[
"def",
"envars_to_markdown",
"(",
"envars",
",",
"title",
"=",
"\"Environment\"",
")",
":",
"markdown",
"=",
"''",
"if",
"envars",
"not",
"in",
"[",
"None",
",",
"''",
",",
"[",
"]",
"]",
":",
"markdown",
"+=",
"'\\n## %s\\n'",
"%",
"title",
"for",
"envar",
"in",
"envars",
":",
"markdown",
"+=",
"' - **%s**: %s\\n'",
"%",
"(",
"envar",
"[",
"0",
"]",
",",
"envar",
"[",
"1",
"]",
")",
"return",
"markdown"
] | 32.7
| 19.8
|
def _validate_json_for_regular_workflow(json_spec, args):
    """
    Validates fields used only for building a regular, project-based workflow.
    """
    # Command-line destination overrides take precedence over the JSON spec.
    project_override, folder_override, name_override = \
        dxpy.executable_builder.get_parsed_destination(args.destination)
    validated = {
        'project': _get_destination_project(json_spec, args, project_override),
        'folder': _get_destination_folder(json_spec, folder_override),
    }
    workflow_name = _get_workflow_name(json_spec, name_override)
    if workflow_name:
        validated['name'] = workflow_name
    else:
        print('Warning: workflow name is not specified')
    return validated
|
[
"def",
"_validate_json_for_regular_workflow",
"(",
"json_spec",
",",
"args",
")",
":",
"validated",
"=",
"{",
"}",
"override_project_id",
",",
"override_folder",
",",
"override_workflow_name",
"=",
"dxpy",
".",
"executable_builder",
".",
"get_parsed_destination",
"(",
"args",
".",
"destination",
")",
"validated",
"[",
"'project'",
"]",
"=",
"_get_destination_project",
"(",
"json_spec",
",",
"args",
",",
"override_project_id",
")",
"validated",
"[",
"'folder'",
"]",
"=",
"_get_destination_folder",
"(",
"json_spec",
",",
"override_folder",
")",
"workflow_name",
"=",
"_get_workflow_name",
"(",
"json_spec",
",",
"override_workflow_name",
")",
"if",
"not",
"workflow_name",
":",
"print",
"(",
"'Warning: workflow name is not specified'",
")",
"else",
":",
"validated",
"[",
"'name'",
"]",
"=",
"workflow_name",
"return",
"validated"
] | 43.5625
| 23.6875
|
def inheritdoc(parent):
    """Inherit documentation from a parent

    Parameters
    ----------
    parent : callable
        The parent function or class that contains the sought-after
        docstring. If it doesn't have a docstring, this might behave
        in unexpected ways.

    Examples
    --------
    >>> def a(x=1):
    ...     '''This is documentation'''
    ...     return x
    ...
    >>> @inheritdoc(a)
    ... def b(x):
    ...     return 2 * a(x)
    ...
    >>> print(b.__doc__)
    This is documentation
    >>> print(b(2))
    4
    """
    def decorator(func):
        # Copy the parent's docstring onto the child first, so that
        # functools.wraps propagates it to the wrapper below.
        func.__doc__ = parent.__doc__

        @wraps(func)
        def inner(*args, **kwargs):
            return func(*args, **kwargs)
        return inner
    return decorator
|
[
"def",
"inheritdoc",
"(",
"parent",
")",
":",
"def",
"wrapper",
"(",
"func",
")",
":",
"# Assign the parent docstring to the child",
"func",
".",
"__doc__",
"=",
"parent",
".",
"__doc__",
"@",
"wraps",
"(",
"func",
")",
"def",
"caller",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"caller",
"return",
"wrapper"
] | 22.571429
| 20.257143
|
def iam_configuration(self):
    """Retrieve IAM configuration for this bucket.

    :rtype: :class:`IAMConfiguration`
    :returns: an instance for managing the bucket's IAM configuration.
    """
    # Fall back to an empty mapping when the server sent no configuration.
    raw = self._properties.get("iamConfiguration", {})
    return IAMConfiguration.from_api_repr(raw, self)
|
[
"def",
"iam_configuration",
"(",
"self",
")",
":",
"info",
"=",
"self",
".",
"_properties",
".",
"get",
"(",
"\"iamConfiguration\"",
",",
"{",
"}",
")",
"return",
"IAMConfiguration",
".",
"from_api_repr",
"(",
"info",
",",
"self",
")"
] | 40.5
| 15.375
|
def _ParseEntry(self, key, val):
"""Adds an entry for a configuration setting.
Args:
key: The name of the setting.
val: The value of the setting.
"""
if key in self._repeated:
setting = self.section.setdefault(key, [])
setting.extend(val)
else:
self.section.setdefault(key, val)
|
[
"def",
"_ParseEntry",
"(",
"self",
",",
"key",
",",
"val",
")",
":",
"if",
"key",
"in",
"self",
".",
"_repeated",
":",
"setting",
"=",
"self",
".",
"section",
".",
"setdefault",
"(",
"key",
",",
"[",
"]",
")",
"setting",
".",
"extend",
"(",
"val",
")",
"else",
":",
"self",
".",
"section",
".",
"setdefault",
"(",
"key",
",",
"val",
")"
] | 26.5
| 12.833333
|
def _readFile(self, sldir, fileName, sep):
    '''
    Private method that reads in the header and column data.

    Parameters
    ----------
    sldir : str
        Directory containing the file; joined with ``fileName``.
    fileName : str
        Name of the file to read.
    sep : str
        Column separator used to split the column-name line
        (non-trajectory files only).

    Returns
    -------
    header, dataCols : (list or dict, dict)
        ``header`` is a list of header lines, or a dict of key/value
        pairs when ``self.datatype == 'trajectory'``.  ``dataCols`` maps
        each column name to an array of its values (floats where
        parseable, otherwise the raw strings).
    '''
    # Build the full path, avoiding a doubled separator.
    if sldir.endswith(os.sep):
        fileName = str(sldir)+str(fileName)
    else:
        fileName = str(sldir)+os.sep+str(fileName)
    fileLines=[] #list of lines in the file
    header=[] #list of Header lines
    dataCols=[] #Dictionary of data column names
    data=[] #List of Data lists
    cols=[] #List of column names
    f=open(fileName,'r')
    fileLines=f.readlines()
    i=0
    # Two header layouts: generic files use self.header_char-prefixed
    # header lines followed by one column-name line; trajectory files
    # mix '#' comment lines and 'key = value' lines.
    if self.datatype != 'trajectory':
        # Collect leading header lines until the first non-header line.
        while i<len(fileLines):
            if fileLines[i].startswith(self.header_char):
                tmp=fileLines[i].lstrip(self.header_char)
                header.append(tmp.strip())
            else:
                break
            i+=1
        # The first non-header line names the columns.
        cols=fileLines[i].split(sep)
        tmp=[]
        tmp1=[]
        # Drop empty entries produced by the split.
        for j in range(len(cols)):
            tmp1=cols[j].strip()
            if tmp1 !='':
                tmp.append(tmp1)
        cols=tmp
        i+=1
    else:
        header={}
        while fileLines[i].startswith('#') or '=' in fileLines[i]:
            # First '#' line holds the column names; later ones are
            # free-form header comments.
            if fileLines[i].startswith('#') and cols==[]:
                cols=fileLines[i].strip('#')
                cols=cols.strip()
                cols=cols.split()
            elif fileLines[i].startswith('#'):
                tmp1=fileLines[i].strip('#')
                tmp1=tmp1.strip()
                self.headerLines.append(tmp1)
            elif not fileLines[i].startswith('#'):
                # 'key = value' line: store in the header dict.
                tmp=fileLines[i].split('=')
                tmp[0]=tmp[0].strip()
                tmp[1]=tmp[1].strip()
                if header=={}:
                    header={str(tmp[0]):str(tmp[1])}
                else:
                    header[str(tmp[0])]=str(tmp[1])
            i+=1
    # Read the data rows, skipping any interspersed comment lines.
    while i<len(fileLines):
        if fileLines[i].startswith('#'):
            i=i+1
        else:
            tmp=fileLines[i].split()
            for j in range(len(tmp)):
                tmp[j]=tmp[j].strip()
            data.append(tmp)
            i+=1
    tmp=[]
    tmp1=[]
    # Remove empty fields from each data row.
    for j in range(len(data)):
        for k in range(len(data[j])):
            tmp1=data[j][k].strip()
            if tmp1 !='':
                tmp.append(tmp1)
        data[j]=tmp
        tmp=[]
    tmp=[]
    # Transpose rows into per-column arrays, converting to float where
    # possible and keeping the raw string otherwise.
    for j in range(len(cols)):
        for k in range(len(data)):
            try:
                a=float(data[k][j])
                tmp.append(a)
            except ValueError:
                tmp.append(data[k][j])
            #else:
            #    tmp.append(float(data[k][j])) # previously tmp.append(float(data[k][j]))
        tmp=array(tmp)
        if j == 0:
            dataCols={cols[j]:tmp}
        else:
            dataCols[cols[j]]=tmp
        tmp=[]
    return header,dataCols
|
[
"def",
"_readFile",
"(",
"self",
",",
"sldir",
",",
"fileName",
",",
"sep",
")",
":",
"if",
"sldir",
".",
"endswith",
"(",
"os",
".",
"sep",
")",
":",
"fileName",
"=",
"str",
"(",
"sldir",
")",
"+",
"str",
"(",
"fileName",
")",
"else",
":",
"fileName",
"=",
"str",
"(",
"sldir",
")",
"+",
"os",
".",
"sep",
"+",
"str",
"(",
"fileName",
")",
"fileLines",
"=",
"[",
"]",
"#list of lines in the file",
"header",
"=",
"[",
"]",
"#list of Header lines",
"dataCols",
"=",
"[",
"]",
"#Dictionary of data column names",
"data",
"=",
"[",
"]",
"#List of Data lists",
"cols",
"=",
"[",
"]",
"#List of column names",
"f",
"=",
"open",
"(",
"fileName",
",",
"'r'",
")",
"fileLines",
"=",
"f",
".",
"readlines",
"(",
")",
"i",
"=",
"0",
"if",
"self",
".",
"datatype",
"!=",
"'trajectory'",
":",
"while",
"i",
"<",
"len",
"(",
"fileLines",
")",
":",
"if",
"fileLines",
"[",
"i",
"]",
".",
"startswith",
"(",
"self",
".",
"header_char",
")",
":",
"tmp",
"=",
"fileLines",
"[",
"i",
"]",
".",
"lstrip",
"(",
"self",
".",
"header_char",
")",
"header",
".",
"append",
"(",
"tmp",
".",
"strip",
"(",
")",
")",
"else",
":",
"break",
"i",
"+=",
"1",
"cols",
"=",
"fileLines",
"[",
"i",
"]",
".",
"split",
"(",
"sep",
")",
"tmp",
"=",
"[",
"]",
"tmp1",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"cols",
")",
")",
":",
"tmp1",
"=",
"cols",
"[",
"j",
"]",
".",
"strip",
"(",
")",
"if",
"tmp1",
"!=",
"''",
":",
"tmp",
".",
"append",
"(",
"tmp1",
")",
"cols",
"=",
"tmp",
"i",
"+=",
"1",
"else",
":",
"header",
"=",
"{",
"}",
"while",
"fileLines",
"[",
"i",
"]",
".",
"startswith",
"(",
"'#'",
")",
"or",
"'='",
"in",
"fileLines",
"[",
"i",
"]",
":",
"if",
"fileLines",
"[",
"i",
"]",
".",
"startswith",
"(",
"'#'",
")",
"and",
"cols",
"==",
"[",
"]",
":",
"cols",
"=",
"fileLines",
"[",
"i",
"]",
".",
"strip",
"(",
"'#'",
")",
"cols",
"=",
"cols",
".",
"strip",
"(",
")",
"cols",
"=",
"cols",
".",
"split",
"(",
")",
"elif",
"fileLines",
"[",
"i",
"]",
".",
"startswith",
"(",
"'#'",
")",
":",
"tmp1",
"=",
"fileLines",
"[",
"i",
"]",
".",
"strip",
"(",
"'#'",
")",
"tmp1",
"=",
"tmp1",
".",
"strip",
"(",
")",
"self",
".",
"headerLines",
".",
"append",
"(",
"tmp1",
")",
"elif",
"not",
"fileLines",
"[",
"i",
"]",
".",
"startswith",
"(",
"'#'",
")",
":",
"tmp",
"=",
"fileLines",
"[",
"i",
"]",
".",
"split",
"(",
"'='",
")",
"tmp",
"[",
"0",
"]",
"=",
"tmp",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"tmp",
"[",
"1",
"]",
"=",
"tmp",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"if",
"header",
"==",
"{",
"}",
":",
"header",
"=",
"{",
"str",
"(",
"tmp",
"[",
"0",
"]",
")",
":",
"str",
"(",
"tmp",
"[",
"1",
"]",
")",
"}",
"else",
":",
"header",
"[",
"str",
"(",
"tmp",
"[",
"0",
"]",
")",
"]",
"=",
"str",
"(",
"tmp",
"[",
"1",
"]",
")",
"i",
"+=",
"1",
"while",
"i",
"<",
"len",
"(",
"fileLines",
")",
":",
"if",
"fileLines",
"[",
"i",
"]",
".",
"startswith",
"(",
"'#'",
")",
":",
"i",
"=",
"i",
"+",
"1",
"else",
":",
"tmp",
"=",
"fileLines",
"[",
"i",
"]",
".",
"split",
"(",
")",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"tmp",
")",
")",
":",
"tmp",
"[",
"j",
"]",
"=",
"tmp",
"[",
"j",
"]",
".",
"strip",
"(",
")",
"data",
".",
"append",
"(",
"tmp",
")",
"i",
"+=",
"1",
"tmp",
"=",
"[",
"]",
"tmp1",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"data",
")",
")",
":",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"data",
"[",
"j",
"]",
")",
")",
":",
"tmp1",
"=",
"data",
"[",
"j",
"]",
"[",
"k",
"]",
".",
"strip",
"(",
")",
"if",
"tmp1",
"!=",
"''",
":",
"tmp",
".",
"append",
"(",
"tmp1",
")",
"data",
"[",
"j",
"]",
"=",
"tmp",
"tmp",
"=",
"[",
"]",
"tmp",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"cols",
")",
")",
":",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"data",
")",
")",
":",
"try",
":",
"a",
"=",
"float",
"(",
"data",
"[",
"k",
"]",
"[",
"j",
"]",
")",
"tmp",
".",
"append",
"(",
"a",
")",
"except",
"ValueError",
":",
"tmp",
".",
"append",
"(",
"data",
"[",
"k",
"]",
"[",
"j",
"]",
")",
"#else:",
"# tmp.append(float(data[k][j])) # previously tmp.append(float(data[k][j]))",
"tmp",
"=",
"array",
"(",
"tmp",
")",
"if",
"j",
"==",
"0",
":",
"dataCols",
"=",
"{",
"cols",
"[",
"j",
"]",
":",
"tmp",
"}",
"else",
":",
"dataCols",
"[",
"cols",
"[",
"j",
"]",
"]",
"=",
"tmp",
"tmp",
"=",
"[",
"]",
"return",
"header",
",",
"dataCols"
] | 31
| 15.505051
|
def proxy_protocol(self, error='raise', default=None, limit=None, authenticate=False):
    """
    Parses, and optionally authenticates, proxy protocol information from
    request. Note that ``self.request`` is wrapped by ``SocketBuffer``.

    :param error:
        How read (``exc.ReadError``) and parse (``exc.ParseError``) errors
        are handled. One of:
        - "raise" to propagate.
        - "unread" to suppress exceptions and unread back to socket.
    :param default:
        What to return when no ``ProxyInfo`` was found. Only meaningful
        with error "unread". The special value ``'peer'`` substitutes a
        ``ProxyInfo`` built from the peer's own address.
    :param limit:
        Maximum number of bytes to read when probing request for
        ``ProxyInfo``.
    :param authenticate:
        When true, run ``self.proxy_authenticate`` on the parsed info and
        return **default** if it fails.
    :returns: Parsed ``ProxyInfo`` instance or **default** if none found.
    :raises ValueError: If ``error`` is not "raise" or "unread".
    """
    if error not in ('raise', 'unread'):
        # BUG FIX: the message previously contained a literal "{0}" (no
        # .format call) and a stray trailing quote.
        raise ValueError('error="{0}" is not "raise" or "unread"'.format(error))
    if not isinstance(self.request, SocketBuffer):
        self.request = SocketBuffer(self.request)
    if default == 'peer':
        default = ProxyInfo(
            self.client_address[0], self.client_address[1],
            self.client_address[0], self.client_address[1],
        )
    try:
        line = read_line(
            self.request.sock,
            self.request.buf,
            limit=limit,
        )
    except exc.ReadError:
        if error == 'raise':
            raise
        return default
    try:
        info = parse_line(line)
    except exc.ParseError:
        if error == 'raise':
            raise
        # Put the bytes back so a later reader still sees them.
        self.request.unread(line)
        return default
    if authenticate and not self.proxy_authenticate(info):
        logger.info('authentication failed - %s', info)
        return default
    return info
|
[
"def",
"proxy_protocol",
"(",
"self",
",",
"error",
"=",
"'raise'",
",",
"default",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"authenticate",
"=",
"False",
")",
":",
"if",
"error",
"not",
"in",
"(",
"'raise'",
",",
"'unread'",
")",
":",
"raise",
"ValueError",
"(",
"'error=\"{0}\" is not \"raise\" or \"unread\"\"'",
")",
"if",
"not",
"isinstance",
"(",
"self",
".",
"request",
",",
"SocketBuffer",
")",
":",
"self",
".",
"request",
"=",
"SocketBuffer",
"(",
"self",
".",
"request",
")",
"if",
"default",
"==",
"'peer'",
":",
"default",
"=",
"ProxyInfo",
"(",
"self",
".",
"client_address",
"[",
"0",
"]",
",",
"self",
".",
"client_address",
"[",
"1",
"]",
",",
"self",
".",
"client_address",
"[",
"0",
"]",
",",
"self",
".",
"client_address",
"[",
"1",
"]",
",",
")",
"try",
":",
"line",
"=",
"read_line",
"(",
"self",
".",
"request",
".",
"sock",
",",
"self",
".",
"request",
".",
"buf",
",",
"limit",
"=",
"limit",
",",
")",
"except",
"exc",
".",
"ReadError",
":",
"if",
"error",
"==",
"'raise'",
":",
"raise",
"return",
"default",
"try",
":",
"info",
"=",
"parse_line",
"(",
"line",
")",
"except",
"exc",
".",
"ParseError",
":",
"if",
"error",
"==",
"'raise'",
":",
"raise",
"self",
".",
"request",
".",
"unread",
"(",
"line",
")",
"return",
"default",
"if",
"authenticate",
"and",
"not",
"self",
".",
"proxy_authenticate",
"(",
"info",
")",
":",
"logger",
".",
"info",
"(",
"'authentication failed - %s'",
",",
"info",
")",
"return",
"default",
"return",
"info"
] | 37.938776
| 18.755102
|
def receive(self, event_type, signature, data_str):
    """Receive a web hook for the event and signature.

    Args:
        event_type (str): Name of the event that was received (from the
            request ``X-HelpScout-Event`` header).
        signature (str): The signature that was received, which serves as
            authentication (from the request ``X-HelpScout-Signature``
            header).
        data_str (str): The raw data that was posted by HelpScout
            to the web hook. This must be the raw string, because if it
            is parsed with JSON it will lose its ordering and not pass
            signature validation.

    Raises:
        helpscout.exceptions.HelpScoutSecurityException: If an invalid
            signature is provided.

    Returns:
        helpscout.web_hook.WebHookEvent: The authenticated web hook
            request.
    """
    # Authenticate before doing any parsing work.
    valid = self.validate_signature(signature, data_str)
    if not valid:
        raise HelpScoutSecurityException(
            'The signature provided by this request was invalid.',
        )
    record = json.loads(data_str)
    return HelpScoutWebHookEvent(event_type=event_type, record=record)
|
[
"def",
"receive",
"(",
"self",
",",
"event_type",
",",
"signature",
",",
"data_str",
")",
":",
"if",
"not",
"self",
".",
"validate_signature",
"(",
"signature",
",",
"data_str",
")",
":",
"raise",
"HelpScoutSecurityException",
"(",
"'The signature provided by this request was invalid.'",
",",
")",
"return",
"HelpScoutWebHookEvent",
"(",
"event_type",
"=",
"event_type",
",",
"record",
"=",
"json",
".",
"loads",
"(",
"data_str",
")",
",",
")"
] | 39.875
| 23.84375
|
def explore(node):
    """ Given a node, explores on relatives, siblings and children

    :param node: GraphNode from which to explore
    :return: set of explored GraphNodes
    """
    # Seed with the starting node, then let dfs() add the rest via callback.
    visited = {node}
    dfs(node, callback=visited.add)
    return visited
|
[
"def",
"explore",
"(",
"node",
")",
":",
"explored",
"=",
"set",
"(",
")",
"explored",
".",
"add",
"(",
"node",
")",
"dfs",
"(",
"node",
",",
"callback",
"=",
"lambda",
"n",
":",
"explored",
".",
"add",
"(",
"n",
")",
")",
"return",
"explored"
] | 32
| 11
|
def scale(self, center=True, scale=True):
    """
    Center and/or scale the columns of the current frame.

    :param center: If True, then demean the data. If False, no shifting is done. If ``center`` is a list of
        numbers then shift each column by the corresponding amount.
    :param scale: If True, then scale the data by each column's standard deviation. If False, no scaling
        is done. If ``scale`` is a list of numbers, then scale each column by the requested amount.

    :returns: an H2OFrame with scaled values from the current frame.
    """
    expr = ExprNode("scale", self, center, scale)
    return H2OFrame._expr(expr=expr, cache=self._ex._cache)
|
[
"def",
"scale",
"(",
"self",
",",
"center",
"=",
"True",
",",
"scale",
"=",
"True",
")",
":",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"scale\"",
",",
"self",
",",
"center",
",",
"scale",
")",
",",
"cache",
"=",
"self",
".",
"_ex",
".",
"_cache",
")"
] | 62.363636
| 34.909091
|
def check_api_response(self, response):
    """Check API response and raise exceptions if needed.

    :param requests.models.Response response: request response to check
    """
    # check response
    status = response.status_code
    if status == 200:
        return True
    if status >= 400:
        logging.error(
            "{}: {} - {} - URL: {}".format(
                status,
                response.reason,
                response.json().get("error"),
                response.request.url,
            )
        )
        return False, status
    # NOTE(review): statuses 201-399 fall through and implicitly return
    # None, matching the original behavior.
|
[
"def",
"check_api_response",
"(",
"self",
",",
"response",
")",
":",
"# check response",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"return",
"True",
"elif",
"response",
".",
"status_code",
">=",
"400",
":",
"logging",
".",
"error",
"(",
"\"{}: {} - {} - URL: {}\"",
".",
"format",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"reason",
",",
"response",
".",
"json",
"(",
")",
".",
"get",
"(",
"\"error\"",
")",
",",
"response",
".",
"request",
".",
"url",
",",
")",
")",
"return",
"False",
",",
"response",
".",
"status_code"
] | 34.944444
| 11.277778
|
def pass_time(self, t):
    """Non-blocking time-out for ``t`` seconds."""
    # Busy-wait until the deadline, yielding the CPU via sleep(0).
    deadline = time.time() + t
    while time.time() < deadline:
        time.sleep(0)
|
[
"def",
"pass_time",
"(",
"self",
",",
"t",
")",
":",
"cont",
"=",
"time",
".",
"time",
"(",
")",
"+",
"t",
"while",
"time",
".",
"time",
"(",
")",
"<",
"cont",
":",
"time",
".",
"sleep",
"(",
"0",
")"
] | 33
| 9.8
|
def generate_pages_by_file():
    """Generates custom pages of 'file' storage type.

    Walks the instance's ``pages`` directory; files whose extension maps
    to a known source format are rendered through the Flask test client
    into ``.html`` files under the deploy directory (and optionally
    copied verbatim when ``PAGE_SOURCE_ACCESSIBLE`` is set), while all
    other files are copied through unchanged.
    """
    from veripress import app
    from veripress.model import storage
    from veripress.model.parsers import get_standard_format_name
    from veripress.helpers import traverse_directory
    deploy_dir = get_deploy_dir()

    def copy_file(src, dst):
        # Create parent directories on demand, then copy the file as-is.
        makedirs(os.path.dirname(dst), mode=0o755, exist_ok=True)
        shutil.copyfile(src, dst)
    with app.app_context(), app.test_client() as client:
        root_path = os.path.join(app.instance_path, 'pages')
        for path in traverse_directory(root_path):
            # e.g. 'a/b/c/index.md'
            rel_path = os.path.relpath(path, root_path)
            # e.g. ('a/b/c/index', '.md')
            filename, ext = os.path.splitext(rel_path)
            if get_standard_format_name(ext[1:]) is not None:
                # is source of custom page
                rel_url = filename.replace(os.path.sep, '/') + '.html'
                page = storage.get_page(rel_url, include_draft=False)
                if page is not None:
                    # it's not a draft, so generate the html page
                    makedirs(os.path.join(deploy_dir,
                                          os.path.dirname(rel_path)),
                             mode=0o755, exist_ok=True)
                    # Render via the test client so normal routing applies.
                    with open(os.path.join(deploy_dir, filename + '.html'),
                              'wb') as f:
                        f.write(client.get('/' + rel_url).data)
                if app.config['PAGE_SOURCE_ACCESSIBLE']:
                    copy_file(path, os.path.join(deploy_dir, rel_path))
            else:
                # is other direct files
                copy_file(path, os.path.join(deploy_dir, rel_path))
|
[
"def",
"generate_pages_by_file",
"(",
")",
":",
"from",
"veripress",
"import",
"app",
"from",
"veripress",
".",
"model",
"import",
"storage",
"from",
"veripress",
".",
"model",
".",
"parsers",
"import",
"get_standard_format_name",
"from",
"veripress",
".",
"helpers",
"import",
"traverse_directory",
"deploy_dir",
"=",
"get_deploy_dir",
"(",
")",
"def",
"copy_file",
"(",
"src",
",",
"dst",
")",
":",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"dst",
")",
",",
"mode",
"=",
"0o755",
",",
"exist_ok",
"=",
"True",
")",
"shutil",
".",
"copyfile",
"(",
"src",
",",
"dst",
")",
"with",
"app",
".",
"app_context",
"(",
")",
",",
"app",
".",
"test_client",
"(",
")",
"as",
"client",
":",
"root_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app",
".",
"instance_path",
",",
"'pages'",
")",
"for",
"path",
"in",
"traverse_directory",
"(",
"root_path",
")",
":",
"# e.g. 'a/b/c/index.md'",
"rel_path",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"path",
",",
"root_path",
")",
"# e.g. ('a/b/c/index', '.md')",
"filename",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"rel_path",
")",
"if",
"get_standard_format_name",
"(",
"ext",
"[",
"1",
":",
"]",
")",
"is",
"not",
"None",
":",
"# is source of custom page",
"rel_url",
"=",
"filename",
".",
"replace",
"(",
"os",
".",
"path",
".",
"sep",
",",
"'/'",
")",
"+",
"'.html'",
"page",
"=",
"storage",
".",
"get_page",
"(",
"rel_url",
",",
"include_draft",
"=",
"False",
")",
"if",
"page",
"is",
"not",
"None",
":",
"# it's not a draft, so generate the html page",
"makedirs",
"(",
"os",
".",
"path",
".",
"join",
"(",
"deploy_dir",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"rel_path",
")",
")",
",",
"mode",
"=",
"0o755",
",",
"exist_ok",
"=",
"True",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"deploy_dir",
",",
"filename",
"+",
"'.html'",
")",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"client",
".",
"get",
"(",
"'/'",
"+",
"rel_url",
")",
".",
"data",
")",
"if",
"app",
".",
"config",
"[",
"'PAGE_SOURCE_ACCESSIBLE'",
"]",
":",
"copy_file",
"(",
"path",
",",
"os",
".",
"path",
".",
"join",
"(",
"deploy_dir",
",",
"rel_path",
")",
")",
"else",
":",
"# is other direct files",
"copy_file",
"(",
"path",
",",
"os",
".",
"path",
".",
"join",
"(",
"deploy_dir",
",",
"rel_path",
")",
")"
] | 46.702703
| 17.189189
|
def _patch_expand_paths(self, settings, name, value):
"""
Apply ``SettingsPostProcessor._patch_expand_path`` to each element in
list.
Args:
settings (dict): Current settings.
name (str): Setting name.
value (list): List of paths to patch.
Returns:
list: Patched path list to an absolute path.
"""
return [self._patch_expand_path(settings, name, item)
for item in value]
|
[
"def",
"_patch_expand_paths",
"(",
"self",
",",
"settings",
",",
"name",
",",
"value",
")",
":",
"return",
"[",
"self",
".",
"_patch_expand_path",
"(",
"settings",
",",
"name",
",",
"item",
")",
"for",
"item",
"in",
"value",
"]"
] | 29.8125
| 19.3125
|
def send_job_and_wait(self, message, body_params=None, timeout=None, raises=False):
    """.. versionchanged:: 0.8.4

    Send a message as a job and wait for the response.

    .. note::
        Not all messages are jobs, you'll have to find out which are which

    :param message: a message instance
    :type message: :class:`.Msg`, :class:`.MsgProto`
    :param body_params: a dict with params to the body (only :class:`.MsgProto`)
    :type body_params: dict
    :param timeout: (optional) seconds to wait
    :type timeout: :class:`int`
    :param raises: (optional) On timeout if ``False`` return ``None``, else raise ``gevent.Timeout``
    :type raises: :class:`bool`
    :return: response proto message
    :rtype: :class:`.Msg`, :class:`.MsgProto`
    :raises: ``gevent.Timeout``
    """
    job_id = self.send_job(message, body_params)
    resp = self.wait_event(job_id, timeout, raises=raises)
    # wait_event yields a tuple; the payload body is on its first element.
    return None if resp is None else resp[0].body
|
[
"def",
"send_job_and_wait",
"(",
"self",
",",
"message",
",",
"body_params",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"raises",
"=",
"False",
")",
":",
"job_id",
"=",
"self",
".",
"send_job",
"(",
"message",
",",
"body_params",
")",
"response",
"=",
"self",
".",
"wait_event",
"(",
"job_id",
",",
"timeout",
",",
"raises",
"=",
"raises",
")",
"if",
"response",
"is",
"None",
":",
"return",
"None",
"return",
"response",
"[",
"0",
"]",
".",
"body"
] | 43.458333
| 18.666667
|
def correlation(df, cm=cm.PuOr_r, vmin=None, vmax=None, labels=None, show_scatter=False):
    """
    Generate a column-wise correlation plot from the provided data.
    The columns of the supplied dataframes will be correlated (using `analysis.correlation`) to
    generate a Pearson correlation plot heatmap. Scatter plots of correlated samples can also be generated over
    the redundant half of the plot to give a visual indication of the protein distribution.
    :param df: `pandas.DataFrame`
    :param cm: Matplotlib colormap (default cm.PuOr_r). NOTE(review): this
        parameter shadows the module-level colormap import of the same name,
        and `cm.set_bad` below mutates the colormap object in place — the
        shared default colormap is modified across calls; confirm intent.
    :param vmin: Minimum value for colormap normalization
    :param vmax: Maximum value for colormap normalization
    :param labels: Index column to retrieve labels from
    :param show_scatter: Show overlaid scatter plots for each sample in lower-left half. Note that this is slow for large numbers of samples.
    :return: `matplotlib.Figure` generated Figure.
    """
    # Column-wise correlation matrix from the analysis helper.
    data = analysis.correlation(df)
    if labels:
        # Sort both axes by the label level(s) so rows and columns line up
        # with the tick labels built at the end of this function.
        for axis in (0,1):
            data.sort_index(level=labels, axis=axis, inplace=True)
    # Drop down to the raw ndarray for imshow / masking.
    data = data.values
    # Plot the distributions
    fig = plt.figure(figsize=(10,10))
    ax = fig.add_subplot(1,1,1)
    # Default the color range to the data's finite min/max when not given.
    if vmin is None:
        vmin = np.nanmin(data)
    if vmax is None:
        vmax = np.nanmax(data)
    n_dims = data.shape[0]
    # If showing scatter plots, set the inlay portion to np.nan
    if show_scatter:
        # Get the triangle, other values will be zeroed
        idx = np.tril_indices(n_dims)
        data[idx] = np.nan
        # Render the NaN (masked) lower triangle as white so the scatter
        # overlay shows through. NOTE(review): mutates the colormap in place.
        cm.set_bad('w', 1.)
    i = ax.imshow(data, cmap=cm, vmin=vmin, vmax=vmax, interpolation='none')
    fig.colorbar(i)
    # fig.axes[0] is the heatmap axes (the colorbar axes was appended after it).
    # NOTE(review): grid('off') passes a truthy string; recent matplotlib
    # expects a bool (grid(False)) — confirm against the pinned version.
    fig.axes[0].grid('off')
    if show_scatter:
        # Build an off-screen grid of per-pair scatter plots at high dpi,
        # then composite it as a single image under the heatmap.
        figo = mpl.figure.Figure(figsize=(n_dims, n_dims), dpi=300)
        # Create a dummy Agg canvas so we don't have to display/output this intermediate
        canvas = FigureCanvasAgg(figo)
        for x in range(0, n_dims):
            for y in range(x, n_dims):
                # Subplot index is row-major: row y, column x.
                ax = figo.add_subplot(n_dims, n_dims, y*n_dims+x+1)
                if x != y:
                    xd = df.values[:, x]
                    yd = df.values[:, y]
                    ax.scatter(xd, yd, lw=0, s=5, c='k', alpha=0.2)
                # Hide axes/grid so only the point clouds remain.
                ax.grid('off')
                ax.axis('off')
        # Remove all margins so the image aligns cell-for-cell with the heatmap.
        figo.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
        raw = BytesIO()
        figo.savefig(raw, format='png', bbox_inches=0, transparent=True)
        # Free the intermediate figure before decoding the PNG back in.
        del figo
        raw.seek(0)
        img = mplimg.imread(raw)
        # New axes exactly over the heatmap; zorder=1 layers it above imshow.
        ax2 = fig.add_axes(fig.axes[0].get_position(), label='image', zorder=1)
        ax2.axis('off')
        ax2.imshow(img)
    if labels:
        # Build labels from the supplied axis
        labels = [
            df.columns.get_level_values(l)
            for l in labels
        ]
        # Join the per-level values into one space-separated string per column.
        labels = [" ".join([str(s) for s in l]) for l in zip(*labels) ]
        fig.axes[0].set_xticks(range(n_dims))
        fig.axes[0].set_xticklabels(labels, rotation=45)
        fig.axes[0].set_yticks(range(n_dims))
        fig.axes[0].set_yticklabels(labels)
    return fig
|
[
"def",
"correlation",
"(",
"df",
",",
"cm",
"=",
"cm",
".",
"PuOr_r",
",",
"vmin",
"=",
"None",
",",
"vmax",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"show_scatter",
"=",
"False",
")",
":",
"data",
"=",
"analysis",
".",
"correlation",
"(",
"df",
")",
"if",
"labels",
":",
"for",
"axis",
"in",
"(",
"0",
",",
"1",
")",
":",
"data",
".",
"sort_index",
"(",
"level",
"=",
"labels",
",",
"axis",
"=",
"axis",
",",
"inplace",
"=",
"True",
")",
"data",
"=",
"data",
".",
"values",
"# Plot the distributions",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"10",
",",
"10",
")",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"1",
",",
"1",
",",
"1",
")",
"if",
"vmin",
"is",
"None",
":",
"vmin",
"=",
"np",
".",
"nanmin",
"(",
"data",
")",
"if",
"vmax",
"is",
"None",
":",
"vmax",
"=",
"np",
".",
"nanmax",
"(",
"data",
")",
"n_dims",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"# If showing scatter plots, set the inlay portion to np.nan",
"if",
"show_scatter",
":",
"# Get the triangle, other values will be zeroed",
"idx",
"=",
"np",
".",
"tril_indices",
"(",
"n_dims",
")",
"data",
"[",
"idx",
"]",
"=",
"np",
".",
"nan",
"cm",
".",
"set_bad",
"(",
"'w'",
",",
"1.",
")",
"i",
"=",
"ax",
".",
"imshow",
"(",
"data",
",",
"cmap",
"=",
"cm",
",",
"vmin",
"=",
"vmin",
",",
"vmax",
"=",
"vmax",
",",
"interpolation",
"=",
"'none'",
")",
"fig",
".",
"colorbar",
"(",
"i",
")",
"fig",
".",
"axes",
"[",
"0",
"]",
".",
"grid",
"(",
"'off'",
")",
"if",
"show_scatter",
":",
"figo",
"=",
"mpl",
".",
"figure",
".",
"Figure",
"(",
"figsize",
"=",
"(",
"n_dims",
",",
"n_dims",
")",
",",
"dpi",
"=",
"300",
")",
"# Create a dummy Agg canvas so we don't have to display/output this intermediate",
"canvas",
"=",
"FigureCanvasAgg",
"(",
"figo",
")",
"for",
"x",
"in",
"range",
"(",
"0",
",",
"n_dims",
")",
":",
"for",
"y",
"in",
"range",
"(",
"x",
",",
"n_dims",
")",
":",
"ax",
"=",
"figo",
".",
"add_subplot",
"(",
"n_dims",
",",
"n_dims",
",",
"y",
"*",
"n_dims",
"+",
"x",
"+",
"1",
")",
"if",
"x",
"!=",
"y",
":",
"xd",
"=",
"df",
".",
"values",
"[",
":",
",",
"x",
"]",
"yd",
"=",
"df",
".",
"values",
"[",
":",
",",
"y",
"]",
"ax",
".",
"scatter",
"(",
"xd",
",",
"yd",
",",
"lw",
"=",
"0",
",",
"s",
"=",
"5",
",",
"c",
"=",
"'k'",
",",
"alpha",
"=",
"0.2",
")",
"ax",
".",
"grid",
"(",
"'off'",
")",
"ax",
".",
"axis",
"(",
"'off'",
")",
"figo",
".",
"subplots_adjust",
"(",
"left",
"=",
"0",
",",
"bottom",
"=",
"0",
",",
"right",
"=",
"1",
",",
"top",
"=",
"1",
",",
"wspace",
"=",
"0",
",",
"hspace",
"=",
"0",
")",
"raw",
"=",
"BytesIO",
"(",
")",
"figo",
".",
"savefig",
"(",
"raw",
",",
"format",
"=",
"'png'",
",",
"bbox_inches",
"=",
"0",
",",
"transparent",
"=",
"True",
")",
"del",
"figo",
"raw",
".",
"seek",
"(",
"0",
")",
"img",
"=",
"mplimg",
".",
"imread",
"(",
"raw",
")",
"ax2",
"=",
"fig",
".",
"add_axes",
"(",
"fig",
".",
"axes",
"[",
"0",
"]",
".",
"get_position",
"(",
")",
",",
"label",
"=",
"'image'",
",",
"zorder",
"=",
"1",
")",
"ax2",
".",
"axis",
"(",
"'off'",
")",
"ax2",
".",
"imshow",
"(",
"img",
")",
"if",
"labels",
":",
"# Build labels from the supplied axis",
"labels",
"=",
"[",
"df",
".",
"columns",
".",
"get_level_values",
"(",
"l",
")",
"for",
"l",
"in",
"labels",
"]",
"labels",
"=",
"[",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"s",
")",
"for",
"s",
"in",
"l",
"]",
")",
"for",
"l",
"in",
"zip",
"(",
"*",
"labels",
")",
"]",
"fig",
".",
"axes",
"[",
"0",
"]",
".",
"set_xticks",
"(",
"range",
"(",
"n_dims",
")",
")",
"fig",
".",
"axes",
"[",
"0",
"]",
".",
"set_xticklabels",
"(",
"labels",
",",
"rotation",
"=",
"45",
")",
"fig",
".",
"axes",
"[",
"0",
"]",
".",
"set_yticks",
"(",
"range",
"(",
"n_dims",
")",
")",
"fig",
".",
"axes",
"[",
"0",
"]",
".",
"set_yticklabels",
"(",
"labels",
")",
"return",
"fig"
] | 32.212766
| 24.87234
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.