text
stringlengths 89
104k
| code_tokens
list | avg_line_len
float64 7.91
980
| score
float64 0
630
|
|---|---|---|---|
def next(self):
""" Returns the next row in the CSV, for iteration """
if self._cur_row >= len(self._table):
raise StopIteration
data = self._table[self._cur_row].copy()
self._cur_row += 1
return data
|
[
"def",
"next",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cur_row",
">=",
"len",
"(",
"self",
".",
"_table",
")",
":",
"raise",
"StopIteration",
"data",
"=",
"self",
".",
"_table",
"[",
"self",
".",
"_cur_row",
"]",
".",
"copy",
"(",
")",
"self",
".",
"_cur_row",
"+=",
"1",
"return",
"data"
] | 35.142857
| 11.714286
|
def write_point(self, **kw):
"""
Write a task point to the file::
with writer.write_point(type=PointType.TURN):
writer.write_waypoint(...)
writer.write_observation_zone(...)
# <Point type="Turn"> ... </Point>
Inside the with clause the
:meth:`~aerofiles.xcsoar.Writer.write_waypoint` and
:meth:`~aerofiles.xcsoar.Writer.write_observation_zone` methods must be
used to write the details of the task point.
:param type: type of the task point (one of the constants in
:class:`~aerofiles.xcsoar.constants.PointType`)
"""
assert 'type' in kw
self.convert_bool(kw, 'score_exit')
return self.write_tag_with_content('Point', **kw)
|
[
"def",
"write_point",
"(",
"self",
",",
"*",
"*",
"kw",
")",
":",
"assert",
"'type'",
"in",
"kw",
"self",
".",
"convert_bool",
"(",
"kw",
",",
"'score_exit'",
")",
"return",
"self",
".",
"write_tag_with_content",
"(",
"'Point'",
",",
"*",
"*",
"kw",
")"
] | 31.791667
| 20.125
|
def __find_sentence_initial_proper_names(self, docs):
""" Moodustame lausealguliste pärisnimede loendi: vaatame sõnu, millel nii
pärisnimeanalüüs(id) kui ka mittepärisnimeanalüüs(id) ning mis esinevad
lause või nummerdatud loendi alguses - jäädvustame selliste sõnade
unikaalsed lemmad;
"""
sentInitialNames = set()
for doc in docs:
for sentence in doc.divide( layer=WORDS, by=SENTENCES ):
sentencePos = 0 # Tavaline lausealgus
for i in range(len(sentence)):
word = sentence[i]
# Täiendavad heuristikud lausealguspositsioonide leidmiseks:
# 1) kirjavahemärk, mis pole koma ega semikoolon, on lausealgus:
if all([ a[POSTAG] == 'Z' for a in word[ANALYSIS] ]) and \
not re.match('^[,;]+$', word[TEXT]):
sentencePos = 0
#self.__debug_print_word_in_sentence_str(sentence, word)
continue
# 2) potentsiaalne loendi algus (arv, millele järgneb punkt või
# sulg ja mis ei ole kuupäev);
if not re.match('^[1234567890]*$', word[TEXT] ) and \
not re.match('^[1234567890]{1,2}.[1234567890]{1,2}.[1234567890]{4}$', word[TEXT] ) and \
re.match("^[1234567890.()]*$", word[TEXT]):
sentencePos = 0
#self.__debug_print_word_in_sentence_str(sentence, word)
continue
if sentencePos == 0:
# Vaatame lausealgulisi sõnu, millel on nii pärisnimeanalüüs(e)
# kui ka mitte-pärisnimeanalüüs(e)
h_postags = [ a[POSTAG] == 'H' for a in word[ANALYSIS] ]
if any( h_postags ) and not all( h_postags ):
for analysis in word[ANALYSIS]:
# Jätame meelde kõik unikaalsed pärisnimelemmad
if analysis[POSTAG] == 'H':
sentInitialNames.add( analysis[ROOT] )
sentencePos += 1
return sentInitialNames
|
[
"def",
"__find_sentence_initial_proper_names",
"(",
"self",
",",
"docs",
")",
":",
"sentInitialNames",
"=",
"set",
"(",
")",
"for",
"doc",
"in",
"docs",
":",
"for",
"sentence",
"in",
"doc",
".",
"divide",
"(",
"layer",
"=",
"WORDS",
",",
"by",
"=",
"SENTENCES",
")",
":",
"sentencePos",
"=",
"0",
"# Tavaline lausealgus",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"sentence",
")",
")",
":",
"word",
"=",
"sentence",
"[",
"i",
"]",
"# Täiendavad heuristikud lausealguspositsioonide leidmiseks:",
"# 1) kirjavahemärk, mis pole koma ega semikoolon, on lausealgus:",
"if",
"all",
"(",
"[",
"a",
"[",
"POSTAG",
"]",
"==",
"'Z'",
"for",
"a",
"in",
"word",
"[",
"ANALYSIS",
"]",
"]",
")",
"and",
"not",
"re",
".",
"match",
"(",
"'^[,;]+$'",
",",
"word",
"[",
"TEXT",
"]",
")",
":",
"sentencePos",
"=",
"0",
"#self.__debug_print_word_in_sentence_str(sentence, word)",
"continue",
"# 2) potentsiaalne loendi algus (arv, millele järgneb punkt või",
"# sulg ja mis ei ole kuupäev);",
"if",
"not",
"re",
".",
"match",
"(",
"'^[1234567890]*$'",
",",
"word",
"[",
"TEXT",
"]",
")",
"and",
"not",
"re",
".",
"match",
"(",
"'^[1234567890]{1,2}.[1234567890]{1,2}.[1234567890]{4}$'",
",",
"word",
"[",
"TEXT",
"]",
")",
"and",
"re",
".",
"match",
"(",
"\"^[1234567890.()]*$\"",
",",
"word",
"[",
"TEXT",
"]",
")",
":",
"sentencePos",
"=",
"0",
"#self.__debug_print_word_in_sentence_str(sentence, word)",
"continue",
"if",
"sentencePos",
"==",
"0",
":",
"# Vaatame lausealgulisi sõnu, millel on nii pärisnimeanalüüs(e) ",
"# kui ka mitte-pärisnimeanalüüs(e)",
"h_postags",
"=",
"[",
"a",
"[",
"POSTAG",
"]",
"==",
"'H'",
"for",
"a",
"in",
"word",
"[",
"ANALYSIS",
"]",
"]",
"if",
"any",
"(",
"h_postags",
")",
"and",
"not",
"all",
"(",
"h_postags",
")",
":",
"for",
"analysis",
"in",
"word",
"[",
"ANALYSIS",
"]",
":",
"# Jätame meelde kõik unikaalsed pärisnimelemmad",
"if",
"analysis",
"[",
"POSTAG",
"]",
"==",
"'H'",
":",
"sentInitialNames",
".",
"add",
"(",
"analysis",
"[",
"ROOT",
"]",
")",
"sentencePos",
"+=",
"1",
"return",
"sentInitialNames"
] | 59.657895
| 22.842105
|
def device_to_user(self, x, y):
"""Transform a coordinate from device space to user space
by multiplying the given point
by the inverse of the current transformation matrix (CTM).
:param x: X position.
:param y: Y position.
:type x: float
:type y: float
:returns: A ``(user_x, user_y)`` tuple of floats.
"""
xy = ffi.new('double[2]', [x, y])
cairo.cairo_device_to_user(self._pointer, xy + 0, xy + 1)
self._check_status()
return tuple(xy)
|
[
"def",
"device_to_user",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"xy",
"=",
"ffi",
".",
"new",
"(",
"'double[2]'",
",",
"[",
"x",
",",
"y",
"]",
")",
"cairo",
".",
"cairo_device_to_user",
"(",
"self",
".",
"_pointer",
",",
"xy",
"+",
"0",
",",
"xy",
"+",
"1",
")",
"self",
".",
"_check_status",
"(",
")",
"return",
"tuple",
"(",
"xy",
")"
] | 33
| 15.375
|
def infer_from_frame_stack(self, ob_stack):
"""Infer policy from stack of observations.
Args:
ob_stack: array of shape (1, frame_stack_size, height, width, channels)
Returns:
logits and vf.
"""
logits, vf = self.sess.run([self.logits_t, self.value_function_t],
feed_dict={self.obs_t: ob_stack})
return logits, vf
|
[
"def",
"infer_from_frame_stack",
"(",
"self",
",",
"ob_stack",
")",
":",
"logits",
",",
"vf",
"=",
"self",
".",
"sess",
".",
"run",
"(",
"[",
"self",
".",
"logits_t",
",",
"self",
".",
"value_function_t",
"]",
",",
"feed_dict",
"=",
"{",
"self",
".",
"obs_t",
":",
"ob_stack",
"}",
")",
"return",
"logits",
",",
"vf"
] | 30.833333
| 22.666667
|
def AddRow(self, values):
"""Adds a row of values.
Args:
values (list[object]): values.
Raises:
ValueError: if the number of values is out of bounds.
"""
if self._number_of_columns and len(values) != self._number_of_columns:
raise ValueError('Number of values is out of bounds.')
self._rows.append(values)
if not self._number_of_columns:
self._number_of_columns = len(values)
|
[
"def",
"AddRow",
"(",
"self",
",",
"values",
")",
":",
"if",
"self",
".",
"_number_of_columns",
"and",
"len",
"(",
"values",
")",
"!=",
"self",
".",
"_number_of_columns",
":",
"raise",
"ValueError",
"(",
"'Number of values is out of bounds.'",
")",
"self",
".",
"_rows",
".",
"append",
"(",
"values",
")",
"if",
"not",
"self",
".",
"_number_of_columns",
":",
"self",
".",
"_number_of_columns",
"=",
"len",
"(",
"values",
")"
] | 26
| 20.6875
|
def file_exists(original_file):
"""
Validate the original file is in the S3 bucket
"""
s3 = boto3.resource('s3')
bucket_name, object_key = _parse_s3_file(original_file)
bucket = s3.Bucket(bucket_name)
bucket_iterator = bucket.objects.filter(Prefix=object_key)
bucket_list = [x for x in bucket_iterator]
logger.debug("Bucket List: {0}".format(", ".join([x.key for x in bucket_list])))
logger.debug("bucket_list length: {0}".format(len(bucket_list)))
return len(bucket_list) == 1
|
[
"def",
"file_exists",
"(",
"original_file",
")",
":",
"s3",
"=",
"boto3",
".",
"resource",
"(",
"'s3'",
")",
"bucket_name",
",",
"object_key",
"=",
"_parse_s3_file",
"(",
"original_file",
")",
"bucket",
"=",
"s3",
".",
"Bucket",
"(",
"bucket_name",
")",
"bucket_iterator",
"=",
"bucket",
".",
"objects",
".",
"filter",
"(",
"Prefix",
"=",
"object_key",
")",
"bucket_list",
"=",
"[",
"x",
"for",
"x",
"in",
"bucket_iterator",
"]",
"logger",
".",
"debug",
"(",
"\"Bucket List: {0}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"[",
"x",
".",
"key",
"for",
"x",
"in",
"bucket_list",
"]",
")",
")",
")",
"logger",
".",
"debug",
"(",
"\"bucket_list length: {0}\"",
".",
"format",
"(",
"len",
"(",
"bucket_list",
")",
")",
")",
"return",
"len",
"(",
"bucket_list",
")",
"==",
"1"
] | 42.5
| 13.5
|
def heightmap_rain_erosion(
hm: np.ndarray,
nbDrops: int,
erosionCoef: float,
sedimentationCoef: float,
rnd: Optional[tcod.random.Random] = None,
) -> None:
"""Simulate the effect of rain drops on the terrain, resulting in erosion.
``nbDrops`` should be at least hm.size.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
nbDrops (int): Number of rain drops to simulate.
erosionCoef (float): Amount of ground eroded on the drop's path.
sedimentationCoef (float): Amount of ground deposited when the drops
stops to flow.
rnd (Optional[Random]): A tcod.Random instance, or None.
"""
lib.TCOD_heightmap_rain_erosion(
_heightmap_cdata(hm),
nbDrops,
erosionCoef,
sedimentationCoef,
rnd.random_c if rnd else ffi.NULL,
)
|
[
"def",
"heightmap_rain_erosion",
"(",
"hm",
":",
"np",
".",
"ndarray",
",",
"nbDrops",
":",
"int",
",",
"erosionCoef",
":",
"float",
",",
"sedimentationCoef",
":",
"float",
",",
"rnd",
":",
"Optional",
"[",
"tcod",
".",
"random",
".",
"Random",
"]",
"=",
"None",
",",
")",
"->",
"None",
":",
"lib",
".",
"TCOD_heightmap_rain_erosion",
"(",
"_heightmap_cdata",
"(",
"hm",
")",
",",
"nbDrops",
",",
"erosionCoef",
",",
"sedimentationCoef",
",",
"rnd",
".",
"random_c",
"if",
"rnd",
"else",
"ffi",
".",
"NULL",
",",
")"
] | 33.692308
| 19.192308
|
def extract_archive(archive_path, dest):
"""Extract a local or GCS archive file to a folder.
Args:
archive_path: local or gcs path to a *.tar.gz or *.tar file
dest: local folder the archive will be extracted to
"""
# Make the dest folder if it does not exist
if not os.path.isdir(dest):
os.makedirs(dest)
try:
tmpfolder = None
if (not tf.gfile.Exists(archive_path)) or tf.gfile.IsDirectory(archive_path):
raise ValueError('archive path %s is not a file' % archive_path)
if archive_path.startswith('gs://'):
# Copy the file to a local temp folder
tmpfolder = tempfile.mkdtemp()
cmd_args = ['gsutil', 'cp', archive_path, tmpfolder]
_shell_process.run_and_monitor(cmd_args, os.getpid())
archive_path = os.path.join(tmpfolder, os.path.name(archive_path))
if archive_path.lower().endswith('.tar.gz'):
flags = '-xzf'
elif archive_path.lower().endswith('.tar'):
flags = '-xf'
else:
raise ValueError('Only tar.gz or tar.Z files are supported.')
cmd_args = ['tar', flags, archive_path, '-C', dest]
_shell_process.run_and_monitor(cmd_args, os.getpid())
finally:
if tmpfolder:
shutil.rmtree(tmpfolder)
|
[
"def",
"extract_archive",
"(",
"archive_path",
",",
"dest",
")",
":",
"# Make the dest folder if it does not exist",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dest",
")",
":",
"os",
".",
"makedirs",
"(",
"dest",
")",
"try",
":",
"tmpfolder",
"=",
"None",
"if",
"(",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"archive_path",
")",
")",
"or",
"tf",
".",
"gfile",
".",
"IsDirectory",
"(",
"archive_path",
")",
":",
"raise",
"ValueError",
"(",
"'archive path %s is not a file'",
"%",
"archive_path",
")",
"if",
"archive_path",
".",
"startswith",
"(",
"'gs://'",
")",
":",
"# Copy the file to a local temp folder",
"tmpfolder",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"cmd_args",
"=",
"[",
"'gsutil'",
",",
"'cp'",
",",
"archive_path",
",",
"tmpfolder",
"]",
"_shell_process",
".",
"run_and_monitor",
"(",
"cmd_args",
",",
"os",
".",
"getpid",
"(",
")",
")",
"archive_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmpfolder",
",",
"os",
".",
"path",
".",
"name",
"(",
"archive_path",
")",
")",
"if",
"archive_path",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.tar.gz'",
")",
":",
"flags",
"=",
"'-xzf'",
"elif",
"archive_path",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.tar'",
")",
":",
"flags",
"=",
"'-xf'",
"else",
":",
"raise",
"ValueError",
"(",
"'Only tar.gz or tar.Z files are supported.'",
")",
"cmd_args",
"=",
"[",
"'tar'",
",",
"flags",
",",
"archive_path",
",",
"'-C'",
",",
"dest",
"]",
"_shell_process",
".",
"run_and_monitor",
"(",
"cmd_args",
",",
"os",
".",
"getpid",
"(",
")",
")",
"finally",
":",
"if",
"tmpfolder",
":",
"shutil",
".",
"rmtree",
"(",
"tmpfolder",
")"
] | 32.861111
| 21.027778
|
def set_mode_broodlord_params(
self, zerg_count=None,
vassal_overload_sos_interval=None, vassal_queue_items_sos=None):
"""This mode is a way for a vassal to ask for reinforcements to the Emperor.
Reinforcements are new vassals spawned on demand generally bound on the same socket.
.. warning:: If you are looking for a way to dynamically adapt the number
of workers of an instance, check the Cheaper subsystem - adaptive process spawning mode.
*Broodlord mode is for spawning totally new instances.*
:param int zerg_count: Maximum number of zergs to spawn.
:param int vassal_overload_sos_interval: Ask emperor for reinforcement when overloaded.
Accepts the number of seconds to wait between asking for a new reinforcements.
:param int vassal_queue_items_sos: Ask emperor for sos if listen queue (backlog) has more
items than the value specified
"""
self._set('emperor-broodlord', zerg_count)
self._set('vassal-sos', vassal_overload_sos_interval)
self._set('vassal-sos-backlog', vassal_queue_items_sos)
return self._section
|
[
"def",
"set_mode_broodlord_params",
"(",
"self",
",",
"zerg_count",
"=",
"None",
",",
"vassal_overload_sos_interval",
"=",
"None",
",",
"vassal_queue_items_sos",
"=",
"None",
")",
":",
"self",
".",
"_set",
"(",
"'emperor-broodlord'",
",",
"zerg_count",
")",
"self",
".",
"_set",
"(",
"'vassal-sos'",
",",
"vassal_overload_sos_interval",
")",
"self",
".",
"_set",
"(",
"'vassal-sos-backlog'",
",",
"vassal_queue_items_sos",
")",
"return",
"self",
".",
"_section"
] | 44.807692
| 31
|
def filter(self, track=None, follow=None, locations=None, event=None,
record_keepalive=False):
"""
Returns an iterator for tweets that match a given filter track from
the livestream of tweets happening right now.
If a threading.Event is provided for event and the event is set,
the filter will be interrupted.
"""
if locations is not None:
if type(locations) == list:
locations = ','.join(locations)
locations = locations.replace('\\', '')
url = 'https://stream.twitter.com/1.1/statuses/filter.json'
params = {
"stall_warning": True,
"include_ext_alt_text": True
}
if track:
params["track"] = track
if follow:
params["follow"] = follow
if locations:
params["locations"] = locations
headers = {'accept-encoding': 'deflate, gzip'}
errors = 0
while True:
try:
log.info("connecting to filter stream for %s", params)
resp = self.post(url, params, headers=headers, stream=True)
errors = 0
for line in resp.iter_lines(chunk_size=1024):
if event and event.is_set():
log.info("stopping filter")
# Explicitly close response
resp.close()
return
if not line:
log.info("keep-alive")
if record_keepalive:
yield "keep-alive"
continue
try:
yield json.loads(line.decode())
except Exception as e:
log.error("json parse error: %s - %s", e, line)
except requests.exceptions.HTTPError as e:
errors += 1
log.error("caught http error %s on %s try", e, errors)
if self.http_errors and errors == self.http_errors:
log.warning("too many errors")
raise e
if e.response.status_code == 420:
if interruptible_sleep(errors * 60, event):
log.info("stopping filter")
return
else:
if interruptible_sleep(errors * 5, event):
log.info("stopping filter")
return
except Exception as e:
errors += 1
log.error("caught exception %s on %s try", e, errors)
if self.http_errors and errors == self.http_errors:
log.warning("too many exceptions")
raise e
log.error(e)
if interruptible_sleep(errors, event):
log.info("stopping filter")
return
|
[
"def",
"filter",
"(",
"self",
",",
"track",
"=",
"None",
",",
"follow",
"=",
"None",
",",
"locations",
"=",
"None",
",",
"event",
"=",
"None",
",",
"record_keepalive",
"=",
"False",
")",
":",
"if",
"locations",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"locations",
")",
"==",
"list",
":",
"locations",
"=",
"','",
".",
"join",
"(",
"locations",
")",
"locations",
"=",
"locations",
".",
"replace",
"(",
"'\\\\'",
",",
"''",
")",
"url",
"=",
"'https://stream.twitter.com/1.1/statuses/filter.json'",
"params",
"=",
"{",
"\"stall_warning\"",
":",
"True",
",",
"\"include_ext_alt_text\"",
":",
"True",
"}",
"if",
"track",
":",
"params",
"[",
"\"track\"",
"]",
"=",
"track",
"if",
"follow",
":",
"params",
"[",
"\"follow\"",
"]",
"=",
"follow",
"if",
"locations",
":",
"params",
"[",
"\"locations\"",
"]",
"=",
"locations",
"headers",
"=",
"{",
"'accept-encoding'",
":",
"'deflate, gzip'",
"}",
"errors",
"=",
"0",
"while",
"True",
":",
"try",
":",
"log",
".",
"info",
"(",
"\"connecting to filter stream for %s\"",
",",
"params",
")",
"resp",
"=",
"self",
".",
"post",
"(",
"url",
",",
"params",
",",
"headers",
"=",
"headers",
",",
"stream",
"=",
"True",
")",
"errors",
"=",
"0",
"for",
"line",
"in",
"resp",
".",
"iter_lines",
"(",
"chunk_size",
"=",
"1024",
")",
":",
"if",
"event",
"and",
"event",
".",
"is_set",
"(",
")",
":",
"log",
".",
"info",
"(",
"\"stopping filter\"",
")",
"# Explicitly close response",
"resp",
".",
"close",
"(",
")",
"return",
"if",
"not",
"line",
":",
"log",
".",
"info",
"(",
"\"keep-alive\"",
")",
"if",
"record_keepalive",
":",
"yield",
"\"keep-alive\"",
"continue",
"try",
":",
"yield",
"json",
".",
"loads",
"(",
"line",
".",
"decode",
"(",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"json parse error: %s - %s\"",
",",
"e",
",",
"line",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"e",
":",
"errors",
"+=",
"1",
"log",
".",
"error",
"(",
"\"caught http error %s on %s try\"",
",",
"e",
",",
"errors",
")",
"if",
"self",
".",
"http_errors",
"and",
"errors",
"==",
"self",
".",
"http_errors",
":",
"log",
".",
"warning",
"(",
"\"too many errors\"",
")",
"raise",
"e",
"if",
"e",
".",
"response",
".",
"status_code",
"==",
"420",
":",
"if",
"interruptible_sleep",
"(",
"errors",
"*",
"60",
",",
"event",
")",
":",
"log",
".",
"info",
"(",
"\"stopping filter\"",
")",
"return",
"else",
":",
"if",
"interruptible_sleep",
"(",
"errors",
"*",
"5",
",",
"event",
")",
":",
"log",
".",
"info",
"(",
"\"stopping filter\"",
")",
"return",
"except",
"Exception",
"as",
"e",
":",
"errors",
"+=",
"1",
"log",
".",
"error",
"(",
"\"caught exception %s on %s try\"",
",",
"e",
",",
"errors",
")",
"if",
"self",
".",
"http_errors",
"and",
"errors",
"==",
"self",
".",
"http_errors",
":",
"log",
".",
"warning",
"(",
"\"too many exceptions\"",
")",
"raise",
"e",
"log",
".",
"error",
"(",
"e",
")",
"if",
"interruptible_sleep",
"(",
"errors",
",",
"event",
")",
":",
"log",
".",
"info",
"(",
"\"stopping filter\"",
")",
"return"
] | 40.985915
| 15.070423
|
def ends_to_curve(start_node, end_node):
"""Convert a "pair" of intersection nodes to a curve segment.
.. note::
This is a helper used only by :func:`basic_interior_combine`, which in
turn is only used by :func:`combine_intersections`.
.. note::
This function could specialize to the first or second segment
attached to ``start_node`` and ``end_node``. We determine
first / second based on the classification of ``start_node``,
but the callers of this function could provide that information /
isolate the base curve and the two parameters for us.
.. note::
This only checks the classification of the ``start_node``.
Args:
start_node (.Intersection): The beginning of a segment.
end_node (.Intersection): The end of (the same) segment.
Returns:
Tuple[int, float, float]: The 3-tuple of:
* The edge index along the first surface (if in ``{0, 1, 2}``)
or the edge index along the second surface shifted to the right by
3 (if in ``{3, 4, 5}``)
* The start parameter along the edge
* The end parameter along the edge
Raises:
ValueError: If the ``start_node`` and ``end_node`` disagree on
the first curve when classified as "FIRST".
ValueError: If the ``start_node`` and ``end_node`` disagree on
the second curve when classified as "SECOND".
ValueError: If the ``start_node`` and ``end_node`` disagree on
the both curves when classified as "COINCIDENT".
ValueError: If the ``start_node`` is not classified as
:attr:`~.IntersectionClassification.FIRST`,
:attr:`~.IntersectionClassification.TANGENT_FIRST`,
:attr:`~.IntersectionClassification.SECOND`,
:attr:`~.IntersectionClassification.TANGENT_SECOND` or
:attr:`~.IntersectionClassification.COINCIDENT`.
"""
if is_first(start_node.interior_curve):
if end_node.index_first != start_node.index_first:
raise ValueError(_WRONG_CURVE)
return start_node.index_first, start_node.s, end_node.s
elif is_second(start_node.interior_curve):
if end_node.index_second != start_node.index_second:
raise ValueError(_WRONG_CURVE)
return start_node.index_second + 3, start_node.t, end_node.t
elif start_node.interior_curve == CLASSIFICATION_T.COINCIDENT:
if end_node.index_first == start_node.index_first:
return start_node.index_first, start_node.s, end_node.s
elif end_node.index_second == start_node.index_second:
return start_node.index_second + 3, start_node.t, end_node.t
else:
raise ValueError(_WRONG_CURVE)
else:
raise ValueError(
'Segment start must be classified as "FIRST", "TANGENT_FIRST", '
'"SECOND", "TANGENT_SECOND" or "COINCIDENT".'
)
|
[
"def",
"ends_to_curve",
"(",
"start_node",
",",
"end_node",
")",
":",
"if",
"is_first",
"(",
"start_node",
".",
"interior_curve",
")",
":",
"if",
"end_node",
".",
"index_first",
"!=",
"start_node",
".",
"index_first",
":",
"raise",
"ValueError",
"(",
"_WRONG_CURVE",
")",
"return",
"start_node",
".",
"index_first",
",",
"start_node",
".",
"s",
",",
"end_node",
".",
"s",
"elif",
"is_second",
"(",
"start_node",
".",
"interior_curve",
")",
":",
"if",
"end_node",
".",
"index_second",
"!=",
"start_node",
".",
"index_second",
":",
"raise",
"ValueError",
"(",
"_WRONG_CURVE",
")",
"return",
"start_node",
".",
"index_second",
"+",
"3",
",",
"start_node",
".",
"t",
",",
"end_node",
".",
"t",
"elif",
"start_node",
".",
"interior_curve",
"==",
"CLASSIFICATION_T",
".",
"COINCIDENT",
":",
"if",
"end_node",
".",
"index_first",
"==",
"start_node",
".",
"index_first",
":",
"return",
"start_node",
".",
"index_first",
",",
"start_node",
".",
"s",
",",
"end_node",
".",
"s",
"elif",
"end_node",
".",
"index_second",
"==",
"start_node",
".",
"index_second",
":",
"return",
"start_node",
".",
"index_second",
"+",
"3",
",",
"start_node",
".",
"t",
",",
"end_node",
".",
"t",
"else",
":",
"raise",
"ValueError",
"(",
"_WRONG_CURVE",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Segment start must be classified as \"FIRST\", \"TANGENT_FIRST\", '",
"'\"SECOND\", \"TANGENT_SECOND\" or \"COINCIDENT\".'",
")"
] | 38.891892
| 24.945946
|
def get_share_url_with_dirname(uk, shareid, dirname):
'''得到共享目录的链接'''
return ''.join([
const.PAN_URL, 'wap/link',
'?shareid=', shareid,
'&uk=', uk,
'&dir=', encoder.encode_uri_component(dirname),
'&third=0',
])
|
[
"def",
"get_share_url_with_dirname",
"(",
"uk",
",",
"shareid",
",",
"dirname",
")",
":",
"return",
"''",
".",
"join",
"(",
"[",
"const",
".",
"PAN_URL",
",",
"'wap/link'",
",",
"'?shareid='",
",",
"shareid",
",",
"'&uk='",
",",
"uk",
",",
"'&dir='",
",",
"encoder",
".",
"encode_uri_component",
"(",
"dirname",
")",
",",
"'&third=0'",
",",
"]",
")"
] | 30.333333
| 16.555556
|
def checkAndRaise(pageNum, itemsPerPage):
"""Check and Raise an Exception if needed
Args:
pageNum (int): Page number
itemsPerPage (int): Number of items per Page
Raises:
ErrPaginationLimits: If we are out of limits
"""
if pageNum < 1:
raise ErrPaginationLimits(ErrPaginationLimits.ERR_PAGE_NUM)
if itemsPerPage < Settings.itemsPerPageMin or itemsPerPage > Settings.itemsPerPageMax:
raise ErrPaginationLimits(ErrPaginationLimits.ERR_ITEMS_PER_PAGE)
|
[
"def",
"checkAndRaise",
"(",
"pageNum",
",",
"itemsPerPage",
")",
":",
"if",
"pageNum",
"<",
"1",
":",
"raise",
"ErrPaginationLimits",
"(",
"ErrPaginationLimits",
".",
"ERR_PAGE_NUM",
")",
"if",
"itemsPerPage",
"<",
"Settings",
".",
"itemsPerPageMin",
"or",
"itemsPerPage",
">",
"Settings",
".",
"itemsPerPageMax",
":",
"raise",
"ErrPaginationLimits",
"(",
"ErrPaginationLimits",
".",
"ERR_ITEMS_PER_PAGE",
")"
] | 36.25
| 21.875
|
def listdir(self, folder_id='0', offset=None, limit=None, fields=None):
'Get Box object, representing list of objects in a folder.'
if fields is not None\
and not isinstance(fields, types.StringTypes): fields = ','.join(fields)
return self(
join('folders', folder_id, 'items'),
dict(offset=offset, limit=limit, fields=fields) )
|
[
"def",
"listdir",
"(",
"self",
",",
"folder_id",
"=",
"'0'",
",",
"offset",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"fields",
"=",
"None",
")",
":",
"if",
"fields",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"fields",
",",
"types",
".",
"StringTypes",
")",
":",
"fields",
"=",
"','",
".",
"join",
"(",
"fields",
")",
"return",
"self",
"(",
"join",
"(",
"'folders'",
",",
"folder_id",
",",
"'items'",
")",
",",
"dict",
"(",
"offset",
"=",
"offset",
",",
"limit",
"=",
"limit",
",",
"fields",
"=",
"fields",
")",
")"
] | 48
| 20.285714
|
def _GetSanitizedEventValues(self, event):
"""Sanitizes the event for use in 4n6time.
Args:
event (EventObject): event.
Returns:
dict[str, object]: dictionary containing the sanitized event values.
Raises:
NoFormatterFound: If no event formatter can be found to match the data
type in the event object.
"""
data_type = getattr(event, 'data_type', 'UNKNOWN')
event_formatter = self._output_mediator.GetEventFormatter(event)
if not event_formatter:
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
message, _ = self._output_mediator.GetFormattedMessages(event)
if message is None:
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
source_short, source = self._output_mediator.GetFormattedSources(event)
if source is None or source_short is None:
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
datetime_string = self._FormatDateTime(event)
format_variables = self._output_mediator.GetFormatStringAttributeNames(
event)
if format_variables is None:
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
extra_attributes = []
for attribute_name, attribute_value in sorted(event.GetAttributes()):
if (attribute_name in definitions.RESERVED_VARIABLE_NAMES or
attribute_name in format_variables):
continue
extra_attributes.append(
'{0:s}: {1!s} '.format(attribute_name, attribute_value))
extra_attributes = ' '.join(extra_attributes)
inode = event.inode
if inode is None and hasattr(event, 'pathspec'):
inode = getattr(event.pathspec, 'inode', '-')
if inode is None:
inode = '-'
tags = None
if getattr(event, 'tag', None):
tags = getattr(event.tag, 'tags', None)
taglist = ''
if isinstance(tags, (list, tuple)):
taglist = ','.join(tags)
offset = event.offset
if offset is None:
offset = 0
row = {
'timezone': '{0!s}'.format(self._output_mediator.timezone),
'MACB': self._output_mediator.GetMACBRepresentation(event),
'source': source_short,
'sourcetype': source,
'type': event.timestamp_desc or '-',
'user': getattr(event, 'username', '-'),
'host': getattr(event, 'hostname', '-'),
'description': message,
'filename': getattr(event, 'filename', '-'),
'inode': inode,
'notes': getattr(event, 'notes', '-'),
'format': getattr(event, 'parser', '-'),
'extra': extra_attributes,
'datetime': datetime_string,
'reportnotes': '',
'inreport': '',
'tag': taglist,
'offset': offset,
'vss_store_number': self._GetVSSNumber(event),
'URL': getattr(event, 'url', '-'),
'record_number': getattr(event, 'record_number', 0),
'event_identifier': getattr(event, 'event_identifier', '-'),
'event_type': getattr(event, 'event_type', '-'),
'source_name': getattr(event, 'source_name', '-'),
'user_sid': getattr(event, 'user_sid', '-'),
'computer_name': getattr(event, 'computer_name', '-'),
'evidence': self._evidence}
return row
|
[
"def",
"_GetSanitizedEventValues",
"(",
"self",
",",
"event",
")",
":",
"data_type",
"=",
"getattr",
"(",
"event",
",",
"'data_type'",
",",
"'UNKNOWN'",
")",
"event_formatter",
"=",
"self",
".",
"_output_mediator",
".",
"GetEventFormatter",
"(",
"event",
")",
"if",
"not",
"event_formatter",
":",
"raise",
"errors",
".",
"NoFormatterFound",
"(",
"'Unable to find event formatter for: {0:s}.'",
".",
"format",
"(",
"data_type",
")",
")",
"message",
",",
"_",
"=",
"self",
".",
"_output_mediator",
".",
"GetFormattedMessages",
"(",
"event",
")",
"if",
"message",
"is",
"None",
":",
"raise",
"errors",
".",
"NoFormatterFound",
"(",
"'Unable to find event formatter for: {0:s}.'",
".",
"format",
"(",
"data_type",
")",
")",
"source_short",
",",
"source",
"=",
"self",
".",
"_output_mediator",
".",
"GetFormattedSources",
"(",
"event",
")",
"if",
"source",
"is",
"None",
"or",
"source_short",
"is",
"None",
":",
"raise",
"errors",
".",
"NoFormatterFound",
"(",
"'Unable to find event formatter for: {0:s}.'",
".",
"format",
"(",
"data_type",
")",
")",
"datetime_string",
"=",
"self",
".",
"_FormatDateTime",
"(",
"event",
")",
"format_variables",
"=",
"self",
".",
"_output_mediator",
".",
"GetFormatStringAttributeNames",
"(",
"event",
")",
"if",
"format_variables",
"is",
"None",
":",
"raise",
"errors",
".",
"NoFormatterFound",
"(",
"'Unable to find event formatter for: {0:s}.'",
".",
"format",
"(",
"data_type",
")",
")",
"extra_attributes",
"=",
"[",
"]",
"for",
"attribute_name",
",",
"attribute_value",
"in",
"sorted",
"(",
"event",
".",
"GetAttributes",
"(",
")",
")",
":",
"if",
"(",
"attribute_name",
"in",
"definitions",
".",
"RESERVED_VARIABLE_NAMES",
"or",
"attribute_name",
"in",
"format_variables",
")",
":",
"continue",
"extra_attributes",
".",
"append",
"(",
"'{0:s}: {1!s} '",
".",
"format",
"(",
"attribute_name",
",",
"attribute_value",
")",
")",
"extra_attributes",
"=",
"' '",
".",
"join",
"(",
"extra_attributes",
")",
"inode",
"=",
"event",
".",
"inode",
"if",
"inode",
"is",
"None",
"and",
"hasattr",
"(",
"event",
",",
"'pathspec'",
")",
":",
"inode",
"=",
"getattr",
"(",
"event",
".",
"pathspec",
",",
"'inode'",
",",
"'-'",
")",
"if",
"inode",
"is",
"None",
":",
"inode",
"=",
"'-'",
"tags",
"=",
"None",
"if",
"getattr",
"(",
"event",
",",
"'tag'",
",",
"None",
")",
":",
"tags",
"=",
"getattr",
"(",
"event",
".",
"tag",
",",
"'tags'",
",",
"None",
")",
"taglist",
"=",
"''",
"if",
"isinstance",
"(",
"tags",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"taglist",
"=",
"','",
".",
"join",
"(",
"tags",
")",
"offset",
"=",
"event",
".",
"offset",
"if",
"offset",
"is",
"None",
":",
"offset",
"=",
"0",
"row",
"=",
"{",
"'timezone'",
":",
"'{0!s}'",
".",
"format",
"(",
"self",
".",
"_output_mediator",
".",
"timezone",
")",
",",
"'MACB'",
":",
"self",
".",
"_output_mediator",
".",
"GetMACBRepresentation",
"(",
"event",
")",
",",
"'source'",
":",
"source_short",
",",
"'sourcetype'",
":",
"source",
",",
"'type'",
":",
"event",
".",
"timestamp_desc",
"or",
"'-'",
",",
"'user'",
":",
"getattr",
"(",
"event",
",",
"'username'",
",",
"'-'",
")",
",",
"'host'",
":",
"getattr",
"(",
"event",
",",
"'hostname'",
",",
"'-'",
")",
",",
"'description'",
":",
"message",
",",
"'filename'",
":",
"getattr",
"(",
"event",
",",
"'filename'",
",",
"'-'",
")",
",",
"'inode'",
":",
"inode",
",",
"'notes'",
":",
"getattr",
"(",
"event",
",",
"'notes'",
",",
"'-'",
")",
",",
"'format'",
":",
"getattr",
"(",
"event",
",",
"'parser'",
",",
"'-'",
")",
",",
"'extra'",
":",
"extra_attributes",
",",
"'datetime'",
":",
"datetime_string",
",",
"'reportnotes'",
":",
"''",
",",
"'inreport'",
":",
"''",
",",
"'tag'",
":",
"taglist",
",",
"'offset'",
":",
"offset",
",",
"'vss_store_number'",
":",
"self",
".",
"_GetVSSNumber",
"(",
"event",
")",
",",
"'URL'",
":",
"getattr",
"(",
"event",
",",
"'url'",
",",
"'-'",
")",
",",
"'record_number'",
":",
"getattr",
"(",
"event",
",",
"'record_number'",
",",
"0",
")",
",",
"'event_identifier'",
":",
"getattr",
"(",
"event",
",",
"'event_identifier'",
",",
"'-'",
")",
",",
"'event_type'",
":",
"getattr",
"(",
"event",
",",
"'event_type'",
",",
"'-'",
")",
",",
"'source_name'",
":",
"getattr",
"(",
"event",
",",
"'source_name'",
",",
"'-'",
")",
",",
"'user_sid'",
":",
"getattr",
"(",
"event",
",",
"'user_sid'",
",",
"'-'",
")",
",",
"'computer_name'",
":",
"getattr",
"(",
"event",
",",
"'computer_name'",
",",
"'-'",
")",
",",
"'evidence'",
":",
"self",
".",
"_evidence",
"}",
"return",
"row"
] | 34.385417
| 20.270833
|
def append_from_dict(self, the_dict):
"""
Creates a ``measurement.Measurement`` object from the supplied dict
and then appends it to the buffer
:param the_dict: dict
"""
m = Measurement.from_dict(the_dict)
self.append(m)
|
[
"def",
"append_from_dict",
"(",
"self",
",",
"the_dict",
")",
":",
"m",
"=",
"Measurement",
".",
"from_dict",
"(",
"the_dict",
")",
"self",
".",
"append",
"(",
"m",
")"
] | 29.888889
| 12.333333
|
def get_category_metrics(self, category):
"""Get metrics belonging to the given category"""
slug_list = self._category_slugs(category)
return self.get_metrics(slug_list)
|
[
"def",
"get_category_metrics",
"(",
"self",
",",
"category",
")",
":",
"slug_list",
"=",
"self",
".",
"_category_slugs",
"(",
"category",
")",
"return",
"self",
".",
"get_metrics",
"(",
"slug_list",
")"
] | 47.5
| 3.25
|
def defvalkey(js, key, default=None, take_none=True):
"""
Returns js[key] if set, otherwise default. Note js[key] can be None.
:param js:
:param key:
:param default:
:param take_none:
:return:
"""
if js is None:
return default
if key not in js:
return default
if js[key] is None and not take_none:
return default
return js[key]
|
[
"def",
"defvalkey",
"(",
"js",
",",
"key",
",",
"default",
"=",
"None",
",",
"take_none",
"=",
"True",
")",
":",
"if",
"js",
"is",
"None",
":",
"return",
"default",
"if",
"key",
"not",
"in",
"js",
":",
"return",
"default",
"if",
"js",
"[",
"key",
"]",
"is",
"None",
"and",
"not",
"take_none",
":",
"return",
"default",
"return",
"js",
"[",
"key",
"]"
] | 24
| 17.625
|
def choose_template(self, template):
'''Choose a template
Args:
template: String, choose which template you would like.
Returns:
None
Raises:
None
'''
n1 = int(template)/10
n2 = int(template)%10
self.send('^TS'+'0'+str(n1)+str(n2))
|
[
"def",
"choose_template",
"(",
"self",
",",
"template",
")",
":",
"n1",
"=",
"int",
"(",
"template",
")",
"/",
"10",
"n2",
"=",
"int",
"(",
"template",
")",
"%",
"10",
"self",
".",
"send",
"(",
"'^TS'",
"+",
"'0'",
"+",
"str",
"(",
"n1",
")",
"+",
"str",
"(",
"n2",
")",
")"
] | 25.230769
| 19.538462
|
def env_present(name,
value=None,
user='root'):
'''
Verifies that the specified environment variable is present in the crontab
for the specified user.
name
The name of the environment variable to set in the user crontab
user
The name of the user whose crontab needs to be modified, defaults to
the root user
value
The value to set for the given environment variable
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if __opts__['test']:
status = _check_cron_env(user, name, value=value)
ret['result'] = None
if status == 'absent':
ret['comment'] = 'Cron env {0} is set to be added'.format(name)
elif status == 'present':
ret['result'] = True
ret['comment'] = 'Cron env {0} already present'.format(name)
elif status == 'update':
ret['comment'] = 'Cron env {0} is set to be updated'.format(name)
return ret
data = __salt__['cron.set_env'](user, name, value=value)
if data == 'present':
ret['comment'] = 'Cron env {0} already present'.format(name)
return ret
if data == 'new':
ret['comment'] = 'Cron env {0} added to {1}\'s crontab'.format(name, user)
ret['changes'] = {user: name}
return ret
if data == 'updated':
ret['comment'] = 'Cron env {0} updated'.format(name)
ret['changes'] = {user: name}
return ret
ret['comment'] = ('Cron env {0} for user {1} failed to commit with error \n{2}'
.format(name, user, data))
ret['result'] = False
return ret
|
[
"def",
"env_present",
"(",
"name",
",",
"value",
"=",
"None",
",",
"user",
"=",
"'root'",
")",
":",
"ret",
"=",
"{",
"'changes'",
":",
"{",
"}",
",",
"'comment'",
":",
"''",
",",
"'name'",
":",
"name",
",",
"'result'",
":",
"True",
"}",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"status",
"=",
"_check_cron_env",
"(",
"user",
",",
"name",
",",
"value",
"=",
"value",
")",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"if",
"status",
"==",
"'absent'",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Cron env {0} is set to be added'",
".",
"format",
"(",
"name",
")",
"elif",
"status",
"==",
"'present'",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Cron env {0} already present'",
".",
"format",
"(",
"name",
")",
"elif",
"status",
"==",
"'update'",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Cron env {0} is set to be updated'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"data",
"=",
"__salt__",
"[",
"'cron.set_env'",
"]",
"(",
"user",
",",
"name",
",",
"value",
"=",
"value",
")",
"if",
"data",
"==",
"'present'",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Cron env {0} already present'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"if",
"data",
"==",
"'new'",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Cron env {0} added to {1}\\'s crontab'",
".",
"format",
"(",
"name",
",",
"user",
")",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"user",
":",
"name",
"}",
"return",
"ret",
"if",
"data",
"==",
"'updated'",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Cron env {0} updated'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"user",
":",
"name",
"}",
"return",
"ret",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'Cron env {0} for user {1} failed to commit with error \\n{2}'",
".",
"format",
"(",
"name",
",",
"user",
",",
"data",
")",
")",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret"
] | 32.647059
| 23.27451
|
def describe_jobflow(self, jobflow_id):
"""
Describes a single Elastic MapReduce job flow
:type jobflow_id: str
:param jobflow_id: The job flow id of interest
"""
jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])
if jobflows:
return jobflows[0]
|
[
"def",
"describe_jobflow",
"(",
"self",
",",
"jobflow_id",
")",
":",
"jobflows",
"=",
"self",
".",
"describe_jobflows",
"(",
"jobflow_ids",
"=",
"[",
"jobflow_id",
"]",
")",
"if",
"jobflows",
":",
"return",
"jobflows",
"[",
"0",
"]"
] | 31.4
| 13.6
|
def _update_flip(context, flip_id, ip_type, requested_ports):
"""Update a flip based IPAddress
:param context: neutron api request context.
:param flip_id: id of the flip or scip
:param ip_type: ip_types.FLOATING | ip_types.SCALING
:param requested_ports: dictionary of the structure:
{"port_id": "<id of port>", "fixed_ip": "<fixed ip address>"}
:return: quark.models.IPAddress
"""
# This list will hold flips that require notifications.
# Using sets to avoid dups, if any.
notifications = {
billing.IP_ASSOC: set(),
billing.IP_DISASSOC: set()
}
context.session.begin()
try:
flip = db_api.floating_ip_find(context, id=flip_id, scope=db_api.ONE)
if not flip:
if ip_type == ip_types.SCALING:
raise q_exc.ScalingIpNotFound(id=flip_id)
raise q_exc.FloatingIpNotFound(id=flip_id)
current_ports = flip.ports
# Determine what ports are being removed, being added, and remain
req_port_ids = [request_port.get('port_id')
for request_port in requested_ports]
curr_port_ids = [curr_port.id for curr_port in current_ports]
added_port_ids = [port_id for port_id in req_port_ids
if port_id and port_id not in curr_port_ids]
removed_port_ids = [port_id for port_id in curr_port_ids
if port_id not in req_port_ids]
remaining_port_ids = set(curr_port_ids) - set(removed_port_ids)
# Validations just for floating ip types
if (ip_type == ip_types.FLOATING and curr_port_ids and
curr_port_ids == req_port_ids):
d = dict(flip_id=flip_id, port_id=curr_port_ids[0])
raise q_exc.PortAlreadyAssociatedToFloatingIp(**d)
if (ip_type == ip_types.FLOATING and
not curr_port_ids and not req_port_ids):
raise q_exc.FloatingIpUpdateNoPortIdSupplied()
# Validate that GW IP is not in use on the NW.
flip_subnet = v._make_subnet_dict(flip.subnet)
for added_port_id in added_port_ids:
port = _get_port(context, added_port_id)
nw = port.network
nw_ports = v._make_ports_list(nw.ports)
fixed_ips = [ip.get('ip_address') for p in nw_ports
for ip in p.get('fixed_ips')]
gw_ip = flip_subnet.get('gateway_ip')
if gw_ip in fixed_ips:
port_with_gateway_ip = None
for port in nw_ports:
for ip in port.get('fixed_ips'):
if gw_ip in ip.get('ip_address'):
port_with_gateway_ip = port
break
port_id = port_with_gateway_ip.get('id')
network_id = port_with_gateway_ip.get('network_id')
raise q_exc.FixedIpAllocatedToGatewayIp(port_id=port_id,
network_id=network_id)
port_fixed_ips = {}
# Keep the ports and fixed ips that have not changed
for port_id in remaining_port_ids:
port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)
port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip}
# Disassociate the ports and fixed ips from the flip that were
# associated to the flip but are not anymore
for port_id in removed_port_ids:
port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
flip = db_api.port_disassociate_ip(context, [port], flip)
notifications[billing.IP_DISASSOC].add(flip)
fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)
if fixed_ip:
flip = db_api.floating_ip_disassociate_fixed_ip(
context, flip, fixed_ip)
# Validate the new ports with the flip and associate the new ports
# and fixed ips with the flip
for port_id in added_port_ids:
port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
if not port:
raise n_exc.PortNotFound(port_id=port_id)
if any(ip for ip in port.ip_addresses
if (ip.get('address_type') == ip_types.FLOATING)):
raise q_exc.PortAlreadyContainsFloatingIp(port_id=port_id)
if any(ip for ip in port.ip_addresses
if (ip.get('address_type') == ip_types.SCALING)):
raise q_exc.PortAlreadyContainsScalingIp(port_id=port_id)
fixed_ip = _get_next_available_fixed_ip(port)
LOG.info('new fixed ip: %s' % fixed_ip)
if not fixed_ip:
raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id)
port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip}
flip = db_api.port_associate_ip(context, [port], flip, [port_id])
notifications[billing.IP_ASSOC].add(flip)
flip = db_api.floating_ip_associate_fixed_ip(context, flip,
fixed_ip)
flip_driver = registry.DRIVER_REGISTRY.get_driver()
# If there are not any remaining ports and no new ones are being added,
# remove the floating ip from unicorn
if not remaining_port_ids and not added_port_ids:
flip_driver.remove_floating_ip(flip)
# If new ports are being added but there previously was not any ports,
# then register a new floating ip with the driver because it is
# assumed it does not exist
elif added_port_ids and not curr_port_ids:
flip_driver.register_floating_ip(flip, port_fixed_ips)
else:
flip_driver.update_floating_ip(flip, port_fixed_ips)
context.session.commit()
except Exception:
context.session.rollback()
raise
# Send notifications for possible associate/disassociate events
for notif_type, flip_set in notifications.iteritems():
for flip in flip_set:
billing.notify(context, notif_type, flip)
# NOTE(blogan): ORM does not seem to update the model to the real state
# of the database, so I'm doing an explicit refresh for now.
context.session.refresh(flip)
return flip
|
[
"def",
"_update_flip",
"(",
"context",
",",
"flip_id",
",",
"ip_type",
",",
"requested_ports",
")",
":",
"# This list will hold flips that require notifications.",
"# Using sets to avoid dups, if any.",
"notifications",
"=",
"{",
"billing",
".",
"IP_ASSOC",
":",
"set",
"(",
")",
",",
"billing",
".",
"IP_DISASSOC",
":",
"set",
"(",
")",
"}",
"context",
".",
"session",
".",
"begin",
"(",
")",
"try",
":",
"flip",
"=",
"db_api",
".",
"floating_ip_find",
"(",
"context",
",",
"id",
"=",
"flip_id",
",",
"scope",
"=",
"db_api",
".",
"ONE",
")",
"if",
"not",
"flip",
":",
"if",
"ip_type",
"==",
"ip_types",
".",
"SCALING",
":",
"raise",
"q_exc",
".",
"ScalingIpNotFound",
"(",
"id",
"=",
"flip_id",
")",
"raise",
"q_exc",
".",
"FloatingIpNotFound",
"(",
"id",
"=",
"flip_id",
")",
"current_ports",
"=",
"flip",
".",
"ports",
"# Determine what ports are being removed, being added, and remain",
"req_port_ids",
"=",
"[",
"request_port",
".",
"get",
"(",
"'port_id'",
")",
"for",
"request_port",
"in",
"requested_ports",
"]",
"curr_port_ids",
"=",
"[",
"curr_port",
".",
"id",
"for",
"curr_port",
"in",
"current_ports",
"]",
"added_port_ids",
"=",
"[",
"port_id",
"for",
"port_id",
"in",
"req_port_ids",
"if",
"port_id",
"and",
"port_id",
"not",
"in",
"curr_port_ids",
"]",
"removed_port_ids",
"=",
"[",
"port_id",
"for",
"port_id",
"in",
"curr_port_ids",
"if",
"port_id",
"not",
"in",
"req_port_ids",
"]",
"remaining_port_ids",
"=",
"set",
"(",
"curr_port_ids",
")",
"-",
"set",
"(",
"removed_port_ids",
")",
"# Validations just for floating ip types",
"if",
"(",
"ip_type",
"==",
"ip_types",
".",
"FLOATING",
"and",
"curr_port_ids",
"and",
"curr_port_ids",
"==",
"req_port_ids",
")",
":",
"d",
"=",
"dict",
"(",
"flip_id",
"=",
"flip_id",
",",
"port_id",
"=",
"curr_port_ids",
"[",
"0",
"]",
")",
"raise",
"q_exc",
".",
"PortAlreadyAssociatedToFloatingIp",
"(",
"*",
"*",
"d",
")",
"if",
"(",
"ip_type",
"==",
"ip_types",
".",
"FLOATING",
"and",
"not",
"curr_port_ids",
"and",
"not",
"req_port_ids",
")",
":",
"raise",
"q_exc",
".",
"FloatingIpUpdateNoPortIdSupplied",
"(",
")",
"# Validate that GW IP is not in use on the NW.",
"flip_subnet",
"=",
"v",
".",
"_make_subnet_dict",
"(",
"flip",
".",
"subnet",
")",
"for",
"added_port_id",
"in",
"added_port_ids",
":",
"port",
"=",
"_get_port",
"(",
"context",
",",
"added_port_id",
")",
"nw",
"=",
"port",
".",
"network",
"nw_ports",
"=",
"v",
".",
"_make_ports_list",
"(",
"nw",
".",
"ports",
")",
"fixed_ips",
"=",
"[",
"ip",
".",
"get",
"(",
"'ip_address'",
")",
"for",
"p",
"in",
"nw_ports",
"for",
"ip",
"in",
"p",
".",
"get",
"(",
"'fixed_ips'",
")",
"]",
"gw_ip",
"=",
"flip_subnet",
".",
"get",
"(",
"'gateway_ip'",
")",
"if",
"gw_ip",
"in",
"fixed_ips",
":",
"port_with_gateway_ip",
"=",
"None",
"for",
"port",
"in",
"nw_ports",
":",
"for",
"ip",
"in",
"port",
".",
"get",
"(",
"'fixed_ips'",
")",
":",
"if",
"gw_ip",
"in",
"ip",
".",
"get",
"(",
"'ip_address'",
")",
":",
"port_with_gateway_ip",
"=",
"port",
"break",
"port_id",
"=",
"port_with_gateway_ip",
".",
"get",
"(",
"'id'",
")",
"network_id",
"=",
"port_with_gateway_ip",
".",
"get",
"(",
"'network_id'",
")",
"raise",
"q_exc",
".",
"FixedIpAllocatedToGatewayIp",
"(",
"port_id",
"=",
"port_id",
",",
"network_id",
"=",
"network_id",
")",
"port_fixed_ips",
"=",
"{",
"}",
"# Keep the ports and fixed ips that have not changed",
"for",
"port_id",
"in",
"remaining_port_ids",
":",
"port",
"=",
"db_api",
".",
"port_find",
"(",
"context",
",",
"id",
"=",
"port_id",
",",
"scope",
"=",
"db_api",
".",
"ONE",
")",
"fixed_ip",
"=",
"_get_flip_fixed_ip_by_port_id",
"(",
"flip",
",",
"port_id",
")",
"port_fixed_ips",
"[",
"port_id",
"]",
"=",
"{",
"'port'",
":",
"port",
",",
"'fixed_ip'",
":",
"fixed_ip",
"}",
"# Disassociate the ports and fixed ips from the flip that were",
"# associated to the flip but are not anymore",
"for",
"port_id",
"in",
"removed_port_ids",
":",
"port",
"=",
"db_api",
".",
"port_find",
"(",
"context",
",",
"id",
"=",
"port_id",
",",
"scope",
"=",
"db_api",
".",
"ONE",
")",
"flip",
"=",
"db_api",
".",
"port_disassociate_ip",
"(",
"context",
",",
"[",
"port",
"]",
",",
"flip",
")",
"notifications",
"[",
"billing",
".",
"IP_DISASSOC",
"]",
".",
"add",
"(",
"flip",
")",
"fixed_ip",
"=",
"_get_flip_fixed_ip_by_port_id",
"(",
"flip",
",",
"port_id",
")",
"if",
"fixed_ip",
":",
"flip",
"=",
"db_api",
".",
"floating_ip_disassociate_fixed_ip",
"(",
"context",
",",
"flip",
",",
"fixed_ip",
")",
"# Validate the new ports with the flip and associate the new ports",
"# and fixed ips with the flip",
"for",
"port_id",
"in",
"added_port_ids",
":",
"port",
"=",
"db_api",
".",
"port_find",
"(",
"context",
",",
"id",
"=",
"port_id",
",",
"scope",
"=",
"db_api",
".",
"ONE",
")",
"if",
"not",
"port",
":",
"raise",
"n_exc",
".",
"PortNotFound",
"(",
"port_id",
"=",
"port_id",
")",
"if",
"any",
"(",
"ip",
"for",
"ip",
"in",
"port",
".",
"ip_addresses",
"if",
"(",
"ip",
".",
"get",
"(",
"'address_type'",
")",
"==",
"ip_types",
".",
"FLOATING",
")",
")",
":",
"raise",
"q_exc",
".",
"PortAlreadyContainsFloatingIp",
"(",
"port_id",
"=",
"port_id",
")",
"if",
"any",
"(",
"ip",
"for",
"ip",
"in",
"port",
".",
"ip_addresses",
"if",
"(",
"ip",
".",
"get",
"(",
"'address_type'",
")",
"==",
"ip_types",
".",
"SCALING",
")",
")",
":",
"raise",
"q_exc",
".",
"PortAlreadyContainsScalingIp",
"(",
"port_id",
"=",
"port_id",
")",
"fixed_ip",
"=",
"_get_next_available_fixed_ip",
"(",
"port",
")",
"LOG",
".",
"info",
"(",
"'new fixed ip: %s'",
"%",
"fixed_ip",
")",
"if",
"not",
"fixed_ip",
":",
"raise",
"q_exc",
".",
"NoAvailableFixedIpsForPort",
"(",
"port_id",
"=",
"port_id",
")",
"port_fixed_ips",
"[",
"port_id",
"]",
"=",
"{",
"'port'",
":",
"port",
",",
"'fixed_ip'",
":",
"fixed_ip",
"}",
"flip",
"=",
"db_api",
".",
"port_associate_ip",
"(",
"context",
",",
"[",
"port",
"]",
",",
"flip",
",",
"[",
"port_id",
"]",
")",
"notifications",
"[",
"billing",
".",
"IP_ASSOC",
"]",
".",
"add",
"(",
"flip",
")",
"flip",
"=",
"db_api",
".",
"floating_ip_associate_fixed_ip",
"(",
"context",
",",
"flip",
",",
"fixed_ip",
")",
"flip_driver",
"=",
"registry",
".",
"DRIVER_REGISTRY",
".",
"get_driver",
"(",
")",
"# If there are not any remaining ports and no new ones are being added,",
"# remove the floating ip from unicorn",
"if",
"not",
"remaining_port_ids",
"and",
"not",
"added_port_ids",
":",
"flip_driver",
".",
"remove_floating_ip",
"(",
"flip",
")",
"# If new ports are being added but there previously was not any ports,",
"# then register a new floating ip with the driver because it is",
"# assumed it does not exist",
"elif",
"added_port_ids",
"and",
"not",
"curr_port_ids",
":",
"flip_driver",
".",
"register_floating_ip",
"(",
"flip",
",",
"port_fixed_ips",
")",
"else",
":",
"flip_driver",
".",
"update_floating_ip",
"(",
"flip",
",",
"port_fixed_ips",
")",
"context",
".",
"session",
".",
"commit",
"(",
")",
"except",
"Exception",
":",
"context",
".",
"session",
".",
"rollback",
"(",
")",
"raise",
"# Send notifications for possible associate/disassociate events",
"for",
"notif_type",
",",
"flip_set",
"in",
"notifications",
".",
"iteritems",
"(",
")",
":",
"for",
"flip",
"in",
"flip_set",
":",
"billing",
".",
"notify",
"(",
"context",
",",
"notif_type",
",",
"flip",
")",
"# NOTE(blogan): ORM does not seem to update the model to the real state",
"# of the database, so I'm doing an explicit refresh for now.",
"context",
".",
"session",
".",
"refresh",
"(",
"flip",
")",
"return",
"flip"
] | 47.105263
| 20.150376
|
def seek(self, offset, whence=0):
"""Seek to the specified position.
:param int offset: The offset in bytes.
:param int whence: Where the offset is from.
Returns the position after seeking."""
logger.debug('seeking to offset: %r whence: %r', offset, whence)
if whence not in s3.WHENCE_CHOICES:
raise ValueError('invalid whence, expected one of %r' % s3.WHENCE_CHOICES)
if not self.seekable():
raise OSError
if whence == s3.START:
new_pos = offset
elif whence == s3.CURRENT:
new_pos = self._current_pos + offset
elif whence == s3.END:
new_pos = self.content_length + offset
new_pos = s3.clamp(new_pos, 0, self.content_length)
if self._current_pos == new_pos:
return self._current_pos
logger.debug("http seeking from current_pos: %d to new_pos: %d", self._current_pos, new_pos)
self._current_pos = new_pos
if new_pos == self.content_length:
self.response = None
self._read_iter = None
self._read_buffer.empty()
else:
response = self._partial_request(new_pos)
if response.ok:
self.response = response
self._read_iter = self.response.iter_content(self.buffer_size)
self._read_buffer.empty()
else:
self.response = None
return self._current_pos
|
[
"def",
"seek",
"(",
"self",
",",
"offset",
",",
"whence",
"=",
"0",
")",
":",
"logger",
".",
"debug",
"(",
"'seeking to offset: %r whence: %r'",
",",
"offset",
",",
"whence",
")",
"if",
"whence",
"not",
"in",
"s3",
".",
"WHENCE_CHOICES",
":",
"raise",
"ValueError",
"(",
"'invalid whence, expected one of %r'",
"%",
"s3",
".",
"WHENCE_CHOICES",
")",
"if",
"not",
"self",
".",
"seekable",
"(",
")",
":",
"raise",
"OSError",
"if",
"whence",
"==",
"s3",
".",
"START",
":",
"new_pos",
"=",
"offset",
"elif",
"whence",
"==",
"s3",
".",
"CURRENT",
":",
"new_pos",
"=",
"self",
".",
"_current_pos",
"+",
"offset",
"elif",
"whence",
"==",
"s3",
".",
"END",
":",
"new_pos",
"=",
"self",
".",
"content_length",
"+",
"offset",
"new_pos",
"=",
"s3",
".",
"clamp",
"(",
"new_pos",
",",
"0",
",",
"self",
".",
"content_length",
")",
"if",
"self",
".",
"_current_pos",
"==",
"new_pos",
":",
"return",
"self",
".",
"_current_pos",
"logger",
".",
"debug",
"(",
"\"http seeking from current_pos: %d to new_pos: %d\"",
",",
"self",
".",
"_current_pos",
",",
"new_pos",
")",
"self",
".",
"_current_pos",
"=",
"new_pos",
"if",
"new_pos",
"==",
"self",
".",
"content_length",
":",
"self",
".",
"response",
"=",
"None",
"self",
".",
"_read_iter",
"=",
"None",
"self",
".",
"_read_buffer",
".",
"empty",
"(",
")",
"else",
":",
"response",
"=",
"self",
".",
"_partial_request",
"(",
"new_pos",
")",
"if",
"response",
".",
"ok",
":",
"self",
".",
"response",
"=",
"response",
"self",
".",
"_read_iter",
"=",
"self",
".",
"response",
".",
"iter_content",
"(",
"self",
".",
"buffer_size",
")",
"self",
".",
"_read_buffer",
".",
"empty",
"(",
")",
"else",
":",
"self",
".",
"response",
"=",
"None",
"return",
"self",
".",
"_current_pos"
] | 32.931818
| 18.659091
|
def _read_moment_tensor_from_ndk_string(ndk_string, system='USE'):
"""
Reads the moment tensor from the ndk_string representation
ndk_string = [Mrr, sigMrr, Mtt, sigMtt, Mpp, sigMpp, Mrt, sigMrt, Mrp,
sigMrp, Mtp, sigMtp]
Output tensors should be of format:
expected = [[Mtt, Mtp, Mtr],
[Mtp, Mpp, Mpr],
[Mtr, Mpr, Mrr]]
sigma = [[sigMtt, sigMtp, sigMtr],
[sigMtp, sigMpp, sigMpr],
[sigMtr, sigMpr, sigMrr]]
Exponent returned in Nm
:param str ndk_string:
String of data in ndk format (line 4 of event)
:param str system:
Reference frame of tensor Up, South, East {USE} or North, East, Down
(NED)
"""
exponent = float(ndk_string[0:2]) - 7.
mkr = np.array([2, 9, 15], dtype=int)
vector = []
for i in range(0, 6):
vector.extend([
float(ndk_string[mkr[0]:mkr[1]]),
float(ndk_string[mkr[1]:mkr[2]])])
mkr = mkr + 13
vector = np.array(vector)
mrr, mtt, mpp, mrt, mrp, mtp = tuple(vector[np.arange(0, 12, 2)])
sig_mrr, sig_mtt, sig_mpp, sig_mrt, sig_mrp, sig_mtp = \
tuple(vector[np.arange(1, 13, 2)])
tensor = utils.COORD_SYSTEM[system](mrr, mtt, mpp, mrt, mrp, mtp)
tensor = (10. ** exponent) * tensor
sigma = utils.COORD_SYSTEM[system](sig_mrr, sig_mtt, sig_mpp,
sig_mrt, sig_mrp, sig_mtp)
sigma = (10. ** exponent) * sigma
return tensor, sigma, exponent
|
[
"def",
"_read_moment_tensor_from_ndk_string",
"(",
"ndk_string",
",",
"system",
"=",
"'USE'",
")",
":",
"exponent",
"=",
"float",
"(",
"ndk_string",
"[",
"0",
":",
"2",
"]",
")",
"-",
"7.",
"mkr",
"=",
"np",
".",
"array",
"(",
"[",
"2",
",",
"9",
",",
"15",
"]",
",",
"dtype",
"=",
"int",
")",
"vector",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"6",
")",
":",
"vector",
".",
"extend",
"(",
"[",
"float",
"(",
"ndk_string",
"[",
"mkr",
"[",
"0",
"]",
":",
"mkr",
"[",
"1",
"]",
"]",
")",
",",
"float",
"(",
"ndk_string",
"[",
"mkr",
"[",
"1",
"]",
":",
"mkr",
"[",
"2",
"]",
"]",
")",
"]",
")",
"mkr",
"=",
"mkr",
"+",
"13",
"vector",
"=",
"np",
".",
"array",
"(",
"vector",
")",
"mrr",
",",
"mtt",
",",
"mpp",
",",
"mrt",
",",
"mrp",
",",
"mtp",
"=",
"tuple",
"(",
"vector",
"[",
"np",
".",
"arange",
"(",
"0",
",",
"12",
",",
"2",
")",
"]",
")",
"sig_mrr",
",",
"sig_mtt",
",",
"sig_mpp",
",",
"sig_mrt",
",",
"sig_mrp",
",",
"sig_mtp",
"=",
"tuple",
"(",
"vector",
"[",
"np",
".",
"arange",
"(",
"1",
",",
"13",
",",
"2",
")",
"]",
")",
"tensor",
"=",
"utils",
".",
"COORD_SYSTEM",
"[",
"system",
"]",
"(",
"mrr",
",",
"mtt",
",",
"mpp",
",",
"mrt",
",",
"mrp",
",",
"mtp",
")",
"tensor",
"=",
"(",
"10.",
"**",
"exponent",
")",
"*",
"tensor",
"sigma",
"=",
"utils",
".",
"COORD_SYSTEM",
"[",
"system",
"]",
"(",
"sig_mrr",
",",
"sig_mtt",
",",
"sig_mpp",
",",
"sig_mrt",
",",
"sig_mrp",
",",
"sig_mtp",
")",
"sigma",
"=",
"(",
"10.",
"**",
"exponent",
")",
"*",
"sigma",
"return",
"tensor",
",",
"sigma",
",",
"exponent"
] | 36.902439
| 15.243902
|
def select_column(self, column_name):
"""
Get a reference to the :class:`~turicreate.SArray` that corresponds with
the given column_name. Throws an exception if the column_name is
something other than a string or if the column name is not found.
Parameters
----------
column_name: str
The column name.
Returns
-------
out : SArray
The SArray that is referred by ``column_name``.
See Also
--------
select_columns
Examples
--------
>>> sf = turicreate.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie']})
>>> # This line is equivalent to `sa = sf['user_name']`
>>> sa = sf.select_column('user_name')
>>> sa
dtype: str
Rows: 3
['alice', 'bob', 'charlie']
"""
if not isinstance(column_name, str):
raise TypeError("Invalid column_nametype: must be str")
with cython_context():
return SArray(data=[], _proxy=self.__proxy__.select_column(column_name))
|
[
"def",
"select_column",
"(",
"self",
",",
"column_name",
")",
":",
"if",
"not",
"isinstance",
"(",
"column_name",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"Invalid column_nametype: must be str\"",
")",
"with",
"cython_context",
"(",
")",
":",
"return",
"SArray",
"(",
"data",
"=",
"[",
"]",
",",
"_proxy",
"=",
"self",
".",
"__proxy__",
".",
"select_column",
"(",
"column_name",
")",
")"
] | 31.828571
| 22.457143
|
def get_account_details(self, account):
""" Get the account details. """
result = {}
try:
luser = self._get_account(account.username)
luser = preload(luser, database=self._database)
except ObjectDoesNotExist:
return result
for i, j in luser.items():
if i != 'userPassword' and j is not None:
result[i] = j
return result
|
[
"def",
"get_account_details",
"(",
"self",
",",
"account",
")",
":",
"result",
"=",
"{",
"}",
"try",
":",
"luser",
"=",
"self",
".",
"_get_account",
"(",
"account",
".",
"username",
")",
"luser",
"=",
"preload",
"(",
"luser",
",",
"database",
"=",
"self",
".",
"_database",
")",
"except",
"ObjectDoesNotExist",
":",
"return",
"result",
"for",
"i",
",",
"j",
"in",
"luser",
".",
"items",
"(",
")",
":",
"if",
"i",
"!=",
"'userPassword'",
"and",
"j",
"is",
"not",
"None",
":",
"result",
"[",
"i",
"]",
"=",
"j",
"return",
"result"
] | 30
| 16.714286
|
def re_rect(FlowRate, Width, DistCenter, Nu, openchannel):
"""Return the Reynolds Number for a rectangular channel."""
#Checking input validity - inputs not checked here are checked by
#functions this function calls.
ut.check_range([FlowRate, ">0", "Flow rate"], [Nu, ">0", "Nu"])
return (4 * FlowRate
* radius_hydraulic(Width, DistCenter, openchannel).magnitude
/ (Width * DistCenter * Nu))
|
[
"def",
"re_rect",
"(",
"FlowRate",
",",
"Width",
",",
"DistCenter",
",",
"Nu",
",",
"openchannel",
")",
":",
"#Checking input validity - inputs not checked here are checked by",
"#functions this function calls.",
"ut",
".",
"check_range",
"(",
"[",
"FlowRate",
",",
"\">0\"",
",",
"\"Flow rate\"",
"]",
",",
"[",
"Nu",
",",
"\">0\"",
",",
"\"Nu\"",
"]",
")",
"return",
"(",
"4",
"*",
"FlowRate",
"*",
"radius_hydraulic",
"(",
"Width",
",",
"DistCenter",
",",
"openchannel",
")",
".",
"magnitude",
"/",
"(",
"Width",
"*",
"DistCenter",
"*",
"Nu",
")",
")"
] | 53.5
| 15.875
|
def rlmb_tiny_recurrent():
"""Tiny setting with a recurrent next-frame model."""
hparams = rlmb_ppo_tiny()
hparams.epochs = 1 # Too slow with 2 for regular runs.
hparams.generative_model = "next_frame_basic_recurrent"
hparams.generative_model_params = "next_frame_basic_recurrent"
return hparams
|
[
"def",
"rlmb_tiny_recurrent",
"(",
")",
":",
"hparams",
"=",
"rlmb_ppo_tiny",
"(",
")",
"hparams",
".",
"epochs",
"=",
"1",
"# Too slow with 2 for regular runs.",
"hparams",
".",
"generative_model",
"=",
"\"next_frame_basic_recurrent\"",
"hparams",
".",
"generative_model_params",
"=",
"\"next_frame_basic_recurrent\"",
"return",
"hparams"
] | 43.142857
| 15.571429
|
def _GetFileSystemCacheIdentifier(self, path_spec):
"""Determines the file system cache identifier for the path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
str: identifier of the VFS object.
"""
string_parts = []
string_parts.append(getattr(path_spec.parent, 'comparable', ''))
string_parts.append('type: {0:s}'.format(path_spec.type_indicator))
return ''.join(string_parts)
|
[
"def",
"_GetFileSystemCacheIdentifier",
"(",
"self",
",",
"path_spec",
")",
":",
"string_parts",
"=",
"[",
"]",
"string_parts",
".",
"append",
"(",
"getattr",
"(",
"path_spec",
".",
"parent",
",",
"'comparable'",
",",
"''",
")",
")",
"string_parts",
".",
"append",
"(",
"'type: {0:s}'",
".",
"format",
"(",
"path_spec",
".",
"type_indicator",
")",
")",
"return",
"''",
".",
"join",
"(",
"string_parts",
")"
] | 29.066667
| 21.533333
|
def generate_identifier(sender, instance, **kwargs):
"""
Generate and set identifier of concept before saving object to DB
Args:
sender (class): should be Concept
instance (Concept): saving concept
"""
identifier = Concept.create_identifier(instance.query)
qs = Concept.objects.filter(identifier=identifier, lang=instance.lang)
if instance.pk:
qs = qs.exclude(pk=instance.pk)
if qs.count() > 0:
raise ValueError("Concept identifier conflict")
instance.identifier = identifier
|
[
"def",
"generate_identifier",
"(",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"identifier",
"=",
"Concept",
".",
"create_identifier",
"(",
"instance",
".",
"query",
")",
"qs",
"=",
"Concept",
".",
"objects",
".",
"filter",
"(",
"identifier",
"=",
"identifier",
",",
"lang",
"=",
"instance",
".",
"lang",
")",
"if",
"instance",
".",
"pk",
":",
"qs",
"=",
"qs",
".",
"exclude",
"(",
"pk",
"=",
"instance",
".",
"pk",
")",
"if",
"qs",
".",
"count",
"(",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"\"Concept identifier conflict\"",
")",
"instance",
".",
"identifier",
"=",
"identifier"
] | 35.333333
| 15.066667
|
def getReturnPage(context, prior=False):
'''
This tag makes it easy to get return links from within a template without
requiring custom logic inside the view. Just include
{% getReturnPage as returnPage %} and then reference {{ returnPage.url }}
and {{ returnPage.title }} as needed.
'''
siteHistory = getattr(context.get('request',None),'session',{}).get('SITE_HISTORY',{})
return returnPageHelper(siteHistory,prior=prior)
|
[
"def",
"getReturnPage",
"(",
"context",
",",
"prior",
"=",
"False",
")",
":",
"siteHistory",
"=",
"getattr",
"(",
"context",
".",
"get",
"(",
"'request'",
",",
"None",
")",
",",
"'session'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'SITE_HISTORY'",
",",
"{",
"}",
")",
"return",
"returnPageHelper",
"(",
"siteHistory",
",",
"prior",
"=",
"prior",
")"
] | 50.666667
| 24.888889
|
def init(config, workdir=None, logfile=None, loglevel=logging.INFO, **kwargs):
"""
Initialize the Lago environment
Args:
config(str): Path to LagoInitFile
workdir(str): Path to initalize the workdir, defaults to "$PWD/.lago"
**kwargs(dict): Pass arguments to :func:`~lago.cmd.do_init`
logfile(str): A path to setup a log file.
loglevel(int): :mod:`logging` log level.
Returns:
:class:`~lago.sdk.SDK`: Initialized Lago enviornment
Raises:
:exc:`~lago.utils.LagoException`: If initialization failed
"""
setup_sdk_logging(logfile, loglevel)
defaults = lago_config.get_section('init')
if workdir is None:
workdir = os.path.abspath('.lago')
defaults['workdir'] = workdir
defaults['virt_config'] = config
defaults.update(kwargs)
workdir, prefix = cmd.do_init(**defaults)
return SDK(workdir, prefix)
|
[
"def",
"init",
"(",
"config",
",",
"workdir",
"=",
"None",
",",
"logfile",
"=",
"None",
",",
"loglevel",
"=",
"logging",
".",
"INFO",
",",
"*",
"*",
"kwargs",
")",
":",
"setup_sdk_logging",
"(",
"logfile",
",",
"loglevel",
")",
"defaults",
"=",
"lago_config",
".",
"get_section",
"(",
"'init'",
")",
"if",
"workdir",
"is",
"None",
":",
"workdir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"'.lago'",
")",
"defaults",
"[",
"'workdir'",
"]",
"=",
"workdir",
"defaults",
"[",
"'virt_config'",
"]",
"=",
"config",
"defaults",
".",
"update",
"(",
"kwargs",
")",
"workdir",
",",
"prefix",
"=",
"cmd",
".",
"do_init",
"(",
"*",
"*",
"defaults",
")",
"return",
"SDK",
"(",
"workdir",
",",
"prefix",
")"
] | 32.925926
| 17.814815
|
def calculate_priority(ratios=None, **kwargs):
"Calculates a priority score based on a number of attributes."
if not ratios:
ratios = PRIORITY_FEATURE_WEIGHTS
scores = [DEFAULT_PRIORITY_SCORE]
for key, value in kwargs.items():
if key not in PRIORITY_FEATURE_WEIGHTS:
raise KeyError('The following keyword arguments are supported: '
'{keys}'.format(keys=PRIORITY_FEATURES))
if value is True:
scores.append(PRIORITY_FEATURE_WEIGHTS[key])
return float(sum(scores)) / len(scores)
|
[
"def",
"calculate_priority",
"(",
"ratios",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"ratios",
":",
"ratios",
"=",
"PRIORITY_FEATURE_WEIGHTS",
"scores",
"=",
"[",
"DEFAULT_PRIORITY_SCORE",
"]",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"PRIORITY_FEATURE_WEIGHTS",
":",
"raise",
"KeyError",
"(",
"'The following keyword arguments are supported: '",
"'{keys}'",
".",
"format",
"(",
"keys",
"=",
"PRIORITY_FEATURES",
")",
")",
"if",
"value",
"is",
"True",
":",
"scores",
".",
"append",
"(",
"PRIORITY_FEATURE_WEIGHTS",
"[",
"key",
"]",
")",
"return",
"float",
"(",
"sum",
"(",
"scores",
")",
")",
"/",
"len",
"(",
"scores",
")"
] | 46.583333
| 13.75
|
def needs_repo_fabsetup_custom(func):
'''Decorator, ensures that fabsetup-custom exists and it is a git repo.'''
from fabric.api import local
@wraps(func)
def wrapper(*args, **kwargs):
if not os.path.exists(FABSETUP_CUSTOM_DIR):
msg = '''\
Git repository ~/.fabsetup-custom with configurations does not exist.
This configs are required to use fabsetup.
Clone it if you already have your own fabsetup-custom repository:
git clone <user>@<hostname>:/path/to/fabsetup-custom.git ~/.fabetup-custom
Else, initialize a new repository.
Init a new repository `~/.fabsetup-custom`?'''
if not query_yes_no(msg, default='yes'):
sys.exit('abort')
custom_dir = FABSETUP_CUSTOM_DIR
presetting_dir = join(FABFILE_DATA_DIR,
'presetting-fabsetup-custom')
if not isdir(custom_dir):
print(yellow('\n** ** Init ') +
yellow('~/.fabsetup-custom', bold=True) +
yellow(' ** **\n'))
print(yellow(flo('** Create files in dir {custom_dir} **')))
local(flo('mkdir -p {custom_dir}'))
local(flo('cp -r --no-clobber {presetting_dir}/. {custom_dir}'))
import_fabsetup_custom(globals())
else:
with quiet():
local(flo(
'cp -r --no-clobber {presetting_dir}/. {custom_dir}'))
if not isdir(join(custom_dir, '.git')):
print(yellow(
'\n** Git repo ~/.fabsetup-custom: '
'init and first commit **'))
local(flo('cd {custom_dir} && git init'))
local(flo('cd {custom_dir} && git add .'))
local(flo('cd {custom_dir} && git commit -am "Initial commit"'))
print(yellow("** Done. Don't forget to create a backup of your "
'~/.fabsetup-custom repo **\n'))
print(yellow("** But do not make it public, it's custom **\n",
bold=True))
else:
with quiet():
cmd = flo('cd {custom_dir} && git status --porcelain')
res = local(cmd, capture=True)
if res:
print(yellow('\n** git repo ') +
magenta('~/.fabsetup-custom ') +
yellow('has uncommitted changes: **'))
print(cmd)
print(yellow(res, bold=True))
print(yellow(
"** Don't forget to commit them and make a "
"backup of your repo **\n"))
return func(*args, **kwargs)
return wrapper
|
[
"def",
"needs_repo_fabsetup_custom",
"(",
"func",
")",
":",
"from",
"fabric",
".",
"api",
"import",
"local",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"FABSETUP_CUSTOM_DIR",
")",
":",
"msg",
"=",
"'''\\\nGit repository ~/.fabsetup-custom with configurations does not exist.\nThis configs are required to use fabsetup.\n\nClone it if you already have your own fabsetup-custom repository:\n\n git clone <user>@<hostname>:/path/to/fabsetup-custom.git ~/.fabetup-custom\n\nElse, initialize a new repository.\n\nInit a new repository `~/.fabsetup-custom`?'''",
"if",
"not",
"query_yes_no",
"(",
"msg",
",",
"default",
"=",
"'yes'",
")",
":",
"sys",
".",
"exit",
"(",
"'abort'",
")",
"custom_dir",
"=",
"FABSETUP_CUSTOM_DIR",
"presetting_dir",
"=",
"join",
"(",
"FABFILE_DATA_DIR",
",",
"'presetting-fabsetup-custom'",
")",
"if",
"not",
"isdir",
"(",
"custom_dir",
")",
":",
"print",
"(",
"yellow",
"(",
"'\\n** ** Init '",
")",
"+",
"yellow",
"(",
"'~/.fabsetup-custom'",
",",
"bold",
"=",
"True",
")",
"+",
"yellow",
"(",
"' ** **\\n'",
")",
")",
"print",
"(",
"yellow",
"(",
"flo",
"(",
"'** Create files in dir {custom_dir} **'",
")",
")",
")",
"local",
"(",
"flo",
"(",
"'mkdir -p {custom_dir}'",
")",
")",
"local",
"(",
"flo",
"(",
"'cp -r --no-clobber {presetting_dir}/. {custom_dir}'",
")",
")",
"import_fabsetup_custom",
"(",
"globals",
"(",
")",
")",
"else",
":",
"with",
"quiet",
"(",
")",
":",
"local",
"(",
"flo",
"(",
"'cp -r --no-clobber {presetting_dir}/. {custom_dir}'",
")",
")",
"if",
"not",
"isdir",
"(",
"join",
"(",
"custom_dir",
",",
"'.git'",
")",
")",
":",
"print",
"(",
"yellow",
"(",
"'\\n** Git repo ~/.fabsetup-custom: '",
"'init and first commit **'",
")",
")",
"local",
"(",
"flo",
"(",
"'cd {custom_dir} && git init'",
")",
")",
"local",
"(",
"flo",
"(",
"'cd {custom_dir} && git add .'",
")",
")",
"local",
"(",
"flo",
"(",
"'cd {custom_dir} && git commit -am \"Initial commit\"'",
")",
")",
"print",
"(",
"yellow",
"(",
"\"** Done. Don't forget to create a backup of your \"",
"'~/.fabsetup-custom repo **\\n'",
")",
")",
"print",
"(",
"yellow",
"(",
"\"** But do not make it public, it's custom **\\n\"",
",",
"bold",
"=",
"True",
")",
")",
"else",
":",
"with",
"quiet",
"(",
")",
":",
"cmd",
"=",
"flo",
"(",
"'cd {custom_dir} && git status --porcelain'",
")",
"res",
"=",
"local",
"(",
"cmd",
",",
"capture",
"=",
"True",
")",
"if",
"res",
":",
"print",
"(",
"yellow",
"(",
"'\\n** git repo '",
")",
"+",
"magenta",
"(",
"'~/.fabsetup-custom '",
")",
"+",
"yellow",
"(",
"'has uncommitted changes: **'",
")",
")",
"print",
"(",
"cmd",
")",
"print",
"(",
"yellow",
"(",
"res",
",",
"bold",
"=",
"True",
")",
")",
"print",
"(",
"yellow",
"(",
"\"** Don't forget to commit them and make a \"",
"\"backup of your repo **\\n\"",
")",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] | 44.887097
| 19.5
|
def get_auth_info(self, **params):
"""https://developers.coinbase.com/api/v2#show-authorization-information"""
response = self._get('v2', 'user', 'auth', params=params)
return self._make_api_object(response, APIObject)
|
[
"def",
"get_auth_info",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"response",
"=",
"self",
".",
"_get",
"(",
"'v2'",
",",
"'user'",
",",
"'auth'",
",",
"params",
"=",
"params",
")",
"return",
"self",
".",
"_make_api_object",
"(",
"response",
",",
"APIObject",
")"
] | 59.75
| 12
|
def RemoveKeywordsForName(self, name, keywords):
"""Removes keywords for a name.
Args:
name: A name which should not be associated with some keywords anymore.
keywords: A collection of keywords.
"""
data_store.DB.IndexRemoveKeywordsForName(self.urn, name, keywords)
|
[
"def",
"RemoveKeywordsForName",
"(",
"self",
",",
"name",
",",
"keywords",
")",
":",
"data_store",
".",
"DB",
".",
"IndexRemoveKeywordsForName",
"(",
"self",
".",
"urn",
",",
"name",
",",
"keywords",
")"
] | 35.875
| 18.375
|
def write_hier_all(self, out=sys.stdout,
len_dash=1, max_depth=None, num_child=None, short_prt=False):
"""Write hierarchy for all GO Terms in obo file."""
# Print: [biological_process, molecular_function, and cellular_component]
for go_id in ['GO:0008150', 'GO:0003674', 'GO:0005575']:
self.write_hier(go_id, out, len_dash, max_depth, num_child, short_prt, None)
|
[
"def",
"write_hier_all",
"(",
"self",
",",
"out",
"=",
"sys",
".",
"stdout",
",",
"len_dash",
"=",
"1",
",",
"max_depth",
"=",
"None",
",",
"num_child",
"=",
"None",
",",
"short_prt",
"=",
"False",
")",
":",
"# Print: [biological_process, molecular_function, and cellular_component]",
"for",
"go_id",
"in",
"[",
"'GO:0008150'",
",",
"'GO:0003674'",
",",
"'GO:0005575'",
"]",
":",
"self",
".",
"write_hier",
"(",
"go_id",
",",
"out",
",",
"len_dash",
",",
"max_depth",
",",
"num_child",
",",
"short_prt",
",",
"None",
")"
] | 68.833333
| 25.666667
|
def deps_used(self, pkg, used):
"""Create dependencies dictionary
"""
if find_package(pkg + self.meta.sp, self.meta.pkg_path):
if pkg not in self.deps_dict.values():
self.deps_dict[pkg] = used
else:
self.deps_dict[pkg] += used
|
[
"def",
"deps_used",
"(",
"self",
",",
"pkg",
",",
"used",
")",
":",
"if",
"find_package",
"(",
"pkg",
"+",
"self",
".",
"meta",
".",
"sp",
",",
"self",
".",
"meta",
".",
"pkg_path",
")",
":",
"if",
"pkg",
"not",
"in",
"self",
".",
"deps_dict",
".",
"values",
"(",
")",
":",
"self",
".",
"deps_dict",
"[",
"pkg",
"]",
"=",
"used",
"else",
":",
"self",
".",
"deps_dict",
"[",
"pkg",
"]",
"+=",
"used"
] | 37.375
| 8.875
|
def install(self, **kwargs):
"""
Installs the app in the current user's account.
"""
if self._dxid is not None:
return dxpy.api.app_install(self._dxid, **kwargs)
else:
return dxpy.api.app_install('app-' + self._name, alias=self._alias, **kwargs)
|
[
"def",
"install",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_dxid",
"is",
"not",
"None",
":",
"return",
"dxpy",
".",
"api",
".",
"app_install",
"(",
"self",
".",
"_dxid",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"dxpy",
".",
"api",
".",
"app_install",
"(",
"'app-'",
"+",
"self",
".",
"_name",
",",
"alias",
"=",
"self",
".",
"_alias",
",",
"*",
"*",
"kwargs",
")"
] | 37.75
| 16.25
|
def get_texts(self):
""" Parse documents from a .txt file assuming 1 document per line, yielding lists of filtered tokens """
with self.getstream() as text_stream:
for i, line in enumerate(text_stream):
line = to_unicode(line)
line = (TweetCorpus.case_normalizer or passthrough)(line)
# line = self.case_normalizer(line)
if self.mask is not None and not self.mask[i]:
continue
ngrams = []
for ng in tokens2ngrams((TweetCorpus.tokenizer or str.split)(line), n=self.num_grams):
if self.ignore_matcher(ng):
continue
ngrams += [ng]
if not (i % 1000):
print(line)
print(ngrams)
yield ngrams
|
[
"def",
"get_texts",
"(",
"self",
")",
":",
"with",
"self",
".",
"getstream",
"(",
")",
"as",
"text_stream",
":",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"text_stream",
")",
":",
"line",
"=",
"to_unicode",
"(",
"line",
")",
"line",
"=",
"(",
"TweetCorpus",
".",
"case_normalizer",
"or",
"passthrough",
")",
"(",
"line",
")",
"# line = self.case_normalizer(line)",
"if",
"self",
".",
"mask",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"mask",
"[",
"i",
"]",
":",
"continue",
"ngrams",
"=",
"[",
"]",
"for",
"ng",
"in",
"tokens2ngrams",
"(",
"(",
"TweetCorpus",
".",
"tokenizer",
"or",
"str",
".",
"split",
")",
"(",
"line",
")",
",",
"n",
"=",
"self",
".",
"num_grams",
")",
":",
"if",
"self",
".",
"ignore_matcher",
"(",
"ng",
")",
":",
"continue",
"ngrams",
"+=",
"[",
"ng",
"]",
"if",
"not",
"(",
"i",
"%",
"1000",
")",
":",
"print",
"(",
"line",
")",
"print",
"(",
"ngrams",
")",
"yield",
"ngrams"
] | 47.111111
| 13.555556
|
def fll(self, value):
"""
Returns loglike of value
"""
self.stochastic.value = value
try:
ll = self.loglike
except ZeroProbability:
ll = -np.infty
self.stochastic.revert()
return ll
|
[
"def",
"fll",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"stochastic",
".",
"value",
"=",
"value",
"try",
":",
"ll",
"=",
"self",
".",
"loglike",
"except",
"ZeroProbability",
":",
"ll",
"=",
"-",
"np",
".",
"infty",
"self",
".",
"stochastic",
".",
"revert",
"(",
")",
"return",
"ll"
] | 23.545455
| 11.181818
|
def p_array_literal_2(self, p):
"""array_literal : LBRACKET element_list RBRACKET
| LBRACKET element_list COMMA elision_opt RBRACKET
"""
items = p[2]
if len(p) == 6:
items.extend(p[4])
p[0] = self.asttypes.Array(items=items)
p[0].setpos(p)
|
[
"def",
"p_array_literal_2",
"(",
"self",
",",
"p",
")",
":",
"items",
"=",
"p",
"[",
"2",
"]",
"if",
"len",
"(",
"p",
")",
"==",
"6",
":",
"items",
".",
"extend",
"(",
"p",
"[",
"4",
"]",
")",
"p",
"[",
"0",
"]",
"=",
"self",
".",
"asttypes",
".",
"Array",
"(",
"items",
"=",
"items",
")",
"p",
"[",
"0",
"]",
".",
"setpos",
"(",
"p",
")"
] | 35.111111
| 12.888889
|
def overlapping_bins(start, stop=None):
"""
Given an interval `start:stop`, return bins for intervals *overlapping*
`start:stop` by at least one position. The order is according to the bin
level (starting with the smallest bins), and within a level according to
the bin number (ascending).
:arg int start, stop: Interval positions (zero-based, open-ended). If
`stop` is not provided, the interval is assumed to be of length 1
(equivalent to `stop = start + 1`).
:return: All bins for intervals overlapping `start:stop`, ordered first
according to bin level (ascending) and then according to bin number
(ascending).
:rtype: list(int)
:raise OutOfRangeError: If `start:stop` exceeds the range of the binning
scheme.
"""
if stop is None:
stop = start + 1
return [bin
for first, last in range_per_level(start, stop)
for bin in range(first, last + 1)]
|
[
"def",
"overlapping_bins",
"(",
"start",
",",
"stop",
"=",
"None",
")",
":",
"if",
"stop",
"is",
"None",
":",
"stop",
"=",
"start",
"+",
"1",
"return",
"[",
"bin",
"for",
"first",
",",
"last",
"in",
"range_per_level",
"(",
"start",
",",
"stop",
")",
"for",
"bin",
"in",
"range",
"(",
"first",
",",
"last",
"+",
"1",
")",
"]"
] | 37.44
| 24
|
def calculate_rsq(self):
"""calculate_rsq calculates coefficient of determination, or r-squared, defined here as 1.0 - SS_res / SS_tot. rsq is only calculated for those timepoints in the data for which the design matrix is non-zero.
"""
assert hasattr(self, 'betas'), 'no betas found, please run regression before rsq'
explained_times = self.design_matrix.sum(axis = 0) != 0
explained_signal = self.predict_from_design_matrix(self.design_matrix)
self.rsq = 1.0 - np.sum((explained_signal[:,explained_times] - self.resampled_signal[:,explained_times])**2, axis = -1) / np.sum(self.resampled_signal[:,explained_times].squeeze()**2, axis = -1)
self.ssr = np.sum((explained_signal[:,explained_times] - self.resampled_signal[:,explained_times])**2, axis = -1)
return np.squeeze(self.rsq)
|
[
"def",
"calculate_rsq",
"(",
"self",
")",
":",
"assert",
"hasattr",
"(",
"self",
",",
"'betas'",
")",
",",
"'no betas found, please run regression before rsq'",
"explained_times",
"=",
"self",
".",
"design_matrix",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"!=",
"0",
"explained_signal",
"=",
"self",
".",
"predict_from_design_matrix",
"(",
"self",
".",
"design_matrix",
")",
"self",
".",
"rsq",
"=",
"1.0",
"-",
"np",
".",
"sum",
"(",
"(",
"explained_signal",
"[",
":",
",",
"explained_times",
"]",
"-",
"self",
".",
"resampled_signal",
"[",
":",
",",
"explained_times",
"]",
")",
"**",
"2",
",",
"axis",
"=",
"-",
"1",
")",
"/",
"np",
".",
"sum",
"(",
"self",
".",
"resampled_signal",
"[",
":",
",",
"explained_times",
"]",
".",
"squeeze",
"(",
")",
"**",
"2",
",",
"axis",
"=",
"-",
"1",
")",
"self",
".",
"ssr",
"=",
"np",
".",
"sum",
"(",
"(",
"explained_signal",
"[",
":",
",",
"explained_times",
"]",
"-",
"self",
".",
"resampled_signal",
"[",
":",
",",
"explained_times",
"]",
")",
"**",
"2",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"np",
".",
"squeeze",
"(",
"self",
".",
"rsq",
")"
] | 76.181818
| 41.272727
|
def multi_split(text, regexes):
"""
Split the text by the given regexes, in priority order.
Make sure that the regex is parenthesized so that matches are returned in
re.split().
Splitting on a single regex works like normal split.
>>> '|'.join(multi_split('one two three', [r'\w+']))
'one| |two| |three'
Splitting on digits first separates the digits from their word
>>> '|'.join(multi_split('one234five 678', [r'\d+', r'\w+']))
'one|234|five| |678'
Splitting on words first keeps the word with digits intact.
>>> '|'.join(multi_split('one234five 678', [r'\w+', r'\d+']))
'one234five| |678'
"""
def make_regex(s):
return re.compile(s) if isinstance(s, basestring) else s
regexes = [make_regex(r) for r in regexes]
# Run the list of pieces through the regex split, splitting it into more
# pieces. Once a piece has been matched, add it to finished_pieces and
# don't split it again. The pieces should always join back together to form
# the original text.
piece_list = [text]
finished_pieces = set()
def apply_re(regex, piece_list):
for piece in piece_list:
if piece in finished_pieces:
yield piece
continue
for s in full_split(piece, regex):
if regex.match(s):
finished_pieces.add(s)
if s:
yield s
for regex in regexes:
piece_list = list(apply_re(regex, piece_list))
assert ''.join(piece_list) == text
return piece_list
|
[
"def",
"multi_split",
"(",
"text",
",",
"regexes",
")",
":",
"def",
"make_regex",
"(",
"s",
")",
":",
"return",
"re",
".",
"compile",
"(",
"s",
")",
"if",
"isinstance",
"(",
"s",
",",
"basestring",
")",
"else",
"s",
"regexes",
"=",
"[",
"make_regex",
"(",
"r",
")",
"for",
"r",
"in",
"regexes",
"]",
"# Run the list of pieces through the regex split, splitting it into more",
"# pieces. Once a piece has been matched, add it to finished_pieces and",
"# don't split it again. The pieces should always join back together to form",
"# the original text.",
"piece_list",
"=",
"[",
"text",
"]",
"finished_pieces",
"=",
"set",
"(",
")",
"def",
"apply_re",
"(",
"regex",
",",
"piece_list",
")",
":",
"for",
"piece",
"in",
"piece_list",
":",
"if",
"piece",
"in",
"finished_pieces",
":",
"yield",
"piece",
"continue",
"for",
"s",
"in",
"full_split",
"(",
"piece",
",",
"regex",
")",
":",
"if",
"regex",
".",
"match",
"(",
"s",
")",
":",
"finished_pieces",
".",
"add",
"(",
"s",
")",
"if",
"s",
":",
"yield",
"s",
"for",
"regex",
"in",
"regexes",
":",
"piece_list",
"=",
"list",
"(",
"apply_re",
"(",
"regex",
",",
"piece_list",
")",
")",
"assert",
"''",
".",
"join",
"(",
"piece_list",
")",
"==",
"text",
"return",
"piece_list"
] | 35.045455
| 19.363636
|
def add_entity_meta(self, datastream, options, data):
"""
To add entity meta data to a datastream
:param datastream: string
:param options: dict
:param data: list
"""
url = '/datastream/' + str(datastream) + '/entityMeta'
response = self.http.post(url, data)
entityMetaList = []
for entityMeta in response:
entityMetaList.append(Schemas.EntityMeta(entityMeta=entityMeta))
return entityMetaList
|
[
"def",
"add_entity_meta",
"(",
"self",
",",
"datastream",
",",
"options",
",",
"data",
")",
":",
"url",
"=",
"'/datastream/'",
"+",
"str",
"(",
"datastream",
")",
"+",
"'/entityMeta'",
"response",
"=",
"self",
".",
"http",
".",
"post",
"(",
"url",
",",
"data",
")",
"entityMetaList",
"=",
"[",
"]",
"for",
"entityMeta",
"in",
"response",
":",
"entityMetaList",
".",
"append",
"(",
"Schemas",
".",
"EntityMeta",
"(",
"entityMeta",
"=",
"entityMeta",
")",
")",
"return",
"entityMetaList"
] | 37
| 11.153846
|
def uniform_random_global_points(n=100):
"""
Returns an array of `n` uniformally distributed `shapely.geometry.Point` objects. Points are coordinates
distributed equivalently across the Earth's surface.
"""
xs = np.random.uniform(-180, 180, n)
ys = np.random.uniform(-90, 90, n)
return [shapely.geometry.Point(x, y) for x, y in zip(xs, ys)]
|
[
"def",
"uniform_random_global_points",
"(",
"n",
"=",
"100",
")",
":",
"xs",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"-",
"180",
",",
"180",
",",
"n",
")",
"ys",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"-",
"90",
",",
"90",
",",
"n",
")",
"return",
"[",
"shapely",
".",
"geometry",
".",
"Point",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"xs",
",",
"ys",
")",
"]"
] | 45.125
| 13.875
|
def get_cv_pattern(self, word, pprint=False):
"""
input = iparras
pattern = [('V', 1, 'i'), ('C', 1, 'p'), ('V', 2, 'a'), ('C', 2, 'r'),
('C', 2, 'r'), ('V', 2, 'a'), ('C', 3, 's')]
pprint = V₁C₁V₂C₂C₂V₂C₃
"""
subscripts = {
1: '₁',
2: '₂',
3: '₃',
4: '₄',
5: '₅',
6: '₆',
7: '₇',
8: '₈',
9: '₉',
0: '₀'
}
pattern = []
c_count = 1
v_count = 1
count = 0
for char in word:
if char in self.akkadian['consonants']:
cv = 'C'
else:
cv = 'V'
# remove length:
if char in self.akkadian['macron_vowels']:
char = self.akkadian['short_vowels'][self.akkadian['macron_vowels'].index(char)]
elif char in self.akkadian['circumflex_vowels']:
char = self.akkadian['short_vowels'][self.akkadian['circumflex_vowels'].index(char)]
if char not in [x[2] for x in pattern]:
if cv == 'C':
count = c_count
c_count += 1
elif cv == 'V':
count = v_count
v_count += 1
pattern.append((cv, count, char))
elif char in [x[2] for x in pattern]:
pattern.append((cv, next(x[1] for x in pattern if x[2] == char), char))
if pprint:
output = ''
for item in pattern:
output += (item[0] + subscripts[item[1]])
return output
return pattern
|
[
"def",
"get_cv_pattern",
"(",
"self",
",",
"word",
",",
"pprint",
"=",
"False",
")",
":",
"subscripts",
"=",
"{",
"1",
":",
"'₁',",
"",
"2",
":",
"'₂',",
"",
"3",
":",
"'₃',",
"",
"4",
":",
"'₄',",
"",
"5",
":",
"'₅',",
"",
"6",
":",
"'₆',",
"",
"7",
":",
"'₇',",
"",
"8",
":",
"'₈',",
"",
"9",
":",
"'₉',",
"",
"0",
":",
"'₀'",
"}",
"pattern",
"=",
"[",
"]",
"c_count",
"=",
"1",
"v_count",
"=",
"1",
"count",
"=",
"0",
"for",
"char",
"in",
"word",
":",
"if",
"char",
"in",
"self",
".",
"akkadian",
"[",
"'consonants'",
"]",
":",
"cv",
"=",
"'C'",
"else",
":",
"cv",
"=",
"'V'",
"# remove length:",
"if",
"char",
"in",
"self",
".",
"akkadian",
"[",
"'macron_vowels'",
"]",
":",
"char",
"=",
"self",
".",
"akkadian",
"[",
"'short_vowels'",
"]",
"[",
"self",
".",
"akkadian",
"[",
"'macron_vowels'",
"]",
".",
"index",
"(",
"char",
")",
"]",
"elif",
"char",
"in",
"self",
".",
"akkadian",
"[",
"'circumflex_vowels'",
"]",
":",
"char",
"=",
"self",
".",
"akkadian",
"[",
"'short_vowels'",
"]",
"[",
"self",
".",
"akkadian",
"[",
"'circumflex_vowels'",
"]",
".",
"index",
"(",
"char",
")",
"]",
"if",
"char",
"not",
"in",
"[",
"x",
"[",
"2",
"]",
"for",
"x",
"in",
"pattern",
"]",
":",
"if",
"cv",
"==",
"'C'",
":",
"count",
"=",
"c_count",
"c_count",
"+=",
"1",
"elif",
"cv",
"==",
"'V'",
":",
"count",
"=",
"v_count",
"v_count",
"+=",
"1",
"pattern",
".",
"append",
"(",
"(",
"cv",
",",
"count",
",",
"char",
")",
")",
"elif",
"char",
"in",
"[",
"x",
"[",
"2",
"]",
"for",
"x",
"in",
"pattern",
"]",
":",
"pattern",
".",
"append",
"(",
"(",
"cv",
",",
"next",
"(",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"pattern",
"if",
"x",
"[",
"2",
"]",
"==",
"char",
")",
",",
"char",
")",
")",
"if",
"pprint",
":",
"output",
"=",
"''",
"for",
"item",
"in",
"pattern",
":",
"output",
"+=",
"(",
"item",
"[",
"0",
"]",
"+",
"subscripts",
"[",
"item",
"[",
"1",
"]",
"]",
")",
"return",
"output",
"return",
"pattern"
] | 33.918367
| 18.571429
|
def bytenet_internal(inputs, targets, hparams):
"""ByteNet, main step used for training."""
with tf.variable_scope("bytenet"):
# Flatten inputs and extend length by 50%.
inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2)
extend_length = tf.to_int32(0.5 * tf.to_float(tf.shape(inputs)[1]))
inputs_shape = inputs.shape.as_list()
inputs = tf.pad(inputs, [[0, 0], [0, extend_length], [0, 0], [0, 0]])
inputs_shape[1] = None
inputs.set_shape(inputs_shape) # Don't lose the other shapes when padding.
# Pad inputs and targets to be the same length, divisible by 50.
inputs, targets = common_layers.pad_to_same_length(
inputs, targets, final_length_divisible_by=50)
final_encoder = residual_dilated_conv(inputs, hparams.num_block_repeat,
"SAME", "encoder", hparams)
shifted_targets = common_layers.shift_right(targets)
kernel = (hparams.kernel_height, hparams.kernel_width)
decoder_start = common_layers.conv_block(
tf.concat([final_encoder, shifted_targets], axis=3),
hparams.hidden_size, [((1, 1), kernel)],
padding="LEFT")
return residual_dilated_conv(decoder_start, hparams.num_block_repeat,
"LEFT", "decoder", hparams)
|
[
"def",
"bytenet_internal",
"(",
"inputs",
",",
"targets",
",",
"hparams",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"bytenet\"",
")",
":",
"# Flatten inputs and extend length by 50%.",
"inputs",
"=",
"tf",
".",
"expand_dims",
"(",
"common_layers",
".",
"flatten4d3d",
"(",
"inputs",
")",
",",
"axis",
"=",
"2",
")",
"extend_length",
"=",
"tf",
".",
"to_int32",
"(",
"0.5",
"*",
"tf",
".",
"to_float",
"(",
"tf",
".",
"shape",
"(",
"inputs",
")",
"[",
"1",
"]",
")",
")",
"inputs_shape",
"=",
"inputs",
".",
"shape",
".",
"as_list",
"(",
")",
"inputs",
"=",
"tf",
".",
"pad",
"(",
"inputs",
",",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"extend_length",
"]",
",",
"[",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
")",
"inputs_shape",
"[",
"1",
"]",
"=",
"None",
"inputs",
".",
"set_shape",
"(",
"inputs_shape",
")",
"# Don't lose the other shapes when padding.",
"# Pad inputs and targets to be the same length, divisible by 50.",
"inputs",
",",
"targets",
"=",
"common_layers",
".",
"pad_to_same_length",
"(",
"inputs",
",",
"targets",
",",
"final_length_divisible_by",
"=",
"50",
")",
"final_encoder",
"=",
"residual_dilated_conv",
"(",
"inputs",
",",
"hparams",
".",
"num_block_repeat",
",",
"\"SAME\"",
",",
"\"encoder\"",
",",
"hparams",
")",
"shifted_targets",
"=",
"common_layers",
".",
"shift_right",
"(",
"targets",
")",
"kernel",
"=",
"(",
"hparams",
".",
"kernel_height",
",",
"hparams",
".",
"kernel_width",
")",
"decoder_start",
"=",
"common_layers",
".",
"conv_block",
"(",
"tf",
".",
"concat",
"(",
"[",
"final_encoder",
",",
"shifted_targets",
"]",
",",
"axis",
"=",
"3",
")",
",",
"hparams",
".",
"hidden_size",
",",
"[",
"(",
"(",
"1",
",",
"1",
")",
",",
"kernel",
")",
"]",
",",
"padding",
"=",
"\"LEFT\"",
")",
"return",
"residual_dilated_conv",
"(",
"decoder_start",
",",
"hparams",
".",
"num_block_repeat",
",",
"\"LEFT\"",
",",
"\"decoder\"",
",",
"hparams",
")"
] | 51.12
| 20.12
|
def send(self, to, from_, body, dm=False):
"""
Send BODY as an @message from FROM to TO
If we don't have the access tokens for FROM, raise AccountNotFoundError.
If the tweet resulting from '@{0} {1}'.format(TO, BODY) is > 140 chars
raise TweetTooLongError.
If we want to send this message as a DM, do so.
Arguments:
- `to`: str
- `from_`: str
- `body`: str
- `dm`: [optional] bool
Return: None
Exceptions: AccountNotFoundError
TweetTooLongError
"""
tweet = '@{0} {1}'.format(to, body)
if from_ not in self.accounts:
raise AccountNotFoundError()
if len(tweet) > 140:
raise TweetTooLongError()
self.auth.set_access_token(*self.accounts.get(from_))
api = tweepy.API(self.auth)
if dm:
api.send_direct_message(screen_name=to, text=body)
else:
api.update_status(tweet)
return
|
[
"def",
"send",
"(",
"self",
",",
"to",
",",
"from_",
",",
"body",
",",
"dm",
"=",
"False",
")",
":",
"tweet",
"=",
"'@{0} {1}'",
".",
"format",
"(",
"to",
",",
"body",
")",
"if",
"from_",
"not",
"in",
"self",
".",
"accounts",
":",
"raise",
"AccountNotFoundError",
"(",
")",
"if",
"len",
"(",
"tweet",
")",
">",
"140",
":",
"raise",
"TweetTooLongError",
"(",
")",
"self",
".",
"auth",
".",
"set_access_token",
"(",
"*",
"self",
".",
"accounts",
".",
"get",
"(",
"from_",
")",
")",
"api",
"=",
"tweepy",
".",
"API",
"(",
"self",
".",
"auth",
")",
"if",
"dm",
":",
"api",
".",
"send_direct_message",
"(",
"screen_name",
"=",
"to",
",",
"text",
"=",
"body",
")",
"else",
":",
"api",
".",
"update_status",
"(",
"tweet",
")",
"return"
] | 29
| 18.058824
|
def write(self, session, data):
"""Writes data to device or interface synchronously.
Corresponds to viWrite function of the VISA library.
:param session: Unique logical identifier to a session.
:param data: data to be written.
:type data: str
:return: Number of bytes actually transferred, return value of the library call.
:rtype: int, :class:`pyvisa.constants.StatusCode`
"""
try:
sess = self.sessions[session]
except KeyError:
return constants.StatusCode.error_invalid_object
try:
return sess.write(data)
except AttributeError:
return constants.StatusCode.error_nonsupported_operation
|
[
"def",
"write",
"(",
"self",
",",
"session",
",",
"data",
")",
":",
"try",
":",
"sess",
"=",
"self",
".",
"sessions",
"[",
"session",
"]",
"except",
"KeyError",
":",
"return",
"constants",
".",
"StatusCode",
".",
"error_invalid_object",
"try",
":",
"return",
"sess",
".",
"write",
"(",
"data",
")",
"except",
"AttributeError",
":",
"return",
"constants",
".",
"StatusCode",
".",
"error_nonsupported_operation"
] | 34.047619
| 20.47619
|
def process_orders(self, orderbook):
''' Default and costant orders processor. Overwrite it for more
sophisticated strategies '''
for stock, alloc in orderbook.iteritems():
self.logger.info('{}: Ordered {} {} stocks'.format(
self.datetime, stock, alloc))
if isinstance(alloc, int):
self.order(stock, alloc)
elif isinstance(alloc, float) and \
alloc >= -1 and alloc <= 1:
self.order_percent(stock, alloc)
else:
self.logger.warning(
'{}: invalid order for {}: {})'
.format(self.datetime, stock, alloc))
|
[
"def",
"process_orders",
"(",
"self",
",",
"orderbook",
")",
":",
"for",
"stock",
",",
"alloc",
"in",
"orderbook",
".",
"iteritems",
"(",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'{}: Ordered {} {} stocks'",
".",
"format",
"(",
"self",
".",
"datetime",
",",
"stock",
",",
"alloc",
")",
")",
"if",
"isinstance",
"(",
"alloc",
",",
"int",
")",
":",
"self",
".",
"order",
"(",
"stock",
",",
"alloc",
")",
"elif",
"isinstance",
"(",
"alloc",
",",
"float",
")",
"and",
"alloc",
">=",
"-",
"1",
"and",
"alloc",
"<=",
"1",
":",
"self",
".",
"order_percent",
"(",
"stock",
",",
"alloc",
")",
"else",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"'{}: invalid order for {}: {})'",
".",
"format",
"(",
"self",
".",
"datetime",
",",
"stock",
",",
"alloc",
")",
")"
] | 45.466667
| 10.4
|
def previous_unwrittable_on_col(view, coords):
"""Return position of the previous (in column) letter that is unwrittable"""
x, y = coords
miny = -1
for offset in range(y - 1, miny, -1):
letter = view[x, offset]
if letter not in REWRITABLE_LETTERS:
return offset
return None
|
[
"def",
"previous_unwrittable_on_col",
"(",
"view",
",",
"coords",
")",
":",
"x",
",",
"y",
"=",
"coords",
"miny",
"=",
"-",
"1",
"for",
"offset",
"in",
"range",
"(",
"y",
"-",
"1",
",",
"miny",
",",
"-",
"1",
")",
":",
"letter",
"=",
"view",
"[",
"x",
",",
"offset",
"]",
"if",
"letter",
"not",
"in",
"REWRITABLE_LETTERS",
":",
"return",
"offset",
"return",
"None"
] | 34.777778
| 12.111111
|
def get_clusters_interfaces(clusters, extra_cond=lambda nic: True):
""" Returns for each cluster the available cluster interfaces
Args:
clusters (str): list of the clusters
extra_cond (lambda): extra predicate to filter network card retrieved
from the API. E.g lambda nic: not nic['mounted'] will retrieve all the
usable network cards that are not mounted by default.
Returns:
dict of cluster with their associated nic names
Examples:
.. code-block:: python
# pseudo code
actual = get_clusters_interfaces(["paravance"])
expected = {"paravance": ["eth0", "eth1"]}
assertDictEquals(expected, actual)
"""
interfaces = {}
for cluster in clusters:
nics = get_cluster_interfaces(cluster, extra_cond=extra_cond)
interfaces.setdefault(cluster, nics)
return interfaces
|
[
"def",
"get_clusters_interfaces",
"(",
"clusters",
",",
"extra_cond",
"=",
"lambda",
"nic",
":",
"True",
")",
":",
"interfaces",
"=",
"{",
"}",
"for",
"cluster",
"in",
"clusters",
":",
"nics",
"=",
"get_cluster_interfaces",
"(",
"cluster",
",",
"extra_cond",
"=",
"extra_cond",
")",
"interfaces",
".",
"setdefault",
"(",
"cluster",
",",
"nics",
")",
"return",
"interfaces"
] | 32.407407
| 22.555556
|
def azimuth(self, point):
"""
Compute the azimuth (in decimal degrees) between this point
and the given point.
:param point:
Destination point.
:type point:
Instance of :class:`Point`
:returns:
The azimuth, value in a range ``[0, 360)``.
:rtype:
float
"""
return geodetic.azimuth(self.longitude, self.latitude,
point.longitude, point.latitude)
|
[
"def",
"azimuth",
"(",
"self",
",",
"point",
")",
":",
"return",
"geodetic",
".",
"azimuth",
"(",
"self",
".",
"longitude",
",",
"self",
".",
"latitude",
",",
"point",
".",
"longitude",
",",
"point",
".",
"latitude",
")"
] | 30.0625
| 17.3125
|
def write(self, stream):
'''
Write PLY data to a writeable file-like object or filename.
'''
(must_close, stream) = _open_stream(stream, 'write')
try:
stream.write(self.header.encode('ascii'))
stream.write(b'\n')
for elt in self:
elt._write(stream, self.text, self.byte_order)
finally:
if must_close:
stream.close()
|
[
"def",
"write",
"(",
"self",
",",
"stream",
")",
":",
"(",
"must_close",
",",
"stream",
")",
"=",
"_open_stream",
"(",
"stream",
",",
"'write'",
")",
"try",
":",
"stream",
".",
"write",
"(",
"self",
".",
"header",
".",
"encode",
"(",
"'ascii'",
")",
")",
"stream",
".",
"write",
"(",
"b'\\n'",
")",
"for",
"elt",
"in",
"self",
":",
"elt",
".",
"_write",
"(",
"stream",
",",
"self",
".",
"text",
",",
"self",
".",
"byte_order",
")",
"finally",
":",
"if",
"must_close",
":",
"stream",
".",
"close",
"(",
")"
] | 30.785714
| 20.928571
|
def drawBezier(page, p1, p2, p3, p4, color=None, fill=None,
dashes=None, width=1, morph=None,
closePath=False, roundCap=False, overlay=True):
"""Draw a general cubic Bezier curve from p1 to p4 using control points p2 and p3.
"""
img = page.newShape()
Q = img.drawBezier(Point(p1), Point(p2), Point(p3), Point(p4))
img.finish(color=color, fill=fill, dashes=dashes, width=width,
roundCap=roundCap, morph=morph, closePath=closePath)
img.commit(overlay)
return Q
|
[
"def",
"drawBezier",
"(",
"page",
",",
"p1",
",",
"p2",
",",
"p3",
",",
"p4",
",",
"color",
"=",
"None",
",",
"fill",
"=",
"None",
",",
"dashes",
"=",
"None",
",",
"width",
"=",
"1",
",",
"morph",
"=",
"None",
",",
"closePath",
"=",
"False",
",",
"roundCap",
"=",
"False",
",",
"overlay",
"=",
"True",
")",
":",
"img",
"=",
"page",
".",
"newShape",
"(",
")",
"Q",
"=",
"img",
".",
"drawBezier",
"(",
"Point",
"(",
"p1",
")",
",",
"Point",
"(",
"p2",
")",
",",
"Point",
"(",
"p3",
")",
",",
"Point",
"(",
"p4",
")",
")",
"img",
".",
"finish",
"(",
"color",
"=",
"color",
",",
"fill",
"=",
"fill",
",",
"dashes",
"=",
"dashes",
",",
"width",
"=",
"width",
",",
"roundCap",
"=",
"roundCap",
",",
"morph",
"=",
"morph",
",",
"closePath",
"=",
"closePath",
")",
"img",
".",
"commit",
"(",
"overlay",
")",
"return",
"Q"
] | 43.75
| 18.666667
|
def _setup_argparse(self):
"""Create `argparse` instance, and setup with appropriate parameters.
"""
parser = argparse.ArgumentParser(
prog='catalog', description='Parent Catalog class for astrocats.')
subparsers = parser.add_subparsers(
description='valid subcommands', dest='subcommand')
# Data Import
# -----------
# Add the 'import' command, and related arguments
self._add_parser_arguments_import(subparsers)
# Git Subcommands
# ---------------
self._add_parser_arguments_git(subparsers)
# Analyze Catalogs
# ----------------
# Add the 'analyze' command, and related arguments
self._add_parser_arguments_analyze(subparsers)
return parser
|
[
"def",
"_setup_argparse",
"(",
"self",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"'catalog'",
",",
"description",
"=",
"'Parent Catalog class for astrocats.'",
")",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
"description",
"=",
"'valid subcommands'",
",",
"dest",
"=",
"'subcommand'",
")",
"# Data Import",
"# -----------",
"# Add the 'import' command, and related arguments",
"self",
".",
"_add_parser_arguments_import",
"(",
"subparsers",
")",
"# Git Subcommands",
"# ---------------",
"self",
".",
"_add_parser_arguments_git",
"(",
"subparsers",
")",
"# Analyze Catalogs",
"# ----------------",
"# Add the 'analyze' command, and related arguments",
"self",
".",
"_add_parser_arguments_analyze",
"(",
"subparsers",
")",
"return",
"parser"
] | 32.333333
| 19.416667
|
def create_image_stream(self, name, docker_image_repository,
insecure_registry=False):
"""
Create an ImageStream object
Raises exception on error
:param name: str, name of ImageStream
:param docker_image_repository: str, pull spec for docker image
repository
:param insecure_registry: bool, whether plain HTTP should be used
:return: response
"""
img_stream_file = os.path.join(self.os_conf.get_build_json_store(), 'image_stream.json')
with open(img_stream_file) as f:
stream = json.load(f)
stream['metadata']['name'] = name
stream['metadata'].setdefault('annotations', {})
stream['metadata']['annotations'][ANNOTATION_SOURCE_REPO] = docker_image_repository
if insecure_registry:
stream['metadata']['annotations'][ANNOTATION_INSECURE_REPO] = 'true'
return self.os.create_image_stream(json.dumps(stream))
|
[
"def",
"create_image_stream",
"(",
"self",
",",
"name",
",",
"docker_image_repository",
",",
"insecure_registry",
"=",
"False",
")",
":",
"img_stream_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"os_conf",
".",
"get_build_json_store",
"(",
")",
",",
"'image_stream.json'",
")",
"with",
"open",
"(",
"img_stream_file",
")",
"as",
"f",
":",
"stream",
"=",
"json",
".",
"load",
"(",
"f",
")",
"stream",
"[",
"'metadata'",
"]",
"[",
"'name'",
"]",
"=",
"name",
"stream",
"[",
"'metadata'",
"]",
".",
"setdefault",
"(",
"'annotations'",
",",
"{",
"}",
")",
"stream",
"[",
"'metadata'",
"]",
"[",
"'annotations'",
"]",
"[",
"ANNOTATION_SOURCE_REPO",
"]",
"=",
"docker_image_repository",
"if",
"insecure_registry",
":",
"stream",
"[",
"'metadata'",
"]",
"[",
"'annotations'",
"]",
"[",
"ANNOTATION_INSECURE_REPO",
"]",
"=",
"'true'",
"return",
"self",
".",
"os",
".",
"create_image_stream",
"(",
"json",
".",
"dumps",
"(",
"stream",
")",
")"
] | 42.217391
| 20.304348
|
def name(self):
"""Returns the name of the device.
Args:
self (JLinkDeviceInfo): the ``JLinkDeviceInfo`` instance
Returns:
Device name.
"""
return ctypes.cast(self.sName, ctypes.c_char_p).value.decode()
|
[
"def",
"name",
"(",
"self",
")",
":",
"return",
"ctypes",
".",
"cast",
"(",
"self",
".",
"sName",
",",
"ctypes",
".",
"c_char_p",
")",
".",
"value",
".",
"decode",
"(",
")"
] | 25.5
| 23
|
def unaccentuate(s):
""" Replace accentuated chars in string by their non accentuated equivalent. """
return "".join(c for c in unicodedata.normalize("NFKD", s) if not unicodedata.combining(c))
|
[
"def",
"unaccentuate",
"(",
"s",
")",
":",
"return",
"\"\"",
".",
"join",
"(",
"c",
"for",
"c",
"in",
"unicodedata",
".",
"normalize",
"(",
"\"NFKD\"",
",",
"s",
")",
"if",
"not",
"unicodedata",
".",
"combining",
"(",
"c",
")",
")"
] | 66.333333
| 25
|
def add_output_file(self, filename):
"""
Add filename as a output file for this DAG node.
@param filename: output filename to add
"""
if filename not in self.__output_files:
self.__output_files.append(filename)
if not isinstance(self.job(), CondorDAGManJob):
if self.job().get_universe() == 'grid':
self.add_output_macro(filename)
|
[
"def",
"add_output_file",
"(",
"self",
",",
"filename",
")",
":",
"if",
"filename",
"not",
"in",
"self",
".",
"__output_files",
":",
"self",
".",
"__output_files",
".",
"append",
"(",
"filename",
")",
"if",
"not",
"isinstance",
"(",
"self",
".",
"job",
"(",
")",
",",
"CondorDAGManJob",
")",
":",
"if",
"self",
".",
"job",
"(",
")",
".",
"get_universe",
"(",
")",
"==",
"'grid'",
":",
"self",
".",
"add_output_macro",
"(",
"filename",
")"
] | 33.727273
| 7.727273
|
def readTuple(self, line, n=3):
""" Reads a tuple of numbers. e.g. vertices, normals or teture coords.
"""
numbers = [num for num in line.split(' ') if num]
return [float(num) for num in numbers[1:n + 1]]
|
[
"def",
"readTuple",
"(",
"self",
",",
"line",
",",
"n",
"=",
"3",
")",
":",
"numbers",
"=",
"[",
"num",
"for",
"num",
"in",
"line",
".",
"split",
"(",
"' '",
")",
"if",
"num",
"]",
"return",
"[",
"float",
"(",
"num",
")",
"for",
"num",
"in",
"numbers",
"[",
"1",
":",
"n",
"+",
"1",
"]",
"]"
] | 46.4
| 8.2
|
def compose_MDAL_dic(self, site, point_type,
start, end, var, agg, window, aligned, points=None, return_names=False):
""" Create dictionary for MDAL request.
Parameters
----------
site : str
Building name.
start : str
Start date - 'YYYY-MM-DDTHH:MM:SSZ'
end : str
End date - 'YYYY-MM-DDTHH:MM:SSZ'
point_type : str
Type of data, i.e. Green_Button_Meter, Building_Electric_Meter...
var : str
Variable - "meter", "weather"...
agg : str
Aggregation - MEAN, SUM, RAW...
window : str
Size of the moving window.
aligned : bool
???
return_names : bool
???
Returns
-------
(df, mapping, context)
???
"""
# Convert time to UTC
start = self.convert_to_utc(start)
end = self.convert_to_utc(end)
request = {}
# Add Time Details - single set for one or multiple series
request['Time'] = {
'Start': start,
'End': end,
'Window': window,
'Aligned': aligned
}
# Define Variables
request["Variables"] = {}
request['Composition'] = []
request['Aggregation'] = {}
if isinstance(point_type, str): # if point_type is a string -> single type of point requested
request["Variables"][var] = self.compose_BRICK_query(point_type=point_type,site=site) # pass one point type at the time
request['Composition'] = [var]
request['Aggregation'][var] = [agg]
elif isinstance(point_type, list): # loop through all the point_types and create one section of the brick query at the time
for idx, point in enumerate(point_type):
request["Variables"][var[idx]] = self.compose_BRICK_query(point_type=point,site=site) # pass one point type at the time
request['Composition'].append(var[idx])
if isinstance(agg, str): # if agg is a string -> single type of aggregation requested
request['Aggregation'][var[idx]] = [agg]
elif isinstance(agg, list): # if agg is a list -> expected one agg per point
request['Aggregation'][var[idx]] = [agg[idx]]
return request
|
[
"def",
"compose_MDAL_dic",
"(",
"self",
",",
"site",
",",
"point_type",
",",
"start",
",",
"end",
",",
"var",
",",
"agg",
",",
"window",
",",
"aligned",
",",
"points",
"=",
"None",
",",
"return_names",
"=",
"False",
")",
":",
"# Convert time to UTC",
"start",
"=",
"self",
".",
"convert_to_utc",
"(",
"start",
")",
"end",
"=",
"self",
".",
"convert_to_utc",
"(",
"end",
")",
"request",
"=",
"{",
"}",
"# Add Time Details - single set for one or multiple series",
"request",
"[",
"'Time'",
"]",
"=",
"{",
"'Start'",
":",
"start",
",",
"'End'",
":",
"end",
",",
"'Window'",
":",
"window",
",",
"'Aligned'",
":",
"aligned",
"}",
"# Define Variables ",
"request",
"[",
"\"Variables\"",
"]",
"=",
"{",
"}",
"request",
"[",
"'Composition'",
"]",
"=",
"[",
"]",
"request",
"[",
"'Aggregation'",
"]",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"point_type",
",",
"str",
")",
":",
"# if point_type is a string -> single type of point requested",
"request",
"[",
"\"Variables\"",
"]",
"[",
"var",
"]",
"=",
"self",
".",
"compose_BRICK_query",
"(",
"point_type",
"=",
"point_type",
",",
"site",
"=",
"site",
")",
"# pass one point type at the time",
"request",
"[",
"'Composition'",
"]",
"=",
"[",
"var",
"]",
"request",
"[",
"'Aggregation'",
"]",
"[",
"var",
"]",
"=",
"[",
"agg",
"]",
"elif",
"isinstance",
"(",
"point_type",
",",
"list",
")",
":",
"# loop through all the point_types and create one section of the brick query at the time",
"for",
"idx",
",",
"point",
"in",
"enumerate",
"(",
"point_type",
")",
":",
"request",
"[",
"\"Variables\"",
"]",
"[",
"var",
"[",
"idx",
"]",
"]",
"=",
"self",
".",
"compose_BRICK_query",
"(",
"point_type",
"=",
"point",
",",
"site",
"=",
"site",
")",
"# pass one point type at the time",
"request",
"[",
"'Composition'",
"]",
".",
"append",
"(",
"var",
"[",
"idx",
"]",
")",
"if",
"isinstance",
"(",
"agg",
",",
"str",
")",
":",
"# if agg is a string -> single type of aggregation requested",
"request",
"[",
"'Aggregation'",
"]",
"[",
"var",
"[",
"idx",
"]",
"]",
"=",
"[",
"agg",
"]",
"elif",
"isinstance",
"(",
"agg",
",",
"list",
")",
":",
"# if agg is a list -> expected one agg per point",
"request",
"[",
"'Aggregation'",
"]",
"[",
"var",
"[",
"idx",
"]",
"]",
"=",
"[",
"agg",
"[",
"idx",
"]",
"]",
"return",
"request"
] | 37.41791
| 22.58209
|
def _PartitionChunks(chunks):
"""Groups chunks into partitions of size safe for a single INSERT."""
partitions = [[]]
partition_size = 0
for chunk in chunks:
cursize = len(chunk["blob_chunk"])
if (cursize + partition_size > BLOB_CHUNK_SIZE or
len(partitions[-1]) >= CHUNKS_PER_INSERT):
partitions.append([])
partition_size = 0
partitions[-1].append(chunk)
partition_size += cursize
return partitions
|
[
"def",
"_PartitionChunks",
"(",
"chunks",
")",
":",
"partitions",
"=",
"[",
"[",
"]",
"]",
"partition_size",
"=",
"0",
"for",
"chunk",
"in",
"chunks",
":",
"cursize",
"=",
"len",
"(",
"chunk",
"[",
"\"blob_chunk\"",
"]",
")",
"if",
"(",
"cursize",
"+",
"partition_size",
">",
"BLOB_CHUNK_SIZE",
"or",
"len",
"(",
"partitions",
"[",
"-",
"1",
"]",
")",
">=",
"CHUNKS_PER_INSERT",
")",
":",
"partitions",
".",
"append",
"(",
"[",
"]",
")",
"partition_size",
"=",
"0",
"partitions",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"chunk",
")",
"partition_size",
"+=",
"cursize",
"return",
"partitions"
] | 28.866667
| 16.266667
|
def find_valid_random_signature(s: int) -> Tuple[int, int]:
"""
Find v and r valid values for a given s
:param s: random value
:return: v, r
"""
for _ in range(10000):
r = int(os.urandom(31).hex(), 16)
v = (r % 2) + 27
if r < secpk1n:
tx = Transaction(0, 1, 21000, b'', 0, b'', v=v, r=r, s=s)
try:
tx.sender
return v, r
except (InvalidTransaction, ValueError):
logger.debug('Cannot find signature with v=%d r=%d s=%d', v, r, s)
raise ValueError('Valid signature not found with s=%d', s)
|
[
"def",
"find_valid_random_signature",
"(",
"s",
":",
"int",
")",
"->",
"Tuple",
"[",
"int",
",",
"int",
"]",
":",
"for",
"_",
"in",
"range",
"(",
"10000",
")",
":",
"r",
"=",
"int",
"(",
"os",
".",
"urandom",
"(",
"31",
")",
".",
"hex",
"(",
")",
",",
"16",
")",
"v",
"=",
"(",
"r",
"%",
"2",
")",
"+",
"27",
"if",
"r",
"<",
"secpk1n",
":",
"tx",
"=",
"Transaction",
"(",
"0",
",",
"1",
",",
"21000",
",",
"b''",
",",
"0",
",",
"b''",
",",
"v",
"=",
"v",
",",
"r",
"=",
"r",
",",
"s",
"=",
"s",
")",
"try",
":",
"tx",
".",
"sender",
"return",
"v",
",",
"r",
"except",
"(",
"InvalidTransaction",
",",
"ValueError",
")",
":",
"logger",
".",
"debug",
"(",
"'Cannot find signature with v=%d r=%d s=%d'",
",",
"v",
",",
"r",
",",
"s",
")",
"raise",
"ValueError",
"(",
"'Valid signature not found with s=%d'",
",",
"s",
")"
] | 37.222222
| 16.444444
|
def force_text(s, encoding='utf-8', errors='strict'):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, text_type):
return s
try:
if not isinstance(s, string_types):
if PY3:
if isinstance(s, bytes):
s = text_type(s, encoding, errors)
else:
s = text_type(s)
elif hasattr(s, '__unicode__'):
s = s.__unicode__()
else:
s = text_type(bytes(s), encoding, errors)
else:
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise FlaskMailUnicodeDecodeError(s, *e.args)
else:
s = ' '.join([force_text(arg, encoding, errors)
for arg in s])
return s
|
[
"def",
"force_text",
"(",
"s",
",",
"encoding",
"=",
"'utf-8'",
",",
"errors",
"=",
"'strict'",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"text_type",
")",
":",
"return",
"s",
"try",
":",
"if",
"not",
"isinstance",
"(",
"s",
",",
"string_types",
")",
":",
"if",
"PY3",
":",
"if",
"isinstance",
"(",
"s",
",",
"bytes",
")",
":",
"s",
"=",
"text_type",
"(",
"s",
",",
"encoding",
",",
"errors",
")",
"else",
":",
"s",
"=",
"text_type",
"(",
"s",
")",
"elif",
"hasattr",
"(",
"s",
",",
"'__unicode__'",
")",
":",
"s",
"=",
"s",
".",
"__unicode__",
"(",
")",
"else",
":",
"s",
"=",
"text_type",
"(",
"bytes",
"(",
"s",
")",
",",
"encoding",
",",
"errors",
")",
"else",
":",
"s",
"=",
"s",
".",
"decode",
"(",
"encoding",
",",
"errors",
")",
"except",
"UnicodeDecodeError",
"as",
"e",
":",
"if",
"not",
"isinstance",
"(",
"s",
",",
"Exception",
")",
":",
"raise",
"FlaskMailUnicodeDecodeError",
"(",
"s",
",",
"*",
"e",
".",
"args",
")",
"else",
":",
"s",
"=",
"' '",
".",
"join",
"(",
"[",
"force_text",
"(",
"arg",
",",
"encoding",
",",
"errors",
")",
"for",
"arg",
"in",
"s",
"]",
")",
"return",
"s"
] | 32.933333
| 15.333333
|
def has_been(s, since, dt=None):
'''
A parser to check whether a (cron-like) string has been true during a certain time period.
Useful for applications which cannot check every minute or need to catch up during a restart.
@input:
s = cron-like string (minute, hour, day of month, month, day of week)
since = datetime to use as reference time for start of period
dt = datetime to use as reference time for end of period, defaults to now
@output: boolean of result
'''
if dt is None:
dt = datetime.now(tz=since.tzinfo)
if dt < since:
raise ValueError("The since datetime must be before the current datetime.")
while since <= dt:
if is_now(s, since):
return True
since += timedelta(minutes=1)
return False
|
[
"def",
"has_been",
"(",
"s",
",",
"since",
",",
"dt",
"=",
"None",
")",
":",
"if",
"dt",
"is",
"None",
":",
"dt",
"=",
"datetime",
".",
"now",
"(",
"tz",
"=",
"since",
".",
"tzinfo",
")",
"if",
"dt",
"<",
"since",
":",
"raise",
"ValueError",
"(",
"\"The since datetime must be before the current datetime.\"",
")",
"while",
"since",
"<=",
"dt",
":",
"if",
"is_now",
"(",
"s",
",",
"since",
")",
":",
"return",
"True",
"since",
"+=",
"timedelta",
"(",
"minutes",
"=",
"1",
")",
"return",
"False"
] | 36
| 27.909091
|
def val(self, piece, ref_color):
"""
Finds value of ``Piece``
:type: piece: Piece
:type: ref_color: Color
:rtype: int
"""
if piece is None:
return 0
if ref_color == piece.color:
const = 1
else:
const = -1
if isinstance(piece, Pawn):
return self.PAWN_VALUE * const
elif isinstance(piece, Queen):
return self.QUEEN_VALUE * const
elif isinstance(piece, Bishop):
return self.BISHOP_VALUE * const
elif isinstance(piece, Rook):
return self.ROOK_VALUE * const
elif isinstance(piece, Knight):
return self.KNIGHT_VALUE * const
elif isinstance(piece, King):
return self.KING_VALUE * const
return 0
|
[
"def",
"val",
"(",
"self",
",",
"piece",
",",
"ref_color",
")",
":",
"if",
"piece",
"is",
"None",
":",
"return",
"0",
"if",
"ref_color",
"==",
"piece",
".",
"color",
":",
"const",
"=",
"1",
"else",
":",
"const",
"=",
"-",
"1",
"if",
"isinstance",
"(",
"piece",
",",
"Pawn",
")",
":",
"return",
"self",
".",
"PAWN_VALUE",
"*",
"const",
"elif",
"isinstance",
"(",
"piece",
",",
"Queen",
")",
":",
"return",
"self",
".",
"QUEEN_VALUE",
"*",
"const",
"elif",
"isinstance",
"(",
"piece",
",",
"Bishop",
")",
":",
"return",
"self",
".",
"BISHOP_VALUE",
"*",
"const",
"elif",
"isinstance",
"(",
"piece",
",",
"Rook",
")",
":",
"return",
"self",
".",
"ROOK_VALUE",
"*",
"const",
"elif",
"isinstance",
"(",
"piece",
",",
"Knight",
")",
":",
"return",
"self",
".",
"KNIGHT_VALUE",
"*",
"const",
"elif",
"isinstance",
"(",
"piece",
",",
"King",
")",
":",
"return",
"self",
".",
"KING_VALUE",
"*",
"const",
"return",
"0"
] | 27.517241
| 11.655172
|
def bin(self):
"""Full-length binary representation of the IP address.
>>> ip = IP("127.0.0.1")
>>> print(ip.bin())
01111111000000000000000000000001
"""
bits = self.v == 4 and 32 or 128
return bin(self.ip).split('b')[1].rjust(bits, '0')
|
[
"def",
"bin",
"(",
"self",
")",
":",
"bits",
"=",
"self",
".",
"v",
"==",
"4",
"and",
"32",
"or",
"128",
"return",
"bin",
"(",
"self",
".",
"ip",
")",
".",
"split",
"(",
"'b'",
")",
"[",
"1",
"]",
".",
"rjust",
"(",
"bits",
",",
"'0'",
")"
] | 31.666667
| 11.666667
|
def trace_memory_stop(self):
""" Stops measuring memory consumption """
self.trace_memory_clean_caches()
objgraph.show_growth(limit=30)
trace_type = context.get_current_config()["trace_memory_type"]
if trace_type:
filename = '%s/%s-%s.png' % (
context.get_current_config()["trace_memory_output_dir"],
trace_type,
self.id)
chain = objgraph.find_backref_chain(
random.choice(
objgraph.by_type(trace_type)
),
objgraph.is_proper_module
)
objgraph.show_chain(chain, filename=filename)
del filename
del chain
gc.collect()
self._memory_stop = self.worker.get_memory()["total"]
diff = self._memory_stop - self._memory_start
context.log.debug("Memory diff for job %s : %s" % (self.id, diff))
# We need to update it later than the results, we need them off memory
# already.
self.collection.update(
{"_id": self.id},
{"$set": {
"memory_diff": diff
}},
w=1
)
|
[
"def",
"trace_memory_stop",
"(",
"self",
")",
":",
"self",
".",
"trace_memory_clean_caches",
"(",
")",
"objgraph",
".",
"show_growth",
"(",
"limit",
"=",
"30",
")",
"trace_type",
"=",
"context",
".",
"get_current_config",
"(",
")",
"[",
"\"trace_memory_type\"",
"]",
"if",
"trace_type",
":",
"filename",
"=",
"'%s/%s-%s.png'",
"%",
"(",
"context",
".",
"get_current_config",
"(",
")",
"[",
"\"trace_memory_output_dir\"",
"]",
",",
"trace_type",
",",
"self",
".",
"id",
")",
"chain",
"=",
"objgraph",
".",
"find_backref_chain",
"(",
"random",
".",
"choice",
"(",
"objgraph",
".",
"by_type",
"(",
"trace_type",
")",
")",
",",
"objgraph",
".",
"is_proper_module",
")",
"objgraph",
".",
"show_chain",
"(",
"chain",
",",
"filename",
"=",
"filename",
")",
"del",
"filename",
"del",
"chain",
"gc",
".",
"collect",
"(",
")",
"self",
".",
"_memory_stop",
"=",
"self",
".",
"worker",
".",
"get_memory",
"(",
")",
"[",
"\"total\"",
"]",
"diff",
"=",
"self",
".",
"_memory_stop",
"-",
"self",
".",
"_memory_start",
"context",
".",
"log",
".",
"debug",
"(",
"\"Memory diff for job %s : %s\"",
"%",
"(",
"self",
".",
"id",
",",
"diff",
")",
")",
"# We need to update it later than the results, we need them off memory",
"# already.",
"self",
".",
"collection",
".",
"update",
"(",
"{",
"\"_id\"",
":",
"self",
".",
"id",
"}",
",",
"{",
"\"$set\"",
":",
"{",
"\"memory_diff\"",
":",
"diff",
"}",
"}",
",",
"w",
"=",
"1",
")"
] | 28.585366
| 21.560976
|
def revert(self, strip=0, root=None):
""" apply patch in reverse order """
reverted = copy.deepcopy(self)
reverted._reverse()
return reverted.apply(strip, root)
|
[
"def",
"revert",
"(",
"self",
",",
"strip",
"=",
"0",
",",
"root",
"=",
"None",
")",
":",
"reverted",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
")",
"reverted",
".",
"_reverse",
"(",
")",
"return",
"reverted",
".",
"apply",
"(",
"strip",
",",
"root",
")"
] | 34.4
| 5.6
|
def get_ip_address(ifname):
""" Hack to get IP address from the interface """
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
|
[
"def",
"get_ip_address",
"(",
"ifname",
")",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"return",
"socket",
".",
"inet_ntoa",
"(",
"fcntl",
".",
"ioctl",
"(",
"s",
".",
"fileno",
"(",
")",
",",
"0x8915",
",",
"# SIOCGIFADDR",
"struct",
".",
"pack",
"(",
"'256s'",
",",
"ifname",
"[",
":",
"15",
"]",
")",
")",
"[",
"20",
":",
"24",
"]",
")"
] | 30.888889
| 14.111111
|
def load_file(self, dfile, incremental=False):
"""
Loads the specified file and returns the Instances object.
In case of incremental loading, only the structure.
:param dfile: the file to load
:type dfile: str
:param incremental: whether to load the dataset incrementally
:type incremental: bool
:return: the full dataset or the header (if incremental)
:rtype: Instances
:raises Exception: if the file does not exist
"""
self.enforce_type(self.jobject, "weka.core.converters.FileSourcedConverter")
self.incremental = incremental
if not javabridge.is_instance_of(dfile, "Ljava/io/File;"):
dfile = javabridge.make_instance(
"Ljava/io/File;", "(Ljava/lang/String;)V", javabridge.get_env().new_string_utf(str(dfile)))
javabridge.call(self.jobject, "reset", "()V")
# check whether file exists, otherwise previously set file gets loaded again
sfile = javabridge.to_string(dfile)
if not os.path.exists(sfile):
raise Exception("Dataset file does not exist: " + str(sfile))
javabridge.call(self.jobject, "setFile", "(Ljava/io/File;)V", dfile)
if incremental:
self.structure = Instances(javabridge.call(self.jobject, "getStructure", "()Lweka/core/Instances;"))
return self.structure
else:
return Instances(javabridge.call(self.jobject, "getDataSet", "()Lweka/core/Instances;"))
|
[
"def",
"load_file",
"(",
"self",
",",
"dfile",
",",
"incremental",
"=",
"False",
")",
":",
"self",
".",
"enforce_type",
"(",
"self",
".",
"jobject",
",",
"\"weka.core.converters.FileSourcedConverter\"",
")",
"self",
".",
"incremental",
"=",
"incremental",
"if",
"not",
"javabridge",
".",
"is_instance_of",
"(",
"dfile",
",",
"\"Ljava/io/File;\"",
")",
":",
"dfile",
"=",
"javabridge",
".",
"make_instance",
"(",
"\"Ljava/io/File;\"",
",",
"\"(Ljava/lang/String;)V\"",
",",
"javabridge",
".",
"get_env",
"(",
")",
".",
"new_string_utf",
"(",
"str",
"(",
"dfile",
")",
")",
")",
"javabridge",
".",
"call",
"(",
"self",
".",
"jobject",
",",
"\"reset\"",
",",
"\"()V\"",
")",
"# check whether file exists, otherwise previously set file gets loaded again",
"sfile",
"=",
"javabridge",
".",
"to_string",
"(",
"dfile",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"sfile",
")",
":",
"raise",
"Exception",
"(",
"\"Dataset file does not exist: \"",
"+",
"str",
"(",
"sfile",
")",
")",
"javabridge",
".",
"call",
"(",
"self",
".",
"jobject",
",",
"\"setFile\"",
",",
"\"(Ljava/io/File;)V\"",
",",
"dfile",
")",
"if",
"incremental",
":",
"self",
".",
"structure",
"=",
"Instances",
"(",
"javabridge",
".",
"call",
"(",
"self",
".",
"jobject",
",",
"\"getStructure\"",
",",
"\"()Lweka/core/Instances;\"",
")",
")",
"return",
"self",
".",
"structure",
"else",
":",
"return",
"Instances",
"(",
"javabridge",
".",
"call",
"(",
"self",
".",
"jobject",
",",
"\"getDataSet\"",
",",
"\"()Lweka/core/Instances;\"",
")",
")"
] | 51.172414
| 22.689655
|
def answer(self) -> str:
"""Get a random answer in current language.
:return: An answer.
:Example:
No
"""
answers = self._data['answers']
return self.random.choice(answers)
|
[
"def",
"answer",
"(",
"self",
")",
"->",
"str",
":",
"answers",
"=",
"self",
".",
"_data",
"[",
"'answers'",
"]",
"return",
"self",
".",
"random",
".",
"choice",
"(",
"answers",
")"
] | 22.5
| 16.1
|
def _merge_csv_column(table, csvs):
"""
Add csv data to each column in a list of columns
:param dict table: Table metadata
:param str crumbs: Hierarchy crumbs
:param str pc: Paleo or Chron table type
:return dict: Table metadata with csv "values" entry
:return bool ensemble: Ensemble data or not ensemble data
"""
# Start putting CSV data into corresponding column "values" key
try:
ensemble = is_ensemble(table["columns"])
if ensemble:
# realization columns
if len(table["columns"]) == 1:
for _name, _column in table["columns"].items():
_column["values"] = csvs
# depth column + realization columns
elif len(table["columns"]) == 2:
_multi_column = False
for _name, _column in table["columns"].items():
if isinstance(_column["number"], (int, float)):
col_num = cast_int(_column["number"])
_column['values'] = csvs[col_num - 1]
elif isinstance(_column["number"], list):
if _multi_column:
raise Exception("Error: merge_csv_column: This jsonld metadata looks wrong!\n"
"\tAn ensemble table depth should not reference multiple columns of CSV data.\n"
"\tPlease manually fix the ensemble columns in 'metadata.jsonld' inside of your LiPD file.")
else:
_multi_column = True
_column["values"] = csvs[2:]
else:
for _name, _column in table['columns'].items():
col_num = cast_int(_column["number"])
_column['values'] = csvs[col_num - 1]
except IndexError:
logger_csvs.warning("merge_csv_column: IndexError: index out of range of csv_data list")
except KeyError:
logger_csvs.error("merge_csv_column: KeyError: missing columns key")
except Exception as e:
logger_csvs.error("merge_csv_column: Unknown Error: {}".format(e))
print("Quitting...")
exit(1)
# We want to keep one missing value ONLY at the table level. Remove MVs if they're still in column-level
return table, ensemble
|
[
"def",
"_merge_csv_column",
"(",
"table",
",",
"csvs",
")",
":",
"# Start putting CSV data into corresponding column \"values\" key",
"try",
":",
"ensemble",
"=",
"is_ensemble",
"(",
"table",
"[",
"\"columns\"",
"]",
")",
"if",
"ensemble",
":",
"# realization columns",
"if",
"len",
"(",
"table",
"[",
"\"columns\"",
"]",
")",
"==",
"1",
":",
"for",
"_name",
",",
"_column",
"in",
"table",
"[",
"\"columns\"",
"]",
".",
"items",
"(",
")",
":",
"_column",
"[",
"\"values\"",
"]",
"=",
"csvs",
"# depth column + realization columns",
"elif",
"len",
"(",
"table",
"[",
"\"columns\"",
"]",
")",
"==",
"2",
":",
"_multi_column",
"=",
"False",
"for",
"_name",
",",
"_column",
"in",
"table",
"[",
"\"columns\"",
"]",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"_column",
"[",
"\"number\"",
"]",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"col_num",
"=",
"cast_int",
"(",
"_column",
"[",
"\"number\"",
"]",
")",
"_column",
"[",
"'values'",
"]",
"=",
"csvs",
"[",
"col_num",
"-",
"1",
"]",
"elif",
"isinstance",
"(",
"_column",
"[",
"\"number\"",
"]",
",",
"list",
")",
":",
"if",
"_multi_column",
":",
"raise",
"Exception",
"(",
"\"Error: merge_csv_column: This jsonld metadata looks wrong!\\n\"",
"\"\\tAn ensemble table depth should not reference multiple columns of CSV data.\\n\"",
"\"\\tPlease manually fix the ensemble columns in 'metadata.jsonld' inside of your LiPD file.\"",
")",
"else",
":",
"_multi_column",
"=",
"True",
"_column",
"[",
"\"values\"",
"]",
"=",
"csvs",
"[",
"2",
":",
"]",
"else",
":",
"for",
"_name",
",",
"_column",
"in",
"table",
"[",
"'columns'",
"]",
".",
"items",
"(",
")",
":",
"col_num",
"=",
"cast_int",
"(",
"_column",
"[",
"\"number\"",
"]",
")",
"_column",
"[",
"'values'",
"]",
"=",
"csvs",
"[",
"col_num",
"-",
"1",
"]",
"except",
"IndexError",
":",
"logger_csvs",
".",
"warning",
"(",
"\"merge_csv_column: IndexError: index out of range of csv_data list\"",
")",
"except",
"KeyError",
":",
"logger_csvs",
".",
"error",
"(",
"\"merge_csv_column: KeyError: missing columns key\"",
")",
"except",
"Exception",
"as",
"e",
":",
"logger_csvs",
".",
"error",
"(",
"\"merge_csv_column: Unknown Error: {}\"",
".",
"format",
"(",
"e",
")",
")",
"print",
"(",
"\"Quitting...\"",
")",
"exit",
"(",
"1",
")",
"# We want to keep one missing value ONLY at the table level. Remove MVs if they're still in column-level",
"return",
"table",
",",
"ensemble"
] | 46.836735
| 21.734694
|
def convert_outlook_msg(msg_bytes):
"""
Uses the ``msgconvert`` Perl utility to convert an Outlook MS file to
standard RFC 822 format
Args:
msg_bytes (bytes): the content of the .msg file
Returns:
A RFC 822 string
"""
if not is_outlook_msg(msg_bytes):
raise ValueError("The supplied bytes are not an Outlook MSG file")
orig_dir = os.getcwd()
tmp_dir = tempfile.mkdtemp()
os.chdir(tmp_dir)
with open("sample.msg", "wb") as msg_file:
msg_file.write(msg_bytes)
try:
subprocess.check_call(["msgconvert", "sample.msg"],
stdout=null_file, stderr=null_file)
eml_path = "sample.eml"
with open(eml_path, "rb") as eml_file:
rfc822 = eml_file.read()
except FileNotFoundError:
raise EmailParserError(
"Failed to convert Outlook MSG: msgconvert utility not found")
finally:
os.chdir(orig_dir)
shutil.rmtree(tmp_dir)
return rfc822
|
[
"def",
"convert_outlook_msg",
"(",
"msg_bytes",
")",
":",
"if",
"not",
"is_outlook_msg",
"(",
"msg_bytes",
")",
":",
"raise",
"ValueError",
"(",
"\"The supplied bytes are not an Outlook MSG file\"",
")",
"orig_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"tmp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"os",
".",
"chdir",
"(",
"tmp_dir",
")",
"with",
"open",
"(",
"\"sample.msg\"",
",",
"\"wb\"",
")",
"as",
"msg_file",
":",
"msg_file",
".",
"write",
"(",
"msg_bytes",
")",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"[",
"\"msgconvert\"",
",",
"\"sample.msg\"",
"]",
",",
"stdout",
"=",
"null_file",
",",
"stderr",
"=",
"null_file",
")",
"eml_path",
"=",
"\"sample.eml\"",
"with",
"open",
"(",
"eml_path",
",",
"\"rb\"",
")",
"as",
"eml_file",
":",
"rfc822",
"=",
"eml_file",
".",
"read",
"(",
")",
"except",
"FileNotFoundError",
":",
"raise",
"EmailParserError",
"(",
"\"Failed to convert Outlook MSG: msgconvert utility not found\"",
")",
"finally",
":",
"os",
".",
"chdir",
"(",
"orig_dir",
")",
"shutil",
".",
"rmtree",
"(",
"tmp_dir",
")",
"return",
"rfc822"
] | 30.6875
| 18
|
def serialize(self):
"""Return the string representation of the receiver."""
res = '<?xml version="1.0" encoding="UTF-8"?>'
for ns in self.namespaces:
self.top_grammar.attr["xmlns:" + self.namespaces[ns]] = ns
res += self.top_grammar.start_tag()
for ch in self.top_grammar.children:
res += ch.serialize()
res += self.tree.serialize()
for d in self.global_defs:
res += self.global_defs[d].serialize()
for i in self.identities:
res += self.identities[i].serialize()
return res + self.top_grammar.end_tag()
|
[
"def",
"serialize",
"(",
"self",
")",
":",
"res",
"=",
"'<?xml version=\"1.0\" encoding=\"UTF-8\"?>'",
"for",
"ns",
"in",
"self",
".",
"namespaces",
":",
"self",
".",
"top_grammar",
".",
"attr",
"[",
"\"xmlns:\"",
"+",
"self",
".",
"namespaces",
"[",
"ns",
"]",
"]",
"=",
"ns",
"res",
"+=",
"self",
".",
"top_grammar",
".",
"start_tag",
"(",
")",
"for",
"ch",
"in",
"self",
".",
"top_grammar",
".",
"children",
":",
"res",
"+=",
"ch",
".",
"serialize",
"(",
")",
"res",
"+=",
"self",
".",
"tree",
".",
"serialize",
"(",
")",
"for",
"d",
"in",
"self",
".",
"global_defs",
":",
"res",
"+=",
"self",
".",
"global_defs",
"[",
"d",
"]",
".",
"serialize",
"(",
")",
"for",
"i",
"in",
"self",
".",
"identities",
":",
"res",
"+=",
"self",
".",
"identities",
"[",
"i",
"]",
".",
"serialize",
"(",
")",
"return",
"res",
"+",
"self",
".",
"top_grammar",
".",
"end_tag",
"(",
")"
] | 43.571429
| 9.071429
|
def __boost(self, grad, hess):
"""Boost Booster for one iteration with customized gradient statistics.
Note
----
For multi-class task, the score is group by class_id first, then group by row_id.
If you want to get i-th row score in j-th class, the access way is score[j * num_data + i]
and you should group grad and hess in this way as well.
Parameters
----------
grad : 1-D numpy array or 1-D list
The first order derivative (gradient).
hess : 1-D numpy array or 1-D list
The second order derivative (Hessian).
Returns
-------
is_finished : bool
Whether the boost was successfully finished.
"""
grad = list_to_1d_numpy(grad, name='gradient')
hess = list_to_1d_numpy(hess, name='hessian')
assert grad.flags.c_contiguous
assert hess.flags.c_contiguous
if len(grad) != len(hess):
raise ValueError("Lengths of gradient({}) and hessian({}) don't match"
.format(len(grad), len(hess)))
is_finished = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom(
self.handle,
grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
return is_finished.value == 1
|
[
"def",
"__boost",
"(",
"self",
",",
"grad",
",",
"hess",
")",
":",
"grad",
"=",
"list_to_1d_numpy",
"(",
"grad",
",",
"name",
"=",
"'gradient'",
")",
"hess",
"=",
"list_to_1d_numpy",
"(",
"hess",
",",
"name",
"=",
"'hessian'",
")",
"assert",
"grad",
".",
"flags",
".",
"c_contiguous",
"assert",
"hess",
".",
"flags",
".",
"c_contiguous",
"if",
"len",
"(",
"grad",
")",
"!=",
"len",
"(",
"hess",
")",
":",
"raise",
"ValueError",
"(",
"\"Lengths of gradient({}) and hessian({}) don't match\"",
".",
"format",
"(",
"len",
"(",
"grad",
")",
",",
"len",
"(",
"hess",
")",
")",
")",
"is_finished",
"=",
"ctypes",
".",
"c_int",
"(",
"0",
")",
"_safe_call",
"(",
"_LIB",
".",
"LGBM_BoosterUpdateOneIterCustom",
"(",
"self",
".",
"handle",
",",
"grad",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_float",
")",
")",
",",
"hess",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_float",
")",
")",
",",
"ctypes",
".",
"byref",
"(",
"is_finished",
")",
")",
")",
"self",
".",
"__is_predicted_cur_iter",
"=",
"[",
"False",
"for",
"_",
"in",
"range_",
"(",
"self",
".",
"__num_dataset",
")",
"]",
"return",
"is_finished",
".",
"value",
"==",
"1"
] | 41.305556
| 19.194444
|
def verify_order(self, hostname, domain, location, hourly, flavor, router=None):
"""Verifies an order for a dedicated host.
See :func:`place_order` for a list of available options.
"""
create_options = self._generate_create_dict(hostname=hostname,
router=router,
domain=domain,
flavor=flavor,
datacenter=location,
hourly=hourly)
return self.client['Product_Order'].verifyOrder(create_options)
|
[
"def",
"verify_order",
"(",
"self",
",",
"hostname",
",",
"domain",
",",
"location",
",",
"hourly",
",",
"flavor",
",",
"router",
"=",
"None",
")",
":",
"create_options",
"=",
"self",
".",
"_generate_create_dict",
"(",
"hostname",
"=",
"hostname",
",",
"router",
"=",
"router",
",",
"domain",
"=",
"domain",
",",
"flavor",
"=",
"flavor",
",",
"datacenter",
"=",
"location",
",",
"hourly",
"=",
"hourly",
")",
"return",
"self",
".",
"client",
"[",
"'Product_Order'",
"]",
".",
"verifyOrder",
"(",
"create_options",
")"
] | 48.714286
| 27.214286
|
def unblock_events(self):
"""
Allows the widget to send signals.
"""
self._widget.blockSignals(False)
self._widget.setUpdatesEnabled(True)
|
[
"def",
"unblock_events",
"(",
"self",
")",
":",
"self",
".",
"_widget",
".",
"blockSignals",
"(",
"False",
")",
"self",
".",
"_widget",
".",
"setUpdatesEnabled",
"(",
"True",
")"
] | 28.833333
| 3.5
|
def wiki_create(self, title, body, other_names=None):
"""Action to lets you create a wiki page (Requires login) (UNTESTED).
Parameters:
title (str): Page title.
body (str): Page content.
other_names (str): Other names.
"""
params = {
'wiki_page[title]': title,
'wiki_page[body]': body,
'wiki_page[other_names]': other_names
}
return self._get('wiki_pages.json', params, method='POST', auth=True)
|
[
"def",
"wiki_create",
"(",
"self",
",",
"title",
",",
"body",
",",
"other_names",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'wiki_page[title]'",
":",
"title",
",",
"'wiki_page[body]'",
":",
"body",
",",
"'wiki_page[other_names]'",
":",
"other_names",
"}",
"return",
"self",
".",
"_get",
"(",
"'wiki_pages.json'",
",",
"params",
",",
"method",
"=",
"'POST'",
",",
"auth",
"=",
"True",
")"
] | 36.214286
| 13.214286
|
def get_webhook(self, scaling_group, policy, webhook):
"""
Gets the detail for the specified webhook.
"""
return self._manager.get_webhook(scaling_group, policy, webhook)
|
[
"def",
"get_webhook",
"(",
"self",
",",
"scaling_group",
",",
"policy",
",",
"webhook",
")",
":",
"return",
"self",
".",
"_manager",
".",
"get_webhook",
"(",
"scaling_group",
",",
"policy",
",",
"webhook",
")"
] | 39.6
| 11.2
|
def apply_same_chip_constraints(vertices_resources, nets, constraints):
"""Modify a set of vertices_resources, nets and constraints to account for
all SameChipConstraints.
To allow placement algorithms to handle SameChipConstraints without any
special cases, Vertices identified in a SameChipConstraint are merged into
a new vertex whose vertices_resources are the sum total of their parts
which may be placed as if a single vertex. Once placed, the placement can
be expanded into a full placement of all the original vertices using
:py:func:`finalise_same_chip_constraints`.
A typical use pattern might look like::
def my_placer(vertices_resources, nets, machine, constraints):
# Should be done first thing since this may redefine
# vertices_resources, nets and constraints.
vertices_resources, nets, constraints, substitutions = \\
apply_same_chip_constraints(vertices_resources,
nets, constraints)
# ...deal with other types of constraint...
# ...perform placement...
finalise_same_chip_constraints(substitutions, placements)
return placements
Note that this function does not modify its arguments but rather returns
new copies of the structures supplied.
Parameters
----------
vertices_resources : {vertex: {resource: quantity, ...}, ...}
nets : [:py:class:`~rig.netlist.Net`, ...]
constraints : [constraint, ...]
Returns
-------
(vertices_resources, nets, constraints, substitutions)
The vertices_resources, nets and constraints values contain modified
copies of the supplied data structures modified to contain a single
vertex in place of the individual constrained vertices.
substitutions is a list of :py:class:`MergedVertex` objects which
resulted from the combining of the constrained vertices. The order of
the list is the order the substitutions were carried out. The
:py:func:`finalise_same_chip_constraints` function can be used to
expand a set of substitutions.
"""
# Make a copy of the basic structures to be modified by this function
vertices_resources = vertices_resources.copy()
nets = nets[:]
constraints = constraints[:]
substitutions = []
for same_chip_constraint in constraints:
if not isinstance(same_chip_constraint, SameChipConstraint):
continue
# Skip constraints which don't actually merge anything...
if len(same_chip_constraint.vertices) <= 1:
continue
# The new (merged) vertex with which to replace the constrained
# vertices
merged_vertex = MergedVertex(same_chip_constraint.vertices)
substitutions.append(merged_vertex)
# A set containing the set of vertices to be merged (to remove
# duplicates)
merged_vertices = set(same_chip_constraint.vertices)
# Remove the merged vertices from the set of vertices resources and
# accumulate the total resources consumed. Note add_resources is not
# used since we don't know if the resources consumed by each vertex are
# overlapping.
total_resources = {}
for vertex in merged_vertices:
resources = vertices_resources.pop(vertex)
for resource, value in iteritems(resources):
total_resources[resource] = (total_resources.get(resource, 0) +
value)
vertices_resources[merged_vertex] = total_resources
# Update any nets which pointed to a merged vertex
for net_num, net in enumerate(nets):
net_changed = False
# Change net sources
if net.source in merged_vertices:
net_changed = True
net = Net(merged_vertex, net.sinks, net.weight)
# Change net sinks
for sink_num, sink in enumerate(net.sinks):
if sink in merged_vertices:
if not net_changed:
net = Net(net.source, net.sinks, net.weight)
net_changed = True
net.sinks[sink_num] = merged_vertex
if net_changed:
nets[net_num] = net
# Update any constraints which refer to a merged vertex
for constraint_num, constraint in enumerate(constraints):
if isinstance(constraint, LocationConstraint):
if constraint.vertex in merged_vertices:
constraints[constraint_num] = LocationConstraint(
merged_vertex, constraint.location)
elif isinstance(constraint, SameChipConstraint):
if not set(constraint.vertices).isdisjoint(merged_vertices):
constraints[constraint_num] = SameChipConstraint([
merged_vertex if v in merged_vertices else v
for v in constraint.vertices
])
elif isinstance(constraint, RouteEndpointConstraint):
if constraint.vertex in merged_vertices:
constraints[constraint_num] = RouteEndpointConstraint(
merged_vertex, constraint.route)
return (vertices_resources, nets, constraints, substitutions)
|
[
"def",
"apply_same_chip_constraints",
"(",
"vertices_resources",
",",
"nets",
",",
"constraints",
")",
":",
"# Make a copy of the basic structures to be modified by this function",
"vertices_resources",
"=",
"vertices_resources",
".",
"copy",
"(",
")",
"nets",
"=",
"nets",
"[",
":",
"]",
"constraints",
"=",
"constraints",
"[",
":",
"]",
"substitutions",
"=",
"[",
"]",
"for",
"same_chip_constraint",
"in",
"constraints",
":",
"if",
"not",
"isinstance",
"(",
"same_chip_constraint",
",",
"SameChipConstraint",
")",
":",
"continue",
"# Skip constraints which don't actually merge anything...",
"if",
"len",
"(",
"same_chip_constraint",
".",
"vertices",
")",
"<=",
"1",
":",
"continue",
"# The new (merged) vertex with which to replace the constrained",
"# vertices",
"merged_vertex",
"=",
"MergedVertex",
"(",
"same_chip_constraint",
".",
"vertices",
")",
"substitutions",
".",
"append",
"(",
"merged_vertex",
")",
"# A set containing the set of vertices to be merged (to remove",
"# duplicates)",
"merged_vertices",
"=",
"set",
"(",
"same_chip_constraint",
".",
"vertices",
")",
"# Remove the merged vertices from the set of vertices resources and",
"# accumulate the total resources consumed. Note add_resources is not",
"# used since we don't know if the resources consumed by each vertex are",
"# overlapping.",
"total_resources",
"=",
"{",
"}",
"for",
"vertex",
"in",
"merged_vertices",
":",
"resources",
"=",
"vertices_resources",
".",
"pop",
"(",
"vertex",
")",
"for",
"resource",
",",
"value",
"in",
"iteritems",
"(",
"resources",
")",
":",
"total_resources",
"[",
"resource",
"]",
"=",
"(",
"total_resources",
".",
"get",
"(",
"resource",
",",
"0",
")",
"+",
"value",
")",
"vertices_resources",
"[",
"merged_vertex",
"]",
"=",
"total_resources",
"# Update any nets which pointed to a merged vertex",
"for",
"net_num",
",",
"net",
"in",
"enumerate",
"(",
"nets",
")",
":",
"net_changed",
"=",
"False",
"# Change net sources",
"if",
"net",
".",
"source",
"in",
"merged_vertices",
":",
"net_changed",
"=",
"True",
"net",
"=",
"Net",
"(",
"merged_vertex",
",",
"net",
".",
"sinks",
",",
"net",
".",
"weight",
")",
"# Change net sinks",
"for",
"sink_num",
",",
"sink",
"in",
"enumerate",
"(",
"net",
".",
"sinks",
")",
":",
"if",
"sink",
"in",
"merged_vertices",
":",
"if",
"not",
"net_changed",
":",
"net",
"=",
"Net",
"(",
"net",
".",
"source",
",",
"net",
".",
"sinks",
",",
"net",
".",
"weight",
")",
"net_changed",
"=",
"True",
"net",
".",
"sinks",
"[",
"sink_num",
"]",
"=",
"merged_vertex",
"if",
"net_changed",
":",
"nets",
"[",
"net_num",
"]",
"=",
"net",
"# Update any constraints which refer to a merged vertex",
"for",
"constraint_num",
",",
"constraint",
"in",
"enumerate",
"(",
"constraints",
")",
":",
"if",
"isinstance",
"(",
"constraint",
",",
"LocationConstraint",
")",
":",
"if",
"constraint",
".",
"vertex",
"in",
"merged_vertices",
":",
"constraints",
"[",
"constraint_num",
"]",
"=",
"LocationConstraint",
"(",
"merged_vertex",
",",
"constraint",
".",
"location",
")",
"elif",
"isinstance",
"(",
"constraint",
",",
"SameChipConstraint",
")",
":",
"if",
"not",
"set",
"(",
"constraint",
".",
"vertices",
")",
".",
"isdisjoint",
"(",
"merged_vertices",
")",
":",
"constraints",
"[",
"constraint_num",
"]",
"=",
"SameChipConstraint",
"(",
"[",
"merged_vertex",
"if",
"v",
"in",
"merged_vertices",
"else",
"v",
"for",
"v",
"in",
"constraint",
".",
"vertices",
"]",
")",
"elif",
"isinstance",
"(",
"constraint",
",",
"RouteEndpointConstraint",
")",
":",
"if",
"constraint",
".",
"vertex",
"in",
"merged_vertices",
":",
"constraints",
"[",
"constraint_num",
"]",
"=",
"RouteEndpointConstraint",
"(",
"merged_vertex",
",",
"constraint",
".",
"route",
")",
"return",
"(",
"vertices_resources",
",",
"nets",
",",
"constraints",
",",
"substitutions",
")"
] | 43.081301
| 23.463415
|
def _load_script(self, filename: str) -> Script:
"""Load a Lua script.
Read the Lua script file to generate its Script object. If the script
starts with a magic string, add it to the list of scripts requiring an
idempotency token to execute.
"""
with open(path.join(here, 'redis_scripts', filename), mode='rb') as f:
script_data = f.read()
rv = self._r.register_script(script_data)
if script_data.startswith(b'-- idempotency protected script'):
self._idempotency_protected_scripts.append(rv)
return rv
|
[
"def",
"_load_script",
"(",
"self",
",",
"filename",
":",
"str",
")",
"->",
"Script",
":",
"with",
"open",
"(",
"path",
".",
"join",
"(",
"here",
",",
"'redis_scripts'",
",",
"filename",
")",
",",
"mode",
"=",
"'rb'",
")",
"as",
"f",
":",
"script_data",
"=",
"f",
".",
"read",
"(",
")",
"rv",
"=",
"self",
".",
"_r",
".",
"register_script",
"(",
"script_data",
")",
"if",
"script_data",
".",
"startswith",
"(",
"b'-- idempotency protected script'",
")",
":",
"self",
".",
"_idempotency_protected_scripts",
".",
"append",
"(",
"rv",
")",
"return",
"rv"
] | 45.076923
| 19.230769
|
def _outputs(self):
"""List of layers containing outputs from the IF.
:returns: A list of vector layers.
:rtype: list
"""
layers = OrderedDict()
layers[layer_purpose_exposure_summary['key']] = (
self._exposure_summary)
layers[layer_purpose_aggregate_hazard_impacted['key']] = (
self._aggregate_hazard_impacted)
layers[layer_purpose_aggregation_summary['key']] = (
self._aggregation_summary)
layers[layer_purpose_analysis_impacted['key']] = (
self._analysis_impacted)
layers[layer_purpose_exposure_summary_table['key']] = (
self._exposure_summary_table)
layers[layer_purpose_profiling['key']] = self._profiling_table
# Extra layers produced by pre-processing
layers.update(self._preprocessors_layers)
for expected_purpose, layer in list(layers.items()):
if layer:
purpose = layer.keywords.get('layer_purpose')
if purpose != expected_purpose:
# ET 18/11/16
# I'm disabling this check. If an exception is raised in
# the IF, this exception might be raised and will hide the
# other one.
# raise Exception('Wrong layer purpose : %s != %s' % (
# purpose, expected_purpose))
pass
# Remove layers which are not set.
layers = [layer for layer in list(layers.values()) if layer]
return layers
|
[
"def",
"_outputs",
"(",
"self",
")",
":",
"layers",
"=",
"OrderedDict",
"(",
")",
"layers",
"[",
"layer_purpose_exposure_summary",
"[",
"'key'",
"]",
"]",
"=",
"(",
"self",
".",
"_exposure_summary",
")",
"layers",
"[",
"layer_purpose_aggregate_hazard_impacted",
"[",
"'key'",
"]",
"]",
"=",
"(",
"self",
".",
"_aggregate_hazard_impacted",
")",
"layers",
"[",
"layer_purpose_aggregation_summary",
"[",
"'key'",
"]",
"]",
"=",
"(",
"self",
".",
"_aggregation_summary",
")",
"layers",
"[",
"layer_purpose_analysis_impacted",
"[",
"'key'",
"]",
"]",
"=",
"(",
"self",
".",
"_analysis_impacted",
")",
"layers",
"[",
"layer_purpose_exposure_summary_table",
"[",
"'key'",
"]",
"]",
"=",
"(",
"self",
".",
"_exposure_summary_table",
")",
"layers",
"[",
"layer_purpose_profiling",
"[",
"'key'",
"]",
"]",
"=",
"self",
".",
"_profiling_table",
"# Extra layers produced by pre-processing",
"layers",
".",
"update",
"(",
"self",
".",
"_preprocessors_layers",
")",
"for",
"expected_purpose",
",",
"layer",
"in",
"list",
"(",
"layers",
".",
"items",
"(",
")",
")",
":",
"if",
"layer",
":",
"purpose",
"=",
"layer",
".",
"keywords",
".",
"get",
"(",
"'layer_purpose'",
")",
"if",
"purpose",
"!=",
"expected_purpose",
":",
"# ET 18/11/16",
"# I'm disabling this check. If an exception is raised in",
"# the IF, this exception might be raised and will hide the",
"# other one.",
"# raise Exception('Wrong layer purpose : %s != %s' % (",
"# purpose, expected_purpose))",
"pass",
"# Remove layers which are not set.",
"layers",
"=",
"[",
"layer",
"for",
"layer",
"in",
"list",
"(",
"layers",
".",
"values",
"(",
")",
")",
"if",
"layer",
"]",
"return",
"layers"
] | 41.486486
| 17.540541
|
def smart_email_send(self, smart_email_id, to, consent_to_track, cc=None, bcc=None, attachments=None, data=None, add_recipients_to_list=None):
"""Sends the smart email."""
validate_consent_to_track(consent_to_track)
body = {
"To": to,
"CC": cc,
"BCC": bcc,
"Attachments": attachments,
"Data": data,
"AddRecipientsToList": add_recipients_to_list,
"ConsentToTrack": consent_to_track,
}
response = self._post("/transactional/smartEmail/%s/send" %
smart_email_id, json.dumps(body))
return json_to_py(response)
|
[
"def",
"smart_email_send",
"(",
"self",
",",
"smart_email_id",
",",
"to",
",",
"consent_to_track",
",",
"cc",
"=",
"None",
",",
"bcc",
"=",
"None",
",",
"attachments",
"=",
"None",
",",
"data",
"=",
"None",
",",
"add_recipients_to_list",
"=",
"None",
")",
":",
"validate_consent_to_track",
"(",
"consent_to_track",
")",
"body",
"=",
"{",
"\"To\"",
":",
"to",
",",
"\"CC\"",
":",
"cc",
",",
"\"BCC\"",
":",
"bcc",
",",
"\"Attachments\"",
":",
"attachments",
",",
"\"Data\"",
":",
"data",
",",
"\"AddRecipientsToList\"",
":",
"add_recipients_to_list",
",",
"\"ConsentToTrack\"",
":",
"consent_to_track",
",",
"}",
"response",
"=",
"self",
".",
"_post",
"(",
"\"/transactional/smartEmail/%s/send\"",
"%",
"smart_email_id",
",",
"json",
".",
"dumps",
"(",
"body",
")",
")",
"return",
"json_to_py",
"(",
"response",
")"
] | 43.533333
| 21.266667
|
def NewFromJSON(data):
"""
Create a new Comment instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a Comment.
Returns:
A Comment instance.
"""
return Comment(
body=data.get('body', None),
posted_at=data.get('posted_at', None),
user=User.NewFromJSON(data.get('user', None))
)
|
[
"def",
"NewFromJSON",
"(",
"data",
")",
":",
"return",
"Comment",
"(",
"body",
"=",
"data",
".",
"get",
"(",
"'body'",
",",
"None",
")",
",",
"posted_at",
"=",
"data",
".",
"get",
"(",
"'posted_at'",
",",
"None",
")",
",",
"user",
"=",
"User",
".",
"NewFromJSON",
"(",
"data",
".",
"get",
"(",
"'user'",
",",
"None",
")",
")",
")"
] | 26.8
| 18.133333
|
def CheckBlobsExist(self, blob_ids):
"""Checks if given blobs exit."""
result = {}
for blob_id in blob_ids:
result[blob_id] = blob_id in self.blobs
return result
|
[
"def",
"CheckBlobsExist",
"(",
"self",
",",
"blob_ids",
")",
":",
"result",
"=",
"{",
"}",
"for",
"blob_id",
"in",
"blob_ids",
":",
"result",
"[",
"blob_id",
"]",
"=",
"blob_id",
"in",
"self",
".",
"blobs",
"return",
"result"
] | 22.25
| 18.625
|
def parent(self):
"""A new URL with last part of path removed and cleaned up query and
fragment.
"""
path = self.raw_path
if not path or path == "/":
if self.raw_fragment or self.raw_query_string:
return URL(self._val._replace(query="", fragment=""), encoded=True)
return self
parts = path.split("/")
val = self._val._replace(path="/".join(parts[:-1]), query="", fragment="")
return URL(val, encoded=True)
|
[
"def",
"parent",
"(",
"self",
")",
":",
"path",
"=",
"self",
".",
"raw_path",
"if",
"not",
"path",
"or",
"path",
"==",
"\"/\"",
":",
"if",
"self",
".",
"raw_fragment",
"or",
"self",
".",
"raw_query_string",
":",
"return",
"URL",
"(",
"self",
".",
"_val",
".",
"_replace",
"(",
"query",
"=",
"\"\"",
",",
"fragment",
"=",
"\"\"",
")",
",",
"encoded",
"=",
"True",
")",
"return",
"self",
"parts",
"=",
"path",
".",
"split",
"(",
"\"/\"",
")",
"val",
"=",
"self",
".",
"_val",
".",
"_replace",
"(",
"path",
"=",
"\"/\"",
".",
"join",
"(",
"parts",
"[",
":",
"-",
"1",
"]",
")",
",",
"query",
"=",
"\"\"",
",",
"fragment",
"=",
"\"\"",
")",
"return",
"URL",
"(",
"val",
",",
"encoded",
"=",
"True",
")"
] | 38.307692
| 18.076923
|
def wikilinks(self) -> List['WikiLink']:
"""Return a list of wikilink objects."""
_lststr = self._lststr
_type_to_spans = self._type_to_spans
return [
WikiLink(_lststr, _type_to_spans, span, 'WikiLink')
for span in self._subspans('WikiLink')]
|
[
"def",
"wikilinks",
"(",
"self",
")",
"->",
"List",
"[",
"'WikiLink'",
"]",
":",
"_lststr",
"=",
"self",
".",
"_lststr",
"_type_to_spans",
"=",
"self",
".",
"_type_to_spans",
"return",
"[",
"WikiLink",
"(",
"_lststr",
",",
"_type_to_spans",
",",
"span",
",",
"'WikiLink'",
")",
"for",
"span",
"in",
"self",
".",
"_subspans",
"(",
"'WikiLink'",
")",
"]"
] | 41.714286
| 10.285714
|
def negative_sharpe(
weights, expected_returns, cov_matrix, gamma=0, risk_free_rate=0.02
):
"""
Calculate the negative Sharpe ratio of a portfolio
:param weights: asset weights of the portfolio
:type weights: np.ndarray
:param expected_returns: expected return of each asset
:type expected_returns: pd.Series
:param cov_matrix: the covariance matrix of asset returns
:type cov_matrix: pd.DataFrame
:param gamma: L2 regularisation parameter, defaults to 0. Increase if you want more
non-negligible weights
:type gamma: float, optional
:param risk_free_rate: risk-free rate of borrowing/lending, defaults to 0.02
:type risk_free_rate: float, optional
:return: negative Sharpe ratio
:rtype: float
"""
mu = weights.dot(expected_returns)
sigma = np.sqrt(np.dot(weights, np.dot(cov_matrix, weights.T)))
L2_reg = gamma * (weights ** 2).sum()
return -(mu - risk_free_rate) / sigma + L2_reg
|
[
"def",
"negative_sharpe",
"(",
"weights",
",",
"expected_returns",
",",
"cov_matrix",
",",
"gamma",
"=",
"0",
",",
"risk_free_rate",
"=",
"0.02",
")",
":",
"mu",
"=",
"weights",
".",
"dot",
"(",
"expected_returns",
")",
"sigma",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"dot",
"(",
"weights",
",",
"np",
".",
"dot",
"(",
"cov_matrix",
",",
"weights",
".",
"T",
")",
")",
")",
"L2_reg",
"=",
"gamma",
"*",
"(",
"weights",
"**",
"2",
")",
".",
"sum",
"(",
")",
"return",
"-",
"(",
"mu",
"-",
"risk_free_rate",
")",
"/",
"sigma",
"+",
"L2_reg"
] | 39.958333
| 15.791667
|
def extract_files(files):
"""Expand list of paths to include all text files matching the pattern."""
expanded_files = []
legal_extensions = [".md", ".txt", ".rtf", ".html", ".tex", ".markdown"]
for f in files:
# If it's a directory, recursively walk through it and find the files.
if os.path.isdir(f):
for dir_, _, filenames in os.walk(f):
for filename in filenames:
fn, file_extension = os.path.splitext(filename)
if file_extension in legal_extensions:
joined_file = os.path.join(dir_, filename)
expanded_files.append(joined_file)
# Otherwise add the file directly.
else:
expanded_files.append(f)
return expanded_files
|
[
"def",
"extract_files",
"(",
"files",
")",
":",
"expanded_files",
"=",
"[",
"]",
"legal_extensions",
"=",
"[",
"\".md\"",
",",
"\".txt\"",
",",
"\".rtf\"",
",",
"\".html\"",
",",
"\".tex\"",
",",
"\".markdown\"",
"]",
"for",
"f",
"in",
"files",
":",
"# If it's a directory, recursively walk through it and find the files.",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"f",
")",
":",
"for",
"dir_",
",",
"_",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"f",
")",
":",
"for",
"filename",
"in",
"filenames",
":",
"fn",
",",
"file_extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"if",
"file_extension",
"in",
"legal_extensions",
":",
"joined_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_",
",",
"filename",
")",
"expanded_files",
".",
"append",
"(",
"joined_file",
")",
"# Otherwise add the file directly.",
"else",
":",
"expanded_files",
".",
"append",
"(",
"f",
")",
"return",
"expanded_files"
] | 39.15
| 20.35
|
def _find_relation_factory(module):
"""
Attempt to find a RelationFactory subclass in the module.
Note: RelationFactory and RelationBase are ignored so they may
be imported to be used as base classes without fear.
"""
if not module:
return None
# All the RelationFactory subclasses
candidates = [o for o in (getattr(module, attr) for attr in dir(module))
if (o is not RelationFactory and
o is not RelationBase and
isclass(o) and
issubclass(o, RelationFactory))]
# Filter out any factories that are superclasses of another factory
# (none of the other factories subclass it). This usually makes
# the explict check for RelationBase and RelationFactory unnecessary.
candidates = [c1 for c1 in candidates
if not any(issubclass(c2, c1) for c2 in candidates
if c1 is not c2)]
if not candidates:
hookenv.log('No RelationFactory found in {}'.format(module.__name__),
hookenv.WARNING)
return None
if len(candidates) > 1:
raise RuntimeError('Too many RelationFactory found in {}'
''.format(module.__name__))
return candidates[0]
|
[
"def",
"_find_relation_factory",
"(",
"module",
")",
":",
"if",
"not",
"module",
":",
"return",
"None",
"# All the RelationFactory subclasses",
"candidates",
"=",
"[",
"o",
"for",
"o",
"in",
"(",
"getattr",
"(",
"module",
",",
"attr",
")",
"for",
"attr",
"in",
"dir",
"(",
"module",
")",
")",
"if",
"(",
"o",
"is",
"not",
"RelationFactory",
"and",
"o",
"is",
"not",
"RelationBase",
"and",
"isclass",
"(",
"o",
")",
"and",
"issubclass",
"(",
"o",
",",
"RelationFactory",
")",
")",
"]",
"# Filter out any factories that are superclasses of another factory",
"# (none of the other factories subclass it). This usually makes",
"# the explict check for RelationBase and RelationFactory unnecessary.",
"candidates",
"=",
"[",
"c1",
"for",
"c1",
"in",
"candidates",
"if",
"not",
"any",
"(",
"issubclass",
"(",
"c2",
",",
"c1",
")",
"for",
"c2",
"in",
"candidates",
"if",
"c1",
"is",
"not",
"c2",
")",
"]",
"if",
"not",
"candidates",
":",
"hookenv",
".",
"log",
"(",
"'No RelationFactory found in {}'",
".",
"format",
"(",
"module",
".",
"__name__",
")",
",",
"hookenv",
".",
"WARNING",
")",
"return",
"None",
"if",
"len",
"(",
"candidates",
")",
">",
"1",
":",
"raise",
"RuntimeError",
"(",
"'Too many RelationFactory found in {}'",
"''",
".",
"format",
"(",
"module",
".",
"__name__",
")",
")",
"return",
"candidates",
"[",
"0",
"]"
] | 37.117647
| 20.470588
|
def simple_mult(A, B, start):
""" Builds a slow, small multiplier using the simple shift-and-add algorithm.
Requires very small area (it uses only a single adder), but has long delay
(worst case is len(A) cycles). start is a one-bit input to indicate inputs are ready.
done is a one-bit output signal raised when the multiplication is finished.
:param WireVector A, B: two input wires for the multiplication
:returns: Register containing the product; the "done" signal
"""
triv_result = _trivial_mult(A, B)
if triv_result is not None:
return triv_result, pyrtl.Const(1, 1)
alen = len(A)
blen = len(B)
areg = pyrtl.Register(alen)
breg = pyrtl.Register(blen + alen)
accum = pyrtl.Register(blen + alen)
done = (areg == 0) # Multiplication is finished when a becomes 0
# During multiplication, shift a right every cycle, b left every cycle
with pyrtl.conditional_assignment:
with start: # initialization
areg.next |= A
breg.next |= B
accum.next |= 0
with ~done: # don't run when there's no work to do
areg.next |= areg[1:] # right shift
breg.next |= pyrtl.concat(breg, pyrtl.Const(0, 1)) # left shift
a_0_val = areg[0].sign_extended(len(accum))
# adds to accum only when LSB of areg is 1
accum.next |= accum + (a_0_val & breg)
return accum, done
|
[
"def",
"simple_mult",
"(",
"A",
",",
"B",
",",
"start",
")",
":",
"triv_result",
"=",
"_trivial_mult",
"(",
"A",
",",
"B",
")",
"if",
"triv_result",
"is",
"not",
"None",
":",
"return",
"triv_result",
",",
"pyrtl",
".",
"Const",
"(",
"1",
",",
"1",
")",
"alen",
"=",
"len",
"(",
"A",
")",
"blen",
"=",
"len",
"(",
"B",
")",
"areg",
"=",
"pyrtl",
".",
"Register",
"(",
"alen",
")",
"breg",
"=",
"pyrtl",
".",
"Register",
"(",
"blen",
"+",
"alen",
")",
"accum",
"=",
"pyrtl",
".",
"Register",
"(",
"blen",
"+",
"alen",
")",
"done",
"=",
"(",
"areg",
"==",
"0",
")",
"# Multiplication is finished when a becomes 0",
"# During multiplication, shift a right every cycle, b left every cycle",
"with",
"pyrtl",
".",
"conditional_assignment",
":",
"with",
"start",
":",
"# initialization",
"areg",
".",
"next",
"|=",
"A",
"breg",
".",
"next",
"|=",
"B",
"accum",
".",
"next",
"|=",
"0",
"with",
"~",
"done",
":",
"# don't run when there's no work to do",
"areg",
".",
"next",
"|=",
"areg",
"[",
"1",
":",
"]",
"# right shift",
"breg",
".",
"next",
"|=",
"pyrtl",
".",
"concat",
"(",
"breg",
",",
"pyrtl",
".",
"Const",
"(",
"0",
",",
"1",
")",
")",
"# left shift",
"a_0_val",
"=",
"areg",
"[",
"0",
"]",
".",
"sign_extended",
"(",
"len",
"(",
"accum",
")",
")",
"# adds to accum only when LSB of areg is 1",
"accum",
".",
"next",
"|=",
"accum",
"+",
"(",
"a_0_val",
"&",
"breg",
")",
"return",
"accum",
",",
"done"
] | 39.138889
| 20.305556
|
def get_undeclared_type(self, item):
"""
Checks if a typed has already been declared in the python output
or is a builtin python type.
"""
if item in self.done:
return None
if isinstance(item, typedesc.FundamentalType):
return None
if isinstance(item, typedesc.PointerType):
return self.get_undeclared_type(item.typ)
if isinstance(item, typedesc.ArrayType):
return self.get_undeclared_type(item.typ)
# else its an undeclared structure.
return item
|
[
"def",
"get_undeclared_type",
"(",
"self",
",",
"item",
")",
":",
"if",
"item",
"in",
"self",
".",
"done",
":",
"return",
"None",
"if",
"isinstance",
"(",
"item",
",",
"typedesc",
".",
"FundamentalType",
")",
":",
"return",
"None",
"if",
"isinstance",
"(",
"item",
",",
"typedesc",
".",
"PointerType",
")",
":",
"return",
"self",
".",
"get_undeclared_type",
"(",
"item",
".",
"typ",
")",
"if",
"isinstance",
"(",
"item",
",",
"typedesc",
".",
"ArrayType",
")",
":",
"return",
"self",
".",
"get_undeclared_type",
"(",
"item",
".",
"typ",
")",
"# else its an undeclared structure.",
"return",
"item"
] | 37.4
| 11.133333
|
def connected_ids(self, subset=None, show_ip=False, show_ipv4=None, include_localhost=None):
'''
Return a set of all connected minion ids, optionally within a subset
'''
if include_localhost is not None:
salt.utils.versions.warn_until(
'Sodium',
'The \'include_localhost\' argument is no longer required; any'
'connected localhost minion will always be included.'
)
if show_ipv4 is not None:
salt.utils.versions.warn_until(
'Sodium',
'The \'show_ipv4\' argument has been renamed to \'show_ip\' as'
'it now also includes IPv6 addresses for IPv6-connected'
'minions.'
)
minions = set()
if self.opts.get('minion_data_cache', False):
search = self.cache.list('minions')
if search is None:
return minions
addrs = salt.utils.network.local_port_tcp(int(self.opts['publish_port']))
if '127.0.0.1' in addrs:
# Add in the address of a possible locally-connected minion.
addrs.discard('127.0.0.1')
addrs.update(set(salt.utils.network.ip_addrs(include_loopback=False)))
if '::1' in addrs:
# Add in the address of a possible locally-connected minion.
addrs.discard('::1')
addrs.update(set(salt.utils.network.ip_addrs6(include_loopback=False)))
if subset:
search = subset
for id_ in search:
try:
mdata = self.cache.fetch('minions/{0}'.format(id_), 'data')
except SaltCacheError:
# If a SaltCacheError is explicitly raised during the fetch operation,
# permission was denied to open the cached data.p file. Continue on as
# in the releases <= 2016.3. (An explicit error raise was added in PR
# #35388. See issue #36867 for more information.
continue
if mdata is None:
continue
grains = mdata.get('grains', {})
for ipv4 in grains.get('ipv4', []):
if ipv4 in addrs:
if show_ip:
minions.add((id_, ipv4))
else:
minions.add(id_)
break
for ipv6 in grains.get('ipv6', []):
if ipv6 in addrs:
if show_ip:
minions.add((id_, ipv6))
else:
minions.add(id_)
break
return minions
|
[
"def",
"connected_ids",
"(",
"self",
",",
"subset",
"=",
"None",
",",
"show_ip",
"=",
"False",
",",
"show_ipv4",
"=",
"None",
",",
"include_localhost",
"=",
"None",
")",
":",
"if",
"include_localhost",
"is",
"not",
"None",
":",
"salt",
".",
"utils",
".",
"versions",
".",
"warn_until",
"(",
"'Sodium'",
",",
"'The \\'include_localhost\\' argument is no longer required; any'",
"'connected localhost minion will always be included.'",
")",
"if",
"show_ipv4",
"is",
"not",
"None",
":",
"salt",
".",
"utils",
".",
"versions",
".",
"warn_until",
"(",
"'Sodium'",
",",
"'The \\'show_ipv4\\' argument has been renamed to \\'show_ip\\' as'",
"'it now also includes IPv6 addresses for IPv6-connected'",
"'minions.'",
")",
"minions",
"=",
"set",
"(",
")",
"if",
"self",
".",
"opts",
".",
"get",
"(",
"'minion_data_cache'",
",",
"False",
")",
":",
"search",
"=",
"self",
".",
"cache",
".",
"list",
"(",
"'minions'",
")",
"if",
"search",
"is",
"None",
":",
"return",
"minions",
"addrs",
"=",
"salt",
".",
"utils",
".",
"network",
".",
"local_port_tcp",
"(",
"int",
"(",
"self",
".",
"opts",
"[",
"'publish_port'",
"]",
")",
")",
"if",
"'127.0.0.1'",
"in",
"addrs",
":",
"# Add in the address of a possible locally-connected minion.",
"addrs",
".",
"discard",
"(",
"'127.0.0.1'",
")",
"addrs",
".",
"update",
"(",
"set",
"(",
"salt",
".",
"utils",
".",
"network",
".",
"ip_addrs",
"(",
"include_loopback",
"=",
"False",
")",
")",
")",
"if",
"'::1'",
"in",
"addrs",
":",
"# Add in the address of a possible locally-connected minion.",
"addrs",
".",
"discard",
"(",
"'::1'",
")",
"addrs",
".",
"update",
"(",
"set",
"(",
"salt",
".",
"utils",
".",
"network",
".",
"ip_addrs6",
"(",
"include_loopback",
"=",
"False",
")",
")",
")",
"if",
"subset",
":",
"search",
"=",
"subset",
"for",
"id_",
"in",
"search",
":",
"try",
":",
"mdata",
"=",
"self",
".",
"cache",
".",
"fetch",
"(",
"'minions/{0}'",
".",
"format",
"(",
"id_",
")",
",",
"'data'",
")",
"except",
"SaltCacheError",
":",
"# If a SaltCacheError is explicitly raised during the fetch operation,",
"# permission was denied to open the cached data.p file. Continue on as",
"# in the releases <= 2016.3. (An explicit error raise was added in PR",
"# #35388. See issue #36867 for more information.",
"continue",
"if",
"mdata",
"is",
"None",
":",
"continue",
"grains",
"=",
"mdata",
".",
"get",
"(",
"'grains'",
",",
"{",
"}",
")",
"for",
"ipv4",
"in",
"grains",
".",
"get",
"(",
"'ipv4'",
",",
"[",
"]",
")",
":",
"if",
"ipv4",
"in",
"addrs",
":",
"if",
"show_ip",
":",
"minions",
".",
"add",
"(",
"(",
"id_",
",",
"ipv4",
")",
")",
"else",
":",
"minions",
".",
"add",
"(",
"id_",
")",
"break",
"for",
"ipv6",
"in",
"grains",
".",
"get",
"(",
"'ipv6'",
",",
"[",
"]",
")",
":",
"if",
"ipv6",
"in",
"addrs",
":",
"if",
"show_ip",
":",
"minions",
".",
"add",
"(",
"(",
"id_",
",",
"ipv6",
")",
")",
"else",
":",
"minions",
".",
"add",
"(",
"id_",
")",
"break",
"return",
"minions"
] | 45.966667
| 18.833333
|
def _buildItem(self, elem, cls=None, initpath=None):
""" Factory function to build objects based on registered PLEXOBJECTS. """
# cls is specified, build the object and return
initpath = initpath or self._initpath
if cls is not None:
return cls(self._server, elem, initpath)
# cls is not specified, try looking it up in PLEXOBJECTS
etype = elem.attrib.get('type', elem.attrib.get('streamType'))
ehash = '%s.%s' % (elem.tag, etype) if etype else elem.tag
ecls = utils.PLEXOBJECTS.get(ehash, utils.PLEXOBJECTS.get(elem.tag))
# log.debug('Building %s as %s', elem.tag, ecls.__name__)
if ecls is not None:
return ecls(self._server, elem, initpath)
raise UnknownType("Unknown library type <%s type='%s'../>" % (elem.tag, etype))
|
[
"def",
"_buildItem",
"(",
"self",
",",
"elem",
",",
"cls",
"=",
"None",
",",
"initpath",
"=",
"None",
")",
":",
"# cls is specified, build the object and return",
"initpath",
"=",
"initpath",
"or",
"self",
".",
"_initpath",
"if",
"cls",
"is",
"not",
"None",
":",
"return",
"cls",
"(",
"self",
".",
"_server",
",",
"elem",
",",
"initpath",
")",
"# cls is not specified, try looking it up in PLEXOBJECTS",
"etype",
"=",
"elem",
".",
"attrib",
".",
"get",
"(",
"'type'",
",",
"elem",
".",
"attrib",
".",
"get",
"(",
"'streamType'",
")",
")",
"ehash",
"=",
"'%s.%s'",
"%",
"(",
"elem",
".",
"tag",
",",
"etype",
")",
"if",
"etype",
"else",
"elem",
".",
"tag",
"ecls",
"=",
"utils",
".",
"PLEXOBJECTS",
".",
"get",
"(",
"ehash",
",",
"utils",
".",
"PLEXOBJECTS",
".",
"get",
"(",
"elem",
".",
"tag",
")",
")",
"# log.debug('Building %s as %s', elem.tag, ecls.__name__)",
"if",
"ecls",
"is",
"not",
"None",
":",
"return",
"ecls",
"(",
"self",
".",
"_server",
",",
"elem",
",",
"initpath",
")",
"raise",
"UnknownType",
"(",
"\"Unknown library type <%s type='%s'../>\"",
"%",
"(",
"elem",
".",
"tag",
",",
"etype",
")",
")"
] | 58.714286
| 19.285714
|
def getScreenRGB(self,screen_data=None):
"""This function fills screen_data with the data
screen_data MUST be a numpy array of uint32/int32. This can be initialized like so:
screen_data = np.array(w*h,dtype=np.uint32)
Notice, it must be width*height in size also
If it is None, then this function will initialize it
"""
if(screen_data is None):
width = ale_lib.getScreenWidth(self.obj)
height = ale_lib.getScreenWidth(self.obj)
screen_data = np.zeros(width*height,dtype=np.uint32)
ale_lib.getScreenRGB(self.obj,as_ctypes(screen_data))
return screen_data
|
[
"def",
"getScreenRGB",
"(",
"self",
",",
"screen_data",
"=",
"None",
")",
":",
"if",
"(",
"screen_data",
"is",
"None",
")",
":",
"width",
"=",
"ale_lib",
".",
"getScreenWidth",
"(",
"self",
".",
"obj",
")",
"height",
"=",
"ale_lib",
".",
"getScreenWidth",
"(",
"self",
".",
"obj",
")",
"screen_data",
"=",
"np",
".",
"zeros",
"(",
"width",
"*",
"height",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"ale_lib",
".",
"getScreenRGB",
"(",
"self",
".",
"obj",
",",
"as_ctypes",
"(",
"screen_data",
")",
")",
"return",
"screen_data"
] | 49.923077
| 14.307692
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.