content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_orientation(origin, p1, p2):
    """
    Given origin and two points, return the orientation of Point p1 with
    respect to Point p2, measured around origin via the 2D cross product.

    Returns
    -------
    integer: Negative if p1 is clockwise of p2.
    """
    ox, oy = origin[0], origin[1]
    cross = (p2[0] - ox) * (p1[1] - oy) - (p1[0] - ox) * (p2[1] - oy)
    return cross  # | d158763530550e6663b7e9d81e6993599ad5b141 | 691,401 |
def get_pixel_info(local_info, d_behind, obs_range, image_size):
    """
    Transform local vehicle info into pixel info, with the ego vehicle placed
    at the lower center of the image. Both the ego local coordinate frame and
    the pixel frame are left-handed, the latter with origin at bottom-left.

    :param local_info: (x, y, yaw, l, w) in the ego coordinate frame
    :param d_behind: distance from ego to the bottom edge of the FOV
    :param obs_range: length of one edge of the FOV
    :param image_size: length of one edge of the image
    :return: (x, y, yaw, l, w) tuple, lengths/positions in pixels
    """
    x, y, yaw, length, width = local_info
    x_px = (x + d_behind) / obs_range * image_size
    # Shift y so the ego sits at the horizontal center of the image.
    y_px = y / obs_range * image_size + image_size / 2
    length_px = length / obs_range * image_size
    width_px = width / obs_range * image_size
    return (x_px, y_px, yaw, length_px, width_px)  # | a0aaa8accdbb9bab6102b9f8e9895986376c594e | 691,402 |
import os
def extract_label_set(dataset):
    """Return the class labels of a dataset laid out as one sub-folder per class."""
    entries = os.listdir(dataset)
    return [entry for entry in entries if os.path.isdir(os.path.join(dataset, entry))]  # | 527ffe0b2d420c6bb17227a2c95ae038e220e069 | 691,403 |
def _joinNamePath(prefix=None, name=None, index=None):
"""
Utility function for generating nested configuration names
"""
if not prefix and not name:
raise ValueError("Invalid name: cannot be None")
elif not name:
name = prefix
elif prefix and name:
name = prefix + "." + name
if index is not None:
return "%s[%r]" % (name, index)
else:
return name | fb26dc39ded907cefc1a319d6d0692e67f8c5007 | 691,404 |
import sys
def get_ecs_image_url(client, cluster, service):
    """Return the docker image url currently deployed for an ECS service.

    Writes a message to stderr and exits the process with status 1 when
    either AWS lookup fails.
    """
    try:
        described = client.describe_services(
            cluster=cluster, services=[service]
        )
        task_definition = described.get('services')[0].get('taskDefinition')
    except Exception as err:
        sys.stderr.write(f'Service lookup failed: {err}')
        sys.exit(1)
    try:
        definition = client.describe_task_definition(
            taskDefinition=task_definition
        )
        containers = definition.get('taskDefinition').get('containerDefinitions')
        image = containers[0].get('image')
    except Exception as err:
        sys.stderr.write(f'Task lookup failed: {err}')
        sys.exit(1)
    return image  # | 369f6e3c76155f07c7b749e40c4a894b8a711989 | 691,405 |
async def tag_object(computervision_client, img_data):
    """Tag an image, skipping food and fruit tags.

    Returns the name of the first tag whose name contains neither 'food'
    nor 'fruit'; if every tag matches, falls back to the last tag's name.

    :param computervision_client: Azure Computer Vision client.
    :param img_data: binary image data/stream to tag.
    """
    tags = computervision_client.tag_image_in_stream(img_data)
    # Filter tags (remove food and fruit).  The previous version tracked a
    # separate running index into tags.tags that always equalled the loop
    # position; returning tag.name directly is equivalent and clearer.
    for tag in tags.tags:
        if 'food' not in tag.name and 'fruit' not in tag.name:
            return tag.name
    return tags.tags[-1].name  # | 539705ff3252527747aa84bd98918a97f7318531 | 691,406 |
import datetime
import time
def convert_to_seconds(minutes):
    """
    Convert an elapsed-time string of the form 'MM:SS' to seconds elapsed.

    :param minutes: time elapsed, e.g. '12:34'
    :return: seconds elapsed as a float; the malformed end-of-game sentinel
        '-16:0-' maps to the string '1200' (kept for caller compatibility)
    """
    if minutes == '-16:0-':
        # Sometimes in the html at the end of the game the time is -16:0-
        return '1200'
    parsed = time.strptime(minutes.strip(' '), '%M:%S')
    delta = datetime.timedelta(hours=parsed.tm_hour, minutes=parsed.tm_min, seconds=parsed.tm_sec)
    return delta.total_seconds()  # | 5d841de245cc6790a9a9d15c998406927ffda5a6 | 691,407 |
def is_valid_port(port):
    """Validate a port.

    :param port: port to validate.
    :type port: ``(string, int)``
    :returns: True if is valid else False
    :rtype: ``bool``
    """
    try:
        return 0 < int(port) <= 65535
    except (ValueError, TypeError):
        # ValueError covers non-numeric strings; TypeError covers None and
        # other non-convertible inputs (previously these escaped uncaught).
        return False
from typing import Optional
def _server_address() -> Optional[str]:
"""The address where the server will listen for client and browser
connections. Use this if you want to bind the server to a specific address.
If set, the server will only be accessible from this address, and not from
any aliases (like localhost).
Default: (unset)
"""
return None | 96d54936ad443004437a40eb4aaa41c8d13ac2b4 | 691,409 |
def un_center_data(data, mu):
    """Reverse a centering transform by adding the mean ``mu`` back onto ``data``."""
    return data + mu  # | 3d3f4dd7af9ef278d432e06b3113e9c4f1767d85 | 691,411 |
def parse_typename(typename):
    """
    Split a TypeName string of the form ``<namespace>/<type>`` into its parts.

    :param typename: a string of the form <namespace>/<type>
    :return: (namespace, type) tuple
    :raises ValueError: if the input is None, contains no '/', or either
        part is empty.
    """
    if typename is None:
        raise ValueError("function type must be provided")
    separator_at = typename.rfind("/")
    if separator_at < 0:
        raise ValueError("function type must be of the from namespace/name")
    namespace = typename[:separator_at]
    type_name = typename[separator_at + 1:]
    if not namespace:
        raise ValueError("function type's namespace must not be empty")
    if not type_name:
        raise ValueError("function type's name must not be empty")
    return namespace, type_name  # | 7770939cbe7d6e7afabb8e0679380836977f17aa | 691,412 |
def linear_fit_to_2_points(point0, point1):
    """Return a linear estimator (callable) passing through the two points."""
    (x0, y0), (x1, y1) = point0, point1
    def estimator(x):
        return (x - x0) * (y1 - y0) / (x1 - x0) + y0
    return estimator  # | e4d62ba39bb495765329d52f5b8cbe51cdd9b8e0 | 691,413 |
from typing import Tuple
def min_or_max_index(L: list, minimum: bool) -> Tuple[int, int]:
    """ If the Boolean parameter refers to True
    the function returns a tuple containing the minimum and its index;
    if it refers to False,
    it returns a tuple containing the maximum and its index
    >>> min_or_max_index([5, 4, 3, 2, 8, 9], True)
    (2, 3)
    >>> min_or_max_index([5, 4, 3, 2, 8, 9], False)
    (9, 5)
    """
    # Track positions with enumerate so this really is a single pass; the
    # previous version called L.index() inside the loop, making it O(n**2)
    # despite the "one pass" comment.  Strict comparisons preserve the old
    # behaviour of reporting the first occurrence of a duplicated extreme.
    smallest = biggest = L[0]
    smallest_pos = biggest_pos = 0
    for pos, element in enumerate(L):
        if element < smallest:
            smallest, smallest_pos = element, pos
        if element > biggest:
            biggest, biggest_pos = element, pos
    if minimum:
        return (smallest, smallest_pos)
    return (biggest, biggest_pos)  # | e2f0f572f248fec946690cc586bd4faf326aa699 | 691,414 |
def line_csv_to_dict_output(text, intent, tags):
    """
    Convert a (text, intent, BIO-tags) row into rasa's output format.
    Args:
        text: Text column (space-separated words)
        intent: Intent column
        tags: Tags column (space-separated tags, aligned with the words)
    Returns:
        dict with 'text', 'intent' and 'entities' keys, where 'entities'
        is a list of dicts of the form:
        {
            'entity': entity,
            'start': start,
            'end': end,
            'value': value
        }
    """
    rasa_output = {}
    rasa_output["text"] = text
    rasa_output["intent"] = intent
    tags = tags.split(' ')
    words = text.split(' ')
    # get index start words: ids[i] is the character offset at which word i
    # begins; a final sentinel offset (len(text) + 1) is appended so that
    # ids[end] - 1 stays valid for an entity that runs to the last word.
    ids = [0]
    temp = 0
    for i in range(1, len(words)):
        ids.append(temp + len(words[i-1]) + 1)
        temp = ids[-1]
    ids.append(len(rasa_output["text"]) + 1)
    entities = []
    start = 0
    entity = None
    end = 0
    # ping is True while we are inside an entity span opened by a "B-" tag.
    ping = False
    for i in range(len(tags)):
        if ping == True:
            # Inside an entity: close it on 'O', on a new "B-" tag, or at
            # the end of the tag sequence.
            if tags[i] == 'O':
                end = i
                entities.append({
                    'entity': entity,
                    'start': ids[start],
                    'end': ids[end] - 1,
                    'value': ' '.join(words[start:end]).strip()
                })
                ping = False
            elif ("B-" in tags[i]) and (i == len(tags) - 1):
                # A new entity begins on the very last tag: close the open
                # span, then emit the single-word entity starting here.
                end = i
                entities.append({
                    'entity': entity,
                    'start': ids[start],
                    'end': ids[end] - 1,
                    'value': ' '.join(words[start:end]).strip()
                })
                start = i
                end = i + 1
                entity = tags[i][2:]
                entities.append({
                    'entity': entity,
                    'start': ids[start],
                    'end': ids[end] - 1,
                    'value': ' '.join(words[start:end]).strip()
                })
            elif "B-" in tags[i]:
                # A new entity begins mid-sequence: close the open span and
                # start tracking the new one.
                end = i
                entities.append({
                    'entity': entity,
                    'start': ids[start],
                    'end': ids[end] - 1,
                    'value': ' '.join(words[start:end]).strip()
                })
                ping = True
                start = i
                entity = tags[i][2:]
            elif i == len(tags) - 1:
                # The open entity extends through the final tag.
                end = i + 1
                entities.append({
                    'entity': entity,
                    'start': ids[start],
                    'end': ids[end] - 1,
                    'value': ' '.join(words[start:end]).strip()
                })
        else:
            # Not currently inside an entity span.
            if "B-" in tags[i] and i == len(tags) - 1:
                # An entity starts on the last tag: emit it immediately.
                start = i
                end = i + 1
                entity = tags[i][2:]
                entities.append({
                    'entity': entity,
                    'start': ids[start],
                    'end': ids[end] - 1,
                    'value': ' '.join(words[start:end]).strip()
                })
            elif "B-" in tags[i]:
                # An entity starts mid-sequence: remember where.
                start = i
                entity = tags[i][2:]
                ping = True
    # NOTE(review): continuation ("I-") tags are handled implicitly -- any
    # tag that is neither 'O' nor "B-" keeps the current span open; confirm
    # this matches the upstream tagging scheme.
    rasa_output["entities"] = entities
    return rasa_output | 324b2ef26e01434cd08da71628b28e878a77f1b3 | 691,415 |
def get_from_cache(key, default_value=None, ctx=None):
    """Look up ``key`` in the context cache.

    :param str key: String with the key.
    :param obj default_value: value returned when the key is missing or no
        context is supplied; defaults to None.
    :param obj ctx: the context object (must support ``get``).
    :returns: the cached value for ``key``, or ``default_value``.
    """
    if ctx:
        return ctx.get(key, default_value)
    return default_value  # | 4a63525802b7076e5b196269c3fc43fd198039ff | 691,416 |
def get_coverage(biom_dict, sample_list, gc_names):
    """Extract coverage scores from a biom-format dictionary.

    ----------
    biom_dict
        dict, biom file converted to a dictionary format
    sample_list
        list, ids of all the relevant samples
    gc_names
        list, names of all the relevant GCs
    returns
    ---------
    dict {GC name: [coverage scores]}; GCs with no matching samples are
    omitted entirely.
    """
    out_dict = {}
    for row in biom_dict["rows"]:
        if row["id"] not in gc_names:
            continue
        seen_samples = []
        values = []
        for sample_name in row["metadata"].keys():
            if sample_name in sample_list and sample_name not in seen_samples:
                seen_samples.append(sample_name)
                values.append(float(row["metadata"][sample_name]))
                # Only record the GC once at least one sample matched.
                out_dict[row["id"]] = values
    return out_dict  # | 9c54676f8567dafe5c9f4b457b2caf5ba2dabe96 | 691,417 |
def is_number(word):
    """ Return 'NUM' when a shipo word is a number, False otherwise
    :param word: a word to be evaluated
    :type word: str
    :returns: 'NUM' if a shipo word is a number or False if not
    :rtype: str
    :Example:
    >>> import chana.ner
    >>> chana.ner.is_number('kimisha')
    'NUM'
    """
    numerals = ('westiora', 'rabé', 'kimisha', 'chosko', 'pichika', 'sokota',
                'kanchis', 'posaka', 'iskon', 'chonka', 'pacha', 'waranka')
    return 'NUM' if word.lower() in numerals else False  # | 45daa79eff17c93e201b03082bbea3c05c383d33 | 691,418 |
from typing import Dict
from typing import Any
import uuid
def make_test_experiment_config(config: Dict[str, Any]) -> Dict[str, Any]:
    """
    Create a short experiment that based on a modified version of the
    experiment config of the request and monitors its progress for success.
    The short experiment is created as archived to be not user-visible by
    default.
    The experiment configuration is modified such that:
    1. The training step takes a minimum amount of time.
    2. All checkpoints are GC'd after experiment finishes.
    3. The experiment does not attempt restarts on failure.
    """
    config_test = config.copy()
    config_test.update(
        {
            "description": "[test-mode] {}".format(
                config_test.get("description", str(uuid.uuid4()))
            ),
            "batches_per_step": 1,
            "min_validation_period": 1,
            "checkpoint_storage": {
                **config_test.get("checkpoint_storage", {}),
                "save_experiment_best": 0,
                "save_trial_best": 0,
                "save_trial_latest": 0,
            },
            "searcher": {
                "name": "single",
                "metric": config_test["searcher"]["metric"],
                "max_steps": 1,
            },
            "resources": {**config_test.get("resources", {"slots_per_trial": 1})},
            "max_restarts": 0,
        }
    )
    # Apply the data_layer default to the copy that is returned; previously
    # this mutated the caller's ``config`` and the default never made it
    # into the returned test config.
    config_test.setdefault(
        "data_layer", {"type": "shared_fs", "container_storage_path": "/tmp/determined"}
    )
    return config_test  # | 72ee62a9b6fa977aaff6f0621e18e0cdb0a8a0bb | 691,419 |
def operation_jnz(register, register_check, jump_by):
    """Jump operation: return ``jump_by`` when the checked value is non-zero.

    ``register_check`` may be a register name (looked up in ``register``)
    or a literal value; returns None (no jump) when the value is zero.
    """
    value = register.get(register_check, register_check)
    if value != 0:
        return jump_by
    return None  # | bfeca2efbb7389c21749497db73e624d37faef62 | 691,420 |
from typing import List
def increment_index(index: List[int], dims: List[int]) -> bool:
    """Increment ``index`` in place, odometer style (last digit fastest).

    Args:
        index: The index to be incremented (mutated in place).
        dims: The dimensions of the block this index is in.

    Returns:
        True iff the index overflowed and is now all zeros again.
    """
    for position in reversed(range(len(index))):
        index[position] += 1
        if index[position] < dims[position]:
            return False
        # This digit wrapped; zero it and carry into the next one left.
        index[position] = 0
    return True  # | 833e0baa9c29348067102ef6fe530e45b91ff988 | 691,421 |
def add_up_errors(list_keyerror):
    """
    #################################################################################
    Description:
    Concatenates the given keyerrors (ignoring None entries) into one string
    #################################################################################
    :param list_keyerror: list of string
        list of keyerrors (strings or None) listing encountered errors
    :return keyerror: string
        all encountered errors joined with ' ; ', or None when empty
    """
    combined = ''
    for key in list_keyerror:
        if key is None:
            continue
        if combined:
            combined += " ; "
        combined += str(key)
    return combined if combined else None  # | 62e61fa97dc3bfcd5f62d312d3f573a730479dde | 691,424 |
import random
def _create_list_of_floats(n):
"""Create a list of n floats."""
list = []
for _ in range(n):
list.append(random.random() * 100)
return list | bbade853b1fd091aa157ac81d6ee3b636f723a7b | 691,425 |
def counts_in_out_packets(packets):
    """
    Counts the number of packets in & out in the array packets.
    A packet is "in" when its direction is negative and "out" when it is
    positive; direction 0 is counted in neither bucket.
    @param packets is a list of packets, structured as follows: `[(time, direction)]`
    @return tuple `(num_packets_in, num_packets_out)`
    """
    # Count directly instead of building throwaway lists just to len() them.
    num_in = sum(1 for val in packets if val[1] < 0)
    num_out = sum(1 for val in packets if val[1] > 0)
    return (num_in, num_out)  # | c60186ba48764e019204d2088ea554fffb999f7b | 691,426 |
def minimize(data, exclude=None):
    """Central function call: pipe ``data`` through every registered
    compression pass.  A pass is any module-level function whose name ends
    in ``_helper`` taking a string and returning a more compressed string.
    ``exclude`` may list pass names (without the ``_helper`` suffix) to skip.
    """
    for name, func in globals().items():
        if not name.endswith("_helper"):
            continue
        base_name = name[:-7]
        if exclude and base_name in exclude:
            continue
        data = func(data)
    return data  # | 3ea434068211b0ac968e762bf600b8a46bd20a57 | 691,427 |
from typing import Tuple
from datetime import datetime
def getStartDatetime(start: str, days: Tuple) -> datetime:
    """ Build a datetime instance from a '<day> <HHMM>' description.
    :param start: string representation of the day and time, e.g. 'mon 0930'
    :param days: a tuple of 3-character day representations; the day's
        position in this tuple becomes the datetime's day-of-month
    :return : datetime instance (year/month fixed at 1)
    NOTE(review): a day at index 0 would yield datetime day 0, which raises
    ValueError -- presumably ``days`` is laid out so index 0 is never
    matched; confirm with callers.
    """
    day_token, time_token = start.split(' ')
    day_index = days.index(day_token.lower())
    hour = int(time_token[:2])
    minute = int(time_token[2:])
    return datetime(1, 1, day_index, hour, minute, 0, 0)  # | e4ffdc7743700914fbb3bd677ddc8ecef0fe16a4 | 691,428 |
def _find_no_grad_vars(block, op_path, targets, no_grad_set):
    """
    Find the vars which is not used in the program, and
    those var belong to no_grad_var.

    Walks ``op_path`` backwards from ``targets``: an op output that never
    feeds a later op (and is not stop_gradient) needs no gradient.

    :param block: program block whose ``vars`` map is consulted
    :param op_path: ordered list of ops on the path to the targets
    :param targets: output variables of interest
    :param no_grad_set: names already known to need no gradient
    :return: set of variable names that can skip gradient computation
    """
    output_names = set([out.name for out in targets])
    no_grad_var = []
    for i, op in reversed(list(enumerate(op_path))):
        # If the op has sub_block, it is too complicated to find the correct no_grad_var.
        if not op.has_attr("sub_block"):
            for out_var in op.desc.output_arg_names():
                if out_var not in output_names and out_var not in op.desc.input_arg_names(
                ) and not block.vars[out_var].stop_gradient:
                    no_grad_var.append(out_var)
        # Inputs of this op are consumed on the path to the targets, so mark
        # them as used when they are not already excluded from gradients.
        for name in op.desc.input_arg_names():
            if name not in no_grad_set:
                output_names.add(name)
    return set(no_grad_var) | 99e484f9ae508c2637be719ce2d3f16631b75694 | 691,429 |
import argparse
def parse_args(argv=None):
    """
    Parse CMD arguments for the jigsaw tool.

    :param argv: argument list to parse; defaults to sys.argv[1:]
    :return: parsed argparse.Namespace carrying an ``action`` sub-command
    """
    parser = argparse.ArgumentParser(prog="jigsaw")
    subparsers = parser.add_subparsers(
        dest="action", help="commands", required=True
    )
    # 'create' builds a jigsaw from a supplied image.
    create_parser = subparsers.add_parser(
        "create", help="Create an jigsaw using supplied image"
    )
    create_parser.add_argument("image_path", help="Path to image")
    # 'solve' reassembles a scrambled jigsaw from a folder of pieces.
    solve_parser = subparsers.add_parser("solve", help="Solve jigsaw image")
    solve_parser.add_argument("folder_path", help="Path to scrambled image folder")
    return parser.parse_args(argv)  # | b03d7e23456d816854f4f2100f0faab4cd446b21 | 691,430 |
from datetime import datetime
def now():
    """
    :return: the current UTC time in ISO8601 zulu timestamp format
    """
    # utcnow() yields a naive datetime (no tzinfo), so isoformat() emits no
    # offset; Python has no zulu notation of its own, hence the manual 'Z'.
    return datetime.utcnow().isoformat() + "Z"  # | 625b313cc4c88018540cd6f76fc1631e1baa1754 | 691,431 |
def mod(a, b):
    """Return the modulus of a with respect to b.

    Uses truncated (C-style) division, so the result keeps the sign of
    ``a`` -- unlike Python's ``%``, which follows the sign of ``b``.
    """
    # Compute the truncated quotient without going through a float; the old
    # ``int(a / b)`` silently lost precision for large integers.
    q = a // b
    if q < 0 and q * b != a:
        q += 1  # floor -> truncation adjustment when signs differ
    return a - b * q  # | 7f0b961fcd6b83071d66e2cdbb8f5e1f08e0542a | 691,432 |
from typing import Sized
from typing import Iterable
from typing import Mapping
import six
def is_listy(x):
    """Return True if `x` is "listy", i.e. a list-like object.
    "Listy" is defined as a sized iterable which is neither a map nor a string:
    >>> is_listy(["a", "b"])
    True
    >>> is_listy(set())
    True
    >>> is_listy(iter(["a", "b"]))
    False
    >>> is_listy({"a": "b"})
    False
    >>> is_listy("a regular string")
    False
    Note:
        Iterables and generators fail the "listy" test because they
        are not sized.
    Args:
        x (any value): The object to test.
    Returns:
        bool: True if `x` is "listy", False otherwise.
    """
    # six.string_types is just (str,) on Python 3 (this file already uses
    # the typing module), so the six dependency is unnecessary here.
    return (isinstance(x, Sized) and
            isinstance(x, Iterable) and
            not isinstance(x, Mapping) and
            not isinstance(x, str))  # | ca8f1d6b025990e9083f94ecf0f9e4ec9b168876 | 691,433 |
import pickle
def read_pickle(relnm):
    """ Read serialized object from pickle on disk at relnm

    Args:
        relnm (str) : Relative name/path to pickled object

    Returns:
        obj (`:obj: unpickled object`)
    """
    with open(relnm, 'rb') as fh:
        loaded = pickle.load(fh)
    print('Loaded object from disk at {}'.format(relnm))
    return loaded  # | ee9572c38c0c5c18d308c07ef4bad9410ce4048a | 691,434 |
import argparse
def parse_arguments(argv=None):
    """Parses inputted arguments as described.

    :param argv: argument list to parse; defaults to sys.argv[1:].  Taking
        the list as an optional parameter (matching argparse convention and
        the sibling ``parse_args`` helper) keeps the function testable
        while preserving the old no-argument call.
    :return: parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', '--Format', help="format of output", type=str, required=True,
        choices=["VCF", "23_and_me", "ancestry"])
    parser.add_argument(
        '-i', '--Input', help='input tsv ', type=str, required=True)
    parser.add_argument('-n', '--name',
                        help='name of save file', required=True)
    parser.add_argument("-m", "--minSuppReads", help="Minimum supporting reads for a genotype call", default=1, type=int)
    parser.add_argument("-k", "--kmerFlags", help="If set, then output kmer flags in VCF conversion", action="store_true")
    args = parser.parse_args(argv)
    return args  # | 5b92eed944754db56eb9c9448b0876cb51ac439f | 691,435 |
def parse_key_value_config(config_value):
    """ Parses out key-value pairs from a string that has the following format:
    key: value, key2: value, key3: value
    :param string config_value: a string to parse key-value pairs from
    :returns dict:
    """
    if not config_value:
        return {}
    key_values = {}
    for key_value_pair in config_value.split(','):
        # Split on the first ':' only so values may themselves contain
        # colons (e.g. URLs); the old unconditional split crashed on them.
        key, value = key_value_pair.strip().split(':', 1)
        key_values[key.strip()] = value.strip()
    return key_values  # | f00c79d85e71364db58bfb5b91fb2b8654ebe75c | 691,436 |
from typing import List
import os
import subprocess
def _call_py_linters(tests_python_path: str, files: List[str]) -> int:
"""
Args:
tests_python_path: the path of the directory `tezos/tests_python`
files (list(str)): The files to lint
Returns:
The return code of calling linters, stopping at first failure
"""
tests_python_basename = os.path.basename(tests_python_path)
# Filter out files that are not under tests_python since this is the scope
# of the poetry sandbox
target_files = " ".join(
[
file[len(tests_python_basename) + len(os.sep):]
for file in files
if file.startswith(tests_python_basename)
]
)
if not target_files:
return 0 # Nothing to do
# Run all analyses defined in Makefile for this hook
cmd = ["make", "pre_commit_targets"]
# We use the SRCS environment variable to pass the filenames that have
# changed as our analyses targets
completed_process = subprocess.run(
cmd,
cwd=tests_python_basename,
check=False,
env=dict(os.environ, SRCS=target_files),
)
return completed_process.returncode | f92db5f7fb48c1eaa79df2da5d8b22c1ee3f3936 | 691,437 |
import uuid
import base64
import os
def generate_base64_uid(byte_length: int = 32, urlsafe: bool = True) -> str:
    """Generate a customized uuid string.

    The id is ``byte_length`` raw bytes -- a uuid1 (16 bytes) plus
    os.urandom padding -- base64-encoded.  Values of ``byte_length``
    smaller than 16 are clamped to the uuid alone instead of crashing
    os.urandom with a negative count (the previous behaviour).

    :param byte_length: total number of raw bytes before encoding.
    :param urlsafe: use the URL-safe base64 alphabet when True.
    :return: the encoded id as a str.
    """
    uid = uuid.uuid1().bytes
    uid = uid + os.urandom(max(byte_length - len(uid), 0))
    if urlsafe:
        session_id = base64.urlsafe_b64encode(uid)
    else:
        session_id = base64.b64encode(uid)
    return session_id.decode('utf-8')  # | b25229c903cffeb42a0eb3c4f9a630bdc2f7dafe | 691,438 |
def fileno(fil):
    """Return the file descriptor representation of the file.

    An int argument is returned unchanged.  Otherwise ``fil.fileno()`` is
    called and its value returned, provided it is an int.  Anything else
    raises TypeError.
    """
    if isinstance(fil, int):
        return fil
    if hasattr(fil, "fileno"):
        descriptor = fil.fileno()
        if isinstance(descriptor, int):
            return descriptor
        raise TypeError("expected fileno to return an int, not " + type(descriptor).__name__)
    raise TypeError("expected int or an object with a fileno() method, not " + type(fil).__name__)  # | 983243c11a3264c77752a2da5a1d3d33279814ce | 691,439 |
import os
def get_device_fw():
    """ Returns device fw (release, version).

    Calls os.uname() once rather than twice so both fields come from the
    same snapshot.
    """
    uname = os.uname()
    return (uname[2], uname[3])  # | c27ac1186aa5b96182512d00ab49738d4010e1e0 | 691,440 |
def _get_video_file(reader, num_frames):
"""Helper function to get an entire videofile"""
video = []
for _ in range(num_frames):
output = reader.run({})
video.append(output["img"])
return video | ca3a8d3a648593ec96bbd35d32b2470770475dbb | 691,441 |
from typing import List
from typing import Dict
from typing import Any
def build_message(commit: str,
                  commit_github_url: str,
                  changed_files: List[str],
                  diffs: dict,
                  commit_author: str) -> List[Dict[str, Any]]:
    """Construct a slack attachment describing schema struct file changes.

    Args:
        commit: current commit hash
        commit_github_url: Url of commit on github.
        changed_files: List of relevant files that were changed.
        diffs: Dictionary with changed_files keys and changesets as values.
        commit_author: Author of commit.
    Returns:
        message: slack message in a format like slack_sdk attachment argument expects.
    """
    title = f"Commit {commit[0:7]} Modified Schema Struct Files"
    fields: List[Dict[str, Any]] = [{"value": f"Changed by: {commit_author}"}]
    fields.extend(
        {
            "title": f"{changed_file}",
            "value": f"```{diffs[changed_file]}```",
            "short": False,
        }
        for changed_file in changed_files
    )
    return [{
        'fallback': title,
        'color': 'warning',
        'title': title,
        'title_link': commit_github_url,
        'fields': fields,
    }]  # | 2531003df67b620b5ac933911023e7bfb6875786 | 691,442 |
import os
def masks(parent_directory):
    """Return the path under ``parent_directory`` where pruning masks are stored."""
    return os.path.join(parent_directory, 'masks')  # | 379b06657c8cb722d1037c383bcf21047f7ed9fb | 691,443 |
import os
def GetGClientPrimarySolutionName(gclient_root_dir_path):
    """Returns the name of the primary solution in the .gclient file specified.

    Returns None when no solutions are defined.  NOTE: exec of the config
    file is inherently trusting -- .gclient is developer-controlled input.
    """
    gclient_config_file = os.path.join(gclient_root_dir_path, '.gclient')
    env = {}
    # Read via a context manager; the bare open() previously leaked the
    # file handle.
    with open(gclient_config_file) as config:
        source = config.read()
    exec(compile(source, gclient_config_file, 'exec'), env)
    solutions = env.get('solutions', [])
    if solutions:
        return solutions[0].get('name')
    return None  # | 52e9967b8c16607eb5b00fc8283f84c5fcc751dd | 691,444 |
def _count_submissions(conductors):
""" From array of conductors, accumulate submission count. """
return sum(c.num_cmd_submissions for c in conductors) | 9ae42a47fa0284fe28fff572dafad5f601e6bd56 | 691,445 |
def get_headers(oauth_token: str) -> dict:
    """Build the Authorization header shared by all requests."""
    return {'Authorization': f'OAuth {oauth_token}'}  # | 285f88b6268f50209432698a12ba2b4b57ecd1ee | 691,446 |
def compose(f, g):
    """Return a function which pipes g(x) into f(x), i.e. x -> f(g(x))."""
    def composed(x):
        return f(g(x))
    return composed  # | 74341889fbfaae9aa89ccdc3c829754750be7667 | 691,447 |
def role_change(statement):
    """
    Get link for a role update.

    :param statement: the xAPI statement (currently unused; kept for
        interface parity with the other handlers)
    :return: The url location.
    """
    return '/assistants/api/user_sync_agent/'  # | d9b3391e0601f12201f830499cc73168cef7ff10 | 691,448 |
import inspect
def whois_callers_caller():
    """
    :returns: the frame of our caller's caller.
    """
    return inspect.stack()[2].frame  # | 07c4c53fc1bf11541d9e2e0f3b5b7be0fdbbd299 | 691,449 |
def find_diff(g, start):
    """
    g -> Graph of states of nibbles
    start -> Starting configuration
    Find all possible differentials given a start configuration. The function
    returns a tuple containing:
    1. the number of rounds explored
    2. a list of sets of reachable states, one set per round (index 0 holds
       just the starting configuration)
    """
    vs = set([start])
    states = [vs]
    rounds = 0
    is_end = False
    # Breadth-first expansion, one round at a time; stops when no new
    # states remain or once the state 4095 (0xFFF, all twelve bits set --
    # presumably the terminal/full-difference state, confirm with callers)
    # appears among the successors.
    while len(vs) > 0 and not is_end:
        n_vs = set()
        for v0 in vs:
            for v1 in g[v0]:
                if v1 == 4095:
                    is_end = True
                n_vs.add(v1)
        vs = n_vs
        states.append(vs)
        rounds += 1
    return (rounds, states) | 74f24be96f057e2810fed6de6bb7753be31092d5 | 691,450 |
def sg_to_plato(sg):
    """
    Specific Gravity to Degrees Plato.

    :param float sg: Specific Gravity
    :return: Degrees Plato
    :rtype: float

    Uses the precise cubic fit
    ``-616.868 + 1111.14*sg - 630.272*sg**2 + 135.997*sg**3``
    (evaluated in Horner form) rather than the coarse
    ``(sg - 1) * 1000 / 4`` approximation.

    Source:
    * http://www.brewersfriend.com/2012/10/31/on-the-relationship-between-plato-and-specific-gravity/
    """  # noqa
    return ((135.997 * sg - 630.272) * sg + 1111.14) * sg - 616.868  # | cf689927fb64d5a9108b28626dd324982c0ce776 | 691,451 |
from typing import List
from typing import Dict
from typing import Tuple
def replace_variables(
    sentence: List[str], sentence_variables: Dict[str, str]
) -> Tuple[List[str], List[str]]:
    """
    Replace abstract variable tokens with their concrete values, producing
    parallel token and tag sequences: literal words are tagged 'O', while
    each word of a substituted value is tagged with the variable name.
    """
    tokens: List[str] = []
    tags: List[str] = []
    for token in sentence:
        if token not in sentence_variables:
            tokens.append(token)
            tags.append("O")
            continue
        # Multi-word values expand into one token/tag pair per word.
        for word in sentence_variables[token].split():
            tokens.append(word)
            tags.append(token)
    return tokens, tags  # | 4a5111c209a4faf03b96c920af113d086d0c970f | 691,452 |
import re
def get_lem_dict(path):
    """Create mapping from word to lemma.

    Reads a tab-separated dictionary file (opened in binary, decoded as
    utf-8), skipping the first SKIP header lines, and returns
    {word: 'lemma1/lemma2/...'}.  Lemmas flagged as dated/rare/dialectal
    are dropped for a word whenever a common lemma also exists for it.
    """
    SKIP = 32  # header lines at the top of the dictionary file
    lem_by_word = {}
    uncommon = set()
    # Usage labels marking dated (daw.), archaic (przest.), rare (rzad.)
    # and dialectal (gwar.) lemmas.  NOTE(review): labels suggest a Polish
    # dictionary source -- confirm the expected file format.
    uncommon_re = re.compile('daw[.]|przest[.]|rzad[.]|gwar[.]')
    fi = open(path,'rb')
    i = 0
    for line in fi:
        i += 1
        if i<=SKIP: continue
        line = line.rstrip().decode('utf8').lower()
        # Pad with tabs so rows with fewer than 5 columns still unpack.
        word,lem,info0,info1,info2 = (line+'\t\t\t\t').split('\t')[:5]
        if word not in lem_by_word:
            lem_by_word[word] = set([lem])
        else:
            lem_by_word[word].add(lem)
        if uncommon_re.findall(info1):
            uncommon.add(lem)
    # minimize conflicts by removing uncommon lemmas if common one is present
    out = {}
    for word in lem_by_word:
        lemmas = lem_by_word[word]
        common = lemmas - uncommon
        if not common:
            out[word] = u'/'.join(lemmas)
        else:
            out[word] = u'/'.join(common)
    return out | acdf7bb28c520dd16ca6d80be078780a57720397 | 691,453 |
def opsworks_instance_name_targets_for_instances(ilist):
    """
    Generate targets list by opsworks instance name: private IPs grouped
    under ('oin:<name>', [ips]) tuples.  Instances without tags are skipped.
    """
    nodes_by_name = {}
    for instance in ilist:
        if instance.tags is None:
            continue
        for tag in instance.tags:
            if tag['Key'] != 'opsworks:instance':
                continue
            name = tag['Value']
            nodes_by_name.setdefault(name, []).append(instance.private_ip_address)
    return [('oin:{}'.format(name), ips) for name, ips in nodes_by_name.items()]  # | 07cd709cec8be98d04301cc9431ea3ff543ce2b2 | 691,454 |
def num_patches(output_img_dim=(3, 256, 256), sub_patch_dim=(64, 64)):
    """
    Compute the non-overlapping patch layout fed to the PATCH GAN
    (Section 2.2.2 in paper).
    The paper provides 3 options:
    Pixel GAN = 1x1 patches (aka each pixel)
    PatchGAN = nxn patches (non-overlaping blocks of the image)
    ImageGAN = im_size x im_size (full image)
    Ex: 4x4 image with patch_size of 2 means 4 non-overlaping patches
    :param output_img_dim: (channels, height, width) of the generator output
    :param sub_patch_dim: (patch_height, patch_width)
    :return: (number of patches, patch discriminator input dimensions)
    """
    channels, height, width = output_img_dim
    patch_height, patch_width = sub_patch_dim
    patch_count = (height / patch_height) * (width / patch_width)
    patch_disc_img_dim = (channels, patch_height, patch_width)
    return int(patch_count), patch_disc_img_dim  # | 612fc2a5de8e560d6b6d79a4db27484e3ea36de0 | 691,456 |
from typing import Any
def none_conditional(obj: Any) -> Any:
    """ Patch given object's __getattr__ function in order to allow non-existing attributes (returns None by default), allowing simpler syntax close to C#'s null-conditional member access operator.
    Actually, this function just patches `__getattr__` of given object and, recursively, of any objects returned by getattr like if `getattr(obj, attr, default=None)` was called instead of a regular dot operator usage for member access.
    Inspired from (C# null-conditional member access operator)[https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/operators/member-access-operators#null-conditional-operators--and-]
    .. See also `none_conditional_unpatch` function which allows to 'un-patch' `obj` object and any resulting objects which have a `__getattr__` method. (restores original `__getattr__` method).
    Below is a simple example usage showcasing how it can simplify code writing when accessing members/methods which may not exist:
    ``` python
    # No need to call `none_conditional` on `write` even if it may be `None` and even if `close` member may not exist as `none_conditional` patches returned types too in order to propagate through all member accesses.
    a = none_conditional(path).open().write('Hello').close()
    none_conditional_unpatch(path, a)
    ...
    ```
    """
    if hasattr(obj, '__getattr__'):
        def _getattr_wrapper(self, attr: str, default: Any = None) -> Any:
            nonlocal obj
            if self is None or not attr:
                return default
            # Delegate to the saved original __getattr__, then patch the
            # result so None-propagation survives chained member accesses.
            rslt = obj.__getattr__none_conditional_original_fn(obj, attr, default)
            return none_conditional(rslt)
        obj.__getattr__none_conditional_original_fn = obj.__getattr__  # Store previous implementation unpatched __getattr__ method in order to allow to restore it later
        obj.__getattr__ = _getattr_wrapper
    # NOTE(review): assigning __getattr__ on an *instance* is not consulted
    # by normal attribute lookup on new-style classes (dunders are resolved
    # on the type), so this patch may only take effect for objects exposing
    # an explicit instance-level __getattr__ -- confirm with callers.
    return obj | 7e01b20c8e35a0c71cf25574f559def3d7b2f4a5 | 691,457 |
import types
def command(method=None, **kwargs):
    """Mark this method as a CLI command.

    This will only have any meaningful effect in methods that are members
    of a Resource subclass.  Supports all three spellings:
    ``@resources.command``, ``@resources.command()``, and
    ``@resources.command(foo='bar')``.
    """
    def decorate(func):
        # Record the marker plus any keyword attributes on the function.
        func._cli_command = True
        func._cli_command_attrs = kwargs
        return func
    if method and isinstance(method, types.FunctionType):
        # Used without parentheses: decorate the function immediately.
        return decorate(method)
    # Used with parentheses: hand the real decorator back to Python.
    return decorate  # | ba65a47a9ed9e1b84ea7234efacf0ae18bbab806 | 691,458 |
def diag_indices(n):
    """Indices of the main diagonal of an n x n matrix (torch lacks this)."""
    rows = range(n)
    return (rows, rows)  # | 728305cb6b48f74db8577949ef139e555cb7c835 | 691,459 |
def poly2(x, C0, C1, C2):
    """
    Evaluate a degree-2 polynomial of the single variable ``x``.

    Parameters
    ----------
    x : numeric
        Input variable.
    C0, C1, C2 : numeric
        Polynomial coefficients.

    Returns
    -------
    numeric
        The value C0 + C1*x + C2*x**2.
    """
    return C0 + C1 * x + C2 * x ** 2  # | a37c6bfa7bedef4f24791e802a5a50b651956094 | 691,460 |
import csv
def load_data(dataset_path):
    """
    Extracts the relevant data from the dataset given (a TSV file).
    Returns {lang: {gloss: [transcription,]}} using columns 0, 3 and 5.
    Asserts that there are no entries with unknown or no transcriptions.
    """
    data = {}  # lang: {gloss: [transcription,]}
    with open(dataset_path) as f:
        reader = csv.reader(f, delimiter='\t')
        next(reader)  # skip the header row
        for row in reader:
            lang, gloss, transcription = row[0], row[3], row[5]
            assert transcription not in ('', 'XXX')
            data.setdefault(lang, {}).setdefault(gloss, []).append(transcription)
    return data  # | d7bd2fa760dbb8ec70135eca0e27473baef69213 | 691,461 |
import os
def getMeterNum(imageID, template_dir='template/'):
    """Count the meter templates stored for ``imageID``.

    A file counts when it is a regular .jpg whose name before the first
    dot starts with ``<imageID>_`` (or equals ``imageID``).

    :param imageID: id prefix to look for.
    :param template_dir: directory holding template images; parameterized
        with a default that keeps the old hard-coded 'template/' behaviour.
    :return: number of matching templates.
    """
    count = 0
    for entry in os.listdir(template_dir):
        path = os.path.join(template_dir, entry)
        # partition handles names with zero or multiple dots, which made
        # the old two-way split() crash with an unpack error.
        prefix, _, suffix = entry.partition(".")
        if os.path.isfile(path) and suffix == "jpg" and prefix.split("_")[0] == imageID:
            count += 1
    return count  # | 13475299219d2a6ab6d7d7ba36564d4bc6aac548 | 691,462 |
def openstack_basecmd(*args, **kwargs):
    """Build the common ``openstack`` CLI invocation string.

    Credentials and scoping are taken from *kwargs* (``auth-url``,
    ``domain``, ``project``, ``username``, ``password``); keys that are
    missing render as ``None`` in the produced command line.
    """
    auth_url = kwargs.get("auth-url", None)
    domain = kwargs.get("domain", None)
    project = kwargs.get("project", None)
    username = kwargs.get("username")
    password = kwargs.get("password")
    return (
        f'openstack --os-auth-url {auth_url} --os-project-domain-name {domain} '
        f'--os-user-domain-name {domain} --os-project-name {project} '
        f'--os-username {username} --os-password {password}'
    )
import os
def get_files_in_folder(folder):
    """Return the names of the ``.eml`` files directly inside *folder*."""
    matches = []
    for name in os.listdir(folder):
        if name.lower().endswith('.eml'):
            matches.append(name)
    return matches
def get_bonds_of_molecule(molecule):
    """
    Collect the 1-based atom index pairs for every bond of a molecule.

    :param molecule: Mol object with the ligand (Rdkit)
    :return: list of (end_atom_idx, begin_atom_idx) tuples, 1-based
    """
    return [
        (bond.GetEndAtomIdx() + 1, bond.GetBeginAtomIdx() + 1)
        for bond in molecule.GetBonds()
    ]
def fahr_to_celsius(fahr):
    """Convert Fahrenheit to Celsius: (F - 32) * 5/9.

    The original implementation (and its docstring) used ``+ 5/9``
    instead of ``* 5/9``, so e.g. 212F came out as 180.56C instead of
    100C.
    """
    return (fahr - 32) * 5 / 9
import os
import hashlib
def is_model_valid(model_file, model_sha256):
    """Check that *model_file* exists and its SHA-256 digest equals *model_sha256*."""
    if not os.path.isfile(model_file):
        return False
    digest = hashlib.sha256()
    with open(model_file, "rb") as handle:
        # Hash the file in 64 KiB chunks to keep memory bounded.
        for chunk in iter(lambda: handle.read(65536), b""):
            digest.update(chunk)
    return digest.hexdigest() == model_sha256
def format_stats(stats: dict) -> str:
    """Render commit statistics as a comma-separated summary line."""
    parts = [f"{key}: {count} commits" for key, count in stats.items()]
    return ", ".join(parts)
def grandchildren_with_tag(child, tagnames):
    """Return the direct children of *child* whose tag is in *tagnames*.

    *child* is expected to expose an ``iterchildren()`` iterator whose
    elements carry a ``tag`` attribute (lxml-style).
    """
    return [node for node in child.iterchildren() if node.tag in tagnames]
from typing import Counter
def calculate_gc_content1(sequence):
    """
    Compute the GC percentage of a DNA sequence (A, G, C, or T).

    Case-insensitive; characters other than A/C/G/T are ignored.

    :param sequence: DNA sequence string
    :return: GC content as a percentage rounded to two decimals, or 0.0
        when the sequence contains no A/C/G/T bases (the original raised
        ZeroDivisionError in that case)
    """
    counts = Counter(sequence.upper())
    gc_content = counts.get("G", 0) + counts.get("C", 0)
    at_content = counts.get("A", 0) + counts.get("T", 0)
    total = gc_content + at_content
    if total == 0:
        return 0.0  # empty / non-ACGT input: avoid dividing by zero
    return round(gc_content * 100 / total, 2)
def bfalist(self, area="", lab="", **kwargs):
    """Lists the body force loads on an area.

    APDL Command: BFALIST

    Parameters
    ----------
    area
        Area whose body loads should be listed.  ALL (or blank) lists
        every selected area [ASEL].  ``P`` enables graphical picking and
        ignores the remaining fields (GUI only).  A component name may
        be substituted for the area number.
    lab
        Valid body load label, or ALL for every applicable label.  Load
        labels are listed under "Body Loads" in the input table for each
        element type in the Element Reference; see the BFA command.

    Notes
    -----
    Body loads may be defined on an area with the BFA command.  This
    command is valid in any processor.
    """
    return self.run(f"BFALIST,{area},{lab}", **kwargs)
from typing import Optional
def parse_float(value: bytes) -> Optional[float]:
    """
    Interpret a bytes value as a float.

    Args:
        value: Raw bytes expected to contain a numeric literal.

    Returns:
        The parsed float, or ``None`` when the bytes are not a valid
        numeric value.
    """
    try:
        parsed = float(value)
    except ValueError:
        return None
    return parsed
def say_hi(msg):
    """Echo *msg* back to the caller unchanged."""
    return msg
def trim_to_peak(peaks, troughs):
    """
    Align peak and trough sequences so both have the same length and the
    series begins with a peak.

    Args:
        peaks (numpy array): list of peak indices or times
        troughs (numpy array): list of trough indices or times

    Returns:
        peaks (numpy array): trimmed peak indices or times
        troughs (numpy array): trimmed trough indices or times
    """
    # Skip a leading trough so the first event is a peak:
    offset = 1 if len(peaks) > 0 and len(troughs) > 0 and troughs[0] < peaks[0] else 0
    # Truncate both series to their common length:
    n = min(len(peaks), len(troughs) - offset)
    return peaks[:n], troughs[offset:offset + n]
def int_from_bytes(bytes_) -> int:
    """Interpret *bytes_* as a little-endian unsigned integer.

    Equivalent to the original manual base-256 accumulation
    (``sum(b[i] * 2**(8*i))``) but delegates to the built-in
    ``int.from_bytes``.

    :param bytes_: byte sequence, least-significant byte first
    :return: the decoded non-negative integer (0 for empty input)
    """
    return int.from_bytes(bytes_, "little")
def find_all_conditions(path: str) -> set:
    """
    Find all unique conditions in the given training data file.

    A condition is the first comma-separated field of a line whenever
    that field is not parseable as a number.

    :param path: Path to training data.
    :return: Set of all the unique available conditions in the file.
    """
    cond = set()
    with open(path) as f:
        for line in f:
            # str.split(",") always yields at least one field, so the
            # original len(split) >= 1 guard was dead code.
            first = line.split(",")[0]
            try:
                float(first)  # numeric rows carry no condition label
            except ValueError:
                cond.add(first)  # set.add is idempotent; no membership pre-check needed
    return cond
def get_real_window_type(window_type):
    """
    Translate a public (website) window type into its backend name.

    Returns ``None`` for unknown window types.
    """
    backend_names = {
        "scroll": "tumbling",
        "accumulate": "accumulate",
        "slide": "sliding",
        "session": "session",
    }
    # dict.get already returns None for missing keys, matching the
    # original explicit if/else.
    return backend_names.get(window_type)
def trace_grad(fn, args):
    """Trace a function, and return a VJP and the function's output.

    NOTE(review): currently a stub — the TensorFlow-based implementation
    below is commented out and the function always returns None,
    regardless of *fn* and *args*.
    """
    # from tensorflow.python.eager.backprop import make_vjp
    # result, vjp = make_vjp(fn)(*args)
    # return result, vjp
    return None
import argparse
from typing import Tuple
from typing import List
import glob
def get_filenames(args: argparse.Namespace) -> Tuple[List[str], int]:
    """Collect the filenames matching the glob pattern in ``args.dir``.

    :param args: parsed CLI arguments; ``args.dir`` holds the glob pattern
    :return: tuple of (matching filenames, number of matches)
    """
    # The stray debug statement ``print(type(args))`` was removed.
    filenames = glob.glob(args.dir)
    return filenames, len(filenames)
def body_mass_index(weight: float, height: float) -> float:
    """Compute the body mass index (kg/m^2), rounded to one decimal.

    Parameters
    ----------
    weight
        weight in kg
    height
        height in cm

    Returns
    -------
    float
        body mass index
    """
    height_m = height / 100  # convert cm to metres
    return round(weight / height_m ** 2, 1)
def separate_files_and_options(args):
    """
    Split an argument list into file arguments and option arguments.

    An option is any string starting with '-'.  File arguments are
    relative to the current working directory and may be incomplete.
    Empty strings are grouped with the options, matching the original
    ``arg and arg[0] != "-"`` test.

    Returns:
        a tuple (file_list, option_list)
    """
    file_args, opt_args = [], []
    for arg in args:
        if not arg or arg.startswith("-"):
            opt_args.append(arg)
        else:
            file_args.append(arg)
    return file_args, opt_args
def isvalidposition(pdic,iprime,distance):
    """
    Checks if a position is valid for mutation. It queries the neighboring
    positions of ``iprime`` to check whether there already was a mutation
    recorded in ``pdic``.

    @type pdic: dictionary
    @param pdic: Dictionary containing mutations and start/stop codons
        ("S" marks an ORF start, "E" an ORF end; any other entry is
        treated as an existing mutation).
    @type iprime: int
    @param iprime: Position of the prospective mutation (DNA level).
    @type distance: int
    @param distance: User defined parameter which limits the distance
        between two mutations.
    @rtype: Bool
    @return: NOTE(review): the returned value is ``istforbidden`` —
        1 when the position must be rejected, 0 when it is usable.
        The original docstring stated the opposite polarity
        ("valid (1 = yes, 0 = no)"); confirm against callers.
    """
    # deal with base shifts: the scanned window is narrowed by 2 relative
    # to the user-supplied distance
    distance = distance-2
    istforbidden = 0
    # scan offsets -distance .. distance+1 around the candidate position
    for o in range(-distance,distance+2,1):
        if (iprime+o in pdic):
            # E = end of orf
            # S = start of orf
            if((pdic[iprime+o]=="E") or (pdic[iprime+o]=="S")):
                # ORF boundary markers only forbid the position when they
                # lie within +-3 bases (one codon); farther away they are
                # ignored
                if((o >3) or (o <-3)):
                    pass
                else:
                    istforbidden = 1
                    break
            else:
                # any other recorded entry is an existing mutation, which
                # is too close by definition of the scanned window
                istforbidden = 1
                break
        else:
            pass
    return(istforbidden)
import numpy
def csc(arg):
    """
    Cosecant: the reciprocal of the sine (elementwise for arrays).
    """
    return 1.0 / numpy.sin(arg)
def odds_table(result):
    """
    Build an odds table from the terminal output of a sportsbetting
    package function.

    :param result: ensemble des valeurs affichées dans le terminal par
        une fonction du package sportsbetting — the lines after the
        first, up to and including the line containing ``}}``, form a
        dict literal with an ``odds`` entry mapping bookmaker -> odds
    :return: Tableau des cotes — list of rows [bookmaker, odd1, odd2, odd3]
    """
    import ast  # local import keeps the parsing dependency self-contained

    lines = result.split("\n")
    i = 0
    for i, line in enumerate(lines):
        if "}}" in line:
            break
    # ast.literal_eval replaces the original eval(): the payload is a
    # plain dict literal and literal_eval cannot execute arbitrary code.
    dict_odds = ast.literal_eval("".join(lines[1:i + 1]))
    odds = dict_odds["odds"]
    # Two-way markets get a draw placeholder so every row has 3 columns.
    if len(list(odds.values())[0]) == 2:
        for key in odds.keys():
            odds[key].insert(1, "- ")
    return [[key] + list(map(str, value)) for key, value in odds.items()]
import collections
def tcl_findprd_checkfilter(prddict, tocheck=None):
    """
    Restrict a PRD dictionary to the requested PRDs, if any.

    :param prddict: PRD center:[ends] dictionary.
    :type prddict: collections.defaultdict(str: list)

    :param tocheck: Specific PRD(s) to check, None if all will be checked. Default is None.
    :type tocheck: list(str)
    """
    if tocheck is None:
        return prddict
    filtered = collections.defaultdict(list)
    for entry in tocheck:
        # Normalize "PRD-xxx" / "APBI-PRDxxx" style identifiers to bare keys.
        key = entry.replace("PRD-", "").replace("APBI-PRD", "")
        filtered[key] = prddict[key]
    return filtered
def search_fields_to_dict(fields):
    """
    Normalize search fields to a {field: weight} mapping.

    In ``SearchableQuerySet`` and ``SearchableManager``, search fields
    can either be a sequence, or a dict of fields mapped to weights.
    A sequence such as ("title", "content") becomes {"title": 1,
    "content": 1}; a dict already mapping fields to numeric weights is
    returned unchanged; a falsy value yields {}.
    """
    if not fields:
        return {}
    try:
        # Probe whether *fields* already maps names to numeric weights.
        int(list(dict(fields).values())[0])
    except (TypeError, ValueError):
        # Plain sequence of names: weight every field evenly.
        fields = dict(zip(fields, [1] * len(fields)))
    return fields
def array_key_exists(interp, space, w_key, w_obj):
    """ Checks if the given key or index exists in the array """
    if w_obj.tp != space.tp_object and w_obj.tp != space.tp_array:
        space.ec.warn("array_key_exists() expects parameter "
                      "2 to be array, %s given" %
                      space.get_type_name(w_obj.tp))
        return space.w_Null
    if w_key.tp not in (space.tp_int, space.tp_str, space.tp_null):
        space.ec.warn("array_key_exists(): The first argument "
                      "should be either a string or an integer")
        return space.w_False
    if w_obj.tp == space.tp_array:
        found = w_obj.isset_index(space, w_key)
    else:
        found = w_obj.hasattr(interp, space.str_w(w_key), None)
    return space.newbool(found)
import base64
def decode(base64_data):
    """Decode a base64-encoded payload into raw bytes."""
    decoded = base64.b64decode(base64_data)
    return decoded
import re
def clean_tweet(row):
    """
    Clean a CSV row of tweet data and tokenize the tweet text.

    Strips URLs, replaces every non-letter character with a space,
    lowercases the text and splits it into words.

    :param row: comma-separated record; field 1 is the sentiment and
        field 3 the tweet text
    :return: tuple (sentiment, [list_of_words])
    """
    fields = row.split(",")
    sentiment = fields[1]
    # The original called tweet.strip() and discarded the result.
    tweet = fields[3].strip()
    # remove url from the tweet
    tweet = re.sub(r"http\S+", "", tweet)
    # Keep ASCII letters only.  The original pattern [^a-zA-z] also kept
    # the characters [ \ ] ^ _ ` which sit between 'Z' and 'a' in ASCII.
    tweet = re.sub("[^a-zA-Z]", " ", tweet)
    tweet = tweet.lower()
    # convert tweet into a list of words
    words_list = tweet.split()
    return (sentiment, words_list)
import argparse
def _single_prefix_char(token: str, parser: argparse.ArgumentParser) -> bool:
"""Returns if a token is just a single flag prefix character"""
return len(token) == 1 and token[0] in parser.prefix_chars | 87d23b71c491a319f65b86a055f5f22fee6e16ff | 691,493 |
import functools
import time
def to_timestamp_property(func_to_decorate):
""" A decorator for properties to convert the property value from a
datetime to a timestamp. """
@functools.wraps(func_to_decorate)
def wrapper(instance, value):
""" Closure that converts from datetime to timestamp. """
if value:
value = time.mktime(value.timetuple())
func_to_decorate(instance, value)
return wrapper | 04a6776284b739d06fbff9620072c91e8ba84c64 | 691,494 |
import json
def json_has_structure(response_body, expected_json):
    """Check whether an HTTP response body parses to the expected JSON value."""
    return json.loads(response_body) == expected_json
def format_kmer(seqid, start):
    """
    Render a k-mer header using 1-based indexing.

    >>> format_kmer('chr3', 1000)
    'chr3_1001'
    """
    one_based = start + 1
    # %i is kept (rather than an f-string) so numeric types other than
    # int format exactly as before.
    return "%s_%i" % (seqid, one_based)
import sys
def read_jobs_from_stdin():
    """Prompt on stderr, then read and return the raw job(s) JSON from stdin."""
    print('Enter the raw job(s) JSON (press Ctrl+D on a blank line to submit)', file=sys.stderr)
    return sys.stdin.read()
def find_undefined_value(cbf_handle):
    """Given a cbf handle, get the value for the undefined pixel.

    Navigates the handle to the ``array_intensities`` category's
    ``undefined_value`` column and reads it as a double.  Moves the
    handle's current category/column cursor as a side effect.
    """
    cbf_handle.find_category(b"array_intensities")
    cbf_handle.find_column(b"undefined_value")
    return cbf_handle.get_doublevalue()
import inspect
def args_to_kwargs(tool, args):
    """Map positional *args* onto *tool*'s parameter names via inspection.

    The first parameter (``self``) is skipped.  Raises IndexError when
    more arguments are supplied than *tool* has remaining parameters,
    matching the original behaviour.
    """
    names = list(inspect.signature(tool).parameters)[1:]  # drop self
    return {names[position]: value for position, value in enumerate(args)}
from typing import Optional
import re
def guess_wapi_version(endpoint: str) -> Optional[float]:
    """Extract the WAPI version from an endpoint URL ending in ``/wapi/vX.Y``."""
    found = re.match(r".+\/wapi\/v(\d+\.\d+)$", endpoint)
    if found is None:
        return None
    return float(found.group(1))
def message_prefix(file_format, run_type):
    """Text describing saved case file format and run results type.

    Fixes the original comparison ``run_type == ['PF run']`` — a string
    compared to a list, which was always False — so PF runs receive
    their intended trailing space.
    """
    # The original ternary produced ' format' on both branches; collapsed.
    format_str = ' format'
    if run_type == 'PF run':
        run_str = run_type + ': '
    else:
        run_str = run_type + ':'
    return 'Savecase: ' + file_format + format_str + ' - ' + run_str
from pathlib import Path
def modified_after(first_path: Path, second_path: Path):
    """Return True when *first_path*'s mtime is higher than *second_path*'s.

    A path that does not exist or is ``None`` is treated as "never
    modified": the result is False when the first file is unavailable
    and True when only the second one is.
    """
    try:
        first_mtime = first_path.stat().st_mtime
    except (EnvironmentError, AttributeError):
        return False
    try:
        second_mtime = second_path.stat().st_mtime
    except (EnvironmentError, AttributeError):
        return True
    return first_mtime > second_mtime
def parse_images(grp):
    """
    Split raw dataset lines into numeric titles and 10-line image blocks.

    Each record spans 11 lines: a header whose second whitespace-separated
    token (minus its trailing character — presumably a newline; TODO
    confirm against the data format) is the integer title, followed by
    ten image lines.
    """
    titles = []
    images = []
    for idx in range(len(grp) // 11):
        header = grp[11 * idx]
        titles.append(int(header[:-1].split(" ")[1]))
        images.append(grp[11 * idx + 1 : 11 * (idx + 1)])
    return titles, images
def bmes2words(text, tags):
    """Segment *text* into words according to its BMES tag sequence.

    A 'B' (begin) or 'S' (single) tag starts a new word; any other tag
    ('M'/'E') extends the current one.  Returns the list of words.
    """
    chars = list(text)
    words = []
    if not chars:
        return words
    current = chars[0]
    for char, tag in zip(chars[1:], tags[1:]):
        if tag == "B" or tag == "S":
            words.append(current)
            current = ""
        current += char
    if current:
        words.append(current)
    return words
def int_128_to_64(ip):
    """Split a 128-bit integer into its high and low 64-bit halves.

    Used for storing 128-bit IP addresses as two searchable 64-bit
    integer database columns.

    Args:
        ip (int): IPv4 / IPv6 Address.

    Returns:
        tuple: (first 64 bits, last 64 bits)
    """
    high = ip >> 64
    low = ip % (1 << 64)
    return (high, low)
import math
def get_from_decomposition(decomposition):
    """Rebuild a number from its prime decomposition {prime: exponent}.

    Returns a float, because ``math.pow`` is used for exponentiation
    (kept from the original to preserve the return type).
    """
    result = 1
    for prime, exponent in decomposition.items():
        result *= math.pow(prime, exponent)
    return result
def compute_tolerance(baseline: float, abstol: float, reltol: float) -> float:
    """ Computes effective tolerance from a baseline value and relative and absolute tolerances.

    The result is the larger of the absolute tolerance and the relative
    tolerance scaled by |baseline|.

    :param baseline: the input value
    :param abstol: absolute tolerance (must be >= 0)
    :param reltol: relative tolerance (must satisfy 0 <= reltol < 1)
    :return: tolerance to use for the input value

    Example:
    >> compute_tolerance(1000, 3, 0.01)
    >> 10
    >> compute_tolerance(1000, 1, 0.002)
    >> 2
    """
    assert abstol >= 0
    assert reltol >= 0
    assert reltol < 1
    scaled = reltol * abs(baseline)
    return scaled if scaled > abstol else abstol
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.