content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import time
def date_to_iso(thedate, decimals=True):
    """
    Convert a datetime to an ISO-8601 style string with the local UTC offset.

    :param thedate: datetime to format
    :param decimals: when True, append milliseconds as ".mmm"
    :return: e.g. "2020-01-02T03:04:05.006+0100"
    """
    strdate = thedate.strftime("%Y-%m-%dT%H:%M:%S")
    # divmod on the absolute offset keeps hour/minute consistent for
    # offsets west of UTC (the original modulo math broke on negatives).
    gmtoff = time.localtime().tm_gmtoff
    sign = '-' if gmtoff < 0 else '+'
    hour, remainder = divmod(abs(gmtoff), 3600)
    minute = remainder // 60
    utcoffset = "%s%02d%02d" % (sign, hour, minute)
    if decimals:
        # Zero-pad microseconds to 6 digits before truncating to
        # milliseconds; str()[:3] mis-rendered values below 100000us.
        three_digits = "." + ("%06d" % thedate.microsecond)[:3]
    else:
        three_digits = ""
    return strdate + three_digits + utcoffset
def tree_depth(span):
    """Return the depth of the dependency tree rooted at ``span.root``."""
    depth = 1
    level = 1
    frontier = span.root.children
    while True:
        level += 1
        next_frontier = []
        for node in frontier:
            # Nodes with grandchildren extend the next BFS frontier.
            if len(list(node.children)) > 0:
                for grandchild in node.children:
                    next_frontier.append(grandchild)
            depth = level
        if not next_frontier:
            return depth
        frontier = next_frontier
def print_args(args):
    """Print each attribute of an argparse namespace to stdout.
    ----------------------------------------------------------------------------
    Args:
        args: argparse object returned by ArgumentParser.parse_args()
    Returns:
        None
    """
    print("Current input flags...")
    for name, value in vars(args).items():
        print("\t%s : %s" % (name, value))
    return None
def logstr(*args, **kwargs):
    """Build a log message from print()-style arguments.

    Keyword arguments: ``sep`` (default " ") joins the arguments;
    ``endl`` (default newline) terminates the string.
    """
    sep = kwargs.pop("sep", " ")
    body = sep.join(str(arg) for arg in args)
    return body + kwargs.pop("endl", "\n")
import os
def make_links(directory):
    """
    Return list of tuples [(link, name), ...]
    Example:
    'category1' contains 'subcategory1', 'subcategory2'.
    This will return the following:
    [(/category1/subcategory1, subcategory1),
    (/category1/subcategory2, subcategory2)]
    Returns None if the directory has no subdirectories.
    """
    try:
        # The first item yielded by os.walk() is (dirpath, dirnames,
        # filenames); [1] is the list of immediate subdirectory names.
        directories = next(os.walk(os.path.join('products', directory)))[1]
        links = ['/' + os.path.join(directory, d) for d in directories]
        names = [os.path.basename(link) for link in links]
        # NOTE(review): zip() is lazy in Python 3 -- confirm callers
        # only iterate the result once.
        return zip(links, names) if links else None
    except StopIteration as e:
        # Quick hack to handle nonexisting categories typed in the address bar.
        # Calling make_links with an empty string lists links in "products"
        # NOTE(review): if the "products" directory itself is missing this
        # recurses indefinitely -- verify deployment guarantees it exists.
        return make_links('')
def func(*args):
    """Return the sum of all positional arguments (0 when none given)."""
    total = 0
    for value in args:
        total += value
    return total
import re
def decode_string(string):
    """Strip the surrounding quotes and replace each backslash escape
    (\\\\, \\" or \\xHH) with '*'."""
    inner = string[1:-1]
    return re.sub(r'\\(?:\\|"|x[0-9a-f]{2})', '*', inner)
def multiply(a, b):
    """Return ``a * b`` (works for numbers and sequence repetition).

    >>> multiply(2,3)
    6
    >>> multiply('baka~',3)
    'baka~baka~baka~'
    """
    product = a * b
    return product
def create_weak_signal_view(path, views, load_and_process_data):
    """
    Build single-feature "weak signal" column views of a dataset.

    :param path: relative path to the dataset (string)
    :param views: dict mapping indices 0..len-1 to feature columns
    :param load_and_process_data: callable that loads the dataset and
        returns a dict with 'training_data', 'validation_data' and
        'test_data' entries, each a (features, labels) pair
    :return: tuple of (data dict, [train views, val views, test views])
    """
    data = load_and_process_data(path)
    train_data, _train_labels = data['training_data']
    val_data, _val_labels = data['validation_data']
    test_data, _test_labels = data['test_data']
    splits = (train_data, val_data, test_data)
    # One single-column slice per view, for each of the three splits.
    weak_signal_data = [
        [split[:, views[i]:views[i] + 1] for i in range(len(views))]
        for split in splits
    ]
    return data, weak_signal_data
def format_text_table(rows, num_headers=0,
                      top_header_span_start=0,
                      top_header_text=None):
    """
    Format rows in as a reStructuredText table, in the vein of::
        ========== ========== ==========
        -- top header text, span start 1
        ---------- ---------------------
        row0col0 r0c1 r0c2
        ========== ========== ==========
        row1col0 r1c1 r1c2
        row2col0 r2c1 r2c2
        ========== ========== ==========

    Args:
        rows: iterable of row iterables; items are str()-formatted and
            embedded newlines are flattened to spaces.
        num_headers: number of leading rows treated as column headers.
        top_header_span_start: first column covered by the top header.
        top_header_text: optional banner text spanning the right-hand
            columns; None disables the banner.
    Returns:
        The table as a single newline-joined string.
    """
    # Format content
    text_rows = [["{0}".format(item).replace("\n", " ") for item in row]
                 for row in rows]
    # Ensure same number of items on all rows
    num_items = max(len(row) for row in text_rows)
    for row in text_rows:
        row.extend(['']*(num_items - len(row)))
    # Determine widths (two extra columns of padding per cell)
    col_widths = [max(len(row[j]) for row in text_rows) + 2
                  for j in range(num_items)]
    # Pad content
    text_rows = [[item.center(w) for w, item in zip(col_widths, row)]
                 for row in text_rows]
    # Generate result
    headers = [" ".join(row) for row in text_rows[:num_headers]]
    content = [" ".join(row) for row in text_rows[num_headers:]]
    separator = " ".join("-"*w for w in col_widths)
    result = []
    if top_header_text is not None:
        # Split the rule into the un-spanned (left) and spanned (right)
        # segments; the banner text is centered over the right segment.
        left_span = "-".join("-"*w for w in col_widths[:top_header_span_start])
        right_span = "-".join("-"*w for w in col_widths[top_header_span_start:])
        if left_span and right_span:
            result += ["--" + " " * (len(left_span)-1) + top_header_text.center(len(right_span))]
            result += [" ".join([left_span, right_span])]
        else:
            # Banner spans every column (span start at 0 or past the end).
            result += [top_header_text.center(len(separator))]
            result += ["-".join([left_span, right_span])]
        result += headers
        result += [separator.replace("-", "=")]
    elif headers:
        result += headers
        result += [separator]
    result += content
    # Top and bottom "=" rules enclose the whole table.
    result = [separator.replace("-", "=")] + result
    result += [separator.replace("-", "=")]
    return "\n".join(result)
def datetime_pyxll_function_3(x):
    """Return a string describing *x*'s type and value."""
    return "type={0}, datetime={1}".format(type(x), x)
import os
def get_locale():
    """Gets the locale of the image generator used in the run."""
    # Ask the shell for the LANG value reported by `locale`.
    with os.popen("locale | grep LANG= | awk -F '=' {'print $2'}") as stream:
        output = stream.read().strip()
    if len(output) == 0:
        # Fall back to the LANG environment variable when the shell
        # pipeline produced nothing (e.g. `locale` not available).
        try:
            output = os.environ['LANG']
        except KeyError:
            return "could not get locale"
    return output
import argparse
def parse_argv(argv):
    """
    Parse command line arguments.

    :param argv: list of argument strings
    :return: populated argparse namespace
    """
    parser = argparse.ArgumentParser()
    # -a flips auth_info off (store_false with a True default).
    parser.add_argument(
        '-a', '--authorship_information', dest='auth_info',
        action='store_false', default=True)
    parser.add_argument(
        '-v', '--version', dest='version',
        action='store_true', default=False)
    return parser.parse_args(argv)
def say_hi():
    """Return a friendly greeting string."""
    greeting = 'Hi, my friend ...'
    return greeting
def create_news(abc_news, mixins=None):
    """Concrete news model factory.

    :param abc_news: abstract base news class used as the final base.
    :param mixins: optional tuple of mixin classes mixed in ahead of
        *abc_news* in the MRO.
    :returns: a new ``News`` type combining *mixins* and *abc_news*,
        registered under this module's name.
    """
    # Mixins come first so their attributes shadow the base class.
    bases = (mixins or tuple()) + (abc_news,)
    return type('News', bases, {'__module__': __name__})
import torch
def get_maxprob_metric(log_probs):
    """
    Return the mean of the highest per-row probability
    (exp of the max log-probability along the last axis).
    """
    top_logp = log_probs.max(axis=-1).values
    return top_logp.exp().mean()
def get_free_uidNumber(uidlist):
    """
    Returns:
        int. The next free uidNumber (max existing + 1), which must lie
        in the half-open range [2000, 3000).
    Raises:
        RuntimeError: when the candidate falls outside that range.
    """
    candidate = max(int(number) for number in uidlist) + 1
    if 2000 <= candidate < 3000:
        return candidate
    raise RuntimeError('No uidNumber left in range %s' % [2000, 3000])
def parse_int_string(obj: str) -> int:
    """Parse a string as `int`; raise ValueError when *obj* is not a str.

    Args:
        obj : the string to parse
    """
    if isinstance(obj, str):
        return int(obj)
    raise ValueError()
def make_face_rects(rect):
    """Split a face rectangle into forehead and under-eye rectangles.

    :param rect: (x, y, w, h) of the detected face
    :return: pair of (x, y, w, h) int tuples -- the forehead region and
        the region under the eyes
    """
    x, y, w, h = rect
    # Both regions cover the middle half of the face horizontally.
    left = x + w / 4.0
    width = w / 2.0
    forehead = (left, y + 0.05 * h, width, h * 0.9 * 0.2)
    under_eyes = (left, y + 0.05 * h + (h * 0.9 * 0.55), width, h * 0.9 * 0.45)
    def _ints(r):
        # Truncate each coordinate toward zero, as int() does.
        return tuple(int(v) for v in r)
    return (_ints(forehead), _ints(under_eyes))
def mrep(self, name="", arg1="", arg2="", arg3="", arg4="", arg5="",
         arg6="", arg7="", arg8="", arg9="", arg10="", arg11="", arg12="",
         arg13="", arg14="", arg15="", arg16="", arg17="", arg18="",
         **kwargs):
    """Reissue the graphics command macro "name" during a replot or
    zoom operation.

    APDL Command: /MREP

    Parameters
    ----------
    name
        The name identifying the macro file or macro block on a macro
        library file (up to eight characters, starting with a letter).
    arg1, arg2, arg3, . . . , arg18
        Values to be passed into the file or block.

    Notes
    -----
    Reissues the graphics command macro "name" during a replot
    operation [/REPLOT] or a zoom [/ZOOM] operation. ANSYS passes the
    macro arguments to the replot/zoom feature for use by the graphics
    macro. Place /MREP at the end of the graphics command macro, after
    the last graphics command, to enable the replot or zoom feature.
    """
    fields = (name, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8,
              arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16,
              arg17, arg18)
    command = "/MREP," + ",".join("{0}".format(f) for f in fields)
    return self.run(command, **kwargs)
import optparse
def Flags():
    """Construct the command-line option parser for this tool."""
    parser = optparse.OptionParser()
    # All flags are boolean switches defaulting to False.
    for flag, description in (
        ("--inline_images",
         "Encode img payloads as data:// URLs rather than local files."),
        ("--verbose", "Print verbose output"),
    ):
        parser.add_option(flag, help=description,
                          default=False, action='store_true')
    return parser
from warnings import warn
def deprecated(warning_string=""):  # pragma: no cover
    """Decorator factory that marks functions as deprecated.

    :param warning_string: extra guidance appended to the warning text
    :return: decorator whose wrapper emits a ``DeprecationWarning`` and
        forwards the call to the wrapped function.
    """
    def old_function(fcn):
        def wrapper(*args, **kwargs):
            printed_message = "{0} is deprecated. {1}".format(
                fcn.__name__, warning_string
            )
            warn(printed_message, DeprecationWarning)
            # Bug fix: propagate the wrapped function's return value
            # (the original dropped it and always returned None).
            return fcn(*args, **kwargs)
        return wrapper
    return old_function
def create_path_file_obj(mol, extension='mol2'):
    """
    Wrap a molecule payload in the dict structure used for file inputs.

    :param mol: raw file content
    :param extension: file-type tag, defaults to 'mol2'
    :return: dict with 'path' (always None), 'content' and 'extension'
    """
    obj = {'path': None, 'content': mol}
    obj['extension'] = extension
    return obj
import collections
def _cleanup_opts(read_opts):
"""Cleanup duplicate options in namespace groups
Return a structure which removes duplicate options from a namespace group.
NOTE:(rbradfor) This does not remove duplicated options from repeating
groups in different namespaces:
:param read_opts: a list (namespace, [(group, [opt_1, opt_2])]) tuples
:returns: a list of (namespace, [(group, [opt_1, opt_2])]) tuples
"""
# OrderedDict is used specifically in the three levels to maintain the
# source order of namespace/group/opt values
clean = collections.OrderedDict()
for namespace, listing in read_opts:
if namespace not in clean:
clean[namespace] = collections.OrderedDict()
for group, opts in listing:
if group not in clean[namespace]:
clean[namespace][group] = collections.OrderedDict()
for opt in opts:
clean[namespace][group][opt.dest] = opt
# recreate the list of (namespace, [(group, [opt_1, opt_2])]) tuples
# from the cleaned structure.
cleaned_opts = [
(namespace, [(group, list(clean[namespace][group].values()))
for group in clean[namespace]])
for namespace in clean
]
return cleaned_opts | 71ee25451c56be318a94158e70cf2fb2390be231 | 694,208 |
def component_callback_null(instance_uniqid):
    """Stub callback that ignores its argument and reports success."""
    result = "OK"
    return result
import glob
def read_all_file_names(directory, extension):
    """Read all files with specified extension from given path and sorts them
    based on a given sorting key.
    Parameters
    ----------
    directory: str
        parent directory to be searched for files of the specified type
    extension: str
        file extension, i.e. ".edf" or ".txt"
    Returns
    -------
    file_paths: list(str)
        a list to all files found in (sub)directories of path
    """
    assert extension.startswith(".")
    # NOTE(review): `directory` is concatenated directly with the glob
    # pattern, so it should end with a path separator ("dir/"); otherwise
    # the pattern becomes "dir**/*.ext" -- confirm callers pass it so.
    file_paths = glob.glob(directory + "**/*" + extension, recursive=True)
    # Fail loudly rather than silently returning an empty listing.
    assert len(file_paths) > 0, (
        f"something went wrong. Found no {extension} files in {directory}")
    return file_paths
def inclusion_two_params_from_template(one, two):
    """Expected inclusion_two_params_from_template __doc__"""
    # Docstring text is intentionally unchanged: template-tag tests
    # compare it verbatim.
    message = "inclusion_two_params_from_template - Expected result: %s, %s" % (one, two)
    return {"result": message}
def encode_yarn_weights(df):
    """Map ``yarn_weight_description`` labels to ordinal thickness codes.

    Industry standards split yarn into 7 weight categories; this mapping
    is more granular (1 = thinnest ... 11 = thickest; unknown or
    any-gauge entries map to the middle value 5).

    :param df: DataFrame with a ``yarn_weight_description`` column
    :return: DataFrame with the descriptions replaced by numeric codes
    """
    yarn_weights = {'Lace': 1,
                    'Thread': 1,
                    'Cobweb': 1,
                    'Light Fingering': 1.5,
                    'Fingering (14 wpi)': 2,
                    'Sport (12 wpi)': 3,
                    'DK / Sport': 4,
                    'DK (11 wpi)': 5,
                    'Worsted (9 wpi)': 6,
                    'Aran / Worsted': 7,
                    'Aran (8 wpi)': 8,
                    'Bulky (7 wpi)': 9,
                    'Super Bulky (5-6 wpi)': 10,
                    'Jumbo (0-4 wpi)': 11,
                    'No weight specified': 5,
                    'Any gauge - designed for any gauge': 5}
    try:
        df = df.replace({'yarn_weight_description': yarn_weights})
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # still propagate; the original swallowed everything.
        print("okay - check out yarn_weight_description, something went wrong with the encoding")
    return df
def parse_person_name(prsn) -> str:
    """Return a display name built from first and last name, falling
    back to 'Not Listed' when both parts are blank.
    Called by view_person_manager.query_person()"""
    display: str = f'{prsn.first_name} {prsn.last_name}'.strip()
    return display if display else 'Not Listed'
import functools
def mask_decorator(metric_function):
    """Decorator that optionally computes a masked variant of a metric.

    :param function metric_function: function taking ``target`` and
        ``prediction`` arrays as keyword arguments
    :return function: wrapper returning the metric, or the list
        [metric, masked_metric] when a ``mask`` keyword is supplied
    """
    @functools.wraps(metric_function)
    def wrapper_metric_function(**kwargs):
        """Expects keyword args target, prediction and (optionally) mask,
        all np.arrays."""
        target = kwargs['target']
        prediction = kwargs['prediction']
        metric = metric_function(target=target, prediction=prediction)
        if 'mask' not in kwargs:
            return metric
        mask = kwargs['mask']
        masked_metric = metric_function(target=target[mask],
                                        prediction=prediction[mask])
        return [metric, masked_metric]
    return wrapper_metric_function
def split_to_domains(primary_structure, secondary_structure):
    """ Splits the primary structure into domains.
    Args:
        primary_structure -- str: IUPAC primary structure
        secondary_structure -- str: dotbracket notation of the secondary structure
    Returns:
        list<str>
    """
    pairs = {}
    stack = []
    # Two hard-coded hairpin motifs are collapsed to the 2-char
    # placeholders "P1"/"P2" so they are kept as whole domains.
    p1 = "..[[[[[[."
    p2 = "..]]]]]]."
    ss = secondary_structure.replace(p1, "P1").replace(p2, "P2")
    #print(ss)
    # Match round brackets with a stack; pairs maps each index to its
    # partner (both directions).
    for i, sym in enumerate(ss):
        if sym == "(":
            stack.append(i)
        elif sym == ")":
            pairs[i] = stack.pop()
            pairs[pairs[i]] = i
    domains = []
    # Group consecutive symbols into domains: a run of "(" whose pairs
    # are nested contiguously, a run of ")", or a run of other symbols.
    for i, sym in enumerate(ss):
        if sym == "(":
            if pairs.get(i - 1) == None:
                domains.append([sym])
            elif pairs[i] == pairs[i - 1] - 1:
                domains[-1].append(sym)
        elif sym == ")":
            if pairs.get(i - 1) == None or pairs[i] != pairs[i - 1] - 1:
                domains.append([sym])
            else:
                domains[-1].append(sym)
        else:
            if len(domains) < 1 or domains[-1][0] in "()":
                domains.append([sym])
            else:
                domains[-1].append(sym)
    # Expand the placeholders back to the original 9-char motifs.
    for i, d in enumerate(domains):
        if "".join(d) == "P1":
            domains[i] = p1
        elif "".join(d) == "P2":
            domains[i] = p2
    #print(domains)
    # Slice the primary structure with the same domain lengths.
    # NOTE(review): a "(" run whose preceding "(" pairs non-contiguously
    # appends nothing, and the P1/P2 collapse shifts indices relative to
    # the full-length input -- confirm inputs avoid those cases.
    p_domains = []
    i = 0
    for t in domains:
        p_domains.append(primary_structure[i : i + len(t)])
        i += len(t)
    return p_domains
def sum_of_a_range(a = 10,b = 1000):
    """Return the sum of the integers in ``range(a, b)`` (a inclusive,
    b exclusive).

    :param a: lower bound, inclusive (default 10)
    :param b: upper bound, exclusive (default 1000)
    """
    # sum(range(...)) replaces the manual accumulation loop.
    return sum(range(a, b))
import math
def conn_original_to_pairs(conn):
    """
    Decode an upper-triangle edge list into index pairs.

    Input: conn of the form [0,0,0,1,0,...] where 1 indicates the
    presence of an edge, ordered by increasing index gap. This format
    has been deprecated in favor of the adjacency matrix.
    """
    # Recover the node count from the triangle size len = N*(N-1)/2.
    node_count = math.ceil(math.sqrt(len(conn) * 2))
    pairs = []
    flat_index = 0
    # Entries are ordered by gap (1..N-1), then by the lower index.
    for gap in range(1, node_count):
        for lo in range(node_count - gap):
            if conn[flat_index] == 1:
                pairs.append((lo, lo + gap))
            flat_index += 1
    return pairs
def Identity():
    """Factory for an encoder that passes its input through unchanged."""
    # The inner function keeps the encoder-factory call convention.
    def network(inputs):
        return inputs
    return network
def relu(S):
    """
    Returns the output scaling of a ReLU
    """
    # NOTE(review): this only casts the scaling array to int (truncating
    # toward zero); it does not clamp negative values as the name might
    # suggest -- confirm that is the intended scaling semantics.
    return S.astype(int)
import struct
def parse(data):
    """
    Split an SMP PDU into (code, payload).

    The SMP code is the first octet of the PDU:
      0 1 2 3 4 5 6 7
      -----------------
      |      code     |
      -----------------
    Reference: Bluetooth Core specification 4.1, [vol 3] Part H
    (Section 3.3) - Command Format.

    Return a tuple (code, data)
    """
    (code,) = struct.unpack("<B", data[:1])
    return (code, data[1:])
def wait_for_queue(handle):
    """
    Read from ``sbatch`` output whether the queue is full.

    :param object handle: sbatch handle with stdout/stderr streams
    :return: ``True`` if queue is full, else ``False``
    :rtype: :py:obj:`bool`
    """
    # "unable to accept job" is a rare SLURM error, but may cause chaos
    # in the information/accounting system -- treat it like a full queue.
    markers = ("maximum number of jobs", "unable to accept job")
    for stream in (handle.stdout, handle.stderr):
        for line in stream:
            if any(marker in line for marker in markers):
                return True
    return False
def is_deep_decision(tree):
    """
    Decide whether the root of *tree* has only leaves as children,
    i.e. whether it is a candidate for pruning.
    """
    # Identity comparisons (is True / is False) are preserved from the
    # original: only literal booleans count.
    if tree["is_leaf"] is True:
        return False
    return all(tree["children"][name]["is_leaf"] is not False
               for name in tree["children"])
import argparse
def arg_parse():
    """Build the argument parser for the ATT&CK mapping script."""
    description = ("Fetches the current ATT&CK content expressed as STIX2 "
                   "and creates spreadsheet mapping Techniques with "
                   "Mitigations, Groups or Software.")
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("-d", "--domain", type=str, required=True,
                        choices=["enterprise_attack", "mobile_attack"],
                        help="Which ATT&CK domain to use (Enterprise, Mobile).")
    parser.add_argument("-m", "--mapping-type", type=str, required=True,
                        choices=["groups", "mitigations", "software"],
                        help="Which type of object to output mappings for using ATT&CK content.")
    parser.add_argument("-s", "--save", type=str, required=False,
                        help="Save the CSV file with a different filename.")
    return parser
def object_copy(self, CopySource, ExtraArgs=None, Callback=None,
                SourceClient=None, Config=None):
    """Copy an object from another S3 location to this object.

    This is a managed transfer which performs a multipart copy in
    multiple threads when necessary.

    Usage::
        import ibm_boto3
        s3 = ibm_boto3.resource('s3')
        copy_source = {'Bucket': 'mybucket', 'Key': 'mykey'}
        s3.Bucket('otherbucket').Object('otherkey').copy(copy_source)

    :type CopySource: dict
    :param CopySource: Source bucket name, key name, and optional
        version ID: ``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}``
        (``VersionId`` may be omitted).
    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments passed to the client operation.
    :type Callback: function
    :param Callback: Called periodically with the number of bytes
        transferred during the copy.
    :type SourceClient: ibm_botocore or ibm_boto3 Client
    :param SourceClient: Client used for operations on the source object
        (e.g. the head_object that determines the copy size). Defaults
        to the current client.
    :type Config: ibm_boto3.s3.transfer.TransferConfig
    :param Config: Transfer configuration used when performing the copy.
    """
    # Delegate to the managed-transfer implementation on the client,
    # supplying this object's bucket/key as the destination.
    client = self.meta.client
    return client.copy(
        CopySource=CopySource, Bucket=self.bucket_name, Key=self.key,
        ExtraArgs=ExtraArgs, Callback=Callback, SourceClient=SourceClient,
        Config=Config)
def name_dtypes(file):
    """Return the column names from the first line of the file at the
    given path, as a tuple of strings."""
    with open(file, 'r') as handle:
        header = handle.readline()
    return tuple(header.split())
import re
def getMessageError(response):
    """
    Extract the error message from an ERDDAP error output; return ""
    when no message="..." field is present.
    """
    found = re.search(r'message="(.*)"', response)
    return found.group(1) if found else ""
def _from_proc_output(output: bytes) -> str:
"""
Convert proc output from bytes to str, and trim heading-
and tailing-spaces
:param output: output in bytes
:return: output in str
"""
return str(output, encoding='utf-8').strip(' \t\n') | 67013919c2f8020f5794766f7bc034e80d09eb20 | 694,227 |
from functools import reduce
def get_deep_dict_value(source: dict, keys: str, default = None):
    """
    Get a value from a deeply nested dict.

    :source (dict): Dictionary to get data from.
    :keys (str): Keys split by '|'. E.g. outerkey|middlekey|innerkey.
    :default: Default return value when any key is missing.
    """
    node = source
    for key in keys.split("|"):
        # Once the walk leaves dict territory, only default remains.
        node = node.get(key, default) if isinstance(node, dict) else default
    return node
def calculate_step_or_functional_element_assignment(child_assignments: list, sufficient_scheme=False):
    """
    Assign a step or functional-element result ('YES'/'NO') from the
    assignments of its children (functional elements for steps,
    evidences for functional elements). A child genome property result
    of YES or PARTIAL both count as success, matching the reference
    Perl implementation in the genome-properties repository
    (code/modules/GenomeProperties.pm).

    :param sufficient_scheme: if False, any child NO means NO; if True,
        any child YES/PARTIAL means YES
    :param child_assignments: list of 'YES', 'NO' or 'PARTIAL' strings
    :return: 'YES' or 'NO'
    """
    no_count = child_assignments.count('NO')
    if sufficient_scheme:
        # Any non-NO child (YES or PARTIAL) is enough.
        passed = no_count < len(child_assignments)
    else:
        # Every child must be YES or PARTIAL.
        passed = no_count == 0
    return 'YES' if passed else 'NO'
def _generate_join(collection, local_id, foreign_id):
"""Make join string for query from parameters."""
text = '{{!join from={fid} to={lid} fromIndex={collection}}}'
text = text.format(
collection=collection,
lid=local_id,
fid=foreign_id)
return text | 1e831fadda22866b098b1c0e1673e73d9370dd75 | 694,231 |
def get_tomorrow(utc_now, tz, to_local=False):
    """Return an :class:`~arrow.arrow.Arrow` datetime for *tomorrow
    00:00 local time*.

    *utc_now* is first converted to timezone *tz*, so local daylight
    saving time changes are handled. By default the result is converted
    back to UTC; set *to_local* to ``True`` to keep it in *tz*.

    **Example:** *utc_now* is *2015-01-15 13:37:00+00:00*. With *tz*
    *Europe/Berlin* that is *2015-01-15 14:37:00+01:00* local, so the
    result is *2015-01-16 00:00:00+01:00* when *to_local* is ``True``
    and *2015-01-15 23:00:00+00:00* otherwise.
    """
    local_now = utc_now.to(tz)
    midnight = local_now.replace(hour=0, minute=0, second=0, microsecond=0)
    # replace(days=1) shifts forward one calendar day (arrow semantics).
    next_day = midnight.replace(days=1)
    return next_day.to(tz if to_local else 'utc')
def stub_verify(conn, cert, errno, errdepth, code):
    """Accept any server certificate without verification.

    Used when attempting a MiTM: if the client was connecting to a host
    with a bad cert we still want to connect and intercept. Note that a
    MiTM'd connection could itself be intercepted upstream, so treat the
    path to the real endpoint as untrusted.
    """
    return True
import subprocess
def get_sequence_from_database(exe_loc, db, seq_id):
    """
    Extract a sequence from the given BLAST database and return it.

    Args:
        exe_loc: Directory containing BLAST executables; concatenated
            directly with 'blastdbcmd', so it should end with a path
            separator.
        db: The database to get sequence from.
        seq_id: The sequence ID of the sequence to get.
    Returns:
        The sequence (bytes from blastdbcmd) if found, else an empty
        string.

    # Test:
    >>> get_sequence_from_database('/Users/work/Projects/pyBlast/bin/', '/Users/work/Projects/pyBlast/db/yeast.nt', 'gi|6226515|ref|NC_001224.1|')
    """
    try:
        found = subprocess.check_output(
            [exe_loc + 'blastdbcmd', '-db', db, '-entry', seq_id])
    except (OSError, subprocess.CalledProcessError):
        # Narrowed from a bare ``except``: a missing executable or a
        # non-zero exit both mean "not found"; anything else propagates.
        found = ''
    return found
def extract_pem_cert(tree):
    """
    Extract the X509 certificate embedded in an XML tree and return it
    in standard PEM form.

    :param tree lxml.etree: tree containing the X509Certificate element,
        usually the KeyInfo element of an XMLDsig Signature.
    """
    xpath = './/{http://www.w3.org/2000/09/xmldsig#}X509Certificate'
    body = tree.find(xpath).text
    return "-----BEGIN CERTIFICATE-----\n" + body + "-----END CERTIFICATE-----\n"
import argparse
def parseArgs(params=None):
    """Parse input parameters (arguments).

    params - the list of arguments to be parsed (argstr.split());
        sys.argv is used when None
    return
    directed - whether the input networks can be directed (asymmetric
        adjacency matrix) and directed output (.nsa instead of .nse)
        should be produced
    nets - input networks to be converted
    """
    parser = argparse.ArgumentParser(
        description='Network converter from mathlab format to .nsl (nse/nsa).')
    parser.add_argument('mnets', metavar='MatNet', type=str, nargs='+',
                        help='unsigned input network in the .mat format')
    parser.add_argument('-d', '--directed', dest='directed', action='store_true',
                        help='form directed output network from possibly directed input network')
    opts = parser.parse_args(params)
    return opts.directed, opts.mnets
import os
def dp_config_path(config_file, parent_file=None):
    """Resolve *config_file* to a real absolute path; relative paths are
    taken relative to *parent_file*'s directory when one is given."""
    if parent_file and not os.path.isabs(config_file):
        base = os.path.dirname(parent_file)
        return os.path.realpath(os.path.join(base, config_file))
    return os.path.realpath(config_file)
def expand_by_interval(firstPos, numrep, horizontal=None, vertical=None):
    """
    Expand the given first position (2-length tuple or list) into
    numrep points spaced by the given horizontal or vertical interval.

    Raises ValueError when neither interval is supplied.
    """
    x0, y0 = firstPos
    if horizontal is not None:
        return [(x0 + horizontal * step, y0) for step in range(numrep)]
    if vertical is not None:
        return [(x0, y0 + vertical * step) for step in range(numrep)]
    raise ValueError("both horizontal and vertical are None!")
import re
def parseVtxIdx(idxList):
    """Convert vertex index strings into a flat list of int indexes.

    idxList : [u'vtx[1]', u'vtx[3]', u'vtx[12:13]']
    return  : [1, 3, 12, 13]
    """
    result = []
    for token in idxList:
        found = re.search(r'\[.+\]', token)
        if not found:
            continue
        body = found.group()[1:-1]
        if ':' in body:
            # "a:b" expands to the inclusive range a..b; extra ':'
            # segments beyond the second are ignored, as before.
            parts = body.split(':')
            result.extend(range(int(parts[0]), int(parts[1]) + 1))
        else:
            result.append(int(body))
    return result
def _transpose(group):
"""
Given a list of 3-tuples from _grouper, return 3 lists.
Also filter out possible None values from _grouper
"""
a, b, c = [], [], []
for g in group:
# g can be None
if g is not None:
x, y, z = g
#if x is not None and y is not None and z is not None:
a.append(x)
b.append(y)
c.append(z)
return a, b, c | 2027c7ee84340d6758fe352058b8b8cd962e4d96 | 694,240 |
def _is_member(s, e):
"""Return true if `e` is in the set `s`.
Args:
s: The set to inspect.
e: The element to search for.
Result:
Bool, true if `e` is in `s`, false otherwise.
"""
return e in s._set_items | 810336bb16babcca3af8bc9c931da3d058b6f14f | 694,242 |
import pandas
def concat_as_df_one_country_from_many_scenarios(scenario_names, func):
    """
    Concatenate the data frames returned by *func* for each scenario.

    :param list scenario_names: list of scenario names
    :param func function: function taking a scenario name and returning
        a pandas data frame
    :return: one concatenated data frame with a 'scenario' column plus
        all columns of the individual outputs.

    Example usage:
        scenario_names = ['static_demand', 'demand_minus_20', 'demand_plus_20']
        def get_merch(scenario):
            runner = continent[(scenario, country_iso2, 0)]
            return runner.post_processor.inventory.sum_merch_stock
        merch = concat_as_df_one_country_from_many_scenarios(scenario_names, get_merch)
    """
    # Keyed concat adds the scenario name as the outermost index level.
    per_scenario = {}
    for name in scenario_names:
        per_scenario[name] = func(name).copy()
    combined = pandas.concat(per_scenario, sort=True)
    combined = combined.reset_index(level=0)
    return combined.rename(columns={'level_0': 'scenario'})
def read(f):
    """Read the file at path *f* and return its content decoded as UTF-8."""
    # A context manager closes the handle promptly; the original leaked it.
    with open(f, 'rb') as handle:
        return handle.read().decode('utf-8')
def get_file_type(filename):
    """
    Return the extension (if any) of the ``filename`` in lower case.
    """
    # When there is no dot, rfind() returns -1 and the whole name
    # (lowercased) is returned -- preserved for compatibility.
    dot = filename.rfind('.')
    return filename[dot + 1:].lower()
def pronto2kira(data):
    """Convert a pronto code to a discrete (single button press) Kira code.

    The pronto string is whitespace-separated hex words: a 4-word preamble
    followed by ``2 * data_len`` timing words.
    """
    octets = [int(x, 16) for x in data.split()]
    preamble = octets[:4]
    # Pronto carrier frequency in kHz: 1000 / (N * 0.241246)
    freq = 1000.0 / (preamble[1] * 0.241246)
    # Microseconds per carrier cycle.
    period = 1000000.0 / (freq * 1000.0)
    data_len = preamble[2]
    # "%X" requires an integer on Python 3 (freq is a float here); truncate
    # the way Python 2's implicit conversion did.
    res = "K %02X%02X " % (int(freq), data_len)
    res += " ".join("%0.4X" % min(0x2000, int(period * x))
                    for x in octets[4: 4 + (2 * data_len)])
    return res
def _safe_value(value):
"""Converts value to int or float"""
if isinstance(value, (float, int)):
return value
value_int = int(value)
value_float = float(value)
return value_int if value_int == value_float else value_float | 215c911e7d435a58a7cf71896fbcf748575ab9c7 | 694,247 |
def covariance_matrix(nums_with_uncert):
    """
    Return the covariance matrix (list of lists of floats) of the given
    sequence of numbers with uncertainties (AffineScalarFunc objects).

    The ordering of rows/columns follows the ordering of
    'nums_with_uncert'.  The result is the exact linear-approximation
    covariance when the nominal values are the means of the underlying
    variables, and an approximation otherwise.
    """
    # Build the lower triangle (including the diagonal) row by row.
    matrix = []
    for (idx, expr_i) in enumerate(nums_with_uncert):
        derivs_i = expr_i.derivatives
        vars_i = set(derivs_i)
        row = []
        for expr_j in nums_with_uncert[:idx + 1]:
            derivs_j = expr_j.derivatives
            # Only variables shared by both numbers contribute.
            cov = 0.
            for var in vars_i.intersection(derivs_j):
                cov += derivs_i[var] * derivs_j[var] * var._std_dev ** 2
            row.append(cov)
        matrix.append(row)
    # Mirror the lower triangle into the upper triangle.
    size = len(matrix)
    for (i, row) in enumerate(matrix):
        row.extend(matrix[j][i] for j in range(i + 1, size))
    return matrix
def buildParameters(obj, validList):
    """Build a command-line parameter list from attributes of *obj*.

    Each name in *validList* whose attribute value is truthy becomes a
    ``--name`` flag (underscores turned into dashes).  Non-boolean values
    are appended after the flag; booleans act as value-less switches.

    The doctest outputs below are Python 3 reprs (the previous ones were
    Python-2-style ``u'...'`` and failed under Python 3).

    >>> class TestClass(object):
    ...     pass
    >>> testClass = TestClass()
    >>> testClass.a = 1
    >>> testClass.b = "2"
    >>> testClass.c = 3
    >>> testClass.d = True
    >>> buildParameters(testClass, ["a", "b"])
    ['--a', '1', '--b', '2']
    >>> testClass.b = None
    >>> buildParameters(testClass, ["a", "b"])
    ['--a', '1']

    Boolean flags don't carry a value:

    >>> buildParameters(testClass, ["a", "d"])
    ['--a', '1', '--d']
    """
    params = []
    for name in validList:
        value = getattr(obj, name)
        if not value:
            continue  # falsy attributes are omitted entirely
        params.append("--%s" % name.replace("_", "-"))
        if not isinstance(value, bool):
            text = str(value)
            if text:  # original behavior: empty string values are dropped
                params.append(text)
    return params
import numpy
def calc_protrude_scipy(independent_flux, *args):
    """Objective function used in initializing_Rm_fitting (SLSQP).

    Computes a penalty score for metabolic states outside the feasible
    space or, when a template flux is given, the L1 distance to the
    template.

    Args:
        independent_flux (array): vector of independent flux values.
        *args (list): a single dict of parameters with keys
            'Rm_initial', 'stoichiometric_num', 'reaction_num',
            'matrixinv', 'lb', 'ub', 'template'.

    Returns:
        float: penalty score (0.0 when fully feasible and no template).

    See Also:
        initializing_Rm_fitting
    """
    kwargs = args[0]
    Rm_initial = kwargs['Rm_initial']
    stoichiometric_num = kwargs['stoichiometric_num']
    reaction_num = kwargs['reaction_num']
    matrixinv = kwargs['matrixinv']
    lb = kwargs['lb']
    ub = kwargs['ub']
    template = kwargs['template']
    # Fill the independent part of the flux vector, then recover the full
    # flux distribution through the provided inverse matrix.
    Rm = numpy.array(list(Rm_initial))
    Rm[stoichiometric_num: reaction_num] = independent_flux[:]
    tmp_r = numpy.dot(matrixinv, Rm)
    f = 0.0
    # (The previous version also accumulated a constraint list `g` and a
    # `fail` flag that were never used or returned; both removed.)
    if len(template) > 0:
        # A template flux is available: penalty is the L1 distance to it.
        for i, flux in enumerate(tmp_r):
            f = f + abs(flux - template[i])
    else:
        # Generating a random flux: penalize bound violations only.
        for i, flux in enumerate(tmp_r):
            if flux > ub[i]:
                f = f + (flux - ub[i])
            elif flux < lb[i]:
                f = f + (lb[i] - flux)
    return f
from typing import Dict
async def assert_reply_embed_regex(self, message: str, patterns: Dict[str, str]):
    """Send *message*, await the bot's reply, and regex-check its embed.

    The running test fails when the reply is not an embed or its
    attributes do not match *patterns*.  See examples in example_target.py.

    :param message: text to send.
    :param patterns: attribute name -> regex, as accepted by
        :py:meth:`assert_embed_regex <distest.TestInterface.assert_embed_regex>`.
    :return: the reply message.
    :rtype: discord.Message
    """
    reply = await self.wait_for_reply(message)
    checked = await self.assert_embed_regex(reply, patterns)
    return checked
def cap_contributions(s_contrib, threshold=0.1):
    """
    Build a boolean mask flagging where the input matrix has values at or
    above the given threshold in absolute value.

    Parameters
    ----------
    s_contrib : pandas.DataFrame
        Local contributions, positive and negative values.
    threshold: float, optional (default: 0.1)
        User defined threshold above which local contributions are hidden.

    Returns
    -------
    pandas.DataFrame
        Mask with only True or False elements.
    """
    return s_contrib.abs().ge(threshold)
def get_response(event_query):
    """Helper for Get methods: serialize the events of a query result
    into a list of plain dicts."""
    fields = ('id', 'event', 'location', 'category', 'date',
              'description', 'created_by')
    return [{name: getattr(event, name) for name in fields}
            for event in event_query.items]
def only_one_image(supdata, max):
    """Place has one image: either the caller asked for a single photo
    (max == 1) or the stored photo reference is a single string."""
    if max == 1:
        return True
    return isinstance(supdata['photo_ref'], str)
import numpy
def zNormalize(ts):
    """
    Return a z-normalized copy of a time series.

    The input is no longer modified in place: the previous version
    subtracted the mean from `ts` itself (even before validating the
    standard deviation), mutating the caller's array.

    Parameters
    ----------
    ts: Time series to be normalized.

    Raises
    ------
    ValueError
        If the standard deviation of `ts` is zero.
    """
    std = numpy.std(ts)
    if std == 0:
        raise ValueError("The Standard Deviation cannot be zero")
    return (ts - numpy.mean(ts)) / std
import socket
import struct
def send_tcp(data, host, port):
    """
    Helper function to send/receive DNS TCP request
    (in/out packets will have prepended TCP length header).

    The socket is now closed deterministically, including on error
    (the previous version leaked it when connect/recv raised).
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((host, port))
        sock.sendall(data)
        response = sock.recv(8192)
        # The first two bytes carry the length of the DNS message that
        # follows; keep reading until the whole message arrived.
        length = struct.unpack("!H", bytes(response[:2]))[0]
        while len(response) - 2 < length:
            response += sock.recv(8192)
    return response
def arity(argspec):
    """Determine the positional arity of *argspec*: the number of
    positional arguments that have no default value."""
    positional = argspec.args or []
    with_default = argspec.defaults or []
    return len(positional) - len(with_default)
def fw(model, input, model_pretrain_method):
    """Run *model* on *input* and pick the relevant part of the output
    according to how the model was pre-trained."""
    output = model(input)
    if model_pretrain_method == "torchvision":
        return output
    # Swav models put the useful tensor second; everything else, first.
    index = 1 if model_pretrain_method == "Swav" else 0
    return output[index]
import torch
def postprocess(images):
    """Map images from the [-1, 1] range back to [0, 1]."""
    shifted = (images + 1.) / 2.
    return torch.clamp(shifted, 0., 1.)
def first_shift_is_valid(cur_individual):
    """Check that the first shift is not an extension (its two leading
    characters are not '00')."""
    first_shift = cur_individual[:2]
    return first_shift != '00'
def removeAnchor(url):
    """
    "http://www.website.com#my-anchor" -> "http://www.website.com"
    """
    anchor_at = url.find('#')
    # A '#' at position 0 (or absent) leaves the url unchanged, as before.
    if anchor_at > 0:
        return url[:anchor_at]
    return url
def dec2sex(rain,decin,as_string=False,decimal_places=2):
    """
    Converts decimal coordinates to sexagesimal.
    Parameters
    ----------
    rain : float
        Input Right Ascension in decimal -- e.g., 12.34567
    decin : float
        input Declination in decimal -- e.g. -34.56789
    as_string : bool
        Specifies whether to return output as a string (useful for making tables)
    decimal_places : int
        Number of decimals places to use when as_string=True
    Returns
    -------
    list
        ['HH:MM:SS.ss', 'DD:MM:SS.ss']
    """
    # Convert RA from degrees to hours (24 h == 360 deg), then split the
    # total seconds into hours / minutes / seconds.
    rmins,rsec=divmod(24./360*rain*3600,60)
    rh,rmins=divmod(rmins,60)
    #dmins,dsec=divmod(decin*3600,60)
    #ddeg,dmins=divmod(dmins,60)
    # Declination: truncate toward zero for whole degrees, then take the
    # absolute fractional part for arcminutes and arcseconds.
    ddeg=int(decin)
    dmins=int(abs(decin-ddeg)*60)
    dsec=(abs((decin-ddeg)*60)-dmins)*60
    # NOTE(review): for declinations between 0 and -1 degrees, int(decin)
    # is 0 and the minus sign is lost in the output -- confirm intended.
    # String form zero-pads each field; the seconds field is padded to
    # decimal_places+3 characters with decimal_places decimals.
    if as_string==True: return ['{0}:{1}:{2:0>{4}.{3}f}'.format(int(rh),int(rmins),rsec,decimal_places,decimal_places+3),'{0}:{1}:{2:0>{4}.{3}f}'.format(int(ddeg),int(dmins),dsec,decimal_places,decimal_places+3)]
    else: return [int(rh),int(rmins),rsec],[int(ddeg),int(dmins),dsec]
import base64
def encode_info_basic_http_auth(username, password):
    """
    Build the value for the HTTP `Authenticate` header using basic
    authentication.

    :param username: username
    :param password: password
    :return: encoded header value, e.g. ``Basic dXNlcjpwYXNz``
    """
    credentials = '{}:{}'.format(username, password)
    token = base64.b64encode(credentials.encode('ascii'))
    return 'Basic {}'.format(token.decode('ascii'))
def sim_distance(prefs, person1, person2):
    """
    Returns a distance based similarity score.
    """
    ratings1 = prefs[person1]
    ratings2 = prefs[person2]
    shared = [item for item in ratings1 if item in ratings2]
    # No ratings in common means no measurable similarity.
    if not shared:
        return 0
    sum_of_squares = sum(pow(ratings1[item] - ratings2[item], 2)
                         for item in shared)
    return 1 / (1 + sum_of_squares)
def is_new_style(cls):
"""
Python 2.7 has both new-style and old-style classes. Old-style classes can
be pesky in some circumstances, such as when using inheritance. Use this
function to test for whether a class is new-style. (Python 3 only has
new-style classes.)
"""
return hasattr(cls, '__class__') and ('__dict__' in dir(cls)
or hasattr(cls, '__slots__')) | a303d87a5ef790d629dff6fcd07c46d029277530 | 694,267 |
def _set_window_time(slices, times):
"""Aux function to define time as the last training time point"""
t_idx_ = [t[-1] for t in slices]
return times[t_idx_] | 0c296951496d2d2e5525359d6939add37fd0b07c | 694,268 |
import os
def dockerize_filepath(path):
    """
    Convert a given filepath to be relative to the input-output folder
    that this container gets from the host system.
    """
    container_root = '/input-output'
    return os.path.join(container_root, path)
def path2FileName(path):
    """Answer the file name part of the path, or None if there is none.
    >>> path2FileName('../../aFile.pdf')
    'aFile.pdf'
    >>> path2FileName('../../') is None # No file name
    True
    """
    tail = path.rsplit('/', 1)[-1]
    return tail or None
def transaction(request, connection):
    """Will start a transaction on the connection. The connection will
    be rolled back after it leaves its scope."""
    txn = connection.begin()
    def _rollback():
        print ("Rollback")
        txn.rollback()
    request.addfinalizer(_rollback)
    return connection
def display(keyword):
    """``display`` property validation."""
    basic = ('inline', 'block', 'inline-block', 'list-item', 'none')
    table = ('table', 'inline-table', 'table-caption',
             'table-row-group', 'table-header-group', 'table-footer-group',
             'table-row', 'table-column-group', 'table-column', 'table-cell')
    flex = ('flex', 'inline-flex')
    return keyword in basic + table + flex
def file_search(f_name, data_str):
    """Function: file_search
    Description: Search for a string in a file and return the first line
        that contains it.
    Arguments:
        (input) f_name -> File name searching.
        (input) data_str -> Search string.
        (output) line -> Full line the string was found in, or None.
    """
    with open(f_name, "r") as handle:
        for candidate in handle:
            if data_str in candidate:
                return candidate
    return None
def build_abstract(*args):
    """Combines multiple messages into a single abstract over multiple lines.
    Falsy messages (None, empty strings) are skipped.
    >>> build_abstract("test1", "test2")
    'test1\\ntest2'
    """
    kept = [message for message in args if message]
    return "\n".join(kept)
import plistlib
def load_datafile(filename):
    """Load a plist data file and return the contents as a dictionary."""
    with open(filename, 'rb') as handle:
        return plistlib.load(handle)
def get_qualified_name(names):
    """
    ``get_qualified_name`` builds a ``::``-qualified name from the
    provided name list.
    :param names: name list to qualify
    :type names: list(str)
    :return: a qualified name
    :rtype: str
    :Example:
        >>> get_qualified_name(['Foobar', 'testf'])
        'Foobar::testf'
    """
    separator = "::"
    return separator.join(names)
import hashlib
def rgb_from_string(text, min_brightness=0.6):
    """Create an rgb color tuple from a given string.

    The MD5 digest of the reversed text supplies the raw channel bytes,
    which are then rescaled into [min_brightness, 1.0].
    """
    digest = hashlib.md5(text[::-1].encode("ascii")).hexdigest()
    span = 1.0 - min_brightness
    channels = (int(digest[i:i + 2], 16) for i in range(0, 6, 2))
    return tuple(min_brightness + c / 255.0 * span for c in channels)
def validate_key(key, keyname='Key'):
    """
    Check input is valid key (Experiment id, metric name...): it must be
    a str and must not contain a colon.
    """
    if not isinstance(key, str):
        raise ValueError(
            '{} must be str, given: {}{}'.format(keyname, key, type(key)))
    if ':' in key:
        raise ValueError(
            '{} cannot contain colon (:): {}'.format(keyname, key))
    return key
import os
def get_version_info():
    """ Extract version information as a dictionary from version.py. """
    info = {}
    version_path = os.path.join("ibm2ieee", "version.py")
    with open(version_path, "r", encoding="utf-8") as stream:
        source = stream.read()
    code = compile(source, "version.py", "exec")
    exec(code, info)
    return info
import torch
def pre_process_datasets(encoded_datasets, start_token, delimiter_token, answer_token, clf_token, end_token):
    """ Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)
    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]

    NOTE(review): each `ex` is assumed to expose .natural_tensor(end_token)
    (a list of token ids), .text (a list of prompt token ids) and
    .explanation -- project-specific example type; confirm against caller.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)  # NOTE(review): computed but never used
        tensors, labels, prompts = [], [], []
        max_len = 0
        max_prompt = 0
        train=True
        for i, ex in enumerate(dataset):
            tensor = ex.natural_tensor(end_token)
            # Track maximum sequence / prompt lengths for padding below.
            if len(tensor) > max_len:
                max_len = len(tensor)
            if len(ex.text) > max_prompt:
                max_prompt = len(ex.text)
            if ex.explanation is not None:
                # Training example: mask the prompt positions with -1 so the
                # loss only covers the generated part.
                train = True
                label = [-1] * len(ex.text) + tensor[len(ex.text):]
                labels.append(label)
            else:
                # Inference example: keep the raw prompt instead of labels.
                train = False
                prompts.append(ex.text)
            tensors.append(tensor)
        # Right-pad everything to a rectangular shape (0. for inputs, -1
        # for prompts/labels, i.e. the ignored-index convention).
        tensors = [f + [0.] * (max_len - len(f)) for f in tensors]
        prompts = [f + [-1] * (max_prompt - len(f)) for f in prompts]
        labels = [f + [-1] * (max_len - len(f)) for f in labels]
        # Inputs drop the last position and labels drop the first, giving
        # the usual next-token (shift-by-one) alignment.
        if train:
            tensor_datasets.append([torch.tensor(tensors, dtype=torch.long)[:, :-1], torch.tensor(labels, dtype=torch.long)[:, 1:], torch.tensor(prompts, dtype=torch.long)])
        else:
            tensor_datasets.append([torch.tensor(tensors, dtype=torch.long)[:, :-1], torch.tensor(prompts, dtype=torch.long)])
    return tensor_datasets
def mask_topk(x, topkk):
    """
    Return the indices of the `topkk` largest entries of `x`, in
    decreasing order of value.

    Args:
        x: [N, ] array
        topkk (int): number of indices to return

    Returns:
        array of shape [topkk, ] (at most N entries)
    """
    # `x.argsort()[-topkk:]` breaks for topkk == 0: slicing with -0 yields
    # the whole array.  Reversing first and taking a head slice is
    # identical for topkk > 0 and correct for topkk == 0.
    return x.argsort()[::-1][:topkk]
def get_col_info(datadf, colname, colsource='source', map='all'):
    """Search a data dictionary dataframe for a column and return close
    matches.

    Parameters
    ----------
    datadf : dataframe
        the dataframe that has dictionary columns.
    colname : str
        the column name to search for.
    colsource : str, optional
        the column to search [source, or sourcep_]. The default is 'source'.
    map : str, optional
        filter on map column ['program', 'all']. The default is 'all'.

    Returns
    -------
    DataFrame
        dictionary rows for columns that are close matches to the
        provided text.
    """
    # flags=2 is re.IGNORECASE; missing values are treated as non-matches.
    is_match = datadf[colsource].str.contains(colname, flags=2).fillna(False)
    matches = datadf[is_match]
    if map == 'program':
        matches = matches[matches['map'] == 'program']
    return matches
def get_consensus(out_fn, trim_margin):
    """ Extract consensus sequence from output of spoa.

    The file is read through a context manager, so the handle is closed
    even on the empty-file path (the previous version leaked it there).

    Parameters
    ----------
    out_fn : str (output from spoa)
    trim_margin : int (number of bp to trim on each end of the consensus,
        as the consensus sequence is more likely to be erroneous on the
        ends)

    Returns
    -------
    consensus : bytes (trimmed last line of the file); the empty-file
        case returns '' -- presumably callers only truth-test it, but
        note the str/bytes mismatch is preserved from the original.
    """
    with open(out_fn, 'rb') as fh:
        lines = fh.readlines()
    if not lines:
        return ''
    # The consensus is the last line; strip its trailing newline byte.
    consensus = lines[-1][:-1]
    return consensus[trim_margin:len(consensus) - trim_margin]
def rank_software_invocation(soft_invocation_info_list):
    """
    Create a ranking over the different ways of executing a program.
    If two elements have the same position in the ranking, it means that
    there is no priority among them.

    Heuristic, in decreasing order of prioritization:
      - A package or library is always first.
      - Anything (script or service) mentioned in the readme gets priority.
      - Services are prioritized over scripts.
      - Scripts with main are prioritized over scripts with body.
      - Scripts with body are prioritized over scripts with no body.

    :param soft_invocation_info_list: JSON list (dicts) with the different
        ways to execute a program; each entry gains a "ranking" key.
    :return: the same list, sorted by decreasing priority.
    """
    if len(soft_invocation_info_list) == 0:
        return soft_invocation_info_list
    # Calculate a score for every entry in the list.
    for entry in soft_invocation_info_list:
        score = 0
        if "library" in entry["type"] or "package" in entry["type"]:
            score += 100
        # The optional keys may be absent (or hold a non-container value),
        # hence the narrow handlers -- previously bare `except:` clauses
        # that silently swallowed every error.
        try:
            if entry["mentioned_in_readme"]:
                score += 10
        except (KeyError, TypeError):
            pass
        if "service" in entry["type"]:
            score += 5
        try:
            if "main" in entry["has_structure"]:
                score += 2
            if "body" in entry["has_structure"]:
                score += 1
        except (KeyError, TypeError):
            pass
        entry["ranking"] = score
    # Reorder the list and replace scores by rank numbers, keeping equal
    # scores on the same rank.
    soft_invocation_info_list.sort(key=lambda x: x["ranking"], reverse=True)
    position = 1
    previous_score = soft_invocation_info_list[0]["ranking"]
    for entry in soft_invocation_info_list:
        current_score = entry["ranking"]
        if previous_score > current_score:  # ordered in descending order
            position += 1
            previous_score = current_score
        entry["ranking"] = position
    return soft_invocation_info_list
def normalize_string(string):
    """
    Standardize input strings: turn non-ascii spaces into ascii ones,
    drop backslashes, and convert treebank-style bracket tokens back
    into their characters.

    Arguments:
    ----------
    string : str, characters to be standardized.

    Returns:
    --------
    str : standardized
    """
    replacements = (
        ("\xa0", " "),
        ("\\", ""),
        ("-LRB-", "("),
        ("-RRB-", ")"),
        ("-LCB-", "{"),
        ("-RCB-", "}"),
        ("-LSB-", "["),
        ("-RSB-", "]"),
    )
    result = string
    for old, new in replacements:
        result = result.replace(old, new)
    return result
def stairs(N):
    """
    Produces stairs array of size N: row i has i+1 '#' characters,
    right-padded with spaces to width N.
    """
    return ['#' * (row + 1) + ' ' * (N - row - 1) for row in range(N)]
def get_scope_completion(view, rules, skip_sel_check=False):
    """Get additional auto-popup scope rules for incomplete colors only.

    Returns the comma-joined scopes, or None when rules are missing, the
    selection is empty, or the selection does not score on the scopes.
    """
    if rules is None:
        return None
    scopes = ','.join(rules.get('scan_completion_scopes', []))
    sels = view.sel()
    if skip_sel_check:
        return scopes
    if len(sels) == 0 or not scopes:
        return None
    if view.score_selector(sels[0].begin(), scopes) == 0:
        return None
    return scopes
def clip_box(box, max_width, max_height):
    """Clip an [x, y, w, h] box to the given width/height bounds.

    Width/height are only shrunk (and floored at 0.0) when the box
    actually overflows the bound, matching the original behavior.
    """
    x, y, w, h = box
    if x + w > max_width:
        w = max(max_width - x, 0.0)
    if y + h > max_height:
        h = max(max_height - y, 0.0)
    x = min(x, max_width)
    y = min(y, max_height)
    return [x, y, w, h]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.