content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def get_objective_by_task(target, task):
    """Return the learning objective name for a given task.

    A classification target with exactly two unique values maps to
    'binary', any other cardinality maps to 'multi'; regression always
    maps to 'regression'. Unknown tasks are handed off to
    raise_invalid_task_error.
    """
    if task == 'classification':
        objective = 'binary' if target.nunique() == 2 else 'multi'
    elif task == 'regression':
        objective = 'regression'
    else:
        raise_invalid_task_error(task)
    return objective
|
7859f84ba89246b735c61b3b31e421b38692de34
| 3,648,108
|
def pileupGenes(GenePositions,filename,pad=500000,doBalance=False,
                TPM=0,CTCFWapldKO=False,TPMlargerthan=True,
                minlength=0,maxlength=5000000,OE=None, useTTS=False):
    """
    This function piles up Hi-C contact maps around genes, centered on TSSs or TTSs.
    Inputs
    ------
    GenePositions - pandas dataframe - with genes and their transcription intensity
    filename - str - is path to cooler file
    pad - int - half of the window size in bp
    OE - str or None - path to scaling data to use as "expected" to compute observed over expected
        (NOTE(review): the code indexes OE[mychr] and divides by it, so at runtime
        it must be a per-chromosome mapping/array, not a path string — confirm)
    useTTS - bool - False to pile on TSS, True to pile on TTS
    other parameters do some optional filtering of the data frame
    Returns a list of 2D matrices (one window per selected gene).
    """
    # Sort genes by the coordinate we will center on (TSS="start", TTS="end").
    sortString="start"
    if useTTS:
        sortString="end"
    OrderedPositions=GenePositions.sort_values(sortString)
    c = cooler.Cooler(filename)
    res = c.info['bin-size']
    # NOTE(review): genome assembly is hard-coded to mm9 — confirm it matches
    # the cooler file being piled up.
    chromsizes = bioframe.fetch_chromsizes('mm9')
    chrmList = list(chromsizes.index)
    runningCount = 0
    pile = []
    for mychr in chrmList: #Iterate over chromosomes
        mychrstrCooler=mychr
        mychrstrDataFrame=mychr#Chromosomes in the dataframe GenePositions
        #are labeled 1 to 19, X Y and M, while in the cooler file they are labeld 0 to 21
        current = OrderedPositions[OrderedPositions["chrom"] == mychrstrDataFrame]
        if len(current) <= 0:
            continue
        #identify + and - so we can reorient genes
        #genes for which strand is +, and current gene is not too long and not too short
        currentPlusStrand=current[(current['strand']=='+')&(current['gene_length']<maxlength)
                                  &(current['gene_length']>minlength)]
        #genes for which strand is -, and current gene is not too long and not too short
        currentMinusStrand=current[(current['strand']=='-')&(current['gene_length']<maxlength)
                                   &(current['gene_length']>minlength)]
        if TPMlargerthan: #filter by TPM > threshold
            if CTCFWapldKO:
                currentPlusStrand=currentPlusStrand[(currentPlusStrand['TPM_dKO+Adeno-Cre_30251-30253']>=TPM)]
                currentMinusStrand=currentMinusStrand[(currentMinusStrand['TPM_dKO+Adeno-Cre_30251-30253']>=TPM)]
            else:
                currentPlusStrand=currentPlusStrand[(currentPlusStrand['TPM_wildtype']>=TPM)]
                currentMinusStrand=currentMinusStrand[(currentMinusStrand['TPM_wildtype']>=TPM)]
        else: #filter by TPM < thresh
            if CTCFWapldKO:
                currentPlusStrand=currentPlusStrand[(currentPlusStrand['TPM_dKO+Adeno-Cre_30251-30253']<=TPM)&(currentPlusStrand['next_TPM_dKO']>0)]
                # NOTE(review): this minus-strand filter mixes in a condition on
                # currentPlusStrand (different index), which misaligns the boolean
                # mask; the wildtype branch below uses the minus-strand frame and a
                # 'next_*' column instead — confirm which filter is intended.
                currentMinusStrand=currentMinusStrand[(currentMinusStrand['TPM_dKO+Adeno-Cre_30251-30253']<=TPM)
                                                      &(currentPlusStrand['TPM_dKO+Adeno-Cre_30251-30253']>0)]
            else:
                currentPlusStrand=currentPlusStrand[(currentPlusStrand['TPM_wildtype']<=TPM)
                                                    &(currentPlusStrand['next_TPM_wildtype']>0)]
                currentMinusStrand=currentMinusStrand[(currentMinusStrand['TPM_wildtype']<=TPM)
                                                      &(currentMinusStrand['TPM_wildtype']>0)]
        # Plus-strand genes: center on "start" for TSS piles ("end" for TTS).
        centerString="start"
        if useTTS:
            centerString="end"
        # NOTE(review): st and end are zipped from the SAME column, so st == end
        # and reg1 == reg2 (an on-diagonal square window around the anchor) —
        # confirm this is intentional rather than a copy-paste of the column name.
        for st, end in zip(currentPlusStrand[centerString].values, currentPlusStrand[centerString].values):
            # Snap the window edges down to the cooler bin grid.
            reg1 = '{}:{}-{}'.format(mychrstrCooler, int(np.floor((st - pad) / res) * res),
                                     int(np.floor((st + pad) / res) * res),)
            reg2 = '{}:{}-{}'.format(mychrstrCooler,int(np.floor((end - pad) / res) * res),
                                     int(np.floor((end + pad) / res) * res))
            #from balanced matrix, fetch regions
            try:
                mat = c.matrix(balance=doBalance).fetch(reg1, reg2)
                if OE!=None:#Divide by expected
                    mat=mat/OE[mychr]
                pile.append(mat)
            except Exception as e:
                # Windows running off the chromosome ends (or other fetch errors)
                # are skipped rather than padded with NaNs.
                print(e)
                #mat = np.nan * np.ones((pad * 2 //res, pad * 2 //res))
                print('Cannot retrieve a window:', reg1, reg2)
        # Minus-strand genes: the anchor column is swapped ("end" for TSS piles)
        # because the gene runs right-to-left.
        centerString="end"
        if useTTS:
            centerString="start"
        for st, end in zip(currentMinusStrand[centerString].values, currentMinusStrand[centerString].values):
            reg1 = '{}:{}-{}'.format(mychrstrCooler, int(np.floor((st - pad) / res) * res),
                                     int(np.floor((st + pad) / res) * res),)
            reg2 = '{}:{}-{}'.format(mychrstrCooler,int(np.floor((end - pad) / res) * res),
                                     int(np.floor((end + pad) / res) * res))
            try:
                temp=c.matrix(balance=doBalance).fetch(reg1, reg2)
                if OE!=None:#Divide by expected
                    temp=temp/OE[mychr]
                mat = temp[::-1].T[::-1].T #Rotate matrix 180 degrees to align genes
                pile.append(mat)
            except Exception as e:
                print(e)
                #mat = np.nan * np.ones((pad * 2 //res, pad * 2 //res))
                print('Cannot retrieve a window:', reg1, reg2)
    return pile
|
671f575b09131f44f07d659bc8e195a16cd9e2f9
| 3,648,109
|
def model_cnn_2layer(in_ch, in_dim, width, linear_size=128):
    """
    Small two-layer CNN with 4x4 kernels and stride-2 downsampling.
    Parameter:
    in_ch: input image channel, 1 for MNIST and 3 for CIFAR
    in_dim: input dimension, 28 for MNIST and 32 for CIFAR
    width: width multiplier
    linear_size: hidden units in the fully-connected layer
    """
    # Two stride-2 convolutions shrink each spatial dimension by 4x.
    feat_dim = in_dim // 4
    layers = [
        nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1),
        nn.ReLU(),
        Flatten(),
        nn.Linear(8 * width * feat_dim * feat_dim, linear_size),
        nn.ReLU(),
        nn.Linear(linear_size, 10),
    ]
    return nn.Sequential(*layers)
|
a08ec035cde3dd024ec32d85adb0af045fe845eb
| 3,648,110
|
def reserve_api():
    """Helper function for making API requests to the /reserve API endpoints
    :returns: a function that can be called to make a request to /reserve
    """
    def execute_reserve_api_request(method, endpoint, **kwargs):
        # Resolve the master client lazily on each call so the current
        # configuration is always used.
        client = master_api()
        return client(method, "/reserve%s" % endpoint, **kwargs)
    return execute_reserve_api_request
|
d9e07fcd72742685443cd83f44eaed074a8152dc
| 3,648,113
|
import re
def extract_user_id(source_open_url):
    """Extract the numeric user id from a profile deep-link URL.

    :param source_open_url: e.g. "sslocal://profile?refer=video&uid=6115075278"
    :return: the trailing digits as a string, or None when the URL is not a
        profile link or has no trailing numeric id.
    """
    # The scheme "sslocal://" is 10 characters, so "profile" sits at [10:17].
    if source_open_url[10:17] != 'profile':
        return None
    # Raw string for the regex (the original "\d+$" relies on an invalid
    # escape sequence); explicit None check instead of catching
    # AttributeError — the old KeyError clause was unreachable.
    match = re.search(r"\d+$", source_open_url)
    if match is None:
        return None
    return match.group(0)
|
36d3e41c4361a29306650fc67c9f396efe92cd66
| 3,648,114
|
def prolog_rule(line):
    """Build a decorator that records *line* as a rule's first prolog entry."""
    def specify(rule):
        """Prepend the captured line to the rule's prolog and return the rule."""
        rule.prolog[:0] = [line]
        return rule
    return specify
|
dde4840dc2f8f725d4c4c123aed7c978ec1948f9
| 3,648,115
|
def load_GloVe_model(path):
    """
    Load a GloVe embedding file into a dictionary.
    :param path: model path; each line is "<word> <v1> <v2> ...".
    :return: dict mapping word -> numpy float array of the embedding values
    """
    print("Load GloVe Model.")
    model = {}
    # Stream line by line instead of materializing the whole file with
    # readlines(); GloVe files are commonly hundreds of MB.
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            parts = line.split()
            if not parts:
                continue  # tolerate blank lines
            word = parts[0]
            # Convert to float: the original stored the raw strings, which
            # breaks any numeric use of the embeddings downstream.
            model[word] = np.array(parts[1:], dtype=np.float64)
    print("Done.", len(model), " words loaded!\n")
    return model
|
40e8fe203b195621b776ea3650bb531956769b48
| 3,648,116
|
import numpy
def quadratic_program() -> MPQP_Program:
    """a simple mplp to test the dimensional correctness of its functions"""
    # Constraint matrix for A x <= b + F theta.
    A = numpy.array(
        [[1, 1, 0, 0],
         [0, 0, 1, 1],
         [-1, 0, -1, 0],
         [0, -1, 0, -1],
         [-1, 0, 0, 0],
         [0, -1, 0, 0],
         [0, 0, -1, 0],
         [0, 0, 0, -1]])
    b = numpy.array([350, 600, 0, 0, 0, 0, 0, 0]).reshape(-1, 1)
    c = 25 * make_column([1, 1, 1, 1])
    F = numpy.array([[0, 0], [0, 0], [-1, 0], [0, -1],
                     [0, 0], [0, 0], [0, 0], [0, 0]])
    # Quadratic objective term.
    Q = 2.0 * numpy.diag([153, 162, 162, 126])
    # Parameter box: 0 <= theta_i <= 1000 expressed as CRa theta <= CRb.
    CRa = numpy.vstack((numpy.eye(2), -numpy.eye(2)))
    CRb = numpy.array([1000, 1000, 0, 0]).reshape(-1, 1)
    H = numpy.zeros((F.shape[1], Q.shape[0]))
    program = MPQP_Program(A, b, c, H, Q, CRa, CRb, F)
    program.scale_constraints()
    return program
|
71d5d9b872572e05b86e640006c58cc407c76ec4
| 3,648,117
|
def snr(flux, axis=0):
    """ Calculates the S/N ratio of a spectra.
    Translated from the IDL routine der_snr.pro """
    signal = np.nanmedian(flux, axis=axis)
    # DER_SNR noise estimator: median absolute second difference at a lag
    # of two pixels, scaled to a Gaussian sigma.
    ahead = np.roll(flux, 2, axis=axis)
    behind = np.roll(flux, -2, axis=axis)
    scale = 1.482602 / np.sqrt(6.)
    noise = scale * np.nanmedian(np.abs(2. * flux - ahead - behind), axis=axis)
    return signal, noise, signal / noise
|
964362545e2fb8a0e7f15df71d90c5ce3e2f5815
| 3,648,119
|
def subtract_images(img_input, img_output, img_height, img_width):
    """Subtract input and output image and compute difference image and ela image"""
    # Transpose so the arrays line up channel-first before subtracting;
    # `np` may be numpy or cupy depending on args.cupy (see below).
    input_data = img_input.T
    output_data = img_output.T
    if len(input_data) != len(output_data):
        raise Exception("Input and Output image have different sizes!")
    # Absolute per-pixel difference, reshaped back to HxWx3 and clamped to
    # the displayable 0-255 range.
    diff = abs(input_data - output_data)
    diff = diff.reshape(img_height, img_width, 3)
    diff = np.clip(diff, 0, 255)
    # NOTE(review): `auto` and `args` are module-level globals not visible in
    # this block; when auto-scaling is on, args.multiplier is overwritten so
    # the brightest difference maps to 255 — confirm mutating args here is
    # intentional (it persists across calls).
    if auto:
        args.multiplier = np.divide(255, diff.max())
    diff_multiplied = diff * args.multiplier
    diff_multiplied = np.clip(diff_multiplied, 0, 255)
    # cupy arrays must be copied back to host memory (np.asnumpy) before PIL
    # can consume them; plain numpy arrays go straight in.
    if args.cupy:
        diff_img = Image.fromarray(np.asnumpy(diff).astype(np.uint8), 'RGB')
        diff_img_multiplied = Image.fromarray(np.asnumpy(diff_multiplied).astype(np.uint8), 'RGB')
    else:
        diff_img = Image.fromarray(diff.astype(np.uint8), 'RGB')
        diff_img_multiplied = Image.fromarray(diff_multiplied.astype(np.uint8), 'RGB')
    return diff_img, diff_img_multiplied
|
7b2b2df57055cc73bec85bed2a5bece8187ddcdf
| 3,648,120
|
import ctypes
def k4a_playback_get_track_name(playback_handle, track_index, track_name, track_name_size):
    """
    K4ARECORD_EXPORT k4a_buffer_result_t k4a_playback_get_track_name(k4a_playback_t playback_handle,
                                                                     size_t track_index,
                                                                     char *track_name,
                                                                     size_t *track_name_size);
    """
    func = record_dll.k4a_playback_get_track_name
    # Declare the C signature before calling so ctypes marshals correctly.
    func.restype = k4a_buffer_result_t
    func.argtypes = (
        k4a_playback_t,
        ctypes.c_size_t,
        ctypes.POINTER(ctypes.c_char),
        ctypes.POINTER(ctypes.c_size_t),
    )
    return func(playback_handle, track_index, track_name, track_name_size)
|
872c5aa73f9520f178fdbdfe47c314f9043282c0
| 3,648,121
|
def recommend_lowercase_d(data: pd.Series, **kwargs) -> int:
    """Return the recommended differencing order 'd' for a series.

    Parameters
    ----------
    data : pd.Series
        The data for which the differencing order needs to be calculated
    **kwargs : Keyword arguments forwarded to the differencing test:
        alpha : float, optional
            Significance Value, by default 0.05
        test : str, optional
            The test to use to test the order of differencing, by default 'kpss'
        max_d : int, optional
            maximum differencing order to try, by default 2

    Returns
    -------
    int
        The differencing order to use
    """
    return ndiffs(data, **kwargs)
|
a249ed104c00e62f386f82c5c4aaecc7cf0c4001
| 3,648,122
|
from typing import Dict
import json
def get_player_current_games_to_move(username: str) -> Dict:
    """Public method that returns the player's Daily Chess games where it
    is their turn to act.

    Parameters:
    username -- username of the player
    """
    response = _internal.do_get_request(f"/player/{username}/games/to-move")
    payload = response.data.decode('utf-8')
    return json.loads(payload)
|
1ce906d24fa278703d708d41ed753c2cb24b48d0
| 3,648,123
|
def get_rb_data_attribute(xmldict, attr):
    """Read integer attribute `attr` from a Blob Description dict.

    Parameters
    ----------
    xmldict : dict
        Blob Description Dictionary (attribute keys carry an '@' prefix)
    attr : str
        Attribute key

    Returns
    -------
    sattr : int
        Attribute value

    Raises
    ------
    KeyError
        When the attribute is absent from the description.
    """
    key = "@" + attr
    try:
        return int(xmldict[key])
    except KeyError:
        raise KeyError(
            f"Attribute @{attr} is missing from "
            "Blob Description. There may be some "
            "problems with your file"
        )
|
dfc48ad47f67b2303874154ce4a164a176c1f4bf
| 3,648,124
|
def tls_params(mqtt_config):
    """Return the TLS configuration parameters from a :class:`.MQTTConfig`
    object.
    Args:
        mqtt_config (:class:`.MQTTConfig`): The MQTT connection settings.
    Returns:
        dict: A dict {'ca_certs': ca_certs, 'certfile': certfile,
        'keyfile': keyfile} with the TLS configuration parameters, or None if
        no TLS connection is used.
    .. versionadded:: 0.6.0
    """
    tls = mqtt_config.tls
    # An unset hostname means TLS is disabled for this connection.
    if not tls.hostname:
        return None
    return {'ca_certs': tls.ca_file,
            'certfile': tls.client_cert,
            'keyfile': tls.client_key}
|
4b5d214a50fea60f5cb325fc7a0c93dfa9cb3c02
| 3,648,127
|
def is_admin():
    """Check if current user is an admin."""
    # flask.g raises AttributeError for unset attributes, so a default
    # covers the "flag never set" case.
    return getattr(flask.g, 'admin', False)
|
0922a93afacc4002068b60f1ab8a6594f0ddb44a
| 3,648,128
|
def statistic():
    """ RESTful CRUD Controller """
    # Delegate entirely to the shared CRUD controller.
    response = crud_controller()
    return response
|
02ed03d6d7d159046c12cbea98cc69c7bb0ae024
| 3,648,129
|
def segment_range_to_fragment_range(segment_start, segment_end, segment_size,
                                    fragment_size):
    """
    Takes a byterange spanning some segments and converts that into a
    byterange spanning the corresponding fragments within their fragment
    archives.
    Handles prefix, suffix, and fully-specified byte ranges.
    :param segment_start: first byte of the first segment
    :param segment_end: last byte of the last segment
    :param segment_size: size of an EC segment, in bytes
    :param fragment_size: size of an EC fragment, in bytes
    :returns: a 2-tuple (frag_start, frag_end) where
    * frag_start is the first byte of the first fragment, or None if this
      is a suffix byte range
    * frag_end is the last byte of the last fragment, or None if this is a
      prefix byte range
    """
    # HTTP byteranges are inclusive at both ends (bytes 200-300 is 101
    # bytes), while Python ranges are half-open — hence the +1/-1 below.
    # segment_start and (segment_end + 1) are multiples of segment_size,
    # so the integer division is exact.
    if segment_end is None:
        # Prefix range: unbounded on the right.
        frag_end = None
    elif segment_start is None:
        # Suffix range ("last N bytes"): the value is a length, not an
        # index, so no -1 here.
        frag_end = (segment_end + 1) // segment_size * fragment_size
    else:
        # Bounded range: convert the length back to a last-byte index.
        frag_end = (segment_end + 1) // segment_size * fragment_size - 1
    if segment_start is None:
        frag_start = None
    else:
        frag_start = segment_start // segment_size * fragment_size
    return (frag_start, frag_end)
|
e20c9bb55d9d3e90beb20bed7a170d1066611ba9
| 3,648,131
|
import collections
def update_dict(d, u):
    """ Recursively update dict d with values from dict u.
    Args:
        d: Dict to be updated (mutated in place)
        u: Dict with values to use for update
    Returns: Updated dict
    """
    # Local import: collections.Mapping was removed in Python 3.10; the
    # abstract base class lives in collections.abc.
    from collections.abc import Mapping
    for k, v in u.items():
        if isinstance(v, Mapping):
            # Empty container of the same mapping type, so nested keys
            # merge instead of replacing the whole sub-dict.
            default = v.copy()
            default.clear()
            # The original called the non-existent d._get(); dict.get is
            # what was intended.
            d[k] = update_dict(d.get(k, default), v)
        else:
            d[k] = v
    return d
|
e0228d3d0946f20b4bad8bfbc94a725f62bddfc5
| 3,648,132
|
def get_dataset(args, tokenizer, evaluate=False):
    """Convert the text file into the GPT-2 TextDataset format.
    Args:
        args: Namespace with data-file paths and dataset options.
        tokenizer: The GPT-2 tokenizer object.
        evaluate: Whether to evaluate on the dataset (uses the eval file).
    """
    file_path = args.eval_data_file if evaluate else args.train_data_file
    # One example per line vs. contiguous blocks of tokenized text.
    if args.line_by_line:
        return LineByLineTextDataset(
            tokenizer=tokenizer,
            file_path=file_path,
            block_size=args.block_size,
        )
    return TextDataset(
        tokenizer=tokenizer,
        file_path=file_path,
        block_size=args.block_size,
        overwrite_cache=args.overwrite_cache,
    )
|
493bd0a5ca548b08052717d7e343e4f8b8911cb1
| 3,648,133
|
def test_function_decorators():
    """Function Decorators."""
    # A decorator is nothing more than a callable that takes a function and
    # returns a replacement for it. Start with a plain function...
    def greeting(name):
        return "Hello, {0}!".format(name)

    # ...and a wrapper factory that surrounds its output with <p> tags.
    def decorate_with_p(func):
        def function_wrapper(name):
            return "<p>{0}</p>".format(func(name))
        return function_wrapper

    # Manual decoration: call the decorator ourselves and keep the result.
    wrapped_greeting = decorate_with_p(greeting)
    assert wrapped_greeting('John') == '<p>Hello, John!</p>'  # With decorator.
    assert greeting('John') == 'Hello, John!'  # Without decorator.

    # Python's @-syntax is syntactic sugar for exactly the call above.
    @decorate_with_p
    def greeting_with_p(name):
        return "Hello, {0}!".format(name)

    assert greeting_with_p('John') == '<p>Hello, John!</p>'

    # Decorators stack. A second decorator wraps the output in a <div>.
    def decorate_with_div(func):
        def function_wrapper(text):
            return "<div>{0}</div>".format(func(text))
        return function_wrapper

    # Equivalent to decorate_with_div(decorate_with_p(greeting_with_div_p));
    # order matters: the decorator nearest the function is applied first,
    # so <p> ends up inside <div>.
    @decorate_with_div
    @decorate_with_p
    def greeting_with_div_p(name):
        return "Hello, {0}!".format(name)

    assert greeting_with_div_p('John') == '<div><p>Hello, John!</p></div>'

    # The two tag decorators are nearly identical — a parameterized
    # decorator factory removes the duplication: tags(name) builds a
    # decorator for any tag.
    def tags(tag_name):
        def tags_decorator(func):
            def func_wrapper(name):
                return "<{0}>{1}</{0}>".format(tag_name, func(name))
            return func_wrapper
        return tags_decorator

    @tags('div')
    @tags('p')
    def greeting_with_tags(name):
        return "Hello, {0}!".format(name)

    assert greeting_with_tags('John') == '<div><p>Hello, John!</p></div>'
|
03b3ba299ceb7a75b0de1674fabe892243abd8b3
| 3,648,134
|
def make_loci_field(loci):
    """ make string representation of contig loci """
    # Join each locus code with the module-level secondary delimiter.
    return c_delim2.join(locus.code for locus in loci)
|
a643f70f02c3b79214d76f82bb4379ea0c0e1e84
| 3,648,135
|
import tqdm
def compute_wilderness_impact1(ground_truth_all, prediction_all, video_list, known_classes, tiou_thresholds=np.linspace(0.5, 0.95, 10)):
    """ Compute wilderness impact for each video (WI=Po/Pc < 1)

    Label convention: 0 is the unknown class; known classes are 1-indexed.
    "x2y" in the names below means a ground truth of kind x predicted as
    kind y (u=unknown, k=known, bg=background, i.e. tIoU below threshold).

    Returns:
        wi: array of shape (len(tiou_thresholds), len(known_classes)),
            WI = FPo / (TPc + FPc) per threshold and known class.
        stats: dict with the raw TP/FP indicator arrays plus every
            prediction's confidence score and max tIoU.
    """
    # Initialize true positive and false positive indicator vectors, one
    # column per prediction across the whole dataset.
    tp_u2u = np.zeros((len(tiou_thresholds), len(prediction_all)))
    tp_k2k = np.zeros((len(tiou_thresholds), len(known_classes), len(prediction_all)))  # TPc in WACV paper
    fp_u2k = np.zeros((len(tiou_thresholds), len(known_classes), len(prediction_all)))  # FPo in WACV paper
    fp_k2k = np.zeros((len(tiou_thresholds), len(known_classes), len(prediction_all)))  # FPc in WACV paper
    fp_k2u = np.zeros((len(tiou_thresholds), len(prediction_all)))
    fp_bg2u = np.zeros((len(tiou_thresholds), len(prediction_all)))
    fp_bg2k = np.zeros((len(tiou_thresholds), len(known_classes), len(prediction_all)))
    ground_truth_by_vid = ground_truth_all.groupby('video-id')
    prediction_by_vid = prediction_all.groupby('video-id')
    def _get_predictions_with_vid(prediction_by_vid, video_name):
        """Get all predictions of the given video; empty DataFrame if none."""
        try:
            return prediction_by_vid.get_group(video_name).reset_index(drop=True)
        except KeyError:  # narrowed from a bare except: only the group lookup can fail
            return pd.DataFrame()
    # compute the TP, FPo and FPc for each predicted segment.
    vidx_offset = 0
    all_scores, all_max_tious = [], []
    # Fix: the module does `import tqdm`, so the progress-bar callable is
    # tqdm.tqdm — calling the module object raised a TypeError.
    for video_name in tqdm.tqdm(video_list, total=len(video_list), desc='Compute WI'):
        ground_truth = ground_truth_by_vid.get_group(video_name).reset_index()
        prediction = _get_predictions_with_vid(prediction_by_vid, video_name)
        if prediction.empty:
            # No predictions for this video: nothing to score or offset
            # (the original extended the lists by len(prediction) == 0 items).
            continue
        all_scores.extend(prediction['score'].values.tolist())
        # One lock per (threshold, gt) so each ground truth is matched once.
        lock_gt = np.zeros((len(tiou_thresholds), len(ground_truth)))
        for idx, this_pred in prediction.iterrows():
            tiou_arr = segment_iou(this_pred[['t-start', 't-end']].values,
                                   ground_truth[['t-start', 't-end']].values)
            # attach each prediction with the gt that has maximum tIoU
            max_iou = tiou_arr.max()
            max_jdx = tiou_arr.argmax()
            all_max_tious.append(max_iou)
            label_pred = this_pred['label']
            label_gt = int(ground_truth.loc[max_jdx]['label'])
            for tidx, tiou_thr in enumerate(tiou_thresholds):
                if max_iou > tiou_thr:
                    if label_pred == label_gt and lock_gt[tidx, max_jdx] == 0:
                        if label_gt == 0:
                            tp_u2u[tidx, vidx_offset + idx] = 1  # true positive (u2u), not used by WI by default
                        else:
                            tp_k2k[tidx, label_pred-1, vidx_offset + idx] = 1  # true positive (k2k)
                        lock_gt[tidx, max_jdx] = 1  # lock this ground truth
                    else:
                        if label_gt == 0:  # false positive (u2k)
                            fp_u2k[tidx, label_pred-1, vidx_offset + idx] = 1
                        else:  # false positive (k2k, k2u)
                            if label_pred == 0:
                                fp_k2u[tidx, vidx_offset + idx] = 1
                            else:
                                fp_k2k[tidx, label_pred-1, vidx_offset + idx] = 1
                else:  # GT is defined to be background (known), must be FP
                    if label_pred == 0:
                        fp_bg2u[tidx, vidx_offset + idx] = 1
                    else:
                        fp_bg2k[tidx, label_pred-1, vidx_offset + idx] = 1
        # move the offset
        vidx_offset += len(prediction)
    stats = {'tp_k2k': tp_k2k, 'tp_u2u': tp_u2u, 'fp_k2k': fp_k2k, 'fp_k2u': fp_k2u, 'fp_u2k': fp_u2k, 'fp_bg2k': fp_bg2k, 'fp_bg2u': fp_bg2u,
             'scores': all_scores, 'max_tious': all_max_tious}
    # Here we assume the background detections (small tIoU) are from the background class, which is a known class
    fp_k2u += fp_bg2u
    fp_k2k += fp_bg2k
    # Fix: np.float was removed in NumPy 1.24; the builtin float is the
    # documented replacement for the alias.
    tp_k2k_sum = np.sum(tp_k2k, axis=-1).astype(float)
    fp_u2k_sum = np.sum(fp_u2k, axis=-1).astype(float)
    fp_k2k_sum = np.sum(fp_k2k, axis=-1).astype(float)
    wi = fp_u2k_sum / (tp_k2k_sum + fp_k2k_sum + 1e-6)
    return wi, stats
|
5f921d93993df1431c7b703df591f710cb2b5628
| 3,648,136
|
def require_client(func):
    """
    Decorator for class methods that require a client either through keyword
    argument, or through the object's client attribute.
    Returns:
        A wrapped version of the function. The object's client attribute is
        passed in as the client keyword if None is provided.
    Raises:
        AssertionError : Raised when the method is called without a client
        keyword set and no client attribute.
    """
    @wraps(func)
    async def wrapper(self, *args, **kwargs):
        # Explicit keyword wins; fall back to the instance attribute.
        resolved = kwargs.get("client", None) or getattr(self, "client", None)
        if resolved is None:
            cls_name = self.__class__.__name__
            msg = (
                "{0} object does not have a client -- {0}.{1} will do "
                "nothing. To set a client, initialize the object with "
                "{0}(..., client=your_client). Alternatively, you can "
                "use the client keyword argument in the method."
            ).format(cls_name, func.__name__)
            raise AssertionError(msg)
        kwargs["client"] = resolved
        return await func(self, *args, **kwargs)
    return wrapper
|
28cfd4821405a132cde4db8b95c68987db76b99d
| 3,648,138
|
import base64
def format_template(string, tokens=None, encode=None):
    """Create an encoding from given string template."""
    tokens = {} if tokens is None else tokens
    # Templates may reference the module-level config and the token dict.
    result = string.format(config=config, tokens=tokens)
    if encode == "base64":
        result = base64.b64encode(result.encode("utf-8")).decode("utf-8")
    else:
        assert encode is None, f"Unknown encoding {encode}"
    return result
|
91dafa48c0a9f17bbd663bcc14cfb800f6f57877
| 3,648,140
|
import six
def make_datastore_api(client):
    """Create an instance of the GAPIC Datastore API.
    :type client: :class:`~google.cloud.datastore.client.Client`
    :param client: The client that holds configuration details.
    :rtype: :class:`.datastore.v1.datastore_client.DatastoreClient`
    :returns: A datastore API instance with the proper credentials.
    """
    parsed = six.moves.urllib_parse.urlparse(client._base_url)
    host = parsed.netloc
    # Only https endpoints get a credentialed secure channel; anything
    # else (e.g. a local emulator) uses a plaintext channel.
    if parsed.scheme == 'https':
        channel = make_secure_channel(
            client._credentials, DEFAULT_USER_AGENT, host)
    else:
        channel = insecure_channel(host)
    return GAPICDatastoreAPI(
        channel=channel, lib_name='gccl', lib_version=__version__)
|
63ab5acf85bcd2df9d266d285887c624a7554c64
| 3,648,142
|
def _mul_certain(left, right):
    """Multiplies two values, where one is certain and the other is uncertain,
    and returns the result."""
    # Exactly one operand is expected to be a plain number ("certain") and
    # the other an Uncertain instance.
    # NOTE(review): the delta is carried over unscaled; for c*(v ± d),
    # standard error propagation would scale d by |c| — confirm whether
    # Uncertain stores relative deltas or this is intentional.
    if _is_number(left):
        # left is the certain scalar; scale the uncertain value.
        return Uncertain(
            value=right.value * left,
            delta=right.delta,
        )
    # Otherwise left is the Uncertain operand and right is the scalar.
    return Uncertain(
        value=left.value * right,
        delta=left.delta,
    )
|
ae6159d1f59daea13794aa0ee0fb8baadf647471
| 3,648,143
|
def felica_RequestSystemCode(): # -> (int, List[int]):
    """
    Sends FeliCa Request System Code command
    :returns: (status, systemCodeList)
            status      1: Success, < 0: error
            systemCodeList  System Code list (Array length should longer than 16)
    """
    # Command payload: opcode followed by the first 8 bytes of the card's
    # IDm; _felicaIDm is module state set by an earlier polling call.
    cmd = bytearray([FELICA_CMD_REQUEST_SYSTEM_CODE]) + _felicaIDm[:8]
    status, response = felica_SendCommand(cmd)
    responseLength = len(response)
    if (status != 1):
        DMSG("Request System Code command failed\n")
        return -1, []
    # NOTE(review): response[9] is read before the length check below, so a
    # short (<10 byte) response raises IndexError rather than returning -2
    # — confirm whether felica_SendCommand guarantees a minimum length.
    numSystemCode = response[9]
    # length check
    if (responseLength < 10 + 2 * numSystemCode):
        DMSG("Request System Code command failed (wrong response length)\n")
        return -2, []
    systemCodeList = []
    # Each system code is a big-endian 16-bit value starting at offset 10.
    for i in range(numSystemCode):
        systemCodeList.append((response[10 + i * 2] << 8) + response[10 + i * 2 + 1])
    return 1, systemCodeList
|
ff380077daef948a6a3ad274a9accccea22fcba1
| 3,648,144
|
from typing import Optional
from typing import Callable
import types
def get_regularizer(
    regularizer_type: str, l_reg_factor_weight: float
) -> Optional[Callable[[tf.Tensor], Optional[tf.Tensor]]]:
    """Gets a regularizer of a given type and scale.
    Args:
        regularizer_type: One of types.RegularizationType
        l_reg_factor_weight: Scale for regularization.
    Returns:
        A function with weights parameter that applies regularization,
        or None when regularization is disabled.
    """
    # Guard clauses instead of an if/elif chain.
    if regularizer_type == types.RegularizationType.NONE:
        return None
    if regularizer_type == types.RegularizationType.L1:
        return slim.l1_regularizer(scale=l_reg_factor_weight)
    if regularizer_type == types.RegularizationType.L2:
        return slim.l2_regularizer(scale=l_reg_factor_weight)
    raise ValueError(f"Unknown regularization type {regularizer_type}")
|
c9f7f8dd227a7446c9b15aebf1e2f1065a3d810d
| 3,648,145
|
def metadata():
    """Returns shared metadata instance with naming convention."""
    # Deterministic constraint names keep schema migrations stable.
    conventions = {
        'ix': 'ix_%(column_0_label)s',
        'uq': 'uq_%(table_name)s_%(column_0_name)s',
        'ck': 'ck_%(table_name)s_%(constraint_name)s',
        'fk': 'fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s',
        'pk': 'pk_%(table_name)s',
    }
    return MetaData(naming_convention=conventions)
|
25363d243a2474dfbd43d57ff794899bfe30a44d
| 3,648,146
|
import time
def date_rss(dte=None):
    """Format a date in RSS (RFC 822) style, e.g. 'Fri, 03 Jan 2020 15:30:45 +0000'.

    :param dte: object with a timetuple() method (datetime/date); the
        current local time is used when None.
    :return: the formatted date string.
    """
    # The original assigned either the `time` module itself or the float
    # returned by time.mktime() and then called .strftime on it, which
    # raised AttributeError for every non-None dte. Normalize to a
    # struct_time first and format that.
    when = time.localtime() if dte is None else dte.timetuple()
    return time.strftime('%a, %d %b %Y %H:%M:%S %z', when)
|
6c343b675be5b89051fe9d76a9fb6437e89611bb
| 3,648,147
|
def generate_parallelogrammatic_board(width=5, height=5):
    """
    Creates a board with a shape of a parallelogram.
    Width and height specify the size (in fields) of the board.
    """
    # Build one fresh list per column so rows never share storage.
    board = []
    for _ in range(width):
        board.append([1 for _ in range(height)])
    return board
|
1c9bd6e6e26f6693b434d44e6dbe4085ba9236b8
| 3,648,149
|
def function_is_even(latex_dict: dict) -> str:
    """Validation step for even functions, e.g. sympy.cos(x) == sympy.cos(-x),
    i.e. sympy.cos(x) - sympy.cos(-x) == 0.

    Currently a placeholder: no symbolic check is implemented, so the
    function always reports that no check was performed.

    >>> latex_dict = {'input': [], 'feed': [], 'output': []}
    >>> function_is_even(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
|
0169440f0ebe373efdc18b79a1b8f48220fada13
| 3,648,150
|
def summarize_curriculum(
    curriculum: AbstractCurriculum,
) -> str:
    """
    Generate a detailed string summarizing the contents of the curriculum.
    :return: A string that would print as a formatted outline of this curriculum's contents.
    """
    def _plural(count: int, noun: str) -> str:
        # "1 task" but "2 tasks"
        suffix = "" if count == 1 else "s"
        return f"{count} {noun}{suffix}"

    block_lines = []
    for b_idx, block in enumerate(curriculum.learn_blocks_and_eval_blocks()):
        task_lines = []
        for t_idx, task_block in enumerate(block.task_blocks()):
            variant_lines = []
            for v_idx, variant in enumerate(task_block.task_variants()):
                # Episode count takes precedence; otherwise report steps.
                if variant.num_episodes is not None:
                    amount = f"{_plural(variant.num_episodes, 'episode')}."
                else:
                    amount = f"{_plural(variant.num_steps, 'step')}."
                variant_lines.append(
                    f"\n\t\t\tTask variant {v_idx + 1}, "
                    f"{variant.task_label} - {variant.variant_label}: " + amount
                )
            task_lines.append(
                f"\n\t\tTask {t_idx + 1}, {task_block.task_label}: "
                f"{_plural(len(variant_lines), 'variant')}"
                + "".join(variant_lines)
            )
        block_lines.append(
            f"\n\n\tBlock {b_idx + 1}, "
            f"{'learning' if block.is_learning_allowed else 'evaluation'}: "
            f"{_plural(len(task_lines), 'task')}"
            + "".join(task_lines)
        )
    return (
        f"This curriculum has {_plural(len(block_lines), 'block')}"
        + "".join(block_lines)
    )
|
010f3fb38473d0a572616204c381bde04f83fb0e
| 3,648,151
|
def get_solarsample():
    """
    NAME:
       get_solarsample
    PURPOSE:
       get the RC sample at solar abundances
    INPUT:
       None so far
    OUTPUT:
       sample
    HISTORY:
       2015-03-18 - Started - Bovy (IAS)
    """
    # Start from the full red-clump sample, then cut to the solar box.
    data = get_rcsample()
    # [Fe/H] window evaluated at zero offset.
    feh_lo = _solar_lowfeh(0.)
    feh_hi = _solar_highfeh(0.)
    feh = data[_FEHTAG]
    afe = data[_AFETAG]
    # Element-wise AND via multiplication of the boolean masks.
    mask = ((feh > feh_lo) * (feh <= feh_hi)
            * (afe > _solar_lowafe(feh))
            * (afe <= _solar_highafe(feh)))
    return data[mask]
|
e511dbfdf013528f1bba3c7d794d776a2791e918
| 3,648,152
|
from typing import List
def perform_context_selection(
    estimation_tasks: List[EstimationTask],
) -> List[EstimationTask]:
    """Changes the circuits in estimation tasks to involve context selection.
    Args:
        estimation_tasks: list of estimation tasks
    Returns:
        New estimation tasks with the context-selection circuit appended and
        the operator replaced by its frame operator.
    """
    transformed = []
    for task in estimation_tasks:
        selection_circuit, frame_operator = get_context_selection_circuit_for_group(
            task.operator
        )
        transformed.append(
            EstimationTask(
                frame_operator,
                task.circuit + selection_circuit,
                task.number_of_shots,
            )
        )
    return transformed
|
814a5dd72a6c4b76c52af606c3136a6fd1cb46d9
| 3,648,153
|
import torch
def where(condition, x, y):
    """Wrapper of `torch.where`.
    Parameters
    ----------
    condition : DTensor of bool
        Where True, yield x, otherwise yield y.
    x : DTensor
        The first tensor.
    y : DTensor
        The second tensor.
    """
    # Direct element-wise select from torch.
    selected = torch.where(condition, x, y)
    return selected
|
0ec419e19ab24500f1be6c511eb472d1d929fe2c
| 3,648,154
|
def ukhls_wave_prefix(columns, year):
    """ Determine wave prefix for ukhls wave data.
    Parameters
    ----------
    columns : list
        A list of column names to add wave prefixes to.
    year : int
        Which wave year is being processed.
    Returns
    -------
    columns : list
        Column names with wave prefixes added (mutated in place).
    """
    prefix = get_wave_letter(year) + "_"
    # Identifier columns keep their original names.
    skip = {"pidp"}
    # Assign by index so the caller's list object is updated too.
    for idx, name in enumerate(columns):
        if name not in skip:
            columns[idx] = prefix + name
    return columns
|
726cc3a153ad9c43ebd99b96c405ab4bbd8ff56c
| 3,648,155
|
from typing import Iterable
from typing import Optional
from typing import List
from typing import cast
def sort_converters(converters: Iterable[Optional[GenericConverter]]) -> List[GenericConverter]:
    """
    Sort a list of converters according to their priority.

    Falsy entries (e.g. ``None``) are dropped; the remaining converters are
    ordered from highest to lowest ``priority``.
    """
    present = cast(Iterable[GenericConverter], (c for c in converters if c))
    return sorted(present, key=lambda conv: conv.priority, reverse=True)
|
8112bfe4da1154c0b0e5aa421cf3c6d90148cbed
| 3,648,156
|
def createPolygon(fire):
    """
    create a Polygon object from list of points

    Uses the exterior ring (first coordinate list) of the fire's GeoJSON
    geometry.
    """
    exterior_ring = fire["geometry"]["coordinates"][0]
    vertices = [tuple(coordinate) for coordinate in exterior_ring]
    return Polygon(vertices)
|
ce985b494d0d56f9b44a684ab187fb290d0c5d4f
| 3,648,159
|
def change_anim_nodes(node_object="", in_tangent='linear', out_tangent='linear'):
    """
    Changes the setting on all anim nodes.
    :param node_object: <str> node whose connected animation curves are retyped.
    :param in_tangent: <str> in-tangent type applied to all keys (e.g. 'linear').
    :param out_tangent: <str> out-tangent type applied to all keys.
    :return: <bool> True for success. <bool> False for failure.
    """
    # Collect every animation curve connected to the object, then retype all
    # of their key tangents in one keyTangent call.
    anim_nodes = object_utils.get_connected_anim(node_object)
    cmds.keyTangent(anim_nodes, itt=in_tangent, ott=out_tangent)
    return True
|
649f740669c2f292bfc180895a2ce5d295b2ef68
| 3,648,160
|
def is_x_degenerated(x, codon, table):
    """Determine if codon is x-fold degenerated.
    @param codon the codon
    @param table code table id
    @param true if x <= the degeneration of the codon
    """
    # Degeneracy = number of alternative codons coding the same amino acid.
    degeneracy = len(altcodons(codon, table))
    return x <= degeneracy
|
b6a6bd8afc21a8e9b94dc7aa086255b6dfa44e85
| 3,648,161
|
import torch
def get_long_tensor(tokens_list, batch_size, pad_id=constant.PAD_ID):
    """ Convert (list of )+ tokens to a padded LongTensor. """
    # Walk down the nesting levels, recording the max length at each level.
    dims = []
    level = tokens_list
    while isinstance(level[0], list):
        dims.append(max(len(item) for item in level))
        level = [inner for item in level for inner in item]
    # Allocate a fully padded tensor, then copy each row's real tokens in.
    padded = torch.LongTensor(batch_size, *dims).fill_(pad_id)
    for row, seq in enumerate(tokens_list):
        padded[row, :len(seq)] = torch.LongTensor(seq)
    return padded
|
d088277fcd8f599d142ff7bdb1b8e018c5b6c1cb
| 3,648,162
|
def business():
    """
    show business posts

    Renders the 'business.html' template with all posts whose category
    is "Business".
    """
    business = Post.query.filter_by(category="Business").all()
    return render_template('business.html', post=business)
|
2fd3a46391681a25cdb3291f0a92e110dc0d4eb3
| 3,648,163
|
def tile_image(x_gen, tiles=()):
    """Tiled image representations.
    Args:
        x_gen: 4D array of images (n x w x h x 3)
        tiles (int pair, optional): number of rows and columns
    Returns:
        Array of tiled images (1 x W x H x 3)
    """
    count = x_gen.shape[0]
    if tiles:
        n_rows, n_cols = tiles
    else:
        # Pick the largest divisor of `count` not exceeding sqrt(count) so
        # the grid is as close to square as possible.
        for candidate in range(int(np.sqrt(count)), 0, -1):
            if count % candidate == 0:
                break
        n_rows = candidate
        n_cols = count // candidate
    strips = [np.hstack(x_gen[col * n_rows:(col + 1) * n_rows])
              for col in range(n_cols)]
    return np.expand_dims(np.vstack(strips), 0)
|
d95a9cd12f6ba4239724b84efd701575678f217f
| 3,648,164
|
def note_updated_data(note, role):
    """Return the data for updated date
    :param note: the note that holds the data
    :type note: :class:`jukeboxcore.djadapter.models.Note`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: data for the updated date
    :rtype: depending on role
    :raises: None
    """
    if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
        dt = note.date_updated
        return dt_to_qdatetime(dt)
    # Any other role implicitly returns None.
|
987e78ae0c5a7a4570b619520d8b20f592adae07
| 3,648,165
|
def _xyz_atom_coords(atom_group):
    """Use this method if you need to identify if CB is present in atom_group and if not return CA.

    Returns (inf, inf, inf) when neither CA nor CB is found.
    """
    coords = {}
    for atom in atom_group.atoms():
        label = atom.name.strip()
        if label in ("CA", "CB"):
            coords[label] = atom.xyz
    # Prefer CB over CA; fall back to infinities when both are absent.
    if "CB" in coords:
        return coords["CB"]
    if "CA" in coords:
        return coords["CA"]
    return float("inf"), float("inf"), float("inf")
|
fd7ef43b1935f8722b692ad28a7e8b309033b720
| 3,648,166
|
import pathlib
def InitBareRepository(path):
    """Create a bare git repository at *path* and return the Repo object.

    Parent directories are created as needed.

    Raises:
        TypeError: if *path* is not a str.
    """
    # Explicit check instead of `assert`: assertions are stripped when Python
    # runs with -O, which would silently skip this validation.
    if not isinstance(path, str):
        raise TypeError("path must be a str, got %r" % type(path).__name__)
    pathlib.Path(path).parent.mkdir(parents=True, exist_ok=True)
    return git.Repo.init(path, bare=True)
|
84b321c7b27ee8ab7101339671361f45ec474e91
| 3,648,167
|
def get_http_exception(code):
    """Return an exception class based on its code.

    Returns None when *code* is not a registered HTTP status code or cannot
    be converted to int.
    """
    try:
        return http_exceptions[int(code)]
    except (KeyError, TypeError, ValueError):
        # Narrowed from a bare `except`: only a failed lookup or a failed
        # int() conversion should map to None; any other error must surface.
        return None
|
f7b5077331b4425d5ee49a8c61849bb6ba822049
| 3,648,168
|
from typing import Union
def _gradients_input(model: Union[tf.keras.models.Model, 'keras.models.Model'],
                     x: tf.Tensor,
                     target: Union[None, tf.Tensor]) -> tf.Tensor:
    """
    Calculates the gradients of the target class output (or the output if the output dimension is equal to 1)
    with respect to each input feature.
    Parameters
    ----------
    model
        Tensorflow or keras model.
    x
        Input data point.
    target
        Target for which the gradients are calculated if the output dimension is higher than 1.
    Returns
    -------
    Gradients for each input feature.
    """
    with tf.GradientTape() as tape:
        # Watch the raw input so gradients w.r.t. x are recorded even though
        # x is not a trainable variable.
        tape.watch(x)
        preds = _run_forward(model, x, target)
    # d(preds)/d(x): one gradient value per input feature, same shape as x.
    grads = tape.gradient(preds, x)
    return grads
|
816c1f932533de4cdfd96525c44a2c11bae60254
| 3,648,170
|
from typing import OrderedDict
def get_databases():
    """Return an ordered dict of (dbname: database). The order is
    according to search preference, the first DB to contain a document
    should be assumed to be the authoritative one."""
    # SQL-backed stores are listed first so they win over couch databases
    # when the same document id exists in both.
    sql_dbs = [
        _SQLDb(
            XFormInstanceSQL._meta.db_table,
            lambda id_: XFormInstanceSQL.get_obj_by_id(id_),
            "XFormInstance",
            lambda doc: XFormInstanceSQLRawDocSerializer(doc).data,
        ),
        _SQLDb(
            CommCareCaseSQL._meta.db_table,
            lambda id_: CommCareCaseSQL.get_obj_by_id(id_),
            "CommCareCase",
            lambda doc: CommCareCaseSQLRawDocSerializer(doc).data,
        ),
        _SQLDb(
            SQLLocation._meta.db_table,
            lambda id_: SQLLocation.objects.get(location_id=id_),
            'Location',
            lambda doc: doc.to_json()
        ),
    ]
    all_dbs = OrderedDict()
    for db in sql_dbs:
        all_dbs[db.dbname] = db
    # Couch databases come after the SQL ones, sorted by name so the
    # resulting order is deterministic.
    couchdbs_by_name = couch_config.all_dbs_by_db_name
    for dbname in sorted(couchdbs_by_name):
        all_dbs[dbname] = _CouchDb(couchdbs_by_name[dbname])
    return all_dbs
|
01180d4cafbcf0b1d48e7c7554069a7ee5bf1a65
| 3,648,172
|
def list_examples():
    """List all examples.

    On GET: render the example list plus an empty form.
    On a valid POST: persist a new ExampleModel and redirect back here
    (POST-redirect-GET), flashing success or a read-only-mode notice.
    """
    examples = ExampleModel.query()
    form = ExampleForm()
    if form.validate_on_submit():
        example = ExampleModel(
            example_name=form.example_name.data,
            example_description=form.example_description.data,
            added_by=users.get_current_user()
        )
        try:
            example.put()
            example_id = example.key.id()
            flash(u'Example %s successfully saved.' % example_id, 'success')
            return redirect(url_for('list_examples'))
        except CapabilityDisabledError:
            # Datastore can be read-only during App Engine maintenance.
            flash(u'App Engine Datastore is currently in read-only mode.', 'info')
            return redirect(url_for('list_examples'))
    return render_template('list_examples.html', examples=examples, form=form)
|
740e7005e1cadae22ba77095b43dadd6b4219012
| 3,648,174
|
import re
def parse_direct_mention(message_text):
    """
    Finds a direct mention (a mention that is at the beginning) in message text
    and returns the user ID which was mentioned. If there is no direct mention, returns None
    """
    match = re.search(_MENTION_REGEX, message_text)
    if match is None:
        return None, None
    # group(1) is the mentioned username; group(2) is the remaining message.
    return match.group(1), match.group(2).strip()
|
b442dc276cde0e28b56fbf855999feaeb199bff2
| 3,648,175
|
import types
def prepare_deep(schema: types.Schema, schemas: types.Schemas):
    """
    Resolve $ref and merge allOf including for object properties and items.
    Assume the schema is a valid JSONSchema.
    Args:
        schema: The schema to prepare.
        schemas: The schemas from which to resolve any $ref.
    Returns:
        The prepared schema.
    """
    # Resolve the top-level $ref / allOf first.
    schema = prepare(schema=schema, schemas=schemas)
    # Resolve $ref in any properties (recurses one level per property).
    properties = schema.get(types.OpenApiProperties.PROPERTIES, None)
    if properties is not None:
        for name, prop_schema in properties.items():
            properties[name] = prepare_deep(schema=prop_schema, schemas=schemas)
    # Resolve $ref of any items
    items_schema = peek.items(schema=schema, schemas={})
    if items_schema is not None:
        schema[types.OpenApiProperties.ITEMS] = prepare_deep(
            schema=items_schema, schemas=schemas
        )
    return schema
|
e77f7f0e59a6400c2e7df78b0ec2a14bcc0b3ea6
| 3,648,176
|
def _resize_and_center_fundus(image, diameter):
    """
    Helper function for scale normalizing image.

    Crops the image to a square around the detected circular contour,
    rescales it so the detected region spans `diameter` pixels, then pads
    with black so the result is exactly `diameter` x `diameter`.
    Returns None when no contour can be found.
    """
    copy = image.copy()
    # Find largest contour in image.
    contours = _find_contours(image)
    # Return unless we have gotten some result contours.
    if contours is None:
        return None
    center, radius = contours
    # Calculate the min and max-boundaries for cropping the image.
    x_min = max(0, int(center[0] - radius))
    y_min = max(0, int(center[1] - radius))
    z = int(radius*2)
    x_max = x_min + z
    y_max = y_min + z
    # Crop the image.
    copy = copy[y_min:y_max, x_min:x_max]
    # Scale the image.
    fx = fy = (diameter / 2) / radius
    copy = cv2.resize(copy, (0, 0), fx=fx, fy=fy)
    # Add padding to image.
    shape = copy.shape
    # Get the border shape size.
    top = bottom = int((diameter - shape[0])/2)
    left = right = int((diameter - shape[1])/2)
    # Add 1 pixel if necessary.
    if shape[0] + top + bottom == diameter - 1:
        top += 1
    if shape[1] + left + right == diameter - 1:
        left += 1
    # Define border of the image.
    border = [top, bottom, left, right]
    # Add border.
    copy = cv2.copyMakeBorder(copy, *border,
                              borderType=cv2.BORDER_CONSTANT,
                              value=[0, 0, 0])
    # Return the image.
    return copy
|
f301740cec8ca423334ffd127005b90f54105ce5
| 3,648,177
|
def _pad_X_delta(X, delta, indices, padded_group_size):
    """Currently Unused.

    Gather the rows of X and delta selected by `indices`, then zero-pad
    both along axis 0 up to `padded_group_size` rows.
    """
    X_sel = onp.take(X, indices, axis=0)
    missing_rows = padded_group_size - X_sel.shape[0]
    X_sel = onp.pad(X_sel, [(0, missing_rows), (0, 0)])
    delta_sel = onp.take(delta, indices, axis=0)
    delta_sel = onp.pad(delta_sel, (0, padded_group_size - delta_sel.shape[0]))
    return X_sel, delta_sel
|
d286b922efb6482382af8cd804c5e51b15d93088
| 3,648,178
|
def union(x, y=None):
    """Get sorted list of elements combined for two iterables."""
    x, y = de_list_pair(x, y)
    combined = set(x).union(y)
    return sorted(combined)
|
c24deb82e60569196e7a2d691db192c0ffcf91dd
| 3,648,179
|
from typing import get_origin
from typing import Tuple
def is_tuple(typ) -> bool:
    """
    Test if the type is `typing.Tuple`.
    """
    try:
        # Generic aliases (Tuple[int, str], tuple[int]) expose tuple as origin.
        return issubclass(get_origin(typ), tuple)
    except TypeError:
        # get_origin returned None (not a generic alias): accept the bare
        # Tuple / tuple names themselves.
        return typ is Tuple or typ is tuple
|
c8c75f4b1523971b20bbe8c716ced53199150b95
| 3,648,180
|
import functools
import unittest
def _skip_if(cond, reason):
    """Skip test if cond(self) is True"""
    def decorator(impl):
        @functools.wraps(impl)
        def wrapper(self, *args, **kwargs):
            # Raising SkipTest makes unittest report the test as skipped.
            if cond(self):
                raise unittest.SkipTest(reason)
            impl(self, *args, **kwargs)
        return wrapper
    return decorator
|
4141cc1f99c84633bdf2e92941d9abf2010c11f6
| 3,648,181
|
def permute_columns(df,
                    column_to_order: str,
                    ind_permute: bool = False,
                    columns_to_permute: list = []):
    """
    Author: Allison Wu
    Description: This function permutes the columns specified in columns_to_permute
    :param df: input Spark DataFrame.
    :param column_to_order: column used to assign a stable, ordered row id.
    :param ind_permute: unused in the current implementation.
    :param columns_to_permute: columns whose values are shuffled across rows.
    :return: permuted_df
    """
    # Two row numberings: `id` follows column_to_order, `rand_id` is random.
    window = Window.partitionBy().orderBy(col(column_to_order))
    window_rand = Window.partitionBy().orderBy(rand())
    df = df. \
        withColumn('id', func.row_number().over(window)). \
        withColumn('rand_id', func.row_number().over(window_rand))
    # Re-key the selected columns by the random id so joining on `id`
    # attaches each row's values to a different (random) row.
    rand_df = df. \
        select(['rand_id'] + columns_to_permute).\
        withColumnRenamed('rand_id', 'id')
    for c in columns_to_permute:
        rand_df = rand_df.\
            withColumnRenamed(c, f'rand_{c}')
    permuted_df = df.join(rand_df, ['id'], how = 'inner').cache()
    return permuted_df
|
89da810d93586d66963d9a3ee3c7d6c0960596c9
| 3,648,183
|
def reset_dismissed(institute_id, case_name):
    """Reset all dismissed variants for a case"""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    controllers.reset_all_dimissed(store, institute_obj, case_obj)
    # Send the user back to the page that triggered the reset.
    return redirect(request.referrer)
|
c16dcc7e23be620212ee6756051db6e089014678
| 3,648,184
|
def texts_from_array(x_train, y_train, x_test=None, y_test=None,
                     class_names = [],
                     max_features=MAX_FEATURES, maxlen=MAXLEN,
                     val_pct=0.1, ngram_range=1, preprocess_mode='standard', verbose=1):
    """
    Loads and preprocesses text data from arrays.
    Args:
        x_train(list): list of training texts
        y_train(list): list of integers representing classes
        x_test(list): list of validation texts (optional)
        y_test(list): list of integers representing classes (optional)
        class_names (list): list of strings representing class labels
                            shape should be (num_examples,1) or (num_examples,)
        max_features(int): max num of words to consider in vocabulary
        maxlen(int): each document can be of most <maxlen> words. 0 is used as padding ID.
        ngram_range(int): size of multi-word phrases to consider
                          e.g., 2 will consider both 1-word phrases and 2-word phrases
                          limited by max_features
        val_pct(float): Proportion of training to use for validation.
                        Has no effect if x_test and y_test are supplied.
        preprocess_mode (str): Either 'standard' (normal tokenization) or 'bert'
                               tokenization and preprocessing for use with
                               BERT text classification model.
        verbose (boolean): verbosity
    Returns:
        tuple: (preprocessed train data, preprocessed validation data, preprocessor)
    Raises:
        ValueError: if preprocess_mode is not a known preprocessing mode.
    """
    if not class_names:
        classes = list(set(y_train))
        classes.sort()
        class_names = ["%s" % (c) for c in classes]
    if x_test is None or y_test is None:
        x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=val_pct)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)
    # return preprocessed the texts
    preproc_type = tpp.TEXT_PREPROCESSORS.get(preprocess_mode, None)
    # BUG FIX: the original tested `if None:` which is always falsy, so an
    # unsupported preprocess_mode was never rejected and crashed later.
    if preproc_type is None:
        raise ValueError('unsupported preprocess_mode')
    preproc = preproc_type(maxlen,
                           max_features,
                           classes = class_names,
                           ngram_range=ngram_range)
    trn = preproc.preprocess_train(x_train, y_train, verbose=verbose)
    val = preproc.preprocess_test(x_test, y_test, verbose=verbose)
    return (trn, val, preproc)
|
ecb120070ac76d21da34fd5f0d27799fe5ba093b
| 3,648,186
|
def display2(depts, level=0):
    """
    Flatten a department tree into [department, depth] pairs, depth-first.

    Example result shape: [[a, 1], [b, 2], [c, 3], [d, 3], [a, 1]]
    :param depts: iterable of Department objects at the current level.
    :param level: depth of the current level (root call uses 0).
    :return: list of [department, level] pairs.
    """
    lists = []
    for d in depts:
        lists.append([d, level])
        # Recurse into children, one extra level deep.
        children = Department.objects.filter(parent_id=d.id)
        if children:
            lists.extend(display2(children, level + 1))
    return lists
|
6187c91dd5653ab3f7a7e68d6b811ffda2afb035
| 3,648,187
|
def _get_target_id_to_skill_opportunity_dict(suggestions):
    """Returns a dict of target_id to skill opportunity summary dict.
    Args:
        suggestions: list(BaseSuggestion). A list of suggestions to retrieve
            opportunity dicts.
    Returns:
        dict. Dict mapping target_id to corresponding skill opportunity dict.
    """
    target_ids = set(s.target_id for s in suggestions)
    # Missing opportunities are kept as None values so callers can tell
    # "not found" apart from "found".
    opportunity_id_to_opportunity_dict = {
        opp_id: (opp.to_dict() if opp is not None else None)
        for opp_id, opp in opportunity_services.get_skill_opportunities_by_ids(
            list(target_ids)).items()
    }
    # Fetch the skills backing the opportunities that were found.
    opportunity_id_to_skill = {
        skill.id: skill
        for skill in skill_fetchers.get_multi_skills([
            opp['id']
            for opp in opportunity_id_to_opportunity_dict.values()
            if opp is not None])
    }
    # Attach each skill's rubric summaries to its opportunity dict.
    for opp_id, skill in opportunity_id_to_skill.items():
        if skill is not None:
            opportunity_id_to_opportunity_dict[opp_id]['skill_rubrics'] = [
                rubric.to_dict() for rubric in skill.rubrics]
    return opportunity_id_to_opportunity_dict
|
faf708abb5876a56b06282b99c4cc7221c33c5bd
| 3,648,188
|
def default_loc_scale_fn(
    is_singular=False,
    loc_initializer=tf.random_normal_initializer(stddev=0.1),
    untransformed_scale_initializer=tf.random_normal_initializer(
        mean=-3., stddev=0.1),
    loc_regularizer=None,
    untransformed_scale_regularizer=None,
    loc_constraint=None,
    untransformed_scale_constraint=None):
  """Makes closure which creates `loc`, `scale` params from `tf.get_variable`.
  This function produces a closure which produces `loc`, `scale` using
  `tf.get_variable`. The closure accepts the following arguments:
    dtype: Type of parameter's event.
    shape: Python `list`-like representing the parameter's event shape.
    name: Python `str` name prepended to any created (or existing)
      `tf.Variable`s.
    trainable: Python `bool` indicating all created `tf.Variable`s should be
      added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
    add_variable_fn: `tf.get_variable`-like `callable` used to create (or
      access existing) `tf.Variable`s.
  Args:
    is_singular: Python `bool` indicating if `scale is None`. Default: `False`.
    loc_initializer: Initializer function for the `loc` parameters.
      The default is `tf.random_normal_initializer(mean=0., stddev=0.1)`.
    untransformed_scale_initializer: Initializer function for the `scale`
      parameters. Default value: `tf.random_normal_initializer(mean=-3.,
      stddev=0.1)`. This implies the softplus transformed result has mean
      approximately `0.05` and std. deviation approximately `0.005`.
    loc_regularizer: Regularizer function for the `loc` parameters.
      The default (`None`) is to use the `tf.get_variable` default.
    untransformed_scale_regularizer: Regularizer function for the `scale`
      parameters. The default (`None`) is to use the `tf.get_variable` default.
    loc_constraint: An optional projection function to be applied to the
      loc after being updated by an `Optimizer`. The function must take as input
      the unprojected variable and must return the projected variable (which
      must have the same shape). Constraints are not safe to use when doing
      asynchronous distributed training.
      The default (`None`) is to use the `tf.get_variable` default.
    untransformed_scale_constraint: An optional projection function to be
      applied to the `scale` parameters after being updated by an `Optimizer`
      (e.g. used to implement norm constraints or value constraints). The
      function must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are not
      safe to use when doing asynchronous distributed training. The default
      (`None`) is to use the `tf.get_variable` default.
  Returns:
    default_loc_scale_fn: Python `callable` which instantiates `loc`, `scale`
    parameters from args: `dtype, shape, name, trainable, add_variable_fn`.
  """
  def _fn(dtype, shape, name, trainable, add_variable_fn):
    """Creates `loc`, `scale` parameters."""
    loc = add_variable_fn(
        name=name + '_loc',
        shape=shape,
        initializer=loc_initializer,
        regularizer=loc_regularizer,
        constraint=loc_constraint,
        dtype=dtype,
        trainable=trainable)
    # A `None` scale denotes a deterministic (point-mass) parameter.
    if is_singular:
      return loc, None
    untransformed_scale = add_variable_fn(
        name=name + '_untransformed_scale',
        shape=shape,
        initializer=untransformed_scale_initializer,
        regularizer=untransformed_scale_regularizer,
        constraint=untransformed_scale_constraint,
        dtype=dtype,
        trainable=trainable)
    # Softplus keeps the scale positive; adding machine epsilon keeps it
    # strictly positive even when softplus underflows to zero.
    scale = (np.finfo(dtype.as_numpy_dtype).eps +
             tf.nn.softplus(untransformed_scale))
    return loc, scale
  return _fn
|
54eeec1b5739a71473abfa5a5ffb7aa5aa572505
| 3,648,189
|
from skimage.morphology import disk
from cv2 import dilate
def db_eval_boundary(args):
    """
    Compute boundary precision and F-measure for one frame.
    Calculates precision/recall for boundaries between foreground_mask and
    gt_mask using morphological operators to speed it up.
    Arguments:
        args: tuple of (foreground_mask, gt_mask, ignore_mask, bound_th,
            class_id, pred_is_boundary) where foreground_mask (ndarray) is the
            binary segmentation image and gt_mask (ndarray) the binary
            annotated image.
    Returns:
        F (float): boundaries F-measure
        P (float): boundaries precision
    """
    foreground_mask, gt_mask, ignore_mask, bound_th, class_id, pred_is_boundary = args
    assert np.atleast_3d(foreground_mask).shape[2] == 1
    # bound_th >= 1 is an absolute pixel tolerance; otherwise it is a
    # fraction of the image diagonal.
    bound_pix = bound_th if bound_th >= 1 else \
        np.ceil(bound_th*np.linalg.norm(foreground_mask.shape))
    # Ignored pixels contribute to neither boundary.
    foreground_mask[ignore_mask] = 0
    gt_mask[ignore_mask] = 0
    # Get the pixel boundaries of both masks
    if pred_is_boundary:
        fg_boundary = foreground_mask
    else:
        fg_boundary = seg2bmap(foreground_mask)
    gt_boundary = seg2bmap(gt_mask)
    # BUG FIX: np.bool was removed in NumPy 1.24; builtin bool is equivalent.
    def binary_dilation(x, d): return dilate(
        x.astype(np.uint8), d).astype(bool)
    fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
    gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
    # Get the intersection
    gt_match = gt_boundary * fg_dil
    fg_match = fg_boundary * gt_dil
    # Area of the intersection
    n_fg = np.sum(fg_boundary)
    n_gt = np.sum(gt_boundary)
    # % Compute precision and recall
    if n_fg == 0 and n_gt > 0:
        precision = 1
        recall = 0
    elif n_fg > 0 and n_gt == 0:
        precision = 0
        recall = 1
    elif n_fg == 0 and n_gt == 0:
        precision = 1
        recall = 1
    else:
        precision = np.sum(fg_match) / float(n_fg)
        recall = np.sum(gt_match) / float(n_gt)
    # Compute F measure
    if precision + recall == 0:
        F = 0
    else:
        F = 2 * precision * recall / (precision + recall)
    return F, precision
|
0aa23d0d8b45681e50f3b99c72dfcb5851dd92a6
| 3,648,190
|
def angle_trunc(a):
    """
    helper function to map all angles onto [-pi, pi)

    Python's ``%`` with a positive modulus always yields a value in
    [0, 2*pi), even for negative ``a``, so no pre-normalisation loop is
    needed (the old while-loop was O(|a|) for large negative inputs).
    """
    return ((a + pi) % (pi * 2)) - pi
|
36e5d97affab5f3a1155b837df9985fe26795d76
| 3,648,191
|
from typing import Optional
def get_tag_or_default(
    alignment: pysam.AlignedSegment, tag_key: str, default: Optional[str] = None
) -> Optional[str]:
    """Extracts the value associated to `tag_key` from `alignment`, and returns a default value
    if the tag is not present."""
    try:
        value = alignment.get_tag(tag_key)
    except KeyError:
        # Tag absent on this alignment record.
        return default
    return value
|
bf3b1495224d7409c410f96daa20af8f70a27efa
| 3,648,192
|
def vtln_warp_mel_freq(vtln_low_cutoff, vtln_high_cutoff, low_freq, high_freq,
                       vtln_warp_factor, mel_freq):
    """
    Inputs:
        vtln_low_cutoff (float): lower frequency cutoffs for VTLN
        vtln_high_cutoff (float): upper frequency cutoffs for VTLN
        low_freq (float): lower frequency cutoffs in mel computation
        high_freq (float): upper frequency cutoffs in mel computation
        vtln_warp_factor (float): Vtln warp factor
        mel_freq (Tensor): given frequency in Mel
    Outputs:
        Tensor: mel_freq after vtln warp
    """
    # Mel -> Hz, warp in the linear-frequency domain, then Hz -> Mel.
    hz_freq = inverse_mel_scale(mel_freq)
    warped_hz = vtln_warp_freq(vtln_low_cutoff, vtln_high_cutoff, low_freq,
                               high_freq, vtln_warp_factor, hz_freq)
    return mel_scale(warped_hz)
|
e6432c0298e559dc958011bdf5d52c3e92544213
| 3,648,193
|
def _squared_loss_and_spatial_grad_derivative(X, y, w, mask, grad_weight):
    """
    Computes the derivative of _squared_loss_and_spatial_grad.
    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Design matrix.
    y : ndarray, shape (n_samples,)
        Target / response vector.
    w : ndarray shape (n_features,)
        Unmasked, ravelized weights map.
    mask : ndarray of bool
        Spatial mask selecting the in-brain voxels that `w` corresponds to.
    grad_weight: float
        l1_ratio * alpha
    Returns
    -------
    ndarray, shape (n_features,)
        Derivative of _squared_loss_and_spatial_grad function.
    """
    # Residual of the linear model: Xw - y.
    data_section = np.dot(X, w) - y
    # Embed the masked weights back into full image space so the spatial
    # gradient/divergence can be computed on the image grid.
    image_buffer = np.zeros(mask.shape)
    image_buffer[mask] = w
    # Data-fit gradient minus the (negative-definite) Laplacian penalty term.
    return (np.dot(X.T, data_section)
            - grad_weight * _div(_gradient(image_buffer))[mask])
|
05ce984609f5d31fda33d7fb0263ecc056c2f0ed
| 3,648,194
|
import ctypes
def destructor(cfunc):
    """
    Make a C function a destructor.
    Destructors accept pointers to void pointers as argument. They are also wrapped as a staticmethod for usage in
    classes.
    :param cfunc: The C function as imported by ctypes.
    :return: The configured destructor.
    """
    # Destructor signature: void fn(void **handle)
    handle_type = ctypes.POINTER(ctypes.c_void_p)
    cfunc.argtypes = [handle_type]
    cfunc.restype = None
    return staticmethod(cfunc)
|
05abd181649a2178d4dce704ef93f61eb5418092
| 3,648,195
|
def invalid_auth_header(jwt):
    """Produce invalid JWT tokens for use in tests.
    :param jwt: JWT manager used to sign the (deliberately invalid) claims.
    :return: dict holding an Authorization header with the Bearer token.
    """
    return {'Authorization': 'Bearer ' + jwt.create_jwt(claims=TestJwtClaims.invalid, header=JWT_HEADER)}
|
91f82b4a9be3740e115da4182325c71fa84440b7
| 3,648,196
|
def update_file_info_in_job(job, file_infos):
    """
    Update the 'setup.package.fileInformations' data in the JSON to append new file information.
    """
    for info in file_infos:
        try:
            file_list = job['setup']['package']['fileInformations']
            file_list.append(info)
        except (KeyError, TypeError, AttributeError):
            # Missing/odd-typed setup.package.fileInformations: report and abort.
            print('Job file input is missing required setup.package.fileInformations data.')
            exit(1)
    return job
|
9902173548d72fcd35c8f80bb44b59aac27d9401
| 3,648,197
|
def _FirstStatementsInScriptElements(contents):
    """Returns a list of first statements found in each <script> element."""
    soup = parse_html.BeautifulSoup(contents)
    # src=None keeps only inline scripts (external scripts carry a src attr).
    script_elements = soup.find_all('script', src=None)
    return [_FirstStatement(e.get_text()) for e in script_elements]
|
7b2a3bddfa63a6ab4765906862037adf786c253a
| 3,648,198
|
def load_image(input_file_path):
    """
    Load the 'input_file_path' and return a 2D numpy array of the image it contains.
    """
    # Convert to 8-bit grayscale ('L' mode) before turning it into an array.
    grayscale = pil_img.open(input_file_path).convert('L')
    return np.array(grayscale)
|
b5783b9bcca55be355a91c6e9e2d2fcd09d1989b
| 3,648,199
|
def ask_the_user(runner: Runner) -> Direction:
    """Ask the user what to do (in absolute UP, DOWN, etc.)"""
    # Delegates entirely to the runner's absolute-direction prompt.
    return runner.ask_absolute()
|
2f289aba30e1368abd675a9b9bb2be0924984d3d
| 3,648,200
|
def optimize_spot_bid(ctx, instance_type, spot_bid):
    """
    Check whether the bid is sane and makes an effort to place the instance in a sensible zone.
    :param ctx: context providing the EC2 connection (ctx.ec2).
    :param instance_type: EC2 instance type whose spot price history is used.
    :param spot_bid: bid price to validate against the recent spot history.
    :return: the most price-stable availability zone for this bid.
    """
    spot_history = _get_spot_history(ctx, instance_type)
    # Only sanity-check the bid when history is actually available.
    if spot_history:
        _check_spot_bid(spot_bid, spot_history)
    zones = ctx.ec2.get_all_zones()
    most_stable_zone = choose_spot_zone(zones, spot_bid, spot_history)
    logger.info("Placing spot instances in zone %s.", most_stable_zone)
    return most_stable_zone
|
25bd3d9c952c256df12c3cc7efa257629d127af9
| 3,648,202
|
def new_hassle_participants():
    """Select participants for the room helpers.

    Renders the participant-selection page populated with every current member.
    """
    # Get a list of all current members.
    members = helpers.get_all_members()
    return flask.render_template('hassle_new_participants.html', members=members)
|
c8062ae498691ac72a17a969cf2ba7547e08eb9c
| 3,648,203
|
import json
def data_store_remove_folder(request):
    """
    remove a sub-folder/sub-collection in hydroshareZone or any federated zone used for HydroShare
    resource backend store. It is invoked by an AJAX call and returns json object that include a
    status of 'success' if succeeds, and HttpResponse of status code of 403, 400, or 500 if fails.
    The AJAX request must be a POST request with input data passed in for res_id and folder_path
    where folder_path is the relative path for the folder to be removed under
    res_id collection/directory.
    """
    res_id = request.POST.get('res_id', None)
    if res_id is None:
        return HttpResponse('Bad request - resource id is not included',
                            status=status.HTTP_400_BAD_REQUEST)
    res_id = str(res_id).strip()
    try:
        resource, _, user = authorize(request, res_id,
                                      needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
    except NotFound:
        return HttpResponse('Bad request - resource not found', status=status.HTTP_400_BAD_REQUEST)
    except PermissionDenied:
        return HttpResponse('Permission denied', status=status.HTTP_401_UNAUTHORIZED)
    folder_path = request.POST.get('folder_path', None)
    if folder_path is None:
        return HttpResponse('Bad request - folder_path is not included',
                            status=status.HTTP_400_BAD_REQUEST)
    folder_path = str(folder_path).strip()
    if not folder_path:
        return HttpResponse('Bad request - folder_path cannot be empty',
                            status=status.HTTP_400_BAD_REQUEST)
    if not folder_path.startswith('data/contents/'):
        return HttpResponse('Bad request - folder_path must start with data/contents/',
                            status=status.HTTP_400_BAD_REQUEST)
    # Reject any path-traversal attempt before touching the backend store.
    if folder_path.find('/../') >= 0 or folder_path.endswith('/..'):
        return HttpResponse('Bad request - folder_path must not contain /../',
                            status=status.HTTP_400_BAD_REQUEST)
    try:
        remove_folder(user, res_id, folder_path)
    except SessionException as ex:
        return HttpResponse(ex.stderr, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except Exception as ex:
        # BUG FIX: Python 3 exceptions have no `.message`; the old code raised
        # AttributeError here instead of returning the intended 500 response.
        return HttpResponse(str(ex), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    return_object = {'status': 'success'}
    return HttpResponse(
        json.dumps(return_object),
        content_type="application/json"
    )
|
d6583dca0967fdf282a3510defcdeeb70da6c7f7
| 3,648,204
|
import math
def distance(x1: float, y1: float, x2: float, y2: float) -> float:
    """
    Finds distance between two given points
    Parameters:
        x1, y1 : The x and y coordinates of first point
        x2, y2 : The x and y coordinates of second point
    Returns:
        Distance upto two decimal places.
    """
    dx = x1 - x2
    dy = y1 - y2
    euclidean = math.sqrt(dx * dx + dy * dy)
    return round(euclidean, 2)
|
63f103f46b52aae146b52f385e15bc3441f042e5
| 3,648,205
|
def load_target_class(input_dir):
    """Loads target classes.

    Reads <input_dir>/target_class.csv (no header); the first column becomes
    the index and the second the "Target" column.
    """
    csv_path = join(input_dir, "target_class.csv")
    return pd.read_csv(csv_path, header=None, index_col=0, names=["Target"])
|
58fea2aebd6c04dd0b51ec3ef3fc627212aa2b29
| 3,648,206
|
def fix_labels(ply_gt, ply_seg):
    """
    Remove extra vertices from the ground truth.

    Labels from `ply_gt` are carried over to `ply_seg`'s vertex order only
    where the x-coordinates match exactly; unmatched vertices keep label 0.
    Returns the cleaned label array as an int ndarray.
    """
    size = len(ply_gt.elements[0]["x"])
    gt_x = np.array(ply_gt.elements[0]["x"])
    seg_x = np.array(ply_seg.elements[0]["x"])
    new_gt_label = np.zeros_like(seg_x)
    gt_label = np.array(ply_gt.elements[0]["label"])
    for i in range(size):
        if seg_x.shape[0] > i:
            # Exact positional match (within float noise) keeps the label.
            if abs(gt_x[i] - seg_x[i]) < 1e-16:
                new_gt_label[i] = gt_label[i]
    # BUG FIX: np.int was removed in NumPy 1.24; builtin int is the
    # documented replacement.
    new_gt_label = clean_gt(new_gt_label).astype(int)
    return new_gt_label
|
291fedd887b82e6099f5aba6a006fd0e33a7fb18
| 3,648,208
|
import requests
def get_coin_price(api_url: str, currency: str) -> float:
    """
    Get the USD price of a coin from Gemini
    Args:
        api_url: The API URL for Gemini
        currency: The cryptocurrency the bot is monitoring
    Returns:
        coin_price: The price the coin currently holds in USD
        (-1 if the price could not be retrieved)
    """
    # Instantiate Gemini and query the price
    coin_price = -1
    # BUG FIX: pre-bind price_feeds so the error handler below cannot raise
    # UnboundLocalError when the HTTP request itself fails.
    price_feeds = None
    api_query = "/v1/pricefeed"
    try:
        price_feeds = requests.get(api_url + api_query).json()
        for feed in price_feeds:
            if feed.get('pair') == currency + "USD":
                coin_price = float(feed.get('price'))
    except Exception as err:
        print("ERROR: Unable to get price due to %s" % err)
        print("Price feed: %s" % price_feeds)
    return coin_price
|
0683554aea85faa1dd105cbf81144685d7d2deec
| 3,648,209
|
import hmac
import base64
def GenerateAuthToken(key_name, user_id, action_id='', when=None):
  """Generates a URL-safe token based on XSRFToken but for general purpose.
  Args:
    key_name (str): name of secret key to generate token.
    user_id (str): the user ID of the authenticated user.
    action_id (str): a string identifier of the action they requested
      authorization for.
    when (datetime): the time when the user was authorized for this action.
      If not set the current utc time is used.
  Returns:
    A string token.
  """
  key = SecretKey.GetSecretKey(key_name)
  when = when or time_util.GetUTCNow()
  when_timestamp = time_util.ConvertToTimestamp(when)
  # NOTE(review): hmac.new() without an explicit digestmod and the str-based
  # concatenation below assume Python 2 string semantics — confirm before
  # porting to Python 3 (digestmod is mandatory since 3.8; key/msg must be
  # bytes).
  digester = hmac.new(key)
  digester.update(str(user_id))
  digester.update(_DELIMITER)
  digester.update(action_id)
  digester.update(_DELIMITER)
  digester.update(str(when_timestamp))
  digest = digester.digest()
  # Token layout: digest + delimiter + timestamp, base64url-encoded.
  return base64.urlsafe_b64encode('%s%s%d' % (digest, _DELIMITER,
                                              when_timestamp))
|
8375889ba4cdc1fc996c48330cf0cd23824f5946
| 3,648,210
|
import torch
def get_dataset_psnr(device, model, dataset, source_img_idx_shift=64,
                     batch_size=10, max_num_scenes=None):
    """Returns PSNR for each scene in a dataset by comparing the view predicted
    by a model and the ground truth view.
    Args:
        device (torch.device): Device to perform PSNR calculation on.
        model (models.neural_renderer.NeuralRenderer): Model to evaluate.
        dataset (misc.dataloaders.SceneRenderDataset): Dataset to evaluate model
            performance on. Should be one of "chairs-test" or "cars-test".
        source_img_idx_shift (int): Index of source image for each scene. For
            example if 00064.png is the source view, then
            source_img_idx_shift = 64.
        batch_size (int): Batch size to use when generating predictions. This
            should be a divisor of the number of images per scene.
        max_num_scenes (None or int): Optionally limit the maximum number of
            scenes to calculate PSNR for.
    Returns:
        list of per-scene mean PSNR values.
    Notes:
        This function should be used with the ShapeNet chairs and cars *test*
        sets.
    """
    num_imgs_per_scene = dataset.num_imgs_per_scene
    # Set number of scenes to calculate
    num_scenes = dataset.num_scenes
    if max_num_scenes is not None:
        num_scenes = min(max_num_scenes, num_scenes)
    # Calculate number of batches per scene
    # BUG FIX: the original assert message contained unfilled {} placeholders.
    assert (num_imgs_per_scene - 1) % batch_size == 0, \
        "Batch size {} must divide number of images per scene {}.".format(
            batch_size, num_imgs_per_scene - 1)
    # Comparison are made against all images except the source image (and
    # therefore subtract 1 from total number of images)
    batches_per_scene = (num_imgs_per_scene - 1) // batch_size
    # Initialize psnr values
    psnrs = []
    for i in range(num_scenes):
        # Extract source view
        source_img_idx = i * num_imgs_per_scene + source_img_idx_shift
        img_source = dataset[source_img_idx]["img"].unsqueeze(0).repeat(batch_size, 1, 1, 1).to(device)
        render_params = dataset[source_img_idx]["render_params"]
        azimuth_source = torch.Tensor([render_params["azimuth"]]).repeat(batch_size).to(device)
        elevation_source = torch.Tensor([render_params["elevation"]]).repeat(batch_size).to(device)
        # Infer source scene
        scenes = model.inverse_render(img_source)
        # Iterate over all other views of scene
        num_points_in_batch = 0
        data_list = []
        scene_psnr = 0.
        for j in range(num_imgs_per_scene):
            if j == source_img_idx_shift:
                continue  # Do not compare against same image
            # Add new image to list of images we want to compare to
            data_list.append(dataset[i * num_imgs_per_scene + j])
            num_points_in_batch += 1
            # If we have filled up a batch, make psnr calculation
            if num_points_in_batch == batch_size:
                # Create batch for target data
                img_target, azimuth_target, elevation_target = create_batch_from_data_list(data_list)
                img_target = img_target.to(device)
                azimuth_target = azimuth_target.to(device)
                elevation_target = elevation_target.to(device)
                # Rotate scene and render image
                rotated = model.rotate_source_to_target(scenes, azimuth_source,
                                                        elevation_source, azimuth_target,
                                                        elevation_target)
                img_predicted = model.render(rotated).detach()
                scene_psnr += get_psnr(img_predicted, img_target)
                data_list = []
                num_points_in_batch = 0
        psnrs.append(scene_psnr / batches_per_scene)
        print("{}/{}: Current - {:.3f}, Mean - {:.4f}".format(i + 1,
                                                              num_scenes,
                                                              psnrs[-1],
                                                              torch.mean(torch.Tensor(psnrs))))
    return psnrs
|
0ce2274aac72d2510fd0c9c067a6efa542c103ec
| 3,648,212
|
def smallest_continuous_multiple(max_multiple):
    """
    Return the smallest natural number evenly divisible by all numbers
    less than or equal to the input max_multiple.
    REQ: max_multiple >= 0 and whole
    :param max_multiple: {int} upper bound of the divisor range
    :return: smallest natural number evenly divisible by all numbers less
        than or equal to the input

    The answer is exactly lcm(1, 2, ..., max_multiple).  It is computed by
    folding the identity lcm(a, b) = a * b // gcd(a, b) over the range,
    which replaces the original trial-and-error search (stepping through
    multiples of max_multiple) and removes the dependency on an external
    get_factors() helper.  Runtime drops from pseudo-exponential to
    O(max_multiple) gcd operations.
    """
    from math import gcd

    # All numbers <= 2 are evenly divisible by themselves and everything
    # below them; this also preserves the original behavior for 0 and 1.
    if max_multiple <= 2:
        return max_multiple
    result = 1
    for divisor in range(2, max_multiple + 1):
        # Fold in one more divisor: lcm(result, divisor).
        result = result * divisor // gcd(result, divisor)
    return result
|
57423ed0941d18b54a1da33bc561f79ed19ae145
| 3,648,213
|
def context_list_entities(context):
    """
    Return the list of entities to be displayed in list view.

    Looks first for a 'List_rows' bound field (whose 'field_value' entry
    holds the rows), then for a plain 'entities' entry.

    :param context: template context mapping
    :return: list of entity values, or None (with a warning logged) if
        no entity list is found
    """
    # log.info(context['List_rows'])
    if 'List_rows' in context:
        return context['List_rows']['field_value']
    elif 'entities' in context:
        return context['entities']
    # Lazy %-style logger args: the repr is only built if the record is
    # actually emitted (the original formatted the message eagerly).
    log.warning("No entity list found in context %r", context.keys())
    return None
|
1b00e5cd6593a7e0c8770e9bbeaae5c3b47ac78a
| 3,648,214
|
def run(arg):
    """Entry point.

    Validates the path tree rooted at ``arg`` and prints every error found,
    grouped by file, followed by a summary line.

    :param arg: path to validate (forwarded to ``validate_path``)
    :return: 1 if any validation errors were found, 0 otherwise
    """
    error_map = {}
    validate_path(arg, None, error_map)
    if not error_map:
        return 0
    error_count = 0
    for file, errors in error_map.items():
        print(f"Error in {file}:")
        for error in errors:
            print(f"  {error}")
            # Count each individual error.  The original incremented once
            # per *file*, so the summary under-reported whenever a single
            # file contained more than one error.
            error_count += 1
        print("")
    print(f"{error_count} error(s) found in {len(error_map)} file(s)")
    return 1
|
0e124b87b62076af713b8caea686e3c44a4e83a2
| 3,648,215
|
import struct
def _bitcode_symbols_partial_impl(
        *,
        actions,
        binary_artifact,
        bitcode_symbol_maps,
        dependency_targets,
        label_name,
        output_discriminator,
        package_bitcode,
        platform_prerequisites):
    """Implementation for the bitcode symbols processing partial.

    Collects the per-architecture .bcsymbolmap files for the binary,
    renames each one after the UUID of its matching binary slice (as
    reported by dwarfdump), merges in transitive bitcode maps from
    dependency targets, and optionally stages everything under a
    BCSymbolMaps directory in the archive.

    NOTE(review): this is Bazel Starlark (Apple rules), not plain Python,
    despite the file's .py extension hint.
    """
    bitcode_dirs = []
    # arch (string) -> bcsymbolmap File for that slice.
    bitcode_symbols = {}
    if bitcode_symbol_maps:
        bitcode_symbols.update(bitcode_symbol_maps)
    if binary_artifact and bitcode_symbols:
        bitcode_files = []
        copy_commands = []
        for arch in bitcode_symbols:
            bitcode_file = bitcode_symbols[arch]
            # Skip arches that produced no symbol map.
            if not bitcode_file:
                continue
            bitcode_files.append(bitcode_file)
            # Get the UUID of the arch slice and use that to name the bcsymbolmap file.
            # The dwarfdump -u output is grepped for the "(arch)" tag and the
            # second field (the UUID) becomes the destination filename.
            copy_commands.append(
                ("cp {bitcode_file} " +
                 "${{OUTPUT_DIR}}/$(dwarfdump -u {binary} " +
                 "| grep \"({arch})\" | cut -d' ' -f2).bcsymbolmap").format(
                    arch = arch,
                    binary = binary_artifact.path,
                    bitcode_file = bitcode_file.path,
                ),
            )
        if bitcode_files:
            # Declare one output directory holding all renamed symbol maps.
            bitcode_dir = intermediates.directory(
                actions = actions,
                target_name = label_name,
                output_discriminator = output_discriminator,
                dir_name = "bitcode_files",
            )
            bitcode_dirs.append(bitcode_dir)
            # Single shell action: create the dir, then run every copy.
            apple_support.run_shell(
                actions = actions,
                apple_fragment = platform_prerequisites.apple_fragment,
                inputs = [binary_artifact] + bitcode_files,
                outputs = [bitcode_dir],
                command = "mkdir -p ${OUTPUT_DIR} && " + " && ".join(copy_commands),
                env = {"OUTPUT_DIR": bitcode_dir.path},
                mnemonic = "BitcodeSymbolsCopy",
                xcode_config = platform_prerequisites.xcode_version_config,
            )
    # Merge this target's maps with those propagated by dependencies.
    transitive_bitcode_files = depset(
        direct = bitcode_dirs,
        transitive = [
            x[_AppleBitcodeInfo].bitcode
            for x in dependency_targets
            if _AppleBitcodeInfo in x
        ],
    )
    if package_bitcode:
        # Stage the maps under BCSymbolMaps/ at the root of the archive.
        bundle_files = [(processor.location.archive, "BCSymbolMaps", transitive_bitcode_files)]
    else:
        bundle_files = []
    return struct(
        bundle_files = bundle_files,
        providers = [_AppleBitcodeInfo(bitcode = transitive_bitcode_files)],
    )
|
64606e63a7831a110585ceb83fb34699b373db0a
| 3,648,216
|
def _str_trim_left(x):
"""
Remove leading whitespace.
"""
return x.str.replace(r"^\s*", "")
|
2718086073706411929b45edf80a1d464dfaeff6
| 3,648,217
|
def zipcompress(items_list, flags_list):
    """
    Apply ``compress`` positionally: the i-th item list is filtered by the
    i-th flag list.  Stops at the shorter of the two inputs.

    SeeAlso:
        vt.zipcompress
    """
    return list(map(compress, items_list, flags_list))
|
e8f85c058db442a967d89ef2f74e5e32cc58a737
| 3,648,218
|
def test_config_file_fails_missing_value(monkeypatch, presence, config):
    """Check if test fails with missing value in database configuration."""
    # Stub the file config so the 'database' section exists but is empty.
    monkeypatch.setattr(
        presence.builder,
        "fetch_file_config",
        lambda self: {'database': {}},
    )
    status, msg = presence.check_configuration_file()
    assert status == "Skipping"
    assert "No configuration" in msg
    assert presence.db_config == {}
|
34e14fa3b72fbd3a64930b9ce46da61e76138650
| 3,648,219
|
def construct_run_config(iterations_per_loop):
    """Construct the run config.

    Merges the SSD model hyperparameters (after applying any overrides from
    FLAGS.hparams) with run-time options derived from command-line FLAGS.

    :param iterations_per_loop: number of training iterations per TPU loop
    :return: dict combining hparams values and run-time options
    """
    # Parse hparams
    hparams = ssd_model.default_hparams()
    hparams.parse(FLAGS.hparams)
    # Spatial partitioning and input transposition are mutually exclusive:
    # partitioned inputs are fed untransposed.  Compute the condition once
    # instead of the original duplicated `False if X is not None else True`
    # / `True if X is not None else False` expressions.
    use_partitioning = FLAGS.input_partition_dims is not None
    return dict(
        hparams.values(),
        num_shards=FLAGS.num_shards,
        num_examples_per_epoch=FLAGS.num_examples_per_epoch,
        resnet_checkpoint=FLAGS.resnet_checkpoint,
        val_json_file=FLAGS.val_json_file,
        model_dir=FLAGS.model_dir,
        iterations_per_loop=iterations_per_loop,
        steps_per_epoch=FLAGS.num_examples_per_epoch // FLAGS.train_batch_size,
        eval_samples=FLAGS.eval_samples,
        transpose_input=not use_partitioning,
        use_spatial_partitioning=use_partitioning,
        dataset_threadpool_size=FLAGS.dataset_threadpool_size,
    )
|
d2989e795ab14a9931837356cb7a6c3752538429
| 3,648,220
|
def bezier_curve(points, nTimes=1000):
    """
    Given a set of control points, return the bezier curve defined by the
    control points.

    Control points should be a list of lists, or list of tuples
    such as [ [1,1],
              [2,3],
              [4,5], ..[Xn, Yn] ]
    nTimes is the number of time steps, defaults to 1000

    See http://processingjs.nihongoresources.com/bezierinfo/
    """
    degree = len(points) - 1
    xs = np.array([pt[0] for pt in points])
    ys = np.array([pt[1] for pt in points])
    ts = np.linspace(0.0, 1.0, nTimes)
    # One Bernstein basis polynomial per control point, evaluated at all ts.
    basis = np.array([bernstein_poly(k, degree, ts) for k in range(degree + 1)])
    # Weighted sum of the basis rows by the control coordinates.
    return np.dot(xs, basis), np.dot(ys, basis)
|
3d4ea2a42e3e4bb7ff739b262e3fc69784656fed
| 3,648,221
|
def _compact_temporaries(exprs):
    """
    Drop temporaries consisting of isolated symbols.

    Assignments of the form ``tmp = leaf_or_function`` are inlined into
    their uses and removed from the returned expression list.
    """
    # First of all, convert to SSA so every symbol has a single definition
    # and the substitution below is unambiguous.
    exprs = makeit_ssa(exprs)
    # What's gonna be dropped: symbols bound directly to a leaf or to a
    # Function, i.e. right-hand sides that are trivially substitutable.
    mapper = {e.lhs: e.rhs for e in exprs
              if e.lhs.is_Symbol and (q_leaf(e.rhs) or e.rhs.is_Function)}
    processed = []
    for e in exprs:
        if e.lhs not in mapper:
            # The temporary is retained, and substitutions may be applied
            expr = e
            while True:
                # Iterate to a fixed point: substituting one temporary may
                # expose another droppable symbol inside the rewritten rhs.
                handle = uxreplace(expr, mapper)
                if handle == expr:
                    break
                else:
                    expr = handle
            processed.append(handle)
    return processed
|
b28b839cc17124b83b6b26b972394486cd7d8741
| 3,648,222
|
def print_formula(elements):
    """
    Build the chemical formula string of a metabolite from its atom counts.

    Parameters
    ----------
    elements : dict
        The elements that form the metabolite and their corresponding amount

    Returns
    -------
    formula : str
        The formula of the metabolite
    """
    parts = []
    # Amounts may arrive as floats; they are rendered as whole numbers.
    for symbol, amount in elements.items():
        parts.append(f"{symbol}{int(amount)}")
    return "".join(parts)
|
a3c404ef0d18c417e44aee21106917f4ee203065
| 3,648,223
|
def try_get_code(url):
    """Returns code of URL if exists in database, else None"""
    query = """SELECT short FROM urls WHERE full=?;"""
    # __execute_command yields a single row (or None when no match).
    row = __execute_command(query, (url,))
    return row[0] if row is not None else None
|
63a88471f6fdfc44bc22383edda0eb65f9bf1b84
| 3,648,224
|
import unicodedata
def is_chinese_char(cc):
    """
    Check if the character is Chinese
    args:
        cc: char
    output:
        boolean
    """
    # The original test, unicodedata.category(cc) == 'Lo' ("Letter, other"),
    # matched many non-Chinese scripts as well (kana, Hangul, Hebrew, ...).
    # Instead, test the CJK ideograph code-point blocks defined by the
    # Unicode standard (the same ranges used by BERT's tokenizer).
    cp = ord(cc)
    return (
        0x4E00 <= cp <= 0x9FFF        # CJK Unified Ideographs
        or 0x3400 <= cp <= 0x4DBF     # Extension A
        or 0x20000 <= cp <= 0x2A6DF   # Extension B
        or 0x2A700 <= cp <= 0x2B73F   # Extension C
        or 0x2B740 <= cp <= 0x2B81F   # Extension D
        or 0x2B820 <= cp <= 0x2CEAF   # Extension E
        or 0xF900 <= cp <= 0xFAFF     # CJK Compatibility Ideographs
        or 0x2F800 <= cp <= 0x2FA1F   # Compatibility Ideographs Supplement
    )
|
d376e6097e628ac2f3a7934ba42ee2772177f857
| 3,648,225
|
def resize_image(image, min_dim=None, max_dim=None, padding=False):
    """
    Resizes an image keeping the aspect ratio.
    min_dim: if provided, resizes the image such that it's smaller
        dimension == min_dim
    max_dim: if provided, ensures that the image longest side doesn't
        exceed this value.
    padding: If true, pads image with zeros so it's size is max_dim x max_dim
        NOTE(review): padding=True appears to require max_dim to be set,
        otherwise the pad computation below raises a TypeError -- confirm
        callers always pass both.
    Returns:
    image: the resized image
    window: (y1, x1, y2, x2). If max_dim is provided, padding might
        be inserted in the returned image. If so, this window is the
        coordinates of the image part of the full image (excluding
        the padding). The x2, y2 pixels are not included.
    scale: The scale factor used to resize the image
    padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]
        NOTE(review): when the padding argument is False this returns the
        boolean False unchanged, not a padding list -- callers must not
        index it in that case.
    """
    # Default window (y1, x1, y2, x2) and default scale == 1.
    h, w = image.shape[:2]
    window = (0, 0, h, w)
    scale = 1
    # Scale?
    if min_dim:
        # Scale up but not down
        scale = max(1, min_dim / min(h, w))
    # Does it exceed max dim?
    if max_dim:
        image_max = max(h, w)
        if round(image_max * scale) > max_dim:
            # Shrink instead so the longest side lands exactly on max_dim.
            scale = max_dim / image_max
    # Resize image and mask
    if scale != 1:
        # cv2.resize takes (width, height), i.e. the reverse of shape order.
        image = cv2.resize(
            image, (round(w * scale), round(h * scale)))
    # Need padding?
    if padding:
        # Get new height and width
        h, w = image.shape[:2]
        # Split the slack evenly per axis; any odd pixel goes bottom/right.
        top_pad = (max_dim - h) // 2
        bottom_pad = max_dim - h - top_pad
        left_pad = (max_dim - w) // 2
        right_pad = max_dim - w - left_pad
        padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
        image = np.pad(image, padding, mode='constant', constant_values=0)
        window = (top_pad, left_pad, h + top_pad, w + left_pad)
    return image, window, scale, padding
|
17c8cb953753321f1aea169ebcb199598b7fd2f1
| 3,648,227
|
def idwt(approx, wavelets, h=np.array([1.0 / np.sqrt(2), -1.0 / np.sqrt(2)]),
         g=np.array([1.0 / np.sqrt(2), 1.0 / np.sqrt(2)])):
    """
    Simple inverse discrete wavelet transform.
    for good reference: http://www.mathworks.com/help/wavelet/ref/dwt.html
    @param approx: approximation of signal at low resolution
    @param h: high pass filter (for details space)
    @param g: low pass filter (for approximation space)
    @return: recovered signal
    """
    # Reconstruction filters are the time-reversed analysis filters,
    # with the roles of the high/low-pass pair swapped.
    lo, hi = g[::-1], h[::-1]
    signal = approx
    # Walk the detail coefficients from coarsest to finest.
    for detail in reversed(wavelets):
        # Upsample both branches by inserting zeros between samples.
        up = np.zeros(2 * signal.size)
        up[::2] = signal
        detail_up = np.zeros(2 * detail.size)
        detail_up[::2] = detail
        # Filter each branch and sum; trim the convolution tail so the
        # output length matches the upsampled length.
        signal = (np.convolve(up, lo)[:-(lo.size - 1)] +
                  np.convolve(detail_up, hi)[:-(hi.size - 1)])
    return signal
|
a41cd22d81de733428123681bbea10837d4c7237
| 3,648,228
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.