| content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
def add_review(status):
"""
    Sums the switch flags on a tracker document.
    Input: tracker document (dict with *_switch flags).
    Output: sum of the switches.
"""
cluster = status['cluster_switch']
classify = status['classify_switch']
replace = status['replace_switch']
final = status['final_switch']
finished = status['finished_switch']
num = cluster + classify + replace + final + finished
return num
|
8f2ba4cd8b6bd4e500e868f13733146579edd7ce
| 3,648,459
|
import operator

import numpy as np

def n_floordiv(a, b):
    """Safe element-wise floor division: returns 1 wherever the divisor is 0."""
    # 'o' in the original is assumed to be the operator module.
    # Note: np.where evaluates both branches, so the division still runs
    # (and may warn) for zero divisors before the default of 1 is selected.
    return np.where(b != 0, operator.floordiv(a, b), 1)
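A quick usage sketch (not part of the original row): because np.where evaluates both branches, the divide still runs on the zero divisor before the default of 1 is selected.

a = np.array([10, 7, 5])
b = np.array([3, 0, 2])
np.seterr(divide='ignore')  # silence the warning from the masked zero-divisor branch
print(n_floordiv(a, b))  # -> [3 1 2]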
|
461752cfceaac911ef3be2335c2eb3893d512cc7
| 3,648,460
|
import numpy as np

def load_encoder_inputs(encoder_np_vecs='train_body_vecs.npy'):
    """
    Load variables & data that are inputs to encoder.
    Parameters
    ----------
    encoder_np_vecs : str
        filename of serialized numpy.array of encoder input (issue body)
    Returns
    -------
    encoder_input_data : numpy.array
        The issue body; the shape of this array will be (num_examples, doc_length)
    doc_length : int
        The standard document length of the input for the encoder after padding
    """
vectorized_body = np.load(encoder_np_vecs)
# Encoder input is simply the body of the issue text
encoder_input_data = vectorized_body
doc_length = encoder_input_data.shape[1]
print('Shape of encoder input: {}'.format(encoder_input_data.shape))
return encoder_input_data, doc_length
|
571cf13f6ff23fea5bb111ed12ac8afc06cc5f8b
| 3,648,461
|
def parse_row(row):
"""Create an Event object from a data row
Args:
row: Tuple of input data.
Returns:
Event object.
"""
    # Ignore either 1 or 2 columns that precede year
if len(row) > 6:
row = row[2:]
else:
row = row[1:]
# Remove occasional 'r' or 'x' character prefix from year,
# I'm not sure what these specify.
year = row[0]
if not year[0].isdigit():
year = year[1:]
return Event(year=int(year),
latitude=float(row[1]),
longitude=float(row[2]),
depth=float(row[3]),
magnitude=float(row[4]))
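A hedged usage sketch; Event comes from elsewhere in the original module, so a namedtuple stand-in is used here.

from collections import namedtuple

Event = namedtuple('Event', 'year latitude longitude depth magnitude')  # stand-in
# Six columns: the leading id column is dropped and the 'r' prefix stripped from the year.
print(parse_row(('evt1', 'r2011', '38.3', '142.4', '29.0', '9.0')))
# Event(year=2011, latitude=38.3, longitude=142.4, depth=29.0, magnitude=9.0)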
|
22923ee8f8e0b3b29eab3052df0e0b8b74613f66
| 3,648,462
|
import math
import operator
from collections import Counter
def vertical_log_binning(p, data):
"""Create vertical log_binning. Used for peak sale."""
index, value = zip(*sorted(data.items(), key=operator.itemgetter(1)))
bin_result = []
value = list(value)
bin_edge = [min(value)]
i = 1
while len(value) > 0:
num_to_bin = int(math.ceil(p * len(value)))
# print num_to_bin
edge_value = value[num_to_bin - 1]
bin_edge.append(edge_value)
to_bin = list(filter(lambda x: x <= edge_value, value))
bin_result += [i] * len(to_bin)
value = list(filter(lambda x: x > edge_value, value))
# print len(bin_result) + len(value)
i += 1
# print '\n'
bin_result_dict = dict(zip(index, bin_result))
bin_distri = Counter(bin_result_dict.values())
# print len(index), len(bin_result)
return bin_result_dict, bin_edge, bin_distri
|
bf536250bc32a9bda54c8359589b10aa5936e902
| 3,648,463
|
import __main__
import os.path as op  # 'op' is assumed to alias os.path

def get_main_name(ext="", prefix=""):
    """Returns the base name of the main script. Can optionally add an
    extension or prefix."""
    return prefix + op.splitext(op.basename(__main__.__file__))[0] + ext
|
03beb4da53436054bf61a4f68d8b0f3d51ac13be
| 3,648,464
|
def _grad_block_to_band(op, grad):
"""
Gradient associated to the ``block_to_band`` operator.
"""
grad_block = banded_ops.band_to_block(
grad, op.get_attr("block_size"), symmetric=op.get_attr("symmetric"), gradient=True
)
return grad_block
|
638c4047b224b80feb7c4f52151f96c4a62179b9
| 3,648,465
|
def LSTM(nO, nI):
"""Create an LSTM layer. Args: number out, number in"""
weights = LSTM_weights(nO, nI)
gates = LSTM_gates(weights.ops)
return Recurrent(RNN_step(weights, gates))
|
296b1a7cb73a0e5dcb50e4aa29b33c944768c688
| 3,648,466
|
import requests
def get_token(host, port, headers, auth_data):
"""Return token for a user.
"""
url = api_url(host, port, '/Users/AuthenticateByName')
r = requests.post(url, headers=headers, data=auth_data)
return r.json().get('AccessToken')
|
4d58d50c1421c17e89fa2d8d2205f0e066749e73
| 3,648,467
|
from datetime import datetime
def generateDateTime(s):
"""生成时间"""
dt = datetime.fromtimestamp(float(s)/1e3)
time = dt.strftime("%H:%M:%S.%f")
date = dt.strftime("%Y%m%d")
return date, time
|
8d566412230b5bb779baa395670ba06457c2074f
| 3,648,468
|
def get_activation_function():
"""
Returns tf.nn activation function
"""
return ACTIVATION_FUNCTION
|
9f55f5122f708120ce7a5181b7035681f37cc0c6
| 3,648,469
|
import requests
import json
from urllib.parse import quote as urllib_quote  # assumed source of the urllib_quote helper used below
def doi_and_title_from_citation(citation):
"""
Gets the DOI from
a plaintext citation.
Uses a search to CrossRef.org to retrive paper DOI.
Parameters
----------
citation : str
Full journal article citation.
Example: Senís, Elena, et al. "CRISPR/Cas9‐mediated genome
engineering: An adeno‐associated viral (AAV) vector
toolbox. Biotechnology journal 9.11 (2014): 1402-1412.
Returns
-------
doi : str
"""
# Encode raw citation
citation = urllib_quote(citation)
# Search for citation on CrossRef.org to try to get a DOI link
api_search_url = 'http://search.labs.crossref.org/dois?q=' + citation
try:
response = requests.get(api_search_url).json()
except json.decoder.JSONDecodeError:
return None
resp = response[0]
doi = resp.get('doi')
title = resp.get('title')
if doi is None:
return None
# If crossref returns a http://dx.doi.org/ link, retrieve the doi from it
# and save the URL to pass to doi_to_info
if 'http://dx.doi.org/' in doi:
doi = doi.replace('http://dx.doi.org/', '')
doi = doi.strip()
return doi, title
|
bd51d91c414c97a9e061d889a27917c1b487edd1
| 3,648,470
|
def prep_ciphertext(ciphertext):
"""Remove whitespace."""
message = "".join(ciphertext.split())
print("\nciphertext = {}".format(ciphertext))
return message
|
a5cd130ed3296addf6a21460cc384d8a0582f862
| 3,648,471
|
def main():
"""Runs dir()."""
call = PROCESS_POOL.submit(call_dir)
while True:
if call.done():
result = call.result().decode()
print("Results: \n\n{}".format(result))
return result
|
6e02aab50023ed9b72c2f858122a2652a2f4607f
| 3,648,473
|
def batching_predict_SVGPVAE_rotated_mnist(test_data_batch, vae, svgp,
                                           qnet_mu, qnet_var, aux_data_train):
    """
    Get predictions for test data. See chapter 3.3 in Casale's paper.
    This version supports batching in the prediction pipeline (unlike predict_SVGPVAE_rotated_mnist).
:param test_data_batch: batch of test data
:param vae: fitted (!) VAE object
:param svgp: fitted (!) SVGP object
:param qnet_mu: precomputed encodings (means) of train dataset (N_train, L)
:param qnet_var: precomputed encodings (vars) of train dataset (N_train, L)
:param aux_data_train: train aux data (N_train, 10)
    :return: (recon_images_test, recon_loss) - reconstructed images and per-pixel reconstruction loss
"""
images_test_batch, aux_data_test_batch = test_data_batch
_, w, h, _ = images_test_batch.get_shape()
# get latent samples for test data from GP posterior
p_m, p_v = [], []
for l in range(qnet_mu.get_shape()[1]): # iterate over latent dimensions
p_m_l, p_v_l, _, _ = svgp.approximate_posterior_params(index_points_test=aux_data_test_batch,
index_points_train=aux_data_train,
y=qnet_mu[:, l], noise=qnet_var[:, l])
p_m.append(p_m_l)
p_v.append(p_v_l)
p_m = tf.stack(p_m, axis=1)
p_v = tf.stack(p_v, axis=1)
epsilon = tf.random.normal(shape=tf.shape(p_m), dtype=tf.float64)
latent_samples = p_m + epsilon * tf.sqrt(p_v)
# predict (decode) latent images.
# ===============================================
# Since this is generation (testing pipeline), could add \sigma_y to images
recon_images_test_logits = vae.decode(latent_samples)
# Gaussian observational likelihood, no variance
recon_images_test = recon_images_test_logits
# Bernoulli observational likelihood
# recon_images_test = tf.nn.sigmoid(recon_images_test_logits)
# Gaussian observational likelihood, fixed variance \sigma_y
# recon_images_test = recon_images_test_logits + tf.random.normal(shape=tf.shape(recon_images_test_logits),
# mean=0.0, stddev=0.04, dtype=tf.float64)
    # MSE loss for CGEN
recon_loss = tf.reduce_sum((images_test_batch - recon_images_test_logits) ** 2)
# report per pixel loss
K = tf.cast(w, dtype=tf.float64) * tf.cast(h, dtype=tf.float64)
recon_loss = recon_loss / K
# ===============================================
return recon_images_test, recon_loss
|
6603db14abbd7bbb2ba8965ee43d876d4a607b0a
| 3,648,474
|
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""
Set up Strava Home Assistant config entry initiated through the HASS-UI.
"""
hass.data.setdefault(DOMAIN, {})
# OAuth Stuff
try:
implementation = await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass=hass, config_entry=entry
)
except ValueError:
implementation = config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
entry.data[CONF_CLIENT_ID],
entry.data[CONF_CLIENT_SECRET],
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
)
OAuth2FlowHandler.async_register_implementation(hass, implementation)
oauth_websession = config_entry_oauth2_flow.OAuth2Session(
hass, entry, implementation
)
await oauth_websession.async_ensure_token_valid()
# webhook view to get notifications for strava activity updates
def strava_update_event_factory(data, event_type=CONF_STRAVA_DATA_UPDATE_EVENT):
hass.bus.fire(event_type, data)
strava_webhook_view = StravaWebhookView(
oauth_websession=oauth_websession,
event_factory=strava_update_event_factory,
host=get_url(hass, allow_internal=False, allow_ip=False),
hass=hass,
)
hass.http.register_view(strava_webhook_view)
# event listeners
async def strava_startup_functions():
await renew_webhook_subscription(
hass=hass, entry=entry, webhook_view=strava_webhook_view
)
await strava_webhook_view.fetch_strava_data()
return True
def ha_start_handler(event):
"""
called when HA rebooted
i.e. after all webhook views have been registered and are available
"""
hass.async_create_task(strava_startup_functions())
def component_reload_handler(event):
"""called when the component reloads"""
hass.async_create_task(strava_startup_functions())
async def async_strava_config_update_handler():
"""called when user changes sensor configs"""
await strava_webhook_view.fetch_strava_data()
return
def strava_config_update_handler(event):
hass.async_create_task(async_strava_config_update_handler())
def core_config_update_handler(event):
"""
handles relevant changes to the HA core config.
In particular, for URL and Unit System changes
"""
if "external_url" in event.data.keys():
hass.async_create_task(
renew_webhook_subscription(
hass=hass, entry=entry, webhook_view=strava_webhook_view
)
)
if "unit_system" in event.data.keys():
hass.async_create_task(strava_webhook_view.fetch_strava_data())
# register event listeners
hass.data[DOMAIN]["remove_update_listener"] = []
# if hass.bus.async_listeners().get(EVENT_HOMEASSISTANT_START, 0) < 1:
hass.data[DOMAIN]["remove_update_listener"].append(
hass.bus.async_listen(EVENT_HOMEASSISTANT_START, ha_start_handler)
)
# if hass.bus.async_listeners().get(EVENT_CORE_CONFIG_UPDATE, 0) < 1:
hass.data[DOMAIN]["remove_update_listener"].append(
hass.bus.async_listen(EVENT_CORE_CONFIG_UPDATE, core_config_update_handler)
)
if hass.bus.async_listeners().get(CONF_STRAVA_RELOAD_EVENT, 0) < 1:
hass.data[DOMAIN]["remove_update_listener"].append(
hass.bus.async_listen(CONF_STRAVA_RELOAD_EVENT, component_reload_handler)
)
if hass.bus.async_listeners().get(CONF_STRAVA_CONFIG_UPDATE_EVENT, 0) < 1:
hass.data[DOMAIN]["remove_update_listener"].append(
hass.bus.async_listen(
CONF_STRAVA_CONFIG_UPDATE_EVENT, strava_config_update_handler
)
)
hass.data[DOMAIN]["remove_update_listener"] = [
entry.add_update_listener(strava_config_update_helper)
]
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
|
9ba10cf00f447d0e2038b8a542a45166c264b801
| 3,648,475
|
from typing import List
from typing import Tuple
from typing import Union
def normalize_boxes(boxes: List[Tuple], img_shape: Union[Tuple, List]) -> List[Tuple]:
"""
Transform bounding boxes back to yolo format
"""
img_height = img_shape[1]
img_width = img_shape[2]
boxes_ = []
for i in range(len(boxes)):
x1, y1, x2, y2 = boxes[i]
width = x2 - x1
height = y2 - y1
x_mid = x1 + 0.5 * width
y_mid = y1 + 0.5 * height
box = [
x_mid / img_width,
y_mid / img_height,
width / img_width,
height / img_height,
]
boxes_.append(box)
return boxes_
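A small usage sketch, assuming img_shape is channels-first (C, H, W) as the indexing above implies.

boxes = [(50, 25, 150, 75)]  # (x1, y1, x2, y2) in pixels
print(normalize_boxes(boxes, (3, 100, 200)))
# [[0.5, 0.5, 0.5, 0.5]]  -> (x_mid, y_mid, width, height) as fractions of the image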
|
086e0b069d06a4718e8ffd37189cf3d08c41d19f
| 3,648,476
|
import copy
def _make_reference_filters(filters, ref_dimension, offset_func):
"""
Copies and replaces the reference dimension's definition in all of the filters applied to a dataset query.
This is used to shift the dimension filters to fit the reference window.
    :param filters: the filters applied to the dataset query
    :param ref_dimension: the reference dimension whose filter definitions should be replaced
    :param offset_func: function applied to each matching filter's start/stop to shift it into the reference window
    :return: list of filters with the reference dimension's filters shifted
"""
reference_filters = []
for ref_filter in filters:
if ref_filter.field is ref_dimension:
# NOTE: Important to apply the offset function to the start and stop properties because the date math can
# become expensive over many rows
ref_filter = copy.copy(ref_filter)
ref_filter.start = offset_func(ref_filter.start)
ref_filter.stop = offset_func(ref_filter.stop)
reference_filters.append(ref_filter)
return reference_filters
|
eeeeb74bb3618c87f3540de5b44970e197885dc6
| 3,648,477
|
def load_plane_dataset(name, num_points, flip_axes=False):
"""Loads and returns a plane dataset.
Args:
name: string, the name of the dataset.
        num_points: int, the number of points the dataset should have.
flip_axes: bool, flip x and y axes if True.
Returns:
A Dataset object, the requested dataset.
Raises:
        ValueError: If `name` is an unknown dataset.
"""
try:
return {
'gaussian': GaussianDataset,
'crescent': CrescentDataset,
'crescent_cubed': CrescentCubedDataset,
'sine_wave': SineWaveDataset,
'abs': AbsDataset,
'sign': SignDataset,
'four_circles': FourCircles,
'diamond': DiamondDataset,
'two_spirals': TwoSpiralsDataset,
'checkerboard': CheckerboardDataset,
'rings': RingsDataset,
            '8-gaussians': EightGaussiansDataset,
}[name](num_points=num_points, flip_axes=flip_axes)
except KeyError:
raise ValueError('Unknown dataset: {}'.format(name))
|
aee32a6aa7f2be6ae515d6f3b1e27cda4d0f705e
| 3,648,479
|
def get_toxic(annotated_utterance, probs=True, default_probs=None, default_labels=None):
"""Function to get toxic classifier annotations from annotated utterance.
Args:
annotated_utterance: dictionary with annotated utterance, or annotations
probs: return probabilities or not
        default_probs: default probabilities to return; if None, an empty dict is used
        default_labels: default labels to return; if None, an empty list is used
    Returns:
        dictionary with toxic probabilities if probs == True, or toxic labels if probs != True
"""
default_probs = {} if default_probs is None else default_probs
default_labels = [] if default_labels is None else default_labels
return _get_etc_model(
annotated_utterance,
"toxic_classification",
probs=probs,
default_probs=default_probs,
default_labels=default_labels,
)
|
ac69075af2edd9cdc84383054ba9ebe700dddb58
| 3,648,480
|
def compute_energy_lapkmode(X,C,l,W,sigma,bound_lambda):
"""
compute Laplacian K-modes energy in discrete form
"""
    e_dist = ecdist(X, C, squared=True)
    g_dist = np.exp(-e_dist / (2 * sigma ** 2))
    pairwise = 0
    Index_list = np.arange(X.shape[0])
    for k in range(C.shape[0]):
        tmp = np.asarray(np.where(l == k))
        if tmp.size != 1:
            tmp = tmp.squeeze()
        else:
            tmp = tmp[0]
        # print('length of tmp ', len(tmp))
        # pairwise = pairwise - W[tmp, :][:, tmp].sum()  # With potts values -1/0
        nonmembers = np.in1d(Index_list, tmp, invert=True)  # With potts values 0/1
        pairwise = pairwise + W[tmp, :][:, nonmembers].sum()
E_kmode = compute_km_energy(l,g_dist.T)
print(E_kmode)
E = (bound_lambda)*pairwise + E_kmode
return E
|
3fc5c2f9695e33eb3d1ac42a3172c30f1d81d23b
| 3,648,481
|
def calc_2d_wave_map(wave_grid, x_dms, y_dms, tilt, oversample=2, padding=10, maxiter=5, dtol=1e-2):
"""Compute the 2D wavelength map on the detector.
:param wave_grid: The wavelength corresponding to the x_dms, y_dms, and tilt values.
:param x_dms: the trace x position on the detector in DMS coordinates.
:param y_dms: the trace y position on the detector in DMS coordinates.
:param tilt: the trace tilt angle in degrees.
:param oversample: the oversampling factor of the input coordinates.
:param padding: the native pixel padding around the edge of the detector.
:param maxiter: the maximum number of iterations used when solving for the wavelength at each pixel.
:param dtol: the tolerance of the iterative solution in pixels.
:type wave_grid: array[float]
:type x_dms: array[float]
:type y_dms: array[float]
:type tilt: array[float]
:type oversample: int
:type padding: int
:type maxiter: int
:type dtol: float
:returns: wave_map_2d - an array containing the wavelength at each pixel on the detector.
:rtype: array[float]
"""
os = np.copy(oversample)
xpad = np.copy(padding)
ypad = np.copy(padding)
# No need to compute wavelengths across the entire detector, slightly larger than SUBSTRIP256 will do.
dimx, dimy = 2048, 300
y_dms = y_dms + (dimy - 2048) # Adjust y-coordinate to area of interest.
# Generate the oversampled grid of pixel coordinates.
x_vec = np.arange((dimx + 2*xpad)*os)/os - (os - 1)/(2*os) - xpad
y_vec = np.arange((dimy + 2*ypad)*os)/os - (os - 1)/(2*os) - ypad
x_grid, y_grid = np.meshgrid(x_vec, y_vec)
# Iteratively compute the wavelength at each pixel.
delta_x = 0.0 # A shift in x represents a shift in wavelength.
for niter in range(maxiter):
# Assume all y have same wavelength.
wave_iterated = np.interp(x_grid - delta_x, x_dms[::-1], wave_grid[::-1]) # Invert arrays to get increasing x.
# Compute the tilt angle at the wavelengths.
tilt_tmp = np.interp(wave_iterated, wave_grid, tilt)
# Compute the trace position at the wavelengths.
x_estimate = np.interp(wave_iterated, wave_grid, x_dms)
y_estimate = np.interp(wave_iterated, wave_grid, y_dms)
# Project that back to pixel coordinates.
x_iterated = x_estimate + (y_grid - y_estimate)*np.tan(np.deg2rad(tilt_tmp))
# Measure error between requested and iterated position.
delta_x = delta_x + (x_iterated - x_grid)
# If the desired precision has been reached end iterations.
if np.all(np.abs(x_iterated - x_grid) < dtol):
break
# Evaluate the final wavelength map, this time setting out-of-bounds values to NaN.
wave_map_2d = np.interp(x_grid - delta_x, x_dms[::-1], wave_grid[::-1], left=np.nan, right=np.nan)
# Extend to full detector size.
tmp = np.full((os*(dimx + 2*xpad), os*(dimx + 2*xpad)), fill_value=np.nan)
tmp[-os*(dimy + 2*ypad):] = wave_map_2d
wave_map_2d = tmp
return wave_map_2d
|
727002a0cc61f6219c92d6db3d31eb653f849f03
| 3,648,482
|
def export_data():
"""Exports data to a file"""
data = {}
data['adgroup_name'] = request.args.get('name')
if data['adgroup_name']:
data['sitelist'] = c['adgroups'].find_one({'name':data['adgroup_name']}, {'sites':1})['sites']
return render_template("export.html", data=data)
|
a6b43f90907e174f07773b0ed7603a48a3ff35ca
| 3,648,484
|
import cv2

def thresh_bin(img, thresh_limit=60):
    """ Threshold using blue channel """
    b, g, r = cv2.split(img)
    # mask = get_salient(r)
    # use the thresh_limit argument (the original hard-coded 50, leaving the parameter unused)
    mask = cv2.threshold(b, thresh_limit, 255, cv2.THRESH_BINARY_INV)[1]
    return mask
|
3660179d1e1c411feb44e993a8ab94f10c63d6e4
| 3,648,485
|
from typing import Any
def get_aux():
"""Get the entire auxiliary stack. Not commonly used."""
@parser
def g(c: Cursor, a: Any):
return a, c, a
return g
|
b345901f4987e8849fbe35c0c997f38480d79f04
| 3,648,486
|
def _destupidize_dict(mylist):
"""The opposite of _stupidize_dict()"""
output = {}
for item in mylist:
output[item['key']] = item['value']
return output
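Usage sketch: collapses a list of {'key': ..., 'value': ...} records back into a plain dict.

records = [{'key': 'host', 'value': 'localhost'}, {'key': 'port', 'value': 8080}]
print(_destupidize_dict(records))  # {'host': 'localhost', 'port': 8080}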
|
f688e25a9d308e39f47390fef493ab80d303ea15
| 3,648,487
|
def equipment_add(request, type_, id_=None):
"""Adds an equipment."""
template = {}
if request.method == 'POST':
form = EquipmentForm(request.POST)
if form.is_valid():
form.save(request.user, id_)
return redirect('settings_equipment')
template['form'] = form
elif id_:
template['form'] = EquipmentForm(instance=Equipment.objects.get(pk=id_))
else:
template['form'] = EquipmentForm()
return render(request, 'settings/equipment_add.html', template)
|
a8f2fce6c9aa64316edb96df9597fbfb516839a3
| 3,648,488
|
def _parse_text(val, **options):
"""
:return: Parsed value or value itself depends on 'ac_parse_value'
"""
if val and options.get('ac_parse_value', False):
return parse_single(val)
return val
|
cbd0d0b65237e8d3f817aa0bae1861f379a68b26
| 3,648,489
|
import numpy as np

def get_rotation_matrix(rotation_angles):
"""Get the rotation matrix from euler's angles
Parameters
-----
rotation_angles: array-like or list
Three euler angles in the order [sai, theta, phi] where
sai = rotation along the x-axis
theta = rotation along the y-axis
phi = rotation along the z-axis
Returns
-----
A rotation matrix of shape (3, 3)
    References
-----
Computing Euler angles from a rotation matrix by Gregory G. Slabaugh
https://www.gregslabaugh.net/publications/euler.pdf
"""
sai = rotation_angles[0] # s
theta = rotation_angles[1] # t
phi = rotation_angles[2] # p
# find all the required sines and cosines
cs = np.cos(sai)
ct = np.cos(theta)
cp = np.cos(phi)
ss = np.sin(sai)
st = np.sin(theta)
sp = np.sin(phi)
    # construct the full rotation matrix R = Rz(phi) * Ry(theta) * Rx(sai)
    # (third row follows the cited Slabaugh reference: [-st, ss*ct, cs*ct])
    rotation_matrix = np.array([
        [ct*cp, ss*st*cp-cs*sp, cs*st*cp+ss*sp],
        [ct*sp, ss*st*sp+cs*cp, cs*st*sp-ss*cp],
        [  -st,          ss*ct,          cs*ct]
    ])
return rotation_matrix
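A quick sanity check of the corrected third row: with sai = theta = 0 and phi = 90 degrees the matrix reduces to a rotation about the z-axis and is orthogonal.

R = get_rotation_matrix([0.0, 0.0, np.pi / 2])
print(np.round(R, 3))        # [[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]]
print(np.round(R @ R.T, 3))  # identity, as any proper rotation matrix must satisfy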
|
2965d1ce5c688e794f7fce6e51afd2e558c1bab7
| 3,648,491
|
def _metric_list_for_check(maas_store, entity, check):
"""
Computes the metrics list for a given check.
Remote checks return a metric for each monitoring zone and
each type of metric for the check type. Agent checks return
a metric for each metric type on the check type. Check types
that Mimic doesn't know about generate an empty list.
"""
if check.type not in maas_store.check_types:
return []
if REMOTE_CHECK_TYPE_REGEX.match(check.type):
return [{'name': '{0}.{1}'.format(mz, metric.name),
'type': metric.type,
'unit': metric.unit}
for metric in maas_store.check_types[check.type].metrics
for mz in check.monitoring_zones_poll]
return [{'name': metric.name,
'type': metric.type,
'unit': metric.unit}
for metric in maas_store.check_types[check.type].metrics]
|
c295f976c8c85d60af8f6e734f666381bc0186d2
| 3,648,492
|
def filter_pdf_files(filepaths):
""" Returns a filtered list with strings that end with '.pdf'
Keyword arguments:
filepaths -- List of filepath strings
"""
return [x for x in filepaths if x.endswith('.pdf')]
|
3f44b3af9859069de866cec3fac33a9e9de5439d
| 3,648,494
|
def hue_quadrature(h: FloatingOrArrayLike) -> FloatingOrNDArray:
"""
Return the hue quadrature from given hue :math:`h` angle in degrees.
Parameters
----------
h
Hue :math:`h` angle in degrees.
Returns
-------
:class:`numpy.floating` or :class:`numpy.ndarray`
Hue quadrature.
Examples
--------
>>> hue_quadrature(196.3185839) # doctest: +ELLIPSIS
237.6052911...
"""
h = as_float_array(h)
h_i = HUE_DATA_FOR_HUE_QUADRATURE["h_i"]
e_i = HUE_DATA_FOR_HUE_QUADRATURE["e_i"]
H_i = HUE_DATA_FOR_HUE_QUADRATURE["H_i"]
    # :math:`h_p` = :math:`h_z` + 360 if :math:`h_z` <= :math:`h_1`, i.e. h_i[0]
h[h <= h_i[0]] += 360
# *np.searchsorted* returns an erroneous index if a *nan* is used as input.
h[np.asarray(np.isnan(h))] = 0
i = as_int_array(np.searchsorted(h_i, h, side="left") - 1)
h_ii = h_i[i]
e_ii = e_i[i]
H_ii = H_i[i]
h_ii1 = h_i[i + 1]
e_ii1 = e_i[i + 1]
H = H_ii + (
(100 * (h - h_ii) / e_ii) / ((h - h_ii) / e_ii + (h_ii1 - h) / e_ii1)
)
return as_float(H)
|
df120ae34dfc45ecbb818718885cbbb501667bdd
| 3,648,496
|
def aa_find_devices_ext (devices, unique_ids):
"""usage: (int return, u16[] devices, u32[] unique_ids) = aa_find_devices_ext(u16[] devices, u32[] unique_ids)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
    as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function.
Additionally, for arrays that are filled by the API function, an
integer can be passed in place of the array argument and the API
will automatically create an array of that length. All output
arrays, whether passed in or generated, are passed back in the
returned tuple."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# devices pre-processing
__devices = isinstance(devices, int)
if __devices:
(devices, num_devices) = (array_u16(devices), devices)
else:
(devices, num_devices) = isinstance(devices, ArrayType) and (devices, len(devices)) or (devices[0], min(len(devices[0]), int(devices[1])))
if devices.typecode != 'H':
raise TypeError("type for 'devices' must be array('H')")
# unique_ids pre-processing
__unique_ids = isinstance(unique_ids, int)
if __unique_ids:
(unique_ids, num_ids) = (array_u32(unique_ids), unique_ids)
else:
(unique_ids, num_ids) = isinstance(unique_ids, ArrayType) and (unique_ids, len(unique_ids)) or (unique_ids[0], min(len(unique_ids[0]), int(unique_ids[1])))
if unique_ids.typecode != 'I':
raise TypeError("type for 'unique_ids' must be array('I')")
# Call API function
(_ret_) = api.py_aa_find_devices_ext(num_devices, num_ids, devices, unique_ids)
# devices post-processing
if __devices: del devices[max(0, min(_ret_, len(devices))):]
# unique_ids post-processing
if __unique_ids: del unique_ids[max(0, min(_ret_, len(unique_ids))):]
return (_ret_, devices, unique_ids)
|
1b84cfc3d6fd52f786c2191fde4d37a6287e8b87
| 3,648,497
|
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
if not cfg.TEST.HAS_RPN:
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
#print ('lll: ', blobs['rois'])
return blobs, im_scale_factors
|
d4adb2e049a86fe1a42aab6dea52b55aabeeb0d2
| 3,648,500
|
def string_limiter(text, limit):
    """
    Truncates the string at the first space at or after the character limit.
    Arguments:
    text -- The string to truncate
    limit -- The number of characters allowed before truncation
    """
    for i in range(len(text)):
        if i >= limit and text[i] == " ":
            return text[:i]
    # No space found past the limit: return the text unchanged rather
    # than silently dropping the final character.
    return text
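Usage sketch: the cut happens at the first space at or after limit, so words are never split mid-way.

print(string_limiter("the quick brown fox jumps", 6))  # 'the quick'
print(string_limiter("no-spaces-here", 6))             # 'no-spaces-here' (unchanged)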
|
1ae70d2115be72ec628f38b2c623064607f534ef
| 3,648,501
|
import numpy as np

def in_ellipse(xy_list, width, height, angle=0, xy=[0, 0]):
    """
    Find data points inside an ellipse and return index list
    Parameters:
    xy_list: Points to be detected.
    width: Width of the ellipse
    height: Height of the ellipse
    angle: anti-clockwise rotation angle in degrees
    xy: the origin of the ellipse
    """
    if isinstance(xy_list, list):
        xy_list = np.array(xy_list)
    if not isinstance(xy_list, np.ndarray):
        raise Exception(f"Unrecognized data type: {type(xy_list)}, "
                        f"should be list or np.ndarray")
new_xy_list = xy_list.copy()
new_xy_list = new_xy_list - xy
#------------ define coordinate conversion matrix----------
theta = angle/180*np.pi # degree to radians
con_mat = np.zeros((2,2))
con_mat[:,0] = [np.cos(theta),np.sin(theta)]
con_mat[:,1] = [np.sin(theta),-np.cos(theta)]
tmp = np.matmul(con_mat,new_xy_list.T)
con_xy_list = tmp.T
#------------ check one by one ----------------------------
idxs = []
for i,[x,y] in enumerate(con_xy_list):
if ((x/(width/2))**2+(y/(height/2))**2) < 1:
idxs.append(i)
return idxs
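Usage sketch with an axis-aligned ellipse of width 4 and height 2 centred at the origin.

pts = [[1, 0], [3, 0], [0, 0.9]]
print(in_ellipse(pts, width=4, height=2))  # [0, 2]: only the middle point lies outside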
|
6540520caa6eef12871847f80d3ed42279b0c1a0
| 3,648,502
|
import logging

import numpy as np
import tensorflow as tf  # the TF1-style Graph/Session API is assumed below
def get_real_images(dataset,
num_examples,
split=None,
failure_on_insufficient_examples=True):
"""Get num_examples images from the given dataset/split.
Args:
dataset: `ImageDataset` object.
num_examples: Number of images to read.
split: Split of the dataset to use. If None will use the default split for
eval defined by the dataset.
      failure_on_insufficient_examples: If True, raise an exception if the
        dataset/split does not have enough images. Otherwise log an error and
        return fewer images.
    Returns:
      4-D NumPy array with images with values in [0, 256].
    Raises:
      DatasetOutOfRangeError: If the dataset/split does not have the requested
        number of images and `failure_on_insufficient_examples` is True.
"""
logging.info("Start loading real data.")
with tf.Graph().as_default():
ds = dataset.eval_input_fn(split=split)
# Get real images from the dataset. In the case of a 1-channel
# dataset (like MNIST) convert it to 3 channels.
next_batch = ds.make_one_shot_iterator().get_next()[0]
shape = [num_examples] + next_batch.shape.as_list()
is_single_channel = shape[-1] == 1
if is_single_channel:
shape[-1] = 3
real_images = np.empty(shape, dtype=np.float32)
with tf.Session() as sess:
for i in range(num_examples):
try:
b = sess.run(next_batch)
b *= 255.0
if is_single_channel:
b = np.tile(b, [1, 1, 3])
real_images[i] = b
except tf.errors.OutOfRangeError:
logging.error("Reached the end of dataset. Read: %d samples.", i)
real_images = real_images[:i]
break
if real_images.shape[0] != num_examples:
if failure_on_insufficient_examples:
raise DatasetOutOfRangeError("Not enough examples in the dataset %s: %d / %d" %
(dataset, real_images.shape[0], num_examples))
else:
logging.error("Not enough examples in the dataset %s: %d / %d", dataset,
real_images.shape[0], num_examples)
logging.info("Done loading real data.")
return real_images
|
0f9be93076b8d94b3285a1f5badb8952788e2a82
| 3,648,503
|
from typing import Callable
from typing import Any
import websockets
async def call(fn: Callable, *args, **kwargs) -> Any:
"""
Submit function `fn` for remote execution with arguments `args` and `kwargs`
"""
async with websockets.connect(WS_SERVER_URI) as websocket:
task = serialize((fn, args, kwargs))
await websocket.send(task)
message = await websocket.recv()
results = deserialize(message)
if isinstance(results, TaskExecutionError):
raise results
return results
|
073090186e4a325eb32b44fb44c1628c6842c398
| 3,648,504
|
import numpy
def wrap_array_func(func):
"""
Returns a version of the function func() that works even when
func() is given a NumPy array that contains numbers with
uncertainties.
func() is supposed to return a NumPy array.
This wrapper is similar to uncertainties.wrap(), except that it
handles an array argument instead of float arguments.
func -- version that takes and returns a single NumPy array.
"""
@uncertainties.set_doc("""\
Version of %s(...) that works even when its first argument is a NumPy
array that contains numbers with uncertainties.
Warning: elements of the first argument array that are not
AffineScalarFunc objects must not depend on uncertainties.Variable
objects in any way. Otherwise, the dependence of the result in
uncertainties.Variable objects will be incorrect.
Original documentation:
%s""" % (func.__name__, func.__doc__))
def wrapped_func(arr, *args):
# Nominal value:
arr_nominal_value = nominal_values(arr)
func_nominal_value = func(arr_nominal_value, *args)
# The algorithm consists in numerically calculating the derivatives
# of func:
# Variables on which the array depends are collected:
variables = set()
for element in arr.flat:
# floats, etc. might be present
if isinstance(element, uncertainties.AffineScalarFunc):
                variables |= set(element.derivatives.keys())
# If the matrix has no variables, then the function value can be
# directly returned:
if not variables:
return func_nominal_value
# Calculation of the derivatives of each element with respect
# to the variables. Each element must be independent of the
# others. The derivatives have the same shape as the output
# array (which might differ from the shape of the input array,
# in the case of the pseudo-inverse).
derivatives = numpy.vectorize(lambda _: {})(func_nominal_value)
for var in variables:
# A basic assumption of this package is that the user
# guarantees that uncertainties cover a zone where
# evaluated functions are linear enough. Thus, numerical
# estimates of the derivative should be good over the
# standard deviation interval. This is true for the
# common case of a non-zero standard deviation of var. If
# the standard deviation of var is zero, then var has no
# impact on the uncertainty of the function func being
# calculated: an incorrect derivative has no impact. One
# scenario can give incorrect results, however, but it
# should be extremely uncommon: the user defines a
# variable x with 0 standard deviation, sets y = func(x)
# through this routine, changes the standard deviation of
# x, and prints y; in this case, the uncertainty on y
# might be incorrect, because this program had no idea of
# the scale on which func() is linear, when it calculated
# the numerical derivative.
# The standard deviation might be numerically too small
# for the evaluation of the derivative, though: we set the
# minimum variable shift.
shift_var = max(var._std_dev/1e5, 1e-8*abs(var._nominal_value))
# An exceptional case is that of var being exactly zero.
# In this case, an arbitrary shift is used for the
# numerical calculation of the derivative. The resulting
# derivative value might be quite incorrect, but this does
# not matter as long as the uncertainty of var remains 0,
# since it is, in this case, a constant.
if not shift_var:
shift_var = 1e-8
# Shift of all the elements of arr when var changes by shift_var:
shift_arr = array_derivative(arr, var)*shift_var
# Origin value of array arr when var is shifted by shift_var:
shifted_arr_values = arr_nominal_value + shift_arr
func_shifted = func(shifted_arr_values, *args)
numerical_deriv = (func_shifted-func_nominal_value)/shift_var
# Update of the list of variables and associated
# derivatives, for each element:
for (derivative_dict, derivative_value) in (
zip(derivatives.flat, numerical_deriv.flat)):
if derivative_value:
derivative_dict[var] = derivative_value
        # numbers with uncertainties are built from the result:
return numpy.vectorize(uncertainties.AffineScalarFunc)(
func_nominal_value, derivatives)
# It is easier to work with wrapped_func, which represents a
# wrapped version of 'func', when it bears the same name as
# 'func' (the name is used by repr(wrapped_func)).
wrapped_func.__name__ = func.__name__
return wrapped_func
|
7cbd33599b62df096db3ce84968cc13f24512fc0
| 3,648,505
|
def checkCrash(player, upperPipes, lowerPipes):
"""returns True if player collides with base or pipes."""
pi = player['index']
player['w'] = fImages['player'][0].get_width()
player['h'] = fImages['player'][0].get_height()
# if player crashes into ground
if player['y'] + player['h'] >= nBaseY - 1:
return [True, True]
else:
playerRect = pygame.Rect(player['x'], player['y'],
player['w'], player['h'])
pipeW = fImages['pipe'][0].get_width()
pipeH = fImages['pipe'][0].get_height()
for uPipe in upperPipes:
# pipe rect
uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
# player and pipe hitmasks
pHitMask = fHitMask['player'][pi]
uHitmask = fHitMask['pipe'][0]
# if bird collided with pipe
uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
if uCollide:
# for fury mode we want to break the pipe so we
# must return which pipe is colliding (lower or upper)
if bFuryMode:
return [True, False, True, uPipe]
# normal mode
return [True, False]
for lPipe in lowerPipes:
# pipe rect
lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
# player and pipe hitmasks
pHitMask = fHitMask['player'][pi]
lHitmask = fHitMask['pipe'][0]
# if bird collided with pipe
lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
if lCollide:
# for fury mode we want to break the pipe so we
# must return which pipe is colliding (lower or upper)
if bFuryMode:
return [True, False, False, lPipe]
# normal mode
return [True, False]
return [False, False]
|
e638f0ae40610fc0c4097998e8fa3df0dc6a5d56
| 3,648,506
|
def convert_gwp(context, qty, to):
"""Helper for :meth:`convert_unit` to perform GWP conversions."""
# Remove a leading 'gwp_' to produce the metric name
metric = context.split('gwp_')[1] if context else context
# Extract the species from *qty* and *to*, allowing supported aliases
species_from, units_from = extract_species(qty[1])
species_to, units_to = extract_species(to)
try:
# Convert using a (magnitude, unit) tuple with only units, and explicit
# input and output units
result = iam_units.convert_gwp(metric, (qty[0], units_from),
species_from, species_to)
except (AttributeError, ValueError):
# Missing *metric*, or *species_to* contains invalid units. pyam
# promises UndefinedUnitError in these cases. Use a subclass (above) to
# add a usage hint.
raise UndefinedUnitError(species_to) from None
except pint.DimensionalityError:
# Provide an exception with the user's inputs
raise pint.DimensionalityError(qty[1], to) from None
# Other exceptions are not caught and will pass up through convert_unit()
if units_to:
# Also convert the units
result = result.to(units_to)
else:
# *to* was only a species name. Provide units based on input and the
# output species name.
to = iam_units.format_mass(result, species_to, spec=':~')
return result, to
|
23d47e3b93f1ed694fbb5187433af5c8caa72dc8
| 3,648,508
|
import random

import numpy as np
def getAction(board, policy, action_set):
"""
return action for policy, chooses max from classifier output
"""
# if policy doesn't exist yet, choose action randomly, else get from policy model
    if policy is None:
valid_actions = [i for i in action_set if i[0] > -1]
if len(valid_actions) == 0:
return (-1,-1,0)
rand_i = random.randint(0, len(valid_actions)-1)
# du_policy = [-12.63, 6.60, -9.22,-19.77,-13.08,-10.49,-1.61, -24.04]
# action = nextInitialMove(du_policy, board)
action = valid_actions[rand_i]
else:
piece = [0]*7 # one hot encode piece
piece[board.currentShape.shape -1] = 1
tot_features = np.append(board.getFeatures(), [piece])
action_scores = policy.predict([tot_features])
best_scores = np.argwhere(action_scores == np.amax(action_scores)).flatten().tolist()
max_score = np.random.choice(best_scores)
action = action_set[max_score]
return action
|
fddb9160f0571dfaf50f945c05d5dbb176465180
| 3,648,510
|
def f_assert_must_between(value_list, args):
"""
    Check that each element of the list is an int/float (or numeric string) within the range given by args.
    :param value_list: list of values to check
    :param args: two-element range list [low, high]
    :return: raises FeatureProcessError on failure, otherwise returns the original list
example:
:value_list [2, 2, 3]
:args [1,3]
:value_list ['-2', '-3', 3]
:args ['-5',3]
"""
assert len(args) == 2
for value in value_list:
if not (str(value).count('.') <= 1 and str(value).replace('.', '').lstrip('-').isdigit()
and float(args[0]) <= float(value) <= float(args[1])):
raise FeatureProcessError('%s f_assert_must_between %s Error' % (value_list, args))
return value_list
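Usage sketch; FeatureProcessError is assumed to be defined elsewhere in the original module.

print(f_assert_must_between([2, 2.5, '3'], [1, 3]))       # passes -> [2, 2.5, '3']
print(f_assert_must_between(['-2', '-3', 3], ['-5', 3]))  # numeric strings also pass
# f_assert_must_between([2, 4], [1, 3]) would raise FeatureProcessError (4 > 3)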
|
6e082f3df39509f0823862497249a06080bd7649
| 3,648,511
|
from scipy.stats import zscore
from scipy.ndimage import label
def annotate_muscle_zscore(raw, threshold=4, ch_type=None, min_length_good=0.1,
filter_freq=(110, 140), n_jobs=1, verbose=None):
"""Create annotations for segments that likely contain muscle artifacts.
Detects data segments containing activity in the frequency range given by
``filter_freq`` whose envelope magnitude exceeds the specified z-score
threshold, when summed across channels and divided by ``sqrt(n_channels)``.
False-positive transient peaks are prevented by low-pass filtering the
resulting z-score time series at 4 Hz. Only operates on a single channel
type, if ``ch_type`` is ``None`` it will select the first type in the list
``mag``, ``grad``, ``eeg``.
See :footcite:`Muthukumaraswamy2013` for background on choosing
``filter_freq`` and ``threshold``.
Parameters
----------
raw : instance of Raw
Data to estimate segments with muscle artifacts.
threshold : float
The threshold in z-scores for marking segments as containing muscle
activity artifacts.
ch_type : 'mag' | 'grad' | 'eeg' | None
The type of sensors to use. If ``None`` it will take the first type in
``mag``, ``grad``, ``eeg``.
min_length_good : float | None
The shortest allowed duration of "good data" (in seconds) between
adjacent annotations; shorter segments will be incorporated into the
        surrounding annotations. ``None`` is equivalent to ``0``.
Default is ``0.1``.
filter_freq : array-like, shape (2,)
The lower and upper frequencies of the band-pass filter.
Default is ``(110, 140)``.
%(n_jobs)s
%(verbose)s
Returns
-------
annot : mne.Annotations
Periods with muscle artifacts annotated as BAD_muscle.
scores_muscle : array
Z-score values averaged across channels for each sample.
References
----------
.. footbibliography::
"""
raw_copy = raw.copy()
if ch_type is None:
raw_ch_type = raw_copy.get_channel_types()
if 'mag' in raw_ch_type:
ch_type = 'mag'
elif 'grad' in raw_ch_type:
ch_type = 'grad'
elif 'eeg' in raw_ch_type:
ch_type = 'eeg'
else:
raise ValueError('No M/EEG channel types found, please specify a'
' ch_type or provide M/EEG sensor data')
logger.info('Using %s sensors for muscle artifact detection'
% (ch_type))
if ch_type in ('mag', 'grad'):
raw_copy.pick_types(meg=ch_type, ref_meg=False)
else:
ch_type = {'meg': False, ch_type: True}
raw_copy.pick_types(**ch_type)
raw_copy.filter(filter_freq[0], filter_freq[1], fir_design='firwin',
pad="reflect_limited", n_jobs=n_jobs)
raw_copy.apply_hilbert(envelope=True, n_jobs=n_jobs)
data = raw_copy.get_data(reject_by_annotation="NaN")
nan_mask = ~np.isnan(data[0])
sfreq = raw_copy.info['sfreq']
art_scores = zscore(data[:, nan_mask], axis=1)
art_scores = art_scores.sum(axis=0) / np.sqrt(art_scores.shape[0])
art_scores = filter_data(art_scores, sfreq, None, 4)
scores_muscle = np.zeros(data.shape[1])
scores_muscle[nan_mask] = art_scores
art_mask = scores_muscle > threshold
# return muscle scores with NaNs
scores_muscle[~nan_mask] = np.nan
# remove artifact free periods shorter than min_length_good
min_length_good = 0 if min_length_good is None else min_length_good
min_samps = min_length_good * sfreq
comps, num_comps = label(art_mask == 0)
for com in range(1, num_comps + 1):
l_idx = np.nonzero(comps == com)[0]
if len(l_idx) < min_samps:
art_mask[l_idx] = True
annot = _annotations_from_mask(raw_copy.times,
art_mask, 'BAD_muscle',
orig_time=raw.info['meas_date'])
_adjust_onset_meas_date(annot, raw)
return annot, scores_muscle
|
a09b2b9098c7dfc48b29548691e3c6c524a6b6bf
| 3,648,512
|
import numpy as np

def circ_dist2(a, b):
    """Signed angular distance between two angles, wrapped to (-pi, pi]."""
    phi = np.e**(1j*a) / np.e**(1j*b)
    ang_dist = np.arctan2(phi.imag, phi.real)
    return ang_dist
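A small numeric check: the quotient of unit phasors wraps the raw difference into (-pi, pi].

print(circ_dist2(0.1, 2 * np.pi - 0.1))  # ~0.2 rather than the raw -6.08
print(circ_dist2(-3.0, 3.0))             # ~0.283: the short way around the circle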
|
db60caace70f23c656c4e97b94145a246c6b2995
| 3,648,513
|
import tensorflow as tf

def hinge_loss(positive_scores, negative_scores, margin=1.0):
"""
Pairwise hinge loss [1]:
loss(p, n) = \sum_i [\gamma - p_i + n_i]_+
[1] http://yann.lecun.com/exdb/publis/pdf/lecun-06.pdf
:param positive_scores: (N,) Tensor containing scores of positive examples.
:param negative_scores: (N,) Tensor containing scores of negative examples.
:param margin: Margin.
:return: Loss value.
"""
hinge_losses = tf.nn.relu(margin - positive_scores + negative_scores)
loss = tf.reduce_sum(hinge_losses)
return loss
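A minimal numeric check, assuming TF2 eager execution.

pos = tf.constant([2.0, 0.5])
neg = tf.constant([1.0, 0.8])
# per-pair margins: [1 - 2.0 + 1.0, 1 - 0.5 + 0.8] = [0.0, 1.3]
print(hinge_loss(pos, neg).numpy())  # 1.3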
|
daa698f012c30c8f99ba1ce08cbb73226251e3c1
| 3,648,514
|
def build(req):
"""Builder for this format.
Args:
req: flask request
Returns:
Json containing the creative data
"""
errors = []
v = {}
tdir = "/tmp/" + f.get_tmp_file_name()
index = get_html()
ext = f.get_ext(req.files["videofile"].filename)
if ext != "mp4":
return {"errors": ["Only mp4 files allowed"]}
f.save_file(req.files["videofile"], tdir + "/video.mp4")
v["backgroundColor"] = f.get_param("background_color")
v["autoclose"] = str(f.get_int_param("autoclose"))
return {"errors": errors, "dir": tdir, "index": index, "vars": v}
|
33ad1e003533407626cd3ccdd52e7f6c414e6470
| 3,648,515
|
import sys

import numpy as np
import scipy.spatial.distance

def search(query, data, metric='euclidean', verbose=True):
    """
    do search, return ranked list according to distance
    metric: hamming/euclidean
    query: one query per row
    data: one data point per row
    """
    # calc dist of query and each data point
    if metric not in ['euclidean', 'hamming']:
        print('metric must be one of (euclidean, hamming)')
        sys.exit(0)
#b=time.clock()
dist=scipy.spatial.distance.cdist(query,data,metric)
sorted_idx=np.argsort(dist,axis=1)
#e=time.clock()
if verbose:
#calc avg dist to nearest 200 neighbors
nearpoints=sorted_idx[:,0:200]
d=[np.mean(dist[i][nearpoints[i]]) for i in range(nearpoints.shape[0])]
sys.stdout.write('%.4f, '% np.mean(d))
#print 'search time %.4f' % (e-b)
return sorted_idx
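Usage sketch on random vectors (verbose disabled to skip the neighbour-distance report).

rng = np.random.default_rng(0)
data = rng.random((500, 16))
query = rng.random((2, 16))
ranked = search(query, data, metric='euclidean', verbose=False)
print(ranked[:, :5])  # indices of the 5 nearest data points for each query row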
|
576471dfbe1dc0a2ae80235faf36d42d4b3a7f8a
| 3,648,516
|
def create_circle_widget(canvas: Canvas, x: int, y: int, color: str, circle_size: int):
"""create a centered circle on cell (x, y)"""
# in the canvas the 1st axis is horizontal and the 2nd is vertical
# we want the opposite so we flip x and y for the canvas
# to create an ellipsis, we give (x0, y0) and (x1, y1) that define the containing rectangle
pad = (CELL_SIZE - circle_size) / 2
i0 = 5 + y * CELL_SIZE + pad + 1
j0 = 5 + x * CELL_SIZE + pad + 1
i1 = 5 + (y + 1) * CELL_SIZE - pad
j1 = 5 + (x + 1) * CELL_SIZE - pad
return canvas.create_oval(i0, j0, i1, j1, fill=color, outline="")
|
b048b7d9c262c40a93cfef489468ad709a1e3883
| 3,648,517
|
def _format_program_counter_relative(state):
"""Program Counter Relative"""
program_counter = state.program_counter
operand = state.current_operand
if operand & 0x80 == 0x00:
near_addr = (program_counter + operand) & 0xFFFF
else:
near_addr = (program_counter - (0x100 - operand)) & 0xFFFF
return '${:04X}'.format(near_addr)
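Usage sketch with a stand-in state object; the original presumably passes a CPU-state instance exposing these two attributes.

from types import SimpleNamespace

state = SimpleNamespace(program_counter=0x2000, current_operand=0x10)
print(_format_program_counter_relative(state))  # '$2010' (forward branch)
state = SimpleNamespace(program_counter=0x2000, current_operand=0xF0)
print(_format_program_counter_relative(state))  # '$1FF0' (0xF0 is -0x10, backward)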
|
74f13e9230a6c116413b373b92e36bd884a906e7
| 3,648,518
|
def compile_program(
program: PyTEAL, mode: Mode = Mode.Application, version: int = 5
) -> bytes:
"""Compiles a PyTEAL smart contract program to the TEAL binary code.
Parameters
----------
program
A function which generates a PyTEAL expression, representing an Algorand program.
mode
The mode with which to compile the supplied PyTEAL program.
version
The version with which to compile the supplied PyTEAL program.
Returns
-------
bytes
The TEAL compiled binary code.
"""
source = compileTeal(program(), mode=mode, version=version)
return _compile_source(source)
|
50e9b4263a0622dfbe427c741ddfba2ff4007089
| 3,648,519
|
def predict(yolo_outputs, image_shape, anchors, class_names, obj_threshold, nms_threshold, max_boxes = 1000):
"""
Process the results of the Yolo inference to retrieve the detected bounding boxes,
the corresponding class label, and the confidence score associated.
The threshold value 'obj_threshold' serves to discard low confidence predictions.
The 'nms_threshold' value is used to discard duplicate boxes for a same object (IoU metric).
"""
# Init
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
total_boxes = []
total_box_scores = []
input_shape = tf.shape(yolo_outputs[0])[1 : 3] * 32
# Process output tensors
for i in range(len(yolo_outputs)):
# Get bboxes and associated scores
detected_boxes, box_scores = boxes_and_scores(yolo_outputs[i], anchors[anchor_mask[i]], len(class_names), input_shape, image_shape)
# Append bboxes and level of confidence to list
total_boxes.append(detected_boxes)
total_box_scores.append(box_scores)
# Concatenate results
total_boxes = tf.concat(total_boxes, axis=0)
total_box_scores = tf.concat(total_box_scores, axis=0)
    # Mask to filter out low confidence detections
    mask = total_box_scores >= obj_threshold
# Set boxes limit
max_boxes_tensor = tf.constant(max_boxes, dtype = tf.int32)
boxes_ = []
scores_ = []
classes_ = []
items_ = []
for c in range(len(class_names)):
# Get boxes labels
class_boxes = tf.boolean_mask(total_boxes, mask[:, c])
# Get associated score
class_box_scores = tf.boolean_mask(total_box_scores[:, c], mask[:, c])
# Concatenate label and score
item = [class_boxes, class_box_scores]
# Filter out duplicates when multiple boxes are predicted for a same object
nms_index = tf.image.non_max_suppression(class_boxes, class_box_scores, max_boxes_tensor, iou_threshold = nms_threshold)
# Remove the duplicates from the list of classes and scores
class_boxes = tf.gather(class_boxes, nms_index)
class_box_scores = tf.gather(class_box_scores, nms_index)
        # Label every kept box with its class index
classes = tf.ones_like(class_box_scores, 'int32') * c
# Append results to lists
boxes_.append(class_boxes)
scores_.append(class_box_scores)
classes_.append(classes)
# Concatenate results
boxes_ = tf.concat(boxes_, axis = 0)
scores_ = tf.concat(scores_, axis = 0)
classes_ = tf.concat(classes_, axis = 0)
return boxes_, scores_, classes_
|
df90a5baed671e316e03c0a621ce1740efc7a833
| 3,648,521
|
def ae_model(inputs, train=True, norm=True, **kwargs):
"""
AlexNet model definition as defined in the paper:
https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf
You will need to EDIT this function. Please put your AlexNet implementation here.
Note:
1.) inputs['images'] is a [BATCH_SIZE x HEIGHT x WIDTH x CHANNELS] array coming
from the data provider.
2.) You will need to return 'output' which is a dictionary where
- output['pred'] is set to the output of your model
- output['conv1'] is set to the output of the conv1 layer
- output['conv1_kernel'] is set to conv1 kernels
- output['conv2'] is set to the output of the conv2 layer
- output['conv2_kernel'] is set to conv2 kernels
- and so on...
The output dictionary should include the following keys for AlexNet:
['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'pool1',
'pool2', 'pool5', 'fc6', 'fc7', 'fc8']
as well as the respective ['*_kernel'] keys for the kernels
3.) Set your variable scopes to the name of the respective layers, e.g.
with tf.variable_scope('conv1'):
outputs['conv1'] = ...
outputs['pool1'] = ...
and
with tf.variable_scope('fc6'):
outputs['fc6'] = ...
and so on.
4.) Use tf.get_variable() to create variables, while setting name='weights'
for each kernel, and name='bias' for each bias for all conv and fc layers.
For the pool layers name='pool'.
These steps are necessary to correctly load the pretrained alexnet model
from the database for the second part of the assignment.
"""
# propagate input targets
outputs = inputs
# dropout = .5 if train else None
input_to_network = inputs['images']
outputs['input'] = input_to_network
with tf.variable_scope('conv'):
outputs['relu'], outputs['conv_kernel'] = get_conv(input_to_network,[7,7,3,64],16)
with tf.variable_scope('deconv'):
outputs['deconv'] = get_deconv(outputs['relu'],[12,12,3,64],12,input_to_network.shape)
# shape = input_to_network.get_shape().as_list()
# stride = 16
# hidden_size = 2
# deconv_size = 12
# ### YOUR CODE HERE
# outputs['input'] = input_to_network
# conv_layer = K.layers.Conv2D(64,7,strides=(stride,stride),
# padding='same',
# kernel_initializer='glorot_normal')
# outputs['conv_kernel'] = conv_layer
# outputs['conv'] = conv_layer(input_to_network)
# outputs['relu'] = K.layers.Activation('relu')(outputs['conv'])
# outputs['deconv'] = K.layers.Conv2DTranspose(3,deconv_size,
# deconv_size,padding='valid',
# kernel_initializer='glorot_normal')(outputs['relu'])
### END OF YOUR CODE
for k in ['deconv']:
assert k in outputs, '%s was not found in outputs' % k
return outputs, {}
|
659908f6fbfb401941984668634382c6d30a8124
| 3,648,523
|
def filter_by_country(data, country=DEFAULT_COUNTRY):
"""
Filter provided data by country (defaults to Czechia).
data: pandas.DataFrame
country: str
"""
# Filter data by COUNTRY
return data[data[COLUMN_FILTER] == country]
|
bbf9eacd74a6032f1298cd7313d5c8233ec4a8ec
| 3,648,524
|
def scans_from_csvs(*inps, names=None):
"""
Read from csvs.
:param inps: file names of the csvs
:param names: names of the Scans
:return: list of Scans
"""
ns, temp_vals, heat_flow_vals = read_csvs(inps)
names = ns if names is None else names
return [Scan(*vals) for vals in zip(temp_vals, heat_flow_vals, names)]
|
6acddf330e10793dab6b76ec6a1edb1d2fd0660d
| 3,648,526
|
def part_b(puzzle_input):
"""
Calculate the answer for part_b.
Args:
puzzle_input (list): Formatted as the provided input from the website.
Returns:
string: The answer for part_b.
"""
return str(collect_letters(puzzle_input)[1])
|
b82597c610e8a7d03ea68ddad392385636b0e2f3
| 3,648,527
|
from sklearn import preprocessing

def data_encoder(data):
    """
    Encode all categorical values in the dataframe into numeric values.
    @param data: the original dataframe
    @return data: the same dataframe with all categorical variables encoded
    """
    le = preprocessing.LabelEncoder()
    cols = data.columns
    numcols = data._get_numeric_data().columns
    catecols = list(set(cols) - set(numcols))
    data[catecols] = data[catecols].astype(str).apply(le.fit_transform)
    return data
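Usage sketch: string columns get integer codes while numeric columns pass through untouched.

import pandas as pd

df = pd.DataFrame({'color': ['red', 'blue', 'red'], 'size': [1, 2, 3]})
print(data_encoder(df))
#    color  size
# 0      1     1
# 1      0     2
# 2      1     3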
|
2a2177891e1930311661f6549dbd33f329704fec
| 3,648,528
|
def _search_settings(method_settings_keys, settings):
"""
We maintain a dictionary of dimensionality reduction methods
in dim_settings_keys where each key (method) stores another
dictionary (md) holding that method's settings (parameters).
The keys of md are component ids and the values are parameter
names that will be passed to dim_reduce.
For example, dim_settings_keys['dim-PCA'] holds a dictionary
dim_pca_settings_keys = {
'dim-PCA-n-components': 'n_components',
'dim-PCA-whiten': 'whiten',
'dim-PCA-solver': 'svd_solver',
'dim-PCA-random-state': 'random_state'
}
where the keys (dim-PCA-key) is the widget id and the value is the
parameter name to pass to sklearn's PCA.
Parameters
__________
method_settings_keys: dict
        Dictionary holding setting id's and parameter names.
settings: tuple of list of dicts of ...
Holds all children in the method-settings elements. This
is a mixture of lists, tuples, and dicts. We recursively search
this element to find the children with id's as determined by
dim_pca_settings_keys[dim_method]. By doing this we avoid having
to write new Input elements into our callbacks every time we add
a new setting. All that needs to be done is add the setting's
id into the settings_keys dict and it will be parsed automatically.
"""
kwargs = {}
for key in method_settings_keys:
child = next(_recur_search(settings, key))
# if there exists a component with 'key' as its 'id'
# then child should never be None. 'value' may be missing
# if not manually specified when constructing the widget.
if child is None or 'value' not in child:
raise InternalError("'value' key not found in child.")
kwargs[method_settings_keys[key]] = child['value']
return kwargs
|
20a8a0e24df1dc572dfbe541f1439bc6245f6170
| 3,648,529
|
def svn_opt_resolve_revisions(*args):
"""
svn_opt_resolve_revisions(svn_opt_revision_t peg_rev, svn_opt_revision_t op_rev,
svn_boolean_t is_url, svn_boolean_t notice_local_mods,
apr_pool_t pool) -> svn_error_t
"""
return _core.svn_opt_resolve_revisions(*args)
|
5f011401c4afc044f0fa877407f7c7a3da56f576
| 3,648,530
|
from typing import Union
from pathlib import Path
from typing import Any
def write_midi(
path: Union[str, Path],
music: "Music",
backend: str = "mido",
**kwargs: Any
):
"""Write a Music object to a MIDI file.
Parameters
----------
path : str or Path
Path to write the MIDI file.
music : :class:`muspy.Music`
Music object to write.
backend: {'mido', 'pretty_midi'}
Backend to use. Defaults to 'mido'.
"""
if backend == "mido":
return write_midi_mido(path, music, **kwargs)
if backend == "pretty_midi":
return write_midi_pretty_midi(path, music)
    raise ValueError("`backend` must be one of 'mido' and 'pretty_midi'.")
|
963e5aafffbc348df17861b615c2e839c170adce
| 3,648,531
|
import string

# VERBOSE is assumed to be a module-level debug flag; the original
# 'from re import VERBOSE' imported an unrelated regex flag by mistake.
VERBOSE = True

def find_star_column(file, column_type, header_length):
    """ For an input .STAR file, search through the header and find the column number assigned to a given column_type (e.g. 'rlnMicrographName', ...)
    """
    with open(file, 'r') as f:
        line_num = 0
        for line in f:
            line_num += 1
            # extract the column number (e.g. '#12' -> 12) for the given column type;
            # the original kept only the last digit, so '#12' became 2
            if column_type in line:
                digits = ''.join(c for c in line.split()[1] if c in string.digits)
                column_num = int(digits)
            # search header and no further to find setup values
            if line_num >= header_length:
                if VERBOSE:
                    # print("Read through header (%s lines total)" % header_length)
                    print("Column value for %s is %d" % (column_type, column_num))
                return column_num
|
832a63d2084b6f007c0b58fbafbe037d0a81ab38
| 3,648,532
|
def recalculate_bb(df, customization_dict, image_dir):
"""After resizing images, bb coordinates are recalculated.
Args:
df (Dataframe): A df for image info.
customization_dict (dict): Resize dict.
image_dir (list): Image path list
Returns:
Dataframe: Updated dataframe.
"""
img = cv2.imread(image_dir[0])
h, w, _ = img.shape
new_width = customization_dict['width']
new_height = customization_dict['height']
w_ratio = new_width/w
h_ratio = new_height/h
df['x_min'] = df['x_min']*w_ratio
df['x_max'] = df['x_max']*w_ratio
df['y_min'] = df['y_min']*h_ratio
df['y_max'] = df['y_max']*h_ratio
df.x_min = df.x_min.astype("int16")
df.x_max = df.x_max.astype("int16")
df.y_min = df.y_min.astype("int16")
df.y_max = df.y_max.astype("int16")
return df
|
412149195898e492405fe58aef2d8c8ce360cef7
| 3,648,533
|
def free_port():
"""Returns a free port on this host
"""
return get_free_port()
|
94765cdb1a6e502c9ad650956754b3eda7f1b060
| 3,648,534
|
def justify_to_box(
boxstart: float,
boxsize: float,
itemsize: float,
just: float = 0.0) -> float:
"""
Justifies, similarly, but within a box.
"""
return boxstart + (boxsize - itemsize) * just
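Usage sketch: just values of 0, 0.5, and 1 give left-, centre-, and right-justification within the box.

print(justify_to_box(0.0, 100.0, 20.0, just=0.0))  # 0.0  (flush left)
print(justify_to_box(0.0, 100.0, 20.0, just=0.5))  # 40.0 (centred)
print(justify_to_box(0.0, 100.0, 20.0, just=1.0))  # 80.0 (flush right)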
|
a644d5a7a6ff88009e66ffa35498d9720b24222c
| 3,648,535
|
import time
def generate_hostname(domain, hostname):
"""If hostname defined, returns FQDN.
If not, returns FQDN with base32 timestamp.
"""
# Take time.time() - float, then:
# - remove period
# - truncate to 17 digits
    # - if it happens that the last digits are 0 (and would not be displayed,
    #   making the string shorter) - pad it with 0.
#
# The result ensures that timestamp is 17 char length and is increasing.
timestamp = str(time.time()).replace('.', '')[:17].ljust(17, '0')
b32time = aws.int2str(number=int(timestamp), base=32)
if hostname[-1] == '-':
hostname = '{}{}'.format(hostname, '{time}')
return '{}.{}'.format(hostname.format(time=b32time), domain)
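
# Usage sketch (the base32 suffix varies per call, so it is shown
# symbolically; aws.int2str is an external helper assumed to be in scope):
# generate_hostname('example.com', 'web1')  -> 'web1.example.com'
# generate_hostname('example.com', 'web-')  -> 'web-<b32time>.example.com'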
|
54d85cea2b2aa69cc2864b0974852530453d63ca
| 3,648,537
|
import numpy as np

def symmetrize(M):
"""Return symmetrized version of square upper/lower triangular matrix."""
return M + M.T - np.diag(M.diagonal())
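
# Minimal check: the lower triangle is filled in from the upper triangle
# while the diagonal is counted only once.
M = np.array([[1, 2, 3],
              [0, 4, 5],
              [0, 0, 6]])
print(symmetrize(M))
# [[1 2 3]
#  [2 4 5]
#  [3 5 6]]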
|
a2f1311aa96d91d5c4992ad21018b07ac5954d1c
| 3,648,538
|
import numpy as np
import pandas as pd

def prep_public_water_supply_fraction() -> pd.DataFrame:
"""calculates public water supply deliveries for the commercial and industrial sectors individually
as a ratio to the sum of public water supply deliveries to residential end users and thermoelectric cooling.
Used in calculation of public water supply demand to commercial and industrial sectors.
:return: DataFrame of public water supply ratios for commercial and industrial sector.
"""
# read in data
df = prep_water_use_1995(variables=['FIPS', 'State', 'PS-DelDO', 'PS-DelPT', 'PS-DelCO', 'PS-DelIN'])
df_loc = prep_water_use_2015() # prepared list of 2015 counties with FIPS codes
# calculate ratio of commercial pws to sum of domestic and thermoelectric cooling pws
df['com_pws_fraction'] = np.where((df['PS-DelDO'] + df['PS-DelPT'] <= 0),
np.nan, (df['PS-DelCO'] / (df['PS-DelDO'] + df['PS-DelPT'])))
# calculate ratio of industrial pws to sum of domestic and thermoelectric cooling pws
df["ind_pws_fraction"] = np.where(((df['PS-DelDO'] + df['PS-DelPT']) <= 0),
np.nan, df['PS-DelIN'] / (df['PS-DelDO'] + df['PS-DelPT']))
# reduce dataframe
df = df[['FIPS', 'State', 'com_pws_fraction', 'ind_pws_fraction']]
# fill counties with 0 commercial or industrial public water supply ratios with state averages
df_mean = df.groupby('State', as_index=False).mean()
rename_list = df_mean.columns[1:].to_list()
for col in rename_list:
new_name = f"{col}_state"
df_mean = df_mean.rename(columns={col: new_name})
df_mean_all = pd.merge(df, df_mean, how='left', on=['State'])
# replace counties with consumption fractions of zero with the state average to replace missing data
rep_list = df.columns[2:].to_list()
for col in rep_list:
mean_name = f"{col}_state"
df_mean_all[col].fillna(df_mean_all[mean_name], inplace=True)
# reduce dataframe to required output
df_output = df_mean_all[['FIPS', 'State', 'com_pws_fraction', 'ind_pws_fraction']]
# merge with full list of counties from 2015 water data
df_output = pd.merge(df_loc, df_output, how='left', on=['FIPS', 'State'])
return df_output
|
bb759cfa25add08b0faf0d9232448698d0ae8d53
| 3,648,540
|
def set_matchq_in_constraint(a, cons_index):
"""
Takes care of the case, when a pattern matching has to be done inside a constraint.
"""
lst = []
res = ''
if isinstance(a, list):
if a[0] == 'MatchQ':
s = a
optional = get_default_values(s, {})
r = generate_sympy_from_parsed(s, replace_Int=True)
r, free_symbols = add_wildcards(r, optional=optional)
free_symbols = sorted(set(free_symbols)) # remove common symbols
r = sympify(r, locals={"Or": Function("Or"), "And": Function("And"), "Not":Function("Not")})
pattern = r.args[1].args[0]
cons = r.args[1].args[1]
pattern = rubi_printer(pattern, sympy_integers=True)
pattern = setWC(pattern)
res = ' def _cons_f_{}({}):\n return {}\n'.format(cons_index, ', '.join(free_symbols), cons)
res += ' _cons_{} = CustomConstraint(_cons_f_{})\n'.format(cons_index, cons_index)
res += ' pat = Pattern(UtilityOperator({}, x), _cons_{})\n'.format(pattern, cons_index)
res += ' result_matchq = is_match(UtilityOperator({}, x), pat)'.format(r.args[0])
return "result_matchq", res
else:
for i in a:
if isinstance(i, list):
r = set_matchq_in_constraint(i, cons_index)
lst.append(r[0])
res = r[1]
else:
lst.append(i)
return lst, res
|
c40f15f500736102f4abf17169715387c2f1b91b
| 3,648,541
|
def istype(klass, object):
"""Return whether an object is a member of a given class."""
try: raise object
except klass: return 1
except: return 0
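
# Behavioural sketch: the raise/except trick only works when `object` is an
# exception instance (or class); anything else raises TypeError inside the
# try, which the bare except swallows, returning 0.
# istype(ValueError, ValueError("bad"))  -> 1
# istype(KeyError, ValueError("bad"))    -> 0
# istype(ValueError, 42)                 -> 0 (42 is not raisable)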
|
bceb83914a9a346c59d90984730dddb808bf0e78
| 3,648,542
|
from typing import Mapping
from typing import Any
def _embed_from_mapping(mapping: Mapping[str, Any], ref: str) -> mapry.Embed:
"""
Parse the embed from the mapping.
All the fields are parsed except the properties, which are parsed
in a separate step.
:param mapping: to be parsed
:param ref: reference to the embeddable structure in the mapry schema
:return: embeddable structure without the properties
"""
return mapry.Embed(
name=mapping['name'], description=mapping['description'], ref=ref)
|
10d3894aa33d41efd47f03335c6e90547ee26e6c
| 3,648,543
|
import pdb
def generate_csv_from_pnl(pnl_file_name):
"""在.pnl文件的源路径下新生成一个.csv文件. 拷贝自export_to_csv函数. pnl_file_name需包含路径. """
pnlc = alib.read_pnl_from_file(pnl_file_name)
pnl = pnlc[1]
if pnl is None:
        print('pnl file {} does not exist!'.format(pnl_file_name))
pdb.set_trace()
csv_file_name = pnl_file_name[:-4] + '.csv'
outf = open(csv_file_name, 'w')
outf.write(alib.pnl_columns + '\n')
f = ','.join(['%g'] * 14) + ',%d,%d,%d'
for d in pnl:
outf.write((f + '\n') % d)
outf.close()
return csv_file_name
|
ceed6ce7d31fe6cb738252b7458c2f404c01135c
| 3,648,544
|
def parse_number(text, allow_to_fail):
"""
Convert to integer, throw if fails
:param text: Number as text (decimal, hex or binary)
:return: Integer value
"""
try:
if text in defines:
return parse_number(defines.get(text), allow_to_fail)
return to_number(text)
except ValueError:
if allow_to_fail:
return 0
else:
raise ASMSyntaxError(f'Invalid number format: {text}')
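
# Hedged examples; `defines` and `to_number` are module globals from the
# surrounding assembler, so the second call assumes a mapping such as
# defines = {'START': '0x100'}.
# parse_number('42', allow_to_fail=False)     -> 42
# parse_number('START', allow_to_fail=False)  -> 256 (via the defines lookup)
# parse_number('oops', allow_to_fail=True)    -> 0 instead of raising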
|
90906b56e8a88fcde9f66defeed48cf12371d375
| 3,648,545
|
import importlib
from importlib import import_module
def pick_vis_func(options: EasyDict):
"""Pick the function to visualize one batch.
:param options:
:return:
"""
importlib.invalidate_caches()
vis_func = getattr(
import_module("utils.vis.{}".format(options.vis.name[0])),
"{}".format(options.vis.name[1])
)
return vis_func
|
4d65f00075c984e5407af09d3680f2195be640bb
| 3,648,546
|
import numpy
def scale_quadrature(quad_func, order, lower, upper, **kwargs):
"""
Scale quadrature rule designed for unit interval to an arbitrary interval.
Args:
quad_func (Callable):
Function that creates quadrature abscissas and weights on the unit
interval.
order (int):
The quadrature order passed to the quadrature function.
lower (float):
The new lower limit for the quadrature function.
upper (float):
The new upper limit for the quadrature function.
kwargs (Any):
Extra keyword arguments passed to `quad_func`.
Returns:
Same as ``quad_func(order, **kwargs)`` except scaled to a new interval.
Examples:
>>> def my_quad(order):
... return (numpy.linspace(0, 1, order+1)[numpy.newaxis],
... 1./numpy.full(order+1, order+2))
>>> my_quad(2)
(array([[0. , 0.5, 1. ]]), array([0.25, 0.25, 0.25]))
>>> scale_quadrature(my_quad, 2, lower=0, upper=2)
(array([[0., 1., 2.]]), array([0.5, 0.5, 0.5]))
>>> scale_quadrature(my_quad, 2, lower=-0.5, upper=0.5)
(array([[-0.5, 0. , 0.5]]), array([0.25, 0.25, 0.25]))
"""
abscissas, weights = quad_func(order=order, **kwargs)
assert numpy.all(abscissas >= 0) and numpy.all(abscissas <= 1)
assert numpy.sum(weights) <= 1+1e-10
assert numpy.sum(weights > 0)
weights = weights*(upper-lower)
abscissas = (abscissas.T*(upper-lower)+lower).T
return abscissas, weights
|
f3854cee12a482bc9c92fe2809a0388dddb422e0
| 3,648,547
|
def ask_for_region(self):
"""ask user for region to select (2-step process)"""
selection = ["BACK"]
choices = []
while "BACK" in selection:
response = questionary.select(
"Select area by (you can go back and combine these choices):",
choices=["continents", "regions", "countries"],
).ask()
selection_items = getattr(self, response)
if response == "regions":
choices = (
[Choice(r) for r in selection_items if "EU" in r]
+ [Separator()]
+ [Choice(r) for r in selection_items if "EU" not in r]
)
else:
choices = [Choice(r) for r in selection_items.keys()]
# preselect previous choices
for choice in choices:
if choice.value in selection:
choice.checked = True
current_selection = questionary.checkbox("Please select", choices=choices).ask()
selection = selection + current_selection
if "BACK" not in current_selection:
selection = clean_results(selection)
print(f"Selection: {clean_results(selection)}")
selection = list(set(clean_results(selection)))
return self._extract_countries(selection)
|
dd7ea9ca33ca8348fba0d36c5661b6fd30c96090
| 3,648,548
|
from numpy import argmin, array

# `peaks` is an external peak-detection helper assumed to be in scope.
def peakAlign(refw,w):
""" Difference between the maximum peak positions of the signals.
This function returns the difference, in samples, between the peaks position
of the signals. If the reference signal has various peaks, the one
chosen is the peak which is closer to the middle of the signal, and if the
other signal has more than one peak also, the chosen is the one closer to
the reference peak signal.
The first signal introduced is the reference signal.
Parameters
----------
refw: array-like
the input reference signal.
w: array-like
the input signal.
Returns
-------
al: int
the difference between the two events position
Example
-------
>>> peakAlign([5,7,3,20,13,5,7],[5,1,8,4,3,10,3])
1
See also: maxAlign(), minAlign(), peakNegAlign(), infMaxAlign(), infMinAlign()
"""
    p_mw = array(peaks(array(refw), min(refw)))
    p_w = array(peaks(array(w), min(w)))
    if len(p_mw) > 1:
        min_al = argmin(abs((len(refw) / 2) - p_mw))  # choose the peak closest to the middle of the signal
        p_mw = p_mw[min_al]
    if list(p_w) == []:
        p_w = p_mw
    elif len(p_w) > 1:
        min_al = argmin(abs(p_w - p_mw))  # choose the peak closest to the reference signal's peak
        p_w = p_w[min_al]
    return int(array(p_mw - p_w))
|
a7497e828008281318dff25b5547e0ab4f8e9a35
| 3,648,549
|
import pandas as pd

def get_games(by_category, n_games):
"""
This function imports the dataframe of most popular games and returns a list of game names
with the length of 'n_games' selected by 'by_category'. Valid options for 'by_category': rank, num_user_ratings
"""
df = pd.read_csv('../data/popular_games_with_image_url.csv', index_col = 0)
    if by_category == 'rank':
        ascending = True
    elif by_category == 'num_user_ratings':
        ascending = False
    else:
        raise ValueError("by_category must be 'rank' or 'num_user_ratings'")
df = df.sort_values(by_category, ascending = ascending)
df = df.head(n_games)
game_list = []
image_list = []
for row in df.iterrows():
#game_name = row[1]['name'] + ' (' + str(row[1]['year_published']) + ')'
game_name = row[1]['name']
game_list.append(game_name)
image_url = row[1]['image_url']
image_list.append(image_url)
return game_list, image_list
|
2702d6b072ba9ac49565c9ee768d65c431441724
| 3,648,550
|
import numpy as np

def diurnalPDF( t, amplitude=0.5, phase=pi8 ):
    """
    Diurnal probability density over one day.
    "t" must be specified in gps seconds; it is converted to the number of
    seconds after the most recent 00:00:00 UTC.
    Returns (1 + amplitude*sin(2*pi*t/day - phase))/day.
    Note: ``pi8``, ``twopi``, ``day`` and ``gps2relativeUTC`` are module-level
    helpers defined elsewhere in the source.
    """
if amplitude > 1:
raise ValueError("amplitude cannot be larger than 1")
t = gps2relativeUTC(t)
return (1 + amplitude*np.sin(twopi*t/day - phase))/day
|
6bf755851d2bf2582ca98c1bcbe67aa7dc4e0a2f
| 3,648,551
|
from gevent.pool import Pool  # assumed: this grequests-style helper relies on gevent's Pool

def imap_workers(workers, size=2, exception_handler=None):
"""Concurrently converts a generator object of Workers to
a generator of Responses.
:param workers: a generator of worker objects.
:param size: Specifies the number of workers to make at a time. default is 2
    :param exception_handler: Callback function, called when an exception occurs. Params: Worker, Exception
"""
pool = Pool(size)
def start(r):
return r.start()
for worker in pool.imap_unordered(start, workers):
if worker.response is not None:
yield worker.response
elif exception_handler:
exception_handler(worker, worker.exception)
pool.join()
|
c4ab81770b40238025055394bf43ca0dc99dd506
| 3,648,552
|
import time
def output_time(time_this: float = None, end: str = " | ") -> float:
    """Take a unix timestamp and print it formatted; defaults to the current time.
    ``TIMEZONE`` is a module-level offset in seconds defined elsewhere."""
    if not time_this:
        time_this = time.time() - TIMEZONE
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(time_this)), end=end)
return time_this
|
ba17400306af7142a91bd5b62941c52fc59dbf1a
| 3,648,553
|
import pygame

def blend_color(color1, color2, blend_ratio):
"""
    Blend two colors together given the blend_ratio
:param color1: pygame.Color
:param color2: pygame.Color
:param blend_ratio: float between 0.0 and 1.0
:return: pygame.Color
"""
r = color1.r + (color2.r - color1.r) * blend_ratio
g = color1.g + (color2.g - color1.g) * blend_ratio
b = color1.b + (color2.b - color1.b) * blend_ratio
a = color1.a + (color2.a - color1.a) * blend_ratio
return pygame.Color(int(r), int(g), int(b), int(a))
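
# Midpoint blend of red and blue; pygame.Color works without initialising
# a display, so this snippet runs standalone.
red = pygame.Color(255, 0, 0, 255)
blue = pygame.Color(0, 0, 255, 255)
print(blend_color(red, blue, 0.5))  # (127, 0, 127, 255)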
|
0bb7fa1570472e60bd93a98f6da3a515ca9dd500
| 3,648,554
|
from heapq import heappush, heappop
from time import perf_counter

def solve_a_star(start_id: str, end_id: str, nodes, edges):
    """
    Get the shortest distance between two nodes using the A* algorithm
    (Dijkstra's algorithm plus a straight-line distance heuristic).
    :param start_id: ID of the start node
    :param end_id: ID of the end node
    :return: Shortest distance between start and end node
    """
    # calc_distance and associations_to_path are helpers defined elsewhere.
solution_t_start = perf_counter()
solution = []
associations = {start_id: None}
closed = set() # Nodes that have been resolved
fringe = [] # Min-heap that holds nodes to check (aka. fringe)
start_y, start_x = nodes[start_id]
end_y, end_x = nodes[end_id]
start_node = (0 + calc_distance(start_y, start_x, end_y, end_x), 0, start_id)
heappush(fringe, start_node)
while len(fringe) > 0:
c_node = heappop(fringe)
c_f, c_distance, c_id = c_node
c_y, c_x = nodes[c_id]
if c_id == end_id:
return c_distance, solution, perf_counter() - solution_t_start, associations_to_path(associations, c_id,
nodes)
if c_id not in closed:
closed.add(c_id)
for child_id, c_to_child_distance in edges[c_id]:
if child_id not in closed:
# Add to solution path
if child_id not in associations:
associations[child_id] = c_id
child_distance = c_distance + c_to_child_distance # Cost function
child_y, child_x = nodes[child_id]
child_node = (
child_distance + calc_distance(child_y, child_x, end_y, end_x), child_distance, child_id)
heappush(fringe, child_node)
solution.append(((c_y, c_x), (child_y, child_x)))
return None
|
467257c15c7a99d217d75b69876b2f64ecd0b58e
| 3,648,556
|
def get_dhcp_relay_statistics(dut, interface="", family="ipv4", cli_type="", skip_error_check=True):
"""
API to get DHCP relay statistics
Author Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
:param dut:
:type dut:
:param interface:
:type interface:
"""
cli_type = st.get_ui_type(dut, cli_type=cli_type)
if cli_type in ['click', 'klish']:
ip_val = "ip" if family == "ipv4" else "ipv6"
if interface:
command = "show {} dhcp-relay statistics {}".format(ip_val, interface)
else:
command = "show {} dhcp-relay statistics".format(ip_val)
return st.show(dut, command, type=cli_type, skip_error_check=skip_error_check)
elif cli_type in ['rest-patch', 'rest-put']:
return _get_rest_dhcp_relay_statistics(dut, interface=interface, family=family)
else:
st.error("Unsupported CLI_TYPE: {}".format(cli_type))
return False
|
0784cd367d638124458d9e0fb808b45bcc239a84
| 3,648,557
|
def check_rule_for_Azure_ML(rule):
"""Check if the ports required for Azure Machine Learning are open"""
required_ports = ['29876', '29877']
if check_source_address_prefix(rule.source_address_prefix) is False:
return False
if check_protocol(rule.protocol) is False:
return False
if check_direction(rule.direction) is False:
return False
if check_provisioning_state(rule.provisioning_state) is False:
return False
if rule.destination_port_range is not None:
if check_ports_in_destination_port_ranges(
required_ports,
[rule.destination_port_range]) is False:
return False
else:
if check_ports_in_destination_port_ranges(
required_ports,
rule.destination_port_ranges) is False:
return False
return True
|
fb6067d484a3698b2d10d297e3419510d1d8c4e9
| 3,648,559
|
import re
def text_cleanup(text: str) -> str:
"""
A simple text cleanup function that strips all new line characters and
substitutes consecutive white space characters by a single one.
:param text: Input text to be cleaned.
:return: The cleaned version of the text
"""
    text = text.replace('\n', '')
return re.sub(r'\s{2,}', ' ', text)
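
# Example covering both cleanup steps: newline removal, then collapsing runs
# of whitespace (this relies on the replace() result being assigned above).
assert text_cleanup("a\nb   c") == "ab c"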
|
84b9752f261f94164e2e83b944a2c12cee2ae5d8
| 3,648,560
|
def create_overide_pandas_func(
cls, func, verbose, silent, full_signature, copy_ok, calculate_memory
):
""" Create overridden pandas method dynamically with
additional logging using DataFrameLogger
Note: if we extracting _overide_pandas_method outside we need to implement decorator like here
https://stackoverflow.com/questions/10176226/how-do-i-pass-extra-arguments-to-a-python-decorator
:param cls: pandas class for which the method should be overriden
:param func: pandas method name to be overridden
:param silent: Whether additional the statistics get printed
:param full_signature: adding additional information to function signature
:param copy_ok: whether the dataframe is allowed to be copied to calculate more informative metadata logs
:return: the same function with additional logging capabilities
"""
def _run_method_and_calc_stats(
fn,
fn_args,
fn_kwargs,
input_df,
full_signature,
silent,
verbose,
copy_ok,
calculate_memory,
):
if copy_ok:
# If we're ok to make copies, copy the input_df so that we can compare against the output of inplace methods
try:
# Will hit infinite recursion if we use the patched copy method so use the original
original_input_df = getattr(
input_df, settings.ORIGINAL_METHOD_PREFIX + "copy"
)(deep=True)
except AttributeError:
original_input_df = input_df.copy(deep=True)
output_df, execution_stats = get_execution_stats(
cls, fn, input_df, fn_args, fn_kwargs, calculate_memory
)
if output_df is None:
# The operation was strictly in place so we just call the dataframe the output_df as well
output_df = input_df
if copy_ok:
# If this isn't true and the method was strictly inplace, input_df and output_df will just
# point to the same object
input_df = original_input_df
step_stats = StepStats(
execution_stats,
cls,
fn,
fn_args,
fn_kwargs,
full_signature,
input_df,
output_df,
)
step_stats.log_stats_if_needed(silent, verbose, copy_ok)
if isinstance(output_df, pd.DataFrame) or isinstance(output_df, pd.Series):
step_stats.persist_execution_stats()
return output_df
def _overide_pandas_method(fn):
if cls == pd.DataFrame:
register_method_wrapper = pf.register_dataframe_method
elif cls == pd.Series:
register_method_wrapper = pf.register_series_method
@register_method_wrapper
@wraps(fn)
def wrapped(*args, **fn_kwargs):
input_df, fn_args = args[0], args[1:]
output_df = _run_method_and_calc_stats(
fn,
fn_args,
fn_kwargs,
input_df,
full_signature,
silent,
verbose,
copy_ok,
calculate_memory,
)
return output_df
return wrapped
    # exec returns None; the decorator registers the method on pandas as a side effect
    return exec(f"@_overide_pandas_method\ndef {func}(df, *args, **kwargs): pass")
|
db2bf7cb5d5395aeb700ca14211690750f056a91
| 3,648,562
|
import numpy as np
from numpy import linalg as la

def orthogonalize(U, eps=1e-15):
"""
Orthogonalizes the matrix U (d x n) using Gram-Schmidt Orthogonalization.
If the columns of U are linearly dependent with rank(U) = r, the last n-r columns
will be 0.
Args:
U (numpy.array): A d x n matrix with columns that need to be orthogonalized.
eps (float): Threshold value below which numbers are regarded as 0 (default=1e-15).
Returns:
(numpy.array): A d x n orthogonal matrix. If the input matrix U's cols were
not linearly independent, then the last n-r cols are zeros.
Examples:
```python
>>> import numpy as np
>>> import gram_schmidt as gs
>>> gs.orthogonalize(np.array([[10., 3.], [7., 8.]]))
array([[ 0.81923192, -0.57346234],
[ 0.57346234, 0.81923192]])
>>> gs.orthogonalize(np.array([[10., 3., 4., 8.], [7., 8., 6., 1.]]))
array([[ 0.81923192 -0.57346234 0. 0. ]
[ 0.57346234 0.81923192 0. 0. ]])
```
"""
n = len(U[0])
# numpy can readily reference rows using indices, but referencing full rows is a little
# dirty. So, work with transpose(U)
V = U.T
for i in range(n):
prev_basis = V[0:i] # orthonormal basis before V[i]
coeff_vec = np.dot(prev_basis, V[i].T) # each entry is np.dot(V[j], V[i]) for all j < i
# subtract projections of V[i] onto already determined basis V[0:i]
V[i] -= np.dot(coeff_vec, prev_basis).T
if la.norm(V[i]) < eps:
V[i][V[i] < eps] = 0. # set the small entries to 0
else:
V[i] /= la.norm(V[i])
return V.T
|
5807c0e5c7ee663391123076c8784cfb7e445760
| 3,648,563
|
def boolean(func):
"""
Sets 'boolean' attribute (this attribute is used by list_display).
"""
    func.boolean = True
return func
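
# Typical use in a Django ModelAdmin: marking a list_display method as
# boolean so the admin renders a check icon instead of True/False text.
# The model and field names here are illustrative.
# class ArticleAdmin(admin.ModelAdmin):
#     list_display = ('title', 'is_published')
#
#     @boolean
#     def is_published(self, obj):
#         return obj.status == 'published'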
|
9bbf731d72e53aa9814caacaa30446207af036bd
| 3,648,565
|
def load_output_template_configs(items):
"""Return list of output template configs from *items*."""
templates = []
for item in items:
template = OutputTemplateConfig(
id=item["id"],
pattern_path=item.get("pattern-path", ""),
pattern_base=item.get("pattern-base", ""),
append_username_to_name=item.get("append-username-to-name", False),
append_colorspace_to_name=item.get("append-colorspace-to-name", False),
append_passname_to_name=item.get("append-passname-to-name", False),
append_passname_to_subfolder=item.get("append-passname-to-subfolder", False),
)
templates.append(template)
return tuple(templates)
|
028502662906230bf2619fa105caa1d525ff8e75
| 3,648,566
|
def read_keyword_arguments_section(docstring: Docstring, start_index: int) -> tuple[DocstringSection | None, int]:
"""
Parse a "Keyword Arguments" section.
Arguments:
docstring: The docstring to parse
start_index: The line number to start at.
Returns:
A tuple containing a `Section` (or `None`) and the index at which to continue parsing.
"""
arguments, index = read_arguments(docstring, start_index)
if arguments:
return DocstringSection(DocstringSectionKind.keyword_arguments, arguments), index
warn(docstring, index, f"Empty keyword arguments section at line {start_index}")
return None, index
|
9c789fd4b08d2f3d9e99d4db568ab710e4765c91
| 3,648,567
|
from functools import partial
from typing import Iterable, Mapping

def is_builtin(x, drop_callables=True):
"""Check if an object belongs to the Python standard library.
Parameters
----------
    x: any
        Object (or container of objects) to check.
    drop_callables: bool
If True, we won't consider callables (classes/functions) to be builtin.
Classes have class `type` and functions have class
`builtin_function_or_method`, both of which are builtins - however,
this is often not what we mean when we want to know if something is
built in. Note: knowing the class alone is not enough to determine if
the objects it creates are built-in; this may depend on the kwargs
passed to its constructor. This will NOT check if a class was defined
in the standard library.
Returns
-------
bool: True if the object is built-in. If the object is list-like, each
item will be checked as well the container. If the object is dict-like,
each key AND value will be checked (you can always pass in d.keys() or
d.values() for more limited checking). Again, the container itself will
be checked as well.
"""
def _builtin(x, drop_callables):
if callable(x) and drop_callables:
return False
return x.__class__.__module__ == 'builtins'
builtin = partial(_builtin, drop_callables=drop_callables)
# Check mapping before iterable because mappings are iterable.
if isinstance(x, Mapping):
return builtin(x) and all(builtin(o) for o in flatten(x.items()))
elif isinstance(x, Iterable):
return builtin(x) and all(builtin(o) for o in flatten(x))
return builtin(x)
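
# Behavioural sketch (`flatten` is an external helper assumed to be in scope
# for the container cases):
# is_builtin(3)                          -> True
# is_builtin([1, 'a', 2.0])              -> True (container and items checked)
# is_builtin(len)                        -> False (callables dropped by default)
# is_builtin(len, drop_callables=False)  -> True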
|
d84fbd770e048172d8c59315fbe24d58046f77b8
| 3,648,568
|
import numpy as np
import yaml
def random_pair_selection(config_path,
data_size=100,
save_log="random_sents"):
"""
randomly choose from parallel data, and save to the save_logs
:param config_path:
:param data_size:
:param save_log:
:return: random selected pairs
"""
np.random.seed(32767)
with open(config_path.strip()) as f:
        configs = yaml.safe_load(f)
data_configs = configs["data_configs"]
with open(data_configs["train_data"][0], "r") as src, \
open(data_configs["train_data"][1], "r") as trg, \
open(save_log+".src", "w") as out_src, open(save_log+".trg", "w") as out_trg:
        counter = 0
        return_src = []
        return_trg = []
for sent_s, sent_t in zip(src,trg):
            if np.random.uniform() < 0.2 and counter < data_size:
                counter += 1
                out_src.write(sent_s)
                out_trg.write(sent_t)
                return_src += [sent_s.strip()]
                return_trg += [sent_t.strip()]
return return_src, return_trg
|
417b59bae49fe8aa0566f20f8ff371c7760e1a8a
| 3,648,569
|
from typing import OrderedDict
from typing import Counter
def profile_nominal(pairs, **options):
"""Return stats for the nominal field
Arguments:
:param pairs: list with pairs (row, value)
:return: dictionary with stats
"""
result = OrderedDict()
values = [r[1] for r in pairs]
c = Counter(values)
result['top'], result['freq'] = c.most_common(1)[0]
categories = list(c)
categories.sort()
result['categories'] = categories
result['categories_num'] = len(categories)
return result
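
# Example: profiling a nominal column given (row, value) pairs.
pairs = [(0, 'cat'), (1, 'dog'), (2, 'cat'), (3, 'bird')]
stats = profile_nominal(pairs)
# stats == OrderedDict([('top', 'cat'), ('freq', 2),
#                       ('categories', ['bird', 'cat', 'dog']),
#                       ('categories_num', 3)])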
|
00ef211e8f665a02f152e764c409668481c748cc
| 3,648,571
|
from typing import List

# Assumed: the Cursor/CursorKind types come from libclang's Python bindings.
from clang.cindex import Cursor, CursorKind

def class_definitions(cursor: Cursor) -> List[Cursor]:
"""
extracts all class definitions in the file pointed by cursor. (typical mocks.h)
Args:
cursor: cursor of parsing result of target source code by libclang
Returns:
a list of cursor, each pointing to a class definition.
"""
cursors = cursors_in_same_file(cursor)
class_cursors = []
for descendant in cursors:
# check if descendant is pointing to a class declaration block.
if descendant.kind != CursorKind.CLASS_DECL:
continue
if not descendant.is_definition():
continue
# check if this class is directly enclosed by a namespace.
if descendant.semantic_parent.kind != CursorKind.NAMESPACE:
continue
class_cursors.append(descendant)
return class_cursors
|
c2831b787905b02865890aa2680c37b97ec2e0a8
| 3,648,572
|
def service_list_by_category_view(request, category):
"""Shows services for a chosen category.
If url doesn't link to existing category, return user to categories list"""
template_name = 'services/service-list-by-category.html'
if request.method == "POST":
contact_form = ContactForm(request.POST)
if contact_form.is_valid():
contact_form.save()
return redirect(reverse('accounts:profile'))
else:
if request.user.is_authenticated:
initial_data = {
"user": request.user,
"name": request.user.first_name + " " + request.user.last_name,
"email": request.user.email
}
form = ContactForm(
request.POST or None, initial=initial_data)
else:
form = ContactForm()
try:
obj = ServiceCategory.objects.get(name=category)
queryset = Service.objects.filter(category=obj.pk)
context = {
"obj": obj,
"queryset": queryset,
"form": form,
}
except ServiceCategory.DoesNotExist:
messages.error(request, 'No category named <em>' + category + '</em>.')
return redirect("services:services_list")
return render(request, template_name=template_name, context=context)
|
dcbed59c8b6876b7072eb82b27f6b10e829c2daa
| 3,648,574
|
def check_columns(board: list):
"""
Check column-wise compliance of the board for uniqueness (buildings of unique height) and visibility (top-bottom and vice versa).
Same as for horizontal cases, but aggregated in one function for vertical case, i.e. columns.
>>> check_columns(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
True
>>> check_columns(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41232*', '*2*1***'])
False
>>> check_columns(['***21**', '412553*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
"""
    size = len(board)
    # transpose the board so columns become rows, then reuse the row checker
    transformed_board = [''.join(board[row][col] for row in range(size))
                         for col in range(size)]
    return check_horizontal_visibility(transformed_board)
|
26ea379a165b90eadcf89640f00857e9e95146c7
| 3,648,575
|
def get_git_tree(pkg, g, top_prd):
"""
:return:
"""
global pkg_tree
global pkg_id
global pkg_list
global pkg_matrix
pkg_tree = Tree()
pkg_id = 0
pkg_list = dict()
# pkg_list['root'] = []
if pkg == '':
return None
if pkg in Config.CACHED_GIT_REPOS:
pkg_content = Config.CACHED_GIT_REPOS[pkg]
print("^", end="", flush=True)
else:
pkg_content = get_gitpkg_content(pkg, g)
if pkg_content:
# first node in the tree
if pkg in pkg_matrix.keys():
if top_prd not in pkg_matrix[pkg]:
pkg_matrix[pkg].append(top_prd)
else:
pkg_matrix[pkg] = [top_prd]
        pkg_content.key = str(pkg_id) + "." + pkg_content.name
pkg_content.component_id = top_prd.id
pkg_content.component_name = top_prd.name
# print(pkg_content.key, pkg_content.pkey, pkg_content.name, pkg_content.ups_table, ">>>>>>", end="", flush=True)
pkg_tree.create_node(pkg_content.key, pkg_content.key, data=pkg_content)
if pkg not in Config.CACHED_GIT_REPOS.keys():
Config.CACHED_GIT_REPOS[pkg] = pkg_content
print("+", end="", flush=True)
for child in pkg_content.ups_table:
walk_git_tree(child, g, pkg_content.key, top_prd)
else:
return {'tree': None, 'deps': None}
# print(pkg_tree)
return {'tree': pkg_tree, 'deps': pkg_list}
|
05434882a476c8506804918cb44624c7734bf405
| 3,648,576
|
import requests
def get_requests_session():
"""Return an empty requests session, use the function to reuse HTTP connections"""
session = requests.session()
session.mount("http://", request_adapter)
session.mount("https://", request_adapter)
session.verify = bkauth_settings.REQUESTS_VERIFY
session.cert = bkauth_settings.REQUESTS_CERT
return session
|
e5921b12d29718e9ef1f503f902fed02a7c7e82f
| 3,648,577
|
def edit_role_description(rid, description, analyst):
"""
Edit the description of a role.
:param rid: The ObjectId of the role to alter.
:type rid: str
:param description: The new description for the Role.
:type description: str
:param analyst: The user making the change.
:type analyst: str
"""
description = description.strip()
Role.objects(id=rid,
name__ne=settings.ADMIN_ROLE).update_one(set__description=description)
return {'success': True}
|
f857755766da1f8f5be0e3dc255ed34aa7ed3ed3
| 3,648,578
|
def have_questions(pair, config, info=None):
"""
Return True iff both images are annotated with questions.
"""
qas = info["qas"]
c1id = pair[0]
if qas[c1id]['qas'] == []:
return False
c2id = pair[1]
if qas[c2id]['qas'] == []:
return False
return True
|
45a5f4babcc17ad5573008ca31773d51334144cd
| 3,648,579
|