def euler2rot_symbolic(angle1='ϕ', angle2='θ', angle3='ψ', order='X-Y-Z', ertype='extrinsic'):
"""returns symbolic expression for the composition of elementary rotation matrices
Parameters
----------
angle1 : string or sympy.Symbol
angle representing first rotation
angle2 : string or sympy.Symbol
angle representing second rotation
angle3 : string or sympy.Symbol
angle representing third rotation
order : string
valid string sequence that specifies the order of rotation. See `euler2rot()`
for details
    ertype : string ('extrinsic' or 'intrinsic')
        the type of elemental rotations. See `euler2rot()` for details.
Example
-------
>>> R = euler2rot_symbolic('1', '2', '3', 'X-Y-Z' , 'intrinsic')
>>> c, s = sy.symbols('c, s', cls=sy.Function)
>>> R.subs({sy.cos:c, sy.sin:s})
Matrix([
[ c(2)*c(3), -c(2)*s(3), s(2)],
[ c(1)*s(3) + c(3)*s(1)*s(2), c(1)*c(3) - s(1)*s(2)*s(3), -c(2)*s(1)],
[-c(1)*c(3)*s(2) + s(1)*s(3), c(1)*s(2)*s(3) + c(3)*s(1), c(1)*c(2)]])
Note
----
    The input angles are given in the order of the rotations (corresponding
    to `order`); they are not tied to any particular axis.
"""
X = rotX_symbolic
Y = rotY_symbolic
Z = rotZ_symbolic
order = order.split('-')
if ertype == 'extrinsic':
order.reverse()
composition = '{}(angle3)*{}(angle2)*{}(angle1)'.format(*order)
elif ertype == 'intrinsic':
composition = '{}(angle1)*{}(angle2)*{}(angle3)'.format(*order)
else:
raise ValueError('Incorrect elemental rotation parameter.')
#print(composition)
return eval(composition)
|
07069fc6c543acb9960f8203130cabcd04a762f4
| 3,642,963
|
import ctypes
def k4a_playback_get_next_imu_sample(playback_handle, imu_sample):
"""
K4ARECORD_EXPORT k4a_stream_result_t k4a_playback_get_next_imu_sample(k4a_playback_t playback_handle,
k4a_imu_sample_t *imu_sample);
"""
_k4a_playback_get_next_imu_sample = record_dll.k4a_playback_get_next_imu_sample
_k4a_playback_get_next_imu_sample.restype = k4a_stream_result_t
_k4a_playback_get_next_imu_sample.argtypes = (k4a_playback_t, \
ctypes.POINTER(k4a_imu_sample_t),)
return _k4a_playback_get_next_imu_sample(playback_handle, imu_sample)
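A minimal drain-loop sketch (hypothetical usage, assuming a `playback` handle already opened through the matching `k4a_playback_open` wrapper, and the C SDK convention that `k4a_stream_result_t` is 0 on success):
imu_sample = k4a_imu_sample_t()
while True:
    result = k4a_playback_get_next_imu_sample(playback, ctypes.byref(imu_sample))
    if result != 0:  # stop on EOF or failure
        break
    # acc_timestamp_usec is a field of the C struct in the SDK headers
    print(imu_sample.acc_timestamp_usec)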
|
faa127b8788163de209863adee1419c349852827
| 3,642,964
|
def knapsack_iterative_numpy(items, maxweight):
"""
Iterative knapsack method
maximize \sum_{i \in T} v_i
subject to \sum_{i \in T} w_i \leq W
Notes:
dpmat is the dynamic programming memoization matrix.
dpmat[i, w] is the total value of the items with weight at most W
T is the set of indicies in the optimal solution
"""
#import numpy as np
items = np.array(items)
weights = items.T[1]
# Find maximum decimal place (this problem is in NP)
max_exp = max([number_of_decimals(w_) for w_ in weights])
coeff = 10 ** max_exp
# Adjust weights to be integral
    weights = (weights * coeff).astype(np.int64)
values = items.T[0]
MAXWEIGHT = int(maxweight * coeff)
W_SIZE = MAXWEIGHT + 1
dpmat = np.full((len(items), W_SIZE), np.inf)
    kmat = np.full((len(items), W_SIZE), 0, dtype=bool)
idx_subset = []
for w in range(W_SIZE):
dpmat[0][w] = 0
for idx in range(1, len(items)):
item_val = values[idx]
item_weight = weights[idx]
for w in range(W_SIZE):
valid_item = item_weight <= w
prev_val = dpmat[idx - 1][w]
if valid_item:
prev_noitem_val = dpmat[idx - 1][w - item_weight]
withitem_val = item_val + prev_noitem_val
more_valuable = withitem_val > prev_val
else:
more_valuable = False
dpmat[idx][w] = withitem_val if more_valuable else prev_val
kmat[idx][w] = more_valuable
K = MAXWEIGHT
for idx in reversed(range(1, len(items))):
if kmat[idx, K]:
idx_subset.append(idx)
K = K - weights[idx]
idx_subset = sorted(idx_subset)
items_subset = [items[i] for i in idx_subset]
total_value = dpmat[len(items) - 1][MAXWEIGHT]
return total_value, items_subset
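A quick usage sketch, assuming the module's `number_of_decimals` helper returns 0 for whole-number weights. Note that the DP loop starts at index 1, so the first item is never selected; prepending a dummy (0, 0) item works around that:
items = [(0, 0), (60, 1.0), (100, 2.0), (120, 3.0)]  # (value, weight) pairs
total, subset = knapsack_iterative_numpy(items, maxweight=5.0)
print(total)   # 220.0 -- picks (100, 2.0) and (120, 3.0)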
|
7ef8ab10b91e72b7625fdfc9445501c4ae8e5554
| 3,642,965
|
import torch
def gram_matrix(image: torch.Tensor):
"""https://pytorch.org/tutorials/
advanced/neural_style_tutorial.html#style-loss"""
n, c, h, w = image.shape
x = image.view(n * c, w * h)
gram_m = torch.mm(x, x.t()).div(n * c * w * h)
return gram_m
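A shape sanity check; the result is an (n*c, n*c) symmetric matrix:
img = torch.rand(1, 3, 64, 64)            # n, c, h, w
G = gram_matrix(img)
assert G.shape == (3, 3)                  # (n*c, n*c)
assert torch.allclose(G, G.t())           # Gram matrices are symmetric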
|
5912cfec026cba26a77131c3b52a8e751c0f575e
| 3,642,966
|
def photos_of_user(request, user_id):
"""Displaying user's photo gallery and adding new photos to user's gellery
view.
"""
template = 'accounts/profile/photos_gallery.html'
user_acc = get_object_or_404(TLAccount, id=user_id)
photos = user_acc.photos_of_user.all() # Custom related name
context = {
'photos': photos,
'user_acc': user_acc
}
if request.method == 'POST':
if request.user.email != user_acc.email:
return HttpResponseBadRequest()
initial = {
'photo': request.FILES['user_gallery_photo']
}
form = AddPhotoToUserGalleryForm(request.POST, initial)
if form.is_valid():
final_form = form.save(commit=False)
# final_form.place = place
final_form.author = user_acc
final_form.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
else:
return render(request, template, context)
# If HTTP method is GET...
else:
return render(request, template, context)
|
3fd3cdfac7f1af4de13c464a3fe2bea26f72e6c2
| 3,642,967
|
def sw_update_opts_w_name_db_model_to_dict(sw_update_opts, subcloud_name):
"""Convert sw update options db model plus subcloud name to dictionary."""
result = {"id": sw_update_opts.id,
"name": subcloud_name,
"subcloud-id": sw_update_opts.subcloud_id,
"storage-apply-type": sw_update_opts.storage_apply_type,
"compute-apply-type": sw_update_opts.compute_apply_type,
"max-parallel-computes": sw_update_opts.max_parallel_computes,
"alarm-restriction-type": sw_update_opts.alarm_restriction_type,
"default-instance-action":
sw_update_opts.default_instance_action,
"created-at": sw_update_opts.created_at,
"updated-at": sw_update_opts.updated_at}
return result
|
c9c1703d9e4d0b69920d3ab06e5bf19fbb622103
| 3,642,968
|
def imf_binary_primary(m, imf, binary_fraction=constants.BIN_FRACTION):
"""
Initial mass function for primary stars of binary systems
Integrated between m' and m'' using Newton-Cotes
Returns 0 unless m is in (1.5, 16)
"""
m_inf = max(constants.B_MIN, m)
m_sup = min(constants.B_MAX, 2 * m)
if m <= 0 or m_sup <= m_inf:
return 0.0
return binary_fraction * newton_cotes(m_inf, m_sup, phi_primary(m, imf))
|
ae49a298d66a7ee844b252b5e736ea1c1846c31b
| 3,642,969
|
import torch
def compute_scene_graph_similarity(ade20k_split, threshold=None,
recall_funct=compute_recall_johnson_feiefei):
"""
:param ade20k_split:
:param threshold:
:param recall_funct:
:return:
"""
model = get_scene_graph_encoder()
model.eval()
test_results = []
with torch.no_grad():
for k, graph_dict in ade20k_split.items():
res = model(graph_dict)
test_results.append(res)
stacked_vectors = torch.stack(test_results)
category = get_categories(ade20k_split)
num_captions = stacked_vectors.shape[1]
index_inferred_caption = num_captions - 1
index_range_human_captions = index_inferred_caption
caption_dim = 1
recall_list = []
mean_rank_list = []
similarity_list = []
for index_caption in range(index_range_human_captions):
comparison = torch.cat((stacked_vectors[:, index_caption, :].unsqueeze(caption_dim),
stacked_vectors[:, index_inferred_caption, :].unsqueeze(caption_dim)),
dim=caption_dim)
similarity_caption = calculate_normalized_cosine_similarity_on_tensor(comparison)
recall_val, mean_rank = recall_funct(similarity_caption, threshold, category)
similarity_list.append(similarity_caption.diag().mean().to("cpu").numpy())
recall_list.append(recall_val)
mean_rank_list.append(mean_rank)
print(f"Threshold for retrieval: {threshold}")
recall_mean = pd.DataFrame(recall_list).mean().to_dict()
average_mean_rank = pd.DataFrame(mean_rank_list).mean()[0]
average_similarity = pd.DataFrame(similarity_list).mean()[0]
for k in recall_mean.keys():
print(f"Average {k}: {recall_mean[k]}")
recall_mean["mean_rank"] = average_mean_rank
print(f"Average Mean Rank: {average_mean_rank}")
print(f"Average Similarity{average_similarity}")
recall_mean["average_similarity"] = average_similarity
recall_mean["threshold"] = threshold
return recall_mean
|
061435209baa2c8d03af93ce09d00fcaf02adf8a
| 3,642,970
|
from itertools import islice
import importlib_resources
def load_cmudict():
"""Loads the CMU Pronouncing Dictionary"""
dict_ref = importlib_resources.files("tacotron").joinpath("cmudict-0.7b.txt")
with open(dict_ref, encoding="ISO-8859-1") as file:
cmudict = (line.strip().split(" ") for line in islice(file, 126, 133905))
cmudict = {
format_alt_entry(word): pronunciation for word, pronunciation in cmudict
}
return cmudict
|
76f3ed592cb3709d4f073c42ee7229ac0142b77a
| 3,642,971
|
def evalasm(d, text, r0 = 0, defines = defines, address = pad, thumb = False):
"""Compile and remotely execute an assembly snippet.
32-bit ARM instruction set by default.
Saves and restores r2-r12 and lr.
Returns (r0, r1).
"""
if thumb:
# In Thumb mode, we still use ARM code to save/restore registers.
assemble(d, address, '''\
push { r2-r12, lr }
adr lr, link
adr r8, text+1
bx r8
link:
pop { r2-r12, pc }
.pool
.thumb
.align 5
text:
%(text)s
bx lr
''' % locals(), defines=defines, thumb=False)
return d.blx(address, r0)
else:
# ARM mode (default)
assemble(d, address, '''\
push { r2-r12, lr }
%(text)s
pop { r2-r12, pc }
''' % locals(), defines=defines, thumb=False)
return d.blx(address, r0)
|
c5bf3f5728fc9e85dbfd2540083ae7b2b87cd452
| 3,642,972
|
import multiprocessing
def _get_thread_count():
"""Gets a thread_count based on the multiprocessing.cpu_count()."""
try:
thread_count = multiprocessing.cpu_count()
# cpu_count only gets the physical core count. There doesn't appear to be a
# simple way of determining whether a CPU supports simultaneous
# multithreading in Python, so assume that anything with 6 or more cores
# supports it.
if thread_count >= 6:
thread_count *= 2
except NotImplementedError:
# Assume a quad core if we can't get the actual core count.
thread_count = 4
return thread_count
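In containers the scheduler may restrict the process to fewer CPUs than the machine exposes; a sketch of an affinity-aware variant (a hypothetical helper, `os.sched_getaffinity` is Linux-only):
import os

def _get_thread_count_affinity():
    """Thread count that respects the process CPU affinity mask."""
    try:
        return len(os.sched_getaffinity(0))  # Linux-only API
    except AttributeError:
        return os.cpu_count() or 4           # cpu_count() may return None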
|
f7c4959734e49a70412d87ebc1f03b811b600600
| 3,642,973
|
def weighted_avg(x, weights): # used in lego_reader.py
""" x = batch * len * d
weights = batch * len
"""
return weights.unsqueeze(1).bmm(x).squeeze(1)
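A shape walk-through: `weights.unsqueeze(1)` is batch * 1 * len, `bmm` with batch * len * d gives batch * 1 * d, and `squeeze(1)` leaves batch * d:
import torch
x = torch.rand(4, 7, 16)                    # batch * len * d
w = torch.softmax(torch.rand(4, 7), dim=1)  # batch * len, rows sum to 1
out = weighted_avg(x, w)
assert out.shape == (4, 16)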
|
efa08d9719ccbcc727cb7349888f0a26140521e9
| 3,642,975
|
def CYR(df, N=5, M=5):
"""
市场强弱
:param df:
:param M:
:return:
"""
VOL = df['volume']
AMOUNT = df['amount']
DIVE = 0.01 * EMA(AMOUNT, N) / EMA(VOL, N)
CRY = (DIVE / REF(DIVE, 1) - 1) * 100
MACYR = MA(CRY, M)
return pd.DataFrame({
'CRY': CRY, 'MACYR': MACYR
})
|
7d5f31064d8eb3e4aaed8f6694226760a656f4d7
| 3,642,976
|
def packCode(code):
"""Packs the given code by passing it to the compression engine"""
if code in packCache:
return packCache[code]
packed = compressor.compress(parse(code))
packCache[code] = packed
return packed
|
b20714a022e73cbec38819d515c1cb89b8157d8c
| 3,642,977
|
def langpack_submission_allowed(user, parsed_addon_data):
"""Language packs can only be submitted by people with the right
permission.
See https://github.com/mozilla/addons-server/issues/11788 and
https://github.com/mozilla/addons-server/issues/11793
"""
return (
not parsed_addon_data.get('type') == amo.ADDON_LPAPP or
action_allowed_user(user, amo.permissions.LANGPACK_SUBMIT))
|
5d26aaff3089a4e4ba6b2325f25d7ad5d759bcd9
| 3,642,979
|
import re
def process_derived_core_properties(derived_core_properties):
"""Parse DerivedCoreProperties.txt and returns its version,
and set of characters with ID_Start and ID_Continue. """
id_start = set()
id_continue = set()
    m = re.match(r'# DerivedCoreProperties-([0-9.]+)\.txt', derived_core_properties)
assert m
version = m.group(1)
for (char, prop) in read_derived_core_properties(derived_core_properties):
if prop == 'ID_Start':
id_start.add(char)
if prop == 'ID_Continue':
id_continue.add(char)
return (version, id_start, id_continue)
|
cb15993eb84e3d1e7a1f65528f2f677e1e596668
| 3,642,982
|
def error_500(error):
"""Route function for handling 500 error pages
"""
return flask.templating.render_template("errors/500.html.j2"), 500
|
8d93367e21e855c672de50901de9793a326867e6
| 3,642,983
|
def poormax(X: np.ndarray, feature_axis=1) -> np.ndarray:
    """
    Min-max (range) normalization of the data.
    :param X: input array
    :param feature_axis: axis on which the features lie;
        feature_axis = 1 means each column is a feature
    """
if not feature_axis:
X = X.T
_min = np.min(X, axis = 0)
_max = np.max(X, axis = 0)
across = _max - _min
X = (X - _min) / across
if not feature_axis:
X = X.T
return X
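A small worked example; each column (feature) is scaled to [0, 1]:
import numpy as np
X = np.array([[1., 10.], [2., 20.], [3., 30.]])
print(poormax(X))
# [[0.  0. ]
#  [0.5 0.5]
#  [1.  1. ]]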
|
8d2c45b225d05f36951eb6fac2fc19214b6e3f31
| 3,642,984
|
def login_form(request):
"""
    The request must be a GET.
"""
menu = MenuService.visitor_menu()
requestContext = RequestContext(request, {'menu':menu,
'page_title': 'Login'} )
return render_to_response('login.html', requestContext)
|
596273f8925a4d6aa39584f94262fc0f1d53657d
| 3,642,985
|
def tz_from_dd(points):
"""Get the timezone for a coordinate pair
Args:
points: (lat, lon) | [(lat, lon),] | pd.DataFrame w/lat and lon as columns
Returns:
np.array
"""
if isinstance(points, pd.DataFrame):
points = points.values.tolist()
if not isinstance(points, list):
points = [points]
x = ztree.query(points)
x = zips.iloc[x[1]].timezone.values
return x
|
5a6b05f1bf88c3a016cc5beae024a99873715904
| 3,642,986
|
def find_touching_pixels(label_img, distance=1, selem=None):
"""
Returns a mask indicating touching regions. Either provide a diameter for a disk shape
distance or a selem mask.
:param label_img: a label image with integer labels
:param distance: =1: touching pixels, >1 pixels labels distance appart
:param selem: optional, a selection mask, e.g. skimage.morphology.disk(1) (if this is bigger than
1 the 'distance' is not true.
:return: a mask of the regions touching or are close up to a certain diameter
"""
if selem is None:
selem = morphology.disk(1)
touch_mask = np.zeros(label_img.shape)
not_bg = label_img > 0
for i in np.unique(label_img):
if i != 0:
cur_lab = (label_img == i)
# touch_mask[ndi.filters.maximum_filter(cur_lab, footprint=selem) &
# not_bg & (cur_lab == False)] = 1
touch_mask[ndi.binary_dilation(cur_lab, structure=selem, iterations=distance, mask=not_bg) &
(cur_lab == False)] = 1
return touch_mask
|
a69b2b89be2df9660f1016c008c266de7932bb90
| 3,642,988
|
def draw_boxes_on_image(img, boxes, labels_index, labelmap_dict,
**kwargs):
"""Short summary.
Parameters
----------
img : ndarray
Input image.
    boxes : ndarray-like
        It must have shape (n, 4) where n is the number of
        bounding boxes.
    labels_index : ndarray-like
        An array containing the indices of the labels of the bounding boxes.
        If None, only bounding boxes will be drawn.
labelmap_dict : dict
A dictionary mapping labels with its index.
Returns
-------
img
Return annotated image.
"""
# When no box is detected
if boxes is None:
return img
try:
boxes = convert(boxes,
lambda x: np.asarray(x, dtype=np.int32),
np.ndarray)
except TypeError:
raise_type_error(type(boxes), [np.ndarray])
# When no box is detected
if boxes.shape[0] == 0:
return img
if boxes.shape[1] != 4 or boxes.ndim != 2:
raise ValueError("Input bounding box must be of shape (n, 4), "
"got shape {} instead".format(boxes.shape))
else:
return _draw_boxes_on_image(img, boxes, labels_index,
labelmap_dict, **kwargs)
|
1ee1d7b4e04e8646dd4e986e1a7e72d42d3f9685
| 3,642,989
|
def guess_locations(location):
"""Convenience function to guess where other Strongholds are located."""
location = Point(*location)
return (location,
rotate(location, CLOCKWISE),
rotate(location, COUNTERCLOCKWISE))
|
34c6824d63dbd99e4b09c6bb588298add404d87a
| 3,642,990
|
def get_centroid(mol, conformer=-1):
"""
Returns the centroid of the molecule.
    Parameters
    ----------
    mol :
        The molecule whose centroid is calculated.
    conformer : :class:`int`, optional
        The id of the conformer to use.
Returns
-------
:class:`numpy.array`
A numpy array holding the position of the centroid.
"""
centroid = sum(x for _, x in all_atom_coords(mol, conformer))
return np.divide(centroid, mol.GetNumAtoms())
|
393b5e27a5fa1779f98c2455c88d36027036e5f2
| 3,642,991
|
import numpy as np
import torch
def idct(X, norm=None):
"""
The inverse to DCT-II, which is a scaled Discrete Cosine Transform, Type III
Our definition of idct is that idct(dct(x)) == x
    For the meaning of the parameter `norm`, see:
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.dct.html
:param X: the input signal
:param norm: the normalization, None or 'ortho'
:return: the inverse DCT-II of the signal over the last dimension
"""
x_shape = X.shape
N = x_shape[-1]
X_v = X.contiguous().view(-1, x_shape[-1]) / 2
if norm == 'ortho':
X_v[:, 0] *= np.sqrt(N) * 2
X_v[:, 1:] *= np.sqrt(N / 2) * 2
k = torch.arange(x_shape[-1], dtype=X.dtype,
device=X.device)[None, :] * np.pi / (2 * N)
W_r = torch.cos(k)
W_i = torch.sin(k)
V_t_r = X_v
V_t_i = torch.cat([X_v[:, :1] * 0, -X_v.flip([1])[:, :-1]], dim=1)
V_r = V_t_r * W_r - V_t_i * W_i
V_i = V_t_r * W_i + V_t_i * W_r
V = torch.cat([V_r.unsqueeze(2), V_i.unsqueeze(2)], dim=2)
v = torch.irfft(V, 1, onesided=False)
x = v.new_zeros(v.shape)
x[:, ::2] += v[:, :N - (N // 2)]
x[:, 1::2] += v.flip([1])[:, :N // 2]
return x.view(*x_shape)
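A round-trip sanity check, assuming the companion `dct` (DCT-II) from the same module is available; note that `torch.irfft` was removed in PyTorch 1.8, so this code targets older releases:
x = torch.rand(8, 64)
X = dct(x, norm='ortho')   # assumed companion forward transform
assert torch.allclose(idct(X, norm='ortho'), x, atol=1e-5)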
|
f0b86dbbe80fe9b2e4b442f55ea67b82f7eaa019
| 3,642,992
|
def div25():
"""
Returns the divider 44444444444444444444444
:return: divider25
"""
return divider25
|
6bb38e50a6cd7fe80c9aef5dbb2d829c0c5a6fb5
| 3,642,993
|
def comp_periodicity(self, wind_mat=None):
"""Computes the winding matrix (anti-)periodicity
Parameters
----------
self : Winding
A Winding object
wind_mat : ndarray
Winding connection matrix
Returns
-------
per_a: int
Number of spatial periods of the winding
is_aper_a: bool
True if the winding is anti-periodic over space
"""
if wind_mat is None:
wind_mat = self.get_connection_mat()
assert len(wind_mat.shape) == 4, "dim 4 expected for wind_mat"
# Summing on all the layers (Nlay_r and Nlay_theta)
wind_mat2 = squeeze(np_sum(np_sum(wind_mat, axis=1), axis=0))
    qs = wind_mat.shape[3]  # Number of phases
    Zs = wind_mat.shape[2]  # Number of slots
Nperw = 1 # Number of electrical period of the winding
Nperslot = 1 # Periodicity of the winding in number of slots
# Looking for the periodicity of each phase
for q in range(0, qs):
k = 1
is_per = False
while k <= Zs and not is_per:
            # We shift the array around the slots and check if it stays the same
if array_equal(wind_mat2[:, q], roll(wind_mat2[:, q], shift=k)):
is_per = True
else:
k += 1
# least common multiple to find common periodicity between different phase
Nperslot = lcm(Nperslot, k)
# If Nperslot > Zs no symmetry
if Nperslot > 0 and Nperslot < Zs:
# nb of periods of the winding (2 means 180°)
Nperw = Zs / float(Nperslot)
# if Zs cannot be divided by Nperslot (non integer)
if Nperw % 1 != 0:
Nperw = 1
# Check for anti symmetries in the elementary winding pattern
if (
Nperslot % 2 == 0
and norm(
wind_mat2[0 : Nperslot // 2, :] + wind_mat2[Nperslot // 2 : Nperslot, :]
)
== 0
):
is_aper_a = True
Nperw = Nperw * 2
else:
is_aper_a = False
return int(Nperw), is_aper_a
|
f1c7074cdc55be6af3c5511a071a1df0835e666e
| 3,642,994
|
def _is_valid_target(target, target_name, target_ports, is_pair):
"""Return True if the specified target is valid, False otherwise."""
if is_pair:
return (target[:utils.PORT_ID_LENGTH] in target_ports and
target_name == _PAIR_TARGET_NAME)
if (target[:utils.PORT_ID_LENGTH] not in target_ports or
not target_name.startswith(utils.TARGET_PREFIX) or
target_name == _PAIR_TARGET_NAME):
return False
return True
|
58a7c2ceb7b3206777c01122b0c3ef01a5887b65
| 3,642,995
|
def _get_span_name(servicer_context):
"""Generates a span name based off of the gRPC server rpc_request_info"""
method_name = servicer_context._rpc_event.call_details.method[1:]
if isinstance(method_name, bytes):
method_name = method_name.decode('utf-8')
method_name = method_name.replace('/', '.')
return '{}.{}'.format(RECV_PREFIX, method_name)
|
5527820fa766fe29009e6fe060e76c01a75e3c37
| 3,642,996
|
def calculateNDFairnessPara(_ranking, _protected_group, _cut_point, _gf_measure, _normalizer, items_n, proItems_n ):
"""
Calculate group fairness value of the whole ranking.
Calls function 'calculateFairness' in the calculation.
:param _ranking: A permutation of N numbers (0..N-1) that represents a ranking of N individuals,
e.g., [0, 3, 5, 2, 1, 4]. Each number is an identifier of an individual.
Stored as a python array.
:param _protected_group: A set of identifiers from _ranking that represent members of the protected group
e.g., [0, 2, 3]. Stored as a python array for convenience, order does not matter.
:param _cut_point: Cut range for the calculation of group fairness, e.g., 10, 20, 30,...
:param _gf_measure: Group fairness measure to be used in the calculation,
one of 'rKL', 'rND', 'rRD'.
:param _normalizer: The normalizer of the input _gf_measure that is computed externally for efficiency.
    :param items_n: Total number of items in the full ranking.
    :param proItems_n: Total number of protected items in the full ranking.
:return: returns fairness value of _ranking, a float, normalized to [0, 1]
"""
#print("calculateNDFairnessPara")
#user_N=len(_ranking)
#pro_N=len(_protected_group)
    if _normalizer == 0:
        raise ValueError("Normalizer equals zero")
# error handling for input type
if not isinstance(_ranking, (list, tuple, np.ndarray)) and not isinstance( _ranking, str ):
raise TypeError("Input ranking must be a list-wise structure defined by '[]' symbol")
if not isinstance(_protected_group, (list, tuple, np.ndarray)) and not isinstance( _protected_group, str ):
raise TypeError("Input protected group must be a list-wise structure defined by '[]' symbol")
if not isinstance( _cut_point, ( int ) ):
raise TypeError("Input batch size must be an integer larger than 0")
if not isinstance( _normalizer, (int, float, complex) ):
raise TypeError("Input normalizer must be a number larger than 0")
if not isinstance( _gf_measure, str ):
raise TypeError("Input group fairness measure must be a string that choose from ['rKL', 'rND', 'rRD']")
discounted_gf=0 #initialize the returned gf value
for countni in range(len(_ranking)):
countni=countni+1
if(countni%_cut_point ==0):
ranking_cutpoint=_ranking[0:countni]
pro_cutpoint=set(ranking_cutpoint).intersection(_protected_group)
gf=calculateFairness(ranking_cutpoint,pro_cutpoint,items_n, proItems_n,_gf_measure)
#discounted_gf+=gf/math.log(countni+1,LOG_BASE) # log base -> global variable
#print("counttni : ", countni)
discounted_gf+=gf/(1.1**(countni-10/1000)) # log base -> global variable
# make a call to compute, or look up, the normalizer; make sure to check that it's not 0!
# generally, think about error handling
return discounted_gf/_normalizer
|
b1c0dfa53d1842f8d93a6ed6d2ae2ddd9ebafd7b
| 3,642,997
|
import json
import requests
def change_server(name: str = None, description: str = None, repo_url: str = None, main_status: int = None, components: dict = None, password: str = None):
"""Change server according to arguments (using package config).
This will automatically change the config so it has the right credentials."""
check_config()
global server_name
global server_password
payload = {"name": server_name, "password": server_password}
    if name is not None:
        if not isinstance(name, str):
            raise TypeError("name expected to be of type str.")
        payload["newName"] = name
    if description is not None:
        if not isinstance(description, str):
            raise TypeError("description expected to be of type str.")
        payload["description"] = description
    if repo_url is not None:
        if not isinstance(repo_url, str):
            raise TypeError("repo_url expected to be of type str.")
        payload["repoURL"] = repo_url
    if main_status is not None:
        if not isinstance(main_status, int):
            raise TypeError("main_status expected to be of type int.")
        payload["mainStatus"] = main_status
    if components is not None:
        if not isinstance(components, dict):
            raise TypeError("components expected to be of type dict.")
        payload["components"] = json.dumps(components)
    if password is not None:
        if not isinstance(password, str):
            raise TypeError("password expected to be of type str.")
        payload["newPassword"] = password
try:
r = requests.post(_url + "api/changeserver",
json.dumps(payload), timeout=3.05)
if r.status_code == 200:
            if name is not None:
                server_name = name
            if password is not None:
                server_password = password
return True
else:
return (False, r.status_code, r.text)
except requests.exceptions.ConnectTimeout:
raise ConnectionTimeout
|
f7a5334da8ef011969c8ffb5c31c1b4f477ed2a5
| 3,642,999
|
def tokenize(lines, token='word'):
"""Split text lines into word or character tokens."""
if token == 'word':
return [line.split() for line in lines]
elif token == 'char':
return [list(line) for line in lines]
    else:
        raise ValueError('unknown token type: ' + token)
|
c30c8b3f1ea5d5752e17bc9fd514acaf097cba18
| 3,643,000
|
from hashlib import md5
def gravatar(environ):
"""
Generate a gravatar link.
"""
email = environ.get('tank.user_info', {}).get('email', '')
return GRAVATAR % md5(email.lower()).hexdigest()
|
0464d409f4e0c1fef251927930618236146ac3f1
| 3,643,002
|
def keyrep(kspec, enc="utf-8"):
"""
Instantiate a Key given a set of key/word arguments
:param kspec: Key specification, arguments to the Key initialization
    :param enc: The encoding of the strings. For JSON input (the default),
        the encoding is utf-8.
:return: Key instance
"""
if enc:
_kwargs = {}
for key, val in kspec.items():
if isinstance(val, str):
_kwargs[key] = val.encode(enc)
else:
_kwargs[key] = val
else:
_kwargs = kspec
if kspec["kty"] == "RSA":
item = RSAKey(**_kwargs)
elif kspec["kty"] == "oct":
item = SYMKey(**_kwargs)
elif kspec["kty"] == "EC":
item = ECKey(**_kwargs)
else:
item = Key(**_kwargs)
return item
|
25524953376a83562859b33a91ba10ae85c2c25d
| 3,643,003
|
import math
def aa2matrix(axis, angle, radians=True, random=False):
"""
Given an axis and an angle, return a 3x3 rotation matrix.
Based on:
https://en.wikipedia.org/wiki/Rotation_matrix#Axis_and_angle
Args:
axis: a vector about which to perform a rotation
angle: the angle of rotation
radians: whether the supplied angle is in radians (True)
or in degrees (False)
random: whether or not to choose a random rotation matrix. If True, the
axis and angle are ignored, and a random orientation is generated
Returns:
a 3x3 numpy array representing a rotation matrix
"""
#Convert to radians if necessary
if radians is not True:
angle *= rad
#Allow for generation of random rotations
    if random is True:
        axis = [rand(), rand(), rand()]
        angle = rand() * pi * 2
#Ensure axis is a unit vector
axis = axis / np.linalg.norm(axis)
#Define quantities which are reused
x = np.real(axis[0])
y = np.real(axis[1])
z = np.real(axis[2])
c = math.cos(angle)
s = math.sin(angle)
C = 1 - c
#Define the rotation matrix
Q = np.zeros([3,3])
Q[0][0] = x*x*C + c
Q[0][1] = x*y*C - z*s
Q[0][2] = x*z*C + y*s
Q[1][0] = y*x*C + z*s
Q[1][1] = y*y*C + c
Q[1][2] = y*z*C - x*s
Q[2][0] = z*x*C - y*s
Q[2][1] = z*y*C + x*s
Q[2][2] = z*z*C + c
return Q
|
d41460663edd36e5da1255636514468180e20511
| 3,643,004
|
def expand_value_range(value_range_expression):
"""Expand the value range expression.
Args:
value_range_expression: Value range or expression to expand.
Return:
iterable.
"""
if type(value_range_expression) is str:
# Grid search
if value_range_expression.startswith('np.arange'):
value_range_expression = arange(value_range_expression)
# Random search
elif value_range_expression.startswith('np.random'):
raise NotImplementedError('Random search space '
'not implemented yet')
# If not an iterable, make it an iterable
try:
iter(value_range_expression)
except TypeError:
value_range_expression = [value_range_expression]
return value_range_expression
|
bddfc2fd4ed65101ecb3d8ca2bc5d11de58374bd
| 3,643,005
|
def date_range(df):
"""Takes the dataframe returns date range.
Example here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html
Returns as Days
"""
start_date = df.tail(1)['date']
start = pd.Timestamp.date(list(start_date.to_dict().values())[0])
end_date = df.head(1)['date']
end = pd.Timestamp.date(list(end_date.to_dict().values())[0])
rng = pd.date_range(start, end)
return rng
|
1b577b29ccc7ed6751e8162f11b076042178c590
| 3,643,006
|
import datetime
def last_hit_timestamp(hit_count_rules, month):
"""
Get list of last hit timestamp to rule
:param hit_count_rules:
dictionary which contain json response with all hit count rules
:param month:
number of month elapsed since the rule was triggered last
    :return:
        list of rules that are older than the value in param month (each contains
        rule name, id, type and access policy name)
    """
rule = []
for i in hit_count_rules:
last_refresh = datetime.datetime.strptime(i["lastFetchTimeStamp"], '%Y-%m-%dT%H:%M:%SZ')
limit = last_refresh - datetime.timedelta(month * 365 / 12)
if i["lastHitTimeStamp"] != " ":
last_hit = datetime.datetime.strptime(i["lastHitTimeStamp"], '%Y-%m-%dT%H:%M:%SZ')
if last_hit < limit:
rule.append(i["rule"])
return rule
|
048d63e9ad77bf19974b4506aedb66d98fb84403
| 3,643,007
|
def url(should_be=None):
"""Like the default ``url()``, but can be called without arguments,
in which case it returns the current url.
"""
if should_be is None:
return get_browser().get_url()
else:
return twill.commands.url(should_be)
|
a9faa937ffe994136d16e5c86082f22600368431
| 3,643,008
|
import numpy as np
from sklearn.neighbors import LocalOutlierFactor
def remove_outliers(X_train, y_train):
"""
    This function removes outlier rows from the given numpy arrays
    and returns cleaned versions of them.
    Parameters
    ----------
    X_train: array from which outlier rows are removed
    y_train: paired array from which the same rows are removed
"""
clf = LocalOutlierFactor(n_neighbors=2)
out1 = clf.fit_predict(X_train)
out2 = clf.fit_predict(y_train)
indexes = np.argwhere(out1+out2 != 2)
X_train = np.delete(X_train,indexes,axis=0)
y_train = np.delete(y_train,indexes,axis=0)
return X_train,y_train
|
35c86ba50ca6398ec70e95c07091bb1ffc6811d2
| 3,643,009
|
from shapely.geometry import mapping, shape
from tqdm import tqdm
def query_data(regions, filepath_nl, filepath_lc, filepath_pop):
"""
Query raster layer for each shape in regions.
"""
shapes = []
csv_data = []
for region in tqdm(regions):
geom = shape(region['geometry'])
population = get_population(geom, filepath_pop)
pop_density_km2, area_km2 = get_density(geom, population, 'epsg:4326', 'epsg:3857')
shapes.append({
'type': region['type'],
'geometry': mapping(geom),
# 'id': region['id'],
'properties': {
'population': population,
'pop_density_km2': pop_density_km2,
'area_km2': area_km2,
'geotype': define_geotype(pop_density_km2),
'GID_2': region['properties']['GID_2'],
'GID_3': region['properties']['GID_3'],
}
})
csv_data.append({
'population': population,
'pop_density_km2': pop_density_km2,
'area_km2': area_km2,
'geotype': define_geotype(pop_density_km2),
'GID_2': region['properties']['GID_2'],
'GID_3': region['properties']['GID_3'],
})
return shapes, csv_data
|
a4ecd234d04fc1cf677d276afe11c716a1c3b854
| 3,643,010
|
from bs4 import BeautifulSoup
import re
def scrape_urls(html_text, pattern):
"""Extract URLs from raw html based on regex pattern"""
soup = BeautifulSoup(html_text,"html.parser")
anchors = soup.find_all("a")
    urls = [a.get("href") for a in anchors]
    # skip anchors without an href attribute (a.get returns None for them)
    return [url for url in urls if url and re.match(pattern, url)]
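A small usage sketch; the `url and ...` guard above matters because anchors without an href yield None:
html = '<p><a href="https://example.com/a">A</a><a name="x">no href</a></p>'
print(scrape_urls(html, r'https://'))   # ['https://example.com/a']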
|
dfba40df7894db91575b51a82d89fef0f824d362
| 3,643,011
|
from typing import List
from torch import nn
def get_num_weight_from_name(model: nn.Module, names: List[str]) -> List[int]:
"""Get list of number of weights from list of name of modules."""
numels = []
for n in names:
module = multi_getattr(model, n)
num_weights = module.weight.numel()
numels.append(num_weights)
return numels
|
ae6c3bfb5abe3522ff6d276cde052a5270e5741e
| 3,643,012
|
from collections import OrderedDict
def _categories_level(keys):
"""use the Ordered dict to implement a simple ordered set
return each level of each category
[[key_1_level_1,key_2_level_1],[key_1_level_2,key_2_level_2]]
"""
res = []
for i in zip(*(keys)):
tuplefied = _tuplify(i)
res.append(list(OrderedDict([(j, None) for j in tuplefied])))
return res
|
35f62244c3d3b893008d7ba7b8a9f651528c198e
| 3,643,013
|
def to_usd(my_price):
"""
Converts a numeric value to usd-formatted string, for printing and display purposes.
Param: my_price (int or float) like 4000.444444
Example: to_usd(4000.444444)
Returns: $4,000.44
"""
return f"${my_price:,.2f}"
|
a8959cdca7f011a435e35b4a4a5d2d43911a55da
| 3,643,014
|
def computeNodeDerivativeHermiteLagrange(cache, coordinates, node1, derivative1, scale1, node2, scale2):
"""
Computes the derivative at node2 from quadratic Hermite-Lagrange interpolation of
node1 value and derivative1 to node2 value.
:param cache: Field cache to evaluate in.
:param coordinates: Coordinates field.
:param node1, node2: Start and end nodes.
:param derivative1: Node value label for derivative at node1.
:param scale1, scale2: Scaling to apply to derivatives at nodes, e.g. -1.0 to reverse.
:return: dx_dxi at node2
"""
cache.setNode(node1)
result, v1 = coordinates.getNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, 3 )
result, d1 = coordinates.getNodeParameters(cache, -1, derivative1, 1, 3 )
d1 = [ d*scale1 for d in d1 ]
cache.setNode(node2)
result, v2 = coordinates.getNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, 3 )
d2 = interpolateHermiteLagrangeDerivative(v1, d1, v2, 1.0)
d2 = [ d*scale2 for d in d2 ]
return d2
|
7eb98502341e94e277b4d7b98b68293ff28f395b
| 3,643,015
|
def _step5(state):
"""
Construct a series of alternating primed and starred zeros as follows.
Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0 (if any).
Let Z2 denote the primed zero in the row of Z1 (there will always be one).
Continue until the series terminates at a primed zero that has no starred
zero in its column. Unstar each starred zero of the series, star each
primed zero of the series, erase all primes and uncover every line in the
matrix. Return to Step 3
"""
count = 0
path = state.path
path[count, 0] = state.Z0_r
path[count, 1] = state.Z0_c
while True:
# Find the first starred element in the col defined by
# the path.
row = np.argmax(state.marked[:, path[count, 1]] == 1)
if not state.marked[row, path[count, 1]] == 1:
# Could not find one
break
else:
count += 1
path[count, 0] = row
path[count, 1] = path[count - 1, 1]
# Find the first prime element in the row defined by the
# first path step
col = np.argmax(state.marked[path[count, 0]] == 2)
if state.marked[row, col] != 2:
col = -1
count += 1
path[count, 0] = path[count - 1, 0]
path[count, 1] = col
# Convert paths
for i in range(count + 1):
if state.marked[path[i, 0], path[i, 1]] == 1:
state.marked[path[i, 0], path[i, 1]] = 0
else:
state.marked[path[i, 0], path[i, 1]] = 1
state._clear_covers()
# Erase all prime markings
state.marked[state.marked == 2] = 0
return _step3
|
4d6e50164724b6fdaa42fa41423677dd80500a3e
| 3,643,016
|
def encode_ascii_xml_array(data):
"""Encode an array-like container of strings as fixed-length 7-bit ASCII
with XML-encoding for characters outside of 7-bit ASCII.
"""
if isinstance(data, np.ndarray) and \
data.dtype.char == STR_DTYPE_CHAR and \
data.dtype.itemsize > 0:
return data
    # materialize the list so it survives both the max() pass and np.array
    ascii_data = [encode_ascii_xml(s) if s is not None else '' for s in data]
    fixed_len = max(len(s) for s in ascii_data)
    fixed_len = max(1, fixed_len)
    dtype = '%s%d' % (STR_DTYPE_CHAR, fixed_len)
    return np.array(ascii_data, dtype=dtype)
|
a9baf0ca562b78ce36c49e1d64c8f8a9015df097
| 3,643,017
|
from typing import Union
from pathlib import Path
from typing import Optional
from typing import List
import queue
def download_dataset(
period: str, output_dir: Union[Path, str], fewer_threads: bool, datasets_path: Optional[Union[Path, str]] = None
) -> List[Path]:
"""Download files from the given dataset with the provided selections.
Args:
period: Name of the period to be downloaded.
output_dir: Path to where the data should be stored.
fewer_threads: If True, reduce the number of threads by half.
        datasets_path: Path to the dataset configuration file. Default: None,
            in which case the files will be taken from those defined in the package.
    Returns:
        List of paths to the files stored for this period.
"""
# Validation
output_dir = Path(output_dir)
if datasets_path:
datasets_path = Path(datasets_path)
# Setup the dataset
dataset = _extract_dataset_from_yaml(period=period, datasets_path=datasets_path)
# Setup
q: FilePairQueue = queue.Queue()
queue_filler = DatasetDownloadFiller(
dataset=dataset,
output_dir=output_dir,
q=q,
)
download(queue_filler=queue_filler, q=q, fewer_threads=fewer_threads)
# Return the files that are stored corresponding to this period.
period_specific_dir = output_dir / dataset.data_type / str(dataset.year) / dataset.period
period_files = sorted(Path(period_specific_dir).glob(f"**/{dataset.filename}"))
logger.info(f"period_specific_dir: {period_specific_dir}, number of files: {len(period_files)}")
# Write out the file list
filelist = Path(output_dir) / "filelists" / f"{dataset.period}{dataset.file_type}.txt"
filelist.parent.mkdir(exist_ok=True, parents=True)
# Add the suffix to access the ROOT file if it's contained in a zip archive.
suffix = ""
if ".zip" in dataset.filename:
suffix = "#AliAOD.root" if dataset.file_type == "AOD" else "#AliESDs.root"
with open(filelist, "w") as f:
# One file per line.
f.write("\n".join([f"{p}{suffix}" for p in period_files]))
return period_files
|
0c98956bb54f6f948a31097e47ba6008e91ebefc
| 3,643,018
|
def monospaced(fields, context):
"""
Make text monospaced.
    In HTML: use <pre> or <code> tags
In Markdown: use backticks
In Text: use Unicode characters
"""
content = fields[0]
target = context['target']
if target == 'md':
return wrapper('`')([content], context)
if target == 'html':
multiline = False
for chunk in content:
if type(chunk) is str and '\n' in chunk:
multiline = True
break
if multiline:
tag = 'pre'
else:
tag = 'code'
return taggifier(tag)([content], context)
if target == 'txt':
return keymapper('monospaced')([content], context)
|
eed91b414ce8cb486b115d0d203db4e7ed81e5d5
| 3,643,019
|
def indexview(request):
"""
initial page
shows all the domains in columns
"""
domdb = Domain.objects
if not request.user.has_perm('editapp.see_all'): # only see mine
domdb = domdb.filter(owner__username=request.user.username)
domains = [ d.domain for d in domdb.order_by('domain') ]
# show in four columns
# so slice into four arrays
dslice = int((len(domains)+3)/4)
c1,c2,c3,c4 = [ [d for d in domains[n*dslice:(n+1)*dslice]] for n in range(4) ]
return render(request, 'editapp/index.html',
{
'c1': c1, 'c2': c2, 'c3': c3, 'c4': c4,
'bpnav': bpnav(request, 'index')
})
|
b99e8b3499f7d7a282b5a93606dddf7527a5e93b
| 3,643,020
|
def MC_swap(alloy, N, E, T):
"""
Randomly selects an atom and one of its neighbours in a
matrix and calculates the change in energy if the two atoms were swapped.
The following assignment is used to represent the neighbouring directions:
1 = up
2 = right
3 = down
4 = left
"""
kT = 8.617332*10**-5*T
random_atom = np.random.randint(0,N,2)
atom1 = alloy[random_atom[0],random_atom[1]]
random_neighbour = np.random.randint(1,5,1)
    # Select the appropriate neighbour (periodic boundary conditions)
    if random_neighbour == 1:    # up
        row2 = (random_atom[0] - 1) % N
        column2 = random_atom[1]
    elif random_neighbour == 2:  # right
        row2 = random_atom[0]
        column2 = (random_atom[1] + 1) % N
    elif random_neighbour == 3:  # down
        row2 = (random_atom[0] + 1) % N
        column2 = random_atom[1]
    else:                        # left
        row2 = random_atom[0]
        column2 = (random_atom[1] - 1) % N
atom2 = alloy[row2, column2]
if atom1==atom2:
e=0
else:
# Need to calculate the energy before and after atom one and two swap
        # Atom 1 neighbours (periodic wrap)
        up1 = (random_atom[0] - 1) % N
        down1 = (random_atom[0] + 1) % N
        left1 = (random_atom[1] - 1) % N
        right1 = (random_atom[1] + 1) % N
        # Atom 2 neighbours (periodic wrap)
        up2 = (row2 - 1) % N
        down2 = (row2 + 1) % N
        left2 = (column2 - 1) % N
        right2 = (column2 + 1) % N
# Change in energy
Bonds1 = alloy[down1, random_atom[1]] + alloy[up1, random_atom[1]] + alloy[random_atom[0], right1] + alloy[random_atom[0], left1]
Bonds2 = alloy[down2, column2] + alloy[up2, column2] + alloy[row2, right2] + alloy[row2, left2]
# Count number of A-B bonds for atoms 1 and 2
if atom1==0:
Initial1=Bonds1
End1=4-Bonds1
Initial2=4-Bonds2
End2=Bonds2
else:
Initial1=4-Bonds1
End1=Bonds1
Initial2=Bonds2
End2=4-Bonds2
e = E*(End1+End2-Initial1-Initial2) # Energy difference for swapping atoms
#Swapping atoms if there is enough energy to do so
if e<0:
alloy[random_atom[0],random_atom[1]]=atom2
alloy[row2, column2]=atom1
elif np.exp(-e/kT)>np.random.uniform(0,1):
alloy[random_atom[0],random_atom[1]]=atom2
alloy[row2, column2]=atom1
return alloy
|
aea84cd605389e480d89e78fcca9806bc68e0c83
| 3,643,021
|
def _try_type(value, dtype):
"""
Examples
--------
>>> _try_type("1", int)
1
>>> _try_type(1.0, int)
1
>>> _try_type("ab", float)
'ab'
"""
try:
return dtype(value)
except ValueError:
return value
|
4a188e57dfafca96e6cd8a815dbbb162c74df01b
| 3,643,022
|
from datetime import datetime, timedelta
def get_all_codes(date=None):
"""
获取某个交易日的所有股票代码列表,如果没有指定日期,则从当前日期一直向前找,直到找到有
数据的一天,返回的即是那个交易日的股票代码列表
:param date: 日期
:return: 股票代码列表
"""
datetime_obj = datetime.now()
if date is None:
date = datetime_obj.strftime('%Y-%m-%d')
codes = []
while len(codes) == 0:
code_cursor = DB_CONN.basic.find(
{'date': date},
projection={'code': True, '_id': False})
codes = [x['code'] for x in code_cursor]
datetime_obj = datetime_obj - timedelta(days=1)
date = datetime_obj.strftime('%Y-%m-%d')
return codes
|
b5d861f1991763e8196f1f336faffefc00b58df4
| 3,643,023
|
def cluster_config(request_data, op_ctx: ctx.OperationContext):
"""Request handler for cluster config operation.
Required data: cluster_name
Optional data and default values: org_name=None, ovdc_name=None
(data validation handled in broker)
:return: Dict
"""
_raise_error_if_pks_not_enabled()
cluster, broker = _get_cluster_info(request_data, op_ctx, telemetry=False) # noqa: E501
telemetry_handler.record_user_action_details(
cse_operation=CseOperation.PKS_CLUSTER_CONFIG,
cse_params=_get_telemetry_data(request_data, cluster))
return broker.get_cluster_config(data=request_data)
|
985e9633d54c0b7ccfc235f6c34bb4d4c5086ebf
| 3,643,024
|
from logging import getLogger
from .pyazureutils_errors import PyazureutilsError
def iotcentral_cli_handler(args):
"""
CLI entry point for command: iotcentral
"""
logger = getLogger(__name__)
try:
if args.action == "register-device":
status = _action_register_device(args)
except PyazureutilsError as exc:
logger.error("Operation failed with %s: %s", type(exc).__name__, exc)
return STATUS_FAILURE
|
e5c78f24c459ff45ab8a88198697eae0a9bb7abe
| 3,643,026
|
def kelly_kapowski(s, g, w, its=45, r=0.025, m=1.5, **kwargs):
"""
Compute cortical thickness using the DiReCT algorithm.
Diffeomorphic registration-based cortical thickness based on probabilistic
segmentation of an image. This is an optimization algorithm.
Arguments
---------
    s : ANTsImage
        segmentation image
g : ANTsImage
gray matter probability image
w : ANTsImage
white matter probability image
its : integer
convergence params - controls iterations
r : scalar
gradient descent update parameter
m : scalar
gradient field smoothing parameter
kwargs : keyword arguments
anything else, see KellyKapowski help in ANTs
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> img = ants.image_read( ants.get_ants_data('r16') ,2)
>>> img = ants.resample_image(img, (64,64),1,0)
>>> mask = ants.get_mask( img )
>>> segs = ants.kmeans_segmentation( img, k=3, kmask = mask)
>>> thick = ants.kelly_kapowski(s=segs['segmentation'], g=segs['probabilityimages'][1],
w=segs['probabilityimages'][2], its=45,
r=0.5, m=1)
"""
if isinstance(s, iio.ANTsImage):
s = s.clone('unsigned int')
d = s.dimension
outimg = g.clone()
kellargs = {'d': d,
's': s,
'g': g,
'w': w,
'c': its,
'r': r,
'm': m,
'o': outimg}
for k, v in kwargs.items():
kellargs[k] = v
processed_kellargs = utils._int_antsProcessArguments(kellargs)
libfn = utils.get_lib_fn('KellyKapowski')
libfn(processed_kellargs)
return outimg
|
809d119d5691e64504671a4915525a109d0ae375
| 3,643,027
|
def get_word_count(frame, pattern_list, group_by_name):
"""
Compute word count and return a dataframe
    :param frame: DataFrame with a `words` column
    :param pattern_list: list of words to count
    :param group_by_name: name of the column to group counts by
:return: frame with count or None if pattern_list is empty
"""
if not pattern_list or len(pattern_list) == 0:
return None
else:
return pd.DataFrame(frame[frame.words.isin(pattern_list)].
groupby(group_by_name).words.value_counts()
.to_frame())
|
06a1c82b387bfc2194e3d4ee0a4526e5b4e3f800
| 3,643,028
|
def parse(text, from_timezone=None):
"""
:rtype: TimeeDT
"""
timee_dt = None
if from_timezone:
timee_dt = parse_with_maya(text, timezone=from_timezone)
return timee_dt
else:
for parse_method in parsing_methods():
result = parse_method(text)
if result is not None:
timee_dt = result
break
return timee_dt
|
c7a8b7819031ee7f97c54c9903f19d4b24112c4a
| 3,643,029
|
load("@bazel_skylib//lib:collections.bzl", "collections")
def _command_line_objc_copts(objc_fragment):
"""Returns copts that should be passed to `clang` from the `objc` fragment.
Args:
objc_fragment: The `objc` configuration fragment.
Returns:
A list of `clang` copts, each of which is preceded by `-Xcc` so that they can be passed
through `swiftc` to its underlying ClangImporter instance.
"""
# In general, every compilation mode flag from native `objc_*` rules should be passed, but `-g`
# seems to break Clang module compilation. Since this flag does not make much sense for module
# compilation and only touches headers, it's ok to omit.
clang_copts = objc_fragment.copts + objc_fragment.copts_for_current_compilation_mode
return collections.before_each("-Xcc", [copt for copt in clang_copts if copt != "-g"])
|
8c55d1297b0aa116b9f6dc859cad1dfda1901f00
| 3,643,030
|
import logging
import urllib
def handle_incoming_mail(addr=None):
"""Handle an incoming email by making a task to examine it.
This code checks some basic properties of the incoming message
    to make sure that it is worth examining. Then it puts all the
    relevant fields into a dict and makes a new Cloud Task which
    is further processed in python 3 code.
"""
logging.info('Request Headers: %r', flask.request.headers)
logging.info('\n\n\nPOST for InboundEmail and addr is %r', addr)
if addr != settings.INBOUND_EMAIL_ADDR:
logging.info('Message not sent directly to our address')
return {'message': 'Wrong address'}
if flask.request.content_length > MAX_BODY_SIZE:
logging.info('Message too big, ignoring')
return {'message': 'Too big'}
msg = get_incoming_message()
precedence = msg.get('precedence', '')
if precedence.lower() in ['bulk', 'junk']:
logging.info('Precedence: %r indicates an autoresponder', precedence)
return {'message': 'Wrong precedence'}
from_addrs = (_extract_addrs(msg.get('x-original-from', '')) or
_extract_addrs(msg.get('from', '')))
if from_addrs:
from_addr = from_addrs[0]
else:
logging.info('could not parse from addr')
return {'message': 'Missing From'}
in_reply_to = msg.get('in-reply-to', '')
body = u''
for part in msg.walk():
# We only process plain text emails.
if part.get_content_type() == 'text/plain':
body = part.get_payload(decode=True)
if not isinstance(body, unicode):
body = body.decode('utf-8')
break # Only consider the first text part.
to_addr = urllib.unquote(addr)
subject = msg.get('subject', '')
task_dict = {
'to_addr': to_addr,
'from_addr': from_addr,
'subject': subject,
'in_reply_to': in_reply_to,
'body': body,
}
logging.info('task_dict is %r', task_dict)
response = call_py3_task_handler('/tasks/detect-intent', task_dict)
if response.status_code and response.status_code != 200:
logging.warning('Handoff to py3 failed.')
flask.abort(400)
return {'message': 'Done'}
|
9bef81cf818d433cc833e64b5291b5c371605424
| 3,643,031
|
def split(df, partition, column):
"""
:param df: The dataframe to split
:param partition: The partition to split
:param column: The column along which to split
    :returns: A tuple containing a split of the original partition
"""
dfp = df[column][partition]
if column in categorical:
values = dfp.unique()
lv = set(values[:len(values)//2])
rv = set(values[len(values)//2:])
return dfp.index[dfp.isin(lv)], dfp.index[dfp.isin(rv)]
else:
median = dfp.median()
dfl = dfp.index[dfp < median]
dfr = dfp.index[dfp >= median]
return (dfl, dfr)
|
8d87d025695a0a2dde681e1abbbf0f5acccdc914
| 3,643,032
|
def get_gaussian_kernel(l=5, sig=1.):
"""
creates gaussian kernel with side length l and a sigma of sig
"""
ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)
xx, yy = np.meshgrid(ax, ax)
kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))
return kernel / np.sum(kernel)
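A few quick properties to check: the kernel is normalized and symmetric:
k = get_gaussian_kernel(l=5, sig=1.0)
assert k.shape == (5, 5)
assert np.isclose(k.sum(), 1.0)   # normalized
assert np.allclose(k, k.T)        # symmetric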
|
71982928ee89d3ac98ae8d74dcc079dd2c4ca0d8
| 3,643,033
|
def get_data_tbl(path, tblname):
"""Wrapper function around @merge_json
"""
files = get_annon_db_file(path, tblname)
log.info("files: {}".format(files))
K,V = common.merge_json(files)
return K,V
|
55381098b1a702a5497a965169b0588192e0a439
| 3,643,034
|
def costes_coloc(im_1, im_2, psf_width=3, n_scramble=1000, thresh_r=0.0,
roi=None, roi_method='all', do_manders=True):
"""
Perform Costes colocalization analysis on a pair of images.
Parameters
----------
    im_1: array_like
        Intensity image for colocalization. Must be the
        same shape as `im_2`.
    im_2: array_like
        Intensity image for colocalization. Must be the
        same shape as `im_1`.
    psf_width: int, default 3
        Width, in pixels, of the point spread function.
    n_scramble: int, default 1000
        Number of scrambled image comparisons to do to get statistics.
thresh_r: float, default 0.0
Threshold Pearson r value to be considered colocalized.
roi: array_like, dtype bool, default None
Boolean image the same shape as `im_1` and `im_2` that
is True for pixels within the ROI.
roi_method: str, default 'all'
If 'all', all pixels of a given subimage must be within
the ROI for the subimage itself to be considered part
of the ROI. If 'any', if any one pixel is within the ROI,
the subimage is considered part of the ROI.
do_manders: bool, default True
If True, compute the Manders coefficients.
Returns
-------
output: A CostesColocalization instance.
The CostesColocalization instance has the following attributes.
im_1, im_2, psf_width, n_scramble, thresh_r, roi,
roi_method: As in the input parameters.
a: slope of the regression line I_2 = a * I_1 + b
b: intercept of regression line I_2 = a * I_1 + b
M_1: Manders coefficient for image 1
M_2: Manders coefficient for image 2
        pearson_r: Pearson correlation coefficient of the pixels
            in the two images.
p_coloc: The probability of colocalization being present
in the two images.
"""
# Make float mirrored boundaries in preparation for scrambling
im_1_mirror = mirror_edges(im_1, psf_width).astype(float)
im_2_mirror = mirror_edges(im_2, psf_width).astype(float)
# Set up ROI
if roi is None:
roi = np.ones_like(im_1, dtype='bool')
# Rename images to be sliced ROI and convert to float
im_1 = im_1[roi].astype(float)
im_2 = im_2[roi].astype(float)
# Mirror ROI at edges
roi_mirror = mirror_edges(roi, psf_width)
# Compute the blocks that we'll scramble
blocks_1 = im_to_blocks(im_1_mirror, psf_width, roi_mirror, roi_method)
blocks_2 = im_to_blocks(im_2_mirror, psf_width, roi_mirror, roi_method)
# Compute the Pearson coefficient
pearson_r = _pearson_r(blocks_1.ravel(), blocks_2.ravel())
# Do image scrambling and r calculations
r_scr = scrambled_r(blocks_1, blocks_2, n=n_scramble)
# Compute percent chance of coloc
p_coloc = (r_scr < pearson_r).sum() / n_scramble
# Now do work to compute adjusted Manders's coefficients
if do_manders:
# Get the linear relationship between im_2 and im_1
a, b = _odr_linear(im_1.ravel(), im_2.ravel())
# Perform threshold calculation
thresh_1 = _find_thresh(im_1, im_2, a, b, thresh_r=thresh_r)
thresh_2 = a * thresh_1 + b
# Compute Costes's update to the Manders's coefficients
inds = (im_1 > thresh_1) & (im_2 > thresh_2)
M_1 = im_1[inds].sum() / im_1.sum()
M_2 = im_2[inds].sum() / im_2.sum()
# Toss results into class for returning
return _CostesColocalization(
im_1=im_1, im_2=im_2, roi=roi, roi_method=roi_method,
psf_width=psf_width, n_scramble=n_scramble, thresh_r=thresh_r,
thresh_1=thresh_1, thresh_2=thresh_2, a=a, b=b, M_1=M_1,
M_2=M_2, r_scr=r_scr, pearson_r=pearson_r, p_coloc=p_coloc)
else:
return _CostesColocalization(
im_1=im_1, im_2=im_2, roi=roi, roi_method=roi_method,
psf_width=psf_width, n_scramble=n_scramble, thresh_r=None,
thresh_1=None, thresh_2=None, a=None, b=None, M_1=None,
M_2=None, r_scr=r_scr, pearson_r=pearson_r, p_coloc=p_coloc)
|
34776cc4e8845e696f61750718736feae6105dee
| 3,643,035
|
def get_induced_dipole_count(efpobj):
"""Gets the number of polarization induced dipoles in `efpobj` computation.
Returns
-------
int
Total number of polarization induced dipoles.
"""
(res, ndip) = efpobj._efp_get_induced_dipole_count()
_result_to_error(res)
return ndip
|
1c7bbd25c17e0a1326e48319c00ad8298174a4b7
| 3,643,036
|
def _gen_parabola(phase: float, start: float, mid: float, end: float) -> float:
"""Gets a point on a parabola y = a x^2 + b x + c.
The Parabola is determined by three points (0, start), (0.5, mid), (1, end) in
the plane.
Args:
phase: Normalized to [0, 1]. A point on the x-axis of the parabola.
start: The y value at x == 0.
mid: The y value at x == 0.5.
end: The y value at x == 1.
Returns:
The y value at x == phase.
"""
mid_phase = 0.5
delta_1 = mid - start
delta_2 = end - start
delta_3 = mid_phase ** 2 - mid_phase
coef_a = (delta_1 - delta_2 * mid_phase) / delta_3
coef_b = (delta_2 * mid_phase ** 2 - delta_1) / delta_3
coef_c = start
return coef_a * phase ** 2 + coef_b * phase + coef_c
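Spot-checking the three interpolation constraints with exactly representable values:
assert _gen_parabola(0.0, start=1.0, mid=2.0, end=5.0) == 1.0
assert _gen_parabola(0.5, start=1.0, mid=2.0, end=5.0) == 2.0
assert _gen_parabola(1.0, start=1.0, mid=2.0, end=5.0) == 5.0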
|
bdd808339e808a26dd1a4bf22552a1d32244bb02
| 3,643,037
|
import uuid
def grant_perms(obj: element, mast: element, read_only: bool, meta):
"""
Grants another user permissions to access a Jaseci object
Param 1 - target element
Param 2 - master to be granted permission
Param 3 - Boolean read_only flag
    Return - success flag (bool)
"""
mast = meta['h'].get_obj(meta['m_id'], uuid.UUID(meta['m_id']))
return mast.object_perms_grant(obj=obj,
mast=mast,
read_only=read_only)['success']
|
3a73baf583214d95c31011e8dfc427ea364edb4a
| 3,643,038
|
def pkgdir(tmpdir, monkeypatch):
"""
temp directory fixture containing a readable/writable ./debian/changelog.
"""
cfile = tmpdir.mkdir('debian').join('changelog')
text = """
testpkg (1.1.0-1) stable; urgency=medium
* update to 1.1.0
* other rad packaging updates
* even more cool packaging updates that take a lot of text to describe so
the change wraps on multiple lines
-- Ken Dreyer <kdreyer@redhat.com> Tue, 06 Jun 2017 14:46:37 -0600
testpkg (1.0.0-2redhat1) stable; urgency=medium
* update to 1.0.0 (rhbz#123)
-- Ken Dreyer <kdreyer@redhat.com> Mon, 05 Jun 2017 13:45:36 -0600
""".lstrip("\n")
cfile.write(text)
monkeypatch.chdir(tmpdir)
return tmpdir
|
0717aba1d5181e48eb11fa1e91b72933cda1af14
| 3,643,040
|
import configparser
def read_plot_config(filename):
"""Read in plotting config file.
Args:
filename (str): Full path and name of config file.
Returns:
dict: Contents of config file.
"""
config = configparser.ConfigParser()
config.read(filename)
out = {}
for section in config.sections():
out[section] = _get_section(config, section)
return out
|
876a84b2976807d2ef02c79806c9c2d14874997a
| 3,643,041
|
def parse(file_path, prec=15):
"""
Simple helper
- file_path: Path to the OpenQASM file
- prec: Precision for the returned string
"""
qasm = Qasm(file_path)
return qasm.parse().qasm(prec)
|
5303753da86780854f1b2b9abff18ad9531e1ea8
| 3,643,042
|
import numpy as np
def sinusoid(amplitude=1.0, frequency=1.0, phase=0.0, duration=60.0, samplerate=100.0):
    """Generate a sinusoid"""
    t = np.arange(0, duration, 1.0/samplerate)
    d = amplitude * np.sin(2.0 * np.pi * frequency * t + phase)
return t, d
|
ea55aec9519321221946e74504732209771b0b23
| 3,643,043
|
def get_model():
"""
Returns a compiled convolutional neural network model. Assume that the
`input_shape` of the first layer is `(IMG_WIDTH, IMG_HEIGHT, 3)`.
The output layer should have `NUM_CATEGORIES` units, one for each category.
"""
model = tf.keras.models.Sequential()
model.add(
tf.keras.layers.Conv2D(
32, (3, 3), input_shape=(
IMG_WIDTH, IMG_HEIGHT, 3)))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Conv2D(64, (3, 3)))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(64, (4, 4)))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(128, (4, 4)))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(3, 3)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(NUM_CATEGORIES))
model.add(tf.keras.layers.Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
return model
|
d6d5ad41ec6ba61ebcf7d2dfb962f18a24b7a8a1
| 3,643,044
|
def check_datetime_str(datetime_str):
"""
Tries to parse the datetime string to a datetime object. If it fails,
it will return False
:param str datetime_str:
:return: returns True or False depending on the validity of the datetime string
:rtype: bool
"""
try:
parse_datetime_str(datetime_str)
return True
except ValueError:
return False
|
8129a3ef87d377bc488bfbd151012241f673e07d
| 3,643,045
|
def _df_pitch(df: pd.DataFrame, xcol: str = 'x',
ycol: str = 'y', zcol: str = 'z'):
"""Find angular pitch for each row in an accelerometer dataframe.
Args:
df (pd.DataFrame): accelerometer dataframe
xcol, ycol, zcol (str): column names for x, y, and z acceleration
Returns:
pd.Series: pitch
"""
out = pd.Series(pitch(df[xcol].values, df[ycol].values, df[zcol].values),
name='pitch')
return out
|
50c6e40e535b5cd7acead652edf1a9420125fee8
| 3,643,046
|
def gan_masked_generate_face(generator_fun, face_img: np.array):
"""
    Generates a face from the seed image using a generator_fun that outputs an alpha mask and BGR results
:param generator_fun: takes an image and returns alpha mask concatenated with bgr results
:param face_img: img to feed to the generator
:return:
"""
gen_res = generator_fun(face_img)
gen_mask = gen_res[:, :, 0]
gen_bgr = gen_res[:, :, 1:]
gen_mask = np.clip(gen_mask * 255, 0, 255).astype(np.uint8)
# stack mask such as we have three channels
gen_mask = np.stack([gen_mask, gen_mask, gen_mask], axis=2)
return gen_bgr, gen_mask
|
9b6ce882f509851b0a9c52364bb602909db45cb6
| 3,643,047
|
import numpy as np
def feat_row_sum_inv_normalize(x):
"""
:param x: np.ndarray, raw features.
:return: np.ndarray, normalized features
"""
x_feat = x.astype(dtype=np.float64)
inv_x_rowsum = np.power(x_feat.sum(axis=1), -1).flatten()
inv_x_rowsum[np.isinf(inv_x_rowsum)] = 0.
x_diag_mat = np.diag(inv_x_rowsum)
normalized_x = x_diag_mat.dot(x_feat)
return normalized_x
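A small worked example: each nonzero row is scaled to sum to 1, and all-zero rows stay zero:
x = np.array([[1, 1], [2, 6], [0, 0]])
print(feat_row_sum_inv_normalize(x))
# [[0.5  0.5 ]
#  [0.25 0.75]
#  [0.   0.  ]]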
|
ea55c7826054ca13f810852a24cf315f268dfd6a
| 3,643,049
|
def cross3(v1, v2):
    """
    Return the cross product of two 3-vectors as a tuple.
    """
return (v1[1] * v2[2] - v1[2] * v2[1], v1[2] * v2[0] - v1[0] * v2[2], v1[0] * v2[1] - v1[1] * v2[0])
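A quick check against the standard basis:
assert cross3((1, 0, 0), (0, 1, 0)) == (0, 0, 1)  # x cross y = z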
|
f3bb2b82acf54d929ffc14177fde120970617886
| 3,643,050
|
from copy import copy
import rest_framework.request
from django.http import QueryDict
def api_request(request, viewset, method, url_kwargs={}, get_params={}):
"""
Call an API route on behalf of the user request. Examples:
data = api_request(request, CaseDocumentViewSet, 'list', get_params={'q': 'foo'}).data
data = api_request(request, CaseDocumentViewSet, 'retrieve', url_kwargs={'id': '123'}).data
"""
# copy selected fields due to infinite recursion for some
# request copies
if isinstance(request, rest_framework.request.Request):
request = request._request
api_request = copy(request)
api_request.method = 'GET'
api_request.GET = QueryDict(mutable=True)
api_request.GET.update(get_params)
return viewset.as_view({'get': method})(api_request, **url_kwargs)
|
c3d118d1a9857e9522f3e518a77da6e51e443ef7
| 3,643,051
|
def ca_restart(slot):
    """
    Call CA_Restart on the given slot.
    :param slot: slot number to restart
    """
LOG.info("CA_Restart: attempting to restart")
ret = CA_Restart(CK_ULONG(slot))
LOG.info("CA_Restart: Ret Value: %s", ret)
return ret
|
1192f371c14bdf8f773b1402f77e66d24d3aee94
| 3,643,052
|
def db_query_map(db_or_el, query, func_match, func_not) -> tuple:
"""
Helper function to find elems from query and transform them,
to generate 2 lists of matching/not-matching elements.
"""
expr = parse_query_expr(query)
elems1, elems2 = [], []
for el in _db_or_elems(db_or_el):
m = el_to_meta(el)
ok = [func(m.get(prop), val) for prop, func, val in expr]
if ok and all(ok):
r = func_match(el)
if r is not None:
elems1.append(r)
else:
r = func_not(el)
if r is not None:
elems2.append(r)
return elems1, elems2
|
d7b56e8d62d0c80c4bfdb026879d5a848b7d3b8f
| 3,643,056
|
import inspect
def route(pattern, method=HTTP_METHOD.GET):
"""
Decorator to declare the routing rule of handler methods.
"""
def decorator(func):
frm = inspect.stack()[1]
class_name = frm[3]
module_name = frm[0].f_back.f_globals["__name__"]
full_class_name = module_name + '.' + class_name
real_pattern = '^' + pattern + '$'
add_handler(method, real_pattern, full_class_name, func)
return asynchronous(func)
return decorator
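A usage sketch; the handler class, method, and URL pattern are hypothetical, and HTTP_METHOD/add_handler/asynchronous are assumed to come from the surrounding framework (the decorator records the enclosing class via inspect.stack()):
class UserHandler:
    @route(r'/users/(\d+)')
    def get_user(self, user_id):
        ...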
|
28d107abbce1d36611fa5313b0d52491000a1f73
| 3,643,057
|
from gmpy2 import invert  # modular inverse; operator.invert is unary bitwise NOT
def div_q(a: ElementModPOrQorInt, b: ElementModPOrQorInt) -> ElementModQ:
"""Compute a/b mod q."""
b = _get_mpz(b)
inverse = invert(b, _get_mpz(get_small_prime()))
return mult_q(a, inverse)
|
285a8aa161748d8c7aaa38bd04f81fe7c22e5e43
| 3,643,058
|
import pyarrow
def pyarrow_to_r_schema(
obj: 'pyarrow.lib.Schema'
):
"""Create an R `arrow::Schema` object from a pyarrow Schema.
This is sharing the C/C++ object between the two languages.
The returned object depends on the active conversion rule in
rpy2. By default it will be an `rpy2.robjects.Environment`.
"""
schema_ptr = rarrow.allocate_arrow_schema()[0]
try:
obj._export_to_c(int(schema_ptr))
r_schema = rarrow.ImportSchema(schema_ptr)
finally:
rarrow.delete_arrow_schema(schema_ptr)
return r_schema
|
0eb461451ea805b3ac888084b4f46ca9cbbd7c00
| 3,643,060
|
import calendar
def validate_days(year, month, day):
"""validate no of days in given month and year
>>> validate_days(2012, 8, 31)
31
>>> validate_days(2012, 8, 32)
31
"""
total_days = calendar.monthrange(year, month)
return (total_days[1] if (day > total_days[1]) else day)
|
7499dc9654ec9ffd7f534cf27444a3236dd82e81
| 3,643,062
|
import json
import boto3
def save_to_s3(bucket_name, file_name, data):
    """ Saves data to a file in the bucket
    bucket_name - - The name of the bucket you're saving to
    file_name - - The name of the file
    data - - data to be saved """
s3 = boto3.resource('s3')
obj = s3.Object(bucket_name, file_name)
resp = obj.put(Body=json.dumps(data))
return resp
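Example call (bucket and key names are hypothetical; valid AWS credentials are assumed):
resp = save_to_s3('my-bucket', 'records/latest.json', {'status': 'ok'})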
|
520599136418d635cfbaf67c7bffbb2da105985c
| 3,643,063
|
def linsearch_fun_BiCM_exp(xx, args):
"""Linsearch function for BiCM newton and quasinewton methods.
This is the linesearch function in the exponential mode.
The function returns the step's size, alpha.
Alpha determines how much to move on the descending direction
found by the algorithm.
:param xx: Tuple of arguments to find alpha:
solution, solution step, tuning parameter beta,
initial alpha, function f
:type xx: (numpy.ndarray, numpy.ndarray, float, float, func)
:param args: Tuple, step function and arguments.
:type args: (func, tuple)
:return: Working alpha.
:rtype: float
"""
x = xx[0]
dx = xx[1]
beta = xx[2]
alfa = xx[3]
f = xx[4]
step_fun = args[0]
arg_step_fun = args[1]
i = 0
s_old = -step_fun(x, arg_step_fun)
while (
sof.sufficient_decrease_condition(
s_old, -step_fun(x + alfa * dx, arg_step_fun), alfa, f, dx
)
is False
and i < 50
):
alfa *= beta
i += 1
return alfa
|
c88f974c76ceec84a12a67ef9c6f71ae357f472b
| 3,643,064
|
def getCountryName(countryID):
    """
    Pull out the country name from a country id.
    Returns None if the object has no "name" property.
    """
    try:
        countryObj = getCountry(countryID)
        return countryObj['name']
    except (KeyError, TypeError):
        return None
|
72b90de7e49911983fe60e18b00cc577f423785d
| 3,643,065
|
import tensorflow as tf  # TF1-style API: tf.placeholder
def get_placeholder(default_tensor=None, shape=None, name=None):
    """Return default_tensor if given; otherwise create and return a new float32 placeholder"""
if default_tensor is not None:
return default_tensor
else:
if shape is None:
raise ValueError('One of default_tensor and shape must be given')
return tf.placeholder(tf.float32, shape=shape, name=name)
|
e62fe4ca8244ae45ac853a0398754375454626dc
| 3,643,067
|
def get_targets(args):
"""
Gets the list of targets for cmake and kernel/build.sh
:param args: The args variable generated by parse_parameters
:return: A string of targets suitable for cmake or kernel/build.sh
"""
if args.targets:
targets = args.targets
elif args.full_toolchain:
targets = "all"
else:
targets = "AArch64;ARM;BPF;Hexagon;Mips;PowerPC;RISCV;SystemZ;X86"
return targets
|
81eb31fe416303bc7e881ec2c10cfeeea4fdab05
| 3,643,068
|
def _format_warning(message, category, filename, lineno, line=None): # noqa: U100, E501
"""
Simple format for warnings issued by ProPlot. See the
`internal warning call signature \
<https://docs.python.org/3/library/warnings.html#warnings.showwarning>`__
and the `default warning source code \
<https://github.com/python/cpython/blob/master/Lib/warnings.py>`__.
"""
return f'{filename}:{lineno}: ProPlotWarning: {message}\n'
|
f5709df0a84d9479d6b895dccb3eae8292791f74
| 3,643,069
|
def piocheCarte(liste_pioche, x):
    """ Draw x cards from the top of the deck.
    Args:
        liste_pioche (list): Deck to draw from (modified in place).
        x (int): Number of cards to draw.
    Returns:
        list: The x cards drawn.
    """
    liste_carte = []
    for _ in range(x):
        # pop(0) both records and removes the top card, so indices never shift under us
        liste_carte.append(liste_pioche.pop(0))
return liste_carte
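A small worked example of drawing from the top of the deck:
pioche = ['as', 'roi', 'dame', 'valet']
main = piocheCarte(pioche, 2)
assert main == ['as', 'roi'] and pioche == ['dame', 'valet']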
|
ed31c47d699447870207a4066a3da9c35333ada8
| 3,643,070
|
import numpy as np
def cost_logistic(p, x, y):
"""
Sum of absolute deviations of obs and logistic function
:math:`L/(1+exp(-k(x-x0)))`
Parameters
----------
p : iterable of floats
parameters (`len(p)=3`)
- `p[0]` = L = Maximum of logistic function
- `p[1]` = k = Steepness of logistic function
- `p[2]` = x0 = Inflection point of logistic function
x : float or array_like of floats
independent variable
y : float or array_like of floats
dependent variable, observations
Returns
-------
float
sum of absolute deviations
"""
return np.sum(np.abs(y-logistic_p(x, p)))
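A fitting sketch, assuming logistic_p(x, p) implements p[0]/(1 + exp(-p[1]*(x - p[2]))); Nelder-Mead suits the non-smooth L1 cost:
from scipy.optimize import minimize
x = np.linspace(-5, 5, 50)
y = 2.0 / (1.0 + np.exp(-1.5 * (x - 0.5)))
res = minimize(cost_logistic, x0=[1.0, 1.0, 0.0], args=(x, y), method='Nelder-Mead')
print(res.x)  # should approach [2.0, 1.5, 0.5]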
|
4985d19ff792bf2df8fe5692330cb9c32d329cab
| 3,643,072
|
import requests
def get_price(token: str, sellAmount=1000000000000000000):
    """
    get_price uses the 0x api to get the most accurate eth price for the token
    :param token: token ticker or token address
    :param sellAmount: token amount to sell in base units, default is 1e18
    :return: eth/bnb price per token for the specified sell amount; the quote
        currency (WETH or WBNB) is inferred from the active network
    """
if curr_network == "bsc" or curr_network == "bsc-fork":
endpoint = "https://bsc.api.0x.org/"
buyToken = "WBNB"
elif curr_network == "eth":
endpoint = "https://api.0x.org/"
buyToken = "WETH"
else:
raise ValueError("Unrecognized network")
params = (
"swap/v1/quote?buyToken="
+ buyToken
+ "&sellToken="
+ token
+ "&sellAmount="
+ str(sellAmount)
)
r = requests.get(endpoint + params)
data = r.json()
if not data.get("guaranteedPrice"):
console.log(data)
raise ValueError("Price could not be fetched")
return data["guaranteedPrice"]
|
b1dae25571eccb28433b9bfe7c3be6f006f05184
| 3,643,073
|
from typing import Iterator, Optional
def _get_all_errors_if_unrecognized_properties(model: dict, props: list) -> Iterator[Optional[str]]:
"""Get error messages if the model has unrecognized properties."""
def get_error_if_property_is_unrecognized(key):
if key not in props:
return f"unrecognized field named '{key}' found in model '{model}'"
return map(get_error_if_property_is_unrecognized, model.keys())
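Example: one message is produced for the misspelled key, None for recognized ones:
errors = _get_all_errors_if_unrecognized_properties(
    {'name': 'm1', 'colour': 'red'}, ['name', 'color'])
print([e for e in errors if e is not None])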
|
e7c380b750606adc466f335a2411619eab11312f
| 3,643,075
|
def get_get_single_endpoint_schema(class_name, id_field_where_type, response_schema):
    """
    :param class_name: name of the model class the endpoint serves
    :param id_field_where_type: URL converter prefix for the id field ("int:" for integer ids)
    :param response_schema: swagger schema for the 200 response body
    """
return {
"tags": [class_name],
"description": f"Get a {class_name} model representation",
"parameters": [
{
"name": "id",
"description": f"{class_name} identifier",
"in": "path",
"schema": {"type": "integer" if id_field_where_type == "int:" else "string"},
}
],
"responses": {
"200": {"description": f"{class_name} response model", "content": {"application/json": {"schema": response_schema}}},
"404": {"description": "Not found response model", "content": {"application/json": {"schema": not_found_swagger_schema}}},
"500": {"description": "Operation fail", "content": {"application/json": {"schema": error_swagger_schema}}},
},
}
|
98daaaa20e5e52c2480ce6aa1805ee3da6b163d7
| 3,643,078
|
from typing import Optional
def overlapping_template_matching(
sequence,
template_size: Optional[int] = None,
blocksize: Optional[int] = None,
matches_ceil: Optional[int] = None,
):
"""Overlapping matches to template per block is compared to expected result
The sequence is split into blocks, where the number of overlapping patterns
matches to the template in each block is found. This is referenced to the
expected mean and variance in matches of a hypothetically truly random sequence.
Parameters
----------
sequence : array-like with two distinct values
Sequence containing 2 distinct elements
template_size : ``int``
Size of the template to be generated
blocksize : ``int``
Size of the blocks that partition the given sequence
matches_ceil : ``int``
Group matches of this value and higher as one single tally
Returns
-------
result : ``OverlappingTemplateMatchingTestResult``
Dataclass that contains the test's statistic and p-value as well as
other relevant information gathered.
"""
return _randtests.overlapping_template_matching(
sequence,
template_size=template_size,
blocksize=blocksize,
matches_ceil=matches_ceil,
)
|
035ce0c333c69bdf437f1e6f93071c9342154e92
| 3,643,079
|
def _extract_options(config, options, *args):
"""Extract options values from a configparser, optparse pair.
Options given on command line take precedence over options read in the
configuration file.
Args:
config (dict): option values read from a config file through
configparser
options (optparse.Options): optparse 'options' object containing options
values from the command line
*args (str tuple): name of the options to extract
"""
extract = {}
    for key in args:
        # take the value from the config file when present...
        if key in config:
            extract[key] = config[key]
        # ...but let command-line options take precedence
        option = getattr(options, key, None)
        if option is not None:
            extract[key] = option
return extract
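A self-contained sketch using SimpleNamespace as a stand-in for the optparse options object:
from types import SimpleNamespace
config = {'verbose': False, 'output': 'a.txt'}
options = SimpleNamespace(verbose=True, output=None)
print(_extract_options(config, options, 'verbose', 'output'))
# {'verbose': True, 'output': 'a.txt'}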
|
3d74857b3dcdd242950a35b84d3bcaae557a390b
| 3,643,080
|
import numpy as np
def _calc_fans(shape):
"""
:param shape: tuple with the shape(4D - for example, filters, depth, width, height)
:return: (fan_in, fan_out)
"""
if len(shape) == 2:
# Fully connected layer (units, input)
fan_in = shape[1]
fan_out = shape[0]
elif len(shape) in {3, 4, 5}:
        # Convolutional kernels
k_size = np.prod(shape[2:])
fan_in = k_size * shape[1]
fan_out = k_size * shape[0]
else:
raise ValueError("Incompatible shape")
return fan_in, fan_out
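Worked examples for a conv kernel and a dense layer:
print(_calc_fans((64, 3, 3, 3)))  # (27, 576): k_size=9, 3 input channels, 64 filters
print(_calc_fans((10, 128)))      # (128, 10): dense layer with 128 inputs, 10 units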
|
70535fd002f08bbaadf1a0af4ec980851e52ad92
| 3,643,081
|
def statRobustness(compromised, status):
"""produce data for robustness stats"""
rob = {0:{"empty":0, "login based":0, "top 10 common":0, "company name":0},
1:{"top 1000 common":0, "login extrapolation":0, "company context related":0, "4 char or less":0},
2:{"top 1M common":0, "6 char or less":0, "2 charsets or less":0},
3:{"present in attack wordlist":0, "present in locale attack wordlist":0, "leaked":0, "undetermined":0}}
for acc in compromised:
if status == 'all' or 'account_disabled' not in compromised[acc]["status"]:
rob[compromised[acc]["robustness"]][compromised[acc]["reason"]] += 1
return rob
|
46920b466b96fa37a94888e788104c1d901a9227
| 3,643,083
|