| content | sha1 | id |
|---|---|---|
| stringlengths 35-762k | stringlengths 40-40 | int64 0-3.66M |
import logging
import pathlib

logger = logging.getLogger(__name__)
# Marker values are assumptions inferred from how they are used below.
enable_gem_start_marker = 'set(ENABLED_GEMS'
enable_gem_end_marker = ')'

def get_enabled_gems(cmake_file: pathlib.Path) -> set:
    """
    Gets the set of enabled gems from the cmake file
    :param cmake_file: path to the cmake file
    :return: set of gem targets found
    """
    cmake_file = pathlib.Path(cmake_file).resolve()
    if not cmake_file.is_file():
        logger.error(f'Failed to locate cmake file {cmake_file}')
        return set()
    gem_target_set = set()
    with cmake_file.open('r') as s:
        in_gem_list = False
        for line in s:
            line = line.strip()
            if line.startswith(enable_gem_start_marker):
                # Set the flag to indicate that we are in the ENABLED_GEMS variable
                in_gem_list = True
                # Skip past the 'set(ENABLED_GEMS' marker in case there are gems declared on the same line
                line = line[len(enable_gem_start_marker):]
            if in_gem_list:
                # Since we are inside the ENABLED_GEMS variable, determine if the line has the end marker ')'
                if line.endswith(enable_gem_end_marker):
                    # Strip away the line end marker
                    line = line[:-len(enable_gem_end_marker)]
                    # Clear the flag to indicate that we are no longer in the ENABLED_GEMS variable after this line
                    in_gem_list = False
                # Split the rest of the line on whitespace in case there are multiple gems on one line
                gem_name_list = [gem_name.strip('"') for gem_name in line.split()]
                gem_target_set.update(gem_name_list)
    return gem_target_set
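
A minimal usage sketch; the file contents below and the marker values defined above are assumptions in the O3DE style:

import tempfile

cmake_text = 'set(ENABLED_GEMS\n    "Atom"\n    "PhysX" "LyShine"\n)\n'
with tempfile.NamedTemporaryFile('w', suffix='.cmake', delete=False) as f:
    f.write(cmake_text)
print(get_enabled_gems(pathlib.Path(f.name)))  # {'Atom', 'PhysX', 'LyShine'}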
|
0b4c8c68230b075d2c27d72b1290217864fc6888
| 3,646,952
|
# `add` and `mul` must be Celery task signatures (operator.add/mul have no .s()
# method); they are assumed to come from a tasks module, alongside Flask's request.
from flask import request
from celery import group
from tasks import add, mul  # hypothetical tasks module

def celeryAdd3(a, b):
    """This is for a specific Celery workflow
    f = (a+b) * (a+b)
    We'll use chord, group and chain"""
    if request.method == 'GET':
        # When a worker receives an expired task it will mark the task as REVOKED
        res = (group(add.s(a, b), add.s(a, b)) | mul.s()).apply_async(expires=60)  # https://docs.celeryproject.org/en/stable/userguide/calling.html#expiration
        _ret = """ <p>result: 200</p>
        <p>msg: "Added value is calculating at task ID: {0}"</p>
        <p>htmlmsg: <a href="/api/v1_0/status/{0}">{0}</a></p>""".format(res.id)
        # return jsonify(_ret)
        return _ret
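
A hedged sketch of task definitions that would make the canvas above work; the module layout, app name, and broker URL are assumptions. Note that a chord body receives the list of header results, so `mul` must accept a single list argument:

from celery import Celery, group

app = Celery('tasks', broker='redis://localhost:6379/0')  # broker URL is an assumption

@app.task
def add(a, b):
    return a + b

@app.task
def mul(results):
    # As a chord body, this is called with [a + b, a + b]
    return results[0] * results[1]

# (group(add.s(2, 3), add.s(2, 3)) | mul.s()).apply_async()  # result: 25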
|
84599389542663207ff57a07e3b58cecc9b6427b
| 3,646,953
|
def create_unmerge_cells_request(sheet_id, start, end):
"""
Create v4 API request to unmerge rows and/or columns for a
given worksheet.
"""
start = get_cell_as_tuple(start)
end = get_cell_as_tuple(end)
return {
"unmergeCells": {
"range": {
"sheetId": sheet_id,
"startRowIndex": start[ROW] - 1,
"endRowIndex": end[ROW],
"startColumnIndex": start[COL] - 1,
"endColumnIndex": end[COL],
}
}
}
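
A sketch of the helpers this builder assumes; the ROW/COL constants and the A1-notation parsing in get_cell_as_tuple are inferred from how they are used above:

import re

ROW, COL = 0, 1  # assumed index constants

def get_cell_as_tuple(cell):
    """Parse 'B2' into a 1-based (row, col) tuple (assumed behavior)."""
    if isinstance(cell, tuple):
        return cell
    m = re.match(r'([A-Z]+)([0-9]+)', str(cell).upper())
    col = 0
    for ch in m.group(1):
        col = col * 26 + (ord(ch) - ord('A') + 1)
    return (int(m.group(2)), col)

# create_unmerge_cells_request(0, 'A1', 'B2') ->
# {'unmergeCells': {'range': {'sheetId': 0, 'startRowIndex': 0, 'endRowIndex': 2,
#                             'startColumnIndex': 0, 'endColumnIndex': 2}}}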
|
3fd560a82522738099bacd3f606bbea948de7226
| 3,646,954
|
def list_to_str(slist, seperator=None):
"""Convert list of any type to string seperated by seperator."""
if not seperator:
seperator = ','
if not slist:
return ""
slist = squash_int_range(slist)
return seperator.join([str(e) for e in slist])
|
64d20b744a7b465e58e50caf60e0e1aaf9b0c2e7
| 3,646,955
|
def log_web_error(msg):
"""Take a screenshot of a web browser based error
Use this function to capture a screen shot of the web browser
when using Python's `assert` keyword to perform assertions.
"""
screenshot = selene.helpers.take_screenshot(selene.browser.driver(),)
msg = '''{original_msg}
screenshot: file://{screenshot}'''.format(original_msg=msg, screenshot=screenshot)
return msg
|
8f5e9f6c586e6739d6581986c66689881d812316
| 3,646,956
|
def parent_id_name_and_quotes_for_table(sqltable):
""" Return tuple with 2 items (nameof_field_of_parent_id, Boolean)
True - if field data type id string and must be quoted), False if else """
id_name = None
quotes = False
for colname, sqlcol in sqltable.sql_columns.iteritems():
# root table
if not sqltable.root.parent and \
sqlcol.node == sqltable.root.get_id_node():
id_name = colname
if sqlcol.typo == "STRING":
quotes = True
break
else: # nested table
if sqlcol.node.reference:
id_name = colname
if sqlcol.typo == "STRING":
quotes = True
break
return (id_name, quotes)
|
6f3319dc6ae0ea70af5d2c9eda90fb1a9fb9daac
| 3,646,957
|
def client():
"""Returns a Flask client for the app."""
return app.test_client()
|
40d3cf2c330d2f82b6ae7514e833ef5d1bcb9594
| 3,646,958
|
from enthought.mayavi import version
from .maps_3d import plot_map_3d, m2screenshot
from enthought.tvtk.api import tvtk
from enthought.mayavi import mlab
from enthought.mayavi.core.registry import registry
def plot_map(map, affine, cut_coords=None, anat=None, anat_affine=None,
figure=None, axes=None, title=None, threshold=None,
annotate=True, draw_cross=True,
do3d=False, **kwargs):
""" Plot three cuts of a given activation map (Frontal, Axial, and Lateral)
Parameters
----------
map : 3D ndarray
The activation map, as a 3D image.
affine : 4x4 ndarray
The affine matrix going from image voxel space to MNI space.
cut_coords: 3-tuple of floats or None
The MNI coordinates of the point where the cut is performed, in
MNI coordinates and order.
        If None is given, the cut point is calculated automatically.
anat : 3D ndarray or False, optional
The anatomical image to be used as a background. If None, the
MNI152 T1 1mm template is used. If False, no anat is displayed.
anat_affine : 4x4 ndarray, optional
The affine matrix going from the anatomical image voxel space to
MNI space. This parameter is not used when the default
anatomical is used, but it is compulsory when using an
        explicit anatomical image.
figure : integer or matplotlib figure, optional
Matplotlib figure used or its number. If None is given, a
new figure is created.
    axes : matplotlib axes or 4 tuple of float: (xmin, xmax, ymin, ymax), optional
The axes, or the coordinates, in matplotlib figure space,
of the axes used to display the plot. If None, the complete
figure is used.
title : string, optional
        The title displayed on the figure.
threshold : a number, None, or 'auto'
If None is given, the maps are not thresholded.
If a number is given, it is used to threshold the maps:
values below the threshold are plotted as transparent. If
auto is given, the threshold is determined magically by
analysis of the map.
annotate: boolean, optional
If annotate is True, positions and left/right annotation
are added to the plot.
draw_cross: boolean, optional
If draw_cross is True, a cross is drawn on the plot to
        indicate the cut position.
do3d: {True, False or 'interactive'}, optional
If True, Mayavi is used to plot a 3D view of the
map in addition to the slicing. If 'interactive', the
3D visualization is displayed in an additional interactive
window.
kwargs: extra keyword arguments, optional
Extra keyword arguments passed to pylab.imshow
Notes
-----
Arrays should be passed in numpy convention: (x, y, z)
ordered.
Use masked arrays to create transparency:
import numpy as np
map = np.ma.masked_less(map, 0.5)
plot_map(map, affine)
"""
map, affine = _xyz_order(map, affine)
nan_mask = np.isnan(np.asarray(map))
if np.any(nan_mask):
map = map.copy()
map[nan_mask] = 0
# Deal with automatic settings of plot parameters
if threshold == 'auto':
threshold = _fast_abs_percentile(map)
if cut_coords is None:
x_map, y_map, z_map = find_cut_coords(map,
activation_threshold=threshold)
cut_coords = coord_transform(x_map, y_map, z_map, affine)
if threshold is not None:
if threshold == 0:
map = np.ma.masked_equal(map, 0, copy=False)
else:
map = np.ma.masked_inside(map, -threshold, threshold, copy=False)
if do3d:
try:
if not int(version.version[0]) > 2:
raise ImportError
except ImportError:
warnings.warn('Mayavi > 3.x not installed, plotting only 2D')
do3d = False
# Make sure that we have a figure
if not isinstance(figure, Figure):
if do3d:
size = (10, 2.6)
else:
size = (6.6, 2.6)
fig = pl.figure(figure, figsize=size, facecolor='w')
else:
fig = figure
if isinstance(axes, Axes):
assert axes.figure is figure, ("The axes passed are not "
"in the figure")
canonical_anat = False
if anat is None:
try:
anat, anat_affine, vmax_anat = _AnatCache.get_anat()
canonical_anat = True
except OSError, e:
anat = False
warnings.warn(repr(e))
# Use Mayavi for the 3D plotting
if do3d:
version = tvtk.Version()
offscreen = True
if (version.vtk_major_version, version.vtk_minor_version) < (5, 2):
offscreen = False
if do3d == 'interactive':
offscreen = False
cmap = kwargs.get('cmap', pl.cm.cmap_d[pl.rcParams['image.cmap']])
# Computing vmin and vmax is costly in time, and is needed
# later, so we compute them now, and store them for future
# use
vmin = kwargs.get('vmin', map.min())
kwargs['vmin'] = vmin
vmax = kwargs.get('vmax', map.max())
kwargs['vmax'] = vmax
plot_map_3d(np.asarray(map), affine, cut_coords=cut_coords,
anat=anat, anat_affine=anat_affine,
offscreen=offscreen, cmap=cmap,
threshold=threshold,
vmin=vmin, vmax=vmax)
ax = fig.add_axes((0.001, 0, 0.29, 1))
ax.axis('off')
m2screenshot(mpl_axes=ax)
axes = (0.3, 0, .7, 1.)
if offscreen:
# Clean up, so that the offscreen engine doesn't become the
# default
mlab.clf()
engine = mlab.get_engine()
for key, value in registry.engines.iteritems():
if value is engine:
registry.engines.pop(key)
break
if axes is None:
axes = [0., 0., 1., 1.]
if operator.isSequenceType(axes):
axes = fig.add_axes(axes)
axes.axis('off')
ortho_slicer = OrthoSlicer(cut_coords, axes=axes)
# Check that we should indeed plot an anat: we have one, and the
# cut_coords are in its range
x, y, z = cut_coords
if (anat is not False
and np.all(
np.array(coord_transform(x, y, z, np.linalg.inv(anat_affine)))
< anat.shape)):
anat_kwargs = kwargs.copy()
anat_kwargs['cmap'] = pl.cm.gray
anat_kwargs.pop('alpha', 1.)
if canonical_anat:
# We special-case the 'canonical anat', as we don't need
# to do a few transforms to it.
anat_kwargs['vmin'] = 0
anat_kwargs['vmax'] = vmax_anat
else:
anat_kwargs.pop('vmin', None)
anat_kwargs.pop('vmax', None)
anat, anat_affine = _xyz_order(anat, anat_affine)
ortho_slicer.plot_map(anat, anat_affine, **anat_kwargs)
ortho_slicer.plot_map(map, affine, **kwargs)
if annotate:
ortho_slicer.annotate()
if draw_cross:
ortho_slicer.draw_cross(color='k')
if title is not None and not title == '':
ortho_slicer.title(title)
return ortho_slicer
|
d7ef70bb98849532e94d7b975303cbd370fe8bbe
| 3,646,960
|
def get_mode(h5,songidx=0):
"""
Get mode from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.mode[songidx]
|
9a9eb7cfed2bc525a3b5d3c8cb251c7e170a589c
| 3,646,961
|
import json
def read_label_schema(path):
"""
Reads json file and returns deserialized LabelSchema.
"""
with open(path, encoding="UTF-8") as read_file:
serialized_label_schema = json.load(read_file)
return LabelSchemaMapper().backward(serialized_label_schema)
|
8f15a6e63864c6f737f465abb3011193ce136db6
| 3,646,962
|
def Dump(root):
"""Return a string representing the contents of an object.
This function works only if root.ValidateExports() would pass.
Args:
root: the object to dump.
Returns:
A big string containing lines of the format:
Object.SubObject.
Object.SubObject.ParameterName = %r
"""
h = Handle(root)
out = []
for i in h.ListExports(recursive=True):
if i.endswith('.'):
out.append(' %s' % (i,))
else:
out.append(' %s = %r' % (i, h.GetExport(i)))
return '\n'.join(out)
|
7f6a9229f6b0c250a56324570fae249c0bf1d246
| 3,646,963
|
def clean_profit_data(profit_data):
"""清理权益全为0的垃圾结算日"""
for i in list(range(len(profit_data)))[::-1]:
profit = profit_data[i][1] == 0
closed = profit_data[i][2] == 0
hold = profit_data[i][3] == 0
if profit and closed and hold:
profit_data.pop(i)
return profit_data
|
d1b7fe9d747a1149f04747b1b3b1e6eba363c639
| 3,646,964
|
def convert_single_example(ex_index, example, max_word_length,max_sen_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
text_sen = example.text_sen.strip().split()
text_pos=example.text_pos.strip().split()
    text_ps = example.text_ps.strip().split()  # a single element (the date)
    if example.text_label is None:
        text_label = ["o"] * len(text_sen)
    else:
        text_label = example.text_label.strip().split()  # as long as the sentence for training data; a single ['o'] for test data
assert len(text_sen)==len(text_pos)
assert len(text_ps)==1
assert len(text_label)==len(text_sen)
text_word=[]
for word in text_sen:
text_word.append(tokenizer.tokenize(word))
    # text_word is a 2-D list, e.g.
    # [
    #   [许, 海, 明],
    #   [喜, 欢],
    #   [玩]
    # ]
    text_sen = text_word
    # Account for [SEP] with "- 1" (this applies to the sentence length)
if len(text_sen) > max_sen_length - 1:
text_sen = text_sen[0:(max_sen_length - 1)]
text_pos = text_pos[0:(max_sen_length - 1)]
text_label=text_label[0:(max_sen_length - 1)]
text_sen.append(["[SEP]"])
text_pos.append(["[SEP]"])
text_label.append("o")
len_sen=len(text_word)
len_pos=len(text_pos)
len_label=len(text_label)
while len(text_sen) < max_sen_length:
text_sen.append(["[PAD]"])
text_pos.append(["[PAD]"])
text_label.append("o")
    '''
    Word-level processing
    '''
    # process each word
    # Account for [CLS] and [SEP] with "- 2" (this applies to each word's length)
for i,wordlist in enumerate(text_sen):
if len(wordlist) > max_word_length - 2:
text_word[i]=wordlist[0:(max_word_length - 2)]
    # add [CLS] and [SEP] to every word
    segment_ids = []  # a 2-D list
len_words=[]
for i,wordlist in enumerate(text_sen):
wordlist.insert(0,"[CLS]")
wordlist.append("[SEP]")
len_words.append(len(wordlist))
while len(wordlist) < max_word_length:
wordlist.append(["PAD"])
segment_ids.append([0]*len(wordlist))
text_sen[i]=wordlist
input_word_ids =[]
for tokens in text_sen:
        input_word_ids.append(tokenizer.convert_tokens_to_ids(tokens))  # a 2-D list
    input_pos_ids = tokenizer.convert_pos_to_ids(text_pos)  # a flat list
    input_ps_id = tokenizer.convert_ps_to_ids(text_ps)[0]  # a single number in [0, 48]
    input_label_ids = tokenizer.convert_label_to_ids(text_label)
    # build the sentence-level input_sen_mask
input_sen_mask = [1] * len_sen
input_pos_mask = [1] * len_pos
input_label_mask = [1]*len_label
# Zero-pad up to the sequence length.
while len(input_sen_mask) < max_sen_length:
input_sen_mask.append(0)
input_pos_mask.append(0)
input_label_mask.append(0)
    # build a mask for every word
input_words_mask=[]
for word_len in len_words:
word_mask = [1] * word_len
while len(word_mask) < max_word_length:
word_mask.append(0)
input_words_mask.append(word_mask)
    assert len(input_word_ids) == max_sen_length  # sentence length
    assert len(input_pos_ids) == max_sen_length  # sentence length
assert len(input_label_ids)==max_sen_length
assert len(input_word_ids[0])==max_word_length
assert len(input_pos_mask) == max_sen_length
assert len(input_label_mask) == max_sen_length
assert len(input_words_mask) == max_sen_length
assert len(segment_ids) == max_sen_length
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("句子单词: %s" % " ".join(
["["+" ".join(x)+"]" for x in text_word]))
tf.logging.info("句子的ids: %s" % " ".join(
["[" + ",".join(list(map(str,word_ids)))+"]" for word_ids in input_word_ids]))
tf.logging.info("句子的mask: %s" % " ".join([str(x) for x in input_sen_mask]))
tf.logging.info("句子中每个单词的mask: %s" % " ".join(
["[" + ",".join(list(map(str,word_ids)))+"]" for word_ids in input_words_mask]))
print("\n")
tf.logging.info("input_pos_ids: %s" % " ".join(
["[" + ",".join(list(map(str, word_ids))) + "]" for word_ids in input_pos_ids]))
tf.logging.info("input_pos_ids: %s" % " ".join(
["[" + ",".join(list(map(str, word_ids))) + "]" for word_ids in input_pos_ids]))
tf.logging.info("input_label_ids: %s" % " ".join(
["[" + ",".join(list(map(str, word_ids))) + "]" for word_ids in input_label_ids]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("ps: %s (id = %d)" % (example.text_ps, input_ps_id))
feature = InputFeatures(
input_words_ids=input_word_ids,
input_pos_ids=input_pos_ids,
input_ps_id=input_ps_id,
input_label_ids=input_label_ids,
input_sen_mask=input_sen_mask,
input_words_mask=input_words_mask,
input_pos_mask=input_pos_mask,
input_label_mask=input_label_mask,
segment_ids=segment_ids,
is_real_example=True)
return feature
|
faf13bd6db6a07a4546531cc968bad5443b95a12
| 3,646,965
|
from curses import ascii  # assumed source of iscntrl()

def scrub_literal(value):
    """
    Scrubs control characters from the incoming values to remove
    things like form feeds (\\f) and line breaks (\\n) which might
    cause problems with Jena.
    Data with these characters was found in the Backstage data.
    """
    if not value:
        return None
    if isinstance(value, int):
        return value
    text = ''.join([c for c in value if not ascii.iscntrl(c)])
    text = text.replace('"', '')
    text = text.replace('\ufffd', '')
    text = clean_char(text)
    if isinstance(text, bytes):
        text = text.decode(errors='replace')
    return text.strip()
|
e0e77bb0edecc810cc6fe051020936ca0ee9bf62
| 3,646,966
|
def mock_interface_settings_mismatch_protocol(mock_interface_settings, invalid_usb_device_protocol):
"""
    Fixture that returns mock USB interface settings with an unsupported device protocol.
"""
mock_interface_settings.getProtocol.return_value = invalid_usb_device_protocol
return mock_interface_settings
|
61958439a2869d29532e50868efb39fe3da6c8b5
| 3,646,967
|
from typing import Mapping
from typing import Any
import shutil
def run_eval(exp_name: str) -> Mapping[str, Any]:
""" """
pred_log_dir = f"{_ROOT}/test_data/eval_tracking_dummy_logs_pred"
gt_log_dir = f"{_ROOT}/test_data/eval_tracking_dummy_logs_gt"
out_fpath = f"{_ROOT}/test_data/{exp_name}.txt"
out_file = open(out_fpath, "w")
eval_tracks(
path_tracker_output_root=pred_log_dir,
path_dataset_root=gt_log_dir,
d_min=0,
d_max=100,
out_file=out_file,
centroid_method="average",
diffatt=None,
category="VEHICLE",
)
out_file.close()
with open(out_fpath, "r") as f:
result_lines = f.readlines()
result_vals = result_lines[0].strip().split(" ")
fn, num_frames, mota, motp_c, motp_o, motp_i, idf1 = result_vals[:7]
most_track, most_lost, num_fp, num_miss, num_sw, num_frag = result_vals[7:]
result_dict = {
"filename": fn,
"num_frames": int(num_frames),
"mota": float(mota),
"motp_c": float(motp_c),
"motp_o": float(motp_o),
"motp_i": float(motp_i),
"idf1": float(idf1),
"most_track": float(most_track),
"most_lost": float(most_lost),
"num_fp": int(num_fp),
"num_miss": int(num_miss),
"num_sw": int(num_sw),
"num_frag": int(num_frag),
}
shutil.rmtree(pred_log_dir)
shutil.rmtree(gt_log_dir)
return result_dict
|
3eaead879a39a30d2524da037d82f4d9b68d17e7
| 3,646,968
|
def MakeLocalSsds(messages, ssd_configs):
"""Constructs the repeated local_ssd message objects."""
if ssd_configs is None:
return []
local_ssds = []
disk_msg = (
messages.
AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk)
interface_msg = disk_msg.InterfaceValueValuesEnum
for s in ssd_configs:
if s['interface'].upper() == 'NVME':
interface = interface_msg.NVME
else:
interface = interface_msg.SCSI
m = disk_msg(
diskSizeGb=s['size'],
interface=interface)
local_ssds.append(m)
return local_ssds
|
128e7a0358221fe3d93da4726924a7a783c65796
| 3,646,970
|
def valid_variant(s, is_coding=True):
"""
Returns True if s is a valid coding or noncoding variant, else False.
Parameters
----------
s : `str`
Variant string to validate.
is_coding : `bool`
Indicates if the variant string represents a coding variant.
"""
_validate_str(s)
if s == WILD_TYPE_VARIANT:
return True
else:
if is_coding:
for mut in s.split(", "):
match = re_coding.match(mut)
if match is None:
return False
return True
else:
for mut in s.split(", "):
match = re_noncoding.match(mut)
if match is None:
return False
return True
|
8cb6c37bed303a052a8655dfb0832bfba638f0d6
| 3,646,971
|
def is_icon_address_valid(address: str) -> bool:
"""Check whether address is in icon address format or not
:param address: (str) address string including prefix
:return: (bool)
"""
try:
if isinstance(address, str) and len(address) == 42:
prefix, body = split_icon_address(address)
if prefix == ICON_EOA_ADDRESS_PREFIX or \
prefix == ICON_CONTRACT_ADDRESS_PREFIX:
return is_lowercase_hex_string(body)
    except Exception:
        pass
return False
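
A sketch of the assumed helpers, using the real ICON prefixes 'hx' (externally owned accounts) and 'cx' (contracts); the exact implementations are assumptions:

ICON_EOA_ADDRESS_PREFIX = 'hx'
ICON_CONTRACT_ADDRESS_PREFIX = 'cx'

def split_icon_address(address: str) -> tuple:
    """Split 'hx...' into ('hx', '<40 hex chars>') (assumed behavior)."""
    return address[:2], address[2:]

def is_lowercase_hex_string(value: str) -> bool:
    """True if value parses as hexadecimal and contains no uppercase letters."""
    try:
        int(value, 16)
    except ValueError:
        return False
    return value == value.lower()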
|
9666d22d04d568706356b7bafd0f202cb9178892
| 3,646,972
|
import base64
def _b64urldec(input: str) -> bytes:
"""
    Decode data from base64 urlsafe with stripped padding (as specified in the JWS RFC 7515).
"""
# The input is stripped of padding '='. These are redundant when decoding (only relevant
# for concatenated sequences of base64 encoded data) but the decoder checks for them.
    # Appending two '=' characters (the maximum amount of padding) is the easiest way to ensure
    # it won't choke on too little padding.
return base64.urlsafe_b64decode(input + '==')
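
For example, decoding the protected header of a JWS compact serialization:

header = _b64urldec('eyJhbGciOiJIUzI1NiJ9')  # b'{"alg":"HS256"}'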
|
fb535072b560b8565916ae8ec3f32c61c41115d8
| 3,646,973
|
def get_sns_topic_arn(aws_creds, ec2_region):
"""
Retrieves the sns topic arn for the account
"""
rgt_client = ResourceGroupsTaggingClient(aws_creds, ec2_region, logger)
sns_topic_arn = rgt_client.get_sns_topic_arn(SNS_TOPIC_TAG_KEY, SNS_TOPIC_TAG_VALUE)
if not sns_topic_arn:
raise SnsTopicNotFound(f"Account doesn't have the SNS topic tagged with "
f"key: '{SNS_TOPIC_TAG_KEY}' and value: '{SNS_TOPIC_TAG_VALUE}'")
return sns_topic_arn
|
760caa77acf414eacf4bb177dd9252fe6578a505
| 3,646,974
|
import numpy as np
import scipy.interpolate
def create_bspline_basis(knots, spline_order, dt=0.02):
"""Create B-spline basis."""
# The repeated boundary knots are appended as it is required for Cox de Boor
# recursive algorithm. See https://math.stackexchange.com/questions/2817170/
# what-is-the-purpose-of-having-repeated-knots-in-a-b-spline and the link
# https://en.wikipedia.org/wiki/De_Boor%27s_algorithm.
knots = list(knots)
knots = [knots[0]] * spline_order + knots + [knots[-1]] * spline_order
num_basis = len(knots) - spline_order - 1
# Query token is in format: [knots, basis coefficients, spline order]
# See https://docs.scipy.org/doc/scipy/reference/generated/
# scipy.interpolate.splev.html
query_token = [0, 0, spline_order]
query_token[0] = np.array(knots)
time_line = np.linspace(knots[0], knots[-1], int(np.round(knots[-1]/dt)) + 1)
    # Add a column for the constant term.
basis_matrix = np.zeros((len(time_line), num_basis + 1))
basis_matrix[:, -1] = np.ones(len(time_line)) # Constant term.
for basis_index in range(num_basis):
basis_coefficients = np.zeros(num_basis)
basis_coefficients[basis_index] = 1.0
query_token[1] = basis_coefficients.tolist()
base = scipy.interpolate.splev(time_line, query_token)
basis_matrix[:, basis_index] = base
return basis_matrix, time_line
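
A quick usage sketch: with these knots and cubic splines, the returned basis has one column per B-spline plus the constant column:

knots = [0.0, 0.25, 0.5, 0.75, 1.0]
basis_matrix, time_line = create_bspline_basis(knots, spline_order=3, dt=0.02)
# After boundary padding there are 11 knots, so num_basis = 11 - 3 - 1 = 7,
# plus one constant column.
print(basis_matrix.shape)  # (51, 8)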
|
8256b282ffc5f19a9e00d59c689e57664600b2f4
| 3,646,975
|
def execute(
device,
commands,
creds=None,
incremental=None,
with_errors=False,
timeout=settings.DEFAULT_TIMEOUT,
command_interval=0,
force_cli=False
):
"""
Connect to a ``device`` and sequentially execute all the commands in the
iterable ``commands``.
Returns a Twisted ``Deferred`` object, whose callback will get a sequence
of all the results after the connection is finished.
    ``commands`` is usually just a list; however, you can also make it a
    generator, and have it and ``incremental`` share a closure over some state
variables. This allows you to determine what commands to execute
dynamically based on the results of previous commands. This implementation
is experimental and it might be a better idea to have the ``incremental``
callback determine what command to execute next; it could then be a method
of an object that keeps state.
BEWARE: Your generator cannot block; you must immediately
decide what next command to execute, if any.
Any ``None`` in the command sequence will result in a ``None`` being placed
in the output sequence, with no command issued to the device.
If any command returns an error, the connection is dropped immediately and
the errback will fire with the failed command. You may set ``with_errors``
to get the exception objects in the list instead.
Connection failures will still fire the errback.
`~trigger.exceptions.LoginTimeout` errors are always possible if the login
process takes longer than expected and cannot be disabled.
:param device:
A `~trigger.netdevices.NetDevice` object
:param commands:
An iterable of commands to execute (without newlines).
:param creds:
(Optional) A 2-tuple of (username, password). If unset it will fetch it
from ``.tacacsrc``.
:param incremental:
(Optional) A callback that will be called with an empty sequence upon
connection and then called every time a result comes back from the
device, with the list of all results.
:param with_errors:
(Optional) Return exceptions as results instead of raising them
:param timeout:
(Optional) Command response timeout in seconds. Set to ``None`` to
disable. The default is in ``settings.DEFAULT_TIMEOUT``.
`~trigger.exceptions.CommandTimeout` errors will result if a command
seems to take longer to return than specified.
:param command_interval:
(Optional) Amount of time in seconds to wait between sending commands.
:param force_cli:
(Optional) Juniper-only: Force use of CLI instead of Junoscript.
:returns: A Twisted ``Deferred`` object
"""
execute_func = _choose_execute(device, force_cli=force_cli)
return execute_func(device=device, commands=commands, creds=creds,
incremental=incremental, with_errors=with_errors,
timeout=timeout, command_interval=command_interval)
|
ead00377f7c50d8bfdb6da39a7a1fe1820d9bcc7
| 3,646,976
|
import requests

def get_proxy(usage: str):
    """
    Fetch a proxy through the web API.
    :param usage: the target site, corresponding to a key of WEB_AVAILABLE_PROXIES
    :return: an available proxy, or None
    """
    url = API_SERVER + "/proxy?usage={}".format(usage)
    try:
        res = requests.get(url, timeout=5)
        if res.status_code == 200:
            return res.json().get("resource").get("proxy")
        else:
            return None
    except Exception:
        return None
|
2c836f0a7a4dce2e5442080ee93a1b32d10dac3d
| 3,646,978
|
def column_names_get(subject: str) -> tuple:
""" Returns column names. """
if subject == c.SUBJECT.PLANETS:
return c.HEADERS.PLANETS
elif subject == c.SUBJECT.STARSHIPS:
return c.HEADERS.STARSHIPS
elif subject == c.SUBJECT.VEHICLES:
return c.HEADERS.VEHICLES
elif subject == c.SUBJECT.PEOPLE:
return c.HEADERS.PEOPLE
else:
raise ValueError(f'There are no column names for the {subject} subject.')
|
a544574d5ac66e2ea7e045fa2bba37fe78df20f5
| 3,646,979
|
def relabel_prometheus(job_config):
"""Get some prometheus configuration labels."""
relabel = {
'path': '__metrics_path__',
'scheme': '__scheme__',
}
labels = {
relabel[key]: value
for key, value in job_config.items()
if key in relabel.keys()
}
# parse __param_ parameters
for param, value in job_config.get('params', {}).items():
labels['__param_%s' % (param,)] = value
return labels
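
For example, a job configuration with a custom path, scheme, and one parameter maps to:

job_config = {
    'path': '/probe',
    'scheme': 'https',
    'params': {'module': ['http_2xx']},
}
print(relabel_prometheus(job_config))
# {'__metrics_path__': '/probe', '__scheme__': 'https', '__param_module': ['http_2xx']}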
|
eb08f617903fe66f462a5922f8149fd8861556ad
| 3,646,981
|
import random
def q_geography_capital():
"""Ask what the capital of a given country is."""
question = QuestionGenerator()
question.set_type('geography')
# select country
all_countries = facts.get_geography_countries_list()
country = random.choice(all_countries)
# formulate question
question.ask(f"Was ist die Hauptstadt von {country}")
# answer
capital = facts.get_geography_capital(country)
question.set_answer(capital)
# other options
other_capitals = [c for c in facts.get_geography_capitals_set() if c != capital]
for c in other_capitals:
question.add_wrong_option(c)
return question.create(num_options=3)
|
63362aae1eb0e117da3ade0c9bff22edb5504689
| 3,646,982
|
from functools import reduce
def binary_stabilizer_to_pauli_stabilizer(stabilizer_tableau):
"""
Convert a stabilizer tableau to a list of PauliTerms
:param stabilizer_tableau: Stabilizer tableau to turn into pauli terms
:return: a list of PauliTerms representing the tableau
    :rtype: List of PauliTerms
"""
stabilizer_list = []
num_qubits = (stabilizer_tableau.shape[1] - 1) // 2
for nn in range(stabilizer_tableau.shape[0]): # iterate through the rows
stabilizer_element = []
for ii in range(num_qubits):
if stabilizer_tableau[nn, ii] == 1 and stabilizer_tableau[nn, ii + num_qubits] == 0:
stabilizer_element.append(sX(ii))
elif stabilizer_tableau[nn, ii] == 0 and stabilizer_tableau[nn, ii + num_qubits] == 1:
stabilizer_element.append(sZ(ii))
elif stabilizer_tableau[nn, ii] == 1 and stabilizer_tableau[nn, ii + num_qubits] == 1:
stabilizer_element.append(sY(ii))
stabilizer_term = reduce(lambda x, y: x * y, stabilizer_element) * ((-1) ** stabilizer_tableau[nn, -1])
stabilizer_list.append(stabilizer_term)
return stabilizer_list
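
A small worked example, assuming the pyquil-style sX/sY/sZ constructors used above; each tableau row is laid out as [x-bits | z-bits | sign-bit]:

import numpy as np

# Row 0 -> +X0, row 1 -> -Z0, row 2 -> +Y0
tableau = np.array([[1, 0, 0],
                    [0, 1, 1],
                    [1, 1, 0]])
# binary_stabilizer_to_pauli_stabilizer(tableau)  # ~ [X0, -Z0, Y0]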
|
78183d9ecd436267d7732ba50cb6591fea54984e
| 3,646,983
|
def checkGroup(self, group, colls):
"""
Args:
group:
colls:
Returns:
"""
cut = []
for elem in group:
if elem in colls:
cut.append(elem)
if len(cut) == len(group):
return cut
else:
return []
|
ca30648c536bcf26a1438d908f93a5d3dcc131c9
| 3,646,984
|
def get_node_shapes(input_graph_def, target_nodes):
"""Get shapes of target nodes from input_graph_def, shapes may be partial"""
node_shapes = []
for target in target_nodes:
for node in input_graph_def.node:
if node.name == target:
if not 'shape' in node.attr:
print("Warning: Fail to get output shape of node: {}".format(node))
node_shapes.append(
tensor_shape.as_shape(node.attr['shape'].shape).as_list())
return node_shapes
|
0a70a81f0be826697d47b52dc2a2e63c0c73b3b4
| 3,646,985
|
def calculate_cost(A3, Y):
    """
    Compute the cross-entropy cost.
    Args:
        A3: output of forward propagation, of shape (output size, number of examples)
        Y: vector of true labels, same shape as A3
    Return:
        cost: value of the cost function
    """
m = Y.shape[1]
logprobs = np.multiply(-np.log(A3), Y) + np.multiply(
-np.log(1 - A3), 1 - Y)
cost = 1. / m * np.nansum(logprobs)
return cost
|
0a85baae5acc6f9ceec417f2942727cd3d96a34e
| 3,646,986
|
def qsammobilenetv2(**kwargs):
"""Constructs a QSAMMobileNetv2 model.
"""
model = QSAMMobileNetV2(**kwargs)
return model
|
4d0b9f23a3c40ab2386d30fe45ca70a401f41b1a
| 3,646,988
|
def get_frame_labels_fields(
sample_collection,
frame_labels_field=None,
frame_labels_prefix=None,
frame_labels_dict=None,
dataset_exporter=None,
required=False,
force_dict=False,
):
"""Gets the frame label field(s) of the sample collection matching the
specified arguments.
Provide one of ``frame_labels_field``, ``frame_labels_prefix``,
``frame_labels_dict``, or ``dataset_exporter``.
Args:
sample_collection: a :class:`SampleCollection`
frame_labels_field (None): the name of the frame labels field to
export
frame_labels_prefix (None): a frame labels field prefix; the returned
labels dict will contain all frame-level fields whose name starts
with the given prefix
frame_labels_dict (None): a dictionary mapping frame-level label field
names to keys
dataset_exporter (None): a
:class:`fiftyone.utils.data.exporters.DatasetExporter` to use to
choose appropriate frame label field(s)
required (False): whether at least one matching frame field must be
found
force_dict (False): whether to always return a labels dict rather than
an individual label field
Returns:
a frame label field or dict mapping frame label fields to keys
"""
if frame_labels_prefix is not None:
frame_labels_dict = _get_frame_labels_dict_for_prefix(
sample_collection, frame_labels_prefix
)
if frame_labels_dict is not None:
return frame_labels_dict
if frame_labels_field is None and dataset_exporter is not None:
frame_labels_field = _get_default_frame_label_fields_for_exporter(
sample_collection, dataset_exporter, required=required
)
if frame_labels_field is None and required:
raise ValueError(
"Unable to find any frame label fields matching the provided "
"arguments"
)
if (
force_dict
and frame_labels_field is not None
and not isinstance(frame_labels_field, dict)
):
return {frame_labels_field: frame_labels_field}
return frame_labels_field
|
0bdb1346f154f125b2f39f638929ae6e5d661db7
| 3,646,989
|
def _rpc_code_to_error_code(rpc_code):
"""Maps an RPC code to a platform error code."""
return _RPC_CODE_TO_ERROR_CODE.get(rpc_code, exceptions.UNKNOWN)
|
7cb6ef3d7b751c915673f99a88800bdb53e81f72
| 3,646,990
|
def peak_values(dataframe_x, dataframe_y, param):
"""Outputs x (potentials) and y (currents) values from data indices
given by peak_detection function.
Parameters
----------
DataFrame_x : pd.DataFrame
should be in the form of a pandas DataFrame column.
For example, df['potentials'] could be input as the
column of x data.
DataFrame_y : pd.DataFrame
should be in the form of a pandas DataFrame column.
For example, df['currents'] could be input as the
column of y data.
param: dict
Dictionary of parameters governing the CV run.
Returns
-------
peak_array : np.array
Array of coordinates at peaks in the following order:
potential of peak on top curve, current of peak on top curve,
potential of peak on bottom curve, current of peak on bottom
curve
"""
peak_values = []
potential_p, potential_n = split(dataframe_x, param)
current_p, current_n = split(dataframe_y, param)
peak_top_index = peak_detection(current_p, 'positive')
peak_bottom_index = peak_detection(current_n, 'negative')
    # TOPX (the bottom part of the curve is the first part of the DataFrame)
    peak_values.append(potential_p[(peak_top_index['peak_top'])])
    # TOPY
    peak_values.append(current_p[(peak_top_index['peak_top'])])
# BOTTOMX
peak_values.append(potential_n[(peak_bottom_index['peak_bottom'])])
# BOTTOMY
peak_values.append(current_n[(peak_bottom_index['peak_bottom'])])
peak_array = np.array(peak_values)
return peak_array
|
3e0d656d80ef5806abcd2d71e80be544eca585cb
| 3,646,991
|
import typing
import numpy as np
import scipy.linalg
import vorpy.tensor

def random_rotation_operator_tensor(operand_space_shape: typing.Tuple[int, ...]) -> np.ndarray:
"""NOTE: Not a uniform distribution."""
if vorpy.tensor.dimension_of_shape(operand_space_shape) == 0:
raise Exception(f'invalid dimension for vector space having rotation')
A = random_antisymmetric_operator_tensor(np.pi, operand_space_shape)
return scipy.linalg.expm(vorpy.tensor.as_linear_operator(A)).reshape(A.shape)
|
fd4442bb6178824fe71c6050f44539f8af34c149
| 3,646,993
|
from tensorflow import keras  # assumed source of `keras`; a standalone Keras install would also work

def get_early_stopping(callback_config: dict):
    """ Get a tf.keras EarlyStopping callback.
    Args:
        callback_config: config info used to build the callback
    """
    return keras.callbacks.EarlyStopping(**callback_config)
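
A usage sketch with a typical configuration dict (the keys mirror keras.callbacks.EarlyStopping arguments):

callback_config = {
    'monitor': 'val_loss',
    'patience': 5,
    'restore_best_weights': True,
}
early_stopping = get_early_stopping(callback_config)
# model.fit(x, y, validation_data=(x_val, y_val), callbacks=[early_stopping])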
|
6f9f5e26b69765ff817c89b6ebbb59a62bc76266
| 3,646,995
|
def decode(rdf, hint=[]):
    """Decode a ReDIF document."""
    def try_decode(encoding):
        rslt = rdf.decode(encoding)
        if rslt.lower().find("template-type") == -1:
            raise RuntimeError("Decoding Error")
        return rslt
    encodings = hint + ["windows-1252", "utf-8", "utf-16", "latin-1"]
    if rdf[:3] == b"\xef\xbb\xbf":
        encodings = ["utf-8-sig"] + encodings
    for enc in encodings:
        try:
            return try_decode(enc)
        except Exception:
            continue
    raise RuntimeError("Decoding Error")
|
f42eed2caaba90f4d22622643885b4d87b9df98b
| 3,646,996
|
import numpy as np

def pad_square(x):
    """ Pad image to meet square dimensions """
    r, c = x.shape
    d = (c - r) / 2
    pl, pr, pt, pb = 0, 0, 0, 0
    if d > 0: pt, pb = int(np.floor( d)), int(np.ceil( d))
    else:     pl, pr = int(np.floor(-d)), int(np.ceil(-d))
return np.pad(x, ((pt,pb),(pl,pr)), 'minimum')
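
For example, a 2x4 array gets one row of padding above and below, filled with the array minimum:

x = np.arange(8, dtype=float).reshape(2, 4)
print(pad_square(x).shape)  # (4, 4)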
|
3a0b248f9403d0cb392e1aff306af435b5a43396
| 3,646,997
|
from typing import Union
from typing import Any
from typing import Tuple
def get_env_properties(
    env: Union[gym.Env, VecEnv], network: Union[str, Any] = "mlp"
) -> Tuple[Any, Any, bool, Any]:
"""
Finds important properties of environment
:param env: Environment that the agent is interacting with
:type env: Gym Environment
:param network: Type of network architecture, eg. "mlp", "cnn"
:type network: str
    :returns: (State space dimensions, Action space dimensions,
        discreteness of action space, and action limit (highest action value))
    :rtype: int, float, ...; int, float, ...; bool; int, float, ...
"""
if network == "cnn":
state_dim = env.framestack
elif network == "mlp":
state_dim = env.observation_space.shape[0]
elif isinstance(network, (BasePolicy, BaseValue)):
state_dim = network.state_dim
elif isinstance(network, BaseActorCritic):
state_dim = network.actor.state_dim
else:
raise TypeError
if isinstance(env.action_space, gym.spaces.Discrete):
action_dim = env.action_space.n
discrete = True
action_lim = None
elif isinstance(env.action_space, gym.spaces.Box):
action_dim = env.action_space.shape[0]
action_lim = env.action_space.high[0]
discrete = False
else:
raise NotImplementedError
return state_dim, action_dim, discrete, action_lim
|
6a377830cb24bc215b7d1c6b09b08ed63ab383ef
| 3,646,998
|
import numpy as np

def mahalanobis(data, produce=None):
"""
Calculate mahalanobis distance on a matrix of column vectors.
Assumes that rows are observations and columns are features.
Parameters
----------
data : numpy array or pandas dataframe
The data to calculate distances on (columns are variables, rows are
observations).
    produce : str, optional
        Variation of the output to produce: either `squared`, `leverage`,
        or `sqrt`. The default (None) produces `sqrt`.
Returns
-------
numpy array
Array containing the distances.
"""
arr = np.array(data).reshape(data.shape[0], -1)
cent = arr - arr.mean(axis=0)
covmat = np.cov(cent, rowvar=False)
invcov = None
if arr.shape[1] == 1:
invcov = 1/covmat
else:
try:
invcov = np.linalg.inv(covmat)
except np.linalg.LinAlgError:
invcov = np.linalg.pinv(covmat)
md2 = np.sum(cent.dot(invcov) * cent, axis=1)
if produce == "squared":
return md2
elif produce == "leverage":
n = data.shape[0]
return ((md2/(n - 1)) + (1/n))
else:
return np.sqrt(md2)
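
A quick sketch on synthetic data; rows are observations, so the result holds one distance per row:

rng = np.random.default_rng(0)
data = rng.normal(size=(100, 3))
d = mahalanobis(data)                        # sqrt distances, shape (100,)
lev = mahalanobis(data, produce="leverage")  # leverage values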
|
b6dff6cfe12b4c44b6a97a6bd1f51a2250b7b63f
| 3,647,000
|
def text(el):
"""
Helper to get the text content of a BeautifulSoup item
"""
return el.get_text().strip()
|
7b34c77c79677a73cc66532fe6305635b1bdac43
| 3,647,001
|
def get_sha512_manifest(zfile):
"""
Get MANIFEST.MF from a bar file.
:param zfile: Open (!!!) ZipFile instance.
:type zfile: zipfile.ZipFile
"""
names = zfile.namelist()
manifest = None
for name in names:
if name.endswith("MANIFEST.MF"):
manifest = name
break
if manifest is None:
raise SystemExit
return manifest
|
7ef150bb3e89f8723649ee983085a413ec8a31df
| 3,647,003
|
def plot_heatmap(filename, xdata, ydata, binx, biny, title = None, xlabel = None, ylabel = None, dpi = 150, figsize = (10,10), tfont = 17, lfont = 14):
"""
Present variables as a 2D heatmap
to correlate magnitude and direction.
"""
def get_bin_id(mybins, vv):
for ibin in range(len(mybins)-1):
if vv >= mybins[ibin] and vv < mybins[ibin+1]:
return ibin + 1
return 0
total = len(xdata)
if total == 0:
print('Not enough data to produce heatmap, exiting...')
return
nx, nxbins = np.histogram(xdata, bins = binx)
ny, nybins = np.histogram(ydata, bins = biny)
temp_x = np.zeros(total)
temp_y = np.zeros(total)
for ij in range(total):
temp_x[ij] = get_bin_id(nxbins, xdata[ij])
temp_y[ij] = get_bin_id(nybins, ydata[ij])
table2d = np.zeros((len(nybins)-1,len(nxbins)-1))
for ij in range(len(temp_x)):
table2d[int(temp_y[ij])-1, int(temp_x[ij])-1] += 1
x_labels = []
y_labels = []
for ij in range(len(nxbins)-1):
x_labels.append('{:.2f}'.format(0.5*(nxbins[ij] + nxbins[ij+1])))
for ij in range(len(nybins)-1):
y_labels.append('{:.1f}'.format(0.5*(nybins[ij] + nybins[ij+1])))
fig, ax = plt.subplots()
fig.set_size_inches(figsize[0], figsize[1])
im = ax.imshow(table2d)
# We want to show all ticks...
ax.set_xticks(np.arange(len(x_labels)))
ax.set_yticks(np.arange(len(y_labels)))
# ... and label them with the respective list entries
ax.set_xticklabels(x_labels)
ax.set_yticklabels(y_labels)
if title:
ax.set_title(title, fontsize = tfont)
if ylabel:
ax.set_ylabel(ylabel, fontsize = lfont)
if xlabel:
ax.set_xlabel(xlabel, fontsize = lfont)
ylims = ax.get_yticks()
rr = ylims[1] - ylims[0]
ax.set_ylim(ylims[0] - rr/2., ylims[-1] + rr/2.)
cfont = max([8, lfont-2])
ax.tick_params(axis = 'both', which = 'major', labelsize = cfont)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
for i in range(len(nxbins)-1):
for j in range(len(nybins)-1):
text = ax.text(i, j, int(100.0*table2d[j, i]/total), ha="center", va="center", color="w")
fig.tight_layout()
if isinstance(filename, list):
for item in filename:
fig.savefig(item, dpi = dpi)
else:
fig.savefig(filename, dpi = dpi)
plt.close()
return 0
|
3397bf2fc02932056411ef8addde264fa50b9ea5
| 3,647,004
|
def scattering_transform1d(n_classes, sequence_length):
""" Scattering transform
"""
log_eps = 1e-6
    x_in = layers.Input(shape=(sequence_length,))
x = Scattering1D(8, 12)(x_in)
x = layers.Lambda(lambda x: x[..., 1:, :])(x)
x = layers.Lambda(lambda x: tf.math.log(tf.abs(x) + log_eps))(x)
x = layers.GlobalAveragePooling1D(data_format='channels_first')(x)
x = layers.BatchNormalization(axis=1)(x)
x_out = layers.Dense(n_classes, activation='softmax')(x)
model = tf.keras.models.Model(x_in, x_out)
return model
|
53547918c5a0efa5c0e3766c770903b146eff19e
| 3,647,006
|
import zlib
def addFileContent(session, filepath, source_file_name, content_hash,
encoding):
"""
Add the necessary file contents. If the file is already stored in the
database then its ID returns. If content_hash in None then this function
calculates the content hash. Or if is available at the caller and is
provided then it will not be calculated again.
This function must not be called between addCheckerRun() and
finishCheckerRun() functions when SQLite database is used! addCheckerRun()
function opens a transaction which is closed by finishCheckerRun() and
since SQLite doesn't support parallel transactions, this API call will
wait until the other transactions finish. In the meantime the run adding
transaction times out.
"""
source_file_content = None
if not content_hash:
source_file_content = get_file_content(source_file_name, encoding)
hasher = sha256()
hasher.update(source_file_content)
content_hash = hasher.hexdigest()
file_content = session.query(FileContent).get(content_hash)
if not file_content:
if not source_file_content:
source_file_content = get_file_content(source_file_name, encoding)
try:
compressed_content = zlib.compress(source_file_content,
zlib.Z_BEST_COMPRESSION)
fc = FileContent(content_hash, compressed_content)
session.add(fc)
session.commit()
except sqlalchemy.exc.IntegrityError:
            # Another transaction might have added the same content in
            # the meantime.
session.rollback()
file_record = session.query(File) \
.filter(File.content_hash == content_hash,
File.filepath == filepath) \
.one_or_none()
if not file_record:
try:
file_record = File(filepath, content_hash)
session.add(file_record)
session.commit()
except sqlalchemy.exc.IntegrityError as ex:
LOG.error(ex)
# Other transaction might have added the same file in the
# meantime.
session.rollback()
file_record = session.query(File) \
.filter(File.content_hash == content_hash,
File.filepath == filepath) \
.one_or_none()
return file_record.id
|
fdd77f23151ed9627c5d9bbfb157839810c9655a
| 3,647,007
|
def model_fn_builder(num_labels, learning_rate, num_train_steps,
num_warmup_steps):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)
# TRAIN and EVAL
if not is_predicting:
(loss, predicted_labels, log_probs) = create_model(
is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)
train_op = bert.optimization.create_optimizer(
loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)
# Calculate evaluation metrics.
def metric_fn(label_ids, predicted_labels):
accuracy = tf.metrics.accuracy(label_ids, predicted_labels)
f1_score = tf.contrib.metrics.f1_score(
label_ids,
predicted_labels)
auc = tf.metrics.auc(
label_ids,
predicted_labels)
recall = tf.metrics.recall(
label_ids,
predicted_labels)
precision = tf.metrics.precision(
label_ids,
predicted_labels)
true_pos = tf.metrics.true_positives(
label_ids,
predicted_labels)
true_neg = tf.metrics.true_negatives(
label_ids,
predicted_labels)
false_pos = tf.metrics.false_positives(
label_ids,
predicted_labels)
false_neg = tf.metrics.false_negatives(
label_ids,
predicted_labels)
return {
"eval_accuracy": accuracy,
"f1_score": f1_score,
"auc": auc,
"precision": precision,
"recall": recall,
"true_positives": true_pos,
"true_negatives": true_neg,
"false_positives": false_pos,
"false_negatives": false_neg
}
eval_metrics = metric_fn(label_ids, predicted_labels)
if mode == tf.estimator.ModeKeys.TRAIN:
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
train_op=train_op)
else:
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
eval_metric_ops=eval_metrics)
else:
(predicted_labels, log_probs) = create_model(
is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)
predictions = {
'probabilities': log_probs,
'labels': predicted_labels
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Return the actual model function in the closure
return model_fn
|
570f5297fbcc57eaae1d08e9ee816207db707ffd
| 3,647,008
|
def FancyAnalyzer(expression=r"\s+", stoplist=STOP_WORDS, minsize=2,
maxsize=None, gaps=True, splitwords=True, splitnums=True,
mergewords=False, mergenums=False):
"""Composes a RegexTokenizer with an IntraWordFilter, LowercaseFilter, and
StopFilter.
>>> ana = FancyAnalyzer()
>>> [token.text for token in ana(u"Should I call getInt or get_real?")]
[u"should", u"call", u"getInt", u"get", u"int", u"get_real", u"get", u"real"]
:param expression: The regular expression pattern to use to extract tokens.
:param stoplist: A list of stop words. Set this to None to disable
the stop word filter.
:param minsize: Words smaller than this are removed from the stream.
:param maxsize: Words longer that this are removed from the stream.
:param gaps: If True, the tokenizer *splits* on the expression, rather
than matching on the expression.
"""
ret = RegexTokenizer(expression=expression, gaps=gaps)
iwf = IntraWordFilter(splitwords=splitwords, splitnums=splitnums,
mergewords=mergewords, mergenums=mergenums)
lcf = LowercaseFilter()
    swf = StopFilter(stoplist=stoplist, minsize=minsize, maxsize=maxsize)
return ret | iwf | lcf | swf
|
50fddbbdc22770b3a9b732bb328bf48c0407aafe
| 3,647,010
|
def find_res_shift(x_min, x_max, y_min, y_max, z_min, z_max, target_id, my_sites, res_two_three_dict, my_mols, color_list, button_list):
"""Function to find the relavant residue shifts"""
print "FINDING MAX SHIFTS"
max_shift = []
# Get the delta value
delta = 5.0
# Filter residues to the ones within 1.0 A of any molecule AND then sort by size
tot_res = Residue.objects.filter(target_id=target_id)
if x_max:
criterion1 = Q(x_max__gte=x_max + delta)
criterion2 = Q(x_max__gte=x_min + delta)
near_res = tot_res.exclude(criterion1 & criterion2)
criterion1 = Q(x_min__lte=x_max - delta)
criterion2 = Q(x_min__lte=x_min - delta)
near_res = near_res.exclude(criterion1 & criterion2)
criterion1 = Q(y_max__gte=y_max + delta)
criterion2 = Q(y_max__gte=y_min + delta)
near_res = near_res.exclude(criterion1 & criterion2)
# Now do y_min
criterion1 = Q(y_min__lte=y_max - delta)
criterion2 = Q(y_min__lte=y_min - delta)
near_res = near_res.exclude(criterion1 & criterion2)
# Now do Z
# First Z_max
criterion1 = Q(z_max__gte=z_max + delta)
criterion2 = Q(z_max__gte=z_min + delta)
near_res = near_res.exclude(criterion1 & criterion2)
# Now Z min
criterion1 = Q(z_min__lte=z_max - delta)
criterion2 = Q(z_min__lte=z_min - delta)
near_res = near_res.exclude(criterion1 & criterion2)
near_res = set(near_res.filter().values_list("res_name", "res_num"))
else:
tot_near_res = []
tot_res_d = {}
for my_site in my_sites:
criterion1 = Q(x_max__gte=my_site.x_max + delta)
criterion2 = Q(x_max__gte=my_site.x_min + delta)
near_res = tot_res.exclude(criterion1 & criterion2)
criterion1 = Q(x_min__lte=my_site.x_max - delta)
criterion2 = Q(x_min__lte=my_site.x_min - delta)
near_res = near_res.exclude(criterion1 & criterion2)
criterion1 = Q(y_max__gte=my_site.y_max + delta)
criterion2 = Q(y_max__gte=my_site.y_min + delta)
near_res = near_res.exclude(criterion1 & criterion2)
# Now do y_min
criterion1 = Q(y_min__lte=my_site.y_max - delta)
criterion2 = Q(y_min__lte=my_site.y_min - delta)
near_res = near_res.exclude(criterion1 & criterion2)
# Now do Z
# First Z_max
criterion1 = Q(z_max__gte=my_site.z_max + delta)
criterion2 = Q(z_max__gte=my_site.z_min + delta)
near_res = near_res.exclude(criterion1 & criterion2)
# Now Z min
criterion1 = Q(z_min__lte=my_site.z_max - delta)
criterion2 = Q(z_min__lte=my_site.z_min - delta)
near_res = near_res.exclude(criterion1 & criterion2)
# Now we get the near res for this site
near_res = set(near_res.filter().values_list("res_name", "res_num"))
for res in near_res:
if res in tot_res_d:
tot_res_d[res].append(my_site.pk)
else:
tot_res_d[res] = [my_site.pk]
tot_near_res.extend(list(near_res))
near_res = tot_near_res
print "Getting clusters"
my_res = ResShift.objects.filter(target_id=target_id, res_name__in=[x[0] for x in near_res], res_num__in=[x[1] for x in near_res])
# Only find those close to the BOX / main
out_res_d = {}
for i, val in enumerate(sorted(my_res.values_list("max_shift", "res_name", "pk", "res_num"),reverse=True)):
my_mol = Molecule()
# Define the site the residues are in
res_hash = (val[1], val[3])
if res_hash in tot_res_d:
my_mol.sites = " ".join(["SITE"+ str(x) for x in tot_res_d[res_hash]])
#my_mol.my_list = [(x[0]) for x in sorted(ResShift.objects.filter(target_id=target).values_list("max_shift"),reverse=True)[:5]]
if val[1] in res_two_three_dict:
this_res_name = res_two_three_dict[val[1]]
else:
this_res_name = "UNI"
my_mol.res = "^" + this_res_name + str(val[3])
out_res_d[my_mol.res] = {}
my_mol.my_name = val[1] + ": " + str(val[3])
my_mol.shift = val[0]
my_mol.button = button_list[i % len(button_list)]
my_mol.bg = color_list[i % len(color_list)]
my_mol.res_cl = {}
# Now get how the molecules rank on this residue move
# instead we want to go trhrough molecules
my_mol.my_list = []
# Now colour the clusters
for item in my_mols:
this_res = tot_res.filter(res_name=val[1], res_num=val[3],
prot_id__molecule=item)
if len(this_res) ==0:
new_mol = Molecule()
# Get the PK from here
new_mol.pk = item.pk
new_mol.shift = 0.0
new_mol.colour = ""
out_res_d[my_mol.res][item.prot_id.code] = ""
my_mol.my_list.append(new_mol)
elif len(this_res) == 1:
this_res = this_res[0]
new_mol = Molecule()
# Get the PK from here
new_mol.pk = item.pk
new_mol.shift = this_res.max_shift
new_mol.clus_id = "RESCL" + str(this_res.clust_id) + "_" + val[1] + "_" + str(val[3])
my_mol.res_cl["RESCL" + str(this_res.clust_id) + "_" + val[1] + "_" + str(val[3])] = [color_list[this_res.clust_id % len(color_list)], button_list[this_res.clust_id % len(button_list)]]
new_mol.colour = color_list[this_res.clust_id % len(color_list)]
out_res_d[my_mol.res][this_res.prot_id.code] = button_list[this_res.clust_id % len(button_list)]
my_mol.my_list.append(new_mol)
else:
print "ERROR MORE THAN ONE MOLS"
# Now append this guy to the list
max_shift.append(my_mol)
return json.dumps(out_res_d), max_shift
|
d46a146071f5cd48ab1382d03ac4678cc2c301fd
| 3,647,011
|
def lookup(*getters):
"""Find data by provided parameters and group by type respectively"""
getters = list(reversed(getters))
def wrap(struct):
while getters:
_type, getter = getters.pop()
if _type == G_TYPE_KEY:
struct = getter(struct)
continue
if _type == G_TYPE_ARR:
n_getters = list(reversed(getters))
return [lookup(*n_getters)(elem) for elem in getter(struct)]
return struct
return wrap
|
937a44e8366016cb136f0b40a91448b97c52357d
| 3,647,012
|
import pandas as pd

def compute_one(t, lhs, rhs, **kwargs):
""" Join two pandas data frames on arbitrary columns
The approach taken here could probably be improved.
To join on two columns we force each column to be the index of the
dataframe, perform the join, and then reset the index back to the left
side's original index.
"""
result = pd.merge(lhs, rhs,
left_on=t.on_left, right_on=t.on_right,
how=t.how)
return result.reset_index()[t.columns]
|
c050fdeae2e354be3748984a32ad96b81593355b
| 3,647,013
|
def simulate(mat, det, e0=20.0, dose=defaultDose, withPoisson=True, nTraj=defaultNumTraj, sf=defaultCharFluor, bf=defaultBremFluor, xtraParams=defaultXtraParams):
"""simulate(mat,det,[e0=20.0],[withPoisson=True],[nTraj=defaultNumTraj],[dose=defaultDose],[sf=defaultCharFluor],[bf=defaultBremFluor],[xtraParams=defaultXtraParams])
Simulate a bulk spectrum for the material mat on the detector det at beam energy e0 (in keV). If \
sf then simulate characteristic secondary fluorescence. If bf then simulate bremsstrahlung secondary \
fluorescence. nTraj specifies the number of electron trajectories. dose is in nA*sec."""
mat = dtsa2.material(mat)
if not isinstance(mat, epq.Material):
print u"Please provide a material with a density - %s" % mat
tmp = u"MC simulation of bulk %s at %0.1f keV%s%s" % (mat, e0, (" + CSF" if sf else ""), (" + BSF" if bf else ""))
print tmp
res = base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildBulk, { "Material" : mat }, xtraParams)
res.getProperties().setCompositionProperty(epq.SpectrumProperties.StandardComposition, mat)
return res
|
5ffdf63038fa2ba4305001f1b1ec5da0c13ebf3d
| 3,647,014
|
def link_match_family(link, family_name):
    Checks whether a link can be used with a given family.
When this function is used with built-in family names, it tests whether the link name can be
used with the given built-in family. If the family name is not known, we return True because
the user is working with a custom ``Family`` object.
Which links can work with which families are taken from statsmodels.
"""
if family_name in FAMILY_LINKS:
return link in FAMILY_LINKS[family_name]
# Custom family, we don't know what link functions can be used
return True
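
A sketch of the assumed FAMILY_LINKS table; this subset is hypothetical, though the naming follows statsmodels' family/link compatibility:

FAMILY_LINKS = {
    "gaussian": ["identity", "log", "inverse"],
    "bernoulli": ["logit", "probit", "cloglog", "identity"],
    "poisson": ["log", "identity", "sqrt"],
}

# link_match_family("log", "poisson")         # True
# link_match_family("probit", "gaussian")     # False
# link_match_family("anything", "my_family")  # True: unknown families are unrestricted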
|
7d95556b5ff6537bc994d7b017263ced13d4efc0
| 3,647,015
|
def convert_AST_to_expr(ast):
"""Creates expression from the AST."""
converter = ASTToInstrBlockConverter()
instrs = converter.my_visit(ast)
return instrs[0]
|
b4dca77c48cd0001a2f55c71a077a6b195a181ce
| 3,647,017
|
import time
def add_data_from_api(service, repo, variable_type, keys):
"""Retrieves Github API data. Utilizes the function from github_api/github.py to do so.
This function adds the retrieved variables directly to the data dictionary.
Args:
service (Service): Service object with API connection and metadata vars
repo (Repo) : Repository variables bundled together
variable_type (string): which type of variable should be retrieved.
Supported are: contributors, languages, readmes
keys (list): A list of the keys for the retrieved data
Returns:
boolean: Whether the request was successful or not.
In case of unsuccessful request, skip repository
"""
# for nested data only, otherwise key can be directly used
if variable_type in ("contributors", "languages"):
data[variable_type] = []
retrieved_data = get_data_from_api(service, repo, variable_type, verbose=False)
if retrieved_data is not None:
if variable_type in ("contributors", "languages"):
for entry in retrieved_data:
data[variable_type].append(dict(zip(keys, entry[1:])))
elif variable_type == "readmes":
data[keys[0]] = retrieved_data[1]
else:
return False
time.sleep(2)
return True
|
32361d85fb92efd03b79f74f8db2e02a8fcd9866
| 3,647,018
|
def part1(data):
"""
>>> part1(read_input())
0
"""
return data
|
1482c41b112a3e74775e71c4aabbd588de2b6553
| 3,647,019
|
import torch
def get_rectanguloid_mask(y, fat=1):
"""Get a rectanguloid mask of the data"""
M = y.nonzero().max(0)[0].tolist()
m = y.nonzero().min(0)[0].tolist()
M = [min(M[i] + fat, y.shape[i] - 1) for i in range(3)]
m = [max(v - fat, 0) for v in m]
mask = torch.zeros_like(y)
mask[m[0] : M[0], m[1] : M[1], m[2] : M[2]] = 1
return mask
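
A usage sketch on a small voxel grid; the mask covers the bounding box of the nonzero region, dilated by fat:

import torch

y = torch.zeros(8, 8, 8)
y[2:4, 3:5, 4:6] = 1.0
mask = get_rectanguloid_mask(y, fat=1)
print(int(mask.sum()))  # 27: the box [1:4, 2:5, 3:6]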
|
0ff3ab25f2ab109eb533c7e4fafd724718dbb986
| 3,647,020
|
import re
def colorize_output(output):
"""Add HTML colors to the output."""
# Task status
color_output = re.sub(r'(ok: [-\w\d\[\]]+)',
r'<font color="green">\g<1></font>',
output)
color_output = re.sub(r'(changed: [-\w\d\[\]]+)',
r'<font color="orange">\g<1></font>',
color_output)
if not re.search(r'failed: 0', color_output):
color_output = re.sub(r'(failed: [-\w\d\[\]]+)',
r'<font color="red">\g<1></font>',
color_output)
color_output = re.sub(r'(fatal: [-\w\d\[\]]+):',
r'<font color="red">\g<1></font>',
color_output)
# Play recap
color_output = re.sub(r'(ok=[\d]+)',
r'<font color="green">\g<1></font>',
color_output)
color_output = re.sub(r'(changed=[\d]+)',
r'<font color="orange">\g<1></font>',
color_output)
color_output = re.sub(r'(failed=[1-9][0-9]*)',
r'<font color="red">\g<1></font>',
color_output)
return color_output
|
80759da16262d850b45278faede4b60b7aa4a7c6
| 3,647,021
|
def parse_user_date(usr_date: str) -> date:
"""
Parses a user's date input, prompts the user to input useful date data if user's date was
invalid
Args:
usr_date : str, user input of date info. Should be in <yyyy/mm/dd> format
Returns:
valid datetime.date() object
"""
expected_len = len("yyyy/mm/dd")
if usr_date is None:
return prompt_user_date()
try:
dt_list = usr_date[0:expected_len].split("/")
# Ensure right number of fields
if len(dt_list) >= 3:
try:
# Ensure year is long enough to be useful
if len(dt_list[0]) == 4:
year = int(dt_list[0])
else:
raise BreakoutError()
# set rest of info
month = int(dt_list[1])
day = int(dt_list[2])
# deal with bad user characters
except ValueError:
raise BreakoutError()
# create date if user isn't a dingus
calendar_date = date(year, month, day)
else:
raise BreakoutError()
except BreakoutError:
# Make user give us a useful date if they are a dingus
calendar_date = prompt_user_date()
return calendar_date
|
10becdce6ef4fdc5606ce110b09e102c186dfc04
| 3,647,023
|
def up_sampling_block(x, n_filter, kernel_size, name,
activation='relu', up_size=(2, 2)):
"""Xception block
x => sepconv block -> sepconv block -> sepconv block-> add(Act(x)) =>
"""
x = layers.UpSampling2D(size=up_size, name=name+'up')(x)
if activation:
x = layers.Activation('relu', name=name+'_act')(x)
x = sepconv_bn_relu(x, n_filter, kernel_size, padding='same', activation=None, name=name+'_sepconv1')
return x
|
001fdb6475da138bedfdb891af6e657e5ce6160c
| 3,647,024
|
def connected_components(graph):
"""
Connected components.
@attention: Indentification of connected components is meaningful only for non-directed graphs.
@type graph: graph
@param graph: Graph.
@rtype: dictionary
@return: Pairing that associates each node to its connected component.
"""
visited = {}
count = 1
# For 'each' node not found to belong to a connected component, find its connected component.
for each in graph:
if (each not in visited):
_dfs(graph, visited, count, each)
count = count + 1
return visited
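# Illustrative usage (added example; assumes the module's _dfs helper marks every reachable
# node with the given component number):
# connected_components({1: [2], 2: [1], 3: []}) -> {1: 1, 2: 1, 3: 2}
# i.e. nodes 1 and 2 share component 1, while node 3 forms component 2.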
|
80c5bfc679c1dc274db6a3bf8f8becfa1fc99d4f
| 3,647,025
|
import typing
import urwid
def format_keyvals(
entries: typing.Iterable[typing.Tuple[str, typing.Union[None, str, urwid.Widget]]],
key_format: str = "key",
value_format: str = "text",
indent: int = 0
) -> typing.List[urwid.Columns]:
"""
Format a list of (key, value) tuples.
Args:
entries: The list to format. keys must be strings, values can also be None or urwid widgets.
The latter makes it possible to use the result of format_keyvals() as a value.
key_format: The display attribute for the key.
value_format: The display attribute for the value.
indent: Additional indent to apply.
"""
max_key_len = max((len(k) for k, v in entries if k is not None), default=0)
max_key_len = min(max_key_len, KEY_MAX)
if indent > 2:
indent -= 2 # We use dividechars=2 below, which already adds two empty spaces
ret = []
for k, v in entries:
if v is None:
v = urwid.Text("")
elif not isinstance(v, urwid.Widget):
v = urwid.Text([(value_format, v)])
ret.append(
urwid.Columns(
[
("fixed", indent, urwid.Text("")),
(
"fixed",
max_key_len,
urwid.Text([(key_format, k)])
),
v
],
dividechars=2
)
)
return ret
|
eb1769a3d7b47b6b4f24f02dcffd3639592c8dc6
| 3,647,026
|
def get_item_workdays(scorecard):
""" Gets the number of days in this period"""
supplier = frappe.get_doc('Supplier', scorecard.supplier)
total_item_days = frappe.db.sql("""
SELECT
SUM(DATEDIFF( %(end_date)s, po_item.schedule_date) * (po_item.qty))
FROM
`tabPurchase Order Item` po_item,
`tabPurchase Order` po
WHERE
po.supplier = %(supplier)s
AND po_item.received_qty < po_item.qty
AND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s
AND po_item.parent = po.name""",
{"supplier": supplier.name, "start_date": scorecard.start_date, "end_date": scorecard.end_date}, as_dict=0)[0][0]
if not total_item_days:
total_item_days = 0
return total_item_days
|
cec620114ae784e5c272d41b6e1028175b466691
| 3,647,027
|
def time_human(x):
""" Gets time as human readable """
# Round time
x = round(x, 2)
for number, unit in [(60, "s"), (60, "min"), (24, "h"), (365, "days")]:
if abs(x) < number:
return f"{x:.2f} {unit}"
x /= number
return f"{x:.2f} years"
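# Illustrative usage (added example, not part of the original snippet):
assert time_human(42) == "42.00 s"
assert time_human(75) == "1.25 min"
assert time_human(3 * 86400) == "3.00 days"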
|
3f7f51ac7454e429fc30da64eed075aaf1f10b5b
| 3,647,029
|
from typing import Dict
def transaction_json_to_binary_codec_form(
dictionary: Dict[str, XRPL_VALUE_TYPE]
) -> Dict[str, XRPL_VALUE_TYPE]:
"""
Returns a new dictionary in which the keys have been formatted as CamelCase and
standardized to be serialized by the binary codec.
Args:
dictionary: The dictionary to be reformatted.
Returns:
A new dictionary object that has been reformatted.
"""
# This method should be made private when it is removed from `xrpl.transactions`
return {
_key_to_tx_json(key): _value_to_tx_json(value)
for (key, value) in dictionary.items()
}
|
94516b8418fc25d1966d6f5c969f9b4e411100ab
| 3,647,030
|
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1, groups=1):
    """1D convolution with kernel size 7 and padding 3"""
    return nn.Conv1d(in_planes, out_planes, kernel_size=7, stride=stride,
                     padding=3, bias=False, groups=groups)
|
90fa7549a2ba8722edab3712bac4d3af7fb5f2f2
| 3,647,031
|
def limit_sub_bbox(bbox, sub_bbox):
"""
>>> limit_sub_bbox((0, 1, 10, 11), (-1, -1, 9, 8))
(0, 1, 9, 8)
>>> limit_sub_bbox((0, 0, 10, 10), (5, 2, 18, 18))
(5, 2, 10, 10)
"""
minx = max(bbox[0], sub_bbox[0])
miny = max(bbox[1], sub_bbox[1])
maxx = min(bbox[2], sub_bbox[2])
maxy = min(bbox[3], sub_bbox[3])
return minx, miny, maxx, maxy
|
fa5b7763b30442fba137814ac7b0336528c4540b
| 3,647,032
|
def _load_taxa_incorp_list(inFile, config):
"""Loading list of taxa that incorporate isotope.
Parameters
----------
inFile : str
File name of taxon list
config : config object
Returns
-------
{library:[taxon1, ...]}
"""
taxa = {}
    with open(inFile, 'r') as inFH:  # text mode so split('\t') works on str lines
for line in inFH:
line = line.rstrip().split('\t')
# if 1 column, using config-defined libraries
if len(line) == 1:
line = [[x,line[0]] for x in config.keys()]
else:
line = [line]
for x in line:
try:
taxa[x[0]].append(x[1])
except KeyError:
taxa[x[0]] = [x[1]]
return taxa
|
d614f2be0c5ad4fa61d1d70915428324d7af97b4
| 3,647,033
|
def get_subsections(config: Config) -> t.List[t.Tuple[str, t.Dict]]:
"""Collect parameter subsections from main configuration.
If the `parameters` section contains subsections (e.g. '[parameters.1]',
'[parameters.2]'), collect the subsection key-value pairs. Otherwise,
return an empty dictionary (i.e. there are no subsections).
This is useful for specifying multiple API keys for your configuration.
For example:
```
[parameters.alice]
api_key=KKKKK1
api_url=UUUUU1
[parameters.bob]
api_key=KKKKK2
api_url=UUUUU2
[parameters.eve]
api_key=KKKKK3
api_url=UUUUU3
```
"""
return [(name, params) for name, params in config['parameters'].items()
if isinstance(params, dict)] or [('default', {})]
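# Illustrative usage (added example; assumes `config` has already been parsed into nested dicts):
# config = {'parameters': {'alice': {'api_key': 'KKKKK1'}, 'bob': {'api_key': 'KKKKK2'}}}
# get_subsections(config) -> [('alice', {'api_key': 'KKKKK1'}), ('bob', {'api_key': 'KKKKK2'})]
# With no subsections it returns [('default', {})].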
|
0cb022fb6ae192736186a519c6ffbcf9bcfdf541
| 3,647,034
|
def infer_printed_type(t):
"""Infer the types that should be printed.
The algorithm is as follows:
1. Replace all constant types with None.
2. Apply type-inference on the resulting type.
3. For the first internal type variable that appears, find a constant
whose type contains that variable, set that constant to print_type.
4. Repeat until no internal type variables appear.
"""
def clear_const_type(t):
if t.is_const() and not hasattr(t, "print_type"):
t.backupT = t.T
t.T = None
elif t.is_comb():
clear_const_type(t.fun)
clear_const_type(t.arg)
elif t.is_abs():
if not hasattr(t, "print_type"):
t.backup_var_T = t.var_T
t.var_T = None
clear_const_type(t.body)
def recover_const_type(t):
if t.is_const():
t.T = t.backupT
elif t.is_comb():
recover_const_type(t.fun)
recover_const_type(t.arg)
elif t.is_abs():
t.var_T = t.backup_var_T
recover_const_type(t.body)
for i in range(100):
clear_const_type(t)
type_infer(t, forbid_internal=False)
def has_internalT(T):
return any(is_internal_type(subT) for subT in T.get_tsubs())
to_replace, to_replaceT = None, None
def find_to_replace(t):
nonlocal to_replace, to_replaceT
if (t.is_zero() or t.is_one() or \
(t.is_comb('of_nat', 1) and t.arg.is_binary() and t.arg.dest_binary() >= 2)) and \
has_internalT(t.get_type()):
replT = t.get_type()
if t.is_comb():
t = t.fun
if to_replace is None or replT.size() < to_replaceT.size():
to_replace = t
to_replaceT = replT
elif t.is_const() and has_internalT(t.T):
if to_replace is None or t.T.size() < to_replaceT.size():
to_replace = t
to_replaceT = t.T
elif t.is_abs():
if has_internalT(t.var_T):
if to_replace is None or t.var_T.size() < to_replaceT.size():
to_replace = t
to_replaceT = t.var_T
find_to_replace(t.body)
elif t.is_comb():
find_to_replace(t.fun)
find_to_replace(t.arg)
find_to_replace(t)
recover_const_type(t)
if to_replace is None:
break
to_replace.print_type = True
assert i != 99, "infer_printed_type: infinite loop."
return None
|
1ad880fc92db2e64ba6ea81f7481efa99b0bd044
| 3,647,036
|
import tensorflow as tf
def bias_variable(shape):
    """
    Return a bias variable of the given shape, initialized to 0.0
:param shape:
:return:
"""
b = tf.Variable(tf.constant(0.0, shape=shape))
return b
|
ff2bb945414508d1dfc1db0b028cf1feeebeb6d8
| 3,647,037
|
import numpy as np
from scipy.integrate import odeint
def drag_eqn(times,g,r):
"""define scenario and integrate"""
param = np.array([ g, r])
hinit = np.array([0.0,0.0]) # initial values (position and velocity, respectively)
h = odeint(deriv, hinit, times, args = (param,))
return h[:,0], h[:,1]
|
d79150dd894244c11fa882d62da2f33b1173c144
| 3,647,038
|
def virtual_potential_temperature_monc(theta, thref, q_v, q_cl):
"""
Virtual potential temperature.
Derived variable name: th_v_monc
Approximate form as in MONC
Parameters
----------
theta : numpy array or xarray DataArray
Potential Temperature. (K)
thref : numpy array or xarray DataArray
Reference Potential Temperature (usually 1D). (K)
q_v : numpy array or xarray DataArray
specific humidity
q_cl : numpy array or xarray DataArray
specific cloud liquid water content.
Returns
-------
theta_v: numpy array or xarray DataArray
Virtual potential temperature (K)
"""
th_v = theta + thref * (tc.c_virtual * q_v - q_cl)
if type(th_v) is xr.core.dataarray.DataArray:
th_v.name = 'th_v_monc'
return th_v
|
d4c3da0a5f4f2826edce53f610f8ba384845ebb2
| 3,647,039
|
def promote_user(username):
"""Give admin privileges from a normal user."""
user = annotator.credentials.find_one({'username': username})
if user:
if user['admin']:
flash("User {0} is already an administrator".format(username), 'warning')
else:
annotator.credentials.update_one(user, {'$set': {'admin': True}})
flash("User {0} promoted to administrator successfully".format(username), 'info')
else:
flash("Cannot promote unknown user {0} to administrator".format(username), 'warning')
return redirect(url_for('manage_users'))
|
6a938c341f152991741d35dfd1c693743c07f805
| 3,647,040
|
def slide_number_from_xml_file(filename):
"""
Integer slide number from filename
Assumes /path/to/Slidefile/somekindofSlide36.something
"""
return int(filename[filename.rfind("Slide") + 5:filename.rfind(".")])
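# Illustrative usage (added example, not part of the original snippet):
assert slide_number_from_xml_file("/path/to/Slidefile/someKindOfSlide36.xml") == 36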
|
dcfbc322b30a39041ab15b8496f097a5a5329865
| 3,647,041
|
import io
def massivescan(websites):
"""scan multiple websites / urls"""
# scan each website one by one
vulnerables = []
for website in websites:
io.stdout("scanning {}".format(website))
if scanner.scan(website):
io.stdout("SQL injection vulnerability found")
vulnerables.append(website)
if vulnerables:
return vulnerables
io.stdout("no vulnerable websites found")
return False
|
b2be56bf07d00c8839813d66acd337c75b9823ef
| 3,647,042
|
import re
def is_strong_pass(password):
"""
Verify the strength of 'password'
    Returns True if the password satisfies all criteria, False otherwise
    A password is considered strong if it has:
    8 characters or more
1 digit or more
1 symbol or more
1 uppercase letter or more
1 lowercase letter or more
"""
# calculating the length
length_error = len(password) < 8
# searching for digits
digit_error = re.search(r"\d", password) is None
# searching for uppercase
uppercase_error = re.search(r"[A-Z]", password) is None
# searching for lowercase
lowercase_error = re.search(r"[a-z]", password) is None
# searching for symbols
symbol_error = re.search(r"[ !#$@%&'()*+,-./[\\\]^_`{|}~" + r'"]', password) is None
# overall result
password_ok = not (length_error or digit_error or uppercase_error or lowercase_error or symbol_error)
return password_ok
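# Illustrative usage (added example, not part of the original snippet):
assert is_strong_pass("Aa1!aaaa")
assert not is_strong_pass("weakpassword")  # no digit, uppercase letter or symbol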
|
bfd1832951ba3059d8c542fa0b9d708a2416a4d4
| 3,647,043
|
def plot_config(config, settings=None):
"""
plot_config: obj -> obj
---------------------------------------------------------------
Sets the defaults for a custom experiment plot configuration object
from configobj.
The defaults are only set if the setting does not exist (thus,
it is implied that the user needs a default).
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Optional Parameters
-------------------
* settings: None
The global settings to use if it exists, otherwise
use the defaults.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
config = _global_config(config, settings)
config['plot_style'] = 'whitegrid' if 'plot_style' not in config else config['plot_style']
config['plot_color'] = 'gray' if 'plot_color' not in config else config['plot_color']
config['plot_dpi'] = 300 if 'plot_dpi' not in config else config['plot_dpi']
config['plot_ext'] = '.png' if 'plot_ext' not in config else config['plot_ext']
return config
|
3b17e97c68bcec31856cb0dc4d7f3db4280a748f
| 3,647,044
|
def evaluate_fN(model, NHI):
""" Evaluate an f(N,X) model at a set of NHI values
Parameters
----------
    model : fN model object
        Model evaluated at the given NHI values
    NHI : array
log NHI values
Returns
-------
log_fN : array
f(NHI,X) values
"""
# Evaluate without z dependence
log_fNX = model.__call__(NHI)
return log_fNX
|
e952a29fdf5864b26dc534140b2ccfb0b59fe24b
| 3,647,046
|
def pipe(bill_texts_df):
"""
soup = bs(text, 'html.parser')
raw_text = extractRawText(soup)
clean_text = cleanRawText(raw_text)
metadata = extract_metadata(soup)
"""
bill_texts_df['soup'] = \
bill_texts_df['html'].apply(lambda x: bs(x, 'html.parser'))
bill_texts_df['content'] = \
bill_texts_df['soup'].apply(lambda x: extractRawText(x.body))
bill_texts_df['long_title'] = \
bill_texts_df['soup'].apply(lambda x: extractLongTitle(x.body))
bill_texts_df['table_info'] = \
bill_texts_df['soup'].apply(lambda x: extractTableContent(x.body))
return None
|
73a8a850fa15f8ad33f9f823f9b2b4d6f808826b
| 3,647,048
|
def _as_static(data, fs):
"""Get data into the Pyglet audio format."""
fs = int(fs)
if data.ndim not in (1, 2):
raise ValueError('Data must have one or two dimensions')
n_ch = data.shape[0] if data.ndim == 2 else 1
audio_format = AudioFormat(channels=n_ch, sample_size=16,
sample_rate=fs)
data = data.T.ravel('C')
data[data < -1] = -1
data[data > 1] = 1
    data = (data * (2 ** 15)).astype('int16').tobytes()  # tobytes(); tostring() was removed from NumPy
return StaticMemorySourceFixed(data, audio_format)
|
b76d4c49107f8b9679e975bd2ce114314289d181
| 3,647,049
|
def preprocess_data(cubes, time_slice: dict = None):
"""Regrid the data to the first cube and optional time-slicing."""
# Increase TEST_REVISION anytime you make changes to this function.
if time_slice:
cubes = [extract_time(cube, **time_slice) for cube in cubes]
first_cube = cubes[0]
# regrid to first cube
regrid_kwargs = {
'grid': first_cube,
'scheme': iris.analysis.Nearest(),
}
cubes = [cube.regrid(**regrid_kwargs) for cube in cubes]
return cubes
|
82e851bda39a4ab7716c7b9cd6038743961d9faf
| 3,647,050
|
import base64
from Crypto.Cipher import AES, DES3
def password_to_str(password):
    """
    Encrypt a password (AES, then 3DES, each base64-encoded)
    :param password:
    :return:
    """
    def add_to_16(password):
        while len(password) % 16 != 0:
            password += '\0'
        return str.encode(password)  # return bytes
    key = 'saierwangluo'  # secret key
    aes = AES.new(add_to_16(key), AES.MODE_ECB)  # initialise the AES cipher
    des3 = DES3.new(add_to_16(key), DES3.MODE_ECB)  # initialise the 3DES cipher
    # AES encryption
    encrypted_text = str(
        base64.encodebytes(
            aes.encrypt(add_to_16(password))), encoding='utf8'
    ).replace('\n', '')
    des_encrypted_text = str(
        base64.encodebytes(des3.encrypt(add_to_16(encrypted_text))), encoding='utf8'
    ).replace('\n', '')  # 3DES encryption
    # return the encrypted data
    return des_encrypted_text
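# Illustrative usage (added example; assumes the pycryptodome package provides Crypto.Cipher):
# password_to_str("my_password") returns a base64 string of the AES-then-3DES encrypted input.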
|
60a6d361d6de3c41d2a27cd24312006920ad1013
| 3,647,051
|
from src.Emails.checker.mailru import checker
import re
import requests
def email_checker_mailru(request: Request, email: str):
"""
    This API checks an email address on mail.ru<br>
<pre>
:return: JSON<br>
</pre>
Example:<br>
<br>
<code>
    https://server1.majhcc.xyz/api/email/checker/mailru?email=oman4omani@mail.ru
    </code>
"""
# regex mail.ru
    if re.match(r"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@mail\.ru$", email):  # require a non-empty local part
try:
result = checker(email)
            if result is None:
                return {
                    'status': 'error please try again or contact us ==> instagram: @majhcc'
                }
            elif result:
                return {
                    'status': 'success',
                    'available': True
                }
            else:
                return {
                    'status': 'success',
                    'available': False
                }
except Exception as e:
data = {
'content': f'Check email from mail.ru api Error: ***{str(e)}***'
}
requests.post(WEBHOOKURL, data=data)
return {
'status': 'error please try again or contact us ==> instagram: @majhcc'}
else:
return {
'status': 'error',
'result': 'Invalid email'
}
|
2835439d3c7781efa0c244c881f42a404a8d3cad
| 3,647,052
|
from typing import Callable
def guild_only() -> Callable:
"""A :func:`.check` that indicates this command must only be used in a
guild context only. Basically, no private messages are allowed when
using the command.
This check raises a special exception, :exc:`.NoPrivateMessage`
that is inherited from :exc:`.CheckFailure`.
"""
def predicate(ctx: InteractionContext) -> bool:
if ctx.guild is None:
raise NoPrivateMessage()
return True
return check(predicate)
|
40307b2a8672180b2a3532380f11b2701bcf0dd8
| 3,647,053
|
from typing import Union
from typing import List
from typing import Callable
from typing import Any
from typing import Sequence
def make_lvis_metrics(
save_folder=None,
filename_prefix="model_output",
iou_types: Union[str, List[str]] = "bbox",
summarize_to_stdout: bool = True,
evaluator_factory: Callable[
[Any, List[str]], DetectionEvaluator
] = LvisEvaluator,
gt_api_def: Sequence[
SupportedDatasetApiDef
] = DEFAULT_SUPPROTED_DETECTION_DATASETS,
):
"""
Returns an instance of :class:`DetectionMetrics` initialized for the LVIS
dataset.
:param save_folder: path to the folder where to write model output
files. Defaults to None, which means that the model output of
test instances will not be stored.
:param filename_prefix: prefix common to all model outputs files.
Ignored if `save_folder` is None. Defaults to "model_output"
:param iou_types: list of (or a single string) strings describing
the iou types to use when computing metrics.
Defaults to "bbox". Valid values are "bbox" and "segm".
:param summarize_to_stdout: if True, a summary of evaluation metrics
will be printed to stdout (as a table) using the Lvis API.
Defaults to True.
:param evaluator_factory: Defaults to :class:`LvisEvaluator` constructor.
:param gt_api_def: Defaults to the list of supported datasets (LVIS is
supported in Avalanche through class:`LvisDataset`).
:return: A metric plugin that can compute metrics on the LVIS dataset.
"""
return DetectionMetrics(
evaluator_factory=evaluator_factory,
gt_api_def=gt_api_def,
save_folder=save_folder,
filename_prefix=filename_prefix,
iou_types=iou_types,
summarize_to_stdout=summarize_to_stdout,
)
|
cbb3df8d8e9daa7976a7be7d6c0588e943aecd5e
| 3,647,054
|
def _calculate_cos_loop(graph, threebody_cutoff=4.0):
"""
Calculate the cosine theta of triplets using loops
Args:
graph: List
Returns: a list of cosine theta values
"""
pair_vector = get_pair_vector_from_graph(graph)
_, _, n_sites = tf.unique_with_counts(graph[Index.BOND_ATOM_INDICES][:, 0])
start_index = 0
cos = []
for n_site in n_sites:
for i in range(n_site):
for j in range(n_site):
if i == j:
continue
vi = pair_vector[i + start_index].numpy()
vj = pair_vector[j + start_index].numpy()
di = np.linalg.norm(vi)
dj = np.linalg.norm(vj)
if (di <= threebody_cutoff) and (dj <= threebody_cutoff):
                    cos.append(vi.dot(vj) / (di * dj))
start_index += n_site
return cos
|
3a3283a67c743b2bb7f7a9627e6847dcfc286276
| 3,647,055
|
def load_plugin():
""" Returns plugin available in this module
"""
return HostTestPluginCopyMethod_Firefox()
|
26df11d662b3d4f98a294df9c61841c1ab76e8fc
| 3,647,056
|
import logging
def temp_url_page(rid):
"""
Temporary page where receipts are stored. The user, which visits it first, get the receipt.
:param rid: (str) receipt id (user is assigned to receipt with this id)
"""
if not user_handler.assign_rid_user(rid, flask.session['username']):
        logging.warning('Trying to steal receipt! {ip} has visited page: {url}! Cancelling request!'.
format(ip=flask.request.remote_addr, url=flask.request.url))
flask.abort(400)
return
return flask.redirect(flask.url_for('dashboard_page'))
|
e5ebfe4602e427b4d96cdf1c0298057a5b472052
| 3,647,057
|
def extract_dependencies(content):
"""
Extract the dependencies from the CMake code.
The `find_package()` and `pkg_check_modules` calls must be on a single line
and the first argument must be a literal string for this function to be
able to extract the dependency name.
:param str content: The CMake source code
    :returns: The dependency names
    :rtype: set
"""
return \
extract_find_package_calls(content) | \
_extract_pkg_config_calls(content)
|
d9f114695cb3622f4a8dbc23db3a97ed53b164ad
| 3,647,058
|
def _block(x, out_channels, name, conv=conv2d, kernel=(3, 3), strides=(2, 2), dilations=(1, 1), update_collection=None,
act=tf.nn.leaky_relu, pooling='avg', padding='SAME', batch_norm=False):
"""Builds the residual blocks used in the discriminator in GAN.
Args:
x: The 4D input vector.
out_channels: Number of features in the output layer.
name: The variable scope name for the block.
conv: Convolution function. Options conv2d or snconv2d
kernel: The height and width of the convolution kernel filter (Default value = (3, 3))
strides: Rate of convolution strides (Default value = (2, 2))
dilations: Rate of convolution dilation (Default value = (1, 1))
        update_collection: The update collections used in the spectral_normed_weight. (Default value = None)
        act: The activation function used in the block. (Default value = tf.nn.leaky_relu)
pooling: Strategy of pooling. Default: average pooling. Otherwise, no pooling, just using strides
padding: Padding type (Default value = 'SAME')
batch_norm: A flag that determines if batch norm should be used (Default value = False)
Returns:
A tensor representing the output of the operation.
"""
with tf.variable_scope(name):
if batch_norm:
bn0 = BatchNorm(name='bn_0')
bn1 = BatchNorm(name='bn_1')
input_channels = x.shape.as_list()[-1]
x_0 = x
x = conv(x, out_channels, kernel, dilations=dilations, name='conv1', padding=padding)
if batch_norm:
x = bn0(x)
x = act(x, name="before_downsampling")
x = down_sampling(x, conv, pooling, out_channels, kernel, strides, update_collection, 'conv2', padding)
if batch_norm:
x = bn1(x)
if strides[0] > 1 or strides[1] > 1 or input_channels != out_channels:
x_0 = down_sampling(x_0, conv, pooling, out_channels, kernel, strides, update_collection, 'conv3',
padding)
out = x_0 + x # No RELU: http://torch.ch/blog/2016/02/04/resnets.html
return out
|
21851730e1326b85023d88661da13020c37aa723
| 3,647,059
|
import numpy as np
def createLaplaceGaussianKernel(sigma, size):
    """Build a Laplacian-of-Gaussian (LoG) convolution kernel
    Args:
        sigma ([float]): standard deviation of the Gaussian
        size ([tuple]): kernel size (height, width), odd numbers
    Returns:
        [ndarray]: the Laplacian-of-Gaussian kernel
    """
    H, W = size
    r, c = np.mgrid[0:H:1, 0:W:1]
    r = r - (H - 1) / 2
    c = c - (W - 1) / 2
    sigma2 = pow(sigma, 2.0)
    norm2 = np.power(r, 2.0) + np.power(c, 2.0)
    LoGKernel = (norm2 / sigma2 - 2) * np.exp(-norm2 / (2 * sigma2))
    return LoGKernel
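# Illustrative usage (added example, not part of the original snippet):
log_kernel = createLaplaceGaussianKernel(sigma=1.0, size=(5, 5))
# log_kernel is a 5x5 ndarray; convolving an image with it (e.g. scipy.signal.convolve2d)
# approximates the Laplacian-of-Gaussian response used for blob and edge detection.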
|
aae788ba324a243691391a61b02e6a5f1b358c4e
| 3,647,060
|
import warnings
def mean_bias_removal(hindcast, alignment, cross_validate=True, **metric_kwargs):
"""Calc and remove bias from py:class:`~climpred.classes.HindcastEnsemble`.
Args:
hindcast (HindcastEnsemble): hindcast.
alignment (str): which inits or verification times should be aligned?
- maximize/None: maximize the degrees of freedom by slicing ``hind`` and
``verif`` to a common time frame at each lead.
- same_inits: slice to a common init frame prior to computing
metric. This philosophy follows the thought that each lead should be
based on the same set of initializations.
- same_verif: slice to a common/consistent verification time frame prior
to computing metric. This philosophy follows the thought that each lead
should be based on the same set of verification dates.
cross_validate (bool): Use properly defined mean bias removal function. This
excludes the given initialization from the bias calculation. With False,
include the given initialization in the calculation, which is much faster
but yields similar skill with a large N of initializations.
Defaults to True.
Returns:
HindcastEnsemble: bias removed hindcast.
"""
if hindcast.get_initialized().lead.attrs["units"] != "years":
warnings.warn(
"HindcastEnsemble.remove_bias() is still experimental and is only tested "
"for annual leads. Please consider contributing to "
"https://github.com/pangeo-data/climpred/issues/605"
)
def bias_func(a, b, **kwargs):
return a - b
bias_metric = Metric("bias", bias_func, True, False, 1)
# calculate bias lead-time dependent
bias = hindcast.verify(
metric=bias_metric,
comparison="e2o",
dim=[], # not used by bias func, therefore best to add [] here
alignment=alignment,
**metric_kwargs,
).squeeze()
# how to remove bias
if cross_validate: # more correct
mean_bias_func = _mean_bias_removal_cross_validate
else: # faster
mean_bias_func = _mean_bias_removal_quick
bias_removed_hind = mean_bias_func(hindcast._datasets["initialized"], bias, "init")
bias_removed_hind = bias_removed_hind.squeeze()
# remove groupby label from coords
for c in ["dayofyear", "skill", "week", "month"]:
if c in bias_removed_hind.coords and c not in bias_removed_hind.dims:
del bias_removed_hind.coords[c]
# replace raw with bias reducted initialized dataset
hindcast_bias_removed = hindcast.copy()
hindcast_bias_removed._datasets["initialized"] = bias_removed_hind
return hindcast_bias_removed
|
01155462155d9f718fa2a12053297903d47b6661
| 3,647,063
|
def index():
"""
    Main view
"""
return "<i>API RestFull PARCES Version 0.1</i>"
|
8b8b963f75395df665bcf0283528c9641b3ea20e
| 3,647,065
|
def tag(dicts, key, value):
"""Adds the key value to each dict in the sequence"""
for d in dicts:
d[key] = value
return dicts
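# Illustrative usage (added example, not part of the original snippet):
records = tag([{"id": 1}, {"id": 2}], "source", "import")
# records == [{"id": 1, "source": "import"}, {"id": 2, "source": "import"}]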
|
ffcfda13845fb8b522e50211184104a11da50398
| 3,647,066
|
def openpairshelf(filename, flag='c', protocol=None, writeback=False):
"""Returns a ProteinPairDB object, with similar functionality to shelve.open()"""
return ProteinPairDB(filename, flag, protocol, writeback)
|
886a474aa67f729461995fe5427d5f68b9db9fe0
| 3,647,067
|
def createUser(emailid, password, contact_no, firstname, lastname, category, address, description, company_url, image_url, con=None, cur=None, db=None):
"""
Tries to create a new user with the given data.
Returns:
- dict: dict object containing all user data, if query was successfull
- False: If query was unsuccessful
"""
sql = """Insert into users(
emailid,
password,
firstname,
lastname,
contact_no,
category,
address,
description,
company_url,
image_url
) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
db(sql, (emailid,
password,
firstname,
lastname,
contact_no,
category,
address,
description,
company_url,
image_url))
con.commit()
    # fetch the newly created user
user = getUserUsingEmail(emailid)
return user or False
|
05dc71db991e126d43fd9ddd044f1cf65f3e97c1
| 3,647,068
|
async def discordView(cls:"PhaazebotWeb", WebRequest:ExtendedRequest) -> Response:
"""
Default url: /discord/view/{guild_id:\d+}
"""
PhaazeDiscord:"PhaazebotDiscord" = cls.BASE.Discord
if not PhaazeDiscord:
return await cls.Tree.errors.notAllowed(cls, WebRequest, msg="Discord module is not active")
guild_id:str = WebRequest.match_info.get("guild_id", "")
Guild:discord.Guild = discord.utils.get(PhaazeDiscord.guilds, id=int(guild_id))
if not Guild:
return await cls.Tree.Discord.discordinvite.discordInvite(WebRequest, msg=f"Phaaze is not on this Server", guild_id=guild_id)
ViewPage:HTMLFormatter = HTMLFormatter("Platforms/Web/Content/Html/Discord/view.html")
ViewPage.replace(
guild_id=Guild.id,
guild_icon_url=Guild.icon_url,
guild_name=Guild.name
)
site:str = cls.HTMLRoot.replace(
replace_empty=True,
title="Phaaze | Discord - View",
header=getNavbar(active="discord"),
main=ViewPage
)
return cls.response(
body=site,
status=200,
content_type='text/html'
)
|
76f222bdd5164c23c95803d47fc1af48d89192e2
| 3,647,070
|
def update_max_braking_decel(vehicle, mbd):
"""
Updates the max braking decel of the vehicle
:param vehicle: vehicle
:param mbd: new max braking decel
:type vehicle: VehicleProfile
:return: Updated vehicle
"""
return vehicle.update_max_braking_decel(mbd)
|
dea3bf14ca14363246539fd81cf853cd2c0ad980
| 3,647,071
|
import numpy as np
from scipy.spatial.distance import pdist, squareform
def get_outlier_removal_mask(xcoords, ycoords, nth_neighbor=10, quantile=.9):
    """
    Parameters
    ----------
    xcoords :
        x coordinates of the points
    ycoords :
        y coordinates of the points
    nth_neighbor :
        which nearest-neighbor distance to use for each point (Default value = 10)
    quantile :
        distance quantile above which points are treated as outliers (Default value = .9)
    Returns
    -------
    boolean mask that is True for the points to keep
    """
D = squareform(pdist(np.vstack((xcoords, ycoords)).T))
distances = D[np.argsort(D, axis=0)[nth_neighbor - 1, :], 0]
return distances <= np.quantile(distances, quantile)
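# Illustrative usage (added example, not part of the original snippet):
rng = np.random.default_rng(0)
xs, ys = rng.normal(size=200), rng.normal(size=200)
keep = get_outlier_removal_mask(xs, ys, nth_neighbor=10, quantile=0.9)
xs_clean, ys_clean = xs[keep], ys[keep]  # drops roughly the 10% most isolated points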
|
8d01088401405613696ced2dbbd9c03940417f10
| 3,647,072
|