| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
def hrnetv2_w32(**kwargs):
"""
HRNetV2-W32 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_hrnet(version="w32", model_name="hrnetv2_w32", **kwargs)
|
859642b2631457fd3fd8389370d2618666269ebe
| 3,643,556
|
from .utils.globalcache import c
def medicare_program_engagement():
"""
Produces a wide dataset at the NPI level that shows when a provider entered
and exited the three different medicare databases: Part B, Part D, and
Physician Compare
"""
partd = part_d_files(summary=True,
usecols=['npi', 'total_claim_count'])
partd_engage = (partd.assign(PartD_Max_Year=lambda df: df.Year,
PartD_Min_Year=lambda df: df.Year)
.groupby('npi', as_index=False)
.agg({'PartD_Min_Year': min, 'PartD_Max_Year': max})
)
partb = part_b_files(summary=True,
columns=['National Provider Identifier',
'Number of Medicare Beneficiaries'])
partb_engage = (partb.assign(PartB_Max_Year=lambda df: df.Year,
PartB_Min_Year=lambda df: df.Year)
.groupby('National Provider Identifier',
as_index=False)
.agg({'PartB_Min_Year': min, 'PartB_Max_Year': max})
.rename(columns={'National Provider Identifier':
'npi'}))
pc = c.physician_compare_select_vars([],
drop_duplicates=False,
date_var=True)
pc_engage = (pc.assign(Year=pc.date.dt.year)
.drop(columns='date')
.drop_duplicates())
pc_engage = (pc_engage.assign(PC_Max_Year=lambda df: df.Year,
PC_Min_Year=lambda df: df.Year)
.groupby('NPI', as_index=False)
.agg({'PC_Min_Year': min, 'PC_Max_Year': max})
.rename(columns={'NPI': 'npi'}))
df = (pc_engage
.merge(partd_engage, how='outer')
.merge(partb_engage, how='outer')
          .astype({x: 'Int64' for x in pc_engage.columns}))
df.loc[((df.PC_Max_Year == 2020)
| (df.PartD_Max_Year == 2017)
| (df.PartB_Max_Year == 2017))
& ~((df.PartD_Max_Year.notnull()
& df.PartB_Max_Year.notnull()
& (df.PC_Max_Year < 2020))), 'maybe_active'] = True
df = df.assign(maybe_active=df.maybe_active.fillna(False))
df.loc[df.PC_Max_Year == 2020, 'active_2020'] = True
df = df.assign(active_2020=df.active_2020.fillna(False))
return df
|
3a4bd0545473f229c8452680fc38c6ded2cb14bf
| 3,643,557
|
def _is_bumf(value):
"""
Return true if this value is filler, en route to skipping over empty lines
:param value: value to check
:type value: object
:return: whether the value is filler
:rtype: bool
"""
    if isinstance(value, str):
return value.strip() == ''
return value is None
|
1812e82036ed4bdbdee4e2e032886ac2c788a5ff
| 3,643,558
|
def pos_tag(words, engine="unigram", corpus="orchid"):
    """
    Part of Speech tagging function.
    :param list words: a list of tokenized words
    :param str engine:
        * unigram - unigram tagger (default)
        * perceptron - perceptron tagger
        * artagger - RDR POS tagger
    :param str corpus:
        * orchid - annotated Thai academic articles
        * pud - Parallel Universal Dependencies (PUD) treebanks
    :return: a list of (word, tag) tuples indicating the part of speech of each word
    """
    if not words:
        return []
    if engine == "perceptron":
        from .perceptron import tag as tag_
    elif engine == "artagger":

        def tag_(words, corpus=None):
            if not words:
                return []
            from artagger import Tagger

            words_ = Tagger().tag(" ".join(words))
            return [(word.word, word.tag) for word in words_]

    else:  # default, use "unigram" ("old") engine
        from .unigram import tag as tag_
    return tag_(words, corpus=corpus)
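# Illustrative call (hypothetical Thai tokens; the returned tags depend on the
# selected engine and corpus, so no output is asserted here):
# pos_tag(["แมว", "กิน", "ปลา"], engine="unigram", corpus="orchid")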
|
8c8328950fba9082220d9c6be3b9fc8f9e6c3332
| 3,643,559
|
import warnings
def derive(control):
"""
gui.derive will be removed after mGui 2.2; for now it's going to issue a deprecation warning and call `wrap()`
"""
warnings.warn("gui.derive() should be replaced by gui.wrap()", PendingDeprecationWarning)
return wrap(control)
|
a2f463c9a66425e5066c504803b5754c2260cbc9
| 3,643,560
|
import base64
def hex_to_base64(hex_):
""" Converts hex string to base64 """
return base64.b64encode(bytes.fromhex(hex_))
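# Minimal usage check (illustrative values, not from the original source):
# "4d616e" is the hex encoding of b"Man", and base64.b64encode(b"Man") == b"TWFu".
assert hex_to_base64("4d616e") == b"TWFu"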
|
26f42b25c9e804bc1b786aadab033db104882f4b
| 3,643,561
|
def dt2iso(orig_dt):
"""datetime to is8601 format."""
return timeutils.isotime(orig_dt)
|
9887db04c4b3703a4f0c43c874c8d907cc744ea5
| 3,643,562
|
def catalog(access_token, user_id, query=None): # noqa: E501
"""Query the list of all the RDF graphs' names (URIs) and the response will be JSON format.
# noqa: E501
:param access_token: Authorization access token string
:type access_token: dict | bytes
:param user_id: the ID of the organization of the client application
:type user_id: str
    :param query: SPARQL query expression (max 1536 characters). Note that the common lower limit on the entire URL length is 2048 characters. The SPARQL query string must be URL-encoded; the example below is shown un-encoded to make the SPARQL content readable.
:type query: str
:rtype: GraphListType
"""
if connexion.request.is_json:
access_token = AccessToken.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
|
f3819e76be5a1559f60140542d151de1f1b50b0e
| 3,643,563
|
import json
def _make_chrome_policy_json():
"""Generates the json string of chrome policy based on values in the db.
This policy string has the following form:
{
"validProxyServers": {"Value": map_of_proxy_server_ips_to_public_key},
"enforceProxyServerValidity": {"Value": boolean}
}
Returns:
A json string of current chrome policy.
"""
proxy_servers = models.ProxyServer.query.all()
proxy_server_dict = {}
for server in proxy_servers:
proxy_server_dict[server.ip_address] = (
server.get_public_key_as_authorization_file_string())
proxy_server_value_dict = {"Value" : proxy_server_dict}
config = ufo.get_user_config()
config_value_dict = {"Value" : config.proxy_server_validity}
policy_dictionary = {
"validProxyServers": proxy_server_value_dict,
"enforceProxyServerValidity": config_value_dict,
}
return json.dumps(policy_dictionary)
|
629450bc9bb0c2c0ce61b25568a4689b20c89766
| 3,643,564
|
def get_rgb_color(party_id):
"""Get RGB color of party."""
if party_id not in PARTY_TO_COLOR_OR_PARTY:
return UNKNOWN_PARTY_COLOR
color_or_party = PARTY_TO_COLOR_OR_PARTY[party_id]
if isinstance(color_or_party, tuple):
return color_or_party
return get_rgb_color(color_or_party)
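# Hypothetical illustration of the alias-resolution behaviour (the real
# PARTY_TO_COLOR_OR_PARTY and UNKNOWN_PARTY_COLOR are defined elsewhere):
# with PARTY_TO_COLOR_OR_PARTY = {'party_a': (255, 0, 0), 'party_b': 'party_a'}
# get_rgb_color('party_b')  -> (255, 0, 0), resolved recursively via 'party_a'
# get_rgb_color('missing')  -> UNKNOWN_PARTY_COLOR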
|
18585d46551e1a1646e28d4371d68537e94975ac
| 3,643,565
|
from datetime import datetime
def view_application(application_id):
"""Views an application with ID.
Args:
application_id (int): ID of the application.
Returns:
str: redirect to the appropriate url.
"""
# Get user application.
application = ApplicationModel.query.filter_by(id=application_id).first()
isPersonalApplication = False
# Redirect if application does not exist.
if not application:
flash("Application with ID {} is not present in the database.".format(str(application_id)), 'danger')
current_app.logger.info("{} tried to view application with ID {} which does not exist in the database".format(current_user.name, str(application_id)))
return redirect(url_for('hr.index'))
# check if application is a personal application.
if current_user.application and current_user.application.id == application_id:
isPersonalApplication = True
# Check if application corp is the user's corp.
    if not isPersonalApplication and application.corporation.id != current_user.get_corp().id:
flash('That application is not to your corp.', 'danger')
current_app.logger.info("{} tried to view application which is not to their corporation.".format(current_user.name))
return redirect(url_for('hr.index'))
# Check if user is viewing a personal application or someone else's application.
if not isPersonalApplication and not current_user.has_permission('read_applications'):
flash("You do not have the required permission to view other people's applications.", "danger")
current_app.logger.info("{} tried to illegally access someone else's application but didn't have the required read_applications permission.".format(current_user.name))
return redirect(url_for('hr.index'))
# Make application forms.
removeApplicationForm = RemoveApplicationForm()
editApplicationForm = EditNoteForm(notes=application.character.notes)
# Removal of applications.
if request.method == 'POST':
# Check if notes were updated.
if 'btn' not in request.form:
if 'notes' in request.form and editApplicationForm.validate_on_submit():
oldNote = application.character.notes
application.character.notes = editApplicationForm.notes.data
Database.session.commit()
flash("Successfully updated note.", "success")
current_app.logger.info("{} updated {}'s note from '{}' to '{}'.".format(current_user.name, application.character.name, oldNote, editApplicationForm.notes.data))
return redirect(url_for('hr.view_application', application_id=application.id))
# Check other button presses.
if request.form['btn'] == "RemoveApplication":
# Check if application is valid.
if not removeApplicationForm.validate_on_submit():
flash('Please make sure you provide a reason when removing an application.', 'danger')
return redirect(url_for('hr.view_application', application_id=application.id))
characterName = application.character.name
corpName = application.corporation.name
rejectionReason = removeApplicationForm.rejection_reason.data
# Add note with rejection reason.
            # If there are already notes, add a newline first.
if application.character.notes:
application.character.notes += "\n"
application.character.notes += "Application removed ({}) by {}: {}".format(datetime.utcnow().strftime('%Y/%m/%d'), current_user.name, rejectionReason)
Database.session.delete(application)
Database.session.commit()
flash("Successfully removed application of {} to {}.".format(characterName, corpName), 'success')
current_app.logger.info("{} removed application of {} to {} with reason '{}'.".format(current_user.name, characterName, corpName, rejectionReason))
elif request.form['btn'] == "RemovePersonalApplication":
characterName = application.character.name
corpName = application.corporation.name
Database.session.delete(application)
Database.session.commit()
flash("Successfully removed application of {} to {}.".format(characterName, corpName), 'success')
current_app.logger.info("{} removed application of {} to {}.".format(current_user.name, characterName, corpName))
elif request.form['btn'] == "UpdateApplication":
application.ready_accepted = not application.ready_accepted
newStatus = "Ready to be accepted" if application.ready_accepted else "Being processed"
Database.session.commit()
flash("Successfully set {} application status to {}.".format(application.character.name, newStatus), 'success')
current_app.logger.info("{} edited status of {} application to {}".format(current_user.name, application.character.name, newStatus))
return redirect(url_for('hr.view_application', application_id=application.id))
return redirect(url_for('hr.index'))
return render_template('hr/view_application.html', application=application, personal_application=isPersonalApplication,
remove_form=removeApplicationForm, edit_form=editApplicationForm, discord_url=current_app.config['DISCORD_RECRUITMENT_INVITE'],
client_id=EveAPI['full_auth_preston'].client_id, client_secret=EveAPI['full_auth_preston'].client_secret, scopes=EveAPI['full_auth_preston'].scope)
|
dda04250b45a1a166c254b48039155e85ca62ea3
| 3,643,566
|
import logging

from joblib import Parallel, delayed  # assumed source of the Parallel/delayed helpers used below
def build_save_containers(platforms, bucket) -> int:
"""
Entry point to build and upload all built dockerimages in parallel
:param platforms: List of platforms
:param bucket: S3 bucket name
:return: 1 if error occurred, 0 otherwise
"""
if len(platforms) == 0:
return 0
platform_results = Parallel(n_jobs=len(platforms), backend="multiprocessing")(
delayed(_build_save_container)(platform, bucket)
for platform in platforms)
is_error = False
for platform_result in platform_results:
if platform_result is not None:
logging.error('Failed to generate {}'.format(platform_result))
is_error = True
return 1 if is_error else 0
|
9744577efabbd800c16e9c7f57c9c7b31654cec1
| 3,643,567
|
def get_object_record(obj_key):
"""
Query the object's record.
Args:
obj_key: (string) The key of the object.
Returns:
The object's data record.
"""
record = None
model_names = OBJECT_KEY_HANDLER.get_models(obj_key)
for model_name in model_names:
try:
# Get record.
model_obj = apps.get_model(settings.WORLD_DATA_APP, model_name)
record = model_obj.objects.get(key=obj_key)
break
        except Exception as e:
            ostring = "Can not get record %s: %s." % (obj_key, e)
            print(ostring)
            print(traceback.format_exc())
continue
return record
|
c32bd3f12babc4f7c30567d6f2529dd037e3e563
| 3,643,568
|
def diff_cars(c1, c2):
"""
diffs two cars
returns a DiffSet containing DiffItems that tell what's missing in c1
as compared to c2
:param c1: old Booking object
:param c2: new Booking object
:return: DiffSet (c1-c2)
"""
strategy = Differ.get_strategy(CAR_DIFF_STRATEGY)
return strategy.diff(c1, c2)
|
fda0e12bea0fd70fbed1a0e2c445941dc44f8cb7
| 3,643,569
|
import json
def main(request, response):
"""Helper handler for Beacon tests.
It handles two forms of requests:
STORE:
A URL with a query string of the form 'cmd=store&sid=<token>&tidx=<test_index>&tid=<test_name>'.
Stores the receipt of a sendBeacon() request along with its validation result, returning HTTP 200 OK.
Parameters:
tidx - the integer index of the test.
tid - a friendly identifier or name for the test, used when returning results.
STAT:
A URL with a query string of the form 'cmd=stat&sid=<token>&tidx_min=<min_test_index>&tidx_max=<max_test_index>'.
Retrieves the results of test with indices [min_test_index, max_test_index] and returns them as
a JSON array and HTTP 200 OK status code. Due to the eventual read-once nature of the stash, results for a given test
are only guaranteed to be returned once, though they may be returned multiple times.
Parameters:
tidx_min - the lower-bounding integer test index.
tidx_max - the upper-bounding integer test index.
Example response body:
[{"id": "Test1", error: null}, {"id": "Test2", error: "some validation details"}]
Common parameters:
cmd - the command, 'store' or 'stat'.
sid - session id used to provide isolation to a test run comprising multiple sendBeacon()
tests.
"""
    session_id = request.GET.first("sid")
    command = request.GET.first("cmd").lower()
# Workaround to circumvent the limitation that cache keys
# can only be UUID's.
def wrap_key(key, path):
return (str(path), str(key))
request.server.stash._wrap_key = wrap_key
# Append CORS headers if needed.
if "origin" in request.GET:
response.headers.set("Access-Control-Allow-Origin", request.GET.first("origin"))
if "credentials" in request.GET:
response.headers.set("Access-Control-Allow-Credentials", request.GET.first("credentials"))
# Handle the 'store' and 'stat' commands.
if command == "store":
# The test id is just used to make the results more human-readable.
test_id = request.GET.first("tid")
# The test index is used to build a predictable stash key, together
# with the unique session id, in order to retrieve a range of results
# later knowing the index range.
test_idx = request.GET.first("tidx")
test_data_key = build_stash_key(session_id, test_idx)
test_data = { "id": test_id, "error": None }
payload = ""
if "Content-Type" in request.headers and \
"form-data" in request.headers["Content-Type"]:
if "payload" in request.POST:
# The payload was sent as a FormData.
payload = request.POST.first("payload")
else:
# A FormData was sent with an empty payload.
pass
else:
# The payload was sent as either a string, Blob, or BufferSource.
payload = request.body
        payload_parts = list(filter(None, payload.split(":")))
if len(payload_parts) > 0:
payload_size = int(payload_parts[0])
# Confirm the payload size sent matches with the number of characters sent.
if payload_size != len(payload_parts[1]):
test_data["error"] = "expected %d characters but got %d" % (payload_size, len(payload_parts[1]))
else:
# Confirm the payload contains the correct characters.
for i in range(0, payload_size):
if payload_parts[1][i] != "*":
test_data["error"] = "expected '*' at index %d but got '%s''" % (i, payload_parts[1][i])
break
# Store the result in the stash so that it can be retrieved
# later with a 'stat' command.
request.server.stash.put(test_data_key, test_data)
elif command == "stat":
test_idx_min = int(request.GET.first("tidx_min"))
test_idx_max = int(request.GET.first("tidx_max"))
# For each result that has come in, append it to the response.
results = []
for test_idx in range(test_idx_min, test_idx_max+1): # +1 because end is exclusive
test_data_key = build_stash_key(session_id, test_idx)
test_data = request.server.stash.take(test_data_key)
if test_data:
results.append(test_data)
response.headers.set("Content-Type", "text/plain")
response.content = json.dumps(results)
else:
response.status = 400
|
5d970bb10d689bb55f70cd841bd01501d88428c7
| 3,643,571
|
def calc_chi2(model, dof=None):
"""
Calculate chi-square statistic.
Parameters
----------
model : Model
Model.
dof : int, optional
Degrees of freedom statistic. The default is None.
Returns
-------
tuple
chi2 statistic and p-value.
"""
if dof is None:
dof = calc_dof(model)
if model.last_result.name_obj == 'FIML':
stat = model.last_result.fun / model.n_samples
else:
stat = model.n_samples * model.last_result.fun
return stat, 1 - chi2.cdf(stat, dof)
|
46ed27fca1f36fdc8a044136da1ea4a032be1554
| 3,643,572
|
def QuadraticCommandAddControl(builder, control):
"""This method is deprecated. Please switch to AddControl."""
return AddControl(builder, control)
|
9b775f34400a0deeea93fd58a211915462735fed
| 3,643,573
|
def authenticated_api(username, api_root=None, parser=None):
"""Return an oauthenticated tweety API object."""
auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET)
try:
user = User.objects.get(username__iexact=username)
sa = user.social_auth.all()[0]
auth.set_access_token(sa.tokens['oauth_token'],
sa.tokens['oauth_token_secret'])
return tweepy.API(auth,
api_root=api_root or settings.TWITTER_API_ROOT,
parser=parser or JSONParser())
    except Exception:
return None
|
82237d40b89ad860720ae3830fa37de76439a2be
| 3,643,574
|
import gzip as gz  # assumed alias for the gzip module used below
def get_model_header(fpath):
    """
    Read the header line of a gzip-compressed model file.
    :param fpath: path to the gzip-compressed model file
    :return: list of header fields, with the leading '#' and whitespace stripped
    """
with gz.open(fpath, 'rt') as modelfile:
header = modelfile.readline().strip().strip('#').split()
return header
|
bd3600d831d212821c160b994ea73c24ee04ce6d
| 3,643,575
|
def clean_tag(tag):
"""clean up tag."""
if tag is None:
return None
t = tag
if isinstance(t, list):
t = t[0]
if isinstance(t, tuple):
t = t[0]
if t.startswith('#'):
t = t[1:]
t = t.strip()
t = t.upper()
t = t.replace('O', '0')
t = t.replace('B', '8')
return t
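# Worked example (illustrative input): a tag with a leading '#' and easily
# confused characters is upper-cased with O -> 0 and B -> 8:
# clean_tag('#po8cob')  -> 'P08C08'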
|
1d2709323c4d80f290701d5cdc3a993b4bac25d4
| 3,643,577
|
def massM2(param):
""" Mass term in the neutrino mass basis.
@type param : PhysicsConstants
@param param : set of physical parameters to be used.
@rtype : numpy array
@return : mass matrix in mass basis.
"""
M2 = np.zeros([param.numneu,param.numneu],complex)
for k in np.arange(1,param.numneu,1):
M2[k,k] = param.dmsq[k+1]
return M2
|
38997454d308b4730e4eac5a764977fc72a6b373
| 3,643,578
|
import json

import numpy as np  # assumed: np is used below for array handling
def get_input_data(train_file_path='train.json', train=True):
"""Retrieves training (X) and label (y) matrices. Note that this can take a few seconds to run.
Args:
train_file_path is the path of the file containing training data.
Returns:
A tuple containing the X training matrix in the first position, and the y label matrix in the second position.
X is of shape (N, 75, 75, 3), where N is the number of training images, 75 x 75 is the dimension of the images,
and 3 represents the number of channels for each image.
"""
with open(train_file_path, 'r') as train_file:
json_data = train_file.read()
train_data = json.loads(json_data)
band_1 = [instance['band_1'] for instance in train_data]
band_2 = [instance['band_2'] for instance in train_data]
ids = [instance['id'] for instance in train_data]
band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in band_1])
band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in band_2])
# Combine all three channels into an array of 1604 tensors (number of training images) with dimension 75 x 75 x 3
X_train = np.concatenate([band_1[:, :, :, np.newaxis], band_2[:, :, :, np.newaxis]], axis=-1)
if train:
# True labels of data, either iceberg or not iceberg
y_train = np.array([instance['is_iceberg'] for instance in train_data])
return X_train, y_train, ids
else:
return X_train, ids
|
5b42339917f0ec97ae584a03ba415881221e639c
| 3,643,579
|
def dice_coef(y_true, y_pred):
"""
:param y_true: the labeled mask corresponding to a mammogram scan
:param y_pred: the predicted mask of the scan
:return: A metric that accounts for precision and recall
on the scale from 0 - 1. The closer to 1, the
better.
Dice = 2 * (|X & Y|)/ |X|+ |Y|)
= sum(|A*B|)/(sum(|A|)+sum(|B|))
Citation (MIT License): https://github.com/jocicmarko/
ultrasound-nerve-segmentation/blob/
master/train.py
"""
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + SMOOTH) / (K.sum(y_true_f) + K.sum(y_pred_f) + SMOOTH)
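# Plain-NumPy illustration of the same formula (the function above uses the Keras
# backend K and a SMOOTH constant defined elsewhere); for |X & Y| = 2, |X| = 3,
# |Y| = 3 the coefficient is 2*2/(3+3) ~= 0.667, ignoring the SMOOTH term:
import numpy as np
y_true = np.array([1, 1, 1, 0, 0], dtype=float)
y_pred = np.array([1, 1, 0, 1, 0], dtype=float)
dice = (2.0 * np.sum(y_true * y_pred)) / (np.sum(y_true) + np.sum(y_pred))  # 0.666...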
|
e0f24abe29771f384e640e9e2f2420add040492f
| 3,643,580
|
def linmsg(x, end_pts_msg=None, max_msg=None, fill_value=1.e20):
"""
Linearly interpolates to fill in missing values.
x = Ngl.linmsg(x,end_pts_msg=None,max_msg=None,fill_value=1.e20)
x -- A numpy or masked array of any dimensionality that contains missing values.
end_pts_msg -- how missing beginning and end points will be
returned. If this value is greater than or equal to 0,
then the beginning and end points will be returned as
missing (default option). If this value is less
than 0, then they will be set to the nearest
non-missing value.
max_msg -- the maximum number of consecutive missing values to be
interpolated. If not set, then this function will try
to interpolate as many values as it can.
fill_value -- The missing value for x. Defaults to 1.e20 if not set.
"""
#
# Set defaults for input parameters not specified by user.
#
if end_pts_msg is None:
end_pts_msg = 0
#
# Setting max_msg to 0 will cause the C wrapper to set this to
# npts before going into the Fortran routine.
#
if max_msg is None:
max_msg = 0
#
# If input array is a numpy masked array, return a numpy masked array.
# Otherwise missing values are dealt with using the fill_value.
#
fv = _get_fill_value(x)
    if fv is None:
return fplib.linmsg(_promote_scalar(x),end_pts_msg,max_msg,fill_value)
else:
aret = fplib.linmsg(x.filled(fv), end_pts_msg, max_msg, fv)
return ma.masked_array(aret, fill_value=fv)
|
342abdc7536d8a1866c156cdc238e06338a20398
| 3,643,581
|
def get_or_create_actor_by_name(name):
"""
    Return the actor corresponding to name if it exists,
    otherwise create an actor with that name.
:param name: String
"""
return ta.ActorSystem().createActor(MyClass, globalName=name)
|
cc1ad620bc29139d6230e5a134ff72c3639a2bb1
| 3,643,582
|
def client():
"""Client to call tests against"""
options = {
'bind': '%s:%s' % ('0.0.0.0', '8080'),
'workers': str(number_of_workers()),
}
return testing.TestClient(falcon.API(), options)
|
3c075eb528e88a51a8f2c13e1197da6b2831197a
| 3,643,583
|
import math
def hard_negative_mining(loss, labels, neg_pos_ratio=3):
"""
用于训练过程中正负例比例的限制.默认在训练时,负例数量是正例数量的三倍
Args:
loss (N, num_priors): the loss for each example.
labels (N, num_priors): the labels.
neg_pos_ratio: 正负例比例: 负例数量/正例数量
"""
pos_mask = labels > 0
num_pos = pos_mask.long().sum(dim=1, keepdim=True)
num_neg = num_pos * neg_pos_ratio
loss[pos_mask] = -math.inf # 无穷
# 两次sort 找出元素在排序中的位置
_, indexes = loss.sort(dim=1, descending=True) # descending 降序 ,返回 value,index
_, orders = indexes.sort(dim=1)
neg_mask = orders < num_neg # loss 降序排, 背景为-无穷, 选择排前num_neg的 负无穷,也就是 背景
return pos_mask | neg_mask
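# Usage sketch (assumed shapes; requires PyTorch): the returned boolean mask keeps
# every positive prior plus the hardest negatives, at most neg_pos_ratio negatives
# per positive. Note the function overwrites the positive entries of the loss
# tensor, hence the clone below.
import torch
conf_loss = torch.rand(2, 8)              # per-prior confidence loss, shape (N, num_priors)
labels = torch.randint(0, 3, (2, 8))      # 0 = background, >0 = object class
mask = hard_negative_mining(conf_loss.clone(), labels, neg_pos_ratio=3)
selected_loss = conf_loss[mask]           # losses actually used for training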
|
3b2e38ab2b0bbd9732fceafdfd023ea220b3c5eb
| 3,643,584
|
def groups():
"""
Return groups
"""
return _clist(getAddressBook().groups())
|
16db4befa0863b15055fd7b557ecfefa8da55e20
| 3,643,585
|
def round_temp(value):
"""Round temperature for publishing."""
return round(value, dev_fan.round_temp)
|
39f7d5e55d0ba444b675b8ae612f5f38350af050
| 3,643,586
|
def get_key_from_property(prop, key, css_dict=None, include_commented=False):
"""Returns the entry from the dictionary using the given key"""
if css_dict is None:
css_dict = get_css_dict()[0]
cur = css_dict.get(prop) or css_dict.get(prop[1:-1])
if cur is None:
return None
value = cur.get(key)
if value is not None:
return value
for v in cur['values']:
if (v.startswith('<') or (include_commented and v.startswith('<_'))) and v.endswith('>'):
ret = get_key_from_property(v, key, css_dict, include_commented)
if ret is not None:
return ret
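# Minimal illustration (hypothetical css_dict) of how the lookup follows '<...>'
# value references recursively until the requested key is found:
# css_dict = {
#     'color': {'values': ['<color_values>']},
#     '<color_values>': {'values': ['red', 'blue'], 'default': 'red'},
# }
# get_key_from_property('color', 'default', css_dict)  -> 'red'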
|
169a4369a8fc5cc9cfde18b302a308bafa1d4def
| 3,643,587
|
def bbox_area(gt_boxes):
"""
gt_boxes: (K, 4) ndarray of float
area: (k)
"""
K = gt_boxes.size(0)
gt_boxes_area = ((gt_boxes[:,2] - gt_boxes[:,0] + 1) *
(gt_boxes[:,3] - gt_boxes[:,1] + 1)).view(K)
return gt_boxes_area
|
57ad16b8b339e4515dcd7e7126b9c6b35b6c3d8b
| 3,643,588
|
def DecodedMessage(tG,x):
"""
    Let G be a coding matrix and tG its transposed matrix. x is an n-vector received after decoding.
DecodedMessage Solves the equation on k-bits message v: x = v.G => G'v'= x' by applying GaussElimination on G'.
-------------------------------------
Parameters:
tG: Transposed Coding Matrix. Must have more rows than columns to solve the linear system. Must be full rank.
x: n-array. Must be in the Code (in Ker(H)).
"""
n,k = tG.shape
if n < k:
raise ValueError('Coding matrix G must have more columns than rows to solve the linear system on v\': G\'v\' = x\'')
rtG, rx = GaussElimination(tG,x)
rank = sum([a.any() for a in rtG])
    if rank != k:
raise ValueError('Coding matrix G must have full rank = k to solve G\'v\' = x\'')
message=np.zeros(k).astype(int)
message[k-1]=rx[k-1]
for i in reversed(range(k-1)):
message[i]=abs(rx[i]-BinaryProduct(rtG[i,list(range(i+1,k))],message[list(range(i+1,k))]))
return message
|
47968c4feed23a32abbbf34da1bed4521689f3d2
| 3,643,589
|
def get_ttp_card_info(ttp_number):
"""
Get information from the specified transport card number.
The number is the concatenation of the last 3 numbers of the first row and all the numbers of the second row.
See this image: https://tarjetatransportepublico.crtm.es/CRTM-ABONOS/archivos/img/TTP.jpg
:param str ttp_number: The number that identifies a transport card. It must be a string of the last 3 numbers
of the first row and all the numbers of the second row.
:return dict: A dictionary with information of the transport card. It has information regarding the titles
in that card, expiring dates, purchase dates, title types (young, normal, old, ...), among others.
"""
if ttp_number is not None:
client = Client(Urls.CITRAM_CARD_SERVICE.value)
result = client.service.ConsultaSaldo1(sNumeroTTP=ttp_number)
final_result = {'status': result['iCallLogField'],
'card_info': xmltodict.parse(result['sResulXMLField'])}
return final_result
else:
raise NotEnoughParametersException('You must specify a transport card number.')
|
fc8fb31ae5daf17173567d53a9a122c3d8e11ca5
| 3,643,590
|
import re
def tag_matches(tag, impl_version='trunk', client_version='trunk'):
"""Test if specified versions match the tag.
Args:
tag: skew test expectation tag, e.g. 'impl_lte_5' or 'client_lte_2'.
impl_version: WebLayer implementation version number or 'trunk'.
      client_version: WebLayer client version number or 'trunk'.
Returns:
True if the specified versions match the tag.
Raises:
AssertionError if the tag is invalid.
"""
# 'All' is special cased to match anything.
if tag == 'all':
return True
# Extract the three components from the tag.
match = re.match(r'(client|impl)_([gl]te)_([0-9]+)', tag)
assert match is not None, (
'tag must be of the form "{client,impl}_{gte,lte}_$version", found %r' %
tag)
target_str, op_str, tag_version_str = match.groups()
# If a version is specified see if the tag refers to the same target or
# return False otherwise.
if impl_version != 'trunk' and target_str != 'impl':
return False
if client_version != 'trunk' and target_str != 'client':
return False
version = impl_version if impl_version != 'trunk' else client_version
assert type(version) == int, 'Specified version must be an integer.'
tag_version = int(tag_version_str)
op = OP_MAP[op_str]
return op(version, tag_version)
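# Illustrative calls (assuming OP_MAP maps 'lte'/'gte' to <= / >= comparisons):
# tag_matches('impl_lte_5', impl_version=4)      -> True   (4 <= 5)
# tag_matches('impl_lte_5', impl_version=6)      -> False  (6 > 5)
# tag_matches('client_gte_2', client_version=1)  -> False  (1 < 2)
# tag_matches('all', impl_version=87)            -> True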
|
dab3494063cd382615648d12d5dae03a47963af6
| 3,643,591
|
def load_suites_from_directory(dir, recursive=True):
# type: (str, bool) -> List[Suite]
"""
Load a list of suites from a directory.
If the recursive argument is set to True, sub suites will be searched in a directory named
from the suite module: if the suite module is "foo.py" then the sub suites directory must be "foo".
    Raise SuiteLoadingError if one or more suites could not be loaded.
"""
if not osp.exists(dir):
raise SuiteLoadingError("Directory '%s' does not exist" % dir)
suites = {}
for filename in get_py_files_from_dir(dir):
suite = load_suite_from_file(filename)
if not suite.hidden:
suites[filename] = suite
if recursive:
for dirname in _get_sub_dirs_from_dir(dir):
suite = suites.get(dirname + ".py")
if not suite:
suite_name = osp.basename(dirname)
suite = Suite(None, suite_name, build_description_from_name(suite_name))
suites[suite.name] = suite
for sub_suite in load_suites_from_directory(dirname, recursive=True):
suite.add_suite(sub_suite)
return sorted(sorted(filter(lambda s: not s.is_empty(), suites.values()), key=lambda s: s.name), key=lambda s: s.rank)
|
5bb0c83ee39537b0bb38a663b110f5ef6225833e
| 3,643,593
|
def deep_parameters_back(param, back_node, function_params, count, file_path, lineno=0, vul_function=None,
isback=False):
"""
    Outer logic of the deep recursive analysis: mainly sets up some initial conditions and decides whether to start a new recursion.
:param isback:
:param lineno:
:param vul_function:
:param param:
:param back_node:
:param function_params:
:param count:
:param file_path:
:return:
"""
count += 1
padding = {}
is_co, cp, expr_lineno = parameters_back(param, back_node, function_params, lineno, vul_function=vul_function,
file_path=file_path, isback=isback)
if count > 20:
logger.warning("[Deep AST] depth too big, auto exit...")
return is_co, cp, expr_lineno
return is_co, cp, expr_lineno
|
5cc5669a3c071d14b5d4898f60315da27e397a8b
| 3,643,594
|
from typing import Optional
import re
def get_latest_runtime(dotnet_dir: Optional[str] = None, version_major: Optional[int] = 5,
version_minor: Optional[int] = 0, version_build: Optional[int] = 0) -> Optional[str]:
"""
Search and select the latest installed .NET Core runtime directory.
"""
dotnet_dir = dotnet_dir or get_dotnet_dir()
if not dotnet_dir:
return None
if "DOTNETRUNTIMEVERSION" in dotnet_const.ENVIRON:
tmp = join(dotnet_dir, "shared", "Microsoft.NETCore.App", dotnet_const.ENVIRON["DOTNETRUNTIMEVERSION"])
if isdir(tmp):
return tmp
runtime = None
for r in get_dotnet_runtimes():
if r.name == "Microsoft.NETCore.App":
vmatch = re.match(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<build>\d+)", r.version)
if vmatch:
tmp_major = int(vmatch.group("major"))
tmp_minor = int(vmatch.group("minor"))
tmp_build = int(vmatch.group("build"))
if tmp_major > version_major:
version_major = tmp_major
version_minor = tmp_minor
version_build = tmp_build
runtime = r
continue
if version_major == tmp_major:
if tmp_minor > version_minor:
version_minor = tmp_minor
version_build = tmp_build
runtime = r
continue
if version_minor == tmp_minor:
if tmp_build > version_build:
version_build = tmp_build
runtime = r
continue
if runtime is None:
runtime = r
continue
if runtime is None:
return None
tmp = join(dotnet_dir, "shared", "Microsoft.NETCore.App", runtime.version)
if isdir(tmp):
return tmp
tmp = join(runtime.path, runtime.version)
if isdir(tmp):
return tmp
return None
|
46db4e55163e6110d48264ed5ad4394662ade336
| 3,643,595
|
def choose_action(state, mdp_data):
"""
Choose the next action (0 or 1) that is optimal according to your current
mdp_data. When there is no optimal action, return a random action.
Args:
state: The current state in the MDP
mdp_data: The parameters for your MDP. See initialize_mdp_data.
Returns:
int, 0 or 1. The index of the optimal action according to your current MDP.
"""
# BONUS LEARNING OPPORTUNITY: When you have finished the problem set, try
# un-commenting the following two lines. This will implement a strategy
# called epsilon greedy, which drastically improves performance. Why do you
# think this works so well?
#
# if np.random.uniform() < 0.1: # 10% of the time, choose a random action
# return 0 if np.random.uniform() < 0.5 else 1
action = None
# *** START CODE HERE ***
right = mdp_data['transition_probs'][state, 0, :].dot(mdp_data['value'])
left = mdp_data['transition_probs'][state, 1, :].dot(mdp_data['value'])
if right > left:
action = 0
elif right < left:
action = 1
else:
action = np.random.choice([0, 1])
# *** END CODE HERE ***
return action
|
2cb1f50a62ec006367fb61d8e57eb95005670e31
| 3,643,596
|
import inspect
from functools import lru_cache  # assumed source of the lru_cache decorator used below
def specialize_on(names, maxsize=None):
"""
A decorator that wraps a function, partially evaluating it with the parameters
defined by ``names`` (can be a string or an iterable of strings) being fixed.
The partially evaluated versions are cached based on the values of these parameters
using ``functools.lru_cache`` with the provided ``maxsize``
(consequently, these values should be hashable).
"""
if isinstance(names, str):
names = [names]
names_set = set(names)
def _specializer(func):
signature = inspect.signature(func)
if not names_set.issubset(signature.parameters):
            missing_names = names_set.difference(signature.parameters)
raise ValueError(
"The provided function does not have parameters: "
+ ", ".join(missing_names))
@lru_cache(maxsize=maxsize)
def get_pevaled_func(args):
return partial_apply(func, **{name:val for name, val in args})
def _wrapper(*args, **kwds):
bargs = signature.bind(*args, **kwds)
call_arguments = bargs.arguments.copy()
for name in list(bargs.arguments):
if name not in names_set:
del bargs.arguments[name] # automatically changes .args and .kwargs
else:
del call_arguments[name]
cache_args = tuple((name, val) for name, val in bargs.arguments.items())
pevaled_func = get_pevaled_func(cache_args)
bargs.arguments = call_arguments # automatically changes .args and .kwargs
return pevaled_func(*bargs.args, **bargs.kwargs)
return _wrapper
return _specializer
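# Usage sketch (hypothetical function; assumes the partial_apply helper used by the
# decorator is available): the exponent=10 specialization is built once and cached.
@specialize_on('exponent', maxsize=8)
def power(base, exponent):
    return base ** exponent

power(2, exponent=10)  # builds and caches the exponent=10 specialization
power(3, exponent=10)  # reuses the cached specialization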
|
218cb169661507124acf1dae8076fa47eb313f1a
| 3,643,598
|
def parse_docstring(docstring: str, signature) -> str:
"""
Parse a docstring!
Note:
to try notes.
Args:
docstring: this is the docstring to parse.
Raises:
OSError: no it doesn't lol.
Returns:
markdown: the docstring converted to a nice markdown text.
"""
params = {}
exceptions = {}
returns = ""
lines = docstring.split("\n")
new_lines = []
i = 0
while i < len(lines):
if lines[i].lower() in ("args:", "arguments:", "params:", "parameters:"):
j = i + 1
name = None
while j < len(lines) and lines[j].startswith(" "):
if lines[j].startswith(" ") and params[name]:
params[name] += " " + lines[j].lstrip(" ")
else:
name, description = lines[j].lstrip(" ").split(":", 1)
params[name] = description.lstrip(" ")
j += 1
new_lines.append("**Parameters**\n")
new_lines.append("| Name | Type | Description |")
new_lines.append("| ---- | ---- | ----------- |")
for param_name, param_description in params.items():
param_name, param_default, param_type = get_param_info(signature, param_name)
# if param_default:
# param_default = f"`{param_default}`"
new_lines.append(f"| `{param_name}` | `{param_type}` | {param_description} |")
new_lines.append("")
i = j - 1
elif lines[i].lower() in ("raise:", "raises:", "except:", "exceptions:"):
j = i + 1
name = None
while j < len(lines) and lines[j].startswith(" "):
if lines[j].startswith(" ") and exceptions[name]:
exceptions[name] += " " + lines[j].lstrip(" ")
else:
name, description = lines[j].lstrip(" ").split(":", 1)
exceptions[name] = description.lstrip(" ")
j += 1
new_lines.append("**Exceptions**\n")
new_lines.append("| Type | Description |")
new_lines.append("| ---- | ----------- |")
for exception_name, exception_description in exceptions.items():
new_lines.append(f"| `{exception_name}` | {exception_description} |")
new_lines.append("")
i = j - 1
elif lines[i].lower() in ("return:", "returns:"):
j = i + 1
while j < len(lines) and lines[j].startswith(" "):
description = lines[j].lstrip(" ")
returns += " " + description
j += 1
new_lines.append("**Returns**\n")
new_lines.append("| Type | Description |")
new_lines.append("| ---- | ----------- |")
new_lines.append(f"| `{get_return_type(signature)}` | {returns} |")
new_lines.append("")
i = j - 1
elif lines[i].lower() in ADMONITIONS.keys():
j = i + 1
admonition = []
while j < len(lines) and lines[j].startswith(" ") or lines[j] == "":
admonition.append(lines[j])
j += 1
new_lines.append(f"!!! {ADMONITIONS[lines[i].lower()]}")
new_lines.append("\n".join(admonition))
new_lines.append("")
i = j - 1
else:
new_lines.append(lines[i])
i += 1
return "\n".join(new_lines)
|
f831cda6046853312f6b0afe28683d3fc81dc874
| 3,643,599
|
def get_rgeo(coordinates):
"""Geocode specified coordinates
:argument coordinates: address coordinates
:type coordinates: tuple
:returns tuple
"""
params = {'language': GEOCODING_LANGUAGE,
'latlng': ','.join([str(crdnt) for crdnt in coordinates])}
result = get(url=GEOCODING_URL, params=params)
return result, coordinates
|
ca8d07f526260d48955dee1b32d18bf14b21f9f6
| 3,643,600
|
def norm_lib_size_log(assay, counts: daskarr) -> daskarr:
"""
Performs library size normalization and then transforms the
values into log scale.
Args:
assay: An instance of the assay object
counts: A dask array with raw counts data
Returns: A dask array (delayed matrix) containing normalized data.
"""
return np.log1p(assay.sf * counts / assay.scalar.reshape(-1, 1))
|
3fdcde36daa3c3c491c3b85f718d75e6276af8fa
| 3,643,601
|
def compare_dicts(cloud1, cloud2):
"""
Compare the dicts containing cloud images or flavours
"""
if len(cloud1) != len(cloud2):
return False
for item in cloud1:
if item in cloud2:
if cloud1[item] != cloud2[item]:
return False
else:
return False
return True
|
4c13ed92da2cd40b543b75fac119b5da302717e3
| 3,643,602
|
import json
def ajax_stats():
"""
获取客户统计
:return:
"""
time_based = request.args.get('time_based', 'hour')
result_customer_middleman = customer_middleman_stats(time_based)
result_customer_end_user = customer_end_user_stats(time_based)
line_chart_data = {
'labels': [label for label, _ in result_customer_middleman],
'datasets': [
{
                'label': '同行',  # trade / middleman customers
'backgroundColor': 'rgba(220,220,220,0.5)',
'borderColor': 'rgba(220,220,220,1)',
'pointBackgroundColor': 'rgba(220,220,220,1)',
'pointBorderColor': '#fff',
'pointBorderWidth': 2,
'data': [data for _, data in result_customer_middleman]
},
{
                'label': '终端',  # end-user customers
'backgroundColor': 'rgba(151,187,205,0.5)',
'borderColor': 'rgba(151,187,205,1)',
'pointBackgroundColor': 'rgba(151,187,205,1)',
'pointBorderColor': '#fff',
'pointBorderWidth': 2,
'data': [data for _, data in result_customer_end_user]
}
]
}
return json.dumps(line_chart_data, default=json_default)
|
a467bd656535695333030ded34ccb299d57c8ef7
| 3,643,603
|
import string
def str2int(string_with_int):
""" Collect digits from a string """
return int("".join([char for char in string_with_int if char in string.digits]) or 0)
|
86955812fa3b2e6af0b98a04a1516897ccf95c25
| 3,643,604
|
def grid_to_3d(reward: np.ndarray) -> np.ndarray:
"""Convert gridworld state-only reward R[i,j] to 3D reward R[s,a,s']."""
assert reward.ndim == 2
reward = reward.flatten()
ns = reward.shape[0]
return state_to_3d(reward, ns, 5)
|
f848900b3b9ba7eb94fc1539fb1b24107e3db551
| 3,643,605
|
def find_routes(paths) -> list:
"""returns routes as tuple from path as list\
like 1,2,3 --> (1,2)(2,3)"""
routes = []
for path in paths:
for i in range(len(path)):
try:
route = (path[i], path[i + 1])
if route not in routes:
routes.append(route)
except IndexError:
pass
return routes
|
67fb8eb575dd45879f5e5b465a7886f2a2387b26
| 3,643,606
|
def z_step_ncg_hess_(Z, v, Y, F, phi, C_Z, eta_Z):
"""A wrapper of the hess-vector product for ncg calls."""
return z_step_tron_hess(v, Y, F, phi, C_Z, eta_Z)
|
2c6e800040e5090333cbba0924985bf7fe17c873
| 3,643,607
|
def list_servers(**kwargs) -> "list[NovaServer]":
"""List all servers under the current project.
Args:
kwargs: Keyword arguments, which will be passed to
:func:`novaclient.v2.servers.list`. For example, to filter by
instance name, provide ``search_opts={'name': 'my-instance'}``
Returns:
All servers associated with the current project.
"""
return nova().servers.list(**kwargs)
|
3e12a6e24687e74942cc86bc616d57ebdb5a6521
| 3,643,608
|
from typing import Optional
from typing import cast
def resolve_xref(
app: Sphinx,
env: BuildEnvironment,
node: nodes.Node,
contnode: nodes.Node,
) -> Optional[nodes.reference]:
"""
Resolve as-yet-unresolved XRefs for :rst:role:`tconf` roles.
:param app: The Sphinx application.
:param env: The Sphinx build environment.
    :param node: The cross-reference node which has not yet been resolved.
:param contnode: The child node of the reference node, which provides the formatted text.
"""
if not isinstance(node, nodes.Element): # pragma: no cover
return None
if node.get("refdomain", None) != "std": # pragma: no cover
return None
elif node.get("reftype", None) != "tconf": # pragma: no cover
return None
elif not node.get("reftarget"): # pragma: no cover
return None
std_domain = cast(StandardDomain, env.get_domain("std"))
objtypes = std_domain.objtypes_for_role("tconf") or []
reftarget = node["reftarget"]
candidates = []
for (obj_type, obj_name), (docname, labelid) in std_domain.objects.items():
if not docname: # pragma: no cover
continue
if obj_type in objtypes:
if obj_name.endswith(f".{reftarget}"):
candidates.append((docname, labelid, obj_name))
if not candidates:
return None # pragma: no cover
elif len(candidates) > 1:
logger.warning(
__("more than one target found for cross-reference %r: %s"),
reftarget,
", ".join(c[2] for c in candidates),
type="ref",
subtype="tconf",
location=node,
)
return make_refnode(
app.builder,
env.docname,
candidates[0][0], # docname
candidates[0][1], # labelid
contnode,
)
|
d4bc46765de1e892aa6753678fab5ad2ff693f68
| 3,643,609
|
def deploy_tester_contract(
web3,
contracts_manager,
deploy_contract,
contract_deployer_address,
get_random_address,
):
"""Returns a function that can be used to deploy a named contract,
    using the contract manager to compile the bytecode and get the ABI"""
def f(contract_name, libs=None, args=None):
json_contract = contracts_manager.get_contract(contract_name)
contract = deploy_contract(
web3,
contract_deployer_address,
json_contract['abi'],
json_contract['bin'],
args,
)
return contract
return f
|
ee925e9632f3bfd66a843d336bd287c92543b2ed
| 3,643,610
|
def make_hashable_params(params):
"""
    Checks to make sure that the submitted parameters are hashable.
Args:
params(dict):
Returns:
"""
tuple_params = []
for key, value in params.items():
if isinstance(value, dict):
dict_tuple = tuple([(key2, value2) for key2, value2 in value.items()])
tuple_params.append(dict_tuple)
else:
if isinstance(value, (list, set)):
tuple_params.append((key, tuple(value)))
else:
tuple_params.append((key, value))
tuple_params = tuple(tuple_params)
try:
hash(tuple_params)
except TypeError:
raise TypeError('The values of keywords given to this class must be hashable.')
return tuple_params
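# Worked example (illustrative input): nested dicts and lists become nested tuples
# so the result can be hashed, e.g. for use as a cache key. Note that the outer key
# of a dict-valued entry is not preserved, only its (key, value) pairs:
# make_hashable_params({'ids': [1, 2], 'opts': {'a': 1}})
# -> (('ids', (1, 2)), (('a', 1),))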
|
39d5de594b8caf776d2732e0e58b1c11127e5047
| 3,643,611
|
def check_member_role(member: discord.Member, role_id: int) -> bool:
"""
Checks if the Member has the Role
"""
return any(role.id == role_id for role in member.roles)
|
500c9c33dd0e25a6a4704165add3d39c05d510d2
| 3,643,612
|
import itertools
def tag_bedpe(b, beds, verbose=False):
"""
Tag each end of a BEDPE with a set of (possibly many) query BED files.
For example, given a BEDPE of interacting fragments from a Hi-C experiment,
identify the contacts between promoters and ChIP-seq peaks. In this case,
promoters and ChIP-seq peaks of interest would be provided as BED files.
The strategy is to split the BEDPE into two separate files. Each file is
intersected independently with the set of queries. The results are then
iterated through in parallel to tie the ends back together. It is this
iterator that is returned (see example below).
Parameters
----------
    b : str or BedTool
        BEDPE-format file. Must be name-sorted.
    beds : dict
Dictionary of BED/GFF/GTF/VCF files to use. After splitting the BEDPE,
these query files (values in the dictionary) will be passed as the `-b`
arg to `bedtools intersect`. The keys are passed as the `names`
argument for `bedtools intersect`.
Returns
-------
Tuple of (iterator, n, extra).
`iterator` is described below. `n` is the total number of lines in the
BEDPE file, which is useful for calculating percentage complete for
downstream work. `extra` is the number of extra fields found in the BEDPE
(also useful for downstream processing).
`iterator` yields tuples of (label, end1_hits, end2_hits) where `label` is
the name field of one line of the original BEDPE file. `end1_hits` and
`end2_hits` are each iterators of BED-like lines representing all
identified intersections across all query BED files for end1 and end2 for
this pair.
Recall that BEDPE format defines a single name and a single score for each
pair. For each item in `end1_hits`, the fields are::
chrom1
start1
end1
name
score
strand1
[extra fields]
query_label
fields_from_query_intersecting_end1
where `[extra fields]` are any additional fields from the original BEDPE,
`query_label` is one of the keys in the `beds` input dictionary, and the
remaining fields in the line are the intersecting line from the
corresponding BED file in the `beds` input dictionary.
Similarly, each item in `end2_hits` consists of:
chrom2
start2
end2
name
score
strand2
[extra fields]
query_label
fields_from_query_intersecting_end2
At least one line is reported for every line in the BEDPE file. If there
was no intersection, the standard BEDTools null fields will be shown. In
`end1_hits` and `end2_hits`, a line will be reported for each hit in each
query.
Example
-------
    Consider the following BEDPE (where "x1" is an arbitrary extra field).
>>> bedpe = pybedtools.example_bedtool('test_bedpe.bed')
>>> print(bedpe) # doctest: +NORMALIZE_WHITESPACE
chr1 1 10 chr1 50 90 pair1 5 + - x1
chr1 2 15 chr1 200 210 pair2 1 + + y1
<BLANKLINE>
And the following transcription start sites (TSSes) in BED4 format:
>>> tsses = pybedtools.example_bedtool('test_tsses.bed')
>>> print(tsses) # doctest: +NORMALIZE_WHITESPACE
chr1 5 6 gene1
chr1 60 61 gene2
chr1 88 89 gene3
<BLANKLINE>
And the following called peaks as BED6:
>>> peaks = pybedtools.example_bedtool('test_peaks.bed')
>>> print(peaks) # doctest: +NORMALIZE_WHITESPACE
chr1 3 4 peak1 50 .
<BLANKLINE>
Then we can get the following iterator, n, and extra:
>>> from pybedtools.contrib.long_range_interaction import tag_bedpe
>>> iterator, n, extra = tag_bedpe(bedpe, {'tss': tsses, 'pk': peaks})
>>> print(n)
2
>>> print(extra)
1
The following illustrates that each item in the iterator represents one
pair, and each item in each group represents an intersection with one end:
>>> for (label, end1_hits, end2_hits) in iterator:
... print('PAIR = {}'.format(label))
... print('end1_hits:')
... for i in end1_hits:
... print(i, end='')
... print('end2_hits:')
... for i in end2_hits:
... print(i, end='') # doctest: +NORMALIZE_WHITESPACE
PAIR = pair1
end1_hits:
chr1 1 10 pair1 5 + x1 pk chr1 3 4 peak1 50 . 1
chr1 1 10 pair1 5 + x1 tss chr1 5 6 gene1 1
end2_hits:
chr1 50 90 pair1 5 - x1 tss chr1 60 61 gene2 1
chr1 50 90 pair1 5 - x1 tss chr1 88 89 gene3 1
PAIR = pair2
end1_hits:
chr1 2 15 pair2 1 + y1 pk chr1 3 4 peak1 50 . 1
chr1 2 15 pair2 1 + y1 tss chr1 5 6 gene1 1
end2_hits:
chr1 200 210 pair2 1 + y1 . -1 -1 . -1 . 0
See the `cis_trans_interactions()` function for one way of summarizing
these data.
"""
b = pybedtools.BedTool(b)
# Figure out if the supplied bedpe had any extra fields. If so, the fields
# are repeated in each of the split output files.
observed = b.field_count()
extra = observed - 10
extra_inds = [10 + i for i in range(extra)]
end1_fn = pybedtools.BedTool._tmp()
end2_fn = pybedtools.BedTool._tmp()
# Performance notes:
# We don't need the overhead of converting every line into
# a pybedtools.Interval object just so we can grab the fields. Doing so
# takes 3.5x more time than simply splitting each line on a tab.
if verbose:
print('splitting BEDPE into separate files')
n = 0
with open(end1_fn, 'w') as end1_out, open(end2_fn, 'w') as end2_out:
for line in open(b.fn):
n += 1
f = line.strip().split('\t')
end1_out.write(
'\t'.join(
(f[i] for i in [0, 1, 2, 6, 7, 8] + extra_inds)) + '\n')
end2_out.write(
'\t'.join(
(f[i] for i in [3, 4, 5, 6, 7, 9] + extra_inds)) + '\n')
# Performance notes:
#
# For small BEDPE and large set of query files, it would be faster to sort
# these independently, intersect with sorted=True, and then re-sort by name
# for the grouping. For large BEDPE, I don't think the sorted=True
# performance gain outweighs the hit from sorting twice.
#
# On the other hand, if BEDPE was coord-sorted in the first place, only
# end2 would need to be sorted and re-sorted. On the other (third!?) hand,
# BEDPE creation from BAM implies name-sorting, so it's probably not
# reasonable to assume coord-sorted.
#
# In the end: don't do any sorting.
end1_bt = pybedtools.BedTool(end1_fn)
end2_bt = pybedtools.BedTool(end2_fn)
names, fns = [], []
for name, fn in beds.items():
names.append(name)
if isinstance(fn, pybedtools.BedTool):
fns.append(fn.fn)
else:
fns.append(fn)
if verbose:
print('intersecting end 1')
end1_hits = end1_bt.intersect(list(fns), names=names, wao=True)
if verbose:
print('intersecting end 2')
end2_hits = end2_bt.intersect(list(fns), names=names, wao=True)
grouped_end1 = itertools.groupby(end1_hits, lambda f: f[3])
grouped_end2 = itertools.groupby(end2_hits, lambda f: f[3])
def gen():
        for (label1, group1), (label2, group2) \
                in zip(grouped_end1, grouped_end2):
assert label1 == label2
yield label1, group1, group2
return gen(), n, extra
|
a1b95e04abd9401a6494fad2c2b6d48ecb14d414
| 3,643,613
|
from typing import Tuple
def point(x: float, y: float, z: float) -> Tuple:
"""Create a point."""
return Tuple(x, y, z, 1.0)
|
035f01d990d16634867b147b7fcb7e9d5edf7f92
| 3,643,614
|
def partial_pipeline_data(backend, user=None, *args, **kwargs): # pragma: no cover
"""
Add the session key to a signed base64 encoded signature on the email request.
"""
data = backend.strategy.request_data()
if 'signature' in data:
try:
signed_details = signing.loads(data['signature'], key=settings.SECRET_KEY)
session = Session.objects.get(pk=signed_details['session_key'])
except (BadSignature, Session.DoesNotExist) as e:
raise InvalidEmail(backend)
session_details = session.get_decoded()
backend.strategy.session_set('email_validation_address', session_details['email_validation_address'])
backend.strategy.session_set('next', session_details.get('next'))
backend.strategy.session_set('partial_pipeline', session_details['partial_pipeline'])
backend.strategy.session_set(backend.name + '_state', session_details.get(backend.name + '_state'))
backend.strategy.session_set(backend.name + 'unauthorized_token_name',
session_details.get(backend.name + 'unauthorized_token_name'))
partial = backend.strategy.session_get('partial_pipeline', None)
if partial:
idx, backend_name, xargs, xkwargs = \
backend.strategy.partial_from_session(partial)
if backend_name == backend.name:
kwargs.setdefault('pipeline_index', idx)
if user: # don't update user if it's None
kwargs.setdefault('user', user)
kwargs.setdefault('request', backend.strategy.request_data())
xkwargs.update(kwargs)
return xargs, xkwargs
else:
backend.strategy.clean_partial_pipeline()
|
54c0124b49fead91fed238ded15f6c3167f0aed4
| 3,643,615
|
def arrayinv(F, Fx):
"""
Args:
F: dx.ds function value at x
Fx: dx.dx.ds derivative of function at x
Returns:
"""
return np.array([np.linalg.solve(a, b) for a, b in zip(Fx.swapaxes(0,2), F.T)]).T
|
ac412bf0cb03a77d0a18295b899aeabd8bcdbfb3
| 3,643,616
|
def mil(val):
"""convert mil to mm"""
return float(val) * 0.0254
|
9071b0116a7062ef93d6bee56a08db2b9bec906a
| 3,643,618
|
def ask_number(question, low, high):
"""Poproś o podanie liczby z określonego zakresu."""
response = None
while type(response) != int:
try:
response = int(input(question))
while response not in range(low, high):
response = int(input(question))
except ValueError:
print("Value must be a number")
return response
|
fdae37e6a0cd34d36b647a23f4a0f58cad46680a
| 3,643,619
|
import numpy
from typing import Tuple
import math
def _beams_longitude_latitude(
ping_header: PingHeader, along_track: numpy.ndarray, across_track: numpy.ndarray
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Calculate the longitude and latitude for each beam.
https://en.wikipedia.org/wiki/Geographic_coordinate_system
    For longitude and latitude calculations:
* lat_m_sf = A - B * cos(2 * lat) + C * cos(4 * lat) - D * cos(6 * lat)
* lon_m_sf = E * cos(lat) - F * cos(3 * lat) + G * cos(5 * lat)
"""
# see https://math.stackexchange.com/questions/389942/why-is-it-necessary-to-use-sin-or-cos-to-determine-heading-dead-reckoning # noqa: E501
lat_radians = math.radians(ping_header.latitude)
coef_a = WGS84Coefficients.A.value
coef_b = WGS84Coefficients.B.value
coef_c = WGS84Coefficients.C.value
coef_d = WGS84Coefficients.D.value
coef_e = WGS84Coefficients.E.value
coef_f = WGS84Coefficients.F.value
coef_g = WGS84Coefficients.G.value
lat_mtr_sf = (
coef_a
- coef_b * math.cos(2 * lat_radians)
+ coef_c * math.cos(4 * lat_radians)
- coef_d * math.cos(6 * lat_radians)
)
lon_mtr_sf = (
coef_e * math.cos(lat_radians)
- coef_f * math.cos(3 * lat_radians)
+ coef_g * math.cos(5 * lat_radians)
)
delta_x = math.sin(math.radians(ping_header.heading))
delta_y = math.cos(math.radians(ping_header.heading))
lon2 = (
ping_header.longitude
+ delta_y / lon_mtr_sf * across_track
+ delta_x / lon_mtr_sf * along_track
)
lat2 = (
ping_header.latitude
- delta_x / lat_mtr_sf * across_track
+ delta_y / lat_mtr_sf * along_track
)
return lon2, lat2
|
c43171830206c5db878a817a03a4830aae878765
| 3,643,621
|
def true_range_nb(high: tp.Array2d, low: tp.Array2d, close: tp.Array2d) -> tp.Array2d:
"""Calculate true range."""
prev_close = generic_nb.fshift_nb(close, 1)
tr1 = high - low
tr2 = np.abs(high - prev_close)
tr3 = np.abs(low - prev_close)
tr = np.empty(prev_close.shape, dtype=np.float_)
for col in range(tr.shape[1]):
for i in range(tr.shape[0]):
tr[i, col] = max(tr1[i, col], tr2[i, col], tr3[i, col])
return tr
|
7b7594a1a5adf4e280a53af3e01d9aec5bd3b80c
| 3,643,622
|
def laplacian_operator(data):
"""
apply laplacian operator on data
"""
lap = []
lap.append(0.0)
for index in range(1, len(data) - 1):
lap.append((data[index + 1] + data[index - 1]) / 2.0 - data[index])
lap.append(0.0)
return lap
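# Worked example: interior points get (data[i+1] + data[i-1]) / 2 - data[i],
# the two end points are set to 0.0:
# laplacian_operator([0, 1, 4, 9])  -> [0.0, 1.0, 1.0, 0.0]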
|
3d7755cdc52352cc445d5942e34c09f65f3e11db
| 3,643,623
|
def _stringmatcher(pattern):
"""
accepts a string, possibly starting with 're:' or 'literal:' prefix.
returns the matcher name, pattern, and matcher function.
missing or unknown prefixes are treated as literal matches.
helper for tests:
>>> def test(pattern, *tests):
... kind, pattern, matcher = _stringmatcher(pattern)
... return (kind, pattern, [bool(matcher(t)) for t in tests])
exact matching (no prefix):
>>> test('abcdefg', 'abc', 'def', 'abcdefg')
('literal', 'abcdefg', [False, False, True])
regex matching ('re:' prefix)
>>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
('re', 'a.+b', [False, False, True])
force exact matches ('literal:' prefix)
>>> test('literal:re:foobar', 'foobar', 're:foobar')
('literal', 're:foobar', [False, True])
unknown prefixes are ignored and treated as literals
>>> test('foo:bar', 'foo', 'bar', 'foo:bar')
('literal', 'foo:bar', [False, False, True])
"""
if pattern.startswith('re:'):
pattern = pattern[3:]
try:
regex = re.compile(pattern)
        except re.error as e:
raise error.ParseError(_('invalid regular expression: %s')
% e)
return 're', pattern, regex.search
    elif pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
|
76a673133aaf7493b531b4f73364af2d16dd214b
| 3,643,624
|
def enu_to_ecef(ref_lat_rad, ref_lon_rad, ref_alt_m, e_m, n_m, u_m):
"""Convert ENU coordinates relative to reference location to ECEF coordinates.
This converts local east-north-up (ENU) coordinates relative to a given
reference position to earth-centered, earth-fixed (ECEF) cartesian
coordinates. The reference position is specified by its geodetic latitude,
longitude and altitude.
Parameters
----------
ref_lat_rad, ref_lon_rad : float or array
Geodetic latitude and longitude of reference position, in radians
ref_alt_m : float or array
Geodetic altitude of reference position, in metres above WGS84 ellipsoid
e_m, n_m, u_m : float or array
East, North, Up coordinates, in metres
Returns
-------
x_m, y_m, z_m : float or array
X, Y, Z coordinates, in metres
"""
# ECEF coordinates of reference point
ref_x_m, ref_y_m, ref_z_m = lla_to_ecef(ref_lat_rad, ref_lon_rad, ref_alt_m)
sin_lat, cos_lat = np.sin(ref_lat_rad), np.cos(ref_lat_rad)
sin_lon, cos_lon = np.sin(ref_lon_rad), np.cos(ref_lon_rad)
x_m = ref_x_m - sin_lon*e_m - sin_lat*cos_lon*n_m + cos_lat*cos_lon*u_m
y_m = ref_y_m + cos_lon*e_m - sin_lat*sin_lon*n_m + cos_lat*sin_lon*u_m
z_m = ref_z_m + cos_lat*n_m + sin_lat*u_m
return x_m, y_m, z_m
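# Hypothetical usage (coordinates made up; lla_to_ecef must be available in
# the same module): a target 1 km north of and 100 m above the reference.
lat0, lon0 = np.radians(-30.7), np.radians(21.4)
x, y, z = enu_to_ecef(lat0, lon0, 1000.0, e_m=0.0, n_m=1000.0, u_m=100.0)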
|
a6a7e8e3a67a17894d68d6c62b2ac7fcef7a09ec
| 3,643,625
|
import re
import requests
def is_file_url(share_url: str) -> bool:
"""判断是否为文件的分享链接"""
base_pat = r'https?://[a-zA-Z0-9-]*?\.?lanzou[a-z].com/.+' # 子域名可个性化设置或者不存在
user_pat = r'https?://[a-zA-Z0-9-]*?\.?lanzou[a-z].com/i[a-zA-Z0-9]{5,}/?' # 普通用户 URL 规则
if not re.fullmatch(base_pat, share_url):
return False
elif re.fullmatch(user_pat, share_url):
return True
else: # VIP 用户的 URL 很随意
try:
html = requests.get(share_url, headers=headers).text
html = remove_notes(html)
return True if re.search(r'class="fileinfo"|id="file"|文件描述', html) else False
except (requests.RequestException, Exception):
return False
|
d9b56a2187cedeb79cb848192b544026a5d85e29
| 3,643,626
|
import numpy as np
# kappa_calculation and compton_opacity_partial come from the same module.
def get_compton_fraction_artis(energy):
"""Gets the Compton scattering/absorption fraction
and angle following the scheme in ARTIS
Parameters
----------
energy : float
Energy of the gamma-ray
Returns
-------
float
Scattering angle
float
Compton scattering fraction
"""
energy_norm = kappa_calculation(energy)
fraction_max = 1.0 + 2.0 * energy_norm
fraction_min = 1.0
normalization = np.random.random() * compton_opacity_partial(
energy_norm, fraction_max
)
epsilon = 1.0e20
count = 0
while epsilon > 1.0e-4:
fraction_try = (fraction_max + fraction_min) / 2.0
sigma_try = compton_opacity_partial(energy_norm, fraction_try)
if sigma_try > normalization:
fraction_max = fraction_try
epsilon = (sigma_try - normalization) / normalization
else:
fraction_min = fraction_try
epsilon = (normalization - sigma_try) / normalization
count += 1
if count > 1000:
print("Error, failure to get a Compton fraction")
break
angle = np.arccos(1.0 - ((fraction_try - 1) / energy_norm))
return angle, fraction_try
|
2121712c542c967ef7008a4bdf8b88a8e2bcdb6c
| 3,643,627
|
def is_argspec_compatible_with_types(argspec, *args, **kwargs):
"""Determines if functions matching 'argspec' accept given 'args'/'kwargs'.
Args:
    argspec: An instance of inspect.ArgSpec to verify against the arguments.
*args: Zero or more positional arguments, all of which must be instances of
computation_types.Type or something convertible to it by
computation_types.to_type().
**kwargs: Zero or more keyword arguments, all of which must be instances of
computation_types.Type or something convertible to it by
computation_types.to_type().
Returns:
True or false, depending on the outcome of the test.
Raises:
    TypeError: if the arguments are of the wrong types.
"""
try:
callargs = get_callargs_for_argspec(argspec, *args, **kwargs)
if not argspec.defaults:
return True
except TypeError:
return False
# As long as we have been able to construct 'callargs', and there are no
# default values to verify against the given types, there is nothing more
# to do here, otherwise we have to verify the types of defaults against
# the types we've been given as parameters to this function.
num_specargs_without_defaults = len(argspec.args) - len(argspec.defaults)
for idx, default_value in enumerate(argspec.defaults):
if default_value is not None:
arg_name = argspec.args[num_specargs_without_defaults + idx]
call_arg = callargs[arg_name]
if call_arg is not default_value:
arg_type = computation_types.to_type(call_arg)
default_type = type_utils.infer_type(default_value)
if not type_utils.is_assignable_from(arg_type, default_type):
return False
return True
|
5103fa00737f4faeda49441f9d67388f34599d09
| 3,643,628
|
from functools import partial
# get_span_feats is defined elsewhere in this module.
def get_span_feats_stopwords(stopwords):
"""Get a span dependency tree unary function"""
return partial(get_span_feats, stopwords=stopwords)
|
86fd8c597f39f71c489665c05d164e0a3e1e69c0
| 3,643,629
|
def get_argument_parser(argparser):
"""Augments the given ArgumentParser for use with the Bonobo ETL framework."""
return bonobo.get_argument_parser(parser=argparser)
|
584fc867660f85998a679d1883828ea7a8c3896f
| 3,643,630
|
from pathlib import Path
def input_file_path(directory: str, file_name: str) -> Path:
"""Given the string paths to the result directory, and the input file
return the path to the file.
1. check if the input_file is an absolute path, and if so, return that.
2. if the input_file is a relative path, combine it with the result_directory
and return that.
    The resultant path must exist and be a file; otherwise raise a FileNotFoundError.
"""
path_to_file = Path(file_name)
if path_to_file.is_absolute() and path_to_file.is_file():
return path_to_file
input_directory_path = Path(directory)
path_to_file = input_directory_path / path_to_file
if path_to_file.is_file():
return path_to_file.resolve()
else:
raise FileNotFoundError(
'did not find the input file using result_directory={directory}, input_file={input_file}'.format(
directory=directory, input_file=file_name
)
)
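# Minimal usage sketch with a throwaway directory (the file name is arbitrary):
import os
import tempfile
with tempfile.TemporaryDirectory() as tmp:
    open(os.path.join(tmp, "config.yaml"), "w").close()  # create an empty file
    print(input_file_path(tmp, "config.yaml"))           # resolved absolute path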
|
dd866a5f8b6f776238269844d64686f7fb28347c
| 3,643,631
|
import numpy as np
# logl is the log-likelihood helper from the same package.
def loss(S, K, n_samples=None):
"""Loss function for time-varying graphical lasso."""
if n_samples is None:
n_samples = np.ones(S.shape[0])
return sum(
-ni * logl(emp_cov, precision)
for emp_cov, precision, ni in zip(S, K, n_samples))
|
07ad436bf5aee5e8b1dc53e89b894c4c8883cedd
| 3,643,632
|
import pandas as pd
def flat_dict(df):
    """
    Expand each key-value pair of dict-valued columns in a dataframe into new columns
    """
for col in df.columns:
if type(df[col][0]) == dict:
df = pd.concat(
[df.drop([col], axis=1), df[col].apply(pd.Series)], axis=1)
# sometimes a column is dropped but column 0 stays
df = df.drop([0], axis=1, errors='ignore')
return df
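# Minimal sketch of the expansion (column names are illustrative):
df_in = pd.DataFrame({"id": [1, 2],
                      "meta": [{"a": 10, "b": 20}, {"a": 30, "b": 40}]})
print(flat_dict(df_in).columns.tolist())  # ['id', 'a', 'b']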
|
ec817b9c7a08aab95bb29981dafbb1f1e03821eb
| 3,643,633
|
from typing import List
async def run_setup_pys(
targets_with_origins: TargetsWithOrigins,
setup_py_subsystem: SetupPySubsystem,
console: Console,
python_setup: PythonSetup,
distdir: DistDir,
workspace: Workspace,
union_membership: UnionMembership,
) -> SetupPy:
"""Run setup.py commands on all exported targets addressed."""
validate_args(setup_py_subsystem.args)
# Get all exported targets, ignoring any non-exported targets that happened to be
# globbed over, but erroring on any explicitly-requested non-exported targets.
exported_targets: List[ExportedTarget] = []
explicit_nonexported_targets: List[Target] = []
for target_with_origin in targets_with_origins:
tgt = target_with_origin.target
if tgt.has_field(PythonProvidesField):
exported_targets.append(ExportedTarget(tgt))
elif isinstance(target_with_origin.origin, AddressLiteralSpec):
explicit_nonexported_targets.append(tgt)
if explicit_nonexported_targets:
raise TargetNotExported(
"Cannot run setup.py on these targets, because they have no `provides=` clause: "
f'{", ".join(so.address.spec for so in explicit_nonexported_targets)}'
)
if setup_py_subsystem.transitive:
# Expand out to all owners of the entire dep closure.
transitive_targets = await Get(
TransitiveTargets, Addresses(et.target.address for et in exported_targets)
)
owners = await MultiGet(
Get(ExportedTarget, OwnedDependency(tgt))
for tgt in transitive_targets.closure
if is_ownable_target(tgt, union_membership)
)
exported_targets = list(FrozenOrderedSet(owners))
py2 = is_python2(
python_setup.compatibilities_or_constraints(
target_with_origin.target.get(PythonInterpreterCompatibility).value
for target_with_origin in targets_with_origins
)
)
chroots = await MultiGet(
Get(SetupPyChroot, SetupPyChrootRequest(exported_target, py2))
for exported_target in exported_targets
)
    # If args were provided, run setup.py with them; otherwise just dump chroots.
if setup_py_subsystem.args:
setup_py_results = await MultiGet(
Get(
RunSetupPyResult,
RunSetupPyRequest(exported_target, chroot, setup_py_subsystem.args),
)
for exported_target, chroot in zip(exported_targets, chroots)
)
for exported_target, setup_py_result in zip(exported_targets, setup_py_results):
addr = exported_target.target.address.spec
console.print_stderr(f"Writing dist for {addr} under {distdir.relpath}/.")
workspace.write_digest(setup_py_result.output, path_prefix=str(distdir.relpath))
else:
# Just dump the chroot.
for exported_target, chroot in zip(exported_targets, chroots):
addr = exported_target.target.address.spec
provides = exported_target.provides
setup_py_dir = distdir.relpath / f"{provides.name}-{provides.version}"
console.print_stderr(f"Writing setup.py chroot for {addr} to {setup_py_dir}")
workspace.write_digest(chroot.digest, path_prefix=str(setup_py_dir))
return SetupPy(0)
|
713f0b7f3558e2a69dcca0a7a251f4991ee49073
| 3,643,634
|
def list_tasks():
"""
    Display the list of all tasks, for easier task management.
:return:
"""
try:
task_id = request.args.get("task_id")
task_status = request.args.get('status')
        # Build the tuple of query conditions
task_info_list = list()
tasks = TaskService.get_tasks_url_num(task_id=task_id, task_status=task_status)
for task in tasks:
hook_rule = task.hook_rule
# RedisService.get_task(task.id)["hook_rule"]
unscaned_urls_num = task.unscaned_urls_num
scaned_urls_num = task.scaned_urls_num
total_url_num = unscaned_urls_num + scaned_urls_num
if task.task_status in [TaskStatus.KILLED, TaskStatus.DONE]:
percent = 100
else:
percent = 0 if total_url_num == 0 else int((scaned_urls_num / total_url_num) * 100)
task_info_list.append({'receiver_emails': task.receivers_email, 'task_name': task.task_name,
'create_time': task.created_time.strftime("%Y-%m-%d %H:%M"), 'percent': percent,
'unscaned_url_num': unscaned_urls_num, 'scaned_url_num': scaned_urls_num,
'total_url_num': total_url_num, 'hook_rule': hook_rule, 'task_id': task.id,
'task_access_key': task.access_key, 'task_status': task.task_status,
"create_user_name": task.create_user_name})
task_info_list.reverse()
        response = jsonify(status=200, message="query succeeded", data=task_info_list)
return response
except Exception as e:
        logger.exception("show_current_tasks raised an error")
        if isinstance(e, BaseHunterException):
            return jsonify(status=400, message=str(e), data={"extra_info": "an invalid task_id was passed when querying tasks"})
        return jsonify(status=500, message="unknown error", data={"extra_info": "an unknown error occurred while querying tasks; please contact the administrator to check the error logs"})
|
c6d205e95bd7a1a2e76baf7f89c917310b683bc0
| 3,643,635
|
import itertools
import torch
def make_fixed_size(
protein,
shape_schema,
msa_cluster_size,
extra_msa_size,
num_res=0,
num_templates=0,
):
"""Guess at the MSA and sequence dimension to make fixed size."""
pad_size_map = {
NUM_RES: num_res,
NUM_MSA_SEQ: msa_cluster_size,
NUM_EXTRA_SEQ: extra_msa_size,
NUM_TEMPLATES: num_templates,
}
for k, v in protein.items():
# Don't transfer this to the accelerator.
if k == "extra_cluster_assignment":
continue
shape = list(v.shape)
schema = shape_schema[k]
msg = "Rank mismatch between shape and shape schema for"
assert len(shape) == len(schema), f"{msg} {k}: {shape} vs {schema}"
pad_size = [
pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema)
]
padding = [(0, p - v.shape[i]) for i, p in enumerate(pad_size)]
padding.reverse()
padding = list(itertools.chain(*padding))
if padding:
protein[k] = torch.nn.functional.pad(v, padding)
protein[k] = torch.reshape(protein[k], pad_size)
return protein
|
1125e1cdbe8f12d6613fb8dd9374afdbf1fd065a
| 3,643,636
|
def codegen_reload_data():
"""Parameters to codegen used to generate the fn_urlhaus package"""
reload_params = {"package": u"fn_urlhaus",
"incident_fields": [],
"action_fields": [],
"function_params": [u"urlhaus_artifact_type", u"urlhaus_artifact_value"],
"datatables": [],
"message_destinations": [u"fn_urlhaus"],
"functions": [u"fn_urlhaus", u"fn_urlhaus_submission"],
"phases": [],
"automatic_tasks": [],
"scripts": [],
"workflows": [u"example_urlhaus_lookup", u"example_urlhaus_url_submission"],
"actions": [u"Example: URLhaus Lookup", u"Example: URLhaus URL Submission"]
}
return reload_params
|
1665121ab3305f517242b122e2aaae2b12fe57f0
| 3,643,637
|
def urls(page, baseurl=auto, direct=True, prev=True, next=True):
"""
    Return a list of pagination URLs extracted from the page.
    When `baseurl` is None, relative URLs are returned; pass `baseurl`
    to get absolute URLs.
``prev``, ``next`` and ``direct`` arguments control whether to return
'next page', 'previous page' links and links to specific pages.
By default, all link types are returned.
"""
return get_shared_autopager().urls(page, baseurl, direct, prev, next)
|
70f0337b5ed1a1cd8c0cfd1f99f8ad67da85b23d
| 3,643,638
|
def sinc_filter(audio: tf.Tensor,
cutoff_frequency: tf.Tensor,
window_size: int = 512,
sample_rate: int = None,
padding: Text = 'same') -> tf.Tensor:
"""Filter audio with sinc low-pass filter.
Args:
audio: Input audio. Tensor of shape [batch, audio_timesteps].
cutoff_frequency: Frequency cutoff for low-pass sinc filter. If the
sample_rate is given, cutoff_frequency is in Hertz. If sample_rate is
      None, cutoff_frequency is a normalized ratio (frequency/nyquist) in the
range [0, 1.0]. Shape [batch_size, n_time, 1].
window_size: Size of the Hamming window to apply to the impulse.
sample_rate: Optionally provide the sample rate.
    padding: Either 'valid' or 'same'. For 'same' the final output is the
      same size as the input audio (audio_timesteps). For 'valid' the audio is
extended to include the tail of the impulse response (audio_timesteps +
window_size - 1).
Returns:
Filtered audio. Tensor of shape
[batch, audio_timesteps + window_size - 1] ('valid' padding) or shape
[batch, audio_timesteps] ('same' padding).
"""
impulse_response = sinc_impulse_response(cutoff_frequency,
window_size=window_size,
sample_rate=sample_rate)
return fft_convolve(audio, impulse_response, padding=padding)
|
ea13a320744bb380b20643c2a995be67fc9d1303
| 3,643,639
|
def _getDataFlows(blocks):
"""
    Given a block dictionary from bifrost.proclog.load_by_pid(), return the
    sources, sinks, data-flow chains, and core-binding associations describing
    the data flow.
    """
# Find out what rings we have to work with and which blocks are sources
# or sinks
rings = []
sources, sourceRings = [], []
sinks, sinkRings = [], []
for block in blocks.keys():
rins, routs = [], []
rFound = False
for log in blocks[block].keys():
if log not in ('in', 'out'):
continue
for key in blocks[block][log]:
if key[:4] == 'ring':
rFound = True
value = blocks[block][log][key]
if value not in rings:
rings.append( value )
if log == 'in':
if value not in rins:
rins.append( value )
else:
if value not in routs:
routs.append( value )
if rFound:
if len(rins) == 0:
sources.append( block )
sourceRings.extend( routs )
if len(routs) == 0:
sinks.append( block )
sinkRings.extend( rins )
# Find out the chains
chains = []
for refRing in rings:
for block in blocks.keys():
rins, routs = [], []
for log in blocks[block].keys():
if log not in ('in', 'out'):
continue
for key in blocks[block][log]:
if key[:4] == 'ring':
value = blocks[block][log][key]
if log == 'in':
if value not in rins:
rins.append( value )
else:
if value not in routs:
routs.append( value )
if refRing in routs:
refBlock = block
refROuts = routs
for block in blocks.keys():
rins, routs = [], []
dtype = None
for log in blocks[block].keys():
if log.startswith('sequence'):
try:
bits = blocks[block][log]['nbit']
if blocks[block][log]['complex']:
bits *= 2
name = 'cplx' if blocks[block][log]['complex'] else 'real'
dtype = '%s%i' % (name, bits)
except KeyError:
pass
elif log not in ('in', 'out'):
continue
for key in blocks[block][log]:
if key[:4] == 'ring':
value = blocks[block][log][key]
if log == 'in':
if value not in rins:
rins.append( value )
else:
if value not in routs:
routs.append( value )
for ring in rins:
if ring in refROuts:
#print refRing, rins, block
chains.append( {'link':(refBlock,block), 'dtype':dtype} )
# Find out the associations (based on core binding)
associations = []
for block in blocks:
refBlock = block
refCores = []
        for i in range(32):
try:
refCores.append( blocks[block]['bind']['core%i' % i] )
except KeyError:
break
if len(refCores) == 0:
continue
for block in blocks:
if block == refBlock:
continue
cores = []
            for i in range(32):
try:
cores.append( blocks[block]['bind']['core%i' % i] )
except KeyError:
break
if len(cores) == 0:
continue
for core in cores:
if core in refCores:
if (refBlock,block) not in associations:
if (block,refBlock) not in associations:
associations.append( (refBlock, block) )
return sources, sinks, chains, associations
|
197cc64b5bf7ecd8e5c7d912239c93a1feffcd14
| 3,643,640
|
def find_lowest_cost_node(costs: dict, processed: list) -> dict:
"""Return the node with the lowest cost"""
lowest_cost = float("inf") # Infinity
lowest_cost_node = None
for node in costs:
cost = costs[node]
if cost < lowest_cost and node not in processed:
lowest_cost = cost
lowest_cost_node = node
return lowest_cost_node
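# Minimal usage sketch with a toy cost table, as used in Dijkstra's algorithm:
costs = {"a": 6, "b": 2, "fin": float("inf")}
print(find_lowest_cost_node(costs, processed=[]))     # 'b'
print(find_lowest_cost_node(costs, processed=["b"]))  # 'a'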
|
aeb0ef046619bc9280d3d712329c672f76e36c90
| 3,643,641
|
import cv2
import numpy as np
def scale_img(image, random_coordinate=False):
    """
    Resize the source image so that it fits into a 640x640 canvas.
    :param image: input image as an HxWxC uint8 array
    :param random_coordinate: if True, paste the resized image at a random offset inside the canvas
    :return: the padded canvas scaled to [0, 1], and the scale factor applied
    """
h, w, c = image.shape
if max(h, w) > 640:
f_scale = min(640./h, 640./w) # scale factor
image = cv2.resize(src=image, dsize=None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_CUBIC)
else:
f_scale = 1.
h_s, w_s, c_s = image.shape # h scaled
    image_full = np.zeros((640, 640, c), dtype=np.uint8)  # blank (black) canvas
if random_coordinate: # random coordinate
        h_random = np.random.randint(0, 640 - h_s + 1)
        w_random = np.random.randint(0, 640 - w_s + 1)
image_full[h_random:h_random + h_s, w_random:w_random + w_s, :] = image.astype(np.uint8)
else:
image_full[0:h_s, 0:w_s, :] = image.astype(np.uint8)
return image_full / 255., f_scale
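# Usage sketch with a random 800x600 image (requires OpenCV; shapes illustrative):
img = np.random.randint(0, 256, size=(800, 600, 3), dtype=np.uint8)
canvas, f = scale_img(img)
print(canvas.shape, f)  # (640, 640, 3) 0.8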
|
6a0b93f4564c6d83e60f6f7a250822f801e0b65b
| 3,643,642
|
import math
def magnitude(v: Vector) -> float:
"""computes the magnitude (length) of a vector"""
return math.sqrt(sum_of_squares(v))
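# Vector and sum_of_squares are defined elsewhere in the source; a minimal
# self-contained sketch of the missing pieces:
from typing import List
Vector = List[float]

def sum_of_squares(v: Vector) -> float:
    """v_1 * v_1 + ... + v_n * v_n"""
    return sum(x * x for x in v)

print(magnitude([3.0, 4.0]))  # 5.0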
|
881f2a3e75520b3f8da7ea093765e36d78e48c57
| 3,643,643
|
import numpy as np
# time, domes1_demad_finesk and outflow_finesk are PySD model components
# defined elsewhere in the generated module.
def supply_domes1finesk():
"""
Real Name: b'"Supply Domes-1Finesk"'
Original Eqn: b'MIN("Domes-1 Demad finesk" (Time), (outflow Finesk) )'
Units: b'MCM/Month'
Limits: (None, None)
Type: component
b''
"""
return np.minimum(domes1_demad_finesk(time()), (outflow_finesk()))
|
e7bbbdc49e45044179053a02c4b76c1dda798bc0
| 3,643,646
|
def poll(handle):
"""
    Polls a push_pull handle to determine whether the underlying
    asynchronous operation has completed. After `poll()` returns `True`,
    `synchronize()` will return without blocking.
Arguments:
handle: A handle returned by an push_pull asynchronous
operation.
Returns:
A flag indicating whether the operation has completed.
"""
return c_lib.byteps_torch_poll(handle) != 0
|
e228183068517962e7886c020e662b8c1a1f2912
| 3,643,647
|
from typing import Tuple
def _increase_explicit_hydrogen_for_bond_atom(
rwmol: Chem.rdchem.RWMol,
remove_bidx: bool,
bidx: int,
remove_eidx: bool,
eidx: int,
ai_to_remove: list,
) -> Tuple[Chem.rdchem.RWMol, list]:
"""Increase number of explicit hydrogens for atom in a bond.
Args:
rwmol: An RDKit RWmolecule (rdkit.Chem.rdchem.RWMol)
        remove_bidx: Whether the bond's begin atom is being removed; if True,
            the end atom gains an explicit hydrogen (bool)
        remove_eidx: Whether the bond's end atom is being removed; if True,
            the begin atom gains an explicit hydrogen (bool)
Returns:
Tuple with an RDKit RWmolecule and an updated list to remove
(rdkit.Chem.rdchem.RWMol, list).
"""
if remove_bidx or remove_eidx:
if remove_bidx:
ai_to_remove.append(bidx)
_increase_explicit_hydrogens(rwmol, eidx)
if remove_eidx:
ai_to_remove.append(eidx)
_increase_explicit_hydrogens(rwmol, bidx)
rwmol.RemoveBond(bidx, eidx)
return rwmol, ai_to_remove
|
cf0276730ee0837d43098f9712f7c199ba93b268
| 3,643,648
|
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
# _datetime_delta and human_format are helpers from the same module.
def plot_historical_actuals_forecast(e, title=None, ylabel='',
include_pred_int=False,
years_prior_include=2,
forecast_display_start=None,
e2=None):
"""Produce a plot of the ensemble forecasts
Returns
----------
plt object
"""
if e.forecast['consensus'] is None:
raise Exception('No forecast found.')
if title is None and e.validation['consensus'] is not None:
title = 'Training, forecast and actuals'
if title is None and e.validation['consensus'] is None:
title = 'Training and forecast'
fig, ax = plt.subplots(figsize=(13, 11))
fig.suptitle(title, fontsize=24)
plt.ylabel(ylabel, fontsize=20)
plt.rc('legend', fontsize=18)
plt.rc('ytick', labelsize=18)
plt.rc('xtick', labelsize=18)
plt.xticks(rotation = 30)
ax.xaxis.set_major_locator(mdates.AutoDateLocator(maxticks=12))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d-%y'))
ax.yaxis.set_major_formatter(FuncFormatter(human_format))
if forecast_display_start is None:
forecast_display_start = min(e.forecast['consensus'].dt)
forecast_mask = (e.forecast['consensus'].dt >= forecast_display_start)
forecast_len = forecast_mask.sum()
max_vals = []
for yp in list(range(1, years_prior_include + 1)):
if len(e.periods_agg) > 0 and max(e.periods_agg) > 1:
agg_str = 'period' + str(max(e.periods_agg))
range_train_yp = {'min':(forecast_display_start -
_datetime_delta(yp, 'Y') +
_datetime_delta(yp, 'D')),
'max':(max(e.forecast['consensus'].dt) -
_datetime_delta(yp, 'Y') +
_datetime_delta(yp, 'D'))}
training_mask = (
(e.training['aggregated'][agg_str].dt >= range_train_yp['min']) &
(e.training['aggregated'][agg_str].dt <= range_train_yp['max']))
train_len = training_mask.sum()
fp = plt.plot(e.forecast['consensus'].dt.loc[forecast_mask][:train_len],
e.training['aggregated'][agg_str].actual.loc[
training_mask][:forecast_len],
label='actuals ' + str(int(yp)) + 'YA',
linewidth=4)
history_len = e.training['aggregated'][agg_str].shape[0]
max_vals = max_vals + [max(
e.training['aggregated'][agg_str].actual.loc[
training_mask][:forecast_len])]
else:
range_train_yp = {'min':(forecast_display_start -
_datetime_delta(yp, 'Y')),
'max':(max(e.forecast['consensus'].dt) -
_datetime_delta(yp, 'Y'))}
training_mask = (
(e.training['history'].dt >= range_train_yp['min']) &
(e.training['history'].dt <= range_train_yp['max']))
fp = plt.plot(e.forecast['consensus'].dt.loc[forecast_mask],
e.training['history'].actual.loc[training_mask],
label='actuals ' + str(int(yp)) + 'YA', linewidth=2)
history_len = e.training['history'].shape[0]
max_vals = max_vals + [max(
e.training['history'].actual.loc[training_mask])]
total_len = history_len + e.forecast['consensus'].shape[0]
fp = plt.plot(e.forecast['consensus'].dt.loc[forecast_mask],
e.forecast['consensus'].forecast.loc[forecast_mask],
label='forecast',
linewidth=2 + 2 * int(total_len < 400),
c='indianred')
max_vals = max_vals + [max(
e.forecast['consensus'].forecast.loc[forecast_mask])]
if include_pred_int:
fp = plt.fill_between(e.forecast['consensus'].dt.loc[forecast_mask],
e.forecast['consensus'].forecast_lower.loc[
forecast_mask],
e.forecast['consensus'].forecast_upper.loc[
forecast_mask],
color='indianred', alpha=0.3,
label=str(round(
e.pred_level * 100)) + '% prediction band')
max_vals = max_vals + [max(e.forecast['consensus'].forecast_upper.loc[
forecast_mask])]
if (e.validation['consensus'] is not None and
len(e.validation['consensus']) > 0):
fp = plt.plot(e.validation['consensus'].dt.loc[forecast_mask],
e.validation['consensus'].actual.loc[forecast_mask],
label='actuals', c='mediumseagreen',
linewidth=2 + 2 * int(total_len < 400))
max_vals = max_vals + [max(
e.validation['consensus'].actual.loc[forecast_mask])]
if (e2 is not None and
len(e.forecast['consensus'].dt) > 0):
forecast_mask2 = (e2.forecast['consensus'].dt >= forecast_display_start)
fp = plt.plot(e2.forecast['consensus'].dt.loc[forecast_mask2],
e2.forecast['consensus'].forecast.loc[forecast_mask2],
label='latest forecast',
linewidth=2 + 2 * int(total_len < 400),
c='purple')
max_vals = max_vals + [max(
e2.forecast['consensus'].forecast.loc[forecast_mask2])]
plt.ylim([0, 1.05 * max(max_vals)])
plt.legend(loc='lower center', ncol=3, framealpha=0.05)
plt.grid()
return fp
|
e6604fe35ce6a65ff61ee45a387167d019be867a
| 3,643,650
|
import time
import threading
def f2(a, b):
"""
concurrent_num = 600 不用怕,因为这是智能线程池,如果函数耗时短,不会真开那么多线程。
这个例子是测试函数耗时是动态变化的,这样就不可能通过提前设置参数预估函数固定耗时和搞鬼了。看看能不能实现qps稳定和线程池自动扩大自动缩小
要说明的是打印的线程数量也包含了框架启动时候几个其他的线程,所以数量不是刚好和所需的线程计算一样的。
## 可以在运行控制台搜索 新启动线程 这个关键字,看看是不是何时适合扩大线程数量。
## 可以在运行控制台搜索 停止线程 这个关键字,看看是不是何时适合缩小线程数量。
"""
result = a + b
sleep_time = 0.01
    if time.time() - t_start > 60:  # first let the runtime grow gradually, to test whether the framework scales the pool up on demand
sleep_time = 7
if time.time() - t_start > 120:
sleep_time = 31
if time.time() - t_start > 200:
sleep_time = 79
    if time.time() - t_start > 400:  # finally shrink the runtime again, to test whether the framework scales the pool back down
sleep_time = 0.8
if time.time() - t_start > 500:
sleep_time = None
    print(f'{time.strftime("%H:%M:%S")}, current thread count: {threading.active_count()}, {a} + {b} = {result}, sleep {sleep_time} s')
if sleep_time is not None:
        time.sleep(sleep_time)  # simulate work that blocks for n seconds; concurrency is needed to get around the blocking
return result
|
4f555d2b684e06d171a821fde6c10d2a72596396
| 3,643,651
|
import tensorflow as tf
import kfac
# _INVERT_EVERY and _REPORT_EVERY are module-level constants in the source script.
def minimize_loss_single_machine_manual(loss,
accuracy,
layer_collection,
device=None,
session_config=None):
"""Minimize loss with K-FAC on a single machine(Illustrative purpose only).
This function does inverse and covariance computation manually
for illustrative pupose. Check `minimize_loss_single_machine` for
automatic inverse and covariance op placement and execution.
A single Session is responsible for running all of K-FAC's ops. The covariance
and inverse update ops are placed on `device`. All model variables are on CPU.
Args:
loss: 0-D Tensor. Loss to be minimized.
accuracy: 0-D Tensor. Accuracy of classifier on current minibatch.
layer_collection: LayerCollection instance describing model architecture.
Used by K-FAC to construct preconditioner.
device: string or None. The covariance and inverse update ops are run on
this device. If empty or None, the default device will be used.
(Default: None)
session_config: None or tf.ConfigProto. Configuration for tf.Session().
Returns:
final value for 'accuracy'.
"""
device_list = [] if not device else [device]
# Train with K-FAC.
g_step = tf.train.get_or_create_global_step()
optimizer = kfac.KfacOptimizer(
learning_rate=0.0001,
cov_ema_decay=0.95,
damping=0.001,
layer_collection=layer_collection,
placement_strategy="round_robin",
cov_devices=device_list,
inv_devices=device_list,
trans_devices=device_list,
momentum=0.9)
(cov_update_thunks,
inv_update_thunks) = optimizer.make_vars_and_create_op_thunks()
def make_update_op(update_thunks):
update_ops = [thunk() for thunk in update_thunks]
return tf.group(*update_ops)
cov_update_op = make_update_op(cov_update_thunks)
with tf.control_dependencies([cov_update_op]):
inverse_op = tf.cond(
tf.equal(tf.mod(g_step, _INVERT_EVERY), 0),
lambda: make_update_op(inv_update_thunks), tf.no_op)
with tf.control_dependencies([inverse_op]):
with tf.device(device):
train_op = optimizer.minimize(loss, global_step=g_step)
tf.logging.info("Starting training.")
with tf.train.MonitoredTrainingSession(config=session_config) as sess:
while not sess.should_stop():
global_step_, loss_, accuracy_, _ = sess.run(
[g_step, loss, accuracy, train_op])
if global_step_ % _REPORT_EVERY == 0:
tf.logging.info("global_step: %d | loss: %f | accuracy: %s",
global_step_, loss_, accuracy_)
return accuracy_
|
c5f53d7eddabe3ea5ac30ae4ecc050ee43ffa5e7
| 3,643,652
|
def bass_call_0(function, *args):
"""Makes a call to bass and raises an exception if it fails. Does not consider 0 an error."""
res = function(*args)
if res == -1:
code = BASS_ErrorGetCode()
raise BassError(code, get_error_description(code))
return res
|
9355f12b7277914e2397c64103666be0f5b801e5
| 3,643,653
|
def port_speed(value: str | None = None) -> int | None:
    """Parse a port speed flag into Mb/s."""
if value is None:
return None
elif value == "X":
return 0
elif value == "M":
return 100
elif value == "G":
return 1000
elif value == "Q":
return 2500
else:
        raise AsusRouterNotImplementedError(value)
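# Usage sketch covering the mapped letters (unmapped letters raise the
# library's AsusRouterNotImplementedError):
for flag in (None, "X", "M", "G", "Q"):
    print(flag, "->", port_speed(flag))  # None, 0, 100, 1000, 2500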
|
2bb41bf66211724a12bdf392ecf018c71836f42b
| 3,643,654
|
import numpy as np
def convert_flag_frame_to_strings(flag_frame, sep=', ', empty='OK'):
"""
Convert the `flag_frame` output of :py:func:`~convert_mask_into_dataframe`
into a pandas.Series of strings which are the active flag names separated
by `sep`. Any row where all columns are false will have a value of `empty`.
Parameters
----------
flag_frame : pandas.DataFrame
Boolean DataFrame with descriptive column names
sep : str
String to separate column names by
empty : str
String to replace rows where no columns are True
Returns
-------
pandas.Series
Of joined column names from `flag_frame` separated by `sep` if True.
Has the same index as `flag_frame`.
"""
return np.logical_and(flag_frame, flag_frame.columns + sep).replace(
False, '').sum(axis=1).str.rstrip(sep).replace('', empty)
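# Minimal sketch (flag names are illustrative):
import pandas as pd
flags = pd.DataFrame({"STALE VALUES": [True, False],
                      "CLIPPED VALUES": [True, False]})
print(convert_flag_frame_to_strings(flags).tolist())
# ['STALE VALUES, CLIPPED VALUES', 'OK']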
|
fa7f0cc427e4b6e4c703ea2011af59f1bad090ab
| 3,643,655
|
import pandas as pd
# PP_NAMES is the pilot-point column-name list from the surrounding module.
def pp_file_to_dataframe(pp_filename):
    """ read a pilot point file to a pandas DataFrame
Parameters
----------
pp_filename : str
pilot point file
Returns
-------
df : pandas.DataFrame
a dataframe with pp_utils.PP_NAMES for columns
"""
df = pd.read_csv(pp_filename, delim_whitespace=True,
header=None, names=PP_NAMES,usecols=[0,1,2,3,4])
df.loc[:,"name"] = df.name.apply(str).apply(str.lower)
return df
|
777272db75f0e6c7bd1eee0b24d4879bf2ceb66a
| 3,643,656
|
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
# Product and ProductForm come from this app's models and forms modules.
def edit_product(request, product_id):
""" Edit a product in the store """
if not request.user.is_superuser:
messages.error(request, 'Sorry, only store owners can do that.')
return redirect(reverse('home'))
product = get_object_or_404(Product, pk=product_id)
if request.method == 'POST':
form = ProductForm(request.POST, request.FILES, instance=product)
if form.is_valid():
form.save()
messages.success(request, 'Successfully updated product!')
return redirect(reverse('individual_product', args=[product.id]))
else:
messages.error(
request, 'Failed to update product. '
'Please ensure the form is valid.')
else:
form = ProductForm(instance=product)
messages.info(request, f'You are editing {product.name}')
template = 'products/edit_product.html'
context = {
'form': form,
'product': product,
}
return render(request, template, context)
|
0f22ca856ca71e973bd8eed85bba7f54ce3a3464
| 3,643,658
|
def _resolve_target(target, target_frame='icrs'):
"""Return an `astropy.coordinates.SkyCoord` form `target` and its frame."""
if target_frame == 'icrs':
return parse_coordinates(target)
return SkyCoord(target, frame=target_frame)
|
b2b8132ca15b6bcfbb6d67c90abf36760be6a2d1
| 3,643,659
|
import itertools
def iter_fragments(fragiter, start_frag_id = None, stop_frag_id = None):
"""Given a fragment iterator and a start and end fragment id,
return an iterator which yields only fragments within the range.
"""
if start_frag_id and stop_frag_id:
dpred = lambda f: fragment_id_lt(f.fragment_id, start_frag_id)
tpred = lambda f: fragment_id_le(f.fragment_id, stop_frag_id)
return itertools.takewhile(tpred, itertools.dropwhile(dpred, fragiter))
elif start_frag_id and not stop_frag_id:
dpred = lambda f: fragment_id_lt(f.fragment_id, start_frag_id)
return itertools.dropwhile(dpred, fragiter)
elif not start_frag_id and stop_frag_id:
tpred = lambda f: fragment_id_le(f.fragment_id, stop_frag_id)
return itertools.takewhile(tpred, fragiter)
return fragiter
|
a1ab1245a6cb450cdb363a7029147501adf913db
| 3,643,660
|
from bst import BST
def bst_right_imbalanced():
"""Bst that extends right."""
test_bst = BST((1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
return test_bst
|
4cdb45770634c389831057832b33755fe0a8db23
| 3,643,662
|
import time
from functools import wraps
def retry(exception_to_check, tries=4, delay=0.5, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
Args:
exception_to_check (Exception): the exception to check.
may be a tuple of exceptions to check
tries (int): number of times to try (not retry) before giving up
delay (float, int): initial delay between retries in seconds
backoff (int): backoff multiplier e.g. value of 2 will double the delay
each retry
logger (logging.Logger): logger to use. If None, print
"""
def deco_retry(func):
@wraps(func)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return func(*args, **kwargs)
except exception_to_check as exc:
msg = "%s, Retrying in %s seconds..." % (str(exc), mdelay)
                    if logger:
                        logger.warning(msg)
                    else:
                        print(msg)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return func(*args, **kwargs)
return f_retry # true decorator
return deco_retry
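# Usage sketch: a function that fails twice before succeeding (names are
# illustrative).
attempts = {"n": 0}

@retry(ValueError, tries=4, delay=0.01)
def flaky():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise ValueError("transient failure")
    return "ok"

print(flaky())  # 'ok', after two retried failures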
|
8e607104abf1cd5165199b7792f6955084e674cd
| 3,643,663
|
def HESSIAN_DIAG(fn):
"""Generates a function which computes per-argument partial Hessians."""
def h_fn(*args, **kwargs):
args = (args,) if not isinstance(args, (tuple, list)) else tuple(args)
ret = [
jaxm.hessian(
lambda arg: fn(*args[:i], arg, *args[i + 1 :], **kwargs)
)(arg)
for (i, arg) in enumerate(args)
]
return ret
return h_fn
|
01075519f7c3ae052a553bd3911e0447fa8da6ce
| 3,643,664
|
from scipy.spatial import cKDTree
import numpy
def match_xy(x1, y1, x2, y2, neighbors=1):
"""Match x1 & y1 to x2 & y2, neighbors nearest neighbors.
Finds the neighbors nearest neighbors to each point in x2, y2 among
all x1, y1."""
vec1 = numpy.array([x1, y1]).T
vec2 = numpy.array([x2, y2]).T
kdt = cKDTree(vec1)
dist, idx = kdt.query(vec2, neighbors)
m1 = idx.ravel()
m2 = numpy.repeat(numpy.arange(len(vec2), dtype='i4'), neighbors)
    dist = dist.ravel()
m = m1 < len(x1) # possible if fewer than neighbors elements in x1.
return m1[m], m2[m], dist[m]
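# Minimal sketch: match two points against a three-point catalogue.
x1, y1 = numpy.array([0., 1., 2.]), numpy.array([0., 0., 0.])
x2, y2 = numpy.array([0.9, 2.1]), numpy.array([0.1, 0.0])
m1, m2, d = match_xy(x1, y1, x2, y2)
print(m1, m2, d)  # m1 indexes (x1, y1); m2 indexes (x2, y2); d are distances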
|
cd360ee6fc0ec83fad565313f6cbb0e8a4292ca2
| 3,643,665
|
from flask import Flask
# blueprint() is this module's blueprint factory.
def make_doc():
    """ Only used for Sphinx documentation """
doc_app = Flask(__name__)
doc_app.register_blueprint(blueprint())
return doc_app
|
beff9149ceffb04f80071f6a595ef13e72ebc838
| 3,643,666
|