content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def tnaming_Displace(*args):
    """Apply the given location to the shapes of the label and of its sub-labels.

    SWIG-style wrapper: all arguments are forwarded unchanged to the native
    ``_TNaming.tnaming_Displace`` binding.

    :param label:
    :type label: TDF_Label &
    :param aLocation:
    :type aLocation: TopLoc_Location &
    :param WithOld: default value is Standard_True
    :type WithOld: bool
    :rtype: void
    """
    return _TNaming.tnaming_Displace(*args)
|
91d2550b5ecb108a2ddcceb6a3a227e48ab16f24
| 3,636,693
|
def get_capital_np(markets,signals,size,commiRate,climit = 4, wlimit = 2, op=True):
    """Backtest the profit/loss of a signal series using numpy arrays.

    (Translated from the original Chinese docstring: "backtest label P&L with
    numpy; ``op`` indicates whether matching is delayed by one tick".)

    Parameters (assumed from usage -- TODO confirm against callers):
        markets: 2D array; column 0 and column 1 are two price sides
            (presumably ask/bid or open/close) -- verify with the data source.
        signals: 1D array of trade signals; the sign gives the direction.
        size: contract size multiplier applied to price differences.
        commiRate: commission rate applied to the traded prices.
        climit: stop-loss distance in price units (default 4).
        wlimit: take-profit distance in price units (default 2).
        op: if True, the current tick's signal is acted on immediately;
            if False, the previous tick's signal is used (one-tick delay).

    Returns:
        (pnls, actions): per-tick realized P&L and per-tick action codes
        (+1 buy, -1 sell, 0 nothing).
    """
    postions = np.zeros(len(signals))
    actions = np.zeros(len(signals))
    costs = np.zeros(len(signals))
    pnls = np.zeros(len(signals))
    lastsignal = 0
    lastpos = 0
    lastcost = 0
    num = 0
    for num in range(1,len(signals)):
        # Carry the previous position/cost forward by default.
        postions[num] = lastpos
        actions[num] = 0
        costs[num] = lastcost
        pnls[num] = 0
        # Stop-loss / take-profit exit for a long position.
        if lastpos > 0 and \
            (markets[num,1]<=lastcost-climit or markets[num,1]>=lastcost+wlimit):
            postions[num] = 0
            actions[num] = -1
            costs[num] = 0
            fee = (markets[num,1]+lastcost)*size*commiRate
            pnls[num] = (markets[num,1]-lastcost)*size-fee
        # Stop-loss / take-profit exit for a short position.
        elif lastpos < 0 and \
            (markets[num,0]>=lastcost+climit or markets[num,0]<=lastcost-wlimit):
            postions[num] = 0
            actions[num] = 1
            costs[num] = 0
            fee = (markets[num,0]+lastcost)*size*commiRate
            pnls[num] = (lastcost-markets[num,0])*size-fee
        # Open a new position.
        if op:
            # No delay: react to the current tick's signal.
            lastsignal = signals[num]
        if lastsignal > 0 and lastpos == 0:
            postions[num] = 1
            actions[num] = 1
            costs[num] = markets[num,0]
        elif lastsignal < 0 and lastpos == 0:
            postions[num] = -1
            actions[num] = -1
            costs[num] = markets[num,1]
        lastpos = postions[num]
        lastcost = costs[num]
        # When op is False this end-of-loop assignment makes the next
        # iteration act on the previous tick's signal (the one-tick delay).
        lastsignal = signals[num]
    return pnls,actions
|
4604bb16c298e4ea32d2fca5f8332e5d21e4aada
| 3,636,694
|
def slicename_to_hostname(vs_name):
    """Convert a vserver slice name into a canonical FQDN.

    A slice name has the form ``<some site>_<some name>``; the FQDN places
    the user-chosen part(s) before the site name and appends the system
    hostname.

    Example:
        If vs_name is 'mlab_utility' and the system hostname is
        'mlab4.nuq01.measurement-lab.org', the result is
        'utility.mlab.mlab4.nuq01.measurement-lab.org'.

    Args:
        vs_name: str, name of a vserver slice, e.g. mlab_utility.

    Returns:
        str, the canonical FQDN based on system hostname and slice name.
    """
    site, separator, chosen = vs_name.partition('_')
    if not separator:
        # No underscore at all: use the name verbatim.
        prefix = vs_name
    else:
        # The part before the first '_' is the PlanetLab site name; move it
        # behind the user-chosen part(s).
        prefix = '.'.join(chosen.split('_') + [site])
    return '%s.%s' % (prefix, _root_hostname)
|
7f8b6ff17ab402cfa89ee732f1e4c61ddffee7c2
| 3,636,695
|
from typing import Dict
def swim_for_a_day(life_counts: Dict[int, int]):
    """Advance the shoal by one day and return the new life_counts.

    Every timer is decremented; fish whose timer would reach -1 procreated
    during the day: each produces an offspring with an 8-day timer and is
    itself reset to 6 days, merging with any fish that just ticked down
    from 7 days.
    """
    aged = {timer - 1: count for timer, count in life_counts.items()}
    spawners = aged.pop(-1, None)
    if spawners is not None:
        aged[8] = spawners
        aged[6] = spawners + aged.get(6, 0)
    return aged
|
3d5d3f48942a5a1f4eba3100e903df592d933e23
| 3,636,696
|
def page_not_found(error):
    """Return the generic not-found message.

    Args:
        error: the error object supplied by the framework; it is not inspected.

    Returns:
        str: a fixed, human-readable message.
    """
    message = "Unable to find Distill."
    return message
|
ed764c2c2814487c33f9945b17b85a234ae45645
| 3,636,697
|
def dict_to_obj(our_dict):
    """Rebuild a custom object from a dict carrying class metadata.

    The dict's "__module__" and "__class__" entries identify the type to
    instantiate; the remaining keys become constructor keyword arguments.
    A dict without "__class__" metadata is returned unchanged.

    Note: the metadata keys are popped, so the input dict is mutated.
    """
    if "__class__" not in our_dict:
        return our_dict
    # pop() strips the metadata so only constructor arguments remain.
    class_name = our_dict.pop("__class__")
    module_name = our_dict.pop("__module__")
    # The module name is only known at runtime, hence the dynamic __import__.
    module = __import__(module_name, globals(), locals(), [class_name])
    target_class = getattr(module, class_name)
    # Dictionary unpacking supplies the instance arguments.
    return target_class(**our_dict)
|
4ad11ff943d2055d37643b6d7058175f504f8271
| 3,636,698
|
def display_fips( collection_of_fips, fig, **kwargs ):
    """
    Method that is very similar to :py:meth:`display_fips_geom <covid19_stats.engine.viz.display_fips_geom>`, except this *also* displays the FIPS code of each county. For example, for `Rhode Island`_, this is.
    .. _viz_display_fips_rhodeisland:
    .. figure:: /_static/viz/viz_display_fips_rhodeisland.png
    :width: 100%
    :align: left
    Demonstration of this method showing the counties in `Rhode Island`_. The FIPS code of each county is shown in red. One can extract the patches in this object to manually change the colors of these county polygons.
    Here are the arguments.
    :param collection_of_fips: can be a :py:class:`list`, :py:class:`set`, or other iterable of FIPS codes to visualize and label.
    :param fig: the :py:class:`Figure <matplotlib.figure.Figure>` onto which to draw this :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`.
    :rtype: :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`
    .. _`Rhode Island`: https://en.wikipedia.org/wiki/Rhode_Island
    """
    # Boundary polygons for each requested FIPS code (project helper).
    bdict = core.get_boundary_dict( collection_of_fips )
    # Single bounding box covering every polygon of every county.
    bbox = gis.calculate_total_bbox( chain.from_iterable( bdict.values( ) ) )
    ax = create_and_draw_fromfig( fig, bbox, **kwargs )
    # Default matplotlib blue, reduced to 25% opacity for the county fill.
    fc = list( to_rgba( '#1f77b4' ) )
    fc[-1] = 0.25
    for fips in sorted( bdict ):
        for shape in bdict[ fips ]:
            # shape is assumed to be an (N, 2) lon/lat array -- TODO confirm
            # against core.get_boundary_dict.
            poly = Polygon(
                shape, closed = True,
                edgecolor = 'k', linewidth = 2.0, linestyle = 'dashed',
                facecolor = tuple( fc ), alpha = 1.0, transform = ccrs.PlateCarree( ) )
            ax.add_patch( poly )
            # Label each county with its FIPS code at the polygon's mean point.
            lng_cent = shape[:,0].mean( )
            lat_cent = shape[:,1].mean( )
            ax.text(
                lng_cent, lat_cent, fips, fontsize = 10, fontweight = 'bold', color = 'red',
                transform = ccrs.PlateCarree( ) )
    return ax
|
ea609e19f4f42032a0533c12ea8ebc9ded6412aa
| 3,636,699
|
import itertools
import re
def _apply_constraints(password_hash, size, is_non_alphanumeric):
    """
    Fiddle with the password a bit after hashing it so that it will
    get through most website filters. We require one upper and lower
    case, one digit, and we look at the user's password to determine
    if there should be at least one alphanumeric or not.
    """
    # Reserve the last 4 characters for the required-class padding below.
    starting_size = 0 if size < 4 else size - 4
    result = password_hash[:starting_size]
    # NOTE: `extras` is ONE shared iterator (leftover hash bytes, then 0s
    # forever); both `extra_chars` and `next_between` consume from it, so
    # the exact order of next() calls below is part of the algorithm --
    # do not reorder these statements.
    extras = itertools.chain((ord(ch) for ch in password_hash[starting_size:]),
                             itertools.repeat(0))
    extra_chars = (chr(ch) for ch in extras)
    def next_between(start, end):
        # Map the next extra byte into the inclusive [start, end] char range.
        interval = ord(end) - ord(start) + 1
        offset = next(extras) % interval
        return chr(ord(start) + offset)
    # Guarantee one character from each class: upper, lower, digit.
    chars_ranges = (("A", "Z"), ("a", "z"), ("0", "9"))
    for first, last in chars_ranges:
        any_of_chars = re.compile("[{}-{}]".format(first, last))
        if any_of_chars.search(result):
            # Class already present: append a neutral extra character.
            result += next(extra_chars)
        else:
            # Class missing: append a character from that class.
            result += next_between(first, last)
    non_word = re.compile(r"\W")
    if non_word.search(result) and is_non_alphanumeric:
        result += next(extra_chars)
    else:
        result += "+"
    # If non-alphanumerics are forbidden, replace them one at a time.
    while non_word.search(result) and not is_non_alphanumeric:
        result = non_word.sub(next_between("A", "Z"), result, 1)
    # Rotate the string by a hash-derived amount, then strip NUL padding.
    flip_place = next(extras) % len(result)
    result = result[flip_place:] + result[:flip_place]
    return result.strip("\x00")
|
8757c3197052fb1606a95dfa417a13ba833cdb43
| 3,636,700
|
def SplitLineRecursive(linepts,i,j,THRESHOLD=5.0,ds_min=50.0):
    """
    Choose best point at which to split a line to minimize total reprojection error

    Parameters (assumed from indexing -- TODO confirm with callers):
        linepts: 2 x N array of coordinates; each column is a point.
        i, j: column indices delimiting the segment under consideration.
        THRESHOLD: maximum tolerated reprojection error before splitting.
        ds_min: minimum arc length below which a straight segment is not split.

    Returns the index of the chosen split point, or ``j`` when the segment
    is straight enough and too short to split.

    NOTE(review): despite the name, this function does not recurse; the
    recursion presumably happens in a caller -- verify.
    """
    # Maximum deviation of the points from the straight chord i -> j.
    max_err = np.max(ProjectionError(np.stack((linepts[:,i],linepts[:,j])).T, linepts[:,i:j]))
    if max_err < THRESHOLD:
        # Segment is straight enough; split only if it is long enough...
        ds = np.cumsum(np.sqrt(np.sum(np.diff(linepts[:,i:j])**2,axis=0)))
        if ds[-1] > ds_min:
            # ...and then split near the arc-length midpoint.
            k = i + np.argmin((ds - ds[-1]/2.)**2) + 1
            return k
        else:
            return j
    # Otherwise evaluate every interior split point and keep the one that
    # minimizes the combined mean per-point error of the two sub-chords.
    errors1 = np.zeros(j-(i+1))
    errors2 = np.zeros(j-(i+1))
    max_errors1 = np.zeros(j-(i+1))
    max_errors2 = np.zeros(j-(i+1))
    for k in range(i+1,j):
        l1 = np.stack((linepts[:,i],linepts[:,k])).T
        l2 = np.stack((linepts[:,k],linepts[:,j])).T
        errors1[k-i-1] = np.sum(ProjectionError(l1, linepts[:,i+1:k])) / (k-i)
        errors2[k-i-1] = np.sum(ProjectionError(l2, linepts[:,k+1:j])) / (j-k)
        max_errors1[k-i-1] = np.max(ProjectionError(l1, linepts[:,i:k]))
        max_errors2[k-i-1] = np.max(ProjectionError(l2, linepts[:,k:j]))
    k = i+1 + np.argmin(errors1 + errors2)
    # max_err1 = np.max(max_errors1)
    # max_err2 = np.max(max_errors2)
    return k
|
fe17a756d588468b04db999f72274d459fec0d65
| 3,636,701
|
def create_report() -> FlaskResponse:
    """Create a new report.

    Note: This is the existing implementation, currently used for the v1
    endpoint.

    Returns:
        FlaskResponse: details of the report just created, or a list of
        errors with the corresponding HTTP status code.
    """
    logger.info("Creating a new report")
    try:
        new_report_name = create_report_job()
        details = get_reports_details(new_report_name)
        return created(reports=details)
    except Exception as exc:
        # Log the unexpected failure with its type, then report a 500.
        message = f"{ERROR_UNEXPECTED} ({type(exc).__name__})"
        logger.error(message)
        logger.exception(exc)
        return internal_server_error(message)
|
dbe64f79e05dc67932beac0aa06b6cd6c4f998c5
| 3,636,702
|
def sample_exercise():
    """Create and persist a sample exercise record."""
    defaults = {
        'name': 'exercise name',
        'duration': 10,
        'calories': 10,
    }
    return ExerciseModel.objects.create(**defaults)
|
c4c9424aa987a8cb2d0fa493c63920b438ae5b73
| 3,636,704
|
def _der_kinetic_contrib(centered, other):
    """Accumulate d<g_i|T|g_j>/dR contributions for one direction of the pair.

    `centered` is the contracted basis function sitting on the displaced atom
    (its primitives are differentiated); `other` supplies the partner
    primitives. Returns the (x, y, z) derivative components.
    """
    dX,dY,dZ = 0.0,0.0,0.0
    for upbf in other.prims():
        for vpbf in centered.prims():
            alpha = vpbf.exp()
            l,m,n = vpbf.powers()
            origin = vpbf.origin()
            coefs = upbf.coef()*vpbf.coef()
            # x component: d/dX of a Gaussian raises (terma) and, when l>0,
            # lowers (termb) the x angular-momentum power.
            v = PGBF(alpha,origin,(l+1,m,n))
            v.normalize()
            terma = sqrt(alpha*(2.0*l+1.0))*coefs*v.kinetic(upbf)
            if l>0:
                v.reset_powers(l-1,m,n)
                v.normalize()
                termb = -2*l*sqrt(alpha/(2.0*l-1.0))*coefs*v.kinetic(upbf)
            else: termb = 0.0
            dX += terma + termb
            # y component (same raise/lower structure on m).
            v.reset_powers(l,m+1,n)
            v.normalize()
            terma = sqrt(alpha*(2.0*m+1.0))*coefs*v.kinetic(upbf)
            if m>0:
                v.reset_powers(l,m-1,n)
                v.normalize()
                termb = -2*m*sqrt(alpha/(2.0*m-1.0))*coefs*v.kinetic(upbf)
            else: termb = 0.0
            dY += terma + termb
            # z component (same raise/lower structure on n).
            v.reset_powers(l,m,n+1)
            v.normalize()
            terma = sqrt(alpha*(2.0*n+1.0))*coefs*v.kinetic(upbf)
            if n>0:
                v.reset_powers(l,m,n-1)
                v.normalize()
                termb = -2*n*sqrt(alpha/(2.0*n-1.0))*coefs*v.kinetic(upbf)
            else: termb = 0.0
            dZ += terma + termb
    return dX,dY,dZ

def der_kinetic_integral(a,bfi,bfj):
    """
    The kinetic energy operator does not depend on the atomic position so we only
    have to consider differentiating the Gaussian functions. There are 4 possible
    cases we have to evaluate
    Case 1: Neither of the basis functions depends on the position of atom A which gives:
    dT_ij/dXa = 0
    Cases 2 and 3: Only one of the basis functions depends the position of atom A which
    gives us either of the following possible integrals to evaluate
    dT_ij/dXa = integral{dr dg_i/dXa T g_j }
    dT_ij/dXa = integral{dr g_i T dg_j/dXa }
    Case 4: Both of the basis functions depend on the position of atom A which gives the
    following integral to evaluate
    dT_ij/dXa = integral{dr dg_i/dXa T g_j + g_i T dg_j/dXa }

    The two branches below are symmetric (the original implementation
    duplicated the same 45-line loop with bfi/bfj swapped); the shared loop
    now lives in _der_kinetic_contrib. Case 1 adds nothing, cases 2/3 fire
    one branch, case 4 fires both.
    """
    dTij_dXa,dTij_dYa,dTij_dZa = 0.0,0.0,0.0
    #we use atom ids on the CGBFs to evaluate which of the 4 above cases we have
    #bfi is centered on atom a
    if bfi.atid==a:
        dx,dy,dz = _der_kinetic_contrib(bfi, bfj)
        dTij_dXa += dx
        dTij_dYa += dy
        dTij_dZa += dz
    #bfj is centered on atom a
    if bfj.atid==a:
        dx,dy,dz = _der_kinetic_contrib(bfj, bfi)
        dTij_dXa += dx
        dTij_dYa += dy
        dTij_dZa += dz
    return dTij_dXa,dTij_dYa,dTij_dZa
|
5c84eea3fcd1f44bd41a9c14d0c104d9b3af0390
| 3,636,705
|
import torch
import datasets
def get_celeba():
    """Get and preprocess the CelebA dataset.

    Images are resized to 64x64 and converted to tensors. Only 18 of the 40
    attribute labels are kept (as described in Appendix C.1), and the
    official 'train' and 'valid' splits are merged into the training set.

    Returns:
        (train, test): lists of (image_tensor, selected_label_tensor) pairs.
    """
    transform = transforms.Compose([
        transforms.Resize((64, 64)),
        transforms.ToTensor()])
    # Boolean mask selecting the 18/40 attributes used in Appendix C.1.
    kept_attrs = [1, 3, 5, 8, 9, 11, 12, 13, 15, 18,
                  20, 24, 26, 28, 31, 33, 38, 39]
    mask = torch.zeros(40, dtype=torch.bool)
    mask[kept_attrs] = True
    splits = {
        name: datasets.CelebA('./data', split=name, transform=transform,
                              download=True)
        for name in ('train', 'valid', 'test')
    }
    # 'train' + 'valid' together form the training set.
    train = [(img, label[mask]) for img, label in chain(splits['train'],
                                                        splits['valid'])]
    test = [(img, label[mask]) for img, label in splits['test']]
    return train, test
|
05789f37a9fa0c360ae4d5a2cdcfe5f9a2a4c440
| 3,636,706
|
def waa_adjust_baseline(rsl, baseline, wet, waa_max, delta_t, tau):
    """Adjust a baseline for wet-antenna attenuation (Schleiss model).

    Parameters
    ----------
    rsl : iterable of float
        Time series of received signal level
    baseline : iterable of float
        Time series of baseline for rsl
    wet : iterable of int or iterable of float
        Time series with wet/dry classification information.
    waa_max : float
        Maximum value of wet antenna attenuation
    delta_t : float
        Parameter for wet antenna attenuation model
    tau : float
        Parameter for wet antenna attenuation model

    Returns
    -------
    iterable of float
        Baseline adjusted by the wet antenna attenuation.
        (The previous docstring also promised the waa series itself, but
        only the adjusted baseline has ever been returned.)
    """
    # Accept pandas Series transparently by unwrapping to plain ndarrays.
    # isinstance (rather than `type(x) ==`) also covers Series subclasses.
    if isinstance(rsl, pd.Series):
        rsl = rsl.values
    if isinstance(baseline, pd.Series):
        baseline = baseline.values
    if isinstance(wet, pd.Series):
        wet = wet.values
    # The numba kernel requires float64 arrays.
    rsl = rsl.astype(np.float64)
    baseline = baseline.astype(np.float64)
    wet = wet.astype(np.float64)
    waa = _numba_waa_schleiss(rsl, baseline, waa_max, delta_t, tau, wet)
    return baseline + waa
|
80bdec1a9cdd5dcf22008a6efdc08c5a7ae9ec1f
| 3,636,707
|
def linear_warmup_decay(learning_rate, warmup_steps, num_train_steps):
    """ Applies linear warmup of learning rate from 0 and decay to 0.

    Builds, inside the current PaddlePaddle fluid main program, a scheduled
    learning-rate variable that ramps linearly from 0 to `learning_rate`
    over `warmup_steps` steps, then decays linearly (polynomial decay with
    power=1.0) back to 0 at `num_train_steps`.

    :param learning_rate: peak learning rate reached at the end of warmup.
    :param warmup_steps: number of linear-warmup steps.
    :param num_train_steps: total number of training steps (decay horizon).
    :return: the global scheduled learning-rate variable.
    """
    with fluid.default_main_program()._lr_schedule_guard():
        # Persistable scalar holding the current learning rate each step.
        lr = fluid.layers.tensor.create_global_var(
            shape=[1],
            value=0.0,
            dtype='float32',
            persistable=True,
            name="scheduled_learning_rate")
        # Global step counter maintained by the framework.
        global_step = fluid.layers.learning_rate_scheduler._decay_step_counter(
        )
        with fluid.layers.control_flow.Switch() as switch:
            with switch.case(global_step < warmup_steps):
                # Warmup phase: scale linearly with training progress.
                warmup_lr = learning_rate * (global_step / warmup_steps)
                fluid.layers.tensor.assign(warmup_lr, lr)
            with switch.default():
                # Decay phase: linear decay (power=1.0) to 0 at num_train_steps.
                decayed_lr = fluid.layers.learning_rate_scheduler.polynomial_decay(
                    learning_rate=learning_rate,
                    decay_steps=num_train_steps,
                    end_learning_rate=0.0,
                    power=1.0,
                    cycle=False)
                fluid.layers.tensor.assign(decayed_lr, lr)
        return lr
|
727d303adf144407e45a013ca36b0bac592bc522
| 3,636,708
|
def space2():
    """Create a Space with two real dimensions, each uniform on [0, 1]."""
    space = Space()
    for dim_name in ("lr", "weight_decay"):
        space.register(Real(dim_name, "uniform", 0, 1))
    return space
|
acc6bc1529fdc26c6e3c8140c89c0db3d7703ea7
| 3,636,709
|
def calculate_thresh(twindow, pctile, skipna):
    """Calculate the climatological threshold for one grid cell at a time.

    Parameters
    ----------
    twindow: xarray DataArray
        Stacked array timeseries with new 'z' dimension representing
        a window of width 2*w+1
    pctile: int
        Threshold percentile used to detect events
    skipna: bool
        If True percentile and mean function will use skipna=True.
        Using skipna option is much slower

    Returns
    -------
    thresh_climYear: xarray DataArray
        Climatological threshold
    """
    quantile_level = pctile / 100.
    by_doy = twindow.groupby('doy')
    thresh_climYear = by_doy.quantile(quantile_level, dim='z', skipna=skipna)
    # 29 Feb (doy 60) is replaced by the mean over 28 Feb - 1 Mar.
    not_feb29 = thresh_climYear.doy != 60
    thresh_climYear = thresh_climYear.where(not_feb29, feb29(thresh_climYear))
    return thresh_climYear.chunk({'doy': -1})
|
37228003d1c564205067e6d71a78ab83ffdeaf2f
| 3,636,710
|
def edit_car(item_id):
    """
    Edit item
    :param item_id:
    :return mix:
    """
    owner = get_user_by_id(session['uid'])
    car = get_item_by_id(item_id)
    # Only the item's author may edit it.
    if int(session['uid']) != int(car.author):
        flash('You don\'t have permission to edit it.', 'error')
        return redirect('/profile', 302)
    token = owner.generate_auth_token(3600)
    if request.method == 'POST' and request.form['csrf_token'] == csrf_token:
        try:
            # Clean the submitted fields; clean() raising TypeError signals
            # a missing/empty value.
            updated_fields = {
                'description': clean(request.form['description']),
                'title': clean(request.form['title']),
                'model': clean(request.form['model']),
                'price': clean(request.form['price']),
                'brand': clean(request.form['brand']),
                'author': session['uid'],
            }
        except TypeError:
            flash('fields can\'t be empty', 'error')
            return render('catalog/new_car.html',
                          brands=brands, csrf=csrf_token)
        # Persist the update, confirm, and send the user back to the profile.
        item = update_item(updated_fields, item_id)
        flash('Record "%s" was successfully updated' % item.title, 'success')
        return redirect('/profile', 302)
    return render('catalog/edit_car.html',
                  brands=brands,
                  car=car.serialize,
                  token=token,
                  user=owner.serialize,
                  csrf_token=csrf_token)
|
498b9b07292cf2de3ac8a929f624d0e93ee793b7
| 3,636,711
|
def replace_ensembl_ids(expression_df, gene_id_mapping):
    """
    Replaces ensembl gene ids with hgnc symbols
    Arguments
    ---------
    expression_df: df
        gene expression data matrix (sample x gene)
    gene_id_mapping: df
        Dataframe mapping ensembl ids (used in DE_stats_file) to hgnc symbols,
        used in Crow et. al.
    NOTE:
    -----
    This function is deprecated due to large memory usage: when `expression_df`
    is a large dataframe, manipulating it inside the memory becomes very slow
    (and sometimes even impossible) due to large memory consumption.
    The same functionality has been refactored into `get_renamed_columns()` and
    `map_recount2_data()` functions in this module.
    THIS FUNCTION IS KEPT AS A REFERENCE ONLY.
    """
    # Some columns are duplicates, for example:
    # (ENSG00000223773.7, ENSG00000223773) --> CD99P1
    # (ENSG00000124334.17, ENSG00000124334) --> IL9R
    # We keep the first occurence of duplicated ensembl ids
    updated_mapping = gene_id_mapping.loc[
        ~gene_id_mapping.index.duplicated(keep="first")
    ]
    # Same ensembl ids are mapped to different gene symbol twice (CCL3L1, CCL3L3)
    # ENSG00000187510.7 ENSG00000187510 C12orf74
    # ENSG00000187510.7 ENSG00000187510 PLEKHG7
    # Manually mapping them based on what is found on ensembl site
    manual_mapping = {
        "ENSG00000187510.7": "PLEKHG7",
        "ENSG00000230417.11": "LINC00595",
        "ENSG00000255374.3": "TAS2R45",
        "ENSG00000276085.1": "CCL3L1",
    }
    # Apply manual mappings to `updated_mapping`
    # NOTE(review): this chained assignment (`.loc[id].attr = ...`) may write
    # to a temporary copy (pandas SettingWithCopy) -- verify the values
    # actually stick; `.loc[id, "hgnc_symbol"] = ...` would be the safe form.
    for ensembl_id, gene_symbol in manual_mapping.items():
        updated_mapping.loc[ensembl_id].hgnc_symbol = gene_symbol
    # Remove paralogs.
    # Some ensembl ids are paralogs (for example, "geneA" and "geneA_PAR_Y").
    # They map to the same hgnc symbol. Homologous sequences are paralogous
    # if they were separated by a gene duplication event: if a gene in an
    # organism is duplicated to occupy two different positions in the same
    # genome, then the two copies are paralogous.
    updated_expression_df = expression_df.iloc[
        :, ~expression_df.columns.str.contains("PAR_Y")
    ]
    # Replace ensembl ids with gene symbol
    updated_expression_df.columns = updated_expression_df.columns.map(
        updated_mapping["hgnc_symbol"]
    )
    # Remove columns whose mapped ensembl id is an empty string
    updated_expression_df = updated_expression_df.iloc[
        :, updated_expression_df.columns != ""
    ]
    # Remove columns whose mapped ensembl id is `NaN`
    updated_expression_df = updated_expression_df.iloc[
        :, updated_expression_df.columns.notnull()
    ]
    return updated_expression_df
|
db21341c337481f897da47e482a6667b3e4b9c8e
| 3,636,712
|
async def get_latency(ctx: Context) -> dict[str, str]:
    """
    Get the bot's websocket latency and the database round-trip latency.

    Parameters
    ----------
    ctx : Context
        The context.

    Returns
    -------
    dict[str, str]
        Keys 'bot' and 'database'; values formatted like '42ms'.
    """
    now = perf_counter()
    collection = ctx.bot.db['test']['TESTS']
    # One timed round trip: insert the probe document the first time,
    # read it back on subsequent calls.
    if await collection.find_one({'_id': PAYLOAD['_id']}) is None:
        await collection.insert_one(PAYLOAD)
    else:
        await collection.find_one({'_id': PAYLOAD['_id']})
    bot_latency = f'{round(ctx.bot.latency * 1000)}ms'
    # BUG FIX: perf_counter() returns seconds; convert to milliseconds
    # before rounding (previously whole seconds were rounded, so this was
    # almost always reported as '0ms').
    database_latency = f'{round((perf_counter() - now) * 1000)}ms'
    data: dict[str, str] = {
        'bot': bot_latency,
        'database': database_latency
    }
    return data
|
f42e8d3456a72b9b9b6520a2c60b777d74924cd3
| 3,636,713
|
def ssl_allowed(fn):
    """
    Decorator - mark a route as allowing SSL without requiring it, so it can
    be served over both http and https.

    NOTE: This must go BEFORE the route!
    """
    setattr(fn, 'ssl_allowed', True)
    return fn
|
d8a22ed69a356189bca69a08516fd0a1187e4866
| 3,636,714
|
def fold_with_enum_index(xtypes, x):
    """
    see MixedIntegerContext.fold_with_enum_index
    """
    x = np.atleast_2d(x)
    folded = np.zeros((x.shape[0], len(xtypes)))
    col = 0
    for out_idx, xtyp in enumerate(xtypes):
        if xtyp in (FLOAT, INT):
            # Continuous/integer variables map through one-to-one.
            folded[:, out_idx] = x[:, col]
            col += 1
        elif isinstance(xtyp, tuple) and xtyp[0] == ENUM:
            # Collapse the one-hot block back to its category index.
            one_hot_block = x[:, col : col + xtyp[1]]
            folded[:, out_idx] = np.argmax(one_hot_block, axis=1)
            col += xtyp[1]
        else:
            _raise_value_error(xtyp)
    return folded
|
42a2385f591ac3349d9a7c25870adb23eb0a8fe8
| 3,636,717
|
import redis
def unlock(arguments):
    """Unlock the database."""
    redis_url = coil.utils.ask("Redis URL", "redis://localhost:6379/0")
    connection = redis.StrictRedis.from_url(redis_url)
    # Clearing the flag (0) releases the site lock.
    connection.set('site:lock', 0)
    print("Database unlocked.")
    return 0
|
859ec2ec159529ab5cb5e05c32703a3164666e68
| 3,636,718
|
from typing import cast
def filter_atom_tokens(entity: SerializableEntity) -> bool:
    """
    Return True iff this entity is an atom token. When locating tokens for
    equations, only atom tokens are detected — affix tokens (arrows, hats)
    are skipped, because affixes will be colorized by wrapping them in
    colorization commands.
    """
    return cast(SerializableToken, entity).type_ == "atom"
|
42c60615a7e7c87dee40d2326d5e41518644ac88
| 3,636,719
|
def xmp_extract(fns, type_map):
    """xmp_extract

    Extract raw XMP metadata from the given files into a DataFrame.

    :param fns: list of file paths to read XMP metadata from.
    :param type_map: mapping describing the expected type of each XMP field.
    :return: DataFrame with one typed column per field plus an 'fn' column.
    """
    logger.info("Extracting raw XMP data.")
    func = partial(xmp_to_vec, type_map=type_map)
    # NOTE(review): the result of this call is discarded -- possibly a
    # warm-up / fail-fast check on the first file, or leftover debugging;
    # confirm before removing.
    xmp_to_vec(fns[0], type_map=type_map)
    # Parallel extraction with a progress bar (2 worker processes).
    # NOTE(review): "imap_unordered" suggests results may come back out of
    # order, yet `df['fn'] = fns` below assumes input order -- verify that
    # imap_unordered_bar actually preserves ordering.
    xmp_data = imap_unordered_bar(func, fns, n_proc=2)
    xmp_data = pd.DataFrame(xmp_data)
    # convert the data types
    data_fields, data = convert_types(xmp_data, type_map)
    df = pd.DataFrame(data).transpose()
    df.columns = data_fields
    df['fn'] = fns
    return df
|
413d179dd5dd8579e12ffb648f05802e1ff7501e
| 3,636,721
|
import textwrap
def fisbUnavailable(db):
    """Create string containing any FIS-B Unavailable messages.

    Args:
        db (object): Handle to database connection.

    Returns:
        str: Containing any FIS-B Unavailable information. Empty string
        when reporting is disabled or there are no messages; otherwise
        the result starts with a newline.
    """
    # Idiomatic truthiness test (was `== False`).
    if not SHOW_UNAVAILABLE:
        return ''
    # Collect one wrapped entry per message; join once at the end instead
    # of repeated string concatenation.
    entries = []
    for r in db.MSG.find({'type': 'FIS_B_UNAVAILABLE'},{'contents': 1, 'centers': 1}):
        centerList = ','.join(r['centers'])
        centerStr = ' [' + centerList + ']'
        fisbEntry = r['contents'] + centerStr
        entries.append(textwrap.fill(fisbEntry, 78, subsequent_indent='  ') + '\n')
    if not entries:
        return ''
    # Leading newline separates this section from preceding output.
    return '\n' + ''.join(entries)
|
319a1477c0873741d7c67550b9c27d64f2707c73
| 3,636,722
|
def rotate_file(filename, copy=False):
    """
    Rotate file like logrotate.

    If the given filename already exists, existing backups "filename.n"
    (n = 1, 2, ...) are shifted up by one and the file itself becomes
    "filename.1". A larger n means an older file.

    :param filename: path of the file to rotate.
    :param copy: if True, keep the original file and copy it to
        "filename.1" instead of renaming it.
    :return: the new backup name "filename.1", or None if the file
        does not exist.
    """
    # Nothing to rotate when the file is absent.
    if not os.path.isfile(filename):
        return
    # Build [ [backup_name, n], ... ] from existing "filename.n" files.
    old_list = []
    for f in glob.glob(filename + ".*"):
        suffix = f.replace(filename + ".", "")
        # BUG FIX: `except ValueError, e:` is Python-2-only syntax (a
        # SyntaxError on Python 3); use the py3 `except ValueError:` form.
        try:
            i = int(suffix)
        except ValueError:
            continue
        # Ignore zero-padded suffixes such as "003" (not produced by us).
        if str(i) == suffix:
            old_list.append([f, i])
    # BUG FIX: cmp-style `sort(lambda x, y: ...)` no longer exists on
    # Python 3; sort by the numeric suffix via key=.
    old_list.sort(key=lambda pair: pair[1])
    # Shift backups up by one, oldest (largest n) first so nothing clashes.
    for f, i in reversed(old_list):
        os.rename(f, "%s.%d" % (f[:f.rfind(".")], i+1))
    if copy:
        shutil.copyfile(filename, filename + ".1")
    else:
        os.rename(filename, filename + ".1")
    return filename + ".1"
|
1d2ddbc5153b8b79e4f8130c82fdf34437f4a4d6
| 3,636,724
|
def change_master(host, confirm=False):
    """
    Change to different master host.

    Arguments:
    - host (str): Hostname of the new master to change to.

    Optional arguments:
    - confirm (bool): Acknowledge the execution of this command. Default is 'False'.
    """
    # Refuse to run without explicit acknowledgement.
    if not confirm:
        raise salt.exceptions.CommandExecutionError(
            "This command will replace your current master host to '{:s}' - add parameter 'confirm=true' to continue anyway".format(host))
    # Drop the cached key of the old master, point the minion config at the
    # new one, then restart so the change takes effect.
    result = {}
    result["master_key_removed"] = __salt__["file.remove"]("/etc/salt/pki/minion/minion_master.pub")
    result["config_changed"] = __salt__["file.replace"]("/etc/salt/minion", "^master:.*$", "master: {:s}".format(host))
    result["restart"] = restart()
    return result
|
e24da255ef2c85b18266e3143e31d19d8d4c3136
| 3,636,725
|
import six
def decode_text(s):
    """
    Decode a PDFDocEncoding string to Unicode.

    Adds py3 compatibility to pdfminer's version: byte strings carrying a
    UTF-16BE BOM are decoded as UTF-16BE; everything else is mapped through
    the PDFDocEncoding table element by element.
    """
    # isinstance (not `type(s) ==`) so bytes subclasses are handled too.
    if isinstance(s, bytes) and s.startswith(b'\xfe\xff'):
        return six.text_type(s[2:], 'utf-16be', 'ignore')
    # str elements are single characters (need ord); iterating bytes on
    # py3 already yields ints.
    ords = (ord(c) if isinstance(c, str) else c for c in s)
    return ''.join(PDFDocEncoding[o] for o in ords)
|
9a98160acff455bb77dca6223454a57a0058a418
| 3,636,726
|
def normalize_bound(sig, lb=0, ub=1):
    """
    Rescale a signal linearly so its values span [lb, ub].

    Parameters
    ----------
    sig : ndarray
        Original signal to be normalized.
    lb : int, float, optional
        Lower bound.
    ub : int, float, optional
        Upper bound.

    Returns
    -------
    ndarray
        Normalized signal.
    """
    # Midpoints of the target interval and of the signal's own range.
    target_mid = ub - (ub - lb) / 2
    lo = np.min(sig)
    hi = np.max(sig)
    source_mid = hi - (hi - lo) / 2
    # Scale factor mapping the signal's span onto [lb, ub].
    scale = (ub - lb) / (hi - lo)
    return sig * scale - (source_mid * scale) + target_mid
|
a9f609da88d05f76ce4c244eb516405956d79acb
| 3,636,727
|
import itertools
def get_chisqr3d(res3d):
    """Extract the fit3d result `chisqr` attribute into a 3d volume.

    Args:
        res3d -- 3d numpy array of model.ModelResult; output of fit3d

    Return:
        attr3d -- numpy array (same shape) of chi-square statistics of fit
    """
    shape = res3d.shape
    # Match the output dtype to the attribute's own type (e.g. float).
    attr3d = np.zeros(shape, dtype=type(res3d[0,0,0].chisqr))
    # Copy the attribute cell by cell across all three axes.
    for idx in np.ndindex(*shape):
        attr3d[idx] = res3d[idx].chisqr
    return attr3d
|
e0f0571237a4b79694e14abe1b0376d864b656fd
| 3,636,728
|
def detect_Telephony_SMS_abuse(x) :
    """
    Detect structural use of the Android SMS-sending API.

    @param x : a VMAnalysis instance
    @rtype : a list of formatted strings
    """
    formatted_str = []
    # Find every call site of SmsManager.sendTextMessage(...).
    structural_analysis_results = x.tainted_packages.search_methods("Landroid/telephony/SmsManager","sendTextMessage", ".")
    #structural_analysis_results = x.tainted_packages.search_methods("Lcom/geinimi/c/i","a", ".")
    print (structural_analysis_results)
    """
    # ke added
    print (structural_analysis_results[0])
    print (show_Path(structural_analysis_results))
    print (structural_analysis_results[0].get_name())
    print (structural_analysis_results[0].get_class_name())
    print (structural_analysis_results[0].get_idx())
    print (structural_analysis_results[0].get_descriptor())
    #print (len(structural_analysis_results))
    raw_input()
    """
    # BUG FIX: xrange does not exist in Python 3 (the print() calls above
    # show this module targets py3); use range.
    for result in range(len(structural_analysis_results)) :
        registers = data_flow_analysis(structural_analysis_results, result, x)
        #print (registers)
        #print (result)
        #raw_input(" Y_______Y ")
        """
        if len(registers) > 3 :
            target_phone_number = get_register_value(1, registers)
            sms_message = get_register_value(3, registers)
            local_formatted_str = "This application sends an SMS message '%s' to the '%s' phone number" % (sms_message, target_phone_number)
            if not(local_formatted_str in formatted_str) :
                formatted_str.append(local_formatted_str)
        """
    return formatted_str
|
1d337cc66ea8d536832b2582beabd1985c88a2f2
| 3,636,729
|
import torch
from typing import Optional
from typing import Tuple
def depth_map_to_point_cloud(
    depth_map: torch.Tensor,
    valid_map: Optional[torch.Tensor],
    focal_x: float,
    focal_y: float,
    center_x: float,
    center_y: float,
    trunc_depth_min: Optional[float],
    trunc_depth_max: Optional[float],
    flip_h: bool = True,
    device: Optional[torch.device] = None,
    _validate_args: bool = True
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Generate point clouds from `depth_map`. The generated point clouds are
    in the camera space. X: camera right, Y: camera up, Z: forward (depth).
    The rank of `depth_map` must be at least 4D (b, c, h, w). If not, it is
    interpreted as (h, w), (c, h, w), (b, c, h, w) and (b, ..., h, w) for higher
    ranks.
    Note that the `valid_map` must be a image type as `depth_map`. (h, w), (c, h, w)
    or (b, c, h, w). Note that (b, h, w) is not allowed and will be interpreted as
    (c, h, w) without warnings.
    Args:
        depth_map (torch.Tensor): UNNORMALIZED depth map, which means the range of
            values is [min_depth, max_depth]. torch.float32. The rank must be at
            least 4D (b, c, h, w). If not, it is converted automatically.
        valid_map (torch.Tensor, optional): binary mask to indicate which pixels are
            valid or invalid. Invalid pixels are discard during scattering. torch.bool
        focal_x (float): focal length on x direction.
        focal_y (float): focal length on y direction.
        center_x (float): center coordinate of depth map.
        center_y (float): center coordinate of depth map.
        trunc_depth_min (float): depth below this value is truncated. None to disable.
        trunc_depth_max (float): depth above this value is truncated. None to disable.
        flip_h (bool, optional): whether to flip the horizontal axis. Note that in
            OpenCV format, the origin (0, 0) of an image is at the upper left corner,
            which should be flipped before converting to point cloud. Defaults
            to True.
        device (torch.device, optional): torch device. Defaults to None.
    Returns:
        torch.Tensor: point cloud in shape (..., 3)
        torch.Tensor: mask in shape (..., h, w) indicating the valid area.
    """
    if _validate_args:
        # Convert to tensors and ensure they are on the same device
        depth_map = utils.validate_tensors(depth_map, same_device=device or True)
        # Ensure tensor shape at least 4D (b, ..., h, w)
        depth_map = utils.to_4D_image(depth_map) # (b, c, h, w)
        # Ensure dtypes
        depth_map = depth_map.to(dtype=torch.float32)
        if valid_map is not None:
            valid_map = utils.to_tensor(valid_map, device=depth_map.device)
            valid_map = utils.to_4D_image(valid_map) # (b, c, h, w)
            valid_map = valid_map.to(dtype=torch.bool)
    device = depth_map.device
    # Pixel-grid coordinates with the same shape as depth_map.
    x, y = utils.generate_image_coords(
        depth_map.shape,
        dtype = torch.float32,
        device = device
    ) # same shape as depth_map
    z = depth_map # (..., h, w)
    # (x, y) are image-space pixel coordinates; z carries the metric depth.
    points = torch.stack((x, y, z), dim=-1)
    # Back-project image coordinates into the camera frame (pinhole model,
    # sibling helper in this module; skips re-validation via the flag).
    point_cloud = image_to_camera_space(
        points = points,
        focal_x = focal_x,
        focal_y = focal_y,
        center_x = center_x,
        center_y = center_y,
        flip_h = flip_h,
        height = depth_map.shape[-2],
        _validate_args = False
    ) # (..., h, w, 3)
    # Start with everything valid, then intersect with depth truncation
    # limits and the caller-supplied valid_map.
    valid_area = torch.ones_like(z, dtype=torch.bool) # (..., h, w)
    # Truncate invalid values
    if trunc_depth_max is not None:
        valid_area = torch.logical_and(z <= trunc_depth_max, valid_area)
    if trunc_depth_min is not None:
        valid_area = torch.logical_and(z >= trunc_depth_min, valid_area)
    if valid_map is not None:
        valid_area = torch.logical_and(valid_area, valid_map)
    return point_cloud, valid_area
|
555dc106e8e9075d6f05ba5b221bba3cb8dccb34
| 3,636,730
|
import logging
def symbol_definitions(goto, wkdir, srcdir=None):
    """Symbol definitions appearing in symbol table.

    Source file path names in symbol table are absolute or relative to
    wkdir. If srcdir is given, return only symbols defined in files
    under srcdir.

    :param goto: goto binary whose symbol table is scanned.
    :param wkdir: working directory used to resolve relative paths.
    :param srcdir: optional source root used to filter symbols.
    :return: dict mapping symbol name -> source location.
    """
    wkdir = srcloct.abspath(wkdir)
    # NOTE(review): assumes srcloct.abspath(None) yields a falsy value when
    # srcdir is omitted -- confirm in srcloct.
    srcdir = srcloct.abspath(srcdir)
    symbols = {}
    for dfn in parse_symbol_table(symbol_table(goto), wkdir):
        sym, src, num = dfn['symbol'], dfn['file'], dfn['line']
        # Skip incomplete symbol-table entries.
        if sym is None or src is None or num is None:
            logging.info("Skipping symbol table entry: %s: %s, %s",
                         sym, src, num)
            continue
        # Skip symbols defined outside the requested source tree.
        if srcdir and not src.startswith(srcdir):
            logging.info("Skipping symbol table entry: %s: %s, %s",
                         sym, src, num)
            continue
        srcloc = srcloct.make_srcloc(src, None, num, wkdir, srcdir)
        # First definition wins; conflicting redefinitions only warn.
        if sym in symbols and srcloc != symbols[sym]:
            logging.warning("Skipping redefinition of symbol name: %s", sym)
            logging.warning(" Old symbol %s: file %s, line %s",
                            sym, symbols[sym]["file"], symbols[sym]["line"])
            logging.warning(" New symbol %s: file %s, line %s",
                            sym, srcloc["file"], srcloc["line"])
            continue
        symbols[sym] = srcloc
    return symbols
|
4030fa44407146f3339088fb6a34ff2822410b83
| 3,636,731
|
import re
def get_my_ip() -> str:
    """Return this machine's public IP address as reported by checkip.dyndns.com.

    Performs an HTTP GET against the checkip service and extracts the
    dotted-quad address from the response body.

    Returns:
        str: the public IPv4 address, e.g. "203.0.113.7".

    Raises:
        AttributeError: if the response contains no address (regex mismatch).
    """
    url = "http://checkip.dyndns.com/"
    # Dots are escaped: an unescaped "." would match any character.
    # (Previous annotation said "-> None" although a str is returned.)
    return re.compile(r"Address: (\d+\.\d+\.\d+\.\d+)").search(get(url).text).group(1)
|
0c52ad85ec29a1dfb65f2699b13e66b038fc31a8
| 3,636,733
|
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
    """Mean of the square of single parameter (second moment of its
    posterior distribution).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples; computed from the run when omitted.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Column of ns_run['theta'] holding the parameter.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift by the max before exponentiating to avoid overflow, then
    # normalise into posterior weights.
    weights = np.exp(logw - logw.max())
    weights = weights / weights.sum()
    values = ns_run['theta'][:, param_ind]
    return np.sum(weights * values ** 2)
|
bcb7677eb648ad559e56b853de4f9246da638ff2
| 3,636,734
|
def match_login_url_with(username, default="https://foo.bar/login"):
    """
    Match a given username with the corresponding
    login URL.

    :param username: username of user. type str
    :param default: URL returned for unknown usernames. type str
    :returns URL: login URL for user. type str
    """
    known_urls = {
        "yelluw": "https://yelluw.com/login",
        "Pablo": "https://pablojuan.com/login",
    }
    return matches(known_urls, username, default=default)
|
f6351e978b2010818092bfb3340fde1b84635d32
| 3,636,736
|
def odict_1to1(from_sp, to_sp):
    """
    Filtered flat odict with only 1to1 orthologs.
    """
    forward = odict(from_sp, to_sp)
    backward = odict(to_sp, from_sp)
    # Keep only pairs where each side maps to exactly one ortholog.
    result = {}
    for key, targets in forward.items():
        if len(targets) != 1:
            continue
        target = list(targets)[0]
        if len(backward[target]) == 1:
            result[key] = target
    return result
|
1ad1deca32d883f2bd8637d93b8b1a3578a05a75
| 3,636,737
|
import fnmatch
def find_devices(vendor=None, product=None, serial_number=None, custom_match=None, **kwargs):
    """Find connected USB devices matching certain keywords.

    Wildcards can be used for vendor, product and serial_number.

    :param vendor: name or id of the vendor (manufacturer)
    :param product: name or id of the product
    :param serial_number: serial number.
    :param custom_match: callable returning True or False that takes a device as only input.
    :param kwargs: other properties to match. See usb.core.find
    :return: iterator of matching usb devices
    """
    kwargs = kwargs or {}
    attrs = {}
    # String arguments become wildcard patterns matched against the device
    # descriptor strings; non-string values match the numeric USB ids.
    if isinstance(vendor, str):
        attrs['manufacturer'] = vendor
    elif vendor is not None:
        kwargs['idVendor'] = vendor
    if isinstance(product, str):
        attrs['product'] = product
    elif product is not None:
        kwargs['idProduct'] = product
    if serial_number:
        attrs['serial_number'] = str(serial_number)
    if attrs:
        def cm(dev):
            if custom_match is not None and not custom_match(dev):
                return False
            info = DeviceInfo.from_device(dev)
            for attr, pattern in attrs.items():
                # BUG FIX: `fnmatch` here is the module (``import fnmatch``),
                # so calling it directly raised TypeError; call the function.
                if not fnmatch.fnmatch(getattr(info, attr).lower(), pattern.lower()):
                    return False
            return True
    else:
        cm = custom_match
    return usb.core.find(find_all=True, custom_match=cm, **kwargs)
|
7067b27d9d3c27eabe28150e460661682f02045d
| 3,636,738
|
def get_static_graph(app_name=None, app_dict=None, *args, **kwargs):
    """Build a graph explicitly without any request/user context.

    Thin wrapper around get_graph that passes request=None so the result
    does not depend on the current user.  Extra positional and keyword
    arguments are accepted for signature compatibility but deliberately
    ignored and NOT forwarded.

    :param app_name: forwarded to get_graph unchanged.
    :param app_dict: forwarded to get_graph unchanged.
    """
    return get_graph(app_name=app_name, app_dict=app_dict, request=None)
|
8628f88e88080b18ead39871f3e3f69ba07c09a6
| 3,636,739
|
def downsample_spectrum(ar_wavelength, ar_flux, ar_ivar, scale):
    """Downsample a spectrum by an integer factor using inverse-variance
    weighted flux averaging; trailing samples that do not fill a whole
    bin are discarded.

    :type ar_wavelength: np.ndarray
    :type ar_flux: np.ndarray
    :type ar_ivar: np.ndarray
    :type scale: int
    :return: (np.ndarray, np.ndarray, np.ndarray)
    """
    num_bins = ar_wavelength.size // scale
    usable = num_bins * scale  # drop the trailing partial bin, if any
    wavelength_bins = ar_wavelength[:usable].reshape(num_bins, scale)
    flux_bins = ar_flux[:usable].reshape(num_bins, scale)
    ivar_bins = ar_ivar[:usable].reshape(num_bins, scale)
    binned_wavelength = np.nanmean(wavelength_bins, axis=1)
    binned_ivar = np.nansum(ivar_bins, axis=1)
    # Bins with zero total ivar divide 0/0 and become NaN; silence the warning.
    with np.errstate(invalid='ignore'):
        binned_flux = np.nansum(flux_bins * ivar_bins, axis=1) / binned_ivar
    return binned_wavelength, binned_flux, binned_ivar
|
443a917c02eab7bdfc2c544c9bec431dbe1691ac
| 3,636,740
|
def format_date(date):
    """
    Format date for creation of Twitter URL and Facebook API.

    Renders a datetime object as '%Y-%m-%d', e.g. '2018-01-21'.

    Parameters
    ----------
    date : datetime
        date to be formatted

    Returns
    -------
    str
        date in string representation
    """
    return '{:%Y-%m-%d}'.format(date)
|
d76e81613d2c3b06623cadb30d706c537555ad51
| 3,636,741
|
import base64
def basic_token(username, password):
    """Generate the Authorization token for Resource Orchestrator (SO-ub container).

    Args:
        username (str): the SO-ub username
        password (str): the SO-ub password

    Returns:
        str: the Basic token

    Raises:
        TypeError: if username or password is not a str.
    """
    if not isinstance(username, str):
        raise TypeError("The given type of username is `{}`. Expected str.".format(type(username)))
    if not isinstance(password, str):
        raise TypeError("The given type of password is `{}`. Expected str.".format(type(password)))
    raw = "{}:{}".format(username, password).encode()
    return base64.b64encode(raw).decode()
|
054fccad28d1c18a34d630a664742f77e15ee4fe
| 3,636,742
|
import csv
def read_alias(alias_csv_path):
    """Reads alias.csv at the specified path.
    Then returns a dict mapping from alias to monster id.
    """
    mapping = {}
    with open(alias_csv_path) as alias_csv:
        for alias, monster_id in csv.reader(alias_csv):
            mapping[alias] = int(monster_id)
    return mapping
|
3a3818b81a916b4dd18ca7cab5fbcbe1b4050d03
| 3,636,743
|
async def get_pipeline_run_node_steps(request: web.Request, organization, pipeline, run, node) -> web.Response:
    """get_pipeline_run_node_steps

    Retrieve run node steps details for an organization pipeline.

    :param organization: Name of the organization
    :type organization: str
    :param pipeline: Name of the pipeline
    :type pipeline: str
    :param run: Name of the run
    :type run: str
    :param node: Name of the node
    :type node: str
    """
    # Stub handler: acknowledge the request with an empty 200 response.
    response = web.Response(status=200)
    return response
|
8ad3b987500366d562a5f6f59cc106fe374f50aa
| 3,636,744
|
def query_available_collections(opts: Options) -> pd.DataFrame:
    """Search for the available collections."""
    # Build the GraphQL query and submit it to the configured endpoint.
    graphql_query = create_collections_query()
    server_reply = query_server(opts.web, graphql_query)
    frame = json_properties_to_dataframe(server_reply["collections"])
    print("Available collections:\n", frame)
    return frame
|
f1ce5738739956a4a9a4258f35cdfadd1a75dffc
| 3,636,745
|
from typing import List
from typing import Dict
from typing import Any
from typing import Tuple
def _create_agent_object_list(
    trial_list: List[List[Dict[str, Any]]],
    agent_object_config_list: List[ObjectConfigWithMaterial],
    unit_size: Tuple[float, float]
) -> List[Dict[str, Any]]:
    """Create and return the MCS scene's agent object list using the given
    trial list from the JSON file data.

    The returned list holds a single agent object whose 'shows' entries
    replay the agent's position frame-by-frame across every trial, with
    one extra step reserved between trials (see the EndHabituation note
    below).
    """
    agent_object_list = []
    # Retrieve the agent data from the first frame of the first trial.
    # Assume only one agent and the agent will never change shape/color.
    # NOTE(review): json_agent appears to be [coords, radius, ...] — confirm
    # against the JSON schema.
    json_agent = trial_list[0][0]['agent']
    json_coords = json_agent[0]
    json_radius = json_agent[1]
    json_size = [json_radius * 2, json_radius * 2]
    # Create the MCS agent object.
    config_with_material = agent_object_config_list[0]
    agent_object = _create_object(
        'agent_',
        config_with_material.object_type,
        config_with_material.material,
        [config_with_material.center_y, config_with_material.scale_y],
        [config_with_material.scale_x, config_with_material.scale_z],
        json_coords,
        json_size,
        unit_size
    )
    agent_object[tags.SCENE.UNTRAINED_SHAPE] = config_with_material.untrained
    agent_object_list.append(agent_object)
    # Remove the agent's first appearance (we will override it later).
    agent_object['shows'] = []
    agent_object['boundsAtStep'] = []
    # Add data for the agent's movement across the frames to each step.
    step = 0
    for trial in trial_list:
        for frame in trial:
            json_agent = frame['agent']
            json_coords = json_agent[0]
            json_radius = json_agent[1]
            json_size = [json_radius * 2, json_radius * 2]
            # Move the agent to its new position for the step.
            agent_object['shows'].append(_create_show(
                step,
                agent_object['configHeight'],
                agent_object['configSize'],
                json_coords,
                json_size,
                unit_size
            ))
            step += 1
            agent_object['boundsAtStep'].append(
                agent_object['shows'][-1]['boundingBox']
            )
        # Add 1 for the EndHabituation action step at the end of the trial.
        # The bounds are duplicated so boundsAtStep stays aligned with steps.
        step += 1
        agent_object['boundsAtStep'].append(
            agent_object['shows'][-1]['boundingBox']
        )
    # Remove the scale from each element in 'shows' except for the first, or
    # it will really mess up the simulation.
    for show in agent_object['shows'][1:]:
        del show['scale']
    return agent_object_list
|
e30b172a7c2dc2c35e180955a65cd5de98a43ec1
| 3,636,746
|
async def send_data_controller_details_message_handler(request: web.BaseRequest):
    """Send data controller details message to remote agent hosted by Data Controller."""
    context = request.app["request_context"]
    connection_id = request.match_info["connection_id"]
    # Initialise MyData DID Manager.
    manager = ADAManager(context=context)
    try:
        await manager.send_data_controller_details_message(connection_id)
    except (ConnectionManagerError, BaseModelError, ADAManagerError) as err:
        # Known domain errors map to a 400 with the rolled-up reason.
        raise web.HTTPBadRequest(reason=err.roll_up) from err
    except Exception as err:
        # Anything unexpected becomes a 500.
        raise web.HTTPInternalServerError(reason=str(err)) from err
    return web.json_response({}, status=200)
|
65437e2f3ea79c9d09d04bfabe1fce6ef02294a4
| 3,636,747
|
def test_llhelper(monkeypatch):
    """Show how to get function pointers used in type slots"""
    FT = lltype.FuncType([], lltype.Signed)
    FTPTR = lltype.Ptr(FT)
    # Bypass the full cpyext marshalling: wrappers call the Python
    # callable directly with the space.
    def make_wrapper(self, space):
        def wrapper():
            return self.callable(space)
        return wrapper
    monkeypatch.setattr(pypy.module.cpyext.api.ApiFunction, '_make_wrapper', make_wrapper)
    # memo specialization: one slot function per (space, typedef) pair.
    @specialize.memo()
    def get_tp_function(space, typedef):
        @slot_function([], lltype.Signed, error=-1)
        def slot_tp_function(space):
            return typedef.value
        api_func = slot_tp_function.api_func
        return lambda: llhelper(api_func.functype, api_func.get_wrapper(space))
    class Space:
        _cache = {}
        @specialize.memo()
        def fromcache(self, key):
            try:
                return self._cache[key]
            except KeyError:
                result = self._cache[key] = self.build(key)
                return result
        def _freeze_(self):
            # Treat the space as a translation-time constant.
            return True
    class TypeDef:
        def __init__(self, value):
            self.value = value
        def _freeze_(self):
            return True
    class W_Type:
        def __init__(self, typedef):
            self.instancetypedef = typedef
        def _freeze(self):
            # NOTE(review): named _freeze (no trailing underscore), so this
            # is presumably not the RPython freeze hook — confirm intent.
            try:
                del self.funcptr
            except AttributeError:
                pass
            return False
    w_type1 = W_Type(TypeDef(123))
    w_type2 = W_Type(TypeDef(456))
    space = Space()
    def run(x):
        if x:
            w_type = w_type1
        else:
            w_type = w_type2
        typedef = w_type.instancetypedef
        w_type.funcptr = get_tp_function(space, typedef)()
        return w_type.funcptr()
    fn = compile(run, [bool])
    assert fn(True) == 123
    assert fn(False) == 456
|
323ddd524e24eeb70284bf2229e77fe66e557f51
| 3,636,748
|
def get_chronicle_http_client(account_info):
    """
    Return an http client that is authorized with the given credentials
    using oauth2client or google-auth.
    """
    try:
        credentials = service_account.Credentials.from_service_account_info(
            account_info,
            scopes=current_app.config['AUTH_SCOPES'],
        )
    except ValueError as e:
        # Malformed service-account info surfaces as an auth failure.
        raise AuthorizationError(str(e))
    return _auth.authorized_http(credentials)
|
df75a33c41891ccdab36ac933c81a09be8ebf4f8
| 3,636,749
|
def svn_auth_save_credentials(*args):
    """svn_auth_save_credentials(svn_auth_iterstate_t state, apr_pool_t pool) -> svn_error_t"""
    # apply() was removed in Python 3; argument unpacking is equivalent
    # and also works on Python 2.
    return _core.svn_auth_save_credentials(*args)
|
429958f965bea9b5f4838cf471c91dd6e1d26e77
| 3,636,750
|
def get_nodes_str (name, nodes):
    """
    helper function to dump nodes as a list of names

    Arguments:
        name   label for this node collection
        nodes  iterable of objects exposing a ``_name`` attribute
    """
    header = "    %s nodes = %d\n" % (name, len(nodes))
    listing = "    " + ", ".join(node._name for node in nodes) + "\n"
    return header + listing
|
cafb9fd0aa202c2172aede97eabbf829dc9a1b53
| 3,636,751
|
import requests
def clean_df(df, selected_columns=default_columns):
    """Take a dataframe with GDELT2.0 data and only retain the useful columns for us and also add the country where the news was written

    Keyword arguments:
    df -- The dataframe complying to GDELT2.0 columns format
    selected_columns (optionnal) -- The set of columns we want to keep
    """
    # Keep only the requested columns and drop rows with any missing value.
    df = df[selected_columns]
    df = df.dropna(axis=0, how='any')
    # Map ActionGeo country codes to ISO codes via the project mapping table.
    # NOTE(review): the membership test reads mapping['Country_Code'].index
    # while the lookup uses mapping.loc[x] — verify the index/column pairing
    # is intended.
    mapping = get_mapping(df).set_index('ActionGeo_CountryCode')
    df['Country_Code'] = df['ActionGeo_CountryCode'].apply(
        lambda x: mapping.loc[x]['Country_Code'] if x in mapping['Country_Code'].index.values else 'None')
    # Infer the source country from the article URL (newspaper list + TLD).
    df['Country_Source'] = get_countries_for_dataframe(df, 'SOURCEURL', get_all_newspapers_to_country_dict(),
                                                       get_tld_to_country_dict())
    # Fetch cca3 -> common-name translations from the mledoze/countries repo.
    r = requests.get('https://raw.githubusercontent.com/mledoze/countries/master/countries.json')
    d = {}
    for c in r.json():
        d[c['cca3']] = c['name']['common']
    df['Country_Name'] = df['Country_Code'].apply(lambda x: d[x] if x in d else 'None')
    return df[cleaned_columns]
|
6ef2b2537c5190541691c4230e6f3164c5c9ae32
| 3,636,753
|
from functools import reduce
def mse(y_true, y_pred, reduce_mode="mean"):
    """Mean squared error.

    Args:
        y_true: ground-truth tensor.
        y_pred: prediction tensor, broadcastable against y_true.
        reduce_mode (str): "mean" or "sum" reduction over all elements;
            "none" (or None) returns the unreduced element-wise squared
            error.

    Returns:
        The (optionally reduced) squared-error tensor.

    Raises:
        ValueError: for an unknown reduce_mode.
    """
    # BUG FIX: the previous code called functools.reduce(tensor, mode),
    # which is not a tensor reduction and fails at runtime; dispatch to
    # the matching tf reduce op instead.
    squared_error = tf.math.square(y_pred - y_true)
    if reduce_mode == "mean":
        return tf.math.reduce_mean(squared_error)
    if reduce_mode == "sum":
        return tf.math.reduce_sum(squared_error)
    if reduce_mode in (None, "none"):
        return squared_error
    raise ValueError("Unsupported reduce_mode: {!r}".format(reduce_mode))
|
a74f04405d1cbc5d4ff3715286f4b76fa3355a42
| 3,636,754
|
def calculate_assignment_probabilites(assignments, num_clusters):
    """
    Estimate the empirical pdf of cluster assignments by counting how
    often each cluster index occurs.
    """
    centers = np.arange(num_clusters)
    # Bin edges sit halfway between consecutive cluster indices and are
    # open-ended on both sides, so every assignment lands in some bin.
    edges = np.hstack([-np.inf, (centers[:-1] + centers[1:]) / 2, np.inf])
    counts, _ = np.histogram(assignments, edges)
    return counts / counts.sum()
|
fe2d99b108d9baac9876a7cb9af54cb69a04525a
| 3,636,755
|
def getStudiesOptions(request, id):
    """ Get a list of studies for an investigation id.
    Input:
        id, investigation id.
    """
    seekdb = SeekDB(None, None, None)
    # Login is performed for its side effect on the SEEK session.
    user_seek = seekdb.getSeekLogin(request, False)
    studies = seekdb.getStudiesFromID(id)
    study_options = convertDicToOptions(studies)
    data = {'msg': 'okay', 'status': 1, 'study_options': study_options}
    return HttpResponse(simplejson.dumps(data))
|
b864a9aae99851f8f904cbc55b896ec6d22300c1
| 3,636,757
|
def fibonacci_thrid(n):
    """Compute the n-th Fibonacci number.

    Iterative implementation: the previous naive double recursion ran in
    exponential time; this runs in O(n) with identical results.

    Args:
        n (int): index into the Fibonacci sequence.

    Returns:
        int: F(n) with F(0) == 0 and F(1) == 1; any n below 2 is
        returned unchanged, matching the recursive base case.
    """
    if n < 2:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
|
b98251e9bd4ec507933338738c2b65faea8700b2
| 3,636,758
|
import socket
def site_url(url):
    """
    Determine the server URL.

    Builds an absolute base URL from the local hostname and the
    configured server port (omitted when it is the default 80), then
    joins the given path onto it.
    """
    base_url = 'http://%s' % socket.gethostname()
    # BUG FIX: `is not 80` compared identity against an int literal, which
    # is implementation-defined; use value inequality.
    if server.port != 80:
        base_url += ':%d' % server.port
    return urlparse.urljoin(base_url, url)
|
cb759e7e7d0273397106be79c20072eb3d7d1898
| 3,636,759
|
from typing import Tuple
from typing import List
def get_coordinates(
    mask: np.ndarray, ths: int = 5, kernel_len: int = 10
) -> Tuple[List, np.ndarray, np.ndarray]:
    """This function extract the coordinate of table, horizontal and vertical lines.

    Args:
        mask (np.darray): A binary table image
        ths (int, optional): Threshold value to ignore the lines
            has not same y coordinate for horizontal lines or x coordinate
            for vertical lines. Defaults to 5.
        kernel_len (int, optional): The size of kernel is applied.

    Raises:
        ValueError: will be raised if the number of detected lines is not enough to
            rebuild the table

    Returns:
        Tuple[List, np.ndarray, np.ndarray]: Tuple contain the coordinate of
            table, vertical and horizontal lines.
    """
    # get horizontal lines mask image
    horizontal_lines_mask = get_hor_lines_mask(mask, kernel_len)
    # get vertical lines mask image
    vertical_lines_mask = get_ver_lines_mask(mask, kernel_len)
    # get coordinate of horizontal and vertical lines
    hor_lines = get_lines_coordinate(horizontal_lines_mask, axis=0, ths=ths)
    ver_lines = get_lines_coordinate(vertical_lines_mask, axis=1, ths=ths)
    if len(hor_lines.shape) != 2 or len(ver_lines.shape) != 2:
        raise ValueError("Empty line coords array")
    # remove noise edge
    hor_lines, ver_lines = remove_noise(hor_lines, ver_lines, ths)
    # get coordinate of table
    tab_x1, tab_y1, tab_x2, tab_y2 = get_table_coordinate(hor_lines, ver_lines)
    # make sure every table has all 4 borders: drop detected lines that sit
    # on the table boundary, then re-add the boundary explicitly below
    new_ver_lines = []
    new_hor_lines = []
    for e in ver_lines:
        x1, y1, x2, y2 = e
        # don't add the left and right borders here
        if abs(x1 - tab_x1) >= ths and abs(x2 - tab_x2) >= ths:
            new_ver_lines.append([x1, y1, x2, y2])
    for e in hor_lines:
        x1, y1, x2, y2 = e
        # don't add the top and bottom borders here
        if abs(y1 - tab_y1) >= ths and abs(y2 - tab_y2) >= ths:
            new_hor_lines.append([x1, y1, x2, y2])
    # add top, bottom, left, right border
    new_ver_lines.append([tab_x1, tab_y1, tab_x1, tab_y2])
    new_ver_lines.append([tab_x2, tab_y1, tab_x2, tab_y2])
    new_hor_lines.append([tab_x1, tab_y1, tab_x2, tab_y1])
    new_hor_lines.append([tab_x1, tab_y2, tab_x2, tab_y2])
    # normalize
    final_hor_lines = normalize_v1(new_hor_lines, axis=0, ths=ths)
    final_ver_lines = normalize_v1(new_ver_lines, axis=1, ths=ths)
    final_hor_lines, final_ver_lines = normalize_v2(final_ver_lines, final_hor_lines)
    return [tab_x1, tab_y1, tab_x2, tab_y2], final_ver_lines, final_hor_lines
|
3c0bbc395df07cb240d82cf0cf78b3623591bd98
| 3,636,760
|
def get_pixels(extrinsic, intrinsic, X):
    """
    Returns the x, y pixels for the given X vector

    :param extrinsic: extrinsic (4*4) matrix obtained from the headset
    :param intrinsic: intrinsic (3*3) matrix obtained from the headset
    :param X: the position vector
    :return: image pixels for the vector
    """
    homogeneous = np.append(X, 1)
    camera = np.dot(extrinsic, homogeneous)
    # Normalise by depth, then drop down to 3 components.
    normalized = (camera / camera[2])[:3]
    pixel = np.dot(intrinsic, normalized)[:2]
    return [pixel[0], pixel[1]]
|
700bbbd721e4a1547c593163a41e13a8c20bee0d
| 3,636,761
|
def delete_question(media_package, level=0):
    """
    Ask user the question whether they want to delete the distribution artefacts for the next media package or for all
    remaining media packages.

    :param media_package: The media package to ask the question for
    :type: str
    :param level: The level to indent the question to
    :type level: int
    :return: The answer.
    :rtype: FixAnswer
    """
    # (option key, short description, long description)
    choices = [
        ('n', "next", "deleting the distribution artefacts of the next media package"),
        ('a', "all", "deleting all(remaining) distribution artefacts"),
        ('q', "quit", "quitting the script"),
    ]
    options = [choice[0] for choice in choices]
    short_descriptions = [choice[1] for choice in choices]
    long_descriptions = [choice[2] for choice in choices]
    question = "Delete distribution artefacts of media package {}?".format(media_package)
    return get_configurable_answer(options, short_descriptions, long_descriptions, question, level)
|
b11ea47f01f41c4211b32d1f61fa5255f5a0fb92
| 3,636,762
|
import importlib
import pkgutil
def import_submodules(package, recursive=True):
    """Import all submodules of a package, recursively, including subpackages.

    Arguments:
        1. package = (string) name of the package
                     (module) loader of the package
        2. recursive = (bool) True = load packages and modules from all sub-packages as well.
                       (bool) False = load only first level of packages and modules.

    Returns:
        dict mapping fully-qualified module names to module objects.
    """
    if isinstance(package, str):
        package = importlib.import_module(package)
    modules = {}
    for _loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
        qualified = "{}.{}".format(package.__name__, name)
        modules[qualified] = importlib.import_module(qualified)
        if recursive and is_pkg:
            modules.update(import_submodules(qualified))
    return modules
|
e299334de43ee9bd9544589698472db978fcae8d
| 3,636,763
|
def is_collision_ray_cell(map_obj, cell):
    """
    cell : cell r, c index from left bottom.

    Returns True when the cell is out of map bounds or marked occupied in
    the linearised occupancy map; False otherwise (including when no map
    is loaded).
    """
    rows = map_obj.mapdim[0]
    cols = map_obj.mapdim[1]
    if not (0 <= cell[0] < rows and 0 <= cell[1] < cols):
        return True
    # Column-major linear index into the flattened map.
    linear_index = cell[0] + rows * cell[1]
    return map_obj.map is not None and map_obj.map_linear[linear_index] == 1
|
6eaf38710843c4c4e82e8411db9f1e1d97fb1710
| 3,636,764
|
from datetime import datetime
def time_of_trip(datum, city):
    """
    Takes as input a dictionary containing info about a single trip (datum) and
    its origin city (city) and returns the month, hour, and day of the week in
    which the trip was made.

    NYC timestamps include seconds, while Washington and Chicago do not.

    :param datum: dict with a 'starttime' (NYC/Chicago) or 'Start date'
        (Washington) entry.
    :param city: one of 'NYC', 'Chicago', 'Washington'.
    :return: (month, hour, day_of_week) as (int, int, str).
    :raises ValueError: for an unsupported city name (previously this
        crashed with AttributeError on dt=None).
    """
    # (field name, strptime format) per supported city.
    formats = {
        "NYC": ('starttime', "%m/%d/%Y %H:%M:%S"),
        "Chicago": ('starttime', "%m/%d/%Y %H:%M"),
        "Washington": ('Start date', "%m/%d/%Y %H:%M"),
    }
    if city not in formats:
        raise ValueError("Unknown city: {!r}".format(city))
    key, fmt = formats[city]
    dt = datetime.strptime(datum[key], fmt)
    # dt.month/dt.hour are already ints, matching int(strftime(...)).
    return (dt.month, dt.hour, dt.strftime("%A"))
|
37824a6f2fe3816ec09fc3f86bb00400cfd43b38
| 3,636,765
|
def transform_resource_name(ctx, param, value):
    """Callback to transform resource_name into title case.

    Returns None unchanged so optional parameters pass through.
    """
    return value if value is None else value.title()
|
b708c3318b731d652a7acad216093c96bc18fe2e
| 3,636,766
|
def extrema (im):
    """
    Return the minimum and maximum of an image.

    Arguments:
    im   image whose extrema are to be found
    """
    lo = im.min()
    hi = im.max()
    return [lo, hi]
|
303d9c50cca91c3e73341d7b40195aceb02aef7a
| 3,636,767
|
def _create_statement(name, colnames):
"""Create table if not exists foo (...).
Note:
Every type is numeric.
Table name and column names are all lowercased
"""
# every col is numeric, this may not be so elegant but simple to handle.
# If you want to change this, Think again
schema = ', '.join([col + ' ' + 'numeric' for col in colnames])
return "create table if not exists %s (%s)" % (name, schema)
|
53c7fc9486274645c5dc7dea2257fda3cf496f9e
| 3,636,768
|
def createBundle():
    """create bundled type of OSC messages"""
    bundle = OSC.OSCMessage()
    bundle.address = ""
    # "#bundle" marker followed by two zero timetag words.
    for part in ("#bundle", 0, 0):
        bundle.append(part)
    return bundle
|
fee80abd7aa2d71b2e03dbebd65aaca07be7037a
| 3,636,769
|
def binary_or(a: int, b: int):
    """
    Take in 2 integers, convert them to binary, and return a binary number that is the
    result of a binary or operation on the integers provided.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(37, 50)
    '0b110111'
    >>> binary_or(21, 30)
    '0b11111'
    >>> binary_or(58, 73)
    '0b1111011'
    >>> binary_or(0, 255)
    '0b11111111'
    >>> binary_or(0, 256)
    '0b100000000'
    >>> binary_or(0, -1)
    Traceback (most recent call last):
        ...
    ValueError: the value of both input must be positive
    >>> binary_or(0, 1.1)
    Traceback (most recent call last):
        ...
    TypeError: 'float' object cannot be interpreted as an integer
    >>> binary_or("0", "1")
    Traceback (most recent call last):
        ...
    TypeError: '<' not supported between instances of 'str' and 'int'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both input must be positive")
    # bin() reproduces the per-argument TypeError (e.g. for floats)
    # before the OR is computed.
    a_bits = str(bin(a))[2:]
    b_bits = str(bin(b))[2:]
    combined = int(a_bits, 2) | int(b_bits, 2)
    return "0b" + format(combined, "b")
|
514fa4a02b778dfa91c4097bb8916522339cda33
| 3,636,770
|
def tukey(N, alpha):
    """
    Generate a Tukey (tapered cosine) window of length N.

    The window can be regarded as a cosine lobe of width alpha * N / 2
    convolved with a rectangle of width (1 - alpha / 2).  For alpha <= 0
    it degenerates to a rectangular window and for alpha >= 1 to a Hann
    window (matching the code below; note some references state this the
    other way around).
    """
    # Degenerate cases
    if alpha <= 0:
        return np.ones(N)  # rectangular window
    if alpha >= 1:
        return np.hanning(N)
    positions = np.linspace(0, 1, N)
    window = np.ones(positions.shape)
    # Rising taper: 0 <= x < alpha/2
    rising = positions < alpha / 2
    window[rising] = 0.5 * (1 + np.cos(2 * np.pi / alpha * (positions[rising] - alpha / 2)))
    # Flat middle needs no change; falling taper: 1 - alpha/2 <= x <= 1
    falling = positions >= (1 - alpha / 2)
    window[falling] = 0.5 * (1 + np.cos(2 * np.pi / alpha * (positions[falling] - 1 + alpha / 2)))
    return window
|
507b86f0cc98c832d0405f560b1018531d32c172
| 3,636,771
|
def psi(X, Y, c_i, A, config, pkg='numpy'):
    """Computes the value of magnetic flux at point (X, Y)
    according to coefficients ci.

    Args:
        X (float or numpy.array): x coordinate
        Y (float or numpy.array): y coordinate
        c_i (list): list of floats, the ci coefficients
        A (float): plasma parameter
        config (str): shape of the plasma 'non-null', 'single-null',
            'double-null'.
        pkg (str, optional): if set to 'numpy' (resp. 'sympy'), numpy
            (resp. sympy) objects will be used. Defaults to 'numpy'.

    Raises:
        ValueError: If argument pkg is not in ['numpy', 'np', 'sympy', 'sp']

    Returns:
        float or numpy.array or sympy.Add: value(s) of magnetic flux
    """
    # Select the math backend; numpy and sympy expose compatible log().
    if pkg in ['numpy', 'np']:
        pkg = np
    elif pkg in ['sympy', 'sp']:
        pkg = sp
    else:
        raise ValueError("Unexpected string for argument pkg")
    # Polynomial basis terms for the homogeneous solution.
    # NOTE(review): these appear to be the up-down-symmetric Solov'ev
    # basis functions — confirm coefficients against the reference paper.
    psi_1 = 1
    psi_2 = X**2
    psi_3 = Y**2 - X**2*pkg.log(X)
    psi_4 = X**4 - 4*X**2*Y**2
    psi_5 = 2*Y**4 - 9*Y**2*X**2 + 3*X**4*pkg.log(X) - 12*X**2*Y**2*pkg.log(X)
    psi_6 = X**6 - 12*X**4*Y**2 + 8*X**2*Y**4
    psi_7 = 8*Y**6 - 140*Y**4*X**2 + 75*Y**2*X**4 - 15*X**6*pkg.log(X) + \
        180*X**4*Y**2*pkg.log(X) - 120*X**2*Y**4*pkg.log(X)
    psis = [psi_1, psi_2, psi_3, psi_4, psi_5, psi_6, psi_7]
    # Up-down asymmetric terms are only needed for a single-null shape.
    if config == 'single-null':
        psi_8 = Y
        psi_9 = Y*X**2
        psi_10 = Y**3 - 3*Y*X**2*pkg.log(X)
        psi_11 = 3*Y*X**4 - 4*Y**3*X**2
        psi_12 = 8*Y**5 - 45*Y*X**4 - 80*Y**3*X**2*pkg.log(X) + \
            60*Y*X**4*pkg.log(X)
        psis += [psi_8, psi_9, psi_10, psi_11, psi_12]
    # Particular solution (A-weighted) plus the linear combination of the
    # basis terms; len(c_i) selects how many basis terms are used.
    val = X**4/8 + A*(1/2*X**2*pkg.log(X) - X**4/8) + \
        sum([c_i[i]*psis[i] for i in range(len(c_i))])
    return val
|
af110abfe37a82a0fcf89a31d1c8eae87bf280b8
| 3,636,773
|
def tempConvert(temp, unit):
    """ Convert Fahrenheit to Celsius """
    # Any unit other than 'F' passes through untouched.
    if unit != 'F':
        return temp
    return (temp - 32) * 5 / 9
|
224c7b5bd72ff5d209bfaf2b10d94cc24ac8681d
| 3,636,775
|
def _find_best_twitter_key(type, reset, remaining, limit, proxies, auth):
    """
    This function switches to another pair of Twitter API keys, if they are available, to avoid pausing.
    * WANT TO SWAP KEYS HERE B/C PAUSE IS MORE THAN 3 MINUTES
    :param type: Type of API call: "timeline", "friends", "followers", "search_tweets", "search_users", "retweets",
                 "rls", or "users"
    :param reset: The remaining window before the limit resets in UTC epoch seconds
    :param remaining: The number of requests left for the 15 minute window
    :param limit: The rate limit ceiling for that given reque
    :param proxies: Proxy dictionary, ex. {'http': 'http://%s:%s' % (HOST, PORT), 'https': 'http://%s:%s' % (HOST, PORT)}
    :param auth: Twitter application authentication, see the get_authorization method
    :return best_key_auth: Authorization object using the best keys
    :return isNewAuth: Boolean value representing whether a new authorization has been produced
    """
    rls_types = _rls_type_list()
    assert (type in rls_types), "Specify an RLS type as: {}".format("', '".join(rls_types))
    # Count JSON files in key directory
    key_dir = os.path.join(os.path.dirname(pyTweet.__file__), 'twitter_api_keys')
    key_jsons = _get_key_list()
    isNewAuth = False
    # Check if there are enough keys to continue with this function
    assert (len(key_jsons) > 0), "You have no Twitter API key files saved in {}. \nRefer to the documentation to " \
                                 "create key files, or move your key files to that location.".format(key_dir)
    if len(key_jsons) == 1:
        # Only one key exists: nothing to swap to, so just wait out the window.
        print "\tThere are no other API keys to use...returning current API key."
        pause = abs(int(time.time()) - reset) + 5
        print "\tThere are no alternative keys. Pause for {} seconds.".format(pause)
        time.sleep(pause)
        return (auth, isNewAuth)
    # Define best auth and key; start from the current key's rate-limit state.
    best_key_auth = auth
    best_key = {}
    best_key[type] = {'RESET': reset, 'LIMIT': limit, 'REMAINING': remaining}
    for k in key_jsons:
        try:
            key = load_twitter_api_key_set(key_file=k)
        except (ValueError, AttributeError):
            print "\tWarning! The file {} does not contain a valid Twitter API key. Please refer to the " \
                  "documentation on creating an API key".format(k)
            continue
        if ('API_KEY' not in key.keys()) or ('API_SECRET' not in key.keys()) or ('ACCESS_TOKEN' not in key.keys()) or ('ACCESS_TOKEN_SECRET' not in key.keys()):
            print "\tWarning! The file {} does not contain a valid Twitter API key. Please refer to the documentation " \
                  "on creating an API key".format(k)
            continue
        # Be sure that this is not the same key we started the function with
        if auth['KEY_FILE'] == k:
            continue
        if (auth['API_KEY'] == key['API_KEY']) and (auth['API_SECRET'] == key['API_SECRET']) and (auth['ACCESS_TOKEN'] == key['ACCESS_TOKEN']) and (auth['ACCESS_TOKEN_SECRET'] == key['ACCESS_TOKEN_SECRET']):
            continue
        # Check the RLS of RLS for key
        key_auth = get_authorization(key)
        _, _, _ = _get_rate_limit_status(type=type, proxies=proxies, auth=key_auth)
        # Re-load the key file: NOTE(review): presumably _get_rate_limit_status
        # refreshes the stored RLS fields on disk — confirm.
        key = load_twitter_api_key_set(key_file=k)
        # Skip key if it doesn't have appropriate fields
        if ('RESET' not in key[type].keys()) or ('REMAINING' not in key[type].keys()) or ('LIMIT' not in key[type].keys()):
            continue
        # Check keys! A completely fresh key (REMAINING == LIMIT) wins outright.
        if key[type]['REMAINING'] == key[type]['LIMIT']:
            best_key = key
            best_key_auth = key_auth
            isNewAuth = True
            break
        if key[type]['REMAINING'] < 1:
            continue
        if key[type]['REMAINING'] > best_key[type]['REMAINING']:
            best_key = key
            best_key_auth = key_auth
            isNewAuth = True
            break
    if isNewAuth:
        print "\nSwitch to Twitter key {} after using {}".format(best_key_auth['KEY_FILE'], auth['KEY_FILE'])
    else:
        # No better key found: sleep until the best-known window resets.
        pause = abs(int(time.time()) - best_key[type]['RESET']) + 5
        print "\nUnable to find a better Twitter key, they all appear to be exahusted for the {} call. \nPause for {} " \
              "minutes".format(type, np.ceil(pause/60))
        time.sleep(pause)
    return (best_key_auth, isNewAuth)
|
777cd7bed54f635c06ad3cef1bb65a6a6075dcc9
| 3,636,776
|
import fnmatch
def allowed_file(filename, allowed_exts):
    """
    The validator for blueimp that limits which file extensions are allowed.

    Args:
        filename (str): a filepath
        allowed_exts (str): set of allowed file extensions

    Returns:
        bool: True if extension is an allowed file type, False otherwise
    """
    lowered = filename.lower()
    return any(
        fnmatch.fnmatch(lowered, "*." + str(ext)) for ext in allowed_exts
    )
|
af23f6017ffa76e5402800a77cf794a2c1bce330
| 3,636,777
|
import string
def tokenize(document):
    """
    Given a document (represented as a string), return a list of its
    words, lowercased, stripped of punctuation and of English stopwords,
    and sorted in descending order.
    """
    stopwords = nltk.corpus.stopwords.words("english")
    strip_punct = str.maketrans("", "", string.punctuation)
    tokens = nltk.word_tokenize(document.lower())
    # Remove punctuation characters in a single pass per token.
    cleaned = [token.translate(strip_punct) for token in tokens]
    kept = [token for token in cleaned
            if token and token not in stopwords]
    return sorted(kept, reverse=True)
|
76e2eedc08ccc8bec28830ab6b0d8a70d4a67b14
| 3,636,778
|
def check_auth(username, password):
    """This function is called to check if a username /
    password combination is valid.
    """
    database = db_connect()
    cursor = database.cursor()
    cursor.execute("SELECT password, active FROM users WHERE api_user=%s;", (username,))
    row = cursor.fetchone()
    cursor.close()
    database.close()
    # Unknown user -> reject.
    if row is None:
        return False
    stored_password, active = row
    # Both the password and the active flag must match.
    return stored_password == password and active == 1
|
d621ecefba65793b8f8230eb9e211e3d23d2c270
| 3,636,780
|
from typing import Union
def _handle_axis(axis: Union[str, int]) -> int:
"""Handles axis arguments including "columns" and "index" strings."""
if axis not in {0, 1, 'columns', 'index'}:
raise ValueError(
"axis value error: not in {0, 1, 'columns', 'index'}"
)
# Map to int if str
if isinstance(axis, str):
axis_mapper = {'index': 0, 'columns': 1}
axis = axis_mapper.get(axis)
return axis
|
4ebc4fe2ccf9124e326d21b14a7dc4d9aae52f12
| 3,636,781
|
def is_icmp_dest_unreach(icmp_data):
    """Return True if the parsed ICMP header is a Destination Unreachable
    message, i.e. its "TYPE" field equals the module-level
    ICMP_DEST_UNREACH constant.

    :param icmp_data: dict-like parsed ICMP header with a "TYPE" entry.
    """
    return icmp_data["TYPE"] == ICMP_DEST_UNREACH
|
07213bed90a9e17c0236883b2739f54b8d5ccf09
| 3,636,785
|
def exists_user_notifications(session, user_id):
    """Check whether any notification row exists for *user_id*.

    :param session: SQLAlchemy session/connection exposing ``execute``.
    :param user_id: id of the user to check.
    :return: the EXISTS() result (truthy when notifications exist).
    """
    # SECURITY FIX: the previous version interpolated user_id straight
    # into the SQL string (str.format), allowing SQL injection; pass it
    # as a bound parameter instead.
    res = session.execute(
        text("""SELECT EXISTS(
            SELECT 1 FROM public.notification WHERE user_id = :user_id) AS user"""),
        {"user_id": user_id},
    ).fetchone()
    return res.user
|
a22c171359a95fc3723edbb967df865046692969
| 3,636,786
|
import torch
def export_onnx(model, config, device, onnx_model_path, verbose):
    """Export a GPT-2 model with past state to an ONNX model.

    Args:
        model: GPT2Model or GPT2LMHeadModel to export.
        config: model config providing n_layer, num_attention_heads,
            hidden_size and vocab_size.
        device: torch device used to build the dummy inputs.
        onnx_model_path: destination path of the exported .onnx file.
        verbose: passed through to torch.onnx.export.

    Returns:
        onnx_model_path, unchanged, for caller convenience.
    """
    num_layer = config.n_layer
    # Minimal dummy batch (batch=1, seq=1, past=1) just to trace the graph.
    dummy_inputs = get_dummy_inputs(batch_size=1,
                                    past_sequence_length=1,
                                    sequence_length=1,
                                    num_attention_heads=config.num_attention_heads,
                                    hidden_size=config.hidden_size,
                                    num_layer=num_layer,
                                    vocab_size=config.vocab_size,
                                    device=device,
                                    float16=False)
    dummy_input_ids, dummy_position_ids, dummy_attention_mask, dummy_past = dummy_inputs
    input_list = [dummy_input_ids, dummy_position_ids, dummy_attention_mask] + dummy_past
    # Forward pass to obtain example outputs (needed by torch.onnx.export).
    with torch.no_grad():
        outputs = model(*input_list)
    past_names = [f'past_{i}' for i in range(num_layer)]
    present_names = [f'present_{i}' for i in range(num_layer)]
    # GPT2Model outputs last_state; GPT2LMHeadModel outputs logits (prediction_scores)
    assert outputs[0].shape[2] == config.vocab_size or outputs[0].shape[2] == config.hidden_size
    output_names = ["logits" if outputs[0].shape[2] == config.vocab_size else "last_state"] + present_names
    # Shape of input tensors:
    #    input_ids: (batch_size, seq_len)
    #    past_{i}:  (2, batch_size, num_heads, past_seq_len, hidden_size/num_heads)
    #    attention_mask: (batch_size, past_seq_len + seq_len)
    # Shape of output tensors:
    #    last_state: (batch_size, seq_len, hidden_size)
    #      or logits: (batch_size, seq_len, vocab_size)
    #    present_{i}:  (2, batch_size, num_heads, past_seq_len + seq_len, hidden_size/num_heads)
    # Declare every batch/sequence dimension dynamic so the exported model
    # accepts arbitrary batch sizes and sequence lengths at inference time.
    dynamic_axes = {'input_ids': {0: 'batch_size', 1: 'seq_len'}, output_names[0]: {0: 'batch_size', 1: 'seq_len'}}
    for name in past_names:
        dynamic_axes[name] = {1: 'batch_size', 3: 'past_seq_len'}
    for name in present_names:
        dynamic_axes[name] = {1: 'batch_size', 3: 'total_seq_len'}
    dynamic_axes['attention_mask'] = {0: 'batch_size', 1: 'total_seq_len'}
    dynamic_axes['position_ids'] = {0: 'batch_size', 1: 'seq_len'}
    logger.info(
        f"Shapes: input_ids={dummy_input_ids.shape} past={dummy_past[0].shape} output={outputs[0].shape} present={outputs[1][0].shape}"
    )
    torch.onnx.export(model,
                      args=tuple(input_list),
                      f=onnx_model_path,
                      input_names=['input_ids', 'position_ids', 'attention_mask'] + past_names,
                      output_names=output_names,
                      example_outputs=outputs,
                      dynamic_axes=dynamic_axes,
                      opset_version=11,
                      do_constant_folding=True,
                      verbose=verbose)
    return onnx_model_path
|
13b4152750efecef5275a154768d8a891fe95829
| 3,636,787
|
def set_peak_elo(df: DataFrame, playersElo) -> DataFrame:
    """Add peak-Elo columns to a matches dataframe.

    Args:
        df (DataFrame): the dataframe read row by row (match by match);
            must contain the fields set_peak_elo_match expects.
        playersElo: dict mapping player id -> Elo-rating history list.

    Returns:
        DataFrame: the input dataframe with 4 additional columns
        (PeakElo1, PeakEloSince1, PeakElo2, PeakEloSince2 — one
        PeakElo/PeakEloSince pair per player).
    """
    # set_peak_elo_match returns a 4-tuple per row; zip(*...) transposes
    # the per-row tuples into four column-length sequences, which are
    # assigned to the four new columns in a single tuple assignment.
    (
        df.loc[:, ["PeakElo1"]],
        df.loc[:, ["PeakEloSince1"]],
        df.loc[:, ["PeakElo2"]],
        df.loc[:, ["PeakEloSince2"]],
    ) = zip(
        *df.apply(
            lambda row: set_peak_elo_match(row, playersElo),
            axis=1,
        )
    )
    # save a dataframe with all matches and Elo rating of each player for the matches
    # NOTE(review): hard-coded relative output path — ./results must exist
    # or to_csv raises; consider parameterising.
    df.to_csv("./results/dfWithElos9m_peak.csv")
    return df
|
a55c4d098686a1b75b03eac0c46345bc7c248593
| 3,636,788
|
def calc_plane_vector(atom_pos):
    """
    Best-fit unit normal of the plane through a set of 3D points, via SVD.

    ARGS:
        atom_pos (ndarray) :: (N, 3) array of atomic positions
    returns:
        ndarray :: unit plane normal, flipped so its z component is >= 0
    """
    # Center the point cloud on its centroid so the SVD fits a plane
    # through the cloud rather than through the origin.
    centered = atom_pos.T - np.mean(atom_pos.T, axis=1, keepdims=True)
    left_vecs, _, _ = svd(centered, full_matrices=True)
    # The left-singular vector of the smallest singular value is the
    # plane normal; normalise it and orient it upwards (z > 0).
    normal = left_vecs[:, -1] / norm(left_vecs[:, -1])
    return normal * np.sign(normal[-1])
|
5576152adcd1406a94e79eb29d3030214424c2b7
| 3,636,789
|
import requests
def search_reddit(search, subreddit='', t='week', limit='100',
                  sort='new', restrict_sr='1'):
    """Query the reddit search API and return the raw response.

    search - string object, representing your search query
    subreddit - string object, representing the subreddit
    t - string object, one of (hour, day, week, month, year, all)
    limit - string object, limits the number of posts returned
    sort - string object, one of 'hot', 'old', 'top' or 'new'
    restrict_sr - string object, '0' or '1', specifies if restriction
                  to the subreddit is applied
    """
    print(f"Retrieving reddit posts for {search=} and {subreddit=}")
    # OAuth bearer headers for the configured reddit app.
    headers = get_reddit_token('nie_irek_ubuntu')
    endpoint = f"https://oauth.reddit.com/r/{subreddit}/search"
    query = {
        'q': search,
        'sort': sort,
        'restrict_sr': restrict_sr,
        'limit': limit,
        't': t,
    }
    response = requests.get(endpoint, headers=headers, params=query)
    return response
|
d26383f464b16bf8cefa5e089b975c6ad2c5f19f
| 3,636,790
|
def _expected_type_expression(typedef: Typedef) -> str:
    """
    Determine the type expression supplied to ``from_obj`` function corresponding to the type definition.

    :param typedef: type definition in Python representation
    :return: Python code representing the type definition
    """
    # Scalar typedefs map directly to a builtin type name; checked in the
    # same order as the original isinstance chain.
    for scalar_cls, type_name in ((Booldef, 'bool'), (Intdef, 'int'),
                                  (Floatdef, 'float'), (Strdef, 'str'),
                                  (Bytesdef, 'bytes')):
        if isinstance(typedef, scalar_cls):
            return type_name
    # Container typedefs recurse into their element type.
    if isinstance(typedef, Listdef):
        if typedef.items is None:
            raise ValueError('Unexpected None items in typedef: {!r}'.format(typedef.identifier))
        return 'list, {}'.format(_expected_type_expression(typedef=typedef.items))
    if isinstance(typedef, Dictdef):
        if typedef.values is None:
            raise ValueError('Unexpected None values in typedef: {!r}'.format(typedef.identifier))
        return 'dict, {}'.format(_expected_type_expression(typedef=typedef.values))
    if isinstance(typedef, Classdef):
        return _class_name(typedef.identifier)
    raise NotImplementedError('Translating the typedef to an expected type is not supported: {}'.format(typedef))
|
98ce70280c5054083b9234f1ef47b759b852720a
| 3,636,791
|
def adjoint(m):
    """Return the Hermitian adjoint (conjugate transpose) of *m*."""
    # conj() coerces array-likes to ndarray, so .T is always available.
    return np.conj(m).T
|
f7dea92a990473f88547574846aa1be8dc4bfee1
| 3,636,792
|
def get_graphic_template_variables(path, graphic_number):
    """
    Generates the template variables for each graphic.

    Parses the path into a slug, loads the graphic's config, downloads
    its Google spreadsheet to <graphic>/<slug>.xlsx and extracts the
    'labels' sheet.

    :param path: path to the graphic (parsed by utils.parse_path).
    :param graphic_number: zero-based index; stored 1-based in the result.
    :return: dict with spreadsheet_id, app_id, graphic_number and sheet,
        or None when the config is missing or has no COPY_GOOGLE_DOC_KEY.
    """
    slug, abspath = utils.parse_path(path)
    graphic_path = '%s/%s' % (abspath, slug)
    ## Get Spreadsheet Path
    try:
        graphic_config = load_graphic_config(graphic_path)
    except IOError:
        print '%s/graphic_config.py does not exist.' % slug
        return
    if not hasattr(graphic_config, 'COPY_GOOGLE_DOC_KEY') or not graphic_config.COPY_GOOGLE_DOC_KEY:
        print 'COPY_GOOGLE_DOC_KEY is not defined in %s/graphic_config.py.' % slug
        return
    ## Generate Links From Slug
    spreadsheet_id = graphic_config.COPY_GOOGLE_DOC_KEY
    app_id = slug
    ## Update Spreadsheet
    # Downloads the live Google doc over the local copy each time.
    copy_path = os.path.join(graphic_path, '%s.xlsx' % slug)
    get_document(graphic_config.COPY_GOOGLE_DOC_KEY, copy_path)
    ## Get Sheet Data
    copy = copytext.Copy(filename=copy_path)
    sheet = copy['labels']
    note = {
        "spreadsheet_id": spreadsheet_id,
        "app_id": app_id,
        "graphic_number": graphic_number + 1,
        "sheet": sheet,
    }
    return note
|
e755ec96cdcf0f9bddeb2d5134db97cdc9777dfd
| 3,636,793
|
def svc(self, model):
    """Build an SVC classifier and its hyper-parameter search space.

    Args:
        model: "linear" selects LinearSVC; anything else selects the
            kernel SVC.

    Returns:
        (estimator, search_space) tuple, where search_space maps
        hyper-parameter names to candidate value lists.
    """
    svc_sp = {}
    if model == "linear":
        svc = LinearSVC(dual=False, class_weight='balanced')
    else:
        svc = SVC(cache_size=1000, class_weight='balanced')
        # BUG FIX: kernel-related parameters only apply to the kernel
        # SVC — LinearSVC rejects kernel/degree/gamma, so they must not
        # appear in the linear search space.
        svc_sp['kernel'] = ['linear', 'poly', 'rbf']
        svc_sp['degree'] = [2, 3, 4]
        svc_sp['gamma'] = ["auto", "scale"]
    # Regularisation strength applies to both estimators.
    svc_sp['C'] = [0.001, 0.01, 0.1, 1.0]
    return svc, svc_sp
|
a58bafa7bdf3ff71120afd528b479893dada14e4
| 3,636,794
|
import re
def _FormatDataTransferIdentifiers(client, transfer_identifier):
"""Formats a transfer config or run identifier.
Transfer configuration/run commands should be able to support different
formats of how the user could input the project information. This function
will take the user input and create a uniform transfer config or
transfer run reference that can be used for various commands.
This function will also set the client's project id to the specified
project id.
Returns:
The formatted transfer config or run.
"""
formatted_identifier = transfer_identifier
match = re.search(r'projects/([^/]+)', transfer_identifier)
if not match:
formatted_identifier = ('projects/' +
client.GetProjectReference().projectId + '/' +
transfer_identifier)
else:
client.project_id = match.group(1)
return formatted_identifier
|
951a3576a1a53f9dd141e718c31c8b0314a550d7
| 3,636,795
|
def getReactionUrl(reaction, family=None, estimator=None, resonance=True):
    """
    Get the URL (for kinetics data) of a reaction.

    Returns '' if the reaction contains functional Groups or LogicNodes instead
    of real Species or Molecules.

    :param reaction: reaction object with ``reactants`` and ``products`` lists.
    :param family: optional kinetics family name; only used together with
        ``estimator`` (family alone yields '').
    :param estimator: optional estimator name; spaces are replaced with
        underscores for the URL.
    :param resonance: passed through as a URL kwarg.
    """
    kwargs = dict()
    for index, reactant in enumerate(reaction.reactants):
        # Entries wrap the actual item; unwrap before type-dispatching.
        if isinstance(reactant, Entry):
            reactant = reactant.item
        if isinstance(reactant, Group) or isinstance(reactant, LogicNode):
            return ''
        # Species carry a list of resonance structures; take the first.
        mol = reactant if isinstance(reactant, Molecule) else reactant.molecule[0]
        kwargs['reactant{0:d}'.format(index+1)] = moleculeToAdjlist(mol)
    for index, product in enumerate(reaction.products):
        mol = product if isinstance(product, Molecule) else product.molecule[0]
        kwargs['product{0:d}'.format(index+1)] = moleculeToAdjlist(mol)
    kwargs['resonance'] = resonance
    if family:
        if estimator:
            kwargs['family'] = family
            kwargs['estimator'] = estimator.replace(' ', '_')
            reaction_url = reverse('database:kinetics-group', kwargs=kwargs)
        else:
            # Family without an estimator is not a resolvable URL.
            reaction_url = ''
    else:
        reaction_url = reverse('database:kinetics-data', kwargs=kwargs)
    return reaction_url
|
6ce5ca833bf4871d98314f8bf64a1ce024d4f41d
| 3,636,796
|
def hexLat2W(nrows=5, ncols=5):
    """
    Create a W object for a hexagonal lattice.

    Parameters
    ----------
    nrows   : int
              number of rows
    ncols   : int
              number of columns

    Returns
    -------
    w : W
        instance of spatial weights class W

    Notes
    -----
    Observations are row ordered: first k observations are in row 0, next k in row 1, and so on.

    Construction is based on shifting every other column of a regular lattice
    down 1/2 of a cell.

    Examples
    --------
    >>> import pysal as ps
    >>> w = ps.lat2W()
    >>> w.neighbors[1]
    [0, 6, 2]
    >>> w.neighbors[21]
    [16, 20, 22]
    >>> wh = ps.hexLat2W()
    >>> wh.neighbors[1]
    [0, 6, 2, 5, 7]
    >>> wh.neighbors[21]
    [16, 20, 22]
    >>>
    """
    # Degenerate lattice: fall back to a linear contiguity structure.
    if nrows == 1 or ncols == 1:
        print "Hexagon lattice requires at least 2 rows and columns"
        print "Returning a linear contiguity structure"
        return lat2W(nrows, ncols)
    n = nrows * ncols
    # Row and column index of each cell, in row-major observation order.
    rid = [i // ncols for i in xrange(n)]
    cid = [i % ncols for i in xrange(n)]
    r1 = nrows - 1
    c1 = ncols - 1
    # Start from the rook neighbors of a regular lattice, then add the
    # diagonal neighbors that hexagonal shifting introduces.
    w = lat2W(nrows, ncols).neighbors
    for i in xrange(n):
        odd = cid[i] % 2
        if odd:
            if rid[i] < r1:  # odd col index above last row
                # new sw neighbor
                if cid[i] > 0:
                    j = i + ncols - 1
                    w[i] = w.get(i, []) + [j]
                # new se neighbor
                if cid[i] < c1:
                    j = i + ncols + 1
                    w[i] = w.get(i, []) + [j]
        else:  # even col
            # nw
            jnw = [i - ncols - 1]
            # ne
            jne = [i - ncols + 1]
            if rid[i] > 0:
                # NOTE(review): the bare `w[i]` below is a no-op lookup
                # (would raise KeyError if i were missing) — presumably a
                # leftover; confirm before removing.
                w[i]
                if cid[i] == 0:
                    w[i] = w.get(i, []) + jne
                elif cid[i] == c1:
                    w[i] = w.get(i, []) + jnw
                else:
                    w[i] = w.get(i, []) + jne
                    w[i] = w.get(i, []) + jnw
    return pysal.weights.W(w)
|
7e33f66d40d87b71d0b06d73cf83a33752aecfdd
| 3,636,797
|
import numpy as np
def Modelo(Mags, Phi, Me, alpha):
    """Schechter-type luminosity-function model evaluated at magnitudes.

    F(M) = 0.4*ln(10)*Phi * x**(alpha+1) * exp(-x),  with x = 10**(-0.4*(M-Me))

    Parameters
    ----------
    Mags : list
        Magnitudes observadas (observed magnitudes).
    Phi, Me, alpha : float, float, float
        Model parameters (normalisation, characteristic magnitude, slope).

    Returns
    -------
    F : list
        Model values at each magnitude.
    """
    # Amplitude factor is constant over magnitudes — hoist it out of the loop.
    amplitude = 0.4 * np.log(10) * Phi
    F = []
    # Idiomatic for-loop replaces the original manual while + index counter.
    for M in Mags:
        x = 10 ** (-0.4 * (M - Me))  # shared base term 10^{-0.4(M-Me)}
        F.append(amplitude * x ** (alpha + 1) * np.exp(-x))
    return F
|
0e547058032bc682c6d0c5bffa5f00aaa1318989
| 3,636,798
|
import fractions
def totient(n):
    """
    Calculate Euler's totient: the number of integers in [1, n) coprime to n.

    NOTE(review): returns 0 for n == 1 (mathematically phi(1) == 1) —
    preserved for backward compatibility with the original implementation.
    """
    # BUG FIX: fractions.gcd was deprecated in 3.5 and removed in
    # Python 3.9; math.gcd is the supported replacement.
    from math import gcd
    return sum(1 for i in range(1, n) if gcd(n, i) == 1)
|
11107816582285e712d9b2f5e98649ad345f9bf0
| 3,636,799
|
import pickle
def read_img_pkl(path):
    """Read a pickled image from a file.

    :param path: the file path
    :type path: str
    :return: the image
    :rtype: tuple

    .. note:: pickle deserialisation can execute arbitrary code —
       only load files from trusted sources.
    """
    with open(path, "rb") as handle:
        data = pickle.load(handle)
    return data
|
8c7045d460e0583b02b565b818888c6b7991bc6b
| 3,636,800
|
from datetime import datetime
def create_new_session(connection_handler, session_tablename="session"):
    """
    Insert a new session record into the session table.

    :param connection_handler: open database connection
    :param session_tablename: the session tablename (default: session)
    :return: id of the inserted row, -1 if an exception is thrown

    NOTE(review): the caller is responsible for committing the
    transaction; nothing is committed here.
    """
    try:
        now = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
        cur = connection_handler.cursor()
        cur.execute(
            f"INSERT INTO {session_tablename}(start_timestamp)VALUES(?)",
            (now,),
        )
        return cur.lastrowid
    except Exception as e:
        logger.error(f"Exception: {str(e)}")
        return -1
|
f955ed02b7292aab6d71b96a0412aa56c1212999
| 3,636,801
|
def splitData(y, tx, ratios=(0.4, 0.1)):
    """Randomly split the dataset into train, validation and test sets.

    Args:
        y: target array of length N.
        tx: feature array with N rows.
        ratios: fractions of the data used for (train, validation);
            the remainder becomes the test set.

    Returns:
        tX_train, y_train, tX_validation, y_validation, tX_test, y_test

    Note:
        Uses np.random.shuffle, so results depend on the global RNG state.
    """
    # BUG FIX: the default was a mutable list ([0.4, 0.1]); a tuple is
    # the safe, equivalent default (np.array accepts either).
    indices = np.arange(len(y))
    np.random.shuffle(indices)
    # Convert fractional ratios to cumulative split points for np.split.
    splits = (np.array(ratios) * len(y)).astype(int).cumsum()
    training_indices, validation_indices, test_indices = np.split(indices, splits)
    return (tx[training_indices], y[training_indices],
            tx[validation_indices], y[validation_indices],
            tx[test_indices], y[test_indices])
|
6cbf2907f32906779f8cf4193336cb867e66bf14
| 3,636,802
|
def conv_compare(node1, node2):
    """Compare two conv_general_dilated nodes for structural equivalence.

    Two nodes match when the lengths of their geometric parameters and
    dimension-number specs agree, and their grouping flags (feature /
    batch group count > 1) agree.
    """
    assert node1["op"] == node2["op"] == "conv_general_dilated"
    p1, p2 = node1["eqn"].params, node2["eqn"].params
    # Geometric parameters must have matching lengths (same rank).
    length_keys = ("window_strides", "padding", "lhs_dilation", "rhs_dilation",
                   "lhs_shape", "rhs_shape")
    if any(len(p1[k]) != len(p2[k]) for k in length_keys):
        return False
    dn1, dn2 = p1["dimension_numbers"], p2["dimension_numbers"]
    for spec in ("lhs_spec", "rhs_spec", "out_spec"):
        if len(getattr(dn1, spec)) != len(getattr(dn2, spec)):
            return False
    # Grouping is compared as a boolean (grouped vs ungrouped), not exact count.
    for count_key in ("feature_group_count", "batch_group_count"):
        if (p1[count_key] > 1) != (p2[count_key] > 1):
            return False
    return True
|
cd7bad7d298e5f3faa971a9c968b3cd3a6a27812
| 3,636,803
|
import json
def render_zones(zones: dict):
    """Render the zones mapping in the format requested by the Accept header.

    Supports JSON, HTML (via the 'zones' template), CSV and a plain-text
    fallback; also sets the response Content-Type accordingly.
    """
    accept = bottle.request.headers.get("Accept")
    if "application/json" in accept:
        body = json.dumps(zones)
        ctype = "application/json"
    elif "text/html" in accept:
        body = bottle.template("zones", zones=zones)
        ctype = "text/html"
    elif "text/csv" in accept:
        rows = "\n".join(f'"{k}","{v}"' for k, v in zones.items())
        body = '"timezone","UTC offset"\n' + rows + "\n"
        ctype = "text/csv"
    else:
        body = "\n".join(f"{k}: {v}" for k, v in zones.items())
        ctype = "text/plain"
    bottle.response.set_header("Content-Type", f"{ctype}; charset=UTF-8")
    return body
|
c4bbbef191fc6507d13bddcb4ffe1d51124a3e18
| 3,636,804
|
from typing import Union
import numbers
def less(left: Tensor, right: Union[Tensor, np.ndarray,numbers.Number],dtype=Dtype.float32,name='less'):
    """Elementwise 'less' comparison of two tensors. Result is 1 if left < right else 0.

    Args:
        left: left side tensor
        right: right side tensor
        dtype (dtype): output tensor dtype (NOTE: currently ignored — see below).
        name(str):op name

    Returns:
        Result is 1 if left < right else 0.

    Examples:
        >>> less(to_tensor([41., 42., 43.]), to_tensor([42., 42., 42.]))
        <Tensor: shape=(3,), dtype=float32, numpy=array([1.0000e+00, 0.0000e+00, 0.0000e+00], dtype=float32)>
        >>> less(to_tensor([-1,0,1]), 0)
        <Tensor: shape=(3,), dtype=float32, numpy=array([1.0000e+00, 0.0000e+00, 0.0000e+00], dtype=float32)>
    """
    # NOTE(review): the `dtype` parameter is accepted but never used — the
    # result is always cast to tf.float32. Confirm whether callers rely on
    # passing other dtypes before wiring it through.
    return tf.cast(tf.less(left, right,name=name), tf.float32,name='cast')
|
4848bbbadf5ff789c1b406b1b53f0de4b436b155
| 3,636,805
|
def load_sample_image(image_name):
    """Load the numpy array of a single sample image.

    Read more in the :ref:`User Guide <sample_images>`.

    Parameters
    ----------
    image_name : {`china.jpg`, `flower.jpg`}
        The name of the sample image loaded

    Returns
    -------
    img : 3D array
        The image as a numpy array: height x width x color

    Examples
    --------
    >>> from sklearn.datasets import load_sample_image
    >>> china = load_sample_image('china.jpg')   # doctest: +SKIP
    >>> china.shape                              # doctest: +SKIP
    (427, 640, 3)
    """
    images = load_sample_images()
    # Match on filename suffix so callers can pass just the basename.
    index = next(
        (i for i, filename in enumerate(images.filenames)
         if filename.endswith(image_name)),
        None,
    )
    if index is None:
        raise AttributeError("Cannot find sample image: %s" % image_name)
    return images.images[index]
|
7a1131f49a04343a4d8c0dbfed1099420ee906fb
| 3,636,806
|
from datetime import datetime
def read_date_from_GPM(infile, radar_lat, radar_lon):
    """
    Extract the satellite overpass datetime from a GPM HDF5 file.

    Parameters:
    ===========
    infile: str
        Satellite data filename.
    radar_lat: float
        Latitude of ground radar
    radar_lon: float
        Longitude of ground radar

    Returns:
    ========
    gpm_date: datetime
        Datetime of satellite data at ground radar position.
    min_dist: float
        Minimal (Euclidean lat/lon, in degrees) distance between the
        satellite swath and the ground radar — i.e. is the swath inside
        the ground radar domain?
    """
    with h5py.File(infile, 'r') as file_id:
        obj_id = file_id['NS']
        # Read GPM lat/lon
        # NOTE(review): Dataset.value was removed in h5py 3.x; use [()] there.
        latitude = obj_id['Latitude'].value
        longitude = obj_id['Longitude'].value
        # Read time data
        mem_id = obj_id['ScanTime']
        year = mem_id['Year'].value
        month = mem_id['Month'].value
        day = mem_id['DayOfMonth'].value
        hour = mem_id['Hour'].value
        minute = mem_id['Minute'].value
        second = mem_id['Second'].value
    # Find the along-track scan position closest to the radar.
    dist = np.sqrt((latitude - radar_lat)**2 + (longitude - radar_lon)**2)
    dist_atrack = np.amin(dist, axis=1)  # Min distance along track axis
    radar_center = np.argmin(dist_atrack)
    min_dist = np.amin(dist_atrack)
    # BUG FIX: this module does `from datetime import datetime`, so the
    # constructor is `datetime(...)` — `datetime.datetime(...)` raised
    # AttributeError at runtime.
    gpm_date = datetime(year[radar_center], month[radar_center], day[radar_center],
                        hour[radar_center], minute[radar_center], second[radar_center])
    return gpm_date, min_dist
|
1366ad4da74b5c31f88435257bbf2c6bc4662b92
| 3,636,807
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.