| content | sha1 | id |
|---|---|---|
import numpy as np
def lorentzian_sim(xdata, Amp, Width, Center):
"""
    Estimates the sum of N Lorentzian functions where:
    Amp = 1 x N array of Lorentzian amplitudes
    Width = 1 x N array of Lorentzian widths
    Center = 1 x N array of Lorentzian centers
    xdata = 1 x N independent variable
"""
# Convert to arrays (just in case):
Amp = np.array(Amp)
Width = np.array(Width)
Center = np.array(Center)
    # Estimate the number of pools
    Num_variables = len(Amp) + len(Width) + len(Center)
    # Make sure the total is divisible by 3
    assert (Num_variables % 3 == 0), "Please provide 3 variables per pool"
# calculate final output
num_pools = int(Num_variables/3)
# Preallocate output
Lsum = np.zeros( (xdata.shape[0]))
for idx in range(num_pools):
# assign each variable
amp = Amp[idx]
width = Width[idx]
center = Center[idx]
# estimate signal and sum
Lsum += Lorentzian( [amp,width,center], xdata)
return Lsum
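# A minimal usage sketch. The Lorentzian() helper is external to this snippet;
# the stand-in below is a plausible single-peak Lorentzian profile, not the
# original implementation.
def Lorentzian(params, xdata):
    amp, width, center = params
    return amp / (1.0 + ((xdata - center) / width) ** 2)

x = np.linspace(-5, 5, 101)
signal = lorentzian_sim(x, Amp=[1.0, 0.5], Width=[0.8, 1.2], Center=[-1.0, 2.0])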
|
04560cf52c212ea504939b1ad2d65dd516176cd4
| 3,637,501
|
def convertASTtoThreeAddrForm(ast):
"""Convert an AST to a three address form.
Three address form is (op, reg1, reg2, reg3), where reg1 is the
destination of the result of the instruction.
I suppose this should be called three register form, but three
address form is found in compiler theory.
"""
return [(node.value, node.reg) + tuple([c.reg for c in node.children])
for node in ast.allOf('op')]
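# A tiny, hypothetical illustration of the node interface this function expects
# (value, reg, children, and an allOf('op') iterator); the real AST type lives
# elsewhere in the codebase.
class _Node:
    def __init__(self, value, reg, children=()):
        self.value, self.reg, self.children = value, reg, children

class _Ast:
    def __init__(self, ops):
        self.ops = ops
    def allOf(self, kind):
        return iter(self.ops)

# a + b, with a in r1, b in r2, and the result in r0:
add = _Node('add', 'r0', (_Node('a', 'r1'), _Node('b', 'r2')))
convertASTtoThreeAddrForm(_Ast([add]))  # [('add', 'r0', 'r1', 'r2')]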
|
17bcd628b2b6feb916cdfaea2f6a210f47afa7bf
| 3,637,502
|
import numpy as np
import pandas as pd
import tensorflow as tf
def aggregate_corrupt_metrics(metrics,
corruption_types,
max_intensity,
alexnet_errors_path=None,
fine_metrics=False):
"""Aggregates metrics across intensities and corruption types."""
results = {
'test/nll_mean_corrupted': 0.,
'test/kl_mean_corrupted': 0.,
'test/elbo_mean_corrupted': 0.,
'test/accuracy_mean_corrupted': 0.,
'test/ece_mean_corrupted': 0.,
'test/member_acc_mean_corrupted': 0.,
'test/member_ece_mean_corrupted': 0.
}
for intensity in range(1, max_intensity + 1):
nll = np.zeros(len(corruption_types))
kl = np.zeros(len(corruption_types))
elbo = np.zeros(len(corruption_types))
acc = np.zeros(len(corruption_types))
ece = np.zeros(len(corruption_types))
member_acc = np.zeros(len(corruption_types))
member_ece = np.zeros(len(corruption_types))
for i in range(len(corruption_types)):
dataset_name = '{0}_{1}'.format(corruption_types[i], intensity)
nll[i] = metrics['test/nll_{}'.format(dataset_name)].result()
if 'test/kl_{}'.format(dataset_name) in metrics.keys():
kl[i] = metrics['test/kl_{}'.format(dataset_name)].result()
else:
kl[i] = 0.
if 'test/elbo_{}'.format(dataset_name) in metrics.keys():
elbo[i] = metrics['test/elbo_{}'.format(dataset_name)].result()
else:
elbo[i] = 0.
acc[i] = metrics['test/accuracy_{}'.format(dataset_name)].result()
ece[i] = metrics['test/ece_{}'.format(dataset_name)].result()
if 'test/member_acc_mean_{}'.format(dataset_name) in metrics.keys():
member_acc[i] = metrics['test/member_acc_mean_{}'.format(
dataset_name)].result()
else:
member_acc[i] = 0.
if 'test/member_ece_mean_{}'.format(dataset_name) in metrics.keys():
member_ece[i] = list(metrics['test/member_ece_mean_{}'.format(
dataset_name)].result().values())[0]
else:
member_ece[i] = 0.
if fine_metrics:
results['test/nll_{}'.format(dataset_name)] = nll[i]
results['test/kl_{}'.format(dataset_name)] = kl[i]
results['test/elbo_{}'.format(dataset_name)] = elbo[i]
results['test/accuracy_{}'.format(dataset_name)] = acc[i]
results['test/ece_{}'.format(dataset_name)] = ece[i]
avg_nll = np.mean(nll)
avg_kl = np.mean(kl)
avg_elbo = np.mean(elbo)
avg_accuracy = np.mean(acc)
avg_ece = np.mean(ece)
avg_member_acc = np.mean(member_acc)
avg_member_ece = np.mean(member_ece)
results['test/nll_mean_{}'.format(intensity)] = avg_nll
results['test/kl_mean_{}'.format(intensity)] = avg_kl
results['test/elbo_mean_{}'.format(intensity)] = avg_elbo
results['test/accuracy_mean_{}'.format(intensity)] = avg_accuracy
results['test/ece_mean_{}'.format(intensity)] = avg_ece
results['test/nll_median_{}'.format(intensity)] = np.median(nll)
results['test/kl_median_{}'.format(intensity)] = np.median(kl)
results['test/elbo_median_{}'.format(intensity)] = np.median(elbo)
results['test/accuracy_median_{}'.format(intensity)] = np.median(acc)
results['test/ece_median_{}'.format(intensity)] = np.median(ece)
results['test/nll_mean_corrupted'] += avg_nll
results['test/kl_mean_corrupted'] += avg_kl
results['test/elbo_mean_corrupted'] += avg_elbo
results['test/accuracy_mean_corrupted'] += avg_accuracy
results['test/ece_mean_corrupted'] += avg_ece
results['test/member_acc_mean_{}'.format(intensity)] = avg_member_acc
results['test/member_ece_mean_{}'.format(intensity)] = avg_member_ece
results['test/member_acc_mean_corrupted'] += avg_member_acc
results['test/member_ece_mean_corrupted'] += avg_member_ece
results['test/nll_mean_corrupted'] /= max_intensity
results['test/kl_mean_corrupted'] /= max_intensity
results['test/elbo_mean_corrupted'] /= max_intensity
results['test/accuracy_mean_corrupted'] /= max_intensity
results['test/ece_mean_corrupted'] /= max_intensity
results['test/member_acc_mean_corrupted'] /= max_intensity
results['test/member_ece_mean_corrupted'] /= max_intensity
if alexnet_errors_path:
with tf.io.gfile.GFile(alexnet_errors_path, 'r') as f:
df = pd.read_csv(f, index_col='intensity').transpose()
alexnet_errors = df.to_dict()
corrupt_error = {}
for corruption in corruption_types:
alexnet_normalization = alexnet_errors[corruption]['average']
errors = np.zeros(max_intensity)
for index in range(max_intensity):
dataset_name = '{0}_{1}'.format(corruption, index + 1)
errors[index] = 1. - metrics['test/accuracy_{}'.format(
dataset_name)].result()
average_error = np.mean(errors)
corrupt_error[corruption] = average_error / alexnet_normalization
results['test/corruption_error_{}'.format(
corruption)] = 100 * corrupt_error[corruption]
results['test/mCE'] = 100 * np.mean(list(corrupt_error.values()))
return results
|
3857eae3bf2260fb80c80ff7c7c77770a1ea67ce
| 3,637,503
|
def lc_reverse_integer(n):
"""
Given a 32-bit signed integer, reverse digits of an integer. Assume we are dealing with an environment which could
only hold integers within the 32-bit signed integer range. For the purpose of this problem, assume that your
function returns 0 when the reversed integer overflows.
Examples:
>>> lc_reverse_integer(123)
321
>>> lc_reverse_integer(-123)
-321
>>> lc_reverse_integer(120)
21
"""
class Solution(object):
@staticmethod
def reverse(x):
neg = x < 0
if neg:
x = -x
            result = 0
            while x:
                result = result * 10 + x % 10
                x //= 10  # floor division; "/" would produce a float in Python 3
            # 32-bit signed range is [-2**31, 2**31 - 1]
            if result > (2 ** 31 if neg else 2 ** 31 - 1):
                return 0
            return -result if neg else result
return Solution.reverse(n)
|
eff1054873ef0e77a82e34b7cf7af51d42f27d6c
| 3,637,504
|
from torch import Tensor, clamp
def hard_swish(x_tens: Tensor, inplace: bool = False):
"""
| Hardswish layer implementation:
| 0 for x <= -3
| x for x >= 3
| x * (x + 3) / 6 otherwise
More information can be found in the paper
`here <https://arxiv.org/abs/1905.02244>`__.
:param x_tens: the input tensor to perform the swish op on
:param inplace: True to run the operation in place in memory, False otherwise
:return: 0 for x <= -3, x for x >= 3, x * (x + 3) / 6 otherwise
"""
if inplace:
x_tens.mul_(clamp(x_tens + 3, 0, 6))
x_tens.div_(6)
else:
relu_6 = x_tens + 3
relu_6 = relu_6.clamp(0, 6)
x_tens = x_tens * relu_6
x_tens = x_tens / 6
return x_tens
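# A quick sanity check of the piecewise behaviour described in the docstring
# (assumes PyTorch is available):
import torch
x = torch.tensor([-4.0, -1.0, 0.0, 2.0, 5.0])
hard_swish(x)  # ≈ tensor([0.0000, -0.3333, 0.0000, 1.6667, 5.0000])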
|
29b59d24ce321ac3f08aa72d3c9175b7453d6977
| 3,637,505
|
import urwid
def get_buttons():
""" renders the ok and cancel buttons. Called from get_body() """
# this is going to be what we actually do when someone clicks the button
def ok_button_callback(button):
raise ExitPasterDemo(exit_token='ok')
# leading spaces to center it....seems like there should be a better way
b = urwid.Button(' OK', on_press=ok_button_callback)
okbutton = urwid.AttrWrap(b, 'button', 'buttonfocus')
# second verse, same as the first....
def cancel_button_callback(button):
raise ExitPasterDemo(exit_token='cancel')
b = urwid.Button('Cancel', on_press=cancel_button_callback)
cancelbutton = urwid.AttrWrap(b, 'button', 'buttonfocus')
return urwid.GridFlow([okbutton, cancelbutton], 10, 7, 1, 'center')
|
87a44506d95d4cb91ee10a9fd140e6a18e64d2e8
| 3,637,506
|
import csv
import numpy as np
from libpysal.weights import W  # assumed home of the W weights class (PySAL)
def dist_weights(distfile, weight_type, ids, cutoff, inverse=False):
"""
Returns a distance-based weights object using user-defined options
Parameters
----------
distfile: string, a path to distance csv file
    weight_type: string, either 'threshold' or 'knn'
ids: a numpy array of id values
cutoff: float or integer; float for 'threshold' weight type and integer for knn type
inverse: boolean; true if inversed weights required
"""
    try:
        with open(distfile) as f:
            sample = f.read(1024)
            f.seek(0)
            reader = csv.reader(f)
            if csv.Sniffer().has_header(sample):
                next(reader)  # skip the header row
            data_csv = list(reader)
    except (OSError, csv.Error):
        data_csv = []
if weight_type == 'threshold':
        def neighbor_func(dists, threshold):
            return [d for d in dists if d[0] <= threshold]
else:
def neighbor_func(dists, k):
dists.sort()
return dists[:k]
if inverse:
def weight_func(dists, alpha=-1.0):
return list((np.array(dists)**alpha).round(decimals=6))
else:
def weight_func(dists, binary=False):
return [1]*len(dists)
dist_src = {}
for row in data_csv:
des = dist_src.setdefault(row[0], {})
if row[0] != row[1]:
des[row[1]] = float(row[2])
neighbors, weights = {}, {}
for id_val in ids:
if id_val not in dist_src:
            raise ValueError('An ID value does not exist in the distance file')
else:
            dists = list(zip(dist_src[id_val].values(), dist_src[id_val].keys()))
ngh, wgt = [], []
if len(dists) > 0:
nghs = neighbor_func(dists, cutoff)
for d, i in nghs:
ngh.append(i)
wgt.append(d)
neighbors[id_val] = ngh
weights[id_val] = weight_func(wgt)
w = W(neighbors, weights)
w.id_order = ids
return w
|
1e9e5743933f15de89ac768cea268a52842db9e0
| 3,637,507
|
def alias_phased_obs_with_phase(x, y, start, end):
"""
:param x: a list containing phases
:param y: a list containing observations
:param start: start phase
:param end: end phase
:return: aliased phases and observations
"""
x = [float(n) for n in x]
y = [float(n) for n in y]
if start > end:
raise ValueError("Start phase can't be larger than stop phase.")
if len(x) != len(y):
raise ValueError("x and y must be the same size.")
distance = int(start - min(x))
if (distance == 0 and min(x) > start) or (distance < 0 < min(x)):
distance = distance - 1
x = [phase + distance for phase in x]
new_x = x[:]
new_y = y[:]
i = 1
while max(new_x) < end:
x_temp = [phase + i for phase in x]
new_x = new_x + x_temp
new_y = new_y + y[:]
i = i + 1
_x = []
_y = []
for phase, value in zip(new_x, new_y):
if start <= phase <= end:
_x.append(phase)
_y.append(value)
return _x, _y
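# A worked example: one cycle of phased data aliased onto the range [0, 2].
# With start=0 and min(x)=0.1, the data are first shifted left by one cycle
# and then repeated until the end phase is covered.
px, py = alias_phased_obs_with_phase([0.1, 0.5, 0.9], [1.0, 2.0, 3.0], 0, 2)
# px ≈ [0.1, 0.5, 0.9, 1.1, 1.5, 1.9] (up to float rounding)
# py == [1.0, 2.0, 3.0, 1.0, 2.0, 3.0]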
|
4b9bd3507180d2e7cd2c9957e1c2ea4ce3e17cb6
| 3,637,509
|
import webbrowser
import time
from azure.core.exceptions import HttpResponseError
from azure.mgmt.storage import StorageManagementClient
def create_storage_account(subscription: str, resource_group: str, name: str) -> None:
"""Creates an Azure storage account. Also adds upload access, as well
as possibility to list/generate access keys, to the user creating it
(i.e. the currently logged in user).
Note that Azure documentation states that it can take up to five minutes
after the command has finished until the added access is enabled in practice.
"""
storage_client = StorageManagementClient(
_credential(), _subscription_id(subscription)
)
azure_pim_already_open = False
while True:
try:
return storage_client.storage_accounts.begin_create(
resource_group,
name,
{
"sku": {"name": "Standard_ZRS"},
"kind": "StorageV2",
"location": "northeurope",
"encryption": {
"key_source": "Microsoft.Storage",
"services": {"blob": {"key_type": "Account", "enabled": True}},
},
},
).result()
except HttpResponseError as exc:
if "AuthorizationFailed" in str(exc):
if not azure_pim_already_open:
webbrowser.open(f"{PIMCOMMON_URL}/azurerbac")
                    print(
                        "Not able to create the new storage account. Do you have "
                        "enough privileges to do it? We automatically opened the URL "
                        "where you activate Azure PIM. Please activate/add the "
                        "necessary privileges."
                    )
                    azure_pim_already_open = True
                print("Retrying storage account creation in 30 seconds.")
time.sleep(30)
else:
raise RuntimeError("Not able to create new storage account.") from exc
|
c6b58f6b2b0e3d2a980fff90df4cb54d99151536
| 3,637,510
|
import platform
def get_os():
"""
Checks the OS of the system running and alters the directory structure accordingly
:return: The directory location of the Wordlists folder
"""
if platform.system() == "Windows":
wordlist_dir = "Wordlists\\"
else:
wordlist_dir = "Wordlists/"
return wordlist_dir
|
6f4a6f70505b1512987e75a069a960b136f66d97
| 3,637,511
|
import cv2
import numpy as np
def _try_warp(image, transform_, large_warp_dim, dsize, max_dsize, new_origin,
              flags, borderMode, borderValue):
"""
Helper for warp_affine
"""
if large_warp_dim == 'auto':
# this is as close as we can get to actually discovering SHRT_MAX since
# it's not introspectable through cv2. numpy and cv2 could be pointing
# to a different limits.h, but otherwise this is correct
# https://stackoverflow.com/a/44123354
SHRT_MAX = np.iinfo(np.short).max
large_warp_dim = SHRT_MAX
max_dim = max(image.shape[0:2])
if large_warp_dim is None or max_dim < large_warp_dim:
try:
M = np.asarray(transform_)
return cv2.warpAffine(image, M[0:2], dsize=dsize, flags=flags,
borderMode=borderMode,
borderValue=borderValue)
except cv2.error as e:
if e.err == 'dst.cols < SHRT_MAX && dst.rows < SHRT_MAX && src.cols < SHRT_MAX && src.rows < SHRT_MAX':
print(
'Image too large for warp_affine. Bypass this error by setting '
'kwimage.warp_affine(large_warp_dim="auto")')
raise e
else:
# make these pieces as large as possible for efficiency
pieces_per_dim = 1 + max_dim // (large_warp_dim - 1)
return _large_warp(image, transform_, dsize, max_dsize,
new_origin, flags, borderMode,
borderValue, pieces_per_dim)
|
8634aa11c5b26a581f80643bc4a22d1456d29c3d
| 3,637,512
|
from datetime import datetime
from typing import Any
import requests
import json
from fastapi import Depends, HTTPException
from sqlalchemy.orm import Session
def read_leaderboard(
*,
db: Session = Depends(deps.get_db),
rank_type: schemas.RankType,
skip: int = None,
limit: int = 10,
min_studied: int = 10,
deck_id: int = None,
date_start: datetime = None,
date_end: datetime = None,
current_user: models.User = Depends(deps.get_current_active_user),
) -> Any:
"""
Retrieves leaderboard of users since the specified start time, or all time otherwise
"""
top_users = interface.statistics.get_leaderboard(db=db, user=current_user, rank_type=rank_type, skip=skip,
limit=limit,
min_studied=min_studied, deck_id=deck_id, date_start=date_start,
date_end=date_end)
if isinstance(top_users, requests.exceptions.RequestException):
raise HTTPException(status_code=555, detail="Connection to scheduler is down")
if isinstance(top_users, json.decoder.JSONDecodeError):
raise HTTPException(status_code=556, detail="Scheduler malfunction")
return top_users
|
bbb4b8dfc9223aa982a419596b6f9391c1f600aa
| 3,637,513
|
from essentia import array
import essentia.standard as ess
import numpy as np
def getMultiFeatureOnsets(XAudio, Fs, hopSize):
    """
    Call Essentia's implementation of multi-feature
    beat tracking
:param XAudio: Numpy array of raw audio samples
:param Fs: Sample rate
:param hopSize: Hop size of each onset function value
    :returns (tempo, beats): Average tempo, and a numpy array
        of beat locations in onset-function frame indices
"""
X = array(XAudio)
b = ess.BeatTrackerMultiFeature()
beats = b(X)
print("Beat confidence: ", beats[1])
beats = beats[0]
tempo = 60/np.mean(beats[1::] - beats[0:-1])
beats = np.array(np.round(beats*Fs/hopSize), dtype=np.int64)
return (tempo, beats)
|
4fefeeb2e87a2c32676d7f3c42ca8c426c15ba79
| 3,637,514
|
def check_derivation(derivation, premises, conclusion):
"""Checks if a derivation is ok. If it is, returns an empty list, otherwise returns [step, error]
Does not check if the conclusion and premises are ok, for that there is another function"""
for step in sorted(derivation):
try:
# See that the on steps are all between 1 and the current step
for s in derivation[step]['on_steps']:
if not 0 < s < step:
raise ValueError("Incorrect 'on steps' specification")
current_sups = derivation[step]['open_sups'][:]
previous_sups = list()
if step > 1:
previous_sups = derivation[step-1]['open_sups'][:]
            # If the step does not open or close any previous suppositions, or closes the last open one
if (current_sups == previous_sups or current_sups == previous_sups[:-1]) and \
derivation[step]['rule'] != 'SUP':
if derivation[step]['rule'] == 'PREM':
# Check that the formula is a premise
if derivation[step]['formula'] not in premises:
raise ValueError("Formula given is not among the premises")
# Check that this is the first step or that the previous step is also a premise
# And that the steps field is empty
if (step == 1 or derivation[step-1]['rule'] == 'PREM') and derivation[step]['on_steps'] == list():
pass
else:
raise ValueError("Premises go at the beggining of the derivation and have empty 'on steps'")
else:
# A rule is being applied
prev_steps = list()
for s in derivation[step]['on_steps']:
if s not in derivation:
raise ValueError(f"Non existent step {s}")
prev_steps.append(derivation[s])
results = derivation[step]['rule'](derivation[step], prev_steps)
is_ok = False
for result in results:
if derivation[step]['formula'] == result:
is_ok = True
if not is_ok:
raise ValueError("Rule incorrectly applied")
                    # (the loop above checks that the step's formula matches one of the rule's results)
                    pass
# If it contains one more supposition (the current step opens one)
elif current_sups[:-1] == previous_sups and current_sups[-1] == step:
# The rule must be SUP and the on_steps must be empty
if derivation[step]['rule'] == 'SUP' and derivation[step]['on_steps'] == list():
pass
else:
raise ValueError("Only SUP can open suppositions, and it must have empty 'on steps'")
else:
raise ValueError("Incorrect handling of suppositions")
except ValueError as e:
return [step, str(e)]
# Lastly, see that the derivation does not contain open suppositions at the last step,
# and that the conclusion is the last step
last_step = max(derivation)
if derivation[last_step]['open_sups'] != list():
return [last_step, 'The derivation ends with open suppositions']
elif derivation[last_step]['formula'] != conclusion:
return [last_step, 'The rules are correctly applied but the final formula is not the conclusion']
return []
|
67c15ae26ee399e95808495845c260ca55808532
| 3,637,515
|
def __trunc__(self, l):
    """Return a bitstring formed by truncating self to length |l|; if l < 0, truncate from the left"""
    if not isint(l):
        raise IndexError('length not an integer')
    if l < 0:
        B = self._B
        if self._l <= max(B, -l):
            return __itrunc__(type(self)(self), l)
        s = type(self)()
        if -l <= B:
            s._x = (((self._x[-2] << B) | self._x[-1]) >> ((B - self._l) % B)) & ((1 << -l) - 1)
        else:
            o = (self._l + l) % B
            nl = o - l  # new length in the best case
            s._x = x = self._x[-((nl + B - 1) // B):]
            if o:  # have to shift
                m = (1 << B) - 1
                for i in range(len(x) - 1):
                    x[i] = (x[i] << o) & m | (x[i + 1] >> (B - o))
                if (B - 1 - l) // B < len(x):
                    del x[-1]
                else:
                    x[-1] = (x[-1] << o) & m
        s._l = -l
        return s
    if l >= self._l:  # extend with zeroes
        return __itrunc__(type(self)(self), l)
    B = self._B
    x = self._x
    r = type(self)()
    if l <= B:
        r._x = x[0] >> (B - l) if self._l > B else x >> (self._l - l)
    else:
        r._x = x[:(l + B - 1) // B]
        if l % B:
            r._x[-1] &= -1 << (B - l % B)
    r._l = l
    return r
|
52dbe55e16aa32a36a83ef5567768fcde2babe9e
| 3,637,516
|
def version_list_url(content):
"""Returns a URL to list of content model versions,
filtered by `content`'s grouper
"""
versionable = _cms_extension().versionables_by_content[content.__class__]
return _version_list_url(
versionable, **versionable.grouping_values(content, relation_suffix=False)
)
|
adce83b018b79f9b75eeb1f359cc8ec3a6633756
| 3,637,518
|
def as_ops(xs):
"""
Converts an iterable of values to a tuple of Ops using as_op.
Arguments:
xs: An iterable of values.
Returns:
A tuple of Ops.
"""
return tuple(as_op(x) for x in xs)
|
a47d82ca27655e02cd5ee45aa787d0d42272da63
| 3,637,519
|
def send_neighbors():
"""
The node sends its neighbors to the requesting node.
:return: <json> This node's neighbors.
"""
    bc_nodes_mutex.acquire()
    try:
        neighbors = blockchain.nodes
        neighbors_dict = {}
        for i, node in enumerate(neighbors):
            neighbors_dict[i] = node
    finally:
        # release the lock even if building the dict fails
        bc_nodes_mutex.release()
return jsonify(neighbors_dict), 200
|
3155f102e3af00be52c5312a9ce4c57fee06afe3
| 3,637,520
|
from typing import Optional
def get_db_home(db_home_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDbHomeResult:
"""
This data source provides details about a specific Db Home resource in Oracle Cloud Infrastructure Database service.
Gets information about the specified Database Home.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_db_home = oci.database.get_db_home(db_home_id=var["db_home_id"])
```
:param str db_home_id: The Database Home [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
"""
__args__ = dict()
__args__['dbHomeId'] = db_home_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:database/getDbHome:getDbHome', __args__, opts=opts, typ=GetDbHomeResult).value
return AwaitableGetDbHomeResult(
compartment_id=__ret__.compartment_id,
database=__ret__.database,
database_software_image_id=__ret__.database_software_image_id,
db_home_id=__ret__.db_home_id,
db_home_location=__ret__.db_home_location,
db_system_id=__ret__.db_system_id,
db_version=__ret__.db_version,
defined_tags=__ret__.defined_tags,
display_name=__ret__.display_name,
freeform_tags=__ret__.freeform_tags,
id=__ret__.id,
is_desupported_version=__ret__.is_desupported_version,
kms_key_id=__ret__.kms_key_id,
kms_key_version_id=__ret__.kms_key_version_id,
last_patch_history_entry_id=__ret__.last_patch_history_entry_id,
lifecycle_details=__ret__.lifecycle_details,
source=__ret__.source,
state=__ret__.state,
time_created=__ret__.time_created,
vm_cluster_id=__ret__.vm_cluster_id)
|
43d855888e0e884e637fb5e06eab3b2397758eae
| 3,637,521
|
import datetime as dt
from calendar import monthrange
def all_daily_file_paths_for_month(hemisphere, year, month, search_paths):
"""Return a list of all the filenames available for the given year and month.
"""
start_date = dt.date(year, month, 1)
end_date = dt.date(year, month, monthrange(year, month)[1])
return daily_file_paths_in_date_range(hemisphere, start_date, end_date, search_paths)
|
8bd251b16b3591f704c3d88327c2f577b259a171
| 3,637,523
|
def uniques_only(iterable):
"""
This works only for sequence, but not for all iterable
"""
items = []
for i, n in enumerate(iterable):
if n not in iterable[:i]:
items.append(n)
return items
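# A hedged variant for arbitrary (possibly one-shot) iterables: track seen
# items in a set instead of re-slicing the input. Requires hashable items.
def uniques_only_any_iterable(iterable):
    seen = set()
    items = []
    for n in iterable:
        if n not in seen:
            seen.add(n)
            items.append(n)
    return items

uniques_only([1, 2, 1, 3, 2])                  # [1, 2, 3]
uniques_only_any_iterable(iter([1, 2, 1, 3]))  # [1, 2, 3] – works on a generator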
|
159e220be340fc027053b7dbae0d146ae88ceea9
| 3,637,524
|
def str2date(start_time_str):
"""
将到到期日中的大写中文字符转化为标准数字格式
2020年1月1日 --> 2020-1-1
"""
list_s = [i for i in start_time_str]
num_list = []
for index, s in enumerate(list_s):
list_s[index] = CN_NUM.get(s, s)
if isinstance(list_s[index], int):
num_list.append((index, str(list_s[index])))
    # Use whether each character is a digit to find the digit groups, then join them
str_num = ''
flag = 0
if num_list:
str_num = num_list[0][1]
str_flag = num_list[0][0]
for num in num_list[1:]:
if num[0] - str_flag == 1:
str_flag = num[0]
str_num += num[1]
else:
str_flag = num[0]
str_num = str_num + '-' + num[1]
flag += 1
if flag == 0 and len(str_num) > 4:
try:
str_n = str(parse(str_num))
str_num = str_n.split(' ')[0]
except Exception as e:
print(e)
str_num = ''
    if len(str_num):  # if non-empty, convert the date format
if validate_date(str_num):
str_num, flag = match_date(str_num)
else:
            str_num = None  # default date
return str_num
|
dbe289dbab8721fdb4a2d095b2754dcea61fff1e
| 3,637,525
|
import json
import pickle as pkl
import numpy as np
from collections import OrderedDict as odict  # the pkl/odict aliases used below
def load_omnical_metrics(filename):
"""Load an omnical metrics file.
Parameters
----------
filename : str
Path to an omnical metrics file.
Returns
-------
metrics : dict
A dictionary containing omnical metrics.
Raises
------
IOError:
If the filetype inferred from the filename is not "json" or "pkl",
an IOError is raised.
"""
# get filetype
filetype = filename.split('.')[-1]
# load json
if filetype == 'json':
with open(filename, 'r') as f:
metrics = json.load(f, object_pairs_hook=odict)
# ensure keys of ant_dicts are not strings
# loop over pols
for key, metric in metrics.items():
# loop over items in each pol metric dict
for key2 in metric.keys():
if isinstance(metric[key2], (dict, odict)):
if isinstance(list(metric[key2].values())[0], list):
metric[key2] = odict([(int(i), np.array(metric[key2][i])) for i in metric[key2]])
elif isinstance(list(metric[key2].values())[0], (str, np.unicode_)):
metric[key2] = odict([(int(i), metric[key2][i].astype(np.complex128)) for i in metric[key2]])
elif isinstance(metric[key2], list):
metric[key2] = np.array(metric[key2])
# load pickle
elif filetype == 'pkl':
with open(filename, 'rb') as f:
inp = pkl.Unpickler(f)
metrics = inp.load()
else:
raise IOError("Filetype not recognized, try a json or pkl file")
return metrics
|
ed2082f74904d0101efcd958f0acdb8383adf849
| 3,637,526
|
def tsallis(ion_temp, avg_temp, n):
"""
Non-normalized probability of an ion at ion-temp using a Tsallis distribution
:param ion_temp: temperature of ion (K)
:param avg_temp: average temperature of ions (K)
:param n: average harmonic oscillator level
:return: value
"""
kb = 1.38e-23
energy = ion_temp * kb
top = (n - 3) * (n - 2) * (n - 1) * energy ** 2
bot = 2 * (n * kb * avg_temp) ** 3 * (1 + energy / (n * kb * avg_temp)) ** n
output = top / bot
return output
|
4598c5241fc06219938beced4c9d5a4473cf8363
| 3,637,527
|
def nasnet_dual_path_sequential(return_two=True,
first_ordinals=0,
last_ordinals=0,
can_skip_input=False):
"""
NASNet specific dual path sequential container.
Parameters:
----------
return_two : bool, default True
Whether to return two output after execution.
first_ordinals : int, default 0
Number of the first modules with single input/output.
last_ordinals : int, default 0
Number of the final modules with single input/output.
    can_skip_input : bool, default False
        Whether some modules can skip their input.
"""
return DualPathSequential(
return_two=return_two,
first_ordinals=first_ordinals,
last_ordinals=last_ordinals,
dual_path_scheme=NasDualPathScheme(can_skip_input=can_skip_input),
dual_path_scheme_ordinal=nasnet_dual_path_scheme_ordinal)
|
bb148410e1261444586224ea32f981e340b2f1e3
| 3,637,528
|
def _extractRGBFromHex(hexCode):
"""
    Extract RGB information from a hexadecimal color code
    Parameters:
        hexCode (string): a hexadecimal color code
Returns:
A tuple containing Red, Green and Blue information
"""
hexCode = hexCode.lstrip('#') # Remove the '#' from the string
# Convert each byte into decimal, store it in a tuple and return
return tuple(int(hexCode[i:i+2], 16) for i in (0, 2, 4))
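# Example: '#1e90ff' (dodger blue) decodes to its byte triple.
_extractRGBFromHex('#1e90ff')  # (30, 144, 255)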
|
e1d67b4f2004e5e2d4a646a3cc5dc49a1e8cd890
| 3,637,529
|
def summarise(indices, fields, **kwargs):
"""Summarise taxonomy."""
summary = {}
meta = kwargs['meta']
try:
if 'taxid' in meta.taxon:
summary.update({'taxid': meta.taxon['taxid']})
names = []
for rank in ('superkingdom', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'):
if rank in meta.taxon:
names.append(meta.taxon[rank])
if names:
summary.update({'lineage': '; '.join(names)})
if 'cat' in meta.plot:
rank = meta.plot['cat'].split('_')[-1]
summary.update({'targetRank': rank})
summary.update({'target': meta.taxon[rank]})
except Exception:
pass
return summary
|
87f04cb86978e2350f3e535b65a1efa14af77ee8
| 3,637,530
|
def _get_active_user_p_by_date(start_date, end_date, *, seed=None):
"""Generate a trajectory for % daily active users (DAU).
The series is a random walk with drift.
DAU % starts between approx. 20-30% range and then follows random walk
with drift over time. In bad cases, DAU % might halve within a year.
Args:
start_date (str): Y-m-d date string for the start of the series.
end_date (str): Y-m-d date string for the end of the series.
        seed (int, optional): Random seed. Defaults to None.
    Returns:
        pd.Series: Daily active-user fraction (DAU %), indexed by date.
"""
if seed:
np.random.seed(seed)
date_index = pd.date_range(start_date, end_date,
closed='left')
# set up random walk (steps cumsum for drift)
ACTIVE_P = np.random.normal(0.25, 0.03)
steps = np.random.normal(0, 0.002, size=len(date_index))
DAU_p_draw = ((np.zeros(len(date_index))
+ ACTIVE_P + steps.cumsum())
.clip(0, 1))
return pd.Series(DAU_p_draw, index=date_index)
|
6e4df3c7ab5a114aa177563e1898c22dd523576a
| 3,637,532
|
def get_unknow_opttrans_attr(path):
"""Utility method that gives a `dict` of unknown optional transitive
path attributes of `path`.
Returns dict: <key> - attribute type code, <value> - unknown path-attr.
"""
path_attrs = path.pathattr_map
unknown_opt_tran_attrs = {}
    for _, attr in path_attrs.items():
if (isinstance(attr, BGPPathAttributeUnknown) and
attr.is_optional_transitive()):
unknown_opt_tran_attrs[attr.type_code] = attr
return unknown_opt_tran_attrs
|
a3bb4c039ad713a03ad6bdeb2a438530d34af074
| 3,637,533
|
def get_user_partition_groups(course_key, user_partitions, user, partition_dict_key='name'):
"""
Collect group ID for each partition in this course for this user.
Arguments:
course_key (CourseKey)
user_partitions (list[UserPartition])
user (User)
partition_dict_key - i.e. 'id', 'name' depending on which partition attribute you want as a key.
Returns:
dict[partition_dict_key: Group]: Mapping from user partitions to the group to
which the user belongs in each partition. If the user isn't
in a group for a particular partition, then that partition's
ID will not be in the dict.
"""
partition_groups = {}
for partition in user_partitions:
group = partition.scheme.get_group_for_user(
course_key,
user,
partition,
)
if group is not None:
partition_groups[getattr(partition, partition_dict_key)] = group
return partition_groups
|
3bb2f76f4a48ce0af745637810903b8086b2fc02
| 3,637,534
|
import requests
from bs4 import BeautifulSoup
import yaml
def getemptybracket(league, testyear):
"""
Generates an empty bracket for the current year. Does not work for any other year due to the url only containing
info for the current year. Prefer to use the output yaml as a guide and correct as needed each year
:param league: league: str, mens or womens league
:param testyear: int, requested empty bracket year
:return: None, creates a yaml file with the seeds for the current year
"""
# URL for bracket
urlbracket = f'http://www.espn.com/{league}-college-basketball/tournament/bracket'
bracket_teams = {}
seed_list = []
results = requests.get(urlbracket, headers=headers)
soup = BeautifulSoup(results.text, "html.parser")
# Gets year for the bracket
year = soup.find(class_="h2")
year = str(year).replace('<h1 class="h2">NCAA Tournament Bracket - ', '').replace('</h1>', '')
    # Exit if the selected year is not the current year (note: year parsed above is a string)
    if str(testyear) != year:
        log.error('No empty bracket for selected year')
        raise ValueError('No empty bracket for selected year')
# Split regions
region_div = soup.find_all(class_="region")
for region in region_div:
# Splits region into list of details
regionlist = str(region).split('>')
for i in regionlist:
# If regionlist item starts with a number(seed) and is longer than 25 chars(team link) adds element to list
if len(i) > 25 and i[:1].isnumeric():
# Does not add element if it already exists
if i not in seed_list:
seed_list.append(i)
# Creates dictionary with seed as key for team
for element in seed_list:
bracket_teams[f'd1r{-(-(seed_list.index(element)+1)//16)}seed{element.split(" ")[0]}'] = \
nameCheck(element.split('title=')[-1].replace('"', ''))
# Dump teams dict into yaml
if league == 'mens':
with open(f'{bracketpath}NCAAMBracket{year}.yaml', 'w') as f:
yaml.dump(bracket_teams, f)
elif league == 'womens':
with open(f'{bracketpath}NCAAWBracket{year}.yaml', 'w') as f:
yaml.dump(bracket_teams, f)
return None
|
b3a81085a600d9bf9b23dd2e46577932aec2cdfe
| 3,637,535
|
from scipy.stats import kendalltau
def kendall_tau(solar_ts, wind_ts):
"""
Compute Kendall's tau correlation between the given solar
and wind timeseries data. Return just the correlation coefficient.
Parameters
----------
solar_ts : ndarray
Solar time-series vector for a single site
wind_ts : ndarray
Wind time-series vector for a single site
Returns
-------
float
Kendall's tau
"""
return kendalltau(solar_ts, wind_ts)[0]
|
e9e256cf4578d6d3a6fa4b835ea5a2eefff6ba2e
| 3,637,536
|
from unittest.mock import MagicMock, patch
def test_osx_memdata_with_comma():
"""
test osx memdata method when comma returns
"""
def _cmd_side_effect(cmd):
if "hw.memsize" in cmd:
return "4294967296"
elif "vm.swapusage" in cmd:
return "total = 1024,00M used = 160,75M free = 863,25M (encrypted)"
with patch.dict(
core.__salt__, {"cmd.run": MagicMock(side_effect=_cmd_side_effect)}
), patch("salt.utils.path.which", MagicMock(return_value="/usr/sbin/sysctl")):
ret = core._osx_memdata()
assert ret["swap_total"] == 1024
assert ret["mem_total"] == 4096
|
a111965caec46a271e6c56582148e56637539b8e
| 3,637,538
|
import bs4 as bs
from dash import html
def convert_html_to_dash(el, style=None):
"""[Quite] Conveniently auto-converts whole input HTML
into the corresponding Python Dash HTML components. Uses
Beautiful Soup to auto-parse into required HTML elements
('el') if given str input.
Parameters
----------
el : bs.element.NavigableString, str
Accepts bs4 HTML 'element' object or raw html as string.
(Input condition checked and converted by inner function)
Beautiful Soup-parsed HTML element, by the tag (e.g., "<p>Hello</p>").
If not already in bs4-format (just str instead), recursion is employed to simply
auto bs4-parse the HTML into a `NavigableString` which can then be passed
into the included Dash conversion.
style : None, optional
Style params for the HTML element.
Returns
-------
Dash.html.Div()
Where content (i.e. via attr 'children') is a list of Dash `html` components
precisely mirroring the elements input as standard-format HTML.
"""
ALLOWED_TAGS = {
"a",
"address",
"b",
"big",
"blockquote",
"br",
"caption",
"center",
"cite",
"div",
"em",
"font",
"footer",
"header",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"hr",
"i",
"img",
"li",
"ol",
"option",
"p",
"pre",
"s",
"small",
"span",
"strong",
"table",
"td",
"textarea",
"th",
"tr",
"tt",
"u",
"ul",
}
def _extract_style(el):
"""Convert HTML-formatted style code into a
format that can be passed to a Dash html object
during instantiation, which underlies the
conversion procedure herein.
Parameters
----------
el : bs.element.NavigableString
Returns
-------
dict
Dash-compatible style params
"""
if not el.attrs.get("style"):
return None
return {
k.strip(): v.strip()
for k, v in [
x.split(": ") for x in el.attrs["style"].split(";")
if len(x) > 0
]
}
if type(el) is str:
return convert_html_to_dash(bs.BeautifulSoup(el, "html.parser"))
if type(el) == bs.element.NavigableString:
return str(el)
else:
name = el.name
style = _extract_style(el) if style is None else style
contents = [convert_html_to_dash(x) for x in el.contents]
        if name.lower() not in ALLOWED_TAGS:
return contents[0] if len(contents) == 1 else html.Div(contents)
return getattr(html, name.title())(contents, style=style)
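# A quick usage check (assumes dash and beautifulsoup4 are installed); the
# result mirrors the input tree as Dash components:
component = convert_html_to_dash('<div style="color: red"><p>Hello</p></div>')
# -> html.Div([html.P(['Hello'], style=None)], style={'color': 'red'})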
|
1bcc95c8cc9d9c2704d5ecc711a78a1c59c3a62a
| 3,637,539
|
def _ice_d2gdt2(temp,pres):
"""Calculate ice Gibbs free energy TT-derivative.
Calculate the second derivative of the specific Gibbs free energy of
ice with respect to temperature.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:returns: Gibbs free energy derivative in J/kg/K^2.
"""
# Reduced variables
tn = temp/_TTP
pn = pres/_PTPE
_PI0 = _PATM/_PTPE
g_tt = 0.
# Residual terms including complex numbers
sr = [_GCOEFFS[1], complex(0.0,0.0)]
for (k,rk) in enumerate(_GCOEFFS[2]):
sr[1] += rk * (pn-_PI0)**k
for (tk,s) in zip(_GCOEFFS[3],sr):
term = 1./(tk-tn) + 1./(tk+tn) - 2./tk
g_tt += (s*term).real / _TTP
return g_tt
|
fcbb08801c2767b16163e9c10fb84916ff0633c7
| 3,637,540
|
def ifht(A, dln, mu, offset=0.0, bias=0.0):
"""ifht multimethod."""
return (Dispatchable(A, np.ndarray),)
|
d7c597ba10a6d83afffd7e085556c44a263a6b8c
| 3,637,541
|
import idautils
def make_import_names_callback(library_calls, library_addr):
""" Return a callback function used by idaapi.enum_import_names(). """
def callback(ea, name, ordinal):
""" Callback function to retrieve code references to library calls. """
library_calls[name] = []
library_addr[name] = ea
for ref in idautils.CodeRefsTo(ea, 0):
library_calls[name].append(ref)
return True # True -> Continue enumeration
return callback
|
316d51e09ee564fb87ef3eeaeb38c1f6fba94a1d
| 3,637,542
|
def unNormalizeData(normalizedData, data_mean, data_std, dimensions_to_ignore, actions, one_hot):
"""Borrowed from SRNN code. Reads a csv file and returns a float32 matrix.
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/generateMotionData.py#L12
Args
normalizedData: nxd matrix with normalized data
data_mean: vector of mean used to normalize the data
data_std: vector of standard deviation used to normalize the data
dimensions_to_ignore: vector with dimensions not used by the model
actions: list of strings with the encoded actions
one_hot: whether the data comes with one-hot encoding
    Returns
        origData: the data restored to its original (un-normalized) scale
"""
T = normalizedData.shape[0]
D = data_mean.shape[0]
origData = np.zeros((T, D), dtype=np.float32)
dimensions_to_use = []
for i in range(D):
if i in dimensions_to_ignore:
continue
dimensions_to_use.append(i)
dimensions_to_use = np.array(dimensions_to_use)
if one_hot:
origData[:, dimensions_to_use] = normalizedData[:, :-len(actions)]
else:
origData[:, dimensions_to_use] = normalizedData[:, :]
origData = origData * np.expand_dims(data_std, 0) + np.expand_dims(data_mean, 0)
return origData
|
83371c999b78b4843f9d4dc4359b65fa8028b7b5
| 3,637,544
|
import numpy as np
from scipy.spatial.distance import cdist
def rbf_approx(t, y, centers, eps, C):
"""
function to return vector field of a single point (rbf)
:param t: time (for solve_ivp)
:param y: single point
:param centers: all centers
:param eps: radius of gaussians
:param C: coefficient matrix, found with least squares
:return: derivative for point y
"""
y = y.reshape(1, y.shape[-1])
phi = np.exp(-cdist(y, centers) ** 2 / eps ** 2)
return phi @ C
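# A small, self-contained usage sketch with made-up centers and coefficients
# (in practice C comes from a least-squares fit, as the docstring notes):
rng = np.random.default_rng(0)
centers = rng.normal(size=(5, 2))   # 5 Gaussian centers in 2-D
C = rng.normal(size=(5, 2))         # hypothetical coefficient matrix
y0 = np.array([0.1, -0.3])
dy = rbf_approx(0.0, y0, centers, eps=1.0, C=C)  # derivative, shape (1, 2)
# For scipy.integrate.solve_ivp, flatten the output, e.g.:
# solve_ivp(lambda t, y: rbf_approx(t, y, centers, 1.0, C).ravel(), (0, 1), y0)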
|
f988ba7e1ee5c68c7335fc88cdfffd894c7d845c
| 3,637,545
|
def get_preview_fragment(request, descriptor, context):
"""
Returns the HTML returned by the XModule's student_view or author_view (if available),
specified by the descriptor and idx.
"""
module = _load_preview_module(request, descriptor)
preview_view = AUTHOR_VIEW if has_author_view(module) else STUDENT_VIEW
try:
fragment = module.render(preview_view, context)
except Exception as exc: # pylint: disable=broad-except
log.warning("Unable to render %s for %r", preview_view, module, exc_info=True)
fragment = Fragment(render_to_string('html_error.html', {'message': str(exc)}))
return fragment
|
7f220983f36321b0ad5a4bac76ad4c5118fb66ed
| 3,637,546
|
def expr_close(src, size = 5):
"""
    Same result as core.morpho.Close(), but faster and usable in 32-bit.
"""
close = expr_dilate(src, size)
return expr_erode(close, size)
|
25d69ca4c53d08676143d8bfc7a2053dd236fc80
| 3,637,547
|
def recent_records(request):
"""
The landing view, at /.
"""
interval = request.GET.get('interval', 'month')
days = 31
if interval == 'week':
days = 7
recent_records, last_processed = _get_recent_records_range(0, NR_OF_RECENT_RECORDS, days)
context = {
'active': 'home',
'records_recent': recent_records,
# not very pretty but good enough for now
'interval': interval,
'last_processed': last_processed
}
return render(request, 'isisdata/recent_records.html', context=context)
|
17a9aa4858c1aa5238468618473141cb6d41c73f
| 3,637,548
|
import threading
from functools import partial
def create_background_consumers(count, before_start=None, target='run', *args, **kwargs):
"""Create new Consumer instances on background threads, starts them, and
return a tuple ([consumer], [thread]).
:param count: The number of Consumer instances to start.
:param before_start: A callable that will be called with each consumer as
an argument before the threads are started.
:param target: The name of the method of the Consumer that will be run.
The remaining arguments of the function are passed to the __init__() method
of each Consumer.
"""
consumers = []
threads = []
for _ in range(count):
consumer = Consumer(*args, **kwargs)
consumers.append(consumer)
thread_target = partial(_consumer_run_and_close_connection, consumer, target)
thread = threading.Thread(target=thread_target)
threads.append(thread)
if before_start:
for consumer in consumers:
before_start(consumer)
for thread in threads:
thread.start()
return (consumers, threads)
|
9870d3b26b84367975323d6f54c6f751297c558d
| 3,637,549
|
from datetime import datetime
def linkGen(year):
"""
This function generates the download links based on user input.
"""
    current_year = datetime.now().year
try:
if (int(year) >= 2016) and (int(year) <= int(current_year)):
url = f"http://dev.hsl.fi/citybikes/od-trips-{year}/od-trips-{year}.zip"
return url
else:
raise ValueError
    except ValueError:
print(f"Incorrect input.\nThe value should be an integer between 2016 and {current_year}")
quit()
|
a45727a61613770db5ee09ee81ee008bba1588a3
| 3,637,550
|
import collections
def factory_dict(value_factory, *args, **kwargs):
"""A dict whose values are computed by `value_factory` when a `__getitem__` key is missing.
Note that values retrieved by any other method will not be lazily computed; eg: via `get`.
:param value_factory:
:type value_factory: A function from dict key to value.
:param *args: Any positional args to pass through to `dict`.
    :param **kwargs: Any kwargs to pass through to `dict`.
:rtype: dict
"""
class FactoryDict(collections.defaultdict):
@staticmethod
def __never_called():
raise AssertionError('The default factory should never be called since we override '
'__missing__.')
def __init__(self):
super().__init__(self.__never_called, *args, **kwargs)
def __missing__(self, key):
value = value_factory(key)
self[key] = value
return value
return FactoryDict()
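# Usage: values are computed lazily on first [] access; note that .get() does
# not trigger the factory, as the docstring warns.
squares = factory_dict(lambda key: key * key)
squares[4]        # 16 (computed via __missing__ and cached)
squares.get(5)    # None – bypasses __missing__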
|
f7d4898e62377958cf9d9c353ed12c8d381f042f
| 3,637,551
|
def build_or_passthrough(model, obj, signal):
"""Builds the obj on signal, or returns the signal if obj is None."""
return signal if obj is None else model.build(obj, signal)
|
bee9c8557a89a458cf281b42f968fe588801ed46
| 3,637,552
|
import pandas as pd
import xarray as xr
def load_generated_energy_gwh_yearly_irena():
"""Returns xr.DataArray with dims=year and integer as coords, not timestamp!"""
generated_energy_twh = pd.read_csv(
INPUT_DIR / "energy_generation_irena" / "irena-us-generation.csv",
delimiter=";",
names=("year", "generation"),
)
generated_energy_twh_xr = xr.DataArray(
generated_energy_twh.generation,
dims="year",
coords={"year": generated_energy_twh.year},
)
return 1e3 * generated_energy_twh_xr
|
98a0b12cef9a214d2254e33c073320374f497a3b
| 3,637,553
|
import requests
import json
def warc_url(url):
"""
Search the WARC archived version of the URL
:returns: The WARC URL if found, else None
"""
query = "http://archive.org/wayback/available?url={}".format(url)
response = requests.get(query)
if not response:
raise RuntimeError()
data = json.loads(response.text)
snapshots = data["archived_snapshots"]
if not snapshots:
return None
return snapshots["closest"]["url"]
|
afc24876f72915ba07233d5fde667dd0ba964f5a
| 3,637,554
|
from numba import cuda  # assumed: Numba's CUDA module, matching the cuda.gpus usage
def check_gpu():
    """
    Check if GPUs are available on this machine
    """
    try:
        cuda.gpus.lst
        has_gpu = True
    except cuda.CudaSupportError:
        has_gpu = False
    return has_gpu
|
f2145426a658185856b99961be9a72c1407707fa
| 3,637,555
|
def vm_impl_avg_pool(self):
"""Generate vm_impl function for AvgPool"""
def vm_impl(x):
x = x.asnumpy()
out = vm.avg_pooling(x, self.ksize[-2], self.ksize[-1], self.strides[-2])
return Tensor(out)
return vm_impl
|
5e38b84668747a416c8018a0c3b017f637534950
| 3,637,556
|
def plotAllPoints(x,y,z,f,x0,con):
"""
Args:
x- initial x points
y- initial y points
z- initial z points
f- objective function for optimization
x0- flattened initial values to be shoved into objective function
con- list of dicts of constraints to be placed on the values
"""
#plt.close(5006)
fig = plt.figure(num=5006)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z, color='blue')
plt.rc('axes',linewidth=2)
plt.rc('lines',linewidth=2)
plt.rc('font',weight='bold')
plt.show(block=False)
out01k = minimize(f,x0, method='SLSQP',constraints=(con), options={'ftol':1e-4, 'maxiter':100})
out01kx, out01ky, out01kz = splitOut(out01k)
ax.scatter(out01kx, out01ky, out01kz,color='purple')
out1k = minimize(f,x0, method='SLSQP',constraints=(con), options={'ftol':1e-4, 'maxiter':1000})
out1kx, out1ky, out1kz = splitOut(out1k)
ax.scatter(out1kx, out1ky, out1kz,color='red')
plt.show(block=False)
out2k = minimize(f,x0, method='SLSQP',constraints=(con), options={'ftol':1e-4, 'maxiter':2000})
out2kx, out2ky, out2kz = splitOut(out2k)
ax.scatter(out2kx, out2ky, out2kz,color='green')
plt.show(block=False)
out4k = minimize(f,x0, method='SLSQP',constraints=(con), options={'ftol':1e-4, 'maxiter':4000})
out4kx, out4ky, out4kz = splitOut(out4k)
ax.scatter(out4kx, out4ky, out4kz,color='cyan')
    plt.legend(['Initial','100 iter.','1k iter.','2k iter.','4k iter.'],loc='upper left')
ax.set_xlabel('X',weight='bold')
ax.set_ylabel('Y',weight='bold')
ax.set_zlabel('Z',weight='bold')
plt.title('Points Distributed on a Sphere',weight='bold')
plt.show(block=False)
# To Save this figure:
# gca()
# savefig('figurename.png')
return fig, out01k, out1k, out2k, out4k
|
c42626d9562b655fac9c4620bea2e9d2eb33dacd
| 3,637,558
|
import json
def add_samples(request, product_id):
"""Adds passed samples (by request body) to product with passed id.
"""
parent_product = Product.objects.get(pk=product_id)
for temp_id in request.POST.keys():
        if not temp_id.startswith("product"):
continue
temp_id = temp_id.split("-")[1]
add_sample(parent_product, temp_id)
# This isn't necessary but it cleans the cache. See lfs.cache listeners
# for more
parent_product.save()
html = [["#samples-inline", manage_samples_inline(request, product_id, as_string=True)]]
result = json.dumps({
"html": html,
"message": _(u"Samples have been added.")
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
|
35a982cf4309a727a19aea20bb9a8a392d67292d
| 3,637,559
|
def array_to_top_genes(data_array, cluster1, cluster2, is_pvals=False, num_genes=10):
"""
Given a data_array of shape (k, k, genes), this returns two arrays:
genes and values.
"""
data_cluster = data_array[cluster1, cluster2, :]
if is_pvals:
order = data_cluster.argsort()
else:
order = data_cluster.argsort()[::-1]
genes = order[:num_genes]
values = data_cluster[order[:num_genes]]
return genes, values
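# A tiny numpy demo: pull the top-2 genes for the cluster pair (0, 1).
import numpy as np
arr = np.zeros((2, 2, 5))
arr[0, 1] = [0.1, 0.9, 0.5, 0.3, 0.7]
array_to_top_genes(arr, 0, 1, num_genes=2)  # (array([1, 4]), array([0.9, 0.7]))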
|
4f9a0ea673f4ddfa59e7d4fc2249597daef4a260
| 3,637,560
|
def set_attrib(node, key, default):
"""
Parse XML key for a given node
If key does not exist, use default value
"""
    return node.attrib.get(key, default)
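# Usage with a minimal ElementTree node:
import xml.etree.ElementTree as ET
node = ET.fromstring('<joint name="elbow"/>')
set_attrib(node, 'name', 'unnamed')   # 'elbow'
set_attrib(node, 'damping', '0.1')    # '0.1' (falls back to the default)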
|
0e21b6b0e5a64e90ee856d4b413084a8c395b070
| 3,637,562
|
def nested_hexagon_stretched():
"""A stretched, nested hexagon"""
poly = [
[
(0.86603, -0.5),
(0.86603, 1.5),
(0.0, 2.0),
(-0.86603, 1.5),
(-0.86603, -0.5),
(-0.0, -1.0),
(0.86603, -0.5),
],
[
(1.29904, -0.75),
(1.29904, 1.75),
(0.0, 2.5),
(-1.29904, 1.75),
(-1.29904, -0.75),
(-0.0, -1.5),
(1.29904, -0.75),
],
]
    # convert to triangulation input
    conv = ToPointsAndSegments()
    conv.add_polygon(poly)
return conv, 35, 24, 6
|
722a53c74787e9fa8962dd6f8970c96c1ae4c9fd
| 3,637,563
|
def vector(math_engine, size, dtype):
"""
"""
if dtype != "float32" and dtype != "int32":
raise ValueError('The `dtype` must be one of {`float32`, `int32`}.')
if size < 1:
raise ValueError('The `size` must be > 0.')
shape = (size, 1, 1, 1, 1, 1, 1)
return Blob(PythonWrapper.tensor(math_engine._internal, shape, dtype))
|
c71ed713140d4a8dc4a9465be3b32b8bfab1e86c
| 3,637,564
|
def is_order_exist(context, symbol, side) -> bool:
"""判断同方向订单是否已经存在
:param context:
:param symbol: 交易标的
:param side: 交易方向
:return: bool
"""
uo = context.unfinished_orders
if not uo:
return False
else:
for o in uo:
if o.symbol == symbol and o.side == side:
context.logger.info("同类型订单已存在:{} - {}".format(symbol, side))
return True
return False
|
42363c8b3261e500a682b65608c27537b93bcfb1
| 3,637,565
|
import random
import math
def getBestMove(board,selectedFunction = "minmax"):
"""find the best move
Args:
board (board): board object from chess
Returns:
UCI.move: piece movement on the board, i.e. "g2g4"
"""
#Get AI Movement
maxWeight = 0
deplacement = None
#Get movement in the polyglot
deplacement = pg.bestMove(board)
    # If the opening book gave no move
    if not deplacement:
        # Fall back to a random legal move
        deplacement = random.choice(list(board.legal_moves))
        # Then let the selected search algorithm pick the AI's move
        if selectedFunction == "minmax":
            val, deplacement = minmax(board, 3)
        elif selectedFunction == "minmaxAlphaBeta":
            val, deplacement = minmaxAlphaBeta(board, 5, -math.inf, math.inf)
return deplacement
|
babb0130f495e15b582e3ea68e07183672927587
| 3,637,566
|
def newline_prep(target_str, do_escape_n=True, do_br_tag=True):
"""
Set up the newlines in a block of text so that they will be processed correctly by Reportlab and logging
:param target_str:
:param do_escape_n:
:param do_br_tag:
:return:
"""
newline_str = get_newline_str(do_escape_n=do_escape_n, do_br_tag=do_br_tag)
# Save the newline characters that appear together to a temporary tag
target_str = target_str.replace("\n<br/>", "<newline>").replace("<br/>\n", "<newline>")
# Change the characters that appear individually
target_str = target_str.replace("\n", "<newline>").replace("<br/>", "<newline>")
target_str = target_str.replace("\r", "<newline>")
# Set everything to the target newline string
target_str = target_str.replace("<newline><newline>", "<newline>").replace("<newline>", newline_str)
return target_str
|
6156438c8bd1f400e37fc6145a6fd558ed6a750b
| 3,637,567
|
import warnings
import numpy as np
def romb_extrap(sr, der_init, expon, compute_amp = False):
"""
Perform Romberg extrapolation for estimates formed within derivest.
Arguments:
sr : Decrease ratio between successive steps.
der_init : Initial derivative estimates.
expon : List of orders corresponding to the higher-order terms to be
cancelled via Romberg step. The accepted parameter values of
derivest will use a list of, at most, three values. A warning
is issued if a longer list is received.
compute_amp : Boolean specifying whether to also compute the noise
amplification factor. (Default: False)
Returns a 2-tuple or 3-tuple, containing:
der_romb : Derivative estimates.
err_est : Error estimates.
amp : Computed noise amplification factor (if compute_amp == True).
"""
# Guarantee that expon is a one-dimensional array of floats:
if isinstance(expon, list):
expon = np.array(expon).flatten()
elif not isinstance(expon, np.ndarray):
expon = np.array([float(expon)])
else:
expon = expon.flatten()
num_expon = expon.size
# Construct the Romberg matrix:
sr_inv = 1.0/sr
rmat = np.ones((num_expon + 2, num_expon + 1))
if num_expon > 3:
warnings.warn("Ordinary use of derivest() should not need more than "
"three terms to be cancelled.", RuntimeWarning)
    elif num_expon > 0:
        for i in range(1, num_expon + 2):
            rmat[i, np.arange(1, num_expon + 1)] = sr_inv**(i*expon)
    # Compute QR factorization for the uncertainty estimates:
    (Q, R) = np.linalg.qr(rmat)
# Extrapolate to a zero step-size:
rhs = diag_tile(der_init, (num_expon + 2,
max(1, der_init.size - num_expon - 2)),
flatten = True)
# Compute Romberg coefficients by solving linear systems:
coeffs = np.linalg.lstsq(R, Q.T @ rhs, rcond = None)[0]
der_romb = coeffs[0,:] # Extract derivative estimates.
# Approximate the uncertainty:
s = np.sqrt(np.sum((rhs - rmat @ coeffs)**2.0, axis = 0))
R_inv = np.linalg.lstsq(R, np.eye(num_expon + 1), rcond = None)[0]
cov = np.sum(R_inv**2.0, axis = 1)
err_est = 12.7062047361747*np.sqrt(cov[0])*s
if compute_amp: return (der_romb, err_est, np.linalg.cond(R))
else: return (der_romb, err_est)
|
707afc236ec44489c2b8edc06fd07fd54545ae4d
| 3,637,568
|
def nativeTextOverline(self):
"""
TOWRITE
:rtype: bool
"""
return self.textOverline()
|
69c67bb49446c74f59d880fdcf2ea591f7a4553b
| 3,637,570
|
import numpy as np
from scipy.ndimage import convolve1d
def ca_(arr, l_bound=4000, guard_len=4, noise_len=8):
"""Perform CFAR-CA detection on the input array.
Args:
arr (list or ndarray): Noisy array.
l_bound (int): Additive lower bound of detection threshold.
guard_len (int): Left and right side guard samples for leakage protection.
noise_len (int): Left and right side noise samples after guard samples.
Returns:
threshold (ndarray): CFAR generated threshold based on inputs (Peak detected if arr[i] > threshold[i]) \
for designated false-positive rate
noise_floor (ndarray): noise values with the same shape as input arr.
Example:
>>> signal = np.random.randint(100, size=10)
>>> signal
array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])
>>> threshold = mm.dsp.ca_(signal, l_bound=20, guard_len=1, noise_len=3)
>>> threshold
(array([70, 76, 64, 79, 81, 91, 74, 71, 70, 79]), array([50, 56, 44, 59, 61, 71, 54, 51, 50, 59]))
FEATURE NOT YET ADDED - Perform a non-wrapping cfar thresholding.
>>> signal = np.random.randint(100, size=10)
>>> signal
array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])
>>> threshold = mm.dsp.ca_(signal, l_bound=20, guard_len=1, noise_len=3, wrap=False)
>>> threshold
(array([70, 76, 64, 79, 81, 91, 74, 71, 70, 79]), array([50, 56, 44, 59, 61, 71, 54, 51, 50, 59]))
"""
    if isinstance(arr, list):
        arr = np.array(arr)
    assert isinstance(arr, np.ndarray)
kernel = np.ones(1 + (2 * guard_len) + (2 * noise_len), dtype=arr.dtype) / (2 * noise_len)
kernel[noise_len:noise_len + (2 * guard_len) + 1] = 0
noise_floor = convolve1d(arr, kernel, mode='wrap')
threshold = noise_floor + l_bound
return threshold, noise_floor
|
efae86d2740677e77929018b11b4412257870c40
| 3,637,571
|
import numpy as np
from sklearn.preprocessing import OneHotEncoder
def create_nodes_encoder(properties):
"""Create an one-hot encoder for node labels."""
nodes_encoder = OneHotEncoder(handle_unknown='ignore')
nodes_labels = list(get_nodes_labels(properties))
nodes_encoder.fit(np.array(nodes_labels).reshape((-1, 1)))
return nodes_encoder
|
06a38d25c94562250b9e28ac01d4d376c28d171b
| 3,637,572
|
import requests
def get_response(data_type, client_id=None, device_id=None):
"""Request GET from API server based on desired type, return
raw response data
Args:
data_type (str): Type of request, 'client', 'site',
'device', 'scan'
client_id (int): Active client ID number
device_id (int): Active device ID number
"""
if data_type == 'client':
payload = {'service': 'list_clients'}
temp_resp = requests.get('https://%s/api/?apikey=%s&' % (query_server, api_key), params=payload)
resp = temp_resp.text
if data_type == 'site':
payload = {'service': 'list_sites', 'clientid': client_id}
temp_resp = requests.get('https://%s/api/?apikey=%s&' % (query_server, api_key), params=payload)
resp = temp_resp.text
if data_type == 'device':
device_type = ('workstation', 'server')
resp = ''
for dev_type in device_type:
payload = {'service': 'list_devices_at_client', 'clientid': client_id, 'devicetype': dev_type}
temp_resp = requests.get('https://%s/api/?apikey=%s&' % (query_server, api_key), params=payload)
resp += temp_resp.text
if data_type == 'scan':
payload = {'service': 'list_mav_scans', 'deviceid': device_id, 'details': 'YES'}
temp_resp = requests.get('https://%s/api/?apikey=%s&' % (query_server, api_key), params=payload)
resp = temp_resp.text
return resp
|
3ed355164995411e7ce017f590b8d7143f1529f5
| 3,637,573
|
def average_pool(inputs, masks, axis=-2, eps=1e-10):
"""
inputs.shape: [A, B, ..., Z, dim]
masks.shape: [A, B, ..., Z]
inputs.shape[:-1] (A, B, ..., Z) must be match masks.shape
"""
assert inputs.shape[:-1] == masks.shape, f"inputs.shape[:-1]({inputs.shape[:-1]}) must be equal to masks.shape({masks.shape})"
masks_unsq = masks.unsqueeze(-1)
return (inputs * masks_unsq).sum(axis) / (masks_unsq.sum(axis)+eps)
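# A worked example (assumes PyTorch): average the first two timesteps of a
# length-3 sequence, masking out the third.
import torch
inputs = torch.tensor([[[1., 2.], [3., 4.], [5., 6.]]])  # shape [1, 3, 2]
masks = torch.tensor([[1., 1., 0.]])                     # shape [1, 3]
average_pool(inputs, masks)  # tensor([[2., 3.]]) – mean of the unmasked rows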
|
e6f99634245a4c46e2b5d776afcd73e855da68de
| 3,637,574
|
def validate_graph_without_circle(data):
"""
    Validate that the graph has no cycle.
    On failure, returns {
"result": False,
"message": "error message",
"error_data": ["node1_id", "node2_id", "node1_id"]
}
"""
nodes = [data["start_event"]["id"], data["end_event"]["id"]]
nodes += list(data["gateways"].keys()) + list(data["activities"].keys())
flows = [
[flow["source"], flow["target"]] for _, flow in list(data["flows"].items())
]
cycle = Graph(nodes, flows).get_cycle()
if cycle:
return {
"result": False,
"message": "pipeline graph has circle",
"error_data": cycle,
}
return {"result": True, "data": []}
|
dee8c9e2671505dd17de09903c7fe093ca6394cc
| 3,637,575
|
import logging
def generate_landsat_ndvi(src_info, no_data_value):
"""Generate Landsat NDVI
Args:
src_info <SourceInfo>: Information about the source data
no_data_value <int>: No data (fill) value to use
Returns:
<numpy.2darray>: Generated NDVI band data
list(<int>): Locations containing no data (fill) values
"""
logger = logging.getLogger(__name__)
logger.info('Building TOA based NDVI band for Landsat data')
# NIR ----------------------------------------------------------------
nir_data = emis_util.extract_raster_data(src_info.toa.nir.name, 1)
nir_no_data_locations = np.where(nir_data == no_data_value)
nir_data = nir_data * src_info.toa.nir.scale_factor
# RED ----------------------------------------------------------------
red_data = emis_util.extract_raster_data(src_info.toa.red.name, 1)
red_no_data_locations = np.where(red_data == no_data_value)
red_data = red_data * src_info.toa.red.scale_factor
# NDVI ---------------------------------------------------------------
ndvi_data = ((nir_data - red_data) / (nir_data + red_data))
# Cleanup no data locations
ndvi_data[nir_no_data_locations] = no_data_value
ndvi_data[red_no_data_locations] = no_data_value
# Memory cleanup
del red_data
del nir_data
del nir_no_data_locations
del red_no_data_locations
# Capture these before less than zero operation
no_data_locations = np.where(ndvi_data == no_data_value)
# Turn all negative values to zero
    # Use a really small value so that we don't have negative zero (-0.0)
ndvi_data[ndvi_data < 0.0000001] = 0
return (ndvi_data, no_data_locations)
|
8def6cde518fc4a88c40fddaa7fc1d5f0e7ae36b
| 3,637,576
|
def ordinal(string):
"""
Converts an ordinal word to an integer.
Arguments:
- string -- the word to parse.
Returns: an integer if successful; otherwise None.
-----------------------------------------------------------------
"""
try:
# Full word.
if string in ORD_LIST:
return ORD_LIST.index(string)
# end if
try:
return int(string[:-2])
except ValueError:
return None
        # end try
except Exception as err:
_z_exc("wl_resource.py/ordinal", err)
# end try
|
fc744ab90c89019cf0ac22eb4d48e26057d655a0
| 3,637,577
|
def get_tables_stats(dbs=None,tables=None,period=365*86400):
"""
    Obtain count and frequency statistics for all data tables across all databases.
"""
dbs = dbs or pta.multi.get_hdbpp_databases()
result = fn.defaultdict(fn.Struct)
date = int(fn.clsub('[^0-9]','',fn.time2str().split()[0]))
if period:
date0 = int(fn.clsub('[^0-9]','',
fn.time2str(fn.now()-period).split()[0]))
else:
date0 = 0
print(date0,date)
for d in dbs:
api = pta.api(d)
dbtables = tables or api.getTables()
for t in dbtables:
result[(d,t)].db = d
result[(d,t)].table = t
result[(d,t)].partitions = [p for p in api.getTablePartitions(t)
if date0 < fn.str2int(p) < date]
result[(d,t)].attributes = (api.get_attributes_by_table(t)
if t in api.get_data_tables() else [])
result[(d,t)].last = (api.get_last_partition(t,tref=fn.now())
if t in api.get_data_tables() else '')
if len(result[(d,t)].partitions) > 1:
result[(d,t)].size = sum(api.getPartitionSize(t,p)
for p in result[(d,t)].partitions)
result[(d,t)].rows = sum(api.getPartitionRows(t,p)
for p in result[(d,t)].partitions)
else:
result[(d,t)].size = api.getTableSize(t)
result[(d,t)].rows = api.getTableRows(t)
for k,v in result.items():
v.partitions = len(v.partitions)
v.attributes = len(v.attributes)
v.attr_size = float(v.size)/v.attributes if v.attributes else 0
v.attr_rows = float(v.rows)/v.attributes if v.attributes else 0
v.row_size = v.size/v.rows if v.rows else 0
v.part_size = v.size/v.partitions if v.partitions else 0
v.row_freq = v.rows/float(period) if period else 0
v.size_freq = v.size/float(period) if period else 0
v.attr_freq = v.row_freq/v.attributes if v.attributes else 0
return result
|
f3671bda3dca41b3d2c96e7fee86d9f13f52157e
| 3,637,578
|
import tensorflow as tf
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
    A factor of 0.0 means only image1 is used.
    A factor of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor.
image2: An image Tensor.
      factor: A floating point value of 0.0 or greater.
Returns:
A blended image Tensor.
"""
image1 = tf.cast(image1, tf.float32)
image2 = tf.cast(image2, tf.float32)
return to_uint8(image1 + factor * (image2 - image1))
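# to_uint8 is not shown in this snippet; a plausible sketch, assuming it clips
# extrapolated values back into the valid pixel range before casting:
def to_uint8(image):
    # Clip to [0, 255] so factor > 1.0 cannot overflow, then cast to uint8.
    return tf.cast(tf.clip_by_value(image, 0.0, 255.0), tf.uint8)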
|
f8cb28b68b809bce6a258f9b6e15c19120a123de
| 3,637,579
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
def _minmax_scaler(data, settings):
"""Normalize by min max mode."""
info = settings['model']
frag = settings['id_frag']
features = settings['input_col']
alias = settings.get('output_col', [])
min_r, max_r = settings.get('feature_range', (0, 1))
remove_input = settings.get('remove', False)
if len(alias) != len(features):
alias = features
values = data[features].values
to_remove = [c for c in alias if c in data.columns]
if remove_input:
to_remove += features
data.drop(to_remove, axis=1, inplace=True)
if len(data) > 0:
minimum, maximum = info
minimum = np.array(minimum)
maximum = np.array(maximum)
scale_ = (max_r - min_r) / (maximum - minimum)
scaler = MinMaxScaler()
scaler.data_min_ = minimum
scaler.data_max_ = maximum
scaler.scale_ = scale_
scaler.data_range_ = maximum - minimum
scaler.min_ = min_r - minimum * scale_
res = scaler.transform(values)
del values
data = pd.concat([data, pd.DataFrame(res, columns=alias)], axis=1)
else:
for col in alias:
data[col] = np.nan
info = generate_info(data, frag)
return data, info
|
d60a94382b1ceadc99bd46ab4d121fa6c9031641
| 3,637,581
|
def find_by_id(widget_id):
"""
    Get a widget by its ULID.
"""
return db.select_single('widgets', {'widget_id':widget_id}, None,
['widget_id', 'widget_name', 'user_id', 'user_email', 'description'])
|
2640261d1a702ce029c95ef54d43c825a5478431
| 3,637,582
|
def sainte_lague(preliminary_divisor, data, total_available_seats):
"""Iterative Sainte-Lague procedure which applies core_sainte_lague
Input:
preliminary_divisor (float): Guess for the divisor
        data (pd.DataFrame): data processed with divisors (e.g. votes by party)
total_available_seats (int): number of seats in parliament for the
respective Bundesland, Germany, etc.
Output:
allocated_seats (DataFrame): seats by party, state, etc.
"""
allocated_seats, sum_of_seats = core_sainte_lague(preliminary_divisor, data)
while sum_of_seats != total_available_seats:
if sum_of_seats > total_available_seats:
preliminary_divisor = preliminary_divisor + 50
elif sum_of_seats < total_available_seats:
preliminary_divisor = preliminary_divisor - 50
else:
pass
allocated_seats, sum_of_seats = core_sainte_lague(preliminary_divisor, data)
else:
return allocated_seats, preliminary_divisor
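# core_sainte_lague is not shown here; a minimal sketch of the standard
# Sainte-Lague core step, assuming `data` is a pd.DataFrame of vote counts:
def core_sainte_lague(divisor, data):
    # Divide votes by the divisor and round to the nearest integer.
    # (pandas rounds half to even; the real implementation may round half up.)
    allocated_seats = (data / divisor).round().astype(int)
    return allocated_seats, int(allocated_seats.values.sum())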
|
899157982c14ea8adea3decbd0e267211b0f031f
| 3,637,583
|
def nonchangingdims(index, ndim, axes, shape=None):
"""nonchanging for particular dimensions
Args:
index(index): object used in slicing (expanded)
ndim(num): dimensions before indexings
axes(array): dimensions for which you want to know the index
shape(Optional(tuple)): dimension before applying index
Returns:
tuple
"""
    # Drop np.newaxis entries before expanding to the full dimensionality.
    index2 = [ind for ind in index if ind is not np.newaxis]
    index2 = expand(index2, ndim)
index2 = tuple(listtools.listadvanced(index2, axes))
if shape is not None:
shape = tuple(listtools.listadvanced(list(shape), axes))
b = nonchanging(index2, shape)
axesorder, _ = axesorder_afterindexing(index, ndim)
i = listtools.where(axesorder, lambda x: instance.islistgen(x))
if len(i) == 1:
i = i[0]
if len(axesorder[i]) == 1:
axesorder[i] = axesorder[i][0]
try:
b &= listtools.listadvanced(axesorder, axes) == axes
    except Exception:
b = False
return b
|
72b348314344123b6aee194293102ec1af35f6a0
| 3,637,584
|
def get_voxels(df, center, config, rot_mat=np.eye(3, 3)):
"""
Generate the 3d grid from coordinate format.
Args:
df (pd.DataFrame):
region to generate grid for.
        center ((3,) np.array):
            center of the grid.
        config:
            grid configuration object providing resolution and element_mapping.
rot_mat (3x3 np.array):
rotation matrix to apply to region before putting in grid.
Returns:
4-d numpy array representing an occupancy grid where last dimension
is atom channel. First 3 dimension are of size radius_ang * 2 + 1.
"""
size = grid_size(config)
true_radius = size * config.resolution / 2.0
# Select valid atoms.
at = df[['x', 'y', 'z']].values.astype(np.float32)
elements = df['element'].values
# Center atoms.
at = at - center
# Apply rotation matrix.
at = np.dot(at, rot_mat)
# at = (np.around((at + true_radius) / config.resolution - 0.5)).astype(np.int16)
bins = np.linspace(-true_radius, true_radius, size+1)
at_bin_idx = np.digitize(at, bins)
# Prune out atoms outside of grid as well as non-existent atoms.
sel = np.all(at_bin_idx > 0, axis=1) & np.all(at_bin_idx < size+1, axis=1) & (elements != '')
at = at_bin_idx[sel] - 1
# Form final grid.
labels = elements[sel]
lsel = np.nonzero([_recognized(x, config.element_mapping) for x in labels])
labels = labels[lsel]
labels = np.array([config.element_mapping[x] for x in labels], dtype=np.int8)
grid = np.zeros(grid_shape(config), dtype=np.float32)
grid[at[lsel, 0], at[lsel, 1], at[lsel, 2], labels] = 1
return grid
|
f806840209dd4542f91834c761b1b5ccadba175c
| 3,637,585
|
def dispatch_factory(msg: Result, **kwargs) -> DispatchCallOutSchema:
"""result_factory Generate result as expected by Daschat
Examples:
from daschat_base.messages import MSGS, msg_factory
msg_factory(MSGS.success)
msg_factory(MSGS.not_logged_in, user="abner")
Args:
        msg (Result): Any result type in the MSGS constant
Raises:
ValueError: Parameter name not allowed
ValueError: Result don't accept params
ValueError: Wrong number of params
ValueError: Wrong parameter type
ValueError: Wrong parameter size
Returns:
        DispatchCallOutSchema: the dispatch call-out wrapping the result and params
"""
call_params: int = len(kwargs)
msg_params: int = len(msg.params)
params: dict = {}
if call_params > 0 and msg_params == 0:
raise ValueError("This message do not accept params")
if not call_params == msg_params:
raise ValueError(
f"Wrong number of params. This message only accepts {msg_params} parameter(s)"
)
if len(kwargs) > 0:
for k in kwargs:
param_def = next((item for item in msg.params if item.name == k), None)
if param_def is None:
raise ValueError(f"This parameter name is not allowed: {k}")
            if type(kwargs[k]) is not param_def.type:
raise ValueError(
f"Wrong parameter type: '{k}' must be {param_def.type}"
)
if param_def.type == str:
if not param_def.min_size <= len(kwargs[k]) <= param_def.max_size:
raise ValueError(
f"Wrong parameter size: '{k}' must be between {param_def.min_size} and {param_def.max_size}"
)
params[k] = kwargs[k]
return DispatchCallOutSchema(
result=ResultFieldSchema(id=msg.id, status=msg.status, params=params)
)
|
290519532babe33c3472b7096d7b5631c6383d40
| 3,637,586
|
def local_config(config_path=FIXTURE_CONFIG_PATH):
"""Return an instance of the Config class as a fixture available
for a module."""
return Config(config_path=config_path)
|
bde94430ec174da3950357d7c5f5203a5eebb5cd
| 3,637,587
|
def ln_prior(theta, parameters_to_fit):
"""priors - we only reject obviously wrong models"""
if theta[parameters_to_fit.index("t_E")] < 0.:
return -np.inf
if theta[parameters_to_fit.index("t_star")] < 0.:
return -np.inf
return 0.0
|
8121904e00443a5df76e3477339ca3444219bd3e
| 3,637,588
|
def get_axis_letter_aimed_at_child(transform):
"""
Returns the axis letter that is poinitng to the given transform
:param transform: str, name of a transform
:return: str
"""
vector = get_axis_aimed_at_child(transform)
return get_vector_axis_letter(vector)
|
22b523583571330030595f8cb787d979692740bc
| 3,637,589
|
import math
def computeLatitudePrecision(codeLength):
"""
Compute the latitude precision value for a given code length. Lengths <=
10 have the same precision for latitude and longitude, but lengths > 10
have different precisions due to the grid method having fewer columns than
rows.
"""
if codeLength <= 10:
return pow(20, math.floor((codeLength / -2) + 2))
return pow(20, -3) / pow(GRID_ROWS_, codeLength - 10)
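# Worked example: a 10-digit code gives pow(20, floor(10 / -2 + 2)) =
# 20 ** -3 = 0.000125 degrees of latitude; with the usual Open Location Code
# grid of 5 rows (GRID_ROWS_ = 5, an assumption here), an 11-digit code
# refines that to 0.000125 / 5 = 0.000025 degrees.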
|
a9614e6ad68c126333a818f545766ca055ff3165
| 3,637,590
|
import numpy as np
def EmptyStateMat(nX,nU,nY):
    """ Returns state matrices with proper dimensions, filled with 0 """
    Xx = np.zeros((nX,nX)) # Ac
    Yx = np.zeros((nY,nX)) # Gc
    Xu = np.zeros((nX,nU)) # Bc
Yu = np.zeros((nY,nU)) # Jc
return Xx,Xu,Yx,Yu
|
6efce17ae8908b543afe5f7bec45252623f1b99b
| 3,637,591
|
def get_market(code):
"""
    A very rough heuristic for inferring the trading market from a security code.
:param code:
:return:
"""
trans = {
"USD": "US",
"GBP": "UK",
"HKD": "HK",
"CNY": "CN",
"CHF": "CH",
"JPY": "JP",
"EUR": "DE",
"AUD": "AU",
"INR": "IN",
"SGD": "SG",
}
try:
if code in market_info:
return market_info[code]
elif code.startswith("CNY/") or code.endswith("/CNY"):
return "CM" # china money 中间价市场标记
elif code.startswith("HK") and code[2:].isdigit():
return "HK"
market = get_rt(code)["market"]
if market is None:
market = get_currency(code)
market = trans.get(market, market)
except (TypeError, AttributeError, ValueError, IndexError):
market = "CN"
return market
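# The currency-pair and Hong Kong rules need no network access; a toy check,
# assuming an empty market_info cache (hypothetical stand-in for the real one):
market_info = {}
print(get_market('CNY/EUR'))  # 'CM' -- central parity market
print(get_market('HK00700'))  # 'HK'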
|
9e91263bea34033bee83f943eb8ea864a5fd4e5e
| 3,637,592
|
import numpy
import matplotlib.pyplot as plt
def rawPlot():
    """Build a demo matplotlib figure: two stacked subplots showing a damped
    cosine and a plain cosine, with custom colors and hidden top/right spines.
    """
def f(t):
return numpy.exp(-t) * numpy.cos(2*numpy.pi*-t)
plot_x = 495
plot_y = 344
fig = plt.figure(figsize=[plot_x * 0.01, plot_y * 0.01], # Inches.
                     dpi=100, # 100 dots per inch, so the resulting buffer is 495x344 pixels
)
fig.set_size_inches(plot_x * 0.01, plot_y * 0.01)
ax = fig.gca()
plt.xlabel('xlabel')
plt.ylabel('ylabel')
plt.title("Title")
plt.gcf().subplots_adjust(bottom=0.15, top=0.90, left=0.14, right=0.95)
#l1, = ax.plot([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [1, 2, 4, 8, 15, 17, 18, 22, 23, 23, 24, 24, 25, 25])
#l1, = ax.plot(numpy.sin(numpy.linspace(0, 2 * numpy.pi)), 'r-o')
t1 = numpy.arange(0.0, 5.0, 0.10)
t2 = numpy.arange(0.0, 5.0, 0.02)
#l1, = ax.plot(t1, f(t1), 'bo', t2, f(t2), 'k')
plt.figure(1)
p1 = plt.subplot(211)
l1, = plt.plot(t1, f(t1), 'o')
p2 = plt.subplot(212)
l2, = plt.plot(t2, numpy.cos(2*numpy.pi*t2), 'r--')
l1.set_color((162/255, 19/255, 24/255))
l2.set_color((0/255, 166/255, 56/255))
#plt.xlabel('xlabel')
#plt.ylabel('ylabel')
#plt.title("Title")
p1.spines['right'].set_visible(False)
p1.spines['top'].set_visible(False)
p2.spines['right'].set_visible(False)
p2.spines['top'].set_visible(False)
return fig
|
da2b74c26157da58d7081ebbee764d1c22137303
| 3,637,593
|
def corrupt_single_relationship(triple: tf.Tensor,
all_triples: tf.Tensor,
max_range: int,
name=None):
""" Corrupt the relationship by __sampling from [0, max_range]
:param triple:
:param all_triples:
:param max_range:
:param name:
:return: corrupted 1-d [h,r,t] triple
"""
with tf.name_scope(name, 'corrupt_single_relation', [triple, all_triples]):
h, r, t = tf.unstack(triple, name='unstack_triple')
head_mask = tf.equal(all_triples[:, 0], h, name='head_mask')
head_matched_triples = tf.boolean_mask(all_triples[:, 1:], head_mask, name='head_matched_triples')
tail_mask = tf.equal(head_matched_triples[:, 1], t, name='tail_mask')
true_rels = tf.boolean_mask(head_matched_triples[:, 0], tail_mask)
corrupted_rel = tf.reshape(single_negative_sampling(true_rels, max_range), ())
return tf.stack([h, corrupted_rel, t], name='rel_corrupted_triple')
|
efb875764d4530033d9b889685501b1928cfee7d
| 3,637,595
|
def save_ipynb_from_py(folder: str, py_filename: str) -> str:
"""Save ipynb file based on python file"""
full_filename = f"{folder}/{py_filename}"
with open(full_filename) as pyfile:
code_lines = [line.replace("\n", "\\n").replace('"', '\\"')
for line in pyfile.readlines()]
pycode = '",\n"'.join(code_lines)
with open('template.ipynb') as template:
template_body = ''.join(template.readlines())
ipynb_code = template_body.replace('{{TEMPLATE}}', pycode)
new_filename = full_filename.replace('.py', '.ipynb')
with open(new_filename, "w") as ipynb_file:
ipynb_file.write(ipynb_code)
return py_filename.replace('.py', '.ipynb')
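# template.ipynb is not shown; a minimal notebook this replacement logic
# assumes, with {{TEMPLATE}} sitting inside a JSON string in a single cell:
# {
#   "cells": [{"cell_type": "code", "execution_count": null, "metadata": {},
#              "outputs": [], "source": ["{{TEMPLATE}}"]}],
#   "metadata": {}, "nbformat": 4, "nbformat_minor": 4
# }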
|
f2711f0282c2bf40e9da2fec6e372c76038ac04a
| 3,637,596
|
def send():
"""
Updates the database with the person who is responding and their busy times.
"""
invitee = request.args.get('invitee')
busy_times = request.args.get('busy_times')
meetcode = flask.session['meetcode']
# Get the record with this meet code.
record = collection.find({"code": meetcode})[0]
# First indicate the person who just responded.
if "{}".format(invitee) in record['participants']:
# The invitee should always be in the record unless
# users are doing something wrong, like multiple people
# choosing the same name at the same time.
# Either way, this if statement protects in that case.
record['participants'].remove("{}".format(invitee))
record['already_checked_in'].append("{}".format(invitee))
# Next append the new list of busy times to the list from the db.
# First the new list will need to be converted from a str to a list.
busy_times = busy_times[3:-3].split("\"],[\"")
for i in range(len(busy_times)):
record['busy'].append(busy_times[i].split("\",\""))
# Now update the database with the new busy times,
# and updated info on who has checked in.
collection.find_one_and_update(
{"code": meetcode},
{'$set': {"participants": record['participants'],
"already_checked_in": record['already_checked_in'],
"busy": record['busy']}})
result = {"meetcode": meetcode}
return flask.jsonify(result=result)
|
40dc5c34e7e638da68e1be8e1169f8167506d11a
| 3,637,597
|
from tqdm import tqdm
def get_plos_article_type_list(article_list=None, directory=None):
"""Makes a list of of all internal PLOS article types in the corpus
Sorts them by frequency of occurrence
:param article_list: list of articles, defaults to None
:param directory: directory of articles, defaults to get_corpus_dir()
    :returns: list of (PLOS article type, count) tuples, most common first
    :rtype: list
"""
if directory is None:
directory = get_corpus_dir()
if article_list is None:
article_list = listdir_nohidden(directory)
PLOS_article_type_list = []
for article_file in tqdm(article_list):
article = Article.from_filename(article_file, directory=directory)
PLOS_article_type_list.append(article.plostype)
print(len(set(PLOS_article_type_list)), 'types of articles found.')
PLOS_article_types_structured = counter(PLOS_article_type_list).most_common()
return PLOS_article_types_structured
|
bc06bb0179e1f8014c59db3835df844908b312ab
| 3,637,598
|
def e_coordenada(arg):
"""tuplo -> Boole
Esta funcao verifica se o argumento que recebe e um tuplo do tipo coordenada"""
return isinstance(arg,tuple) and len(arg)==2 and 1<=coordenada_linha(arg)<=4 and 1<=coordenada_coluna(arg)<=4 and isinstance(coordenada_linha(arg),int) and isinstance(coordenada_coluna(arg),int)
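# A runnable sketch with assumed accessors (the real ones live elsewhere in
# the project): a coordinate is a (row, column) tuple on a 4x4 board.
def coordenada_linha(c):
    return c[0]
def coordenada_coluna(c):
    return c[1]
print(e_coordenada((2, 3)))  # True
print(e_coordenada((0, 5)))  # False -- row 0 is off the board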
|
61dacd4b775b0276220fe99e6fad2b1684fc6f4c
| 3,637,599
|
def make_coro(func):
"""Wrap a normal function with a coroutine."""
async def wrapper(*args, **kwargs):
"""Run the normal function."""
return func(*args, **kwargs)
return wrapper
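# Usage example: wrap a synchronous function so it can be awaited.
import asyncio
def add(a, b):
    return a + b
print(asyncio.run(make_coro(add)(2, 3)))  # 5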
|
080e543bc91daee13c012225ba47cd6d054c9ea5
| 3,637,600
|
def eliminate(values):
"""Apply the eliminate strategy to a Sudoku puzzle
The eliminate strategy says that if a box has a value assigned, then none
of the peers of that box can have the same value.
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict
The values dictionary with the assigned values eliminated from peers
"""
solved_boxes = [box for box in values.keys() if len(values[box]) == 1]
    for box in solved_boxes:
        digit = values[box]
        # solved_boxes already guarantees len(digit) == 1
        for peerBox in peers[box]:
            values[peerBox] = values[peerBox].replace(digit, '')
return values
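# A toy illustration with a hypothetical two-box `peers` map (the real one
# maps every box to its row, column, and square neighbours):
peers = {'A1': ['A2'], 'A2': ['A1']}
print(eliminate({'A1': '5', 'A2': '125'}))  # {'A1': '5', 'A2': '12'}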
|
a8f41f2cf789c1c14a4f70f760731864af65cc80
| 3,637,601
|
def sqlpool_blob_auditing_policy_update(
cmd,
instance,
workspace_name,
resource_group_name,
sql_pool_name,
state=None,
blob_storage_target_state=None,
storage_account=None,
storage_endpoint=None,
storage_account_access_key=None,
storage_account_subscription_id=None,
is_storage_secondary_key_in_use=None,
retention_days=None,
audit_actions_and_groups=None,
log_analytics_target_state=None,
log_analytics_workspace_resource_id=None,
event_hub_target_state=None,
event_hub_authorization_rule_id=None,
event_hub=None,
is_azure_monitor_target_enabled=None,
blob_auditing_policy_name=None):
"""
Updates a sql pool blob auditing policy. Custom update function to apply parameters to instance.
"""
_audit_policy_update(
cmd=cmd,
instance=instance,
workspace_name=workspace_name,
resource_group_name=resource_group_name,
sql_pool_name=sql_pool_name,
state=state,
blob_storage_target_state=blob_storage_target_state,
storage_account=storage_account,
storage_endpoint=storage_endpoint,
storage_account_access_key=storage_account_access_key,
storage_account_subscription_id=storage_account_subscription_id,
is_storage_secondary_key_in_use=is_storage_secondary_key_in_use,
retention_days=retention_days,
category_name='SQLSecurityAuditEvents',
log_analytics_target_state=log_analytics_target_state,
log_analytics_workspace_resource_id=log_analytics_workspace_resource_id,
event_hub_target_state=event_hub_target_state,
event_hub_authorization_rule_id=event_hub_authorization_rule_id,
event_hub_name=event_hub,
audit_actions_and_groups=audit_actions_and_groups,
is_azure_monitor_target_enabled=is_azure_monitor_target_enabled)
return instance
|
1248e12dae9f6299d86e26d069c22f560856a7e3
| 3,637,602
|
def find_unique_ID(list_of_input_smpls):
"""Attempt to determine a unique ID shared among all input
sample names/IDs, via a largest substring function performed
combinatorially exhaustively pairwise among the input list.
Parameters
----------
list_of_input_smpls : list
Returns
-------
list
Unique set of all possible found shared uid's
"""
    if len(list_of_input_smpls) == 1:
        return list_of_input_smpls
uid = list(
set([
largest_substr(a, b)
for (a, b) in [*itl.combinations(list_of_input_smpls, 2)]
]))
return uid
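# largest_substr and the `itl` alias (presumably itertools) are assumed
# imports; a plausible longest-common-substring helper via difflib:
import difflib
def largest_substr(a, b):
    m = difflib.SequenceMatcher(None, a, b).find_longest_match(0, len(a), 0, len(b))
    return a[m.a:m.a + m.size]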
|
c6ab308ac4e03d1ea6d855348a35eb3d58938439
| 3,637,603
|
import math
import numpy as np
def cartesian_to_polar(xy):
"""Convert :class:`np.ndarray` `xy` to polar coordinates `r` and `theta`.
Args:
xy (:class:`np.ndarray`): x,y coordinates
Returns:
r, theta (tuple of float): step-length and angle
"""
assert xy.ndim == 2, f"Dimensions are {xy.ndim}, expecting 2"
x, y = np.split(xy,[-1], axis=1)
x, y = np.squeeze(x), np.squeeze(y)
r = math.sqrt(x * x + y * y)
theta = math.atan2(y, x)
return r, theta
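# Example: a single (x, y) step of (3, 4) has length 5 and angle atan2(4, 3).
xy = np.array([[3.0, 4.0]])
r, theta = cartesian_to_polar(xy)
print(r, theta)  # 5.0 0.927...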
|
c38c4abfbbe3acea6965530d58a1e6a9614a035b
| 3,637,604
|
def return_manifold(name):
"""
Returns a list of possible manifolds with name 'name'.
Args:
name: manifold name, str.
Returns:
list of manifolds, name, metrics, retractions
"""
m_list = []
descr_list = []
if name == 'ChoiMatrix':
list_of_metrics = ['euclidean']
for metric in list_of_metrics:
m_list.append(manifolds.ChoiMatrix(metric=metric))
descr_list.append((name, metric))
if name == 'DensityMatrix':
list_of_metrics = ['euclidean']
for metric in list_of_metrics:
m_list.append(manifolds.DensityMatrix(metric=metric))
descr_list.append((name, metric))
if name == 'HermitianMatrix':
list_of_metrics = ['euclidean']
for metric in list_of_metrics:
m_list.append(manifolds.HermitianMatrix(metric=metric))
descr_list.append((name, metric))
if name == 'PositiveCone':
list_of_metrics = ['log_euclidean', 'log_cholesky']
for metric in list_of_metrics:
m_list.append(manifolds.PositiveCone(metric=metric))
descr_list.append((name, metric))
if name == 'StiefelManifold':
list_of_metrics = ['euclidean', 'canonical']
list_of_retractions = ['svd', 'cayley', 'qr']
for metric in list_of_metrics:
for retraction in list_of_retractions:
m_list.append(manifolds.StiefelManifold(metric=metric,
retraction=retraction))
descr_list.append((name, metric, retraction))
return m_list, descr_list
|
5361a8c38069d01c5fc9383e4bc06407f485c0d2
| 3,637,605
|
import numpy as np
def change_to_rgba_array(image, dtype="uint8"):
"""Converts an RGB array into RGBA with the alpha value opacity maxed."""
pa = image
if len(pa.shape) == 2:
pa = pa.reshape(list(pa.shape) + [1])
if pa.shape[2] == 1:
pa = pa.repeat(3, axis=2)
if pa.shape[2] == 3:
alphas = 255 * np.ones(
list(pa.shape[:2]) + [1],
dtype=dtype,
)
pa = np.append(pa, alphas, axis=2)
return pa
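# Example: a single-channel image is broadcast to RGB and given a 255 alpha.
gray = np.zeros((4, 4), dtype="uint8")
rgba = change_to_rgba_array(gray)
print(rgba.shape, rgba[..., 3].min())  # (4, 4, 4) 255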
|
3328ec90e114a7b2c0c2529d126494756f0ce608
| 3,637,606
|
def spacetime_lookup(ra,dec,time=None,buffer=0,print_table=True):
"""
    Check for overlapping TESS observations for a transient. Uses the Open SNe Catalog for
discovery/max times and coordinates.
------
Inputs
------
ra : float or str
ra of object
dec : float or str
dec of object
time : float
reference time to use, must be in MJD
buffer : float
overlap buffer time in days
-------
Options
-------
print_table : bool
if true then the lookup table is printed
-------
Returns
-------
tr_list : list
list of ra, dec, and sector that can be put into tessreduce.
"""
if time is None:
print('!!! WARNING no MJD time specified, using default of 59000')
time = 59000
if type(ra) == str:
c = SkyCoord(ra,dec, unit=(u.hourangle, u.deg))
ra = c.ra.deg
dec = c.dec.deg
outID, outEclipLong, outEclipLat, outSecs, outCam, outCcd, outColPix, \
outRowPix, scinfo = focal_plane(0, ra, dec)
sec_times = pd.read_csv(package_directory + 'sector_mjd.csv')
if len(outSecs) > 0:
ind = outSecs - 1
secs = sec_times.iloc[ind]
disc_start = secs['mjd_start'].values - time
disc_end = secs['mjd_end'].values - time
covers = []
differences = []
tr_list = []
tab = []
for i in range(len(disc_start)):
ds = disc_start[i]
de = disc_end[i]
            if (ds - buffer < 0) & (de + buffer > 0):
cover = True
dif = 0
elif (de+buffer < 0):
cover = False
dif = de
elif (ds-buffer > 0):
cover = False
dif = ds
covers += [cover]
differences += [dif]
tab += [[secs.Sector.values[i], cover, dif]]
tr_list += [[ra, dec, secs.Sector.values[i], cover]]
if print_table:
print(tabulate(tab, headers=['Sector', 'Covers','Time difference \n(days)'], tablefmt='orgtbl'))
return tr_list
else:
print('No TESS coverage')
return None
|
efdcfc315c82db808478c302163a146512659f0b
| 3,637,607
|
from typing import Union
from datetime import date, datetime
import time
def utc2local(utc: Union[date, datetime]) -> Union[datetime, date]:
"""Returns the local datetime
Args:
utc: UTC type date or datetime.
Returns:
Local datetime.
"""
epoch = time.mktime(utc.timetuple())
offset = datetime.fromtimestamp(epoch) - datetime.utcfromtimestamp(epoch)
return utc + offset
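# Example: shift a UTC timestamp by the machine's local offset.
print(utc2local(datetime(2021, 6, 1, 12, 0)))  # local wall-clock equivalent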
|
34997f08a8ca7e2156849bb6be346964cc3fadcd
| 3,637,608
|
import math
def gvisc(P, T, Z, grav):
    """Function to Calculate Gas Viscosity in cp
    P     pressure, psia
    T     temperature, °R
    Z     gas compressibility factor
    grav  gas specific gravity
    """
M = 28.964 * grav
x = 3.448 + 986.4 / T + 0.01009 * M
Y = 2.447 - 0.2224 * x
rho = (1.4926 / 1000) * P * M / Z / T
K = (9.379 + 0.01607 * M) * T ** 1.5 / (209.2 + 19.26 * M + T)
return K * math.exp(x * rho ** Y) / 10000
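# Sanity check with typical (assumed) field values; this follows the familiar
# Lee-Gonzalez-Eakin form, so the result lands in the usual range:
print(gvisc(P=2000.0, T=640.0, Z=0.9, grav=0.7))  # ~0.017 cp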
|
5ff1ad63ef581cea0147348104416913c7b77e37
| 3,637,610
|
from compas.geometry import Vector
def get_closest_mesh_normal_to_pt(mesh, pt):
"""
Finds the closest vertex normal to the point.
Parameters
----------
mesh: :class: 'compas.datastructures.Mesh'
pt: :class: 'compas.geometry.Point'
Returns
----------
:class: 'compas.geometry.Vector'
The closest normal of the mesh.
"""
closest_vkey = get_closest_mesh_vkey_to_pt(mesh, pt)
v = mesh.vertex_normal(closest_vkey)
return Vector(v[0], v[1], v[2])
|
95c9bf82c0c24da27ba8433feb9c5c02cf453713
| 3,637,611
|
def algo_reg_deco(func):
"""
Decorator for making registry of functions
"""
algorithms[str(func.__name__)] = func
return func
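# Usage sketch; `algorithms` is assumed to be a module-level dict registry.
algorithms = {}
@algo_reg_deco
def lbfgs(x0):
    return x0
assert 'lbfgs' in algorithms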
|
56228dcf557e7de64c75b598fe8d283eb08050ba
| 3,637,613
|