content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
|
b133557d88deac2d9357731d820de0522521d6f3
| 3,647,073
|
def strategy(history, alivePlayers, whoami, memory):
    """Pick a target for this round.

    history contains all previous rounds (key: id of player (shooter),
    value: id of player (target)).
    alivePlayers is a list of all player ids.
    whoami is your own id (to not kill yourself by mistake).
    memory is None by default and transferred over between rounds (the
    second element returned here comes back as ``memory`` next round);
    it is NOT shared between games (subject to changes).

    Returns a tuple ``(target_id, memory)``. Returning an id that is not a
    player means shooting in the air; memory may be anything, None included.
    """
    # Naive baseline: always target the first listed player and carry no
    # memory forward.
    target = alivePlayers[0]
    return target, None
|
f211a0961269808d9a7b0a08758273d4a03b9136
| 3,647,074
|
def parse_fn(serialized_example: bytes) -> FeaturesType:
    """Parses and converts Tensors for this module's Features.

    This casts the audio_raw_pcm16 feature to float32 and scales it into the
    range [-1.0, 1.0].

    Args:
      serialized_example: A serialized tf.train.ExampleProto with the features
        dict keys declared in the :py:class:Features enum.

    Returns:
      Tensor-valued dict of features. The keys are those declared in the
      :py:class:Features enum.
    """
    spec = {feature.value.name: feature.value.spec for feature in Features}
    parsed = tf.io.parse_single_example(serialized_example, spec)
    audio_name: str = Features.AUDIO.value.name
    pcm = tf.io.decode_raw(parsed[audio_name], tf.int16)
    # Scale the int16 PCM samples into [-1.0, 1.0].
    parsed[audio_name] = tf.cast(pcm, tf.float32) / np.iinfo(np.int16).max
    return parsed
|
54e841987986027dc6d4d989fe6442ceecd022b8
| 3,647,075
|
import click
def cli(ctx: click.Context) -> int:
    """
    Method used to declare root CLI command through decorators.

    :param ctx: click context object supplied by the click framework.
    :return: 0 (success exit status).
    """
    return 0
|
be5016c5c38f435b8a213a6ce39b5571aee809f1
| 3,647,076
|
def parse_clock(line):
    """Parse clock information.

    Returns the clock value as an int, or None when *line* does not match
    REGEX_CLOCK.
    """
    match = parse(REGEX_CLOCK, line)
    if not match:
        return None
    return int(match.group('clock'))
|
a4464c979d31bab463f949bec83da99e72af6ca6
| 3,647,077
|
import requests
def block_latest(self, **kwargs):
    """
    Return the latest block available to the backends, also known as the tip of the blockchain.
    https://docs.blockfrost.io/#tag/Cardano-Blocks/paths/~1blocks~1latest/get
    :param return_type: Optional. "object", "json" or "pandas". Default: "object".
    :type return_type: str
    :returns BlockResponse object.
    :rtype BlockResponse
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    endpoint = f"{self.url}/blocks/latest"
    return requests.get(url=endpoint, headers=self.default_headers)
|
a14fc3512138c1d15b32b09bd20ea03678964437
| 3,647,078
|
def get_courses():
    """
    Route to display all courses
    """
    filters = format_dict(request.args)
    if not filters:
        result = Course.query.order_by(Course.active.desc())
    else:
        try:
            result = Course.query.filter_by(**filters).order_by(Course.active.desc())
        except InvalidRequestError:
            # An unknown column was supplied as a query parameter.
            return { 'message': 'One or more parameter(s) does not exist' }, 400
    return { "courses": [course.serialize for course in result] }
|
6dcdcb5df4d0010661ffe92f55522638ae51a2b8
| 3,647,079
|
def zero_adam_param_states(state: flax.optim.OptimizerState, selector: str):
    """Zeros the Adam moment accumulators for a selected subset of parameters.

    Every parameter whose flattened, "/"-joined path starts with *selector*
    has its first-moment (grad_ema) and second-moment (grad_sq_ema)
    estimates replaced by zeros; all other parameter states and the step
    counter are left untouched.

    Args:
      state: a named tuple containing the state of the optimizer
      selector: a path-prefix string defining which parameters to reset.

    Returns:
      A new flax.optim.OptimizerState with the selected Adam states zeroed.
    """
    step = state.step
    params = flax.core.unfreeze(state.param_states)
    # Flatten the nested param-state tree into {"a/b/c": state} form so the
    # prefix match against `selector` is a simple startswith test.
    flat_params = {
        "/".join(k): v for k, v in traverse_util.flatten_dict(params).items()
    }
    for k in flat_params:
        if k.startswith(selector):
            v = flat_params[k]
            # pylint: disable=protected-access
            flat_params[k] = flax.optim.adam._AdamParamState(
                jnp.zeros_like(v.grad_ema), jnp.zeros_like(v.grad_sq_ema)
            )
    # Rebuild the nested tree from the flattened mapping.
    new_param_states = traverse_util.unflatten_dict(
        {tuple(k.split("/")): v for k, v in flat_params.items()}
    )
    new_param_states = dict(flax.core.freeze(new_param_states))
    new_state = flax.optim.OptimizerState(step, new_param_states)
    return new_state
|
8a7cb65028866e4a7f3a03b589fa1bf5798a25e0
| 3,647,080
|
from bs4 import BeautifulSoup
def get_stock_market_list(corp_cls: str, include_corp_name=True) -> dict:
    """ Return a dictionary of listed companies.

    Parameters
    ----------
    corp_cls: str
        Y: stock market (KOSPI), K: kosdaq market (KOSDAQ), N: konex market (KONEX)
    include_corp_name: bool, optional
        if True, returning dictionary includes corp_name (default: True)

    Returns
    -------
    dict of {stock_code: information}
        Listed-company information (company name, sector, product)
    """
    # The ETC market ('E') is not supported by this download endpoint.
    if corp_cls.upper() == 'E':
        raise ValueError('ETC market is not supported')
    corp_cls_to_market = {
        "Y": "stockMkt",
        "K": "kosdaqMkt",
        "N": "konexMkt",
    }
    url = 'http://kind.krx.co.kr/corpgeneral/corpList.do'
    referer = 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=loadInitPage'
    market_type = corp_cls_to_market[corp_cls.upper()]
    # Query parameters for KIND's corpList "download" action.
    payload = {
        'method': 'download',
        'pageIndex': 1,
        'currentPageSize': 5000,
        'orderMode': 3,
        'orderStat': 'D',
        'searchType': 13,
        'marketType': market_type,
        'fiscalYearEnd': 'all',
        'location': 'all',
    }
    stock_market_list = dict()
    resp = request.post(url=url, payload=payload, referer=referer)
    html = BeautifulSoup(resp.text, 'html.parser')
    rows = html.find_all('tr')
    for row in rows:
        cols = row.find_all('td')
        if len(cols) > 0:
            # Column order assumed from the response table:
            # name, stock code, sector, product.
            corp_name = cols[0].text.strip()
            stock_code = cols[1].text.strip()
            sector = cols[2].text.strip()
            product = cols[3].text.strip()
            corp_info = {'sector': sector, 'product': product, 'corp_cls': corp_cls}
            if include_corp_name:
                corp_info['corp_name'] = corp_name
            stock_market_list[stock_code] = corp_info
    return stock_market_list
|
c8e0242e1ddfcc4f32514f131f3a9797694202c1
| 3,647,082
|
def evaluate_template(template: dict) -> dict:
    """
    This function resolves the template by parsing the T2WML expressions
    and replacing them by the evaluated values of those expressions.

    :param template: mapping of template keys to plain values or T2WML
        expression objects (ItemExpression / ValueExpression /
        BooleanEquation) evaluated against the global ``bindings``.
    :return: dict with the same keys, expressions replaced by their values,
        plus 'cell' entries recording where each value came from.
    """
    response = dict()
    for key, value in template.items():
        if key == 'qualifier':
            # Qualifiers are a list of dicts; evaluate each one separately.
            response[key] = []
            for i in range(len(template[key])):
                temp_dict = dict()
                for k, v in template[key][i].items():
                    if isinstance(v, (ItemExpression, ValueExpression, BooleanEquation)):
                        col, row, temp_dict[k] = v.evaluate_and_get_cell(bindings)
                        temp_dict['cell'] = get_actual_cell_index((col, row))
                    else:
                        temp_dict[k] = v
                # P585 ("point in time"): normalize the value to a datetime
                # string and record its precision.
                if "property" in temp_dict and temp_dict["property"] == "P585":
                    if "format" in temp_dict:
                        # The former ``except Exception as e: raise e`` wrapper
                        # added nothing; let parse errors propagate unchanged.
                        datetime_string, precision = parse_datetime_string(temp_dict["value"], additional_formats=[temp_dict["format"]])
                        if "precision" not in temp_dict:
                            temp_dict["precision"] = int(precision.value.__str__())
                        else:
                            temp_dict["precision"] = translate_precision_to_integer(temp_dict["precision"])
                        temp_dict["value"] = datetime_string
                response[key].append(temp_dict)
        else:
            if isinstance(value, (ItemExpression, ValueExpression, BooleanEquation)):
                col, row, response[key] = value.evaluate_and_get_cell(bindings)
                if key == "item":
                    response['cell'] = get_actual_cell_index((col, row))
            else:
                response[key] = value
    return response
|
596516f9dfb81170212020cfb053339ddb49b716
| 3,647,083
|
def get_CommandeProduits(path, prefix='CP_', cleaned=False):
    """
    Read CSV (CommandeProduits) into Dataframe. All relevant columns are kept and renamed with prefix.

    Args:
        path (str): file path to CommandeProduits.csv
        prefix (str): All relevant columns are renamed with prefix
        cleaned (bool): if True, read a previously cleaned file as-is
            (first column used as index) instead of selecting/renaming

    Returns:
        df (Dataframe): Resulting dataframe
    """
    if cleaned:
        return pd.read_csv(path, sep='\t', encoding='utf-8', index_col=0)
    renames = {
        'Id': prefix + 'Id',
        'Commande_Id': 'Commande_Id',
        'OffreProduit_Id': 'OffreProduit_Id',
        'QuantiteTotale': prefix + 'QuantiteTotale',
        'QuantiteUnite': prefix + 'QuantiteUnite',
        'QuantiteValeur': prefix + 'QuantiteValeur',
        'MontantTotal': prefix + 'MontantTotal',
        'Weight': prefix + 'Weight',
    }
    dtypes = {
        'Id': 'int64',
        'Commande_Id': 'int64',
        'OffreProduit_Id': 'int64',
        'QuantiteTotale': 'float64',
        'QuantiteUnite': 'object',
        'QuantiteValeur': 'float64',
        'MontantTotal': 'float64',
        'Weight': 'float64',
    }
    frame = pd.read_csv(path, sep='\t', encoding='utf-8',
                        usecols=list(renames.keys()), dtype=dtypes)
    return frame.rename(index=str, columns=renames)
|
18c5c7e375abcc57c2cfcbc4f2c58ecec5aecf59
| 3,647,084
|
def hist_equal(image, hist):
    """
    Equalize an image based on a histogram.

    Parameters
    ----------
    image : af.Array
         - A 2 D arrayfire array representing an image, or
         - A multi dimensional array representing batch of images.
    hist : af.Array
         - Containing the histogram of an image.

    Returns
    ---------
    output : af.Array
           - The equalized image.
    """
    equalized = Array()
    # Delegate to the native arrayfire implementation.
    safe_call(backend.get().af_hist_equal(c_pointer(equalized.arr),
                                          image.arr, hist.arr))
    return equalized
|
70aeeb1822752c2f7fb5085d761bb9b309d29335
| 3,647,085
|
def get_close_icon(x1, y1, height, width):
    """Return the bounding box (x_min, y_min, x_max, y_max) of a fixed
    15x15-pixel close icon anchored at the top-left corner (x1, y1).

    ``height`` and ``width`` are accepted for interface compatibility but
    are currently unused (a previous size-proportional variant, removed
    here, computed the icon size from the box dimensions).
    """
    ICON_SIZE = 15
    return x1, y1, x1 + ICON_SIZE, y1 + ICON_SIZE
|
78b65cdeeb4f6b3a526fd5dd41b34f35545f1e9d
| 3,647,086
|
def train_model(network, data, labels, batch_size,
                epochs, validation_data=None, verbose=True, shuffle=False):
    """Fit *network* on (data, labels) and return the result of ``fit``.

    Thin wrapper around the network's ``fit`` method; all options are
    forwarded unchanged as keyword arguments.
    """
    fit_result = network.fit(
        data,
        labels,
        batch_size=batch_size,
        epochs=epochs,
        validation_data=validation_data,
        shuffle=shuffle,
        verbose=verbose,
    )
    return fit_result
|
a2b093aef1b607cd34dd30e8c5f126e1efb3d409
| 3,647,087
|
def taoyuan_agrichannel_irrigation_transfer_loss_rate():
    """
    Real Name: TaoYuan AgriChannel Irrigation Transfer Loss Rate
    Original Eqn: 0
    Units: m3/m3
    Limits: (None, None)
    Type: constant
    Subs: None

    This is the "no loss rate" version: the channel is modeled as lossless,
    so the constant is simply zero.
    """
    loss_rate = 0
    return loss_rate
|
9fd8a84ae79cbeaf8c8259da815f9322f27b253f
| 3,647,088
|
def lambda_handler(event, context):
    """
    Find and replace the following words and output the result.
    Oracle -> Oracle©
    Google -> Google©
    Microsoft -> Microsoft©
    Amazon -> Amazon©
    Deloitte -> Deloitte©
    Example input: “We really like the new security features of Google Cloud”.
    Expected output: “We really like the new security features of Google© Cloud”.
    """
    # Return 400 if event is None or strToReplace is missing/blank.
    # .get() avoids a KeyError when the key is absent entirely (the old
    # event['strToReplace'] lookup crashed instead of returning 400).
    if not event or not event.get('strToReplace'):
        return {
            'statusCode': 400,
            'body': "Input string not provided."
        }
    # Input string
    replacement_string = event['strToReplace']
    # Dictionary of words with replacement words
    words_to_replace = {
        'Oracle': 'Oracle©',
        'Google': 'Google©',
        'Microsoft': 'Microsoft©',
        'Amazon': 'Amazon©',
        'Deloitte': 'Deloitte©',
    }
    # Replace every occurrence of each word in the input string.
    for word, replacement in words_to_replace.items():
        replacement_string = replacement_string.replace(word, replacement)
    return {
        'statusCode': 200,
        'body': replacement_string
    }
|
66dc2914dd04a2e265ed21542bd462b61344d040
| 3,647,089
|
def update_inv(X, X_inv, i, v):
    """Computes a rank-1 update of the inverse of a symmetrical matrix.

    Given a symmetrical matrix X and its inverse X^{-1}, this function computes
    the inverse of Y, which is a copy of X, with the i'th row&column replaced
    by given vector v.

    Parameters
    ----------
    X : ndarray, shape (N, N)
        A symmetrical matrix.
    X_inv : nparray, shape (N, N)
        The inverse of X.
    i : int
        The index of the row/column to replace.
    v : ndarray, shape (N,)
        The values to replace the row/column with.

    Returns
    -------
    Y_inv : ndarray, shape (N, N)
        The inverse of Y.
    """
    # Express the row+column replacement as Y = X + U V, where U stacks the
    # column delta next to an indicator column and V mirrors it for the row.
    U = v[:, np.newaxis] - X[:, [i]]
    mask = np.zeros((len(U), 1))
    mask[i] = 1
    U = np.hstack((U, mask))
    V = U[:, [1, 0]].T
    # Avoid double-counting the (i, i) entry shared by the row and column
    # updates.
    V[1, i] = 0
    C = np.eye(2)
    # Woodbury identity (with C = I_2):
    # (X + U C V)^-1 = X^-1 - X^-1 U (C + V X^-1 U)^-1 V X^-1
    X_inv_U = X_inv.dot(U)
    V_X_inv = V.dot(X_inv)
    Y_inv = X_inv - X_inv_U.dot(pinv(C + V_X_inv.dot(U))).dot(V_X_inv)
    return Y_inv
|
c811dbf699d8f93fa2fa5b3f68c5b23cf4131e9f
| 3,647,090
|
import csv
def read_barcode_lineno_map(stream):
    """Build a map of barcodes to line number from a stream.

    This builds a one-based dictionary of barcode to line numbers; the
    barcode is the first tab-separated field of each line.
    """
    reader = csv.reader(stream, delimiter="\t")
    # enumerate(..., 1) yields one-based line numbers directly.
    return {row[0]: lineno for lineno, row in enumerate(reader, 1)}
|
545a0d02dd76e774ba0de86431113ad9f36a098e
| 3,647,091
|
def match_in_candidate_innings(entry, innings, summary_innings, entities):
    """
    :param entry:
    :param innings: innings to be searched in
    :param summary_innings: innings mentioned in the summary segment
    :param entities: total entities in the segment
    :return: the matched inning, or -1 when at most one entity remains
        unaccounted for by the summary innings
    """
    covered = set()
    for summary_inning in summary_innings:
        covered.update(get_matching_entities_in_inning(entry, summary_inning, entities))
    unmatched = entities.difference(covered)
    if len(unmatched) <= 1:
        return -1
    # Search the innings not already covered by the summary, preserving
    # their original order.
    leftover = set(innings).difference(set(summary_innings))
    ordered_leftover = [inning for inning in innings if inning in leftover]
    return get_inning_all_entities_set_intersection(entry, ordered_leftover, unmatched)
|
3551212f79c6ecb298ec6b55aa7b68213b950394
| 3,647,092
|
from typing import Optional
from typing import Union
from typing import Callable
from typing import Any
def checkpoint(
    name: Optional[str] = None,
    on_error: bool = True,
    cond: Union[bool, Callable[..., bool]] = False,
) -> Callable[[Callable], Any]:
    """
    Create a checkpointing decorator.

    Args:
        name (Optional[str]): Name of the checkpoint when saved. Defaults to
            the wrapped function's ``__name__`` when omitted.
        on_error (bool): Whether to save checkpoint when an error occurs.
        cond (Union[bool, Callable[..., bool]]): Condition under which to save checkpoint.
            If a Callable, all parameters of the wrapped function should be passed
            and it has to return a boolean.

    Returns:
        A decorator function.
    """
    def ckpt_worker(func: Callable):
        # Fall back to the function's own name when none was supplied.
        ckpt_name = func.__name__ if name is None else name
        return CkptWrapper(func=func, ckpt_name=ckpt_name, on_error=on_error, cond=cond)
    return ckpt_worker
|
39bab1a33523c34b04a2ed7f2efd6467de63b27b
| 3,647,093
|
def return_int(bit_len, unsigned=False):
    """
    Return a decorator that validates and sign-converts a wrapped function's
    return value.

    The target function of the decorator should return only one value,
    e.g. func(*args, **kargs) -> value.

    :param bit_len: width of the integer in bits; must be a member of
        VALID_BIT_LENGTH_OF_INT.
    :param unsigned: if False, values with the top bit set are converted to
        their negative two's-complement interpretation.
    :raises ByteDatasValueError: if bit_len is invalid, or the wrapped
        function returns a value outside [0, 2**bit_len).
    """
    if bit_len not in VALID_BIT_LENGTH_OF_INT:
        err = "Value of bit_len should be the one of {}, but your bit_len={}."
        raise ByteDatasValueError(err.format(VALID_BIT_LENGTH_OF_INT, bit_len))
    # calculate max_value for changing raw value to valid value
    max_value = 2**bit_len
    def decorator(function):
        """decorator function"""
        @wraps(function)
        def wrapper(*args, **kwargs):
            """
            Validate the raw value, then convert to signed form if needed.
            """
            value = function(*args, **kwargs)
            if value >= max_value or value < 0:
                err = ("Returned value of {} should be between 0 and {}, but your "
                       "value = {}.")
                raise ByteDatasValueError(err.format(function.__name__, max_value, value))
            if unsigned is False:
                # Two's complement: any value with the top bit set, i.e.
                # value >= 2**(bit_len-1), is negative. The previous ``>``
                # comparison wrongly kept exactly 2**(bit_len-1) (e.g. 128
                # for 8 bits) positive instead of mapping it to the minimum
                # negative value (-128).
                value = value - max_value if value >= max_value//2 else value
            return value
        return wrapper
    return decorator
|
66121d389a389c6152fd4491ed8a698336e042a2
| 3,647,094
|
def get_integral_curve(f, init_xy, x_end, delta):
    """
    Solve the ODE 'dy/dx = f(x, y)' with the explicit Euler method.

    Returns (xs, ys): the grid points and the approximated solution values,
    starting from init_xy and stepping by delta until x_end.
    """
    x, y = init_xy
    xs = [x]
    ys = [y]
    # One Euler step per grid point in [x0, x_end); f is evaluated at the
    # current point before x advances.
    for _ in np.arange(init_xy[0], x_end, delta):
        y = y + delta * f(x, y)
        x = x + delta
        xs.append(x)
        ys.append(y)
    return xs, ys
|
0526643acd37b8d7c2646d3a21d54e9d9f16ef58
| 3,647,095
|
def compute_atime_posteriors(sg, proposals,
                             global_srate=1.0,
                             use_ar=False,
                             raw_data=False,
                             event_idx=None):
    """
    compute the bayesian cross-correlation (logodds of signal under an AR noise model)
    for all signals in the historical library, against all signals in the current SG.
    This is quite expensive so should in general be run only once, and the results cached.

    Args (semantics inferred from this code only — confirm against callers):
        sg: graph-like object providing ``station_waves`` and a ``logger``.
        proposals: iterable of (x, signals) pairs where signals maps
            (sta, chan, band, phase) -> correlation template data ``c``.
        global_srate: sample rate used when converting arrival-time
            likelihoods to origin-time likelihoods.
        use_ar: if True, score with the AR-model advantage; otherwise use a
            scaled iid advantage.
        raw_data: if True, correlate against the raw waveform (NaNs zeroed)
            instead of the unexplained (Kalman) signal.
        event_idx: if not None, only the proposal at this index is processed.

    Returns:
        list of (x, sta_lls), where sta_lls maps (wn.label, phase) ->
        (origin_ll, origin_stime, signal_scale).
    """
    atime_lls = []
    i = 0  # NOTE(review): incremented but never read; looks like leftover state.
    for idx, (x, signals) in enumerate(proposals):
        if event_idx is not None and event_idx != idx:
            continue
        sta_lls = dict()
        for (sta, chan, band, phase), c in signals.items():
            wns = sg.station_waves[sta]
            if len(wns) == 0:
                continue
            elif len(wns) > 1:
                raise Exception("haven't worked out correlation proposals with multiple wns from same station")
            wn = wns[0]
            if raw_data:
                # Raw waveform, with NaN gaps zeroed out.
                sdata = wn.get_value().data.copy()
                sdata[np.isnan(sdata)] = 0.0
            else:
                sdata = wn.unexplained_kalman()
            if use_ar:
                lls = ar_advantage(sdata, c, wn.nm)
            else:
                normed_sdata = sdata / wn.nm_env.c #np.std(sdata)
                lls = np.sqrt(iid_advantage(normed_sdata, c)) # sqrt for laplacian noise, essentially
            # Map arrival-time likelihoods to origin-time likelihoods via the
            # travel-time residual model.
            tt_array, tt_mean = build_ttr_model_array(sg, x, sta, wn.srate, phase=phase)
            origin_ll, origin_stime = atime_likelihood_to_origin_likelihood(lls, wn.st, wn.srate, tt_mean, tt_array, global_srate)
            signal_scale = wn.nm_env.c
            sta_lls[(wn.label, phase)] = origin_ll, origin_stime, signal_scale
            sg.logger.info("computed advantage for %s %s %s" % (x, wn.label, phase))
        i += 1
        atime_lls.append((x, sta_lls))
    return atime_lls
|
1029f57fe500ef6f08eec56ab34539d3f9a80637
| 3,647,096
|
def search4vowels(pharse: str) -> set:
    """Return the set of vowels found in the supplied word."""
    # Fixed the malformed '""""' docstring opener, which leaked a stray
    # quote into the docstring text.
    vowels = set('aeiou')
    return vowels.intersection(set(pharse))
|
8a45c50828b6ba8d173572ac771eb8fe5ddc5a42
| 3,647,097
|
def rsort(s):
    """Sort sequence s in ascending order.

    Recursive selection sort: repeatedly extract the minimum element.

    >>> rsort([])
    []
    >>> rsort([1])
    [1]
    >>> rsort([1, 1, 1])
    [1, 1, 1]
    >>> rsort([1, 2, 3])
    [1, 2, 3]
    >>> rsort([3, 2, 1])
    [1, 2, 3]
    >>> rsort([1, 2, 1])
    [1, 1, 2]
    >>> rsort([1,2,3, 2, 1])
    [1, 1, 2, 2, 3]
    """
    if len(s) <= 1:
        return s
    # Prepend the minimum, then sort the remainder with it removed.
    return [rmin(s)] + rsort(remove(rmin(s), s))
|
d9f67d713e55d50cd4468ad709f04c7bfea05c71
| 3,647,098
|
def read_starlight_output_syn_spec(lines):
    """ read syn_spec of starlight output

    Each line holds four whitespace-separated columns: wavelength,
    observed flux, synthetic flux and weight. Returns a Table with
    columns 'wave', 'flux_obs', 'flux_syn' and 'weight'.
    """
    Nl_obs = len(lines)
    # np.float was removed in NumPy 1.20+; it was only ever an alias for
    # the builtin float, so use that directly.
    wave = Column(np.zeros((Nl_obs, ), dtype=float), 'wave')
    flux_obs = Column(np.zeros((Nl_obs, ), dtype=float), 'flux_obs')
    flux_syn = Column(np.zeros((Nl_obs, ), dtype=float), 'flux_syn')
    weight = Column(np.zeros((Nl_obs, ), dtype=float), 'weight')
    for i, line in enumerate(lines):
        line_split = line.split()
        wave[i] = float(line_split[0])
        flux_obs[i] = float(line_split[1])
        flux_syn[i] = float(line_split[2])
        weight[i] = float(line_split[3])
    return Table([wave, flux_obs, flux_syn, weight])
|
e25aed3ff9294f07b1549b610030241895b78f67
| 3,647,100
|
def get_stations_trips(station_id):
    """Fetch the suburban schedule for a station from the Yandex Rasp API.

    https://api.rasp.yandex.net/v1.0/schedule/ ?
        apikey=<key>
        & format=<format>
        & station=<station code>
        & lang=<language>
        & [date=<date>]
        & [transport_types=<transport type>]
        & [system=<current coding system>]
        & [show_systems=<codes in response>]
    """
    query = {
        'apikey': RASP_KEY,
        'format': 'json',
        'station': station_id,
        'lang': 'ua',
        'transport_types': 'suburban',
    }
    return get_json('https://api.rasp.yandex.net/v1.0/schedule/', query)
|
8b841f19b135e7792e2f8d3aad642f38b2a6cd74
| 3,647,101
|
def _compute_pairwise_kpt_distance(a, b):
    """
    Args:
        a, b (poses): Two sets of poses to match
            Each "poses" is represented as a list of 3x17 or 4x17 np.ndarray

    Returns:
        np.ndarray of shape (len(a), len(b)) holding the PCK distance for
        every pair of poses.
    """
    distances = np.zeros((len(a), len(b)))
    for row, pose_a in enumerate(a):
        for col, pose_b in enumerate(b):
            distances[row, col] = pck_distance(pose_a, pose_b)
    return distances
|
aaf4696292bb7d1e9377347d93d97da321787c6f
| 3,647,102
|
def _extract_dialog_node_name(dialog_nodes):
"""
For each dialog_node (node_id) of type *standard*, check if *title exists*.
If exists, use the title for the node_name. otherwise, use the dialog_node
For all other cases, use the dialog_node
dialog_node: (dialog_node_title, dialog_node_type)
In the case of Login Issues,
"title": "Login Issue",
"dialog_node": "Login Issues",
the record will be created as:
"Login Issues": ("Login Issue", "standard")
"""
nodes_dict = {}
nodes_type = {}
for obj in dialog_nodes:
if (obj['type']=='standard') and ('title' in obj):
if (obj['title'] is not None):
nodes_dict[obj['dialog_node']] = (obj['title'],obj['type'])
else:
nodes_dict[obj['dialog_node']] = (obj['dialog_node'],obj['type'])
else:
nodes_dict[obj['dialog_node']] = (obj['dialog_node'],obj['type'])
return nodes_dict
|
23121efa486c2da16a54b2441bb1435eec5b8b49
| 3,647,103
|
from typing import Dict
from typing import List
def search_all_entities(bsp, **search: str) -> Dict[str, List[Dict[str, str]]]:
    """search_all_entities(key="value") -> {"LUMP": [{"key": "value", ...}]}

    Searches the base ENTITIES lump plus the env/fx/script/snd/spawn
    sub-lumps of *bsp* for entities matching all given key=value pairs;
    lumps with no matches are omitted from the result.
    """
    # Annotation fixed: **search annotates the per-value type, which is str.
    out = dict()
    for LUMP_name in ("ENTITIES", *(f"ENTITIES_{s}" for s in ("env", "fx", "script", "snd", "spawn"))):
        # Missing lumps fall back to an empty Entities block.
        entity_lump = getattr(bsp, LUMP_name, shared.Entities(b""))
        results = entity_lump.search(**search)
        if len(results) != 0:
            out[LUMP_name] = results
    return out
|
ca24b50524cc96b35a605bebc5ead0d8d4342314
| 3,647,105
|
import re
def is_probably_beginning_of_sentence(line):
"""Return True if this line begins a new sentence."""
# Check heuristically for a parameter list.
for token in ['@', '-', r'\*']:
if re.search(r'\s' + token + r'\s', line):
return True
stripped_line = line.strip()
is_beginning_of_sentence = re.match(r'[^\w"\'`\(\)]', stripped_line)
is_pydoc_ref = re.match(r'^:\w+:', stripped_line)
return is_beginning_of_sentence and not is_pydoc_ref
|
68a6a2151b4559f0b95e0ac82a8a16bd06d9d1ff
| 3,647,106
|
def default_attack_handler(deck, discard, hand, turn, supply, attack):
    """Handle some basic attacks in a default manner. Returns True iff the
    attack was handled.

    Handled cases: COUNCIL_ROOM (draw one card — not truly an attack), MOAT
    in hand (blocks anything), MINION with more than 4 cards in hand
    (discard hand and redraw 4), WITCH (gain a Curse to the discard),
    SEA_HAG (move top of deck to discard, gain a Curse onto the deck).
    The ``covertool.cover`` calls are coverage markers only.
    """
    covertool.cover("domsim.py:219")
    if attack == COUNCIL_ROOM:
        # Not really an attack, but this is an easy way to handle it.
        covertool.cover("domsim.py:221")
        hand += draw(deck, discard, 1)
        covertool.cover("domsim.py:222")
        return True
    elif MOAT in hand:
        covertool.cover("domsim.py:224")
        return True
    elif attack == MINION and len(hand) > 4:
        covertool.cover("domsim.py:226")
        discard += hand
        covertool.cover("domsim.py:227")
        # Slice assignment replaces the hand in place so the caller's list
        # object is updated.
        hand[:] = draw(deck, discard, 4)
        covertool.cover("domsim.py:228")
        return True
    elif attack == WITCH:
        covertool.cover("domsim.py:230")
        gain(CURSE, supply, discard)
        covertool.cover("domsim.py:231")
        return True
    elif attack == SEA_HAG:
        covertool.cover("domsim.py:233")
        discard += draw(deck, discard, 1)
        covertool.cover("domsim.py:234")
        gain(CURSE, supply, deck)
        covertool.cover("domsim.py:235")
        return True
    else:
        covertool.cover("domsim.py:237")
        return False
|
79745f30b5607348771ee6d5778202370e553a7b
| 3,647,107
|
def hello_world():
    """Return a bool indicating whether a user with the posted email exists.

    Reads the email from the JSON request body and looks it up in Users.
    """
    email = request.json['email']
    c = conn.cursor()
    # Parameterized query: never interpolate user input into SQL. The old
    # str.format() version was an SQL-injection hole and also produced
    # invalid SQL, since the value was interpolated unquoted.
    # NOTE(review): uses the qmark paramstyle; switch the placeholder to
    # %s if this driver requires the format paramstyle.
    c.execute("select * from Users where Users.email = ?", (email,))
    # The original never inspected the query result and always returned
    # False; report whether a matching row actually exists, as the
    # docstring promises.
    result = c.fetchone() is not None
    conn.commit()
    conn.close()
    return result
|
c28b33c5106b51144d4b58f3bffd2ea128dd948a
| 3,647,110
|
from typing import Callable
from typing import Optional
import torch
def get_model_relations(
    model: Callable,
    model_args: Optional[tuple] = None,
    model_kwargs: Optional[dict] = None,
):
    """
    Infer relations of RVs and plates from given model and optionally data.
    See https://github.com/pyro-ppl/pyro/issues/949 for more details.
    This returns a dictionary with keys:
    - "sample_sample" map each downstream sample site to a list of the upstream
      sample sites on which it depend;
    - "sample_dist" maps each sample site to the name of the distribution at
      that site;
    - "plate_sample" maps each plate name to a list of the sample sites within
      that plate; and
    - "observe" is a list of observed sample sites.
    For example for the model::
        def model(data):
            m = pyro.sample('m', dist.Normal(0, 1))
            sd = pyro.sample('sd', dist.LogNormal(m, 1))
            with pyro.plate('N', len(data)):
                pyro.sample('obs', dist.Normal(m, sd), obs=data)
    the relation is::
        {'sample_sample': {'m': [], 'sd': ['m'], 'obs': ['m', 'sd']},
         'sample_dist': {'m': 'Normal', 'sd': 'LogNormal', 'obs': 'Normal'},
         'plate_sample': {'N': ['obs']},
         'observed': ['obs']}
    :param callable model: A model to inspect.
    :param model_args: Optional tuple of model args.
    :param model_kwargs: Optional dict of model kwargs.
    :rtype: dict
    """
    if model_args is None:
        model_args = ()
    if model_kwargs is None:
        model_kwargs = {}
    # Trace one execution of the model under provenance tracking, without
    # gradients and with RNG state restored afterwards.
    with torch.random.fork_rng(), torch.no_grad(), pyro.validation_enabled(False):
        with TrackProvenance():
            trace = poutine.trace(model).get_trace(*model_args, **model_kwargs)
    sample_sample = {}
    sample_dist = {}
    plate_sample = defaultdict(list)
    observed = []
    for name, site in trace.nodes.items():
        if site["type"] != "sample" or site_is_subsample(site):
            continue
        # Provenance attached to the site's log-prob lists the upstream
        # sample sites it depends on.
        sample_sample[name] = [
            upstream
            for upstream in get_provenance(site["fn"].log_prob(site["value"]))
            if upstream != name
        ]
        sample_dist[name] = _get_dist_name(site["fn"])
        for frame in site["cond_indep_stack"]:
            plate_sample[frame.name].append(name)
        if site["is_observed"]:
            observed.append(name)
    def _resolve_plate_samples(plate_samples):
        # Split partially-overlapping plates into disjoint groups, giving the
        # non-overlapping remainder a "__CLONE"-suffixed plate name.
        for p, pv in plate_samples.items():
            pv = set(pv)
            for q, qv in plate_samples.items():
                qv = set(qv)
                if len(pv & qv) > 0 and len(pv - qv) > 0 and len(qv - pv) > 0:
                    plate_samples_ = plate_samples.copy()
                    plate_samples_[q] = pv & qv
                    plate_samples_[q + "__CLONE"] = qv - pv
                    return _resolve_plate_samples(plate_samples_)
        return plate_samples
    plate_sample = _resolve_plate_samples(plate_sample)
    # convert set to list to keep order of variables
    plate_sample = {
        k: [name for name in trace.nodes if name in v] for k, v in plate_sample.items()
    }
    return {
        "sample_sample": sample_sample,
        "sample_dist": sample_dist,
        "plate_sample": dict(plate_sample),
        "observed": observed,
    }
|
b0cc8f58a70575492ba0c5efe7f282b0e9ab0a4e
| 3,647,111
|
from typing import Any
def is_valid_dim(x: Any) -> bool:
    """Determine if the argument would be a valid dim when included in torch.Size.

    A valid dim is a strictly positive int.
    """
    if not isinstance(x, int):
        return False
    return x > 0
|
09b8dd41b20a835583cd051868f13756e8383342
| 3,647,113
|
def compute_alpha(n, S_d, d_min):
    """
    Approximate the alpha of a power law distribution.

    Parameters
    ----------
    n: int or np.array of int
        Number of entries that are larger than or equal to d_min
    S_d: float or np.array of float
        Sum of log degrees in the distribution that are larger than or equal to d_min
    d_min: int
        The minimum degree of nodes to consider

    Returns
    -------
    alpha: float
        The estimated alpha of the power law distribution
    """
    # Maximum-likelihood style estimator with the usual -0.5 continuity
    # correction applied to d_min.
    denominator = S_d - n * np.log(d_min - 0.5)
    return n / denominator + 1
|
9df2c39ccaa70e729b1bf2f7bfcc78cde0f649de
| 3,647,114
|
from typing import Optional
def _head_object(
    s3_conn: S3Client, bucket: str, key: str
) -> Optional[HeadObjectOutputTypeDef]:
    """Retrieve information about an object in S3 if it exists.

    Args:
        s3_conn: S3 connection to use for operations.
        bucket: name of the bucket containing the key.
        key: name of the key to lookup.

    Returns:
        S3 object information, or None if the object does not exist.
        See the AWS documentation for explanation of the contents.

    Raises:
        botocore.exceptions.ClientError: any error from boto3 other than key
            not found is passed through.
    """
    try:
        return s3_conn.head_object(Bucket=bucket, Key=key)
    except botocore.exceptions.ClientError as err:
        # A 404 error code means "no such key" — report that as None; every
        # other client error propagates to the caller.
        if err.response["Error"]["Code"] == "404":
            return None
        raise
|
3b7b239ea09bf3df75c9c9a3e3e19cba505d67a5
| 3,647,115
|
from operator import add
def Residual(feat_maps_in, feat_maps_out, prev_layer):
    """
    A customizable residual unit with convolutional and shortcut blocks

    Args:
        feat_maps_in: number of channels/filters coming in, from input or previous layer
        feat_maps_out: how many output channels/filters this block will produce
        prev_layer: the previous layer
    """
    shortcut = skip_block(feat_maps_in, feat_maps_out, prev_layer)
    body = conv_block(feat_maps_out, prev_layer)
    # The residual connection: element-wise sum of the shortcut and the
    # convolutional path, followed by a LeakyReLU activation.
    return LeakyReLU()(add([shortcut, body]))
|
cfb7345340785a8fc7b3068c2baa0e5452b189aa
| 3,647,116
|
from typing import List
from datetime import datetime
def clean_detail_line_data(detail_row: List[str], date: str) -> List[str]:
    """
    :param detail_row: uncleaned detail row
    :param date: job date to be prepended to the row
    :return: a cleaned list of details fields
    """
    if not detail_row:
        # Nothing to clean; return the empty row unchanged. (A leftover
        # debug print() on this path was removed.)
        return detail_row
    # The age field is an integer number of days between the date when the
    # video was uploaded and Feb. 15, 2007 (YouTube's establishment).
    age_field_location = 2
    age_date_format = '%Y-%m-%d'
    raw_age = detail_row[age_field_location].strip()
    age = int(raw_age) if raw_age else 0
    # Convert the relative age into an absolute upload date.
    upload_date = datetime.strptime('2007-02-15', age_date_format) + timedelta(days=age)
    detail_row[age_field_location] = datetime.strftime(upload_date, age_date_format)
    return [date, ] + detail_row
|
ff9c3b8f5079674bf9a727f4baedc264443dbdeb
| 3,647,117
|
def lammps_prod(job):
    """Run npt ensemble production."""
    in_script_name = "in.prod"
    # Patch the slurm submission script for this job's state point.
    modify_submit_lammps(in_script_name, job.sp)
    return (f"sbatch submit.slurm {in_script_name} {job.sp.replica} "
            f"{job.sp.temperature} {job.sp.pressure} {job.sp.cutoff}")
|
faa122a6e22f54028cd536f78c17094f9c1f07b4
| 3,647,118
|
def gather_tensors(tensors, indices):
    """Performs a tf.gather operation on a set of Tensors.

    Args:
      tensors: A potentially nested tuple or list of Tensors.
      indices: The indices to use for the gather operation.

    Returns:
      gathered_tensors: A potentially nested tuple or list of Tensors with the
        same structure as the 'tensors' input argument. Contains the result of
        applying tf.gather(x, indices) on each element x in 'tensors'.
    """
    def _gather_one(tensor):
        return tf.gather(tensor, indices)
    return map_nested(_gather_one, tensors)
|
68fd88121cdca7beb13f3f5633401ddb420f34d4
| 3,647,119
|
import types
def is_iterator(obj):
    """
    Predicate that returns whether an object is an iterator.

    Accepts generators and any object implementing the iterator protocol.
    The previous version only looked for a ``next`` method, which exists on
    Python 2 iterators; Python 3 renamed it to ``__next__``, so built-in
    iterators (e.g. ``iter([1, 2])``) were misclassified. Both spellings
    are accepted for compatibility.
    """
    if isinstance(obj, types.GeneratorType):
        return True
    attrs = dir(obj)
    return '__iter__' in attrs and ('__next__' in attrs or 'next' in attrs)
|
db57a2a1f171a48cc43ba4c248387191780dfd04
| 3,647,121
|
import shutil
def delete_entries(keytab_file: str, slots: t.List[int]) -> bool:
    """
    Deletes one or more entries from a Kerberos keytab.
    This function will only delete slots that exist within the keylist.
    Once the slots are deleted, the current keylist will be written to
    a temporary file. This avoids having the keylist appended to the
    keylist within the keytab file. Once the keylist is written to the
    temporary file, the temporary file will be move/renamed the original
    keytab filename.
    :param keytab_file: Kerberos V5 keytab file name. The file can be a
        relative path read from the user's home directory.
    :param slots: list of slots to be deleted from the keylist.
    :return: True on success, otherwise False.
    """
    keytab_file = ktutil.keytab_exists(keytab_file)
    if not keytab_file or not isinstance(slots, list):
        return False
    keytab_tmp = ktutil.resolve_keytab_file(f"{keytab_file}.tmp")
    kt = ktutil()
    # Read the Kerberos keytab file first to check if slots exist before
    # trying to delete them.
    kt.read_kt(keytab_file)
    kt.list()
    kt.quit()
    # Keep only the requested slots that actually exist in the keylist.
    existing_slots = [
        key["slot"] for key in kt.keylist if key["slot"] in slots]
    if len(existing_slots) == 0:
        return False # No slots exist to be deleted.
    # Re-initialize 'ktutil' command and delete the slot(s).
    # Write the current keylist to a temporary file, then rename
    # the temporary file to the original name. This avoids the
    # duplication caused by the ``write_kt`` invocation.
    kt.ktutil_init()
    kt.read_kt(keytab_file)
    for slot in existing_slots:
        kt.delete_entry(slot)
    kt.write_kt(keytab_tmp)
    kt.quit()
    shutil.move(keytab_tmp, keytab_file)
    # NOTE(review): this returns True when kt.error is truthy, which reads
    # as inverted relative to ":return: True on success" above — confirm
    # the semantics of ``kt.error`` before changing it.
    return True if kt.error else False
|
cc488778abdc75a9814702642fcfc9b245b6a99c
| 3,647,122
|
def detect(environ, context=None):
    """
    parse HTTP user agent string and detect a mobile device.

    Returns a carrier-specific agent built by the matching factory, or a
    NonMobile agent when no User-Agent header is present or the carrier is
    not one of the supported ones (docomo/ezweb/softbank/willcom).
    """
    context = context or Context()
    try:
        ## if key 'HTTP_USER_AGENT' doesn't exist,
        ## we are not able to decide agent class in the first place.
        ## so raise KeyError to return NonMobile agent.
        carrier = detect_fast(environ['HTTP_USER_AGENT'])
        ## if carrier is 'nonmobile', raise KeyError intentionally
        ## ('nonmobile' deliberately has no entry in the table below).
        factory_class = {
            'docomo' : context.docomo_factory,
            'ezweb' : context.ezweb_factory,
            'softbank': context.softbank_factory,
            'willcom' : context.willcom_factory,
        }[carrier]
        return factory_class().create(environ, context)
    except KeyError:
        # Either the header was missing or the carrier was unknown.
        return NonMobile(environ, context)
|
2977cb2847c4917904cc096c5787b6ddb3a889b9
| 3,647,123
|
async def get_product(id: UUID4):  # noqa: A002
    """Fetch the ProductGinoModel with the given id, or raise a 404."""
    product = await ProductGinoModel.get_or_404(id)
    return product
|
f7988faf08da081a1922f8df24b843be67658c16
| 3,647,125
|
def LSIIR_unc(H,UH,Nb,Na,f,Fs,tau=0):
    """Design of stable IIR filter as fit to reciprocal of given frequency response with uncertainty
    Least-squares fit of a digital IIR filter to the reciprocal of a given set
    of frequency response values with given associated uncertainty. Propagation of uncertainties is
    carried out using the Monte Carlo method.
    Parameters
    ----------
    H: np.ndarray
        frequency response values.
    UH: np.ndarray
        uncertainties associated with real and imaginary part of H
    Nb: int
        order of IIR numerator polynomial.
    Na: int
        order of IIR denominator polynomial.
    f: np.ndarray
        frequencies corresponding to H
    Fs: float
        sampling frequency for digital IIR filter.
    tau: float
        initial estimate of time delay for filter stabilization.
    Returns
    -------
    b,a: np.ndarray
        IIR filter coefficients
    tau: int
        time delay (in samples)
    Uba: np.ndarray
        uncertainties associated with [a[1:],b]
    References
    ----------
    * Eichstädt, Elster, Esward and Hessling [Eichst2010]_
    .. seealso:: :mod:`PyDynamic.uncertainty.propagate_filter.IIRuncFilter`
                 :mod:`PyDynamic.deconvolution.fit_filter.LSIIR`
    """
    runs = 1000  # number of Monte Carlo trials (GUM S2)
    print("\nLeast-squares fit of an order %d digital IIR filter to the" % max(Nb,Na))
    print("reciprocal of a frequency response given by %d values.\n" % len(H))
    print("Uncertainties of the filter coefficients are evaluated using\n"\
        "the GUM S2 Monte Carlo method with %d runs.\n" % runs)
    # Draw `runs` samples of [Re(H), Im(H)] from the joint normal defined by UH,
    # then recombine each sample into a complex frequency response.
    HRI = np.random.multivariate_normal(np.hstack((np.real(H),np.imag(H))),UH,runs)
    HH = HRI[:,:len(f)] + 1j*HRI[:,len(f):]
    # One IIR fit per Monte Carlo sample; each row of AB stores [a[1:], b].
    AB = np.zeros((runs,Nb+Na+1))
    Tau= np.zeros((runs,))
    for k in range(runs):
        bi,ai,Tau[k] = LSIIR(HH[k,:],Nb,Na,f,Fs,tau,verbose=False)
        AB[k,:] = np.hstack((ai[1:],bi))
    # Point estimates are the MC means; a[0] is fixed to 1 by convention.
    bi = np.mean(AB[:,Na:],axis=0)
    ai = np.hstack((np.array([1.0]),np.mean(AB[:,:Na],axis=0)))
    Uab= np.cov(AB,rowvar=0)  # covariance of the stacked coefficients [a[1:], b]
    tau = np.mean(Tau)  # NOTE(review): mean of per-run delays; may be non-integer
    return bi,ai, tau, Uab
|
d262065fdfe49514101ff65a6c4ea7329e8aef84
| 3,647,126
|
import time
def genMeasureCircuit(H, Nq, commutativity_type, clique_cover_method=BronKerbosch):
    """
    Take in a given Hamiltonian, H, and produce the minimum number of
    necessary circuits to measure each term of H.
    Returns:
        (num_terms, num_circuits, elapsed_seconds) tuple; on a
        RecursionError inside the clique cover it returns (0, 0, 0).
    """
    start_time = time.time()
    # One row per Hamiltonian term (the identity term H[0] is skipped);
    # each row holds the required measurement basis per qubit, '*' = don't care.
    term_reqs = np.full((len(H[1:]),Nq),'*',dtype=str)
    for i, term in enumerate(H[1:]):
        for op in term[1]:
            # ops look like e.g. 'X3': basis letter followed by qubit index
            qubit_index = int(op[1:])
            basis = op[0]
            term_reqs[i][qubit_index] = basis
    # Generate a graph representing the commutativity of the Hamiltonian terms
    comm_graph = commutativity_type.gen_comm_graph(term_reqs)
    num_terms = len(comm_graph)
    # Find a set of cliques within the graph where the nodes in each clique
    # are disjoint from one another.
    try:
        max_cliques = clique_cover_method(comm_graph)
    except RecursionError as re:
        # Recursive covers (e.g. Bron-Kerbosch) can blow the stack on
        # large graphs; report and bail out with sentinel values.
        print('Maximum recursion depth reached: {}'.format(re.args[0]))
        return 0, 0, 0
    end_time = time.time()
    print('MEASURECIRCUIT: {} found {} unique circuits'.format(
        clique_cover_method.__name__, len(max_cliques)))
    et = end_time - start_time
    print('MEASURECIRCUIT: Elapsed time: {:.6f}s'.format(et))
    return num_terms, len(max_cliques), et
|
1128592a61da7601e41c0328abbf8770f187d009
| 3,647,127
|
def run_test(session, m, data, batch_size, num_steps):
    """Evaluate model ``m`` on ``data`` and return the mean per-batch cost."""
    total_cost = 0.0
    num_batches = 0
    state = session.run(m.initial_state)
    for inputs, targets in reader.dataset_iterator(data, batch_size, num_steps):
        feed = {
            m.input_data: inputs,
            m.targets: targets,
            m.initial_state: state,
        }
        cost, state = session.run([m.cost, m.final_state], feed)
        total_cost += cost
        num_batches += 1
    return total_cost / num_batches
|
681a11e7cd52c0f690a3ad79e69fac4951906796
| 3,647,128
|
def table_to_dict(table):
    """Convert an Astropy Table into plain Python containers.

    Numpy column data becomes (possibly nested) lists, which makes
    multi-dimensional columns usable where ``Table.to_pandas`` fails::

        foo = Table.read('foo.fits')
        bar, multi = table_to_dict(foo)
        df = pd.DataFrame(bar, columns=bar.keys())

    Returns a ``(data, multi_cols)`` tuple: ``data`` maps column name to
    list values, ``multi_cols`` lists the names of 2-D columns.
    """
    data_by_name = {}
    wide_columns = []
    # Index into table.columns directly; this is the reliable way to walk
    # the columns in order.
    for idx in range(len(table.columns)):
        column = table.columns[idx]
        data_by_name[column.name] = table[column.name].tolist()
        if len(column.shape) == 2:
            wide_columns.append(column.name)
    return data_by_name, wide_columns
|
8ad9206222101bbd4d40913e3b43c8ffee9dd6ad
| 3,647,129
|
def margin_loss(y_true, y_pred):
    """
    Margin loss for Eq.(4). When y_true[i, :] contains not just one `1`, this loss should work too. Not test it.
    :param y_true: [None, n_classes]
    :param y_pred: [None, num_capsule]
    :return: a scalar loss value.
    """
    # Penalty for classes that are present but predicted below 0.9 ...
    present = y_true * K.square(K.maximum(0., 0.9 - y_pred))
    # ... plus a down-weighted penalty for absent classes predicted above 0.1.
    absent = 0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))
    return K.mean(K.sum(present + absent, 1))
|
252c26949b6255742df90cc9c83a586dfcbb8ac6
| 3,647,130
|
def strip_quotes(string):
    """Remove one matching pair of quotes from the ends of *string*.

    >>> strip_quotes('"fred"') == 'fred'
    True
    """
    if not string:
        return string
    head, tail = string[0], string[-1]
    quoted = head == tail and head in '"\''
    return string[1:-1] if quoted else string
|
7e10d37e5b5bb4569c88b4de17ffde31a4456e15
| 3,647,131
|
from xclim.core.indicator import registry
def generate_local_dict(locale: str, init_english: bool = False):
    """Generate a dictionary with keys for each indicators and translatable attributes.
    Parameters
    ----------
    locale : str
        Locale in the IETF format
    init_english : bool
        If True, fills the initial dictionary with the english versions of the attributes.
        Defaults to False.
    Returns
    -------
    dict
        Translation skeleton: an "attrs_mapping" entry plus one entry per
        registered indicator (and per output for multi-output indicators).
    """
    if locale in _LOCALES:
        # Start from the existing translation and drop entries for
        # indicators that are no longer in the registry.
        locname, attrs = get_local_dict(locale)
        for ind_name in attrs.copy().keys():
            if ind_name != "attrs_mapping" and ind_name not in registry:
                attrs.pop(ind_name)
    else:
        attrs = {}
    # Ensure the formatter-mapping skeleton exists without clobbering
    # any translations already present.
    attrs_mapping = attrs.setdefault("attrs_mapping", {})
    attrs_mapping.setdefault("modifiers", [""])
    for key, value in default_formatter.mapping.items():
        attrs_mapping.setdefault(key, [value[0]])
    eng_attr = ""
    for ind_name, indicator in registry.items():
        ind_attrs = attrs.setdefault(ind_name, {})
        # Indicator-level attributes: translatable names NOT tied to CF output
        # attributes live directly on the indicator object.
        for translatable_attr in set(TRANSLATABLE_ATTRS).difference(
            set(indicator._cf_names)
        ):
            if init_english:
                eng_attr = getattr(indicator, translatable_attr)
                if not isinstance(eng_attr, str):
                    eng_attr = ""
            ind_attrs.setdefault(f"{translatable_attr}", eng_attr)
        for cf_attrs in indicator.cf_attrs:
            # In the case of single output, put var attrs in main dict
            if len(indicator.cf_attrs) > 1:
                ind_attrs = attrs.setdefault(f"{ind_name}.{cf_attrs['var_name']}", {})
            # Output-level attributes: translatable names that ARE CF names
            # come from the per-output cf_attrs mapping.
            for translatable_attr in set(TRANSLATABLE_ATTRS).intersection(
                set(indicator._cf_names)
            ):
                if init_english:
                    eng_attr = cf_attrs.get(translatable_attr)
                    if not isinstance(eng_attr, str):
                        eng_attr = ""
                ind_attrs.setdefault(f"{translatable_attr}", eng_attr)
    return attrs
|
34398b297bb269df4668a09d055a69d409fe7bec
| 3,647,132
|
import csv
def generate_scheme_from_file(filename=None, fileobj=None, filetype='bson', alimit=1000, verbose=0, encoding='utf8', delimiter=",", quotechar='"'):
    """Generate a schema from up to `alimit` records of a BSON/JSONL/CSV file.

    Either `filename` or `fileobj` must be given. When `filename` is used the
    file is opened here and is now always closed, even if decoding raises
    (the original leaked the handle on errors). A caller-supplied `fileobj`
    is never closed. If `filetype` is falsy it is inferred from the filename
    extension.

    :param filename: path to the data file, or None when `fileobj` is given.
    :param fileobj: already-open file object, or None.
    :param filetype: one of 'bson', 'jsonl', 'csv'.
    :param alimit: maximum number of records to sample.
    :param encoding: text encoding for jsonl/csv files.
    :param delimiter: CSV field delimiter.
    :param quotechar: CSV quote character.
    :return: merged schema dict, or None when no records were read.
    """
    if not filetype and filename is not None:
        filetype = __get_filetype_by_ext(filename)
    if filename:
        # BSON is binary; the text formats honour the requested encoding.
        if filetype == 'bson':
            source = open(filename, 'rb')
        else:
            source = open(filename, 'r', encoding=encoding)
    else:
        source = fileobj
    datacache = []
    try:
        # Build one record iterator per format, then sample it uniformly.
        if filetype == 'bson':
            records = bson.decode_file_iter(source)
        elif filetype == 'jsonl':
            records = (orjson.loads(line) for line in source)
        elif filetype == 'csv':
            records = csv.DictReader(source, quotechar=quotechar,
                                     delimiter=delimiter, quoting=csv.QUOTE_ALL)
        else:
            records = iter(())
        for n, record in enumerate(records, start=1):
            if n > alimit:
                break
            datacache.append(record)
    finally:
        # Only close handles we opened ourselves.
        if filename:
            source.close()
    scheme = None
    for record in datacache:
        if scheme is None:
            scheme = get_schema(record)
        else:
            scheme = merge_schemes([scheme, get_schema(record)])
    return scheme
|
5a160dabd6141724075e3645342b804b556094d6
| 3,647,133
|
def getTests():
    """Return one sample document per entity type (Person, Location, Organization).

    Each sample is required to have at least 5 test sentences.

    Returns:
        [type] -- dict mapping 'person'/'location'/'organization' to a document.
    """
    samples = {}
    for label, entity_type in (("person", "Person"),
                               ("location", "Location"),
                               ("organization", "Organization")):
        # Entity type must match AND index 5 must exist => >= 6 sentences.
        samples[label] = gtWorkingCopy.find_one(
            {"$and": [{'entity_type': entity_type},
                      {'sentences.5': {'$exists': True}}]})
    return samples
|
63d1ff2e2f77f33ef634d495a1d318f688a1d51b
| 3,647,134
|
def ts_cor(a, b, min_sample = 3, axis = 0, data = None, state = None):
    """
    ts_cor(a) is equivalent to a.cor()[0][1]
    - supports numpy arrays
    - handles nan
    - supports state management
    :Example: matching pandas
    -------------------------
    >>> # create sample data:
    >>> from pyg_timeseries import *; import pandas as pd; import numpy as np
    >>> a = pd.Series(np.random.normal(0,1,10000), drange(-9999)); a[a>0.5] = np.nan
    >>> b = pd.Series(np.random.normal(0,1,10000), drange(-9999)); b[b>0.5] = np.nan
    >>> state = data = None; min_sample = 3; axis = 0
    >>> df = pd.concat([a,b], axis=1)
    >>> assert abs(df.corr()[0][1] - ts_cor(a, b))<1e-10
    :Example: slightly faster than pandas
    -------------------------------------
    %timeit ts_cor(a, b)
    245 µs ± 6.43 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
    %timeit df.corr()[0][1]
    575 µs ± 13 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
    :Example: numpy
    -----------------------------------
    >>> assert ts_cor(a.values, b.values) == ts_cor(a,b)
    :Example: state management
    -------------------------------------------
    >>> old = ts_std_(a.iloc[:2000])
    >>> new = ts_std(a.iloc[2000:], vec = old.vec)
    >>> assert new == ts_std(a)
    """
    # NOTE(review): `axis` and `data` are accepted but unused here —
    # presumably kept for signature compatibility with the other ts_*
    # functions; confirm before removing.
    # Fresh 6-slot accumulator when no prior state is supplied.
    state = state or dict(vec = _vec(a, None,6,0.))
    rtn = first_(_ts_cor(a, b, min_sample = min_sample, **state))
    return rtn
|
217b8c2196c270ffe22905884586b7466eb59c88
| 3,647,135
|
def get_robin_bndry_conditions(kappa,alpha,Vh):
    """Build Robin boundary conditions for the four unit-square edges.

    Do not pass element=function_space.ufl_element() — the forcing must stay
    a scalar, so only the element degree is forwarded.
    """
    edges = get_2d_unit_square_mesh_boundaries()
    degree = Vh.ufl_element().degree()
    conditions = []
    edge_idx = 0
    for phys_var in (0, 1):
        for normal in (1, -1):
            # Real and imaginary RHS parts share all other parameters.
            rhs_pair = [RobinBoundaryRHS(kappa, normal, alpha, part, phys_var,
                                         degree=degree)
                        for part in ('real', 'imag')]
            conditions.append(
                ['robin', edges[edge_idx], rhs_pair, [dl.Constant(0), alpha]])
            edge_idx += 1
    return conditions
|
65bb3f9d216ddc146866cf8aa570c2ee73e6d7f2
| 3,647,136
|
def get_prop_datatypes(labels, propnames, MB=None):
    """Retrieve the per-property output datatypes."""
    props = regionprops(labels, intensity_image=MB, cache=True)
    first_region = props[0]
    # int64 measurements map to 'int32', everything else to 'float'.
    return ['int32' if np.array(first_region[name]).dtype == 'int64' else 'float'
            for name in propnames]
|
b706e724bee867a23c290580f2b865d967946d1b
| 3,647,137
|
from typing import Tuple
def parse_html(html: str) -> Tuple[str, str]:
    """
    Parse *html*, strip the tags, and return its title and body text.

    Parameters
    ----------
    html : str
        The HTML text

    Returns
    -------
    Tuple[str, str]
        A tuple of (title, body)
    """
    document = pq(html)
    return document("title").text(), document("body").text()
|
a57e5ae50c8eae16c06333a3de5cc388524504ab
| 3,647,138
|
from .._common import header
def block(keyword, multi=False, noend=False):
    """Decorate block writing functions."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Header line; `noend` suppresses its trailing newline.
            first = "{:5}{}".format(keyword, header)
            if not noend:
                first += "\n"
            lines = [first]
            lines.extend(func(*args, **kwargs))
            # Multi-record blocks are terminated by a blank line.
            if multi:
                lines.append("\n")
            return lines
        return wrapper
    return decorator
|
5a0ec1cdec6c2f956d47d13f7beea41814a0db5d
| 3,647,139
|
def data(path):
    """Serve a file from the configured data directory.

    Parameters
    ----------
    path : str
        The relative path to the file in the data directory.

    Returns
    -------
    file : File
        The requested file.
    """
    directory = app.config['DATA_DIRECTORY']
    return send_from_directory(directory, path)
|
8f1cde6d248026fbadd2a81519f8f2beed7ab308
| 3,647,140
|
import requests
import logging
def search_software_fuzzy(query, max=None, csv_filename=None):
    """Returns a list of dict for the software results.

    Paginates through the search results until exhausted, until `max`
    results have been collected, or until a server-side HTTP error.

    :param query: search string passed to _search_software.
    :param max: optional cap on the number of results (shadows the builtin).
    :param csv_filename: when given, results are written to this CSV file
        and the function returns None instead of the list.
    """
    results = _search_software(query)
    num = 0
    softwares = []
    while True:
        for r in results:
            r = _remove_useless_keys(r)
            softwares.append(r)
        num += len(results)
        # quit if no results or results number reach the max
        if num == 0 or (max and num >= max):
            break
        # The last result of a page carries the cursor for the next page.
        query_string = _get_next_page_query(results[-1]['SearchResultDescr'])
        if not query_string:
            break
        try:
            results = _get_software_search_results(query_string)
        # Sometimes it responds 50x http error for some keywords,
        # but it's not the client's fault.
        except requests.exceptions.HTTPError as e:
            logging.warning(f'{e.response.status_code} HTTP Error occurred '
                            f'during pagination: {e.response.url}')
            break
    if csv_filename:
        _write_software_results(softwares, csv_filename)
        return
    return softwares
|
ef0124670b02b148f918d3f573e22ca3f646cf96
| 3,647,141
|
def fitCirc(x,y,xerr = None, rIni = None, aveR=False):
    """
    Performs a circle fit to data using least square residuals.
    Parameters
    ----------
    x : An array of length N.
    y : An array of length N.
    xerr : None or an array of length N,
        If provided, it is the standard-deviation of points.
        This vector, if given, will be used as weights in the fit.
    rIni : is a maximum radius of the circle to be fitted.
    aveR : if True, returns the average deviation from the fit.
    Returns
    -------
    xc, yc, R : center and the radius of the circle.
    errorbars : errorbars on the center x, y and the radius.
    aveResid : (optional) average residual
    """
    x=np.array(x)
    y=np.array(y)
    if x.size<2:
        print('fitCirc: not enough data points to fit circle')
        return
    x_m = np.mean(x)
    y_m = np.mean(y)
    # Bug fix: `xerr == None` compares elementwise for numpy arrays and then
    # raises "truth value of an array is ambiguous"; use an identity check.
    # The second clause matches the original list semantics: fall back to
    # unit weights when any entry of xerr is zero.
    if xerr is None or not np.all(xerr):
        xerr = np.ones(len(x))
    else:
        xerr = np.array(xerr)
        # NOTE(review): unreachable given the zero check above (kept from
        # the original); presumably meant to defuse zero-division weights.
        xerr[np.where(xerr == 0)] = 100
    def calc_R(xc, yc):
        """ calculate the distance of each 2D points from the center (xc, yc) """
        return np.sqrt((x-xc)**2 + (y-yc)**2)
    def resid(pars):
        """ calculate the algebraic distance between the 2D points and the mean circle centered at c=(xc, yc) """
        v = pars.valuesdict()
        xc, yc, radius = v['xc'], v['yc'], v['radius']
        Ri = calc_R(xc, yc)
        # Heavily penalize radii beyond the allowed maximum.
        if rIni is not None and radius > rIni:
            return 10000000*(Ri - radius)
        return (Ri - radius)/np.array(xerr)
    # Initial guesses: centroid of the points and its mean distance to them.
    center_estimate = x_m, y_m
    radius = calc_R(*center_estimate).mean()
    if rIni is not None and radius > rIni:
        radius = rIni
    params = Parameters()
    params.add('xc', x_m)
    params.add('yc', y_m)
    params.add('radius', radius, min=0)
    minzer = minimize(resid, params=params)
    res = minzer.params
    xc, yc, R = res['xc'].value, res['yc'].value, res['radius'].value
    errorbars = [res['xc'].stderr, res['yc'].stderr, res['radius'].stderr]
    aveResid = sum(abs(minzer.residual))/x.size
    if rIni is not None and R > rIni:
        print('radius greater than initial, resid=', aveResid)
    if aveR:
        return xc, yc, R, errorbars, aveResid
    else:
        return xc, yc, R, errorbars
|
fd384526b385f33a8ddd91c7c631afb204afa0db
| 3,647,142
|
def get_clients():
    """
    Return current clients
    ---
    tags:
      - clients
    operationId: listClients
    produces:
      - application/json
    schemes: ['http', 'https']
    responses:
      200:
        description: List of clients
        schema:
          type: array
          items:
            $ref: '#/definitions/Client'
    """
    # NOTE: the docstring above is Swagger/OpenAPI YAML consumed by the API
    # documentation generator — keep its structure intact when editing.
    return jsonify_obj(get_locker().get_clients())
|
e38c8da6094303415bc285f847b9915a4a55f7e7
| 3,647,143
|
def GetInstanceListForHypervisor(hname, hvparams=None,
                                 get_hv_fn=hypervisor.GetHypervisor):
  """Provides a list of instances of the given hypervisor.
  @type hname: string
  @param hname: name of the hypervisor
  @type hvparams: dict of strings
  @param hvparams: hypervisor parameters for the given hypervisor
  @type get_hv_fn: function
  @param get_hv_fn: function that returns a hypervisor for the given hypervisor
    name; optional parameter to increase testability
  @rtype: list
  @return: a list of all running instances on the current node
    - instance1.example.com
    - instance2.example.com
  """
  # NOTE: Python 2 code ("except X, err" syntax) — keep it that way unless
  # the whole project is ported.
  try:
    return get_hv_fn(hname).ListInstances(hvparams=hvparams)
  except errors.HypervisorError, err:
    # _Fail converts the hypervisor error into an RPC-level failure,
    # preserving the traceback (exc=True).
    _Fail("Error enumerating instances (hypervisor %s): %s",
          hname, err, exc=True)
|
b42e19af31d17ff6ca2343ce274572f15950c8d5
| 3,647,144
|
def is_nersc_system(system = system()):
    """Whether the given system name is a supported NERSC system.

    Note: the default is evaluated once at import time, capturing whatever
    ``system()`` reported then.
    """
    # Membership directly on the dict; `.keys()` added an extra view for nothing.
    return (system is not None) and (system in _system_params)
|
6ac968e45f7d586d56b28578eb685f705abafe0f
| 3,647,145
|
def is_string_type_suspicious_score(confidence_score, params):
    """
    determine if string type confidence score is suspicious in reputation_params
    """
    # Integer scores are handled elsewhere; only string levels apply here.
    if isinstance(confidence_score, int):
        return False
    threshold = CONFIDENCE_LEVEL_PRIORITY.get(
        params['override_confidence_level_suspicious'], 10)
    level = CONFIDENCE_LEVEL_PRIORITY.get(confidence_score.lower(), -1)
    return threshold <= level
|
9282ca6e58638fb240ca4c0b752a6dddbaa05255
| 3,647,146
|
def align_embeddings(base_embed, other_embed, sample_size=1):
    """Fit the regression that aligns model1 and model2."""
    mapping = fit_w2v_regression(base_embed, other_embed, sample_size)
    return apply_w2v_regression(base_embed, mapping)
|
3e017f1a0049cac40f6f7311be2dd531895fc436
| 3,647,147
|
def data_unmerged():
    """
    Load HEWL diffraction data from APS 24-ID-C
    """
    return load_dataset(["data", "data_unmerged.mtz"])
|
f7fb453f617191e19f948fc9097d10bc104478b3
| 3,647,149
|
def copy(object, *args):
    """Copy the object."""
    wrapper = wrapCopy(object)
    try:
        # Not every wrapper has a name; rename only those that do.
        wrapper.name = wrapper.name + "_Copy"
    except AttributeError:
        pass
    return wrapper.createAndFillObject(None, *args)
|
fd5e7dfb3e5d6c920ebcb73477d19c9a8be152d3
| 3,647,150
|
def convert_to_n0(n):
    """
    Convert count vector to vector of "greater than" counts.
    Parameters
    -------
    n : 1D array, size K
        each entry k represents the count of items assigned to comp k.
    Returns
    -------
    n0 : 1D array, size K
        each entry k gives the total count at indices strictly above k,
        i.e. n0[k] = np.sum(n[k+1:]) (so the last entry is always 0).
    Example
    -------
    >> convert_to_n0([1., 3., 7., 2])
    [12, 9, 2, 0]
    """
    n = np.asarray(n)
    # Total minus the inclusive running sum leaves exactly the tail beyond k.
    return n.sum() - np.cumsum(n)
|
c75ce9e68bc949ef9fed55283c4dd2a424acadc7
| 3,647,151
|
def application(environ, start_response):
    """Serve the button HTML."""
    with open('wsgi/button.html') as page:
        body = page.read()
    headers = [
        ('Content-Type', 'text/html'),
        ('Content-Length', str(len(body))),
    ]
    start_response('200 OK', headers)
    return [body.encode('utf-8')]
|
97f1f793f234dbd3c29e9c4a791a224ba32c984b
| 3,647,152
|
def get_handler():
    """
    Return the handler configured by the most recent call to
    :func:`configure`.
    If :func:`configure` has not yet been called, this returns ``None``.
    """
    # `current_handler` is module-level state written by configure().
    return current_handler
|
0508343f6775544204de9a35186d92ddf829533f
| 3,647,153
|
def display_rf_feature_importance(cache, save_location: str = None):
    """
    Display which pixels have the most influence in the model's decision,
    based on sklearn.ensemble.RandomForestClassifier's feature_importances_
    array reshaped onto the 28x28 image grid.
    Parameters
    ----------
    save_location : str
        the location to save the figure on disk. If None, the plot is displayed on runtime and not saved.
    cache : dict
        the cache dict returned by the classifier; must contain a fitted
        model under the 'model' key.
    Returns
    -------
    matplotlib.pyplot.figure :
        the figure
    """
    figure = plt.figure()
    plt.title("Pixel importance in random forest classification")
    importances = cache['model'].feature_importances_.reshape((28, 28))
    plt.imshow(importances, extent=[0, 28, 28, 0])
    plt.colorbar()
    if save_location is not None:
        plt.savefig(fname=save_location)
    else:
        plt.show()
    return figure
|
86fc921f4f3ffcd004a2995b0a82f69ba3088e5a
| 3,647,154
|
def first_order(A, AB, B):
    """
    First order estimator following Saltelli et al. 2010 CPC, normalized by
    the sample variance of the pooled A and B samples.
    """
    numerator = np.mean(B * (AB - A), axis=0)
    pooled = np.concatenate((A, B), axis=0)
    return numerator / np.var(pooled, axis=0)
|
3a94fdcf17a10242fb07d60e1468a21e17182a25
| 3,647,155
|
import array
def mod_df(arr,timevar,istart,istop,mod_name,ts):
    """
    return time series (DataFrame) from model interpolated onto uniform time base

    :param arr: model values aligned with timevar.points[istart:istop].
    :param timevar: time coordinate object exposing .points and .units.num2date
        (presumably an Iris/cf-units style coordinate — confirm).
    :param istart/istop: slice bounds into the time coordinate.
    :param mod_name: column name for the resulting DataFrame.
    :param ts: target uniform-time DataFrame to align against.
    """
    t=timevar.points[istart:istop]
    jd = timevar.units.num2date(t)
    # eliminate any data that is closer together than 10 seconds
    # this was required to handle issues with CO-OPS aggregations, I think because
    # they use floating point time in hours, which is not very accurate, so the FMRC
    # aggregation is aggregating points that actually occur at the same time
    # NOTE(review): diff/array/where/isfinite come from a star import
    # (presumably numpy/pylab) — confirm at file top.
    dt =diff(jd)
    s = array([ele.seconds for ele in dt])
    ind=where(s>10)[0]
    arr=arr[ind+1]
    jd=jd[ind+1]
    b = pd.DataFrame(arr,index=jd,columns=[mod_name])
    # eliminate any data with NaN
    b = b[isfinite(b[mod_name])]
    # interpolate onto uniform time base, fill gaps up to: (10 values @ 6 min = 1 hour)
    c = pd.concat([b, ts],axis=1).interpolate(limit=10)
    return c
|
6740d74bcfa82a3f813b991b8593b9c2cd5fddb5
| 3,647,156
|
def hydrate_reserve_state(data=None):
    """
    Given a dictionary, allow the viewmodel to hydrate the data needed by this view.

    :param data: optional dict of values for the view; defaults to empty.
    :return: the hydrated State viewmodel.
    """
    # A mutable default ({}) is shared across calls and could leak state
    # between requests; build a fresh dict per call instead.
    if data is None:
        data = {}
    vm = State()
    return vm.hydrate(data)
|
203c9c4143713cf8f386a2ac95d91b50a9525a3c
| 3,647,157
|
def get_devstudio_versions ():
    """Get list of devstudio versions from the Windows registry. Return a
    list of strings containing version numbers; the list will be
    empty if we were unable to access the registry (eg. couldn't import
    a registry-access module) or the appropriate registry keys weren't
    found."""
    # _can_read_reg is set at import time depending on whether a registry
    # module could be loaded.
    if not _can_read_reg:
        return []
    K = 'Software\\Microsoft\\Devstudio'
    L = []
    # Search every likely registry hive for the Devstudio key.
    for base in (HKEY_CLASSES_ROOT,
                 HKEY_LOCAL_MACHINE,
                 HKEY_CURRENT_USER,
                 HKEY_USERS):
        try:
            k = RegOpenKeyEx(base,K)
            i = 0
            while 1:
                try:
                    p = RegEnumKey(k,i)
                    # Keep only subkeys that look like version numbers,
                    # de-duplicated across hives.
                    if p[0] in '123456789' and p not in L:
                        L.append(p)
                except RegError:
                    # RegEnumKey raises when the index runs past the last
                    # subkey; that ends the enumeration for this hive.
                    break
                i = i + 1
        except RegError:
            # Hive has no Devstudio key — try the next one.
            pass
    # Newest version first.
    L.sort()
    L.reverse()
    return L
|
ad02e38e216649cb0f29cfffe256aee7b79d80ea
| 3,647,158
|
def mapDictToProfile(wordD, tdm):
    """
    Map a document given as a {word: wordcount} dictionary onto a profile
    vector over the trained term-document matrix vocabulary.
    Parameters
    ----------
    wordD : Dictionary
        Dictionary where the keys are words, and the values are the corrosponding word
        count
    tdm : termDocMatrix object
        The trained and factorized term-document matrix structure
    Returns
    -------
    p : numpy array
        The mapped vector profile for the string
    """
    total = float(sum(wordD.values()))
    # Term frequency for each vocabulary term; unseen terms stay at zero.
    tf = np.array([wordD[term] / total if term in wordD else 0.0
                   for term in tdm.terms])
    # Apply the idf weights learned by the term-document matrix.
    return np.multiply(tdm.idfs, tf).transpose()
|
df178e7a6857ff9f85caedbfb5abac77e4f04d55
| 3,647,159
|
from typing import VT
from typing import Tuple
from typing import List
def _to_tikz(g: BaseGraph[VT,ET],
    xoffset:FloatInt=0, yoffset:FloatInt=0, idoffset:int=0) -> Tuple[List[str],List[str]]:
    """Converts a ZX-graph ``g`` to a string representing a tikz diagram.
    The optional arguments are used by :func:`to_tikz_sequence`.

    Returns a ``(vertex_lines, edge_lines)`` pair of tikz statements.
    """
    verts = []
    # Highest node id emitted so far; extra Hadamard nodes continue from it.
    maxindex = idoffset
    for v in g.vertices():
        p = g.phase(v)
        ty = g.type(v)
        # Pick the tikz style from the vertex type; phased spiders get the
        # "phase dot" variant.
        if ty == VertexType.BOUNDARY:
            style = "none"
        elif ty == VertexType.H_BOX:
            style = "hadamard"
        else:
            style = 'Z' if ty==VertexType.Z else 'X'
            if p != 0: style += " phase"
            style += " dot"
        # Default phases (1 for H-boxes, 0 otherwise) are not drawn.
        if (ty == VertexType.H_BOX and p == 1) or (ty != VertexType.H_BOX and p == 0):
            phase = ""
        else:
            # Render the phase as a multiple of pi, dropping 1-coefficients.
            ns = '' if p.numerator == 1 else str(p.numerator)
            dn = '' if p.denominator == 1 else str(p.denominator)
            if dn: phase = r"$\frac{%s\pi}{%s}$" % (ns, dn)
            else: phase = r"$%s\pi$" % ns
        x = g.row(v) + xoffset
        y = - g.qubit(v) - yoffset
        s = "        \\node [style={}] ({:d}) at ({:.2f}, {:.2f}) {{{:s}}};".format(style,v+idoffset,x,y,phase) # type: ignore
        verts.append(s)
        maxindex = max([v+idoffset,maxindex]) # type: ignore
    edges = []
    for e in g.edges():
        v,w = g.edge_st(e)
        ty = g.edge_type(e)
        s = "        \\draw "
        if ty == EdgeType.HADAMARD:
            if g.type(v) != VertexType.BOUNDARY and g.type(w) != VertexType.BOUNDARY:
                # Interior Hadamard edge: drawn with the dedicated edge style.
                s += "[style=hadamard edge] "
            else:
                # Hadamard edge touching a boundary: insert an explicit
                # Hadamard node at the midpoint instead of styling the edge.
                x = (g.row(v) + g.row(w))/2.0 +xoffset
                y = -(g.qubit(v)+g.qubit(w))/2.0 -yoffset
                t = "        \\node [style=hadamard] ({:d}) at ({:.2f}, {:.2f}) {{}};".format(maxindex+1, x,y)
                verts.append(t)
                maxindex += 1
        s += "({:d}) to ({:d});".format(v+idoffset,w+idoffset) # type: ignore
        edges.append(s)
    return (verts, edges)
|
0c5fab1fcd0f5db9e8b7d267de5e4b5ea2444046
| 3,647,160
|
def _fix_server_adress(raw_server):
""" Prepend http:// there. """
if not raw_server.startswith("http://"):
raw_server = "http://" + raw_server
return raw_server
|
64171be5033930fd5ecb3cd275cc0d859b7e6ca0
| 3,647,161
|
def _parse_output_keys(val):
"""Parse expected output keys from string, handling records.
"""
out = {}
for k in val.split(","):
# record output
if ":" in k:
name, attrs = k.split(":")
out[name] = attrs.split(";")
else:
out[k] = None
return out
|
abd739026574b1a3fa87c42d2719d172e36a1c4a
| 3,647,162
|
import sqlite3
def check(sync_urls: list, cursor: sqlite3.Cursor, db: sqlite3.Connection, status: str):
    """Checking update in the back.
    Args:
        sync_urls: URL(s) to be checked as a list
        cursor: Cursor object of sqlite3
        db: Connection object of sqlite3
        status: 'viewed' or 'unviewed'
    Return:
        Set of update links (absolute URLs); new links are also persisted
        to the `links` table with the given status.
    """
    out_updates = []
    for sync_url in sync_urls:
        links_fetch = []
        links_from_db = []
        https_updates = []
        sync_url = sync_url.strip("/")
        # Links currently published at the URL...
        f_links = fetch(sync_url)  # .split(",")
        for f_link in set(f_links):
            links_fetch.append(f_link.strip())
        # ...versus links already recorded for this URL in the database.
        db_links = cursor.execute(
            "SELECT link FROM links JOIN urls ON links.url_id=urls.url_id WHERE urls.url=?",
            (sync_url,),
        )
        for link in db_links:
            links_from_db.append(link[0])
        # Anything fetched but not yet stored counts as an update.
        updates = [x for x in links_fetch if x not in set(links_from_db)]
        url_split = sync_url.split("/")
        for update in updates:
            # Build an absolute URL for each update link.
            if sync_url in update:
                https_updates.append(update)
            elif len(url_split) > 3:
                # Deep sync URL: resolve relative links against its origin
                # (scheme + host) rather than the full path.
                url_split = url_split[:3]
                https_updates.append("/".join(url_split) + "/" + update.strip("/"))
            else:
                https_updates.append(sync_url + "/" + update.strip("/"))
        url_id = cursor.execute(
            "SELECT url_id FROM urls WHERE url=?", (sync_url,)
        ).fetchone()[0]
        # Persist the new links (stored in their original, possibly
        # relative, form) and commit once per URL.
        for update in updates:
            items = (url_id, update, status)
            cursor.execute(
                "INSERT INTO links (url_id, link, status) VALUES (?, ?, ?)", items
            )
        db.commit()
        out_updates.extend(https_updates)
    return set(out_updates)
|
a9d7160d7f08d51b4ef596692fa10136f1f21375
| 3,647,163
|
from typing import Dict
from typing import Tuple
from typing import OrderedDict
from typing import Type
import torch
def _get_output_structure(
    text: str,
    model: PreTrainedModel,
    tokenizer: PreTrainedTokenizer,
    tokenizer_args: Dict,
) -> Tuple[OrderedDict, Type]:
    """Function needed for saving in a dictionary the output structure of the
    transformers model.

    Runs a single forward pass on *text* and records, per output key,
    either None (for plain tensors) or the recursively-computed size of
    nested outputs. Also returns the concrete output class so it can be
    reconstructed later.
    """
    encoded_input = tokenizer([text], **tokenizer_args)
    output = model(**encoded_input)
    structure = OrderedDict()
    for key, value in output.items():
        if isinstance(value, torch.Tensor):
            # Plain tensors need no recorded structure.
            structure[key] = None
        else:
            # Nested containers (tuples of tensors etc.): record their shape.
            size = _get_size_recursively(value)
            structure[key] = size
    return structure, type(output)
|
0f268b3fb9208b8f447b0c030744b9ce310049e6
| 3,647,164
|
def encode_board(board):
    """ Encode the 2D board list to a 64-bit integer, one nibble per cell. """
    encoded = 0
    for row in board.board:
        for cell in row:
            # Empty cells contribute a zero nibble.
            encoded = (encoded << 4) + (cell.val if cell is not None else 0)
    return encoded
|
2c85964902dc3b2d097e30b71f11e7c17b80297a
| 3,647,165
|
def get_symbol(i):
    """Get the symbol corresponding to int ``i`` - runs through the usual 52
    letters before resorting to unicode characters, starting at ``chr(192)``.
    Examples
    --------
    >>> get_symbol(2)
    'c'
    >>> oe.get_symbol(200)
    'Ŕ'
    >>> oe.get_symbol(20000)
    '京'
    """
    # Indices 0..51 map into the base letter table; beyond that, shift into
    # the unicode range so that index 52 lands on chr(192).
    return einsum_symbols_base[i] if i < 52 else chr(i + 140)
|
e6e9a91fa48e04ed591b22fba62bf2ba6fd5d81f
| 3,647,166
|
def longest_common_substring(string1, string2):
    """
    Return the longest substring common to both input strings
    (dynamic programming over common suffix lengths).
    """
    n1, n2 = len(string1), len(string2)
    # table[x][y] = length of the common suffix of string1[:x] and string2[:y]
    table = [[0] * (n2 + 1) for _ in range(n1 + 1)]
    best_len = 0
    best_end = 0
    for x in range(1, n1 + 1):
        for y in range(1, n2 + 1):
            if string1[x - 1] != string2[y - 1]:
                continue
            table[x][y] = table[x - 1][y - 1] + 1
            if table[x][y] > best_len:
                best_len = table[x][y]
                best_end = x
    return string1[best_end - best_len: best_end]
|
f567c629f5bd02143f0ed6bbbdc11f0e59e5f4bd
| 3,647,167
|
def elapsed_time_id(trace, event_index: int):
    """Calculate elapsed time for the event at *event_index* in *trace*.

    Out-of-range indices (produced by 0-padding) fall back to the last
    event in the trace.

    :param trace:
    :param event_index:
    :return:
    """
    try:
        event = trace[event_index]
    except IndexError:
        # catch for 0 padding: use the final event instead.
        event = trace[-1]
    return elapsed_time(trace, event)
|
7e94531f13458fc32ca5d971178b79fc13aa65f8
| 3,647,168
|
def build_candidate_digest(proof, leaf_hash):
    """
    Build the candidate digest representing the entire ledger from the Proof hashes.
    :type proof: dict
    :param proof: The Proof object.
    :type leaf_hash: bytes
    :param leaf_hash: The revision hash to pair with the first hash in the Proof hashes list.
    :rtype: bytes
    :return: The calculated root hash.
    """
    internal_hashes = parse_proof(proof)
    return calculate_root_hash_from_internal_hashes(internal_hashes, leaf_hash)
|
1434cf5e1da9edbd6b41caacd42a67df235267f5
| 3,647,169
|
from typing import Iterable
import torch
def confusion_matrix_eval(cnn, data_loader):
    """Retrieves false positives and false negatives for further investigation
    Parameters
    ----------
    cnn : torchvision.models
        A trained pytorch model.
    data_loader : torch.utils.data.DataLoader
        A dataloader iterating through the holdout test sample.
    Returns
    -------
    dict
        Dictionary containing cases model classified as false positives and false negatives.
    Raises
    ------
    ValueError
        Raised if data_loader is not iterable.
    """
    if not isinstance(data_loader, Iterable):
        raise ValueError("data_loader is not iterable")
    fp = []
    fn = []
    cnn.eval()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    with torch.no_grad():
        for i, (inputs, classes) in enumerate(data_loader):
            inputs = inputs.to(device)
            classes = classes.to(device)
            # Binary classification via a sigmoid threshold at 0.5.
            outputs = cnn(inputs).flatten()
            preds = torch.sigmoid(outputs) > 0.5
            # j is the position within the current batch; combined with the
            # batch index it recovers the sample's position in the dataset.
            # NOTE(review): assumes the loader iterates dataset.samples in
            # order without shuffling or dropping — confirm loader config.
            j = 0
            for t, p in zip(classes.view(-1), preds.view(-1)):
                # [true label, predicted label] == [0, 1] -> false positive
                if [float(t.cpu().numpy()), float(p.long().cpu().numpy())] == [
                    0.0,
                    1.0,
                ]:
                    fp.append(
                        data_loader.dataset.samples[(i * data_loader.batch_size + j)][1]
                    )
                # [true label, predicted label] == [1, 0] -> false negative
                elif [float(t.cpu().numpy()), float(p.long().cpu().numpy())] == [
                    1.0,
                    0.0,
                ]:
                    fn.append(
                        data_loader.dataset.samples[(i * data_loader.batch_size + j)][1]
                    )
                j += 1
    return {"false_positives": fp, "false_negatives": fn}
|
411a7d95f7713ff00dbcb7b25db7bd497f427593
| 3,647,170
|
import hashlib
def match_by_hashed_faceting(*keys):
    """Match method 3 - Hashed Faceted Search"""
    # Keep the first key per known lookup attribute, in canonical attribute
    # order, so the resulting hash is stable regardless of argument order.
    facets = []
    for attr in __lookup_attrs__:
        for key in keys:
            if key[0] == attr:
                facets.append(key)
                break
    digest = hashlib.sha256(str(facets).encode('utf-8')).hexdigest()
    set_key = keynamehelper.create_key_name("hfs", digest)
    return list(redis.sscan_iter(set_key))
|
b2b849583e732747b42a4d4e7ec56c1090ddb1a8
| 3,647,171
|
def get_derivative_density_matrix(mat_diag,mat_Rabi,sigma_moins_array,**kwargs):
    r"""
    Returns function for t-evolution using the numerical integration of the density matrix
    \dot{\rho}=-i(H_eff \rho-\rho H_eff^{\dagger})
    +\Gamma \sum_j \sigma_j^- \rho \sigma_j^+

    kwargs:
        tunneling: 'on' (default) includes the Rabi coupling in H_eff,
                   'off' keeps only the diagonal part.
    """
    dim=len(mat_diag)
    tunneling=kwargs.get('tunneling','on')
    # Build the (time-independent) effective Hamiltonian once, outside the
    # closure: the two branches previously duplicated the whole closure and
    # reassembled the sparse matrix on every derivative evaluation.
    if tunneling=='off':
        H_eff = csr_matrix(square_mat(mat_diag))
    else:
        H_eff = csr_matrix(mat_Rabi+square_mat(mat_diag))
    H_eff_dag = (H_eff.conj()).transpose()
    def L_on_rho_loc(tt,yy):
        rho = np.reshape(yy, (dim,dim))
        # Non-Hermitian evolution plus the recycling (jump) term.
        deriv = -1j*(H_eff @ rho - rho @ H_eff_dag) \
            + settings.Gamma*sum(sig @ rho @ (sig.transpose()) for sig in sigma_moins_array)
        return np.reshape(deriv, dim*dim)
    return L_on_rho_loc
|
1dc1321a6578b6bd9b3c5e937f9c7ed8a787f5a6
| 3,647,172
|
def area_of_polygon(polygon):
    """
    Returns the area of an OpenQuake polygon in square kilometres
    """
    # Centre the projection on the polygon's mean lon/lat so distortion is minimal.
    ref_lon = np.mean(polygon.lons)
    ref_lat = np.mean(polygon.lats)
    # Project to a Lambert azimuthal equal-area plane, where planar area is
    # a faithful measure of surface area.
    xs, ys = lonlat_to_laea(polygon.lons, polygon.lats, ref_lon, ref_lat)
    # Build a shapely polygon in projected coordinates and take its area.
    return geometry.Polygon(zip(xs, ys)).area
|
bc57bd58b2ae64b33e34b1bee4582e3c6733fe4d
| 3,647,174
|
def build_dataset_values(claim_object, data_value):
    """Build results with different datasets.

    Parameters:
        claim_object (dict): Object to modify and add to rows.
        data_value (dict): Result object with "type" and "value" keys.

    Returns:
        The claim_object, modified in place according to data_value["type"]:
        - "globecoordinate": sets claim_object["str"] to "latitude,longitude"
        - "time": sets claim_object["date"] to the calendar-date part
        - "string": sets claim_object["str"] to the raw value
        - any other type: claim_object is returned unchanged
    """
    value_type = data_value["type"]
    if value_type == "globecoordinate":
        coords = data_value["value"]
        claim_object["str"] = str(coords["latitude"]) + "," + str(coords["longitude"])
    elif value_type == "time":
        # Wikidata-style timestamps look like "+2001-12-31T00:00:00Z"; BCE
        # dates carry a leading "-" instead. The old split("+")[1] raised
        # IndexError on BCE values — strip only a leading "+" so the sign of
        # negative years is preserved.
        claim_object["date"] = data_value["value"]["time"].split("T")[0].lstrip("+")
    elif value_type == "string":
        claim_object["str"] = data_value["value"]
    return claim_object
|
f3d267a4e9ac099f6d2313deffb2f45d35b90217
| 3,647,175
|
def gen_csrv_msome(shape, n_parts, mic_rad, min_ip_dst):
    """
    Generates a list of 3D coordinates and rotations in a CSRV pattern
    :param shape: tomogram shape
    :param n_parts: number of particles to try to generate
    :param mic_rad: microsome radius
    :param min_ip_dst: minimum interparticle distance
    :return: two output lists; coordinates and rotations
    """
    # Initialization
    count = 0
    min_ip_dst_2 = float(min_ip_dst) ** 2
    locs, rots = list(), list()
    # np.float / np.int aliases were removed in modern NumPy; use builtins.
    mic_cent = .5 * np.asarray(shape, dtype=float)
    mic_rad_f = float(mic_rad)
    max_n_tries = int(np.prod(np.asarray(shape, dtype=int)))
    # Loop for particles
    n_try, p_end = 0, False
    while not p_end:
        # Draw an isotropic random direction and project onto the microsome sphere.
        p_cent = np.random.randn(1, 3)[0]
        p_cent *= mic_rad_f / np.linalg.norm(p_cent)
        p_cent += mic_cent
        # Check that the particle is within the tomogram
        if (0 <= p_cent[0] < shape[0]) and (0 <= p_cent[1] < shape[1]) \
                and (0 <= p_cent[2] < shape[2]):
            accept = True
            if len(locs) > 0:
                # Reject candidates overlapping already-inserted particles.
                # BUG FIX: the original used the whole array of squared
                # distances in a boolean context (`if np.sum(..., axis=1) >= t`),
                # which raises "truth value of an array is ambiguous" once two
                # or more particles are placed; require the MINIMUM squared
                # distance to clear the threshold instead.
                hold_dst = p_cent - np.asarray(locs, dtype=float)
                accept = np.min(np.sum(hold_dst * hold_dst, axis=1)) >= min_ip_dst_2
            if accept:
                locs.append(p_cent)
                # Particle orientation: Z-axis along the outward radial vector.
                tilt, psi = vect_to_zrelion(p_cent - mic_cent, mode='active')[1:]
                rots.append((360. * np.random.rand() - 180., tilt, psi))
                count += 1
        # Ensure termination
        n_try += 1
        if (n_try > max_n_tries) or (count >= n_parts):
            p_end = True
    return locs, rots
|
ba0033859e4e18b55d877a6db957b64674171d74
| 3,647,177
|
def electra_model(request):
    """Exposes the command-line option to a test case."""
    # Guard-clause form: hand back the configured path, otherwise skip.
    model_path = request.config.getoption("--electra_model")
    if model_path:
        return model_path
    pytest.skip("No --electra_model given")
|
9273ddfe253c7dc0ab3307b4fb6f009aa806821b
| 3,647,178
|
def By_2d_approximation(x, w, d, j):
    """Approximation of By_surface valid except near edges of slab.

    :param x: horizontal position across the slab (same units as w); valid for |x| < w/2
    :param w: slab width
    :param d: slab thickness
    :param j: current density
    :return: approximate surface field By = 2*(mu0/4pi)*j*d*ln((w/2+x)/(w/2-x))
    """
    mu0_over_4pi = 1e-7  # mu_0 / (4*pi) in SI units
    # Use the named constant instead of the magic literal 2e-7 (= 2 * mu0/4pi),
    # which left mu0_over_4pi defined but unused.
    return 2.0 * mu0_over_4pi * j * d * np.log((w / 2 + x) / (w / 2 - x))
|
2d64957d0dbec677b6d8b6e825c18ac32b25f7e7
| 3,647,179
|
from typing import Dict
from pathlib import Path
from typing import List
def read_lists(paths: Dict[str, Path]) -> Dict[str, List[str]]:
    """Return a dictionary of song lists read from file.

    Arguments:
        paths {Dict[str, Path]} -- A dictionary of type returned by find_paths.

    Returns:
        Dict[str, List[str]] -- The keys are a string song list id ('1' to '6' or 'F'),
        and the value lists contains the song keys to be written to that list.

    Raises:
        TypeError -- If a file does not contain a JSON list of strings.
    """
    sl_dict: Dict[str, List[str]] = dict()
    for song_list_id, file_path in paths.items():
        # (Fixed: the original log message had a doubled quote after the name.)
        logger.log_this(
            f"Reading file '{file_path.name}' for song list '{song_list_id}'."
        )
        with open(file_path, "rt", encoding="locale") as file_handle:
            song_list = simplejson.load(file_handle)
        # Structure checks - could have used a schema for this.
        # NOTE: a song key consisting purely of digits would be parsed as a
        # JSON number and fail the str check below; tidy up if that happens.
        if not isinstance(song_list, list):
            raise TypeError(
                f"Invalid format in file '{file_path.name}'."
                f"\n This should be a JSON list of strings, but I found "
                f"a {type(song_list)}."
            )
        for val in song_list:
            if not isinstance(val, str):
                raise TypeError(
                    f"Invalid song list member in file '{file_path.name}'."
                    f"\n This should be a JSON list of strings, but I found "
                    f"a member with {type(val)}."
                )
        # Just to be sure, clean out white space and empty strings silently.
        sl_dict[song_list_id] = [x for x in song_list if x.strip() != ""]
    logger.log_this("All song list files passed structure tests.")
    return sl_dict
|
13d6618293e17e5a7388ca9f3d44ffffe913f482
| 3,647,180
|
def learningCurve(theta, X_train, y_train, X_cv, y_cv, lambda_param):
    """Compute training and cross-validation cost for growing training-set sizes.

    For each m = 1..len(y_train): fit on the first m training examples
    (warm-starting from the theta produced at the previous size), then record
    the regularized cost on those m examples and on the full CV set.

    :param theta: initial parameter vector
    :param X_train: training design matrix
    :param y_train: training targets (rows align with X_train)
    :param X_cv: cross-validation design matrix
    :param y_cv: cross-validation targets
    :param lambda_param: regularization strength passed through to fit/cost
    :return: (J_train, J_cv) lists of costs, one entry per training-set size
    """
    m_total = y_train.shape[0]
    J_train, J_cv = [], []
    for m in range(1, m_total + 1):
        X_sub, y_sub = X_train[:m, :], y_train[:m, :]
        # Fit on the first m examples; theta carries over between sizes.
        theta, _ = gradientDescent(theta, X_sub, y_sub, 0.001, 3000, lambda_param)
        J_train.append(linearRegressionCostFunction(theta, X_sub, y_sub, lambda_param))
        J_cv.append(linearRegressionCostFunction(theta, X_cv, y_cv, lambda_param))
    return J_train, J_cv
|
c15f5b3af34beb4c20982c2b339919c50b3336b1
| 3,647,181
|
def reduce_labels(y):
    """Reduce themes and disciplines to their top hierarchy level.

    Each label in every label set of ``y`` is mapped to its top-level key in
    the module-level ``themes_dic`` and ``disciplines_dic``: a label that IS a
    top-level key maps to itself; a label found inside a key's value
    collection maps to that key. Results are de-duplicated per example.

    :param y: iterable of label lists, one list per example
    :return: tuple (labels, themes, disciplines) — per-example de-duplicated
        lists of combined labels, theme keys, and discipline keys
    """
    labels = []  # new y: combined top-level labels per example
    themes = []
    disciplines = []

    def _collect_top_level(element, mapping, combined, bucket):
        # Map `element` to its top-level key in `mapping`, recording each hit
        # in both the combined list and the category-specific bucket.
        # Unmatched dictionary entries are simply skipped. (The original code
        # had a no-op tuple expression here — apparently a forgotten print —
        # which has been removed as dead code.)
        for key, members in mapping.items():
            if element == key:
                combined.append(element)
                bucket.append(element)
            elif element in members:
                combined.append(key)
                bucket.append(key)

    for elements in y:
        tmp_all_labels = []
        tmp_themes = []
        tmp_disciplines = []
        for element in elements:
            _collect_top_level(element, themes_dic, tmp_all_labels, tmp_themes)
            _collect_top_level(element, disciplines_dic, tmp_all_labels, tmp_disciplines)
        labels.append(list(set(tmp_all_labels)))
        themes.append(list(set(tmp_themes)))
        disciplines.append(list(set(tmp_disciplines)))
    return labels, themes, disciplines
|
9dc5d3b1d07f7fd7f72911b891533d90bae4ad63
| 3,647,182
|
def api_get_categories(self):
    """
    Gets a list of all the categories.

    Returns an HttpResponse whose body is a flat comma-separated sequence of
    base64-encoded fields: name, description, name, description, ...
    """
    categories = TestCategory.objects.all()
    # Join the encoded fields once instead of O(n^2) string concatenation with
    # a trailing rstrip(','): rstrip could over-strip when the final field
    # base64-encodes to an empty string.
    parts = []
    for cat in categories:
        parts.append(b64(cat.name))
        parts.append(b64(cat.description))
    return HttpResponse(",".join(parts))
|
f7c523437c8f9d538bb4c5411232458ae3e10450
| 3,647,183
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.