| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
import os.path as osp
import mmcv
def dump_yaml_and_check_difference(obj, filename, sort_keys=False):
"""Dump object to a yaml file, and check if the file content is different
from the original.
Args:
obj (any): The python object to be dumped.
filename (str): YAML filename to dump the object to.
sort_keys (bool): Whether to sort dictionary keys when dumping.
Returns:
bool: True if the dumped content differs from the existing file (the file is rewritten in that case).
"""
str_dump = mmcv.dump(obj, None, file_format='yaml', sort_keys=sort_keys)
if osp.isfile(filename):
file_exists = True
with open(filename, 'r', encoding='utf-8') as f:
str_orig = f.read()
else:
file_exists = False
str_orig = None
if file_exists and str_orig == str_dump:
is_different = False
else:
is_different = True
with open(filename, 'w', encoding='utf-8') as f:
f.write(str_dump)
return is_different
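A brief usage sketch (hypothetical filename and payload), assuming `mmcv` and `osp` are imported as above:
# Hypothetical usage: rewrite config.yaml only when the serialized content changed.
cfg = {'model': 'resnet50', 'lr': 0.01}
if dump_yaml_and_check_difference(cfg, 'config.yaml'):
    print('config.yaml was updated')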
|
47a271a34b0a1774188a725eddf0d6698f76e04c
| 3,643,324
|
from typing import Optional, TypeVar
T = TypeVar("T")  # the original "from re import T" pulled in the re.TEMPLATE flag; a type variable is intended
def get_data(
db: Redis[bytes],
store: StorageEngine,
source: Artefact[T],
carry_error: Optional[hash_t] = None,
do_resolve_link: bool = True,
) -> Result[T]:
"""Retrieve data corresponding to an artefact."""
stream = get_stream(db, store, source.hash, carry_error, do_resolve_link)
if isinstance(stream, Error):
return stream
else:
raw = stream.read()
stream.close()
return _serdes.decode(source.kind, raw, carry_error=carry_error)
|
1bb07e01ae151f985fcd30e8cca0da1b11213459
| 3,643,325
|
def record_edit(request, pk):
"""拜访记录修改"""
user = request.session.get('user_id')
record = get_object_or_404(Record, pk=pk, user=user, is_valid=True)
if request.method == 'POST':
form = RecordForm(data=request.POST, instance=record)
if form.is_valid():
form.save()
return redirect('record')
else:
print(form.errors.as_json())
else:
form = RecordForm(instance=record)
return render(request, 'record_edit.html', {
'form': form,
'pk': pk
})
|
d2d610e53641962e913849b4b643f38898b72a3f
| 3,643,326
|
import re
def remove_body_footer(raw):
"""
Remove a specific body footer starting with the delimiter: -=-=-=-=-=-=-=-=-=-=-=-
"""
body = raw[MELUSINE_COLS[0]]
# str.replace treats the pattern as a literal string; a regex substitution is needed here
return re.sub(r'-=-=-=-=.*$', '', body, flags=re.DOTALL)
|
60161b06fe80fd526f66c796657bd9a77cc1bfb9
| 3,643,327
|
def get_strategy_name():
"""Return strategy module name."""
return 'store_type'
|
bbf1ed9f43f492561ee5c595061f74bea0f5e464
| 3,643,328
|
def pyccel_to_sympy(expr, symbol_map, used_names):
"""
Convert a pyccel expression to a sympy expression saving any pyccel objects
converted to sympy symbols in a dictionary to allow the reverse conversion
to be carried out later
Parameters
----------
expr : PyccelAstNode
The pyccel node to be translated
symbol_map : dict
Dictionary containing any pyccel objects converted to sympy symbols
used_names : Set
A set of all the names which already exist and therefore cannot
be used to create new symbols
Returns
-------
expr : sympy Object
"""
#Constants
if isinstance(expr, LiteralInteger):
return sp.Integer(expr.p)
elif isinstance(expr, LiteralFloat):
return sp.Float(expr)
#Operators
elif isinstance(expr, PyccelDiv):
args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
return args[0] / args[1]
elif isinstance(expr, PyccelMul):
args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
return sp.Mul(*args)
elif isinstance(expr, PyccelMinus):
args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
return args[0] - args[1]
elif isinstance(expr, PyccelUnarySub):
arg = pyccel_to_sympy(expr.args[0], symbol_map, used_names)
return -arg
elif isinstance(expr, PyccelAdd):
args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
return sp.Add(*args)
elif isinstance(expr, PyccelPow):
args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
return sp.Pow(*args)
elif isinstance(expr, PyccelAssociativeParenthesis):
return pyccel_to_sympy(expr.args[0], symbol_map, used_names)
elif isinstance(expr, MathCeil):
return sp.ceiling(pyccel_to_sympy(expr.args[0], symbol_map, used_names))
elif expr in symbol_map.values():
return list(symbol_map.keys())[list(symbol_map.values()).index(expr)]
elif isinstance(expr, Variable):
sym = sp.Symbol(expr.name)
symbol_map[sym] = expr
return sym
elif isinstance(expr, PyccelArraySize):
sym_name,_ = create_incremented_string(used_names, prefix = 'tmp_size')
sym = sp.Symbol(sym_name)
symbol_map[sym] = expr
return sym
elif isinstance(expr, CodeBlock):
body = (pyccel_to_sympy(b, symbol_map, used_names) for b in expr.body)
return CodeBlock(body)
elif isinstance(expr, (Comment)):
return Comment('')
elif isinstance(expr, For):
target = pyccel_to_sympy(expr.target, symbol_map, used_names)
iter_obj = pyccel_to_sympy(expr.iterable, symbol_map, used_names)
body = pyccel_to_sympy(expr.body, symbol_map, used_names)
return For(target, iter_obj, body)
elif isinstance(expr, PythonRange):
start = pyccel_to_sympy(expr.start, symbol_map, used_names)
stop = pyccel_to_sympy(expr.stop , symbol_map, used_names)
step = pyccel_to_sympy(expr.step , symbol_map, used_names)
return sp.Range(start, stop, step)
elif isinstance(expr, Assign):
lhs = pyccel_to_sympy(expr.lhs, symbol_map, used_names)
rhs = pyccel_to_sympy(expr.rhs, symbol_map, used_names)
return Assign(lhs, rhs)
elif isinstance(expr, (sp.core.basic.Atom, sp.core.operations.AssocOp, sp.Set)):
# Already translated
return expr
else:
raise TypeError(str(type(expr)))
|
1800a41d1d06fbbfd212b3b7b48ddc9f4ae07508
| 3,643,329
|
from pathlib import Path
def get_lockfile_path(repo_name: str) -> Path:
"""Get a lockfile to lock a git repo."""
if not _lockfile_path.is_dir():
_lockfile_path.mkdir()
return _lockfile_path / f"{repo_name}_lock_file.lock"
|
5f043b6976921d487054d5c9171c91eb6def19ee
| 3,643,330
|
import networkx as nx
def path_to_graph(hypernym_list, initialnoun):
"""Make a hypernym chain into a graph.
:param hypernym_list: list of hypernyms for a word as obtained from wordnet
:type hypernym_list: [str]
:param initialnoun: the initial noun (we need this to mark it as leaf in the tree)
:type initialnoun: str
:return: the linear directed graph of the chain
:rtype: :class:`networkx.DiGraph`
"""
graph = nx.DiGraph()
# mark the original word as 'seed' so we can track 'importance' later
graph.add_node(initialnoun, seed=True)
previous = initialnoun
for hypernym in reversed(hypernym_list):
# we'll take care of the distances later
graph.add_edge(previous, hypernym.name(), similarity=1.0, distance=1.0)
graph.nodes[hypernym.name()]["seed"] = False
previous = hypernym.name()
return graph
|
e80f90490e6376403d511f37a4703a7b867d2738
| 3,643,331
|
import numpy as np
def make_3d_grid():
"""Generate a 3d grid of evenly spaced points"""
return np.mgrid[0:21, 0:21, 0:5]
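For reference, the returned array stacks one coordinate grid per axis:
import numpy as np

grid = np.mgrid[0:21, 0:21, 0:5]
# grid.shape == (3, 21, 21, 5): grid[0], grid[1] and grid[2] hold the x, y
# and z coordinates of every point on the 21 x 21 x 5 lattice.
assert grid.shape == (3, 21, 21, 5)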
|
0eccd9b2320ed28f0d08d40c9d59c22e77b607f4
| 3,643,332
|
def rho(flag, F, K, t, r, sigma):
"""Returns the Black rho of an option.
:param flag: 'c' or 'p' for call or put.
:type flag: str
:param F: underlying futures price
:type F: float
:param K: strike price
:type K: float
:param t: time to expiration in years
:type t: float
:param r: annual risk-free interest rate
:type r: float
:param sigma: volatility
:type sigma: float
:returns: float
::
==========================================================
The text book analytical formula does not multiply by .01,
but in practice rho is defined as the change in price
for each 1 percent change in r, hence we multiply by 0.01.
==========================================================
>>> F = 49
>>> K = 50
>>> r = .05
>>> t = 0.3846
>>> sigma = 0.2
>>> flag = 'c'
>>> v1 = rho(flag, F, K, t, r, sigma)
>>> v2 = -0.0074705380059582258
>>> abs(v1-v2) < .000001
True
>>> flag = 'p'
>>> v1 = rho(flag, F, K, t, r, sigma)
>>> v2 = -0.011243286001308292
>>> abs(v1-v2) < .000001
True
"""
return -t * black(flag, F, K, t, r, sigma) * .01
|
62bd0fdfe76319261c89bfa33b02b57fcdafb8df
| 3,643,333
|
async def novel_series(id: int, endpoint: PixivEndpoints = Depends(request_client)):
"""
## Name: `novel_series`
> Get information about a novel series
---
### Required:
- ***int*** **`id`**
- Description: novel series ID
"""
return await endpoint.novel_series(id=id)
|
94859a313c823d3fdcf055390473b116ea1229e0
| 3,643,334
|
import logging
import numpy as np
logger = logging.getLogger(__name__)  # module-level logger assumed by the warning below
def to_raw(
y: np.ndarray,
low: np.ndarray,
high: np.ndarray,
eps: float = 1e-4
) -> np.ndarray:
"""Scale the input y in [-1, 1] to [low, high]"""
# Warn the user if the arguments are out of bounds; this shouldn't happen.
if not (np.all(y >= -np.ones_like(y) - eps) and np.all(y <= np.ones_like(y) + eps)):
logger.warning(f"argument out of bounds, {y}, {low}, {high}")
# Clip the values (in case the above warning is ignored).
y = np.clip(y, -np.ones_like(y), np.ones_like(y))
# Transform the input to [low, high].
return (y * (high - low) + (high + low)) / 2.
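A quick sanity check of the affine map (the endpoints of [-1, 1] land on low and high):
import numpy as np

low, high = np.array([0.0]), np.array([10.0])
assert float(to_raw(np.array([-1.0]), low, high)) == 0.0   # -1 -> low
assert float(to_raw(np.array([1.0]), low, high)) == 10.0   # +1 -> high
assert float(to_raw(np.array([0.0]), low, high)) == 5.0    #  0 -> midpoint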
|
61e916f9f46582fc6b9c135ac53fff3a3939d710
| 3,643,335
|
import cv2
def etched_lines(image):
"""
Filters the given image into a drawing-like representation, as if
preprocessed with an adaptive Gaussian threshold.
"""
block_size = 61
c = 41
blur = 7
max_value = 255
# image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
img_blur = cv2.GaussianBlur(image, (21, 21), 0, 0)
img_blend = cv2.divide(image, img_blur, scale=256)
blurred = cv2.medianBlur(img_blend, blur)
threshold_image = cv2.adaptiveThreshold(blurred, max_value, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
cv2.THRESH_BINARY, block_size, c)
return threshold_image
|
33858c8ee50cd6977f81cc64f55967ecd8849369
| 3,643,336
|
def get_last_position(fit, warmup=False):
"""Parse last position from fit object
Parameters
----------
fit : StanFit4Model
warmup : bool
If True, returns the last warmup position, when warmup has been done.
Otherwise the function returns the last sample position.
Returns
-------
list
list contains a dictionary of last draw from each chain.
"""
fit._verify_has_samples()
positions = []
extracted = fit.extract(permuted=False, pars=fit.model_pars, inc_warmup=warmup)
draw_location = -1
if warmup:
draw_location += max(1, fit.sim["warmup"])
chains = fit.sim["chains"]
for i in range(chains):
extract_pos = {key : values[draw_location, i] for key, values in extracted.items()}
positions.append(extract_pos)
return positions
|
28ec10c4f90ac786053334f593ffd3ade27b1fc5
| 3,643,337
|
def find_fast_route(objective, init, alpha=1, threshold=1e-3, max_iters=1e3):
"""
Optimizes FastRoute objective using Newton’s method optimizer to
find a fast route between the starting point and finish point.
Arguments:
objective : an initialized FastRoute object with preset start and finish points,
velocities and initialization vector.
init : (N-1,) numpy array, initial guess for the crossing points
alpha : step size for the NewtonOptimizer
threshold : stopping criterion |x_(k+1) - x_k| < threshold
max_iters : maximum number of iterations (stopping criterion)
Return:
route_time : scalar
route : (N-1,) numpy array with x coordinate of the optimal route,
i.e., a vector of x-coordinates of crossing points (not including start and finish point)
num_iters : number of iterations
"""
opt = NewtonOptimizer(func=objective, alpha=alpha, init=init)
route_time, route, num_iters = opt.optimize(threshold=threshold, max_iters=max_iters)
return route_time, route, num_iters
|
ab0d8364a7aab80a735b2b468a45abb5e30b396b
| 3,643,338
|
def check_tx_success(result):
"""
Checks if function :meth:`UcanServer.write_can_msg_ex` successfully wrote all CAN message(s).
:param ReturnCode result: Error code of the function.
:return: True if CAN message(s) was(were) written successfully, otherwise False.
:rtype: bool
"""
return result.value == ReturnCode.SUCCESSFUL
|
815293aafa42b7323414e1cb96d6d150ef16bb48
| 3,643,341
|
from typing import Iterable
from typing import Optional
def cache_contains_keys(connection: 'Connection', cache_info: CacheInfo, keys: Iterable,
query_id: Optional[int] = None) -> 'APIResult':
"""
Returns a value indicating whether all given keys are present in cache.
:param connection: connection to Ignite server,
:param cache_info: cache meta info,
:param keys: a list of keys or (key, type hint) tuples,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a bool value
retrieved on success: `True` when all keys are present, `False` otherwise,
non-zero status and an error description on failure.
"""
return __cache_contains_keys(connection, cache_info, keys, query_id)
|
48fffa703d7cd120d0faa898e7e94355ec663a84
| 3,643,342
|
def discount_cumsum_trun(x, discount, length):
"""
Compute discounted cumulative sums of vectors, where x is split into
segments of the given lengths and each segment is accumulated independently.
:param x:
vector x,
[x0,
x1,
x2,
x3,
x4]
:param length:
vector length,
[3,
2]
:return:
truncated by the vector length
[x0 + discount * x1 + discount^2 * x2,
x1 + discount * x2,
x2,
x3 + discount * x4,
x4]
"""
ret_arr = x.copy()
total_len = 0
for seg_len in length: # avoid shadowing the builtin len()
tmp_list = ret_arr[total_len : total_len + seg_len]
ret_arr[total_len : total_len + seg_len] = discount_cumsum(tmp_list, discount)
total_len += seg_len
return ret_arr
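`discount_cumsum` itself is not part of this snippet; a minimal reference implementation consistent with the docstring would be:
import numpy as np

def discount_cumsum(x, discount):
    # Reverse accumulation: out[i] = x[i] + discount * out[i + 1]
    out = np.zeros(len(x))
    running = 0.0
    for i in reversed(range(len(x))):
        running = x[i] + discount * running
        out[i] = running
    return out

# E.g. with x = np.ones(5), discount = 0.5 and length = [3, 2], the
# truncated result is [1.75, 1.5, 1.0, 1.5, 1.0], matching the docstring.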
|
589ac22b19705a7881f91cffe78bed5accafc661
| 3,643,343
|
def get_canonical(flop):
"""
Returns the canonical version of the given flop.
Canonical flops are sorted. The first suit is 'c' and, if applicable,
the second is 'd' and the third is 'h'.
Args:
flop (tuple): three pokertools.Card objects
Returns:
A tuple of three pokertools.Card objects which represent
the canonical version of the given flop.
>>> flop = (CARDS['Ks'], CARDS['2c'], CARDS['3s'])
>>> get_canonical(flop)
(<Card: 2c>, <Card: 3d>, <Card: Kd>)
"""
card1, card2, card3 = sorted(flop)
A, B, C = "cdh"
if card1.suit == card2.suit == card3.suit:
return (
CARDS[card1.rank + A],
CARDS[card2.rank + A],
CARDS[card3.rank + A],
)
elif card1.suit == card2.suit != card3.suit:
return (
CARDS[card1.rank + A],
CARDS[card2.rank + A],
CARDS[card3.rank + B],
)
elif card1.suit == card3.suit != card2.suit:
# Special case: if the 2nd and 3rd cards are a pair e.g. the flop is
# [Jc, Qd, Qc], then our suit changes have resulted in an
# unsorted flop! The correct canonical form is [Jc, Qc, Qd].
return tuple(sorted([
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + A],
]))
elif card1.suit != card2.suit == card3.suit:
# Special case: if the 1st and 2nd cards are a pair e.g. flop is
# [2c, 2d, 8d], that is isomorphic with those cards being switched
# e.g. [2d, 2c, 8d] -- which forms the suit pattern already
# covered above: 'ABA'. Thus, it can be transformed to [2c, 2d, 8c].
# This version has higher priority lexicographically -- it has more
# clubs! To make this change we can simply change the suit of the
# third card to 'c'.
if card1.rank == card2.rank:
return (
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + A],
)
return (
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + B],
)
elif card1.suit != card2.suit != card3.suit:
return (
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + C],
)
|
4a797c27e8c32dff18412128d2823a1592c2468e
| 3,643,344
|
import importlib
def _version(lib_name):
"""
Returns the version of a package.
If version cannot be determined returns "available"
"""
lib = importlib.import_module(lib_name)
if hasattr(lib, "__version__"):
return lib.__version__
else:
return "available"
|
cec49d2de66d2fc3a7ed3c89259711bdf40bbe8e
| 3,643,346
|
import numpy as np
def DeltaDeltaP(y, treatment, left_mask):
"""Absolute difference between ATEs of two groups."""
return np.abs(
ATE(y[left_mask], treatment[left_mask])
- ATE(y[~left_mask], treatment[~left_mask])
)
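`ATE` is an external helper; a plausible minimal version (difference in mean outcomes between treated and control units, an assumed definition) is:
import numpy as np

def ATE(y, treatment):
    # Assumed definition: mean outcome under treatment minus mean under control.
    treated = treatment.astype(bool)
    return y[treated].mean() - y[~treated].mean()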
|
cd7816d2aa02cfb72dccf364cc73e07d596cc6ec
| 3,643,347
|
def start(isdsAppliance, serverID='directoryserver', check_mode=False, force=False):
"""
Restart the specified appliance server
"""
if force is True or _check(isdsAppliance, serverID, action='start') is True:
if check_mode is True:
return isdsAppliance.create_return_object(changed=True)
else:
return isdsAppliance.invoke_post("Restarting the service " + serverID,
"/widgets/server/start/" + serverID,
{})
return isdsAppliance.create_return_object()
|
b59941eafff24d9389f91edaa38de7b35eb48660
| 3,643,348
|
def get_dates_keyboard(dates):
"""
Build the inline keyboard of dates.
"""
buttons = []
for date in dates:
button = InlineKeyboardButton(
text=date['entry_date'],
callback_data=date_callback.new(date_str=date['entry_date'], entry_date=date['entry_date'])
)
buttons.append(button)
keyboard = InlineKeyboardMarkup(inline_keyboard=[
buttons[:3],
buttons[3:],
])
return keyboard
|
41a87c64e603d6b19921c3a960743d3d27f2e373
| 3,643,351
|
def merge_synset(wn, synsets, reason, lexfile, ssid=None, change_list=None):
"""Create a new synset merging all the facts from other synsets"""
pos = synsets[0].part_of_speech.value
if not ssid:
ssid = new_id(wn, pos, synsets[0].definitions[0].text)
ss = Synset(ssid, "in",
PartOfSpeech(pos), lexfile)
ss.definitions = [d for s in synsets for d in s.definitions]
ss.examples = [x for s in synsets for x in s.examples]
members = {}
wn.add_synset(ss)
for s in synsets:
# Add all relations
for r in s.synset_relations:
if not any(r == r2 for r2 in ss.synset_relations):
add_relation(
wn, ss, wn.synset_by_id(
r.target), r.rel_type, change_list)
# Add members
for m in wn.members_by_id(s.id):
if m not in members:
members[m] = add_entry(wn, ss, m, change_list)
add_entry(wn, ss, m, change_list)
e = [e for e in [wn.entry_by_id(e2) for e2 in wn.entry_by_lemma(m)]
if e.lemma.part_of_speech.value == pos][0]
for f in e.forms:
if not any(f2 == f for f2 in members[m].forms): # f2 must be the loop variable here
members[m].add_form(f)
# syn behaviours - probably fix manually for the moment
if change_list:
change_list.change_synset(ss)
return ss
|
d1d7af2a83d6b7deb506fb69c7cbdb2770735f4f
| 3,643,355
|
def clean_all(record):
""" A really messy function to make sure that the citeproc data
are indeed in the citeproc format. Basically a long list of if/...
conditions to catch all errors I have noticed.
"""
record = clean_fields(record)
for arrayed in ['ISSN']:
if arrayed in record:
record = clean_arrayed(record, arrayed)
return record
|
28ba59e808e88058c5745c444f1e58cd564c726d
| 3,643,357
|
def _create_model() -> Model:
"""Setup code: Load a program minimally"""
model = Model(initial_program, [], load=False)
engine = ApproximateEngine(model, 1, geometric_mean)
model.set_engine(engine)
return model
|
71fa7c000e6ed0cd8ad14bb0be3bb617337e7631
| 3,643,358
|
def candidate_elimination(trainingset):
"""Computes the version space containig all hypothesis
from H that are consistent with the examples in the training set"""
G = set()#set of maximally general h in H
S = set()#set of maximally specific h in H
G.add(("?","?","?","?","?","?"))
S.add(("0","0","0","0","0","0"))
for e in trainingset:
update_vs(G,S,e)
# print "-----------------"
# print "S:",S
# print "G:",G
return G,S
|
b368cea3b058cc667c41725b0fa6a6b4a51f418b
| 3,643,359
|
from pathlib import Path
def mkdir(path_str):
"""
Method to create a new directory or directories recursively.
"""
return Path(path_str).mkdir(parents=True, exist_ok=True)
|
1621fd5f4d74b739de0b17933c1804faabf44a2f
| 3,643,360
|
def get_image_with_projected_bbox3d(img, proj_bbox3d_pts=None, width=0, color=Color.White):
"""
Draw the outline of a 3D bbox on the image.
Input:
proj_bbox3d_pts: (8,2) array of projected vertices
"""
v = proj_bbox3d_pts
if proj_bbox3d_pts is not None and len(proj_bbox3d_pts) > 0: # truth-testing an array against [] is ambiguous
draw = ImageDraw.Draw(img)
for k in range(0,4):
#http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
i,j = k,(k+1)%4
draw.line([(v[i,0],v[i,1]), (v[j,0],v[j,1])], fill=color, width=width)
i,j = k+4,(k+1)%4 + 4
draw.line([(v[i,0],v[i,1]), (v[j,0],v[j,1])], fill=color, width=width)
i,j = k,k+4
draw.line([(v[i,0],v[i,1]), (v[j,0],v[j,1])], fill=color, width=width)
return img
|
2ec900c055635adbc6619f8e786e52bd820c6930
| 3,643,361
|
import numpy as np
def process_spectrogram_params(fs, nfft, frequency_range, window_start, datawin_size):
""" Helper function to create frequency vector and window indices
Arguments:
fs (float): sampling frequency in Hz -- required
nfft (int): length of signal to calculate fft on -- required
frequency_range (list): 1x2 list - [<min frequency>, <max frequency>] -- required
window_start (1xm np.array): array of timestamps representing the beginning time for each
window -- required
datawin_size (float): seconds in one window -- required
Returns:
window_idxs (nxm np array): indices of timestamps for each window
(nxm where n=number of windows and m=datawin_size)
stimes (1xt np array): array of times for the center of the spectral bins
sfreqs (1xf np array): array of frequency bins for the spectrogram
freq_inds (1d np array): boolean array of which frequencies are being analyzed in
an array of frequencies from 0 to fs with steps of fs/nfft
"""
# create frequency vector
df = fs / nfft
sfreqs = np.arange(0, fs, df)
# Get frequencies for given frequency range
freq_inds = (sfreqs >= frequency_range[0]) & (sfreqs <= frequency_range[1])
sfreqs = sfreqs[freq_inds]
# Compute times in the middle of each spectrum
window_middle_samples = window_start + round(datawin_size / 2)
stimes = window_middle_samples / fs
# Get indexes for each window
window_idxs = np.atleast_2d(window_start).T + np.arange(0, datawin_size, 1)
window_idxs = window_idxs.astype(int)
return [window_idxs, stimes, sfreqs, freq_inds]
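An illustrative call with arbitrary values (note the indexing code treats `window_start` and `datawin_size` as sample counts):
import numpy as np

fs, nfft = 100.0, 256
window_start = np.array([0, 50, 100])  # one window every 0.5 s
window_idxs, stimes, sfreqs, freq_inds = process_spectrogram_params(
    fs, nfft, [0, 25], window_start, 100)
# window_idxs.shape == (3, 100); sfreqs covers 0-25 Hz in steps of fs/nfft.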
|
0e8563051a5ee4b48f7e635126ed4e6639e47bdd
| 3,643,362
|
from typing import Union
def Hellwig2022_to_XYZ(
specification: CAM_Specification_Hellwig2022,
XYZ_w: ArrayLike,
L_A: FloatingOrArrayLike,
Y_b: FloatingOrArrayLike,
surround: Union[
InductionFactors_CIECAM02, InductionFactors_Hellwig2022
] = VIEWING_CONDITIONS_HELLWIG2022["Average"],
discount_illuminant: Boolean = False,
) -> NDArray:
"""
Convert from *Hellwig and Fairchild (2022)* specification to *CIE XYZ*
tristimulus values.
Parameters
----------
specification : CAM_Specification_Hellwig2022
*Hellwig and Fairchild (2022)* colour appearance model specification.
Correlate of *Lightness* :math:`J`, correlate of *chroma* :math:`C` or
correlate of *colourfulness* :math:`M` and *hue* angle :math:`h` in
degrees must be specified, e.g. :math:`JCh` or :math:`JMh`.
XYZ_w
*CIE XYZ* tristimulus values of reference white.
L_A
Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often taken
to be 20% of the luminance of a white object in the scene).
Y_b
Luminous factor of background :math:`Y_b` such as
:math:`Y_b = 100 x L_b / L_w` where :math:`L_w` is the luminance of the
light source and :math:`L_b` is the luminance of the background. For
viewing images, :math:`Y_b` can be the average :math:`Y` value for the
pixels in the entire image, or frequently, a :math:`Y` value of 20,
approximating an :math:`L^*` of 50, is used.
surround
Surround viewing conditions.
discount_illuminant
Discount the illuminant.
Returns
-------
:class:`numpy.ndarray`
*CIE XYZ* tristimulus values.
Raises
------
ValueError
If neither *C* or *M* correlates have been defined in the
``CAM_Specification_Hellwig2022`` argument.
Notes
-----
+-------------------------------------+-----------------------+---------------+
| **Domain**                          | **Scale - Reference** | **Scale - 1** |
+=====================================+=======================+===============+
| ``CAM_Specification_Hellwig2022.J`` | [0, 100]              | [0, 1]        |
+-------------------------------------+-----------------------+---------------+
| ``CAM_Specification_Hellwig2022.C`` | [0, 100]              | [0, 1]        |
+-------------------------------------+-----------------------+---------------+
| ``CAM_Specification_Hellwig2022.h`` | [0, 360]              | [0, 1]        |
+-------------------------------------+-----------------------+---------------+
| ``CAM_Specification_Hellwig2022.s`` | [0, 100]              | [0, 1]        |
+-------------------------------------+-----------------------+---------------+
| ``CAM_Specification_Hellwig2022.Q`` | [0, 100]              | [0, 1]        |
+-------------------------------------+-----------------------+---------------+
| ``CAM_Specification_Hellwig2022.M`` | [0, 100]              | [0, 1]        |
+-------------------------------------+-----------------------+---------------+
| ``CAM_Specification_Hellwig2022.H`` | [0, 360]              | [0, 1]        |
+-------------------------------------+-----------------------+---------------+
| ``XYZ_w``                           | [0, 100]              | [0, 1]        |
+-------------------------------------+-----------------------+---------------+
+-----------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+===========+=======================+===============+
| ``XYZ`` | [0, 100] | [0, 1] |
+-----------+-----------------------+---------------+
References
----------
:cite:`Fairchild2022`, :cite:`Hellwig2022`
Examples
--------
>>> specification = CAM_Specification_Hellwig2022(J=41.731207905126638,
... C=0.025763615829912909,
... h=217.06795976739301)
>>> XYZ_w = np.array([95.05, 100.00, 108.88])
>>> L_A = 318.31
>>> Y_b = 20.0
>>> Hellwig2022_to_XYZ(specification, XYZ_w, L_A, Y_b)
... # doctest: +ELLIPSIS
array([ 19.01..., 20... , 21.78...])
"""
J, C, h, _s, _Q, M, _H, _HC = astuple(specification)
J = to_domain_100(J)
C = to_domain_100(C)
h = to_domain_degrees(h)
M = to_domain_100(M)
L_A = as_float_array(L_A)
XYZ_w = to_domain_100(XYZ_w)
_X_w, Y_w, _Z_w = tsplit(XYZ_w)
# Step 0
# Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
RGB_w = vector_dot(MATRIX_16, XYZ_w)
# Computing degree of adaptation :math:`D`.
D = (
np.clip(degree_of_adaptation(surround.F, L_A), 0, 1)
if not discount_illuminant
else ones(L_A.shape)
)
F_L, z = viewing_conditions_dependent_parameters(Y_b, Y_w, L_A)
D_RGB = (
D[..., np.newaxis] * Y_w[..., np.newaxis] / RGB_w
+ 1
- D[..., np.newaxis]
)
RGB_wc = D_RGB * RGB_w
# Applying forward post-adaptation non-linear response compression.
RGB_aw = post_adaptation_non_linear_response_compression_forward(
RGB_wc, F_L
)
# Computing achromatic responses for the whitepoint.
A_w = achromatic_response_forward(RGB_aw)
# Step 1
if has_only_nan(M) and not has_only_nan(C):
M = (C * A_w) / 35
elif has_only_nan(M):
raise ValueError(
'Either "C" or "M" correlate must be defined in '
'the "CAM_Specification_Hellwig2022" argument!'
)
# Step 2
# Computing eccentricity factor *e_t*.
e_t = eccentricity_factor(h)
# Computing achromatic response :math:`A` for the stimulus.
A = achromatic_response_inverse(A_w, J, surround.c, z)
# Computing *P_p_1* to *P_p_2*.
P_p_n = P_p(surround.N_c, e_t, A)
P_p_1, P_p_2 = tsplit(P_p_n)
# Step 3
# Computing opponent colour dimensions :math:`a` and :math:`b`.
ab = opponent_colour_dimensions_inverse(P_p_1, h, M)
a, b = tsplit(ab)
# Step 4
# Applying post-adaptation non-linear response compression matrix.
RGB_a = matrix_post_adaptation_non_linear_response_compression(P_p_2, a, b)
# Step 5
# Applying inverse post-adaptation non-linear response compression.
RGB_c = post_adaptation_non_linear_response_compression_inverse(
RGB_a + 0.1, F_L
)
# Step 6
RGB = RGB_c / D_RGB
# Step 7
XYZ = vector_dot(MATRIX_INVERSE_16, RGB)
return from_range_100(XYZ)
|
ef5f05f32f6871eaa67bb554a23595cedf2a97b1
| 3,643,363
|
def build_exec_file_name(graph: str,
strt: str,
nagts: int,
exec_id: int,
soc_name: str = None):
"""Builds the execution file name of id `exec_id` for the given patrolling
scenario `{graph, nagts, strt}` .
Args:
graph:
strt:
nagts:
exec_id:
soc_name:
"""
if soc_name is None or soc_name == '':
soc_name = misc.build_soc_name(strategy=strt, nagts=nagts)
return regularise_path("{}-{}-{}-{}-{}.json".format(strt,
graph,
soc_name,
str(nagts),
str(exec_id)))
|
143731bee19ad8e4b925f07d5449baff83994059
| 3,643,364
|
def set_ticks(ax, tick_locs, tick_labels=None, axis='y'):
"""Sets ticks at standard numerical locations"""
if tick_labels is None:
tick_labels = tick_locs
ax_transformer = AxTransformer()
ax_transformer.fit(ax, axis=axis)
getattr(ax, f'set_{axis}ticks')(ax_transformer.transform(tick_locs))
getattr(ax, f'set_{axis}ticklabels')(tick_labels)
ax.tick_params(axis=axis, which='both', bottom=True, top=False, labelbottom=True)
return ax
|
690179bcb2d2ca4f3b1e5b8cb03f68627168b73a
| 3,643,365
|
from typing import List
import re
def extract_discovery(value: str) -> List[dict]:
"""Parse the information returned by show discovery/show onu discovered.
Args:
value (str): string returned by the show discovery/show onu discovered command
Returns:
List[dict]: a list of dictionaries, one per discovered ONU
"""
# ====================================================================================
# ----- ONU Unauth Table, SLOT = 4, PON = 8, ITEM = 1 -----
# No OnuType PhyId PhyPwd LogicId LogicPwd Why
# --- -------------- ------------ ---------- ------------------------ ------------ ---
# 1 HG6243C FHTT91fbc5e8 fiberhome fiberhome fiberhome 1
# Command executes success.
# ====================================================================================
# ----- ONU Unauth Table, SLOT = 4, PON = 8, ITEM = 1 -----
# No OnuType PhyId PhyPwd LogicId LogicPwd Why
# --- -------------- ------------ ---------- ------------------------ ------------ ---
# 1 HG6243C FHTT91fbc5e8 fiberhome fiberhome fiberhome 1
#
# ====================================================================================
# ----- ONU Unauth Table, SLOT = 4, PON = 8, ITEM = 6 -----
# No OnuType PhyId PhyPwd LogicId LogicPwd Why
# --- -------------- ------------ ---------- ------------------------ ------------ ---
# 1 5506-04-F1 FHTT033178b0 fiberhome fiberhome fiberhome 1
# 2 HG6243C FHTT92f445c8 fiberhome fiberhome fiberhome 1
# 3 5506-10-A1 FHTT00010104 fiberhome fiberhome fiberhome 1
# 4 5506-10-A1 FHTT000aae64 fiberhome fiberhome 1
# 5 HG6243C FHTT91fbc5e8 fiberhome fiberhome fiberhome 1
# 6 5506-02-F FHTT0274ab18 wangran3 12345678 1
# ====================================================================================
slotPortExp = re.compile(r'SLOT = (\d+), PON = (\d+)')
titleExp = re.compile(r'(No)\s+(OnuType)\s+(PhyId)\s+(PhyPwd)\s+(LogicId)\s+(LogicPwd)\s+(Why)\s*')
valueExp = re.compile(r'([\d\s]{3})\s([\w\s-]{14})\s([\w\s]{12})\s([\w\s]{10})\s([\w\s]{24})\s([\w\s]{12})\s([\d\s]{1,3})')
lines = value.splitlines()
ret = [ ]
titles = None
slot, port = None, None
for line in lines:
match = slotPortExp.search(line)
if match:
slot, port = match.groups()
if titles is None:
match = titleExp.match(line)
if match:
titles = match.groups()
continue
else:
match = valueExp.match(line)
if match:
values = match.groups()
ret.append({ })
for k, v in zip(titles, values):
# `value` is the input string and is not callable; strip the captured fields instead
ret[-1][k.strip()] = v.strip()
ret[-1]['SLOT'] = slot
ret[-1]['PON'] = port
continue
return ret
|
6107d194d10e6b7c1c6e33f7151214152e5bff7d
| 3,643,366
|
import networkx as nx
def dict_to_networkx(data):
"""
Convert data into networkx graph
Args:
data: data in dictionary type
Returns: networkx graph
"""
data_checker(data)
G = nx.Graph(data)
return G
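`data_checker` is an external validator; the conversion itself relies on networkx accepting a dict-of-lists adjacency directly:
import networkx as nx

G = nx.Graph({'a': ['b', 'c'], 'b': ['c']})
assert sorted(G.edges) == [('a', 'b'), ('a', 'c'), ('b', 'c')]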
|
0a3c670d3bad87bb18212dc6d2e47ac5a1ccc413
| 3,643,367
|
import urllib
def to_url_slug(string):
"""Transforms string into URL-safe slug."""
slug = urllib.parse.quote_plus(string)
return slug
|
0976e3d1568f793fa946be9fa67b40cc82e6f4f5
| 3,643,368
|
def is_wrapping(wrapper):
"""Determines if the given callable is a wrapper for another callable"""
return hasattr(wrapper, __WRAPPED)
|
16dcff38253424f6b93cee2a887aa7d91afd4f44
| 3,643,369
|
from conekt.models.relationships.sequence_go import SequenceGOAssociation
from typing import Sequence
def sequence_view(sequence_id):
"""
Get a sequence based on the ID and show the details for this sequence
:param sequence_id: ID of the sequence
"""
current_sequence = Sequence.query.get_or_404(sequence_id)
go_associations = current_sequence.go_associations.group_by(SequenceGOAssociation.go_id,
SequenceGOAssociation.evidence,
SequenceGOAssociation.source).all()
# to avoid running long count queries, fetch relations here and pass to template
return render_template('sequence.html',
sequence=current_sequence,
go_associations=go_associations,
interpro_associations=current_sequence.interpro_associations.all(),
families=current_sequence.families.all(),
expression_profiles=current_sequence.expression_profiles.all(),
network_nodes=current_sequence.network_nodes.all(),
coexpression_clusters=current_sequence.coexpression_clusters.all(),
ecc_query_associations=current_sequence.ecc_query_associations.all()
)
|
c9493376b8df2b9dc7585d8b380e54ce4d20f473
| 3,643,370
|
def horner(n,c,x0):
"""
Parameters
----------
n : integer
degree of the polynomial.
c : array_like
coefficients of the polynomial (c[0] is the constant term).
x0 : float
where we are evaluating the polynomial.
Returns
-------
y : float
the value of the function evaluated at x0.
z : float
the value of the derivative evaluated at x0.
"""
y=c[n]
z=c[n]
for i in range(n-1,0,-1):
y= x0*y+c[i]
z=x0*z+y
y=x0*y+c[0] #this computes the b0
return y,z
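A worked example: for p(x) = 2x^2 + 3x + 1, i.e. c = [1, 3, 2] and n = 2, evaluated at x0 = 2, Horner's scheme gives p(2) = 15 and p'(2) = 11:
y, z = horner(2, [1, 3, 2], 2.0)
assert (y, z) == (15.0, 11.0)  # p(2) = 15, p'(2) = 4*2 + 3 = 11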
|
adf3f3772d12d5bed0158045ad480cee8454cb5c
| 3,643,371
|
import gzip
def _compression_safe_opener(fname):
"""Determine whether to use *open* or *gzip.open* to read
the input file, depending on whether or not the file is compressed.
"""
f = gzip.open(fname, "r")
try:
f.read(1)
opener = gzip.open
except IOError:
opener = open
finally:
f.close()
return opener
|
4c44da2ae15c63ccd6467e6e893a3c590c20a7e9
| 3,643,373
|
from typing import List
import json # NOTE: assumed to be a project-local module providing `parse_json` (returning a `OneOf`); the stdlib json module has no such function
def read_payload(payload: str) -> OneOf[Issue, List[FileReport]]:
"""Transform an eslint payload to a list of `FileReport` instances.
Args:
payload: The raw payload from eslint.
Returns:
A `OneOf` containing an `Issue` or a list of `FileReport` instances.
"""
return one_of(lambda: [
[
FileReport(
file_path=error['filePath'],
violations=[
Violation(
msg['ruleId'],
msg['message'],
msg['line'],
msg['column'],
error['filePath'],
)
for msg in error['messages']
],
)
for error in json_payload
]
for json_payload in json.parse_json(payload)
])
|
809e4db54cb8d4c737d9eea7f77f1a1846f24589
| 3,643,375
|
from typing import Iterable
from typing import Any
from typing import Iterator
import itertools
def prepend(
iterable: Iterable[Any],
value: Any,
*,
times: int = 1,
) -> Iterator[Any]:
"""Return an iterator with a specified value prepended.
Arguments:
iterable: the iterable to which the value is to be prepended
value: the value to prepend to the iterable
Keyword Arguments:
times: number of times to prepend the value
(optional; default is 1)
Returns:
iterator prepending the specified value(s) to the items of the iterable
Examples:
>>> list(prepend(range(5), -1))
[-1, 0, 1, 2, 3, 4]
>>> list(prepend(['off to work we go'], 'hi ho', times=2))
['hi ho', 'hi ho', 'off to work we go']
"""
return itertools.chain([value] * times, iterable)
|
659bc3616238f5e40865505c006c1369f20e33d3
| 3,643,377
|
import numpy as _np
from skimage.transform import warp
def apply_transform(transform, source, target,
fill_value=None, propagate_mask=False):
"""Applies the transformation ``transform`` to ``source``.
The output image will have the same shape as ``target``.
Args:
transform: A scikit-image ``SimilarityTransform`` object.
source (numpy array): A 2D numpy array of the source image to be
transformed.
target (numpy array): A 2D numpy array of the target image. Only used
to set the output image shape.
fill_value (float): A value to fill in the areas of aligned_image
where footprint == True.
propagate_mask (bool): Whether to propagate the mask in source.mask
onto footprint.
Return:
A tuple (aligned_image, footprint).
aligned_image is a numpy 2D array of the transformed source
footprint is a mask 2D array with True on the regions
with no pixel information.
"""
if hasattr(source, 'data') and isinstance(source.data, _np.ndarray):
source_data = source.data
else:
source_data = source
if hasattr(target, 'data') and isinstance(target.data, _np.ndarray):
target_data = target.data
else:
target_data = target
aligned_image = warp(source_data, inverse_map=transform.inverse,
output_shape=target_data.shape, order=3, mode='constant',
cval=_np.median(source_data), clip=False,
preserve_range=True)
footprint = warp(_np.zeros(source_data.shape, dtype='float32'),
inverse_map=transform.inverse,
output_shape=target_data.shape,
cval=1.0)
footprint = footprint > 0.4
if hasattr(source, 'mask') and propagate_mask:
source_mask = _np.array(source.mask)
if source_mask.shape == source_data.shape:
source_mask_rot = warp(source_mask.astype('float32'),
inverse_map=transform.inverse,
output_shape=target_data.shape,
cval=1.0)
source_mask_rot = source_mask_rot > 0.4
footprint = footprint | source_mask_rot
if fill_value is not None:
aligned_image[footprint] = fill_value
return aligned_image, footprint
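A minimal usage sketch with an arbitrary similarity transform:
import numpy as np
from skimage.transform import SimilarityTransform

source = np.random.rand(100, 100)
target = np.zeros((120, 120))
tf = SimilarityTransform(rotation=0.1, translation=(5, -3))
aligned, footprint = apply_transform(tf, source, target)
# aligned has target's shape; footprint flags pixels with no source data.
assert aligned.shape == target.shape and footprint.dtype == bool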
|
97843939a6e03389d8c4741a04cea77ac7e1e0c4
| 3,643,378
|
def _with_extension(base: str, extension: str) -> str:
"""
Adds an extension to a base name
"""
if "sus" in base:
return f"{extension}{base}"
else:
return f"{base}{extension}"
|
5a1253763808127f296c3bcb04c07562346dea2d
| 3,643,379
|
def putin_rfid_no_order_api():
"""
Stock in without an existing order: automatically create an order (type: production stock-in) and stock in its lines.
post req: withlock
{
lines: [{line_id:~, qty, location, lpn='', sku,
rfid_list[rfid1, rfid2, rfid3...],
rfid_details[{rfid1, weight, gross_weight, qty_inner}, {rfid2}, {rfid3}...}],
}...]
w_user_code,
w_user_name
}
sample:{
lines: [
{qty, sku, location:~, rfid_details[{rfid1, weight, gross_weight, qty_inner}, ], }
]
}
"""
if request.method == 'POST':
is_overcharge = ('overcharge' in request.path) or g.owner.is_overcharge
is_enable_fast_stockin_qty_inner = g.owner.is_enable_fast_stockin_qty_inner
data = request.json.pop('lines', [])# [{line_id, qty, location, lpn=''}...]
w_user_code = request.json.pop('w_user_code', None)
w_user_name = request.json.pop('w_user_name', None)
# When a single RFID is stocked in at a time, check whether it has already been stocked in.
if len(data) == 1:
r_details = data[0].get('rfid_details', [])
if len(r_details) == 1:
rfid0 = r_details[0]['rfid']
inv0 = InvRfid.query.t_query.filter_by(rfid=rfid0).first()
if inv0 and inv0.qty == 1:
return json_response({'status': 'fail', 'msg': u'already stocked in'})
ok, order = StockinAction.create_stockin({'xtype': 'produce'}, g)
db.session.add(order)
db.session.flush()
action = StockinAction(order)
for xd in data:
d = DictNone(xd)
if d.get('qty', 0) <= 0:
continue
# Populate rfid_list when only rfid_details are provided.
rfid_details = {}
if not d.get('rfid_list', None) and d.get('rfid_details', None):
r_details = d.get('rfid_details', [])
rfid_list = [r['rfid'] for r in r_details]
d['rfid_list'] = rfid_list
rfid_details = {r['rfid']:r for r in r_details}
# ('spec','brand','unit','style','color','size','level')
ld = DictNone()
ld.sku = d.sku
ld.qty = 1 if is_enable_fast_stockin_qty_inner else (d.qty or 1)
ld.location_code = d.location or ''
ld.batch_code = d.batch_code or ''
ld.spec = d.spec or ''
ld.style = d.style or ''
ld.color = d.color or ''
ld.size = d.size or ''
ld.level = d.level or ''
ld.twisted = d.twisted or ''
line = StockinAction.create_stockin_line(ld, order, poplist=None, is_add=True)
db.session.add(line)
db.session.flush()
# line_id, qty, location, lpn='', line=None
is_overcharge, qty_off, qty_real = action.putin(line_id=None, line=line, qty=ld.qty, location=(ld.location_code or 'STAGE'), \
rfid_list=d['rfid_list'], rfid_details=rfid_details, \
w_user_code=w_user_code, w_user_name=w_user_name, is_overcharge=is_overcharge)
d['qty_real'] = qty_real
order.state = 'all'
db.session.flush()
finish = True
for line in order.lines:
if line.qty_real < line.qty: # if any line received less than expected, the order is unfinished
finish = False
# accumulate line weights
_ = line.weight, line.gross_weight, line.qty_inner
# accumulate order weights
_ = order.weight, order.gross_weight, order.qty_inner
order.state = 'all' if finish else 'part'
if order.state == 'all':
order.finish()
db.session.commit()
return json_response({'status': 'success', 'msg': u'ok', 'data':data})
|
6637fba766e86bc25dae733d7ddc102114e79e27
| 3,643,380
|
def GuessLanguage(filename):
""" Attempts to Guess Langauge of `filename`. Essentially, we do a
filename.rsplit('.', 1), and a lookup into a dictionary of extensions."""
try:
(_, extension) = filename.rsplit('.', 1)
except ValueError:
raise ValueError("Could not guess language as '%s' does not have an \
extension"%filename)
return {'c' : 'c'
,'py' : 'python'}[extension]
|
3cd1289ab3140256dfbeb3718f30a3ac3ffca6f2
| 3,643,381
|
import numpy
def extract_data_size(series, *names):
"""
Determines series data size from the first available property, which
provides direct values as list, tuple or NumPy array.
Args:
series: perrot.Series
Series from which to extract data size.
names: (str,)
Sequence of property names to check.
Returns:
int or None
Determined data size.
"""
# get size
for name in names:
# check property
if not series.has_property(name):
continue
# get property
prop = series.get_property(name, native=True)
# get size
if isinstance(prop, (list, tuple, numpy.ndarray)):
return len(prop)
# no data
return None
|
39d503b359318d9dc118481baa7f99a43b926711
| 3,643,382
|
def uintToQuint (v, length=2):
""" Turn any integer into a proquint with fixed length """
assert 0 <= v < 2**(length*16)
return '-'.join (reversed ([u16ToQuint ((v>>(x*16))&0xffff) for x in range (length)]))
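`u16ToQuint` is not shown; a sketch following the proquint spec (16 bits encoded as consonant/vowel groups of 4+2+4+2+4 bits) would be:
_CONS = 'bdfghjklmnprstvz'
_VOWS = 'aiou'

def u16ToQuint(v):
    # Most significant group first: con-vo-con-vo-con.
    return (_CONS[(v >> 12) & 0xF] + _VOWS[(v >> 10) & 0x3] +
            _CONS[(v >> 6) & 0xF] + _VOWS[(v >> 4) & 0x3] +
            _CONS[v & 0xF])

# uintToQuint(0x7F000001, 2) -> 'lusab-babad' (the proquint for 127.0.0.1)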
|
96f707ed527e1063d055ab1b6d1f8a17308ed772
| 3,643,383
|
import hashlib
import base64
def alphanumeric_hash(s: str, size=5):
"""Short alphanumeric string derived from hash of given string"""
hash_object = hashlib.md5(s.encode('ascii'))
s = base64.b32encode(hash_object.digest())
result = s[:size].decode('ascii').lower()
return result
|
915159aa2242eedfe8dcba682ae4bcf4fdebc3c4
| 3,643,384
|
def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs):
"""A reply handler for commands that haven't been added to the reply list.
Returns empty strings for stdout and stderr.
"""
return '', ''
|
e73bd970030c4f78aebf2913b1540fc1b370d906
| 3,643,385
|
from collections import defaultdict
from pathlib import Path
from typing import Dict, List
def require(section: str = "install") -> List[str]:
""" Requirements txt parser. """
require_txt = Path(".").parent / "requirements.txt"
if not Path(require_txt).is_file():
return []
requires = defaultdict(list) # type: Dict[str, List[str]]
with open(str(require_txt), "rb") as fh:
key = "" # type: str
for line in fh.read().decode("utf-8").split("\n"):
if not line.strip():
" empty line "
continue
if line[0] == "#":
" section key "
key = line[2:]
continue
# actual package
requires[key].append(line.strip())
return requires[section]
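Section keys come from `#` comment lines (key = line[2:]); a hypothetical requirements.txt this parser accepts:
# A hypothetical requirements.txt layout:
#
#     # install
#     requests>=2.0
#     # test
#     pytest
#
# require("install") -> ['requests>=2.0']; require("test") -> ['pytest']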
|
efda45491798e5b7b66e0f2d6a4ac7b9fc3324d0
| 3,643,386
|
def string_in_list_of_dicts(key, search_value, list_of_dicts):
"""
Returns True if search_value is in the list of dictionaries at the specified key.
Case insensitive and without leading or trailing whitespaces.
:return: True if found, else False
"""
for item in list_of_dicts:
if equals(item[key], search_value):
return True
return False
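`equals` is an external helper; per the docstring, a minimal version would be:
def equals(a, b):
    # Case-insensitive comparison ignoring leading/trailing whitespace.
    return str(a).strip().lower() == str(b).strip().lower()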
|
a761e3b44efc6e584c8f9045be307837daad49c4
| 3,643,388
|
import itertools
import pandas
def get_data(station_id, elements=None, update=True, as_dataframe=False):
"""Retrieves data for a given station.
Parameters
----------
station_id : str
Station ID to retrieve data for.
elements : ``None``, str, or list of str
If specified, limits the query to given element code(s).
update : bool
If ``True`` (default), new data files will be downloaded if they are
newer than any previously cached files. If ``False``, then previously
downloaded files will be used and new files will only be downloaded if
there is not a previously downloaded file for a given station.
as_dataframe : bool
If ``False`` (default), a dict with element codes mapped to value dicts
is returned. If ``True``, a dict with element codes mapped to equivalent
pandas.DataFrame objects will be returned. The pandas dataframe is used
internally, so setting this to ``True`` is a little bit faster as it
skips a serialization step.
Returns
-------
site_dict : dict
A dict with element codes as keys, mapped to collections of values. See
the ``as_dataframe`` parameter for more.
"""
if isinstance(elements, str): # Python 3: basestring no longer exists
elements = [elements]
start_columns = [
('year', 11, 15, int),
('month', 15, 17, int),
('element', 17, 21, str),
]
value_columns = [
('value', 0, 5, float),
('mflag', 5, 6, str),
('qflag', 6, 7, str),
('sflag', 7, 8, str),
]
columns = list(itertools.chain(start_columns, *[
[(name + str(n), start + 13 + (8 * n), end + 13 + (8 * n), converter)
for name, start, end, converter in value_columns]
for n in range(1, 32)
]))
station_file_path = _get_ghcn_file(
station_id + '.dly', check_modified=update)
station_data = util.parse_fwf(station_file_path, columns, na_values=[-9999])
dataframes = {}
for element_name, element_df in station_data.groupby('element'):
if elements is not None and element_name not in elements:
continue
element_df['month_period'] = element_df.apply(
lambda x: pandas.Period('%s-%s' % (x['year'], x['month'])),
axis=1)
element_df = element_df.set_index('month_period')
monthly_index = element_df.index
# here we're just using pandas' builtin resample logic to construct a daily
# index for the timespan
daily_index = element_df.resample('D').index.copy()
# XXX: hackish; pandas support for this sort of thing will probably be
# added soon
month_starts = (monthly_index - 1).asfreq('D') + 1
dataframe = pandas.DataFrame(
columns=['value', 'mflag', 'qflag', 'sflag'], index=daily_index)
for day_of_month in range(1, 32):
dates = [date for date in (month_starts + day_of_month - 1)
if date.day == day_of_month]
if not len(dates):
continue
months = pandas.PeriodIndex([pandas.Period(date, 'M') for date in dates])
for column_name in dataframe.columns:
col = column_name + str(day_of_month)
dataframe[column_name][dates] = element_df[col][months]
dataframes[element_name] = dataframe
if as_dataframe:
return dataframes
else:
return dict([
(key, util.dict_from_dataframe(dataframe))
for key, dataframe in dataframes.items()
])
|
7eaa0d152a8f76fa7bfc4109fb4e0a5c3d90e318
| 3,643,389
|
import numpy as np
def Find_Peaks(profile, scale, **kwargs):
"""
Pulls out the peaks from a radial profile
Inputs:
profile : dictionary, contains intensity profile and pixel scale of
diffraction pattern
calibration : dictionary, contains camera parameters to scale data
properly in two theta space
is_profile : boolean, changes processing for profiles vs 2D patterns
scale_bar : string, determines which conversions need to be run
to convert to two theta
display_type: string, determines which plots to show
Outputs:
peak_locs : dictionary, contains two_theta, d_spacings, and input_vector arrays
peaks locations found in the profile
"""
max_numpeaks = kwargs.get('max_numpeaks', 75)
scale_range = kwargs.get('dspace_range',[0.5, 6])
squished_scale = [scale_range[0] < x < scale_range[1] for x in scale]
print(squished_scale)
filter_size_default=max(int(scale[squished_scale].shape[0]/50),3)
print(filter_size_default)
kwargs['filter_size'] = kwargs.get('filter_size',filter_size_default)
print('filter size')
print(kwargs['filter_size'])
# find the location of the peaks in pixel space
peaks = pfnd.vote_peaks(profile[squished_scale], **kwargs)
peaks_d = scale[squished_scale][peaks>0]
scale_d = scale
thresh = 0
orig_length = len(peaks_d)
if len(peaks_d) > max_numpeaks:
print(len(peaks_d))
print("WARNING: {} peaks were detected," +
" some of the peaks will be trimmed."+
"\nFor best results. Please check calibration or run manual peak detection.".format(len(peaks_d)))
srt_peaks = np.sort(peaks[peaks>0])
thresh = srt_peaks[len(peaks_d)-max_numpeaks]
if len(scale[squished_scale][peaks>thresh]) ==0 and thresh>0:
thresh -=1
peaks_d = scale[squished_scale][peaks>thresh]
print(len(peaks_d))
print(thresh)
print(srt_peaks)
if len(peaks_d) == orig_length:
print("WARNING: reduction based on votes unsuccessful. try other parameters")
elif len(peaks_d)> max_numpeaks:
print("WARNING: partial reduction to {} peaks.".format(len(peaks_d)))
peak_locs = {"d_spacing":scale[squished_scale][peaks>thresh],
"vec":[int(round((x-.5)*164))-1 for x in peaks_d]
}
# Display the data
peaks_h = pfnd.plot_peaks(profile[squished_scale], scale[squished_scale], peaks, thresh, **kwargs)
if len(peak_locs['vec']) <= 4:
print("WARNING: only {} peaks were detected," +
" this is lower than the recommended 4+ peaks needed"+
"\nFor best results. Please check calibration.".format(len(peaks_d)))
return peak_locs, peaks_h
|
3d5cf4a5d559d54aa061d4abd9a02efb96c03d05
| 3,643,390
|
def empty_items(item_list, total):
"""
Returns a list of null objects. Useful when you want to always show n
results and you have a list of < n.
"""
list_length = len(item_list)
expected_total = int(total)
if list_length != expected_total:
return range(0, expected_total-list_length)
return ''
|
12848fe61457b2d138a2fcd074fb6ec6d09cbaf5
| 3,643,391
|
import struct
def _read_string(fp):
"""Read the next sigproc-format string in the file.
Parameters
----------
fp : file
file object to read from.
Returns
-------
str
read value from the file
"""
strlen = struct.unpack("I", fp.read(struct.calcsize("I")))[0]
return fp.read(strlen).decode()
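For symmetry, a hypothetical writer for the same length-prefixed format:
import struct

def _write_string(fp, value: str) -> None:
    # Hypothetical inverse of _read_string: 4-byte length prefix, then bytes.
    encoded = value.encode()
    fp.write(struct.pack("I", len(encoded)))
    fp.write(encoded)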
|
346a65e6be15f593c91dde34cb45c53cb5731877
| 3,643,392
|
def add_optional_parameters(detail_json, detail, rating, rating_n, popularity, current_popularity, time_spent, detailFromGoogle={}):
"""
check for optional return parameters and add them to the result json
:param detail_json:
:param detail:
:param rating:
:param rating_n:
:param popularity:
:param current_popularity:
:param time_spent:
:return:
"""
if rating:
detail_json["rating"] = rating
elif "rating" in detail:
detail_json["rating"] = detail["rating"]
if rating_n:
detail_json["rating_n"] = rating_n
if "international_phone_number" in detail:
detail_json["international_phone_number"] = detail["international_phone_number"]
if current_popularity:
detail_json["current_popularity"] = current_popularity
if popularity:
popularity, wait_times = get_popularity_for_day(popularity)
detail_json["populartimes"] = popularity
if wait_times:
detail_json["time_wait"] = wait_times
if time_spent:
detail_json["time_spent"] = time_spent
if ("name" in detailFromGoogle):
detail_json.update(detailFromGoogle)
return detail_json
|
176fab2255f9302c945cb29ac5f9513da368a57e
| 3,643,393
|
def build_get_string_with_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get string dictionary value {"0": "foo", "1": null, "2": "foo2"}.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"str": "str" # Optional.
}
"""
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/dictionary/prim/string/foo.null.foo2')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
|
976b20770b74b4cf8504f673e66aec94fbf55c2b
| 3,643,394
|
def get_db_url(db_host, db_name, db_user, db_pass):
"""
Helper function for creating the "pyodbc" connection string.
@see /etc/freetds.conf
@see http://docs.sqlalchemy.org/en/latest/dialects/mssql.html
@see https://code.google.com/p/pyodbc/wiki/ConnectionStrings
"""
params = parse.quote(
"Driver={{FreeTDS}};Server={};Port=1433;"
"Database={};UID={};PWD={};"
.format(db_host, db_name, db_user, db_pass))
return 'mssql+pyodbc:///?odbc_connect={}'.format(params)
|
f0ed18ac321fcc9e93b038dc2f3905af52191c7b
| 3,643,395
|
import torch
def boxes_iou3d_cpu(boxes_a, boxes_b, box_mode='wlh', rect=False, need_bev=False):
"""
Input (torch):
boxes_a: (N, 7) [x, y, z, h, w, l, ry], torch tensor with type float32
boxes_b: (M, 7) [x, y, z, h, w, l, ry], torch tensor with type float32
rect: True/False means boxes in camera/velodyne coord system.
Output:
iou_3d: (N, M)
"""
w_index, l_index, h_index = box_mode.index('w') + 3, box_mode.index('l') + 3, box_mode.index('h') + 3
boxes_a_bev = utils.boxes3d_to_bev_torch(boxes_a, box_mode, rect)
boxes_b_bev = utils.boxes3d_to_bev_torch(boxes_b, box_mode, rect)
overlaps_bev = torch.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_() # (N, M)
iou3d_cuda.boxes_overlap_bev_cpu(boxes_a_bev.contiguous(), boxes_b_bev.contiguous(), overlaps_bev)
# bev iou
area_a = (boxes_a[:, w_index] * boxes_a[:, l_index]).view(-1, 1) # (N, 1)
area_b = (boxes_b[:, w_index] * boxes_b[:, l_index]).view(1, -1) # (1, M) -> broadcast (N, M)
iou_bev = overlaps_bev / torch.clamp(area_a + area_b - overlaps_bev, min=1e-7)
# height overlap
if rect:
boxes_a_height_min = (boxes_a[:, 1] - boxes_a[:, h_index]).view(-1, 1) # y - h
boxes_a_height_max = boxes_a[:, 1].view(-1, 1) # y
boxes_b_height_min = (boxes_b[:, 1] - boxes_b[:, h_index]).view(1, -1)
boxes_b_height_max = boxes_b[:, 1].view(1, -1)
else:
boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, h_index]).view(-1, 1) # z - h, (N, 1)
boxes_a_height_max = boxes_a[:, 2].view(-1, 1) # z
boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, h_index]).view(1, -1) # (1, M)
boxes_b_height_max = boxes_b[:, 2].view(1, -1)
max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min) # (N, 1)
min_of_max = torch.min(boxes_a_height_max, boxes_b_height_max) # (1, M)
overlaps_h = torch.clamp(min_of_max - max_of_min, min=0) # (N, M)
# 3d iou
overlaps_3d = overlaps_bev * overlaps_h # broadcast: (N, M)
vol_a = (boxes_a[:, h_index] * boxes_a[:, w_index] * boxes_a[:, l_index]).view(-1, 1) # (N, 1)
vol_b = (boxes_b[:, h_index] * boxes_b[:, w_index] * boxes_b[:, l_index]).view(1, -1) # (1, M) -> broadcast (N, M)
iou3d = overlaps_3d / torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-7)
if need_bev:
return iou3d, iou_bev
return iou3d
|
e3b40e2c4c35a7f423739791cc9268ecd22cdf42
| 3,643,396
|
def make_attrstring(attr):
"""Returns an attribute string in the form key="val" """
attrstring = ' '.join(['%s="%s"' % (k, v) for k, v in attr.items()])
return '%s%s' % (' ' if attrstring != '' else '', attrstring)
|
fbaf2b763b4b1f4399c45c3a19698d0602f0b224
| 3,643,397
|
import requests
from bs4 import BeautifulSoup
from datetime import datetime
def depreciated_get_paste(paste_tup):
"""
This takes a tuple consisting of href from a paste link and a name that identify a pastebin paste.
It scrapes the page for the pastes content.
:param paste_tup: (string, string)
:return: Paste if successful or False
"""
href, name = paste_tup
# Form the url from the href and perform GET request
paste_url = 'http://pastebin.com' + href
paste_page = requests.get(paste_url)
# Collect the paste details from paste page
if paste_page.status_code == 200:
text = paste_page.text
soup = BeautifulSoup(text, 'html.parser')
# soup.textarea.get_text() return the paste content
paste = Paste(url="http://www.pastebin.com"+href, name=name, content=soup.textarea.get_text(), datetime=datetime.now())
return paste
# Return False if the scrape failed
return False
|
6f3620354827998eade57b989c503be4f093b6d8
| 3,643,399
|
from typing import List
from typing import Dict
def delete_nodes_list(
nodes: List[str],
credentials: HTTPBasicCredentials = Depends(
check_credentials
), # pylint: disable=unused-argument
) -> Dict[str, str]:
"""Deletes a list of nodes (that are discoverables with lldp) to the db.
Exple of simplest call :
curl -X DELETE --user u:p -H "Content-type: application/json" \
http://127.0.0.1/api/nodes \
-d '["node1", "node2", "node3"]'"""
for node in nodes:
delete_node(node)
return {"response": "Ok"}
|
1b7d4e25e67f1a0d2a5eec23b12b1ca87242a066
| 3,643,400
|
def index():
"""
Application Home page
"""
module_name = settings.modules[c].get("name_nice")
response.title = module_name
return {"module_name": module_name,
}
|
527aa4b19eff87bb5c6fde6c0578ced5e876f59b
| 3,643,401
|
import networkx as nx
import numpy as np
def is_CW_in_extension(G):
"""
Returns True if G is 'CW in extension', otherwise it returns False.
G: directed graph of type 'networkx.DiGraph'
EXAMPLE
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
G=nx.DiGraph()
e_list = [(0,1),(0,2),(0,3),(0,4),(1,2),(1,3),(1,4)]
G.add_edges_from(e_list)
plt.figure(1)
nx.draw_circular(G,with_labels=True)
plt.show()
print("is_CW_in_extension(G):",is_CW_in_extension(G))
G.remove_edge(0,1)
plt.figure(1)
nx.draw_circular(G,with_labels=True)
plt.show()
print("is_CW_in_extension(G):",is_CW_in_extension(G))
G.remove_edge(0,3)
plt.figure(1)
nx.draw_circular(G,with_labels=True)
plt.show()
print("is_CW_in_extension(G):",is_CW_in_extension(G))
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
"""
assert type(G) is nx.DiGraph, "'G' has to be of type 'networkx.DiGraph'."
nodes = list(G.nodes)
m = len(nodes)
nr_beaten_list = np.zeros(m) #nr_beaten_list[i] is the number of nodes v with an edge i->v in G if i is NOT beaten by any other node. Otherwise its -1
for i in range(0,m):
for j in range(0,m):
if i != j and G.has_edge(nodes[i],nodes[j]) and nr_beaten_list[i] != -1:
nr_beaten_list[i]+=1
if i != j and G.has_edge(nodes[j],nodes[i]):
nr_beaten_list[i]=-1
#print(nr_beaten_list)
if len(np.where(nr_beaten_list==m-1)[0]) >0: #G has a CW
return(True)
buf = np.where(nr_beaten_list==m-2)[0]
if len(buf)==2:
[i0,i1] = buf
if not G.has_edge(i0,i1) and not G.has_edge(i1,i0): # There exist i0, i1 which are connected to every other node and i0 is not connected to i1
return(True)
return(False)
|
3a1af65be274d23de16cdc15253185a0bbeda0ec
| 3,643,403
|
from typing import Callable
from typing import Iterable
from typing import List
def get_index_where(condition: Callable[..., bool], iterable: Iterable) -> List[int]:
"""Return index values where `condition` is `True`."""
return [idx for idx, item in enumerate(iterable) if condition(item)]
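# Usage sketch: index positions of the even entries.
assert get_index_where(lambda x: x % 2 == 0, [1, 2, 3, 4]) == [1, 3]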
|
6f99086730dfc2ab1f87df90632bc637fc6f2b93
| 3,643,404
|
def geom_crossbar(mapping=None, *, data=None, stat=None, position=None, show_legend=None, sampling=None, tooltips=None,
fatten=None,
**other_args):
"""
Display bars with horizontal median line.
Parameters
----------
mapping : `FeatureSpec`
Set of aesthetic mappings created by `aes()` function.
Aesthetic mappings describe the way that variables in the data are
mapped to plot "aesthetics".
data : dict or `DataFrame`
The data to be displayed in this layer. If None, the default, the data
is inherited from the plot data as specified in the call to ggplot.
stat : str, default='identity'
The statistical transformation to use on the data for this layer, as a string.
Supported transformations: 'identity' (leaves the data unchanged),
'count' (counts number of points with same x-axis coordinate),
'bin' (counts number of points with x-axis coordinate in the same bin),
'smooth' (performs smoothing - linear default),
'density' (computes and draws kernel density estimate).
position : str or `FeatureSpec`
Position adjustment, either as a string ('identity', 'stack', 'dodge', ...),
or the result of a call to a position adjustment function.
show_legend : bool, default=True
False - do not show legend for this layer.
sampling : `FeatureSpec`
Result of the call to the `sampling_xxx()` function.
Value None (or 'none') will disable sampling for this layer.
tooltips : `layer_tooltips`
Result of the call to the `layer_tooltips()` function.
Specifies appearance, style and content.
fatten : float, default=2.5
A multiplicative factor applied to size of the middle bar.
other_args
Other arguments passed on to the layer.
These are often aesthetics settings used to set an aesthetic to a fixed value,
like color='red', fill='blue', size=3 or shape=21.
They may also be parameters to the paired geom/stat.
Returns
-------
`LayerSpec`
Geom object specification.
Notes
-----
`geom_crossbar()` represents a vertical interval, defined by `x`, `ymin`, `ymax`.
    The median is represented by a horizontal line.
`geom_crossbar()` understands the following aesthetics mappings:
- x : x-axis coordinates.
- ymin : lower bound for error bar.
- middle : position of median bar.
- ymax : upper bound for error bar.
- alpha : transparency level of a layer. Understands numbers between 0 and 1.
- color (colour) : color of a geometry lines. Can be continuous or discrete. For continuous value this will be a color gradient between two colors.
- fill : color of geometry filling.
- size : lines width.
- width : width of a bar.
- linetype : type of the line. Codes and names: 0 = 'blank', 1 = 'solid', 2 = 'dashed', 3 = 'dotted', 4 = 'dotdash', 5 = 'longdash', 6 = 'twodash'.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 10
from lets_plot import *
LetsPlot.setup_html()
data = {
'x': ['a', 'b', 'c', 'd'],
'ymin': [5, 7, 3, 5],
'middle': [6.5, 9, 4.5, 7],
'ymax': [8, 11, 6, 9],
}
ggplot(data, aes(x='x')) + \\
geom_crossbar(aes(ymin='ymin', middle='middle', ymax='ymax'))
|
.. jupyter-execute::
:linenos:
:emphasize-lines: 14-15
import numpy as np
import pandas as pd
from lets_plot import *
LetsPlot.setup_html()
n = 800
cat_list = {c: np.random.uniform(3) for c in 'abcdefgh'}
np.random.seed(42)
x = np.random.choice(list(cat_list.keys()), n)
y = np.array([cat_list[c] for c in x]) + np.random.normal(size=n)
df = pd.DataFrame({'x': x, 'y': y})
err_df = df.groupby('x').agg({'y': ['min', 'median', 'max']}).reset_index()
err_df.columns = ['x', 'ymin', 'ymedian', 'ymax']
ggplot() + \\
geom_crossbar(aes(x='x', ymin='ymin', middle='ymedian', ymax='ymax', fill='x'), \\
data=err_df, width=.6, fatten=5) + \\
geom_jitter(aes(x='x', y='y'), data=df, width=.3, shape=1, color='black', alpha=.5)
"""
return _geom('crossbar',
mapping=mapping,
data=data,
stat=stat,
position=position,
show_legend=show_legend,
sampling=sampling,
tooltips=tooltips,
fatten=fatten,
**other_args)
|
27f1faf1dea99b033e9ac5ab4dbc52ea2865934c
| 3,643,405
|
from typing import Union
def chess_to_coordinate(pos: str) -> Union[Coordinate, Move]:
"""
    Arguments:
        pos: a square or move in algebraic coordinate notation, e.g. 'e2'
            for a square, 'e2e4' for a move, or 'e7e8q' for a promotion.
"""
if len(pos) == 2:
return Coordinate(int(pos[1]) - 1, file_dict[pos[0]])
else:
if len(pos) == 5:
if pos[4] == 'n':
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]), False, MoveType.N_PROMO)
elif pos[4] == 'b':
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]), False, MoveType.B_PROMO)
elif pos[4] == 'r':
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]), False, MoveType.R_PROMO)
elif pos[4] == 'q':
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]), False, MoveType.Q_PROMO)
else:
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]))
|
f55e8c4d349419a5477d5fc3c5390d133b89cdf7
| 3,643,406
|
def get_db_session():
"""
    Get the db session from g.
    If it does not exist, create a new session, cache it in g, and return it.
    :return: the db session
"""
session = get_g_cache('_flaskz_db_session')
if session is None:
session = DBSession()
set_g_cache('_flaskz_db_session', session)
return session
|
1254a99c3c1dd3fe71f1a9099b9937df46754c33
| 3,643,407
|
def make_tril_scale(
loc=None,
scale_tril=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None):
"""Creates a LinOp representing a lower triangular matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
    scale_tril: Floating-point `Tensor` representing the lower triangular
      matrix. `scale_tril` has shape [N1, N2, ...  k, k], which represents a
      k x k lower triangular matrix.
When `None` no `scale_tril` term is added to the LinOp.
The upper triangular elements above the diagonal are ignored.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to the LinOp.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix.
When `scale_identity_multiplier = scale_diag = scale_tril = None` then
`scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
assert_positive: Python `bool` indicating whether LinOp should be checked
for being positive definite.
name: Python `str` name given to ops managed by this object.
Returns:
`LinearOperator` representing a lower triangular matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
array_ops.matrix_diag_part(x),
message="diagonal part must be positive"),
], x)
return control_flow_ops.with_dependencies([
check_ops.assert_none_equal(
array_ops.matrix_diag_part(x),
array_ops.zeros([], x.dtype),
message="diagonal part must be non-zero"),
], x)
with ops.name_scope(name, "make_tril_scale",
values=[loc, scale_diag, scale_identity_multiplier]):
loc = _convert_to_tensor(loc, name="loc")
scale_tril = _convert_to_tensor(scale_tril, name="scale_tril")
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag")
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier")
if scale_tril is not None:
scale_tril = array_ops.matrix_band_part(scale_tril, -1, 0) # Zero out TriU.
tril_diag = array_ops.matrix_diag_part(scale_tril)
if scale_diag is not None:
tril_diag += scale_diag
if scale_identity_multiplier is not None:
tril_diag += scale_identity_multiplier[..., array_ops.newaxis]
scale_tril = array_ops.matrix_set_diag(scale_tril, tril_diag)
return linalg.LinearOperatorLowerTriangular(
tril=_maybe_attach_assertion(scale_tril),
is_non_singular=True,
is_self_adjoint=False,
is_positive_definite=assert_positive)
return make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
shape_hint=shape_hint,
validate_args=validate_args,
assert_positive=assert_positive,
name=name)
|
137a0ac84e7b2fab71f1630ae1bd1b0b24fe8879
| 3,643,408
|
def remove_punctuation(word):
"""Remove all punctuation from the word (unicode). Note that the `translate`
method is used, and we assume unicode inputs. The str method has a different
`translate` method, so if you end up working with strings, you may want to
revisit this method.
"""
return word.translate(TRANSLATION_TABLE)
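# A minimal sketch of how TRANSLATION_TABLE might be built; the name comes
# from the code above, but its real definition lives elsewhere in the module.
# This version maps every Unicode punctuation codepoint to None, so
# str.translate() simply drops it.
import sys
import unicodedata
TRANSLATION_TABLE = dict.fromkeys(
    cp for cp in range(sys.maxunicode + 1)
    if unicodedata.category(chr(cp)).startswith('P')
)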
|
46476b6e4480a2f067c2370fd378778b452a1a3e
| 3,643,409
|
import numpy as np
def prepare_filter_weights_slice_conv_2d(weights):
"""Change dimension order of 2d filter weights to the one used in fdeep"""
assert len(weights.shape) == 4
return np.moveaxis(weights, [0, 1, 2, 3], [1, 2, 0, 3]).flatten()
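# Usage sketch (assumption: Keras-style kernels of shape
# (height, width, in_channels, out_channels)); moveaxis reorders them to
# (in_channels, height, width, out_channels) before flattening.
w = np.zeros((3, 3, 16, 32))
flat = prepare_filter_weights_slice_conv_2d(w)
assert flat.shape == (3 * 3 * 16 * 32,)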
|
2b6ca65d68d4407ac0a7744efe01a90dc5423870
| 3,643,410
|
from aiohttp import web
async def hello(request):
"""Hello page containing sarafan node metadata.
`version` contains sarafan node version.
`content_service_id` — contains service_id of content node
:param request:
:return:
"""
return web.json_response(await request.app['sarafan'].hello())
|
4a8b82a525082a03009d087a18042574e19c1796
| 3,643,411
|
import yaml
from packaging.version import Version
def load_capabilities(
base: str = "docassemble.ALWeaver", minimum_version="1.5", include_playground=False
):
"""
Load and return a dictionary containing all advertised capabilities matching
the specified minimum version, and optionally include capabilities that were
advertised from a namespace matching docassemble.playground*. The local
capabilities will always be the default configuration.
"""
current_package_name = _package_name()
this_yaml = path_and_mimetype(
f"{current_package_name}:data/sources/configuration_capabilities.yml"
)[0]
weaverdata = DAStore(base=base)
published_configuration_capabilities = (
weaverdata.get("published_configuration_capabilities") or {}
)
try:
with open(this_yaml) as f:
this_yaml_contents = f.read()
first_file = list(yaml.safe_load_all(this_yaml_contents))[0]
capabilities = {"Default configuration": first_file}
except:
capabilities = {}
for key in list(published_configuration_capabilities.keys()):
# Filter configurations based on minimum published version
if isinstance(published_configuration_capabilities[key], tuple) and Version(
published_configuration_capabilities[key][1]
) < Version(minimum_version):
log(
"Skipping published weaver configuration {key}:{published_configuration_capabilities[key]} because it is below the minimum version {minimum_version}. Consider updating the {key} package."
)
del published_configuration_capabilities[key]
# Filter out capability files unless the package is installed system-wide
if not include_playground and key.startswith("docassemble.playground"):
del published_configuration_capabilities[key]
for package_name in published_configuration_capabilities:
# Don't add the current package twice
if not current_package_name == package_name:
path = path_and_mimetype(
f"{package_name}:data/sources/{published_configuration_capabilities[package_name][0]}"
)[0]
try:
with open(path) as f:
yaml_contents = f.read()
capabilities[package_name] = list(yaml.safe_load_all(yaml_contents))[0]
except:
log(f"Unable to load published Weaver configuration file {path}")
return capabilities
|
3bb12fdbf4fc4340a042f0685a4917a7b1c1ed85
| 3,643,413
|
def build_graph(
config,
train_input_fn, test_input_fn, model_preprocess_fn, model):
"""Builds the training graph.
Args:
config: Training configuration.
train_input_fn: Callable returning the training data as a nest of tensors.
test_input_fn: Callable returning the test data as a nest of tensors.
model_preprocess_fn: Image pre-processing that should be combined with
the model for adversarial evaluation.
model: Callable taking (preprocessed_images, is_training, test_local_stats)
and returning logits.
Returns:
loss: 0D tensor containing the loss to be minimised.
train_measures: Dict (with string keys) of 0D tensors containing
training measurements.
test_measures: Dict (with string keys) of 0D tensors containing
test set evaluation measurements.
init_step_fn: Function taking (session, initial_step_val)
to be invoked to initialise the global training step.
"""
global_step = tf.train.get_or_create_global_step()
optimizer = _optimizer(config.optimizer, global_step)
model_with_preprocess = _model_with_preprocess_fn(
model, model_preprocess_fn)
# Training step.
loss, train_logits, train_adv_logits, train_labels = _train_step(
config.train, model_with_preprocess, global_step, optimizer,
train_input_fn())
train_measures = {
'acc': _top_k_accuracy(train_labels, train_logits),
}
if config.train.adversarial_loss_weight > 0.:
train_measures.update({
'adv_acc': _top_k_accuracy(train_labels, train_adv_logits),
})
# Test evaluation.
with tf.name_scope('test_accuracy'):
test_logits, test_adv_logits, test_labels = _test_step(
config.train, model_with_preprocess, test_input_fn())
test_measures = {
'acc': _top_k_accuracy(test_labels, test_logits),
'adv_acc': _top_k_accuracy(test_labels, test_adv_logits),
}
initial_step = tf.placeholder(shape=(), dtype=tf.int64)
init_global_step_op = tf.assign(global_step, initial_step)
def init_step_fn(session, initial_step_val):
session.run(init_global_step_op, feed_dict={initial_step: initial_step_val})
return loss, train_measures, test_measures, init_step_fn
|
4eff7555b2383db0870d5e63467e2d68a3336ece
| 3,643,414
|
import functools
import traceback
import time
def execli_deco():
""" This is a decorating function to excecute a client side Earth Engine
function and retry as many times as needed.
Parameters can be set by modifing module's variables `_execli_trace`,
`_execli_times` and `_execli_wait`
:Example:
.. code:: python
from geetools.tools import execli_deco
import ee
# TRY TO GET THE INFO OF AN IMAGE WITH DEFAULT PARAMETERS
@execli_deco()
def info():
        # THIS IMAGE DOESN'T EXIST SO IT WILL THROW AN ERROR
img = ee.Image("wrongparam")
return img.getInfo()
# TRY WITH CUSTOM PARAM (2 times 5 seconds and traceback)
@execli_deco(2, 5, True)
def info():
        # THIS IMAGE DOESN'T EXIST SO IT WILL THROW AN ERROR
img = ee.Image("wrongparam")
return img.getInfo()
    :param times: number of times it will try to execute the function
    :type times: int
    :param wait: waiting time before executing the function again
    :type wait: int
:param trace: print the traceback
:type trace: bool
"""
def wrap(f):
'''
if trace is None:
global trace
trace = _execli_trace
if times is None:
global times
times = _execli_times
if wait is None:
global wait
wait = _execli_wait
try:
times = int(times)
wait = int(wait)
except:
print(type(times))
print(type(wait))
raise ValueError("'times' and 'wait' parameters must be numbers")
'''
@functools.wraps(f)
def wrapper(*args, **kwargs):
trace = _execli_trace
times = _execli_times
wait = _execli_wait
r = range(times)
for i in r:
try:
result = f(*args, **kwargs)
except Exception as e:
print("try n°", i, "ERROR:", e)
if trace:
traceback.print_exc()
if i < r[-1] and wait > 0:
print("waiting {} seconds...".format(str(wait)))
time.sleep(wait)
elif i == r[-1]:
raise RuntimeError("An error occured tring to excecute"\
" the function '{0}'".format(f.__name__))
else:
return result
return wrapper
return wrap
|
c245cd30f372e6d00895f42ba26936f2fb92c257
| 3,643,415
|
import uuid
import tensorflow as tf
def lstm_with_backend_selection(inputs, init_h, init_c, kernel,
recurrent_kernel, bias, mask, time_major,
go_backwards, sequence_lengths,
zero_output_for_mask):
"""Call the LSTM with optimized backend kernel selection.
  Under the hood, this function will create two TF functions, one with the most
  generic kernel that can run on all device conditions, and the second one with
cuDNN specific kernel, which can only run on GPU.
The first function will be called with normal_lstm_params, while the second
function is not called, but only registered in the graph. The Grappler will
do the proper graph rewrite and swap the optimized TF function based on the
device placement.
Args:
inputs: Input tensor of LSTM layer.
init_h: Initial state tensor for the cell output.
init_c: Initial state tensor for the cell hidden state.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
mask: Boolean tensor for mask out the steps within sequence.
An individual `True` entry indicates that the corresponding timestep
should be utilized, while a `False` entry indicates that the corresponding
timestep should be ignored.
time_major: Boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
Returns:
List of output tensors, same as standard_lstm.
"""
params = {
'inputs': inputs,
'init_h': init_h,
'init_c': init_c,
'kernel': kernel,
'recurrent_kernel': recurrent_kernel,
'bias': bias,
'mask': mask,
'time_major': time_major,
'go_backwards': go_backwards,
'sequence_lengths': sequence_lengths,
'zero_output_for_mask': zero_output_for_mask,
}
def gpu_lstm_with_fallback(inputs, init_h, init_c, kernel, recurrent_kernel,
bias, mask, time_major, go_backwards,
sequence_lengths, zero_output_for_mask):
"""Use cuDNN kernel when mask is none or strictly right padded."""
if mask is None:
return gpu_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
def cudnn_lstm_fn():
return gpu_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
def stardard_lstm_fn():
return standard_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths,
zero_output_for_mask=zero_output_for_mask)
return tf.cond(
gru_lstm_utils.is_cudnn_supported_inputs(mask, time_major),
true_fn=cudnn_lstm_fn,
false_fn=stardard_lstm_fn)
if gru_lstm_utils.use_new_gru_lstm_impl():
# Chooses the implementation dynamically based on the running device.
(last_output, outputs, new_h, new_c,
runtime) = tf.__internal__.execute_fn_for_device(
{
gru_lstm_utils.CPU_DEVICE_NAME:
lambda: standard_lstm(**params),
gru_lstm_utils.GPU_DEVICE_NAME:
lambda: gpu_lstm_with_fallback(**params)
}, lambda: standard_lstm(**params))
else:
# Each time a `tf.function` is called, we will give it a unique
# identifiable API name, so that Grappler won't get confused when it
# sees multiple LSTM layers added into same graph, and it will be able
# to pair up the different implementations across them.
api_name = 'lstm_' + str(uuid.uuid4())
supportive_attribute = {
'time_major': time_major,
'go_backwards': go_backwards,
}
defun_standard_lstm = gru_lstm_utils.generate_defun_backend(
api_name, gru_lstm_utils.CPU_DEVICE_NAME, standard_lstm,
supportive_attribute)
defun_gpu_lstm = gru_lstm_utils.generate_defun_backend(
api_name, gru_lstm_utils.GPU_DEVICE_NAME, gpu_lstm_with_fallback,
supportive_attribute)
# Call the normal LSTM impl and register the cuDNN impl function. The
# grappler will kick in during session execution to optimize the graph.
last_output, outputs, new_h, new_c, runtime = defun_standard_lstm(**params)
gru_lstm_utils.function_register(defun_gpu_lstm, **params)
return last_output, outputs, new_h, new_c, runtime
|
4c45709265de5385399a7b9bff0aeb4e9a4d7b17
| 3,643,416
|
import numpy as np
def _build_stack_from_3d(recipe, input_folder, fov=0, nb_r=1, nb_c=1):
"""Load and stack 3-d tensors.
Parameters
----------
recipe : dict
Map the images according to their field of view, their round,
their channel and their spatial dimensions. Only contain the keys
'fov', 'r', 'c', 'z', 'ext' or 'opt'.
input_folder : str
Path of the folder containing the images.
fov : int
Index of the fov to build.
    nb_r : int
        Number of round files to stack in order to get a 5-d tensor.
    nb_c : int
        Number of channel files to stack in order to get a 4-d tensor.
Returns
-------
tensor_5d : np.ndarray, np.uint
Tensor with shape (r, c, z, y, x).
"""
# load and stack successively channel elements then round elements
tensors_4d = []
for r in range(nb_r):
# load and stack channel elements (3-d tensors)
tensors_3d = []
for c in range(nb_c):
path = get_path_from_recipe(recipe, input_folder, fov=fov, r=r,
c=c)
tensor_3d = read_image(path)
tensors_3d.append(tensor_3d)
# stack 3-d tensors in 4-d
tensor_4d = np.stack(tensors_3d, axis=0)
tensors_4d.append(tensor_4d)
# stack 4-d tensors in 5-d
tensor_5d = np.stack(tensors_4d, axis=0)
return tensor_5d
|
6cb4e567324cb3404d6e373b3f9a00d3ccdd51ef
| 3,643,417
|
def view_menu(request):
"""Admin user view all the reservations."""
menus = Menu.objects.all()
return render(request,
"super/view_menu.html",
{'menus': menus})
|
7b8244a315f2da0794a80f71cf73517e81f614e0
| 3,643,418
|
def _get_hdfs_dirs_by_date(physical_table_name, date):
"""
    Get the list of HDFS data directories for the given date.
    :param physical_table_name: physical table name
    :param date: date string in YYYYMMDD format
    :return: list of data directories on HDFS
"""
return [f"{physical_table_name}/{date[0:4]}/{date[4:6]}/{date[6:8]}/{hour}" for hour in DAY_HOURS]
|
6581f81ebcf9051ccf97ade02fc80eeba46e0e78
| 3,643,419
|
import json
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.http import HttpResponse, Http404
from django.template.loader import render_to_string
def indeed_jobs(request, category_id):
"""
Load Indeed jobs via ajax.
"""
if request.is_ajax() and request.method == 'POST':
per_page = 10
        html = []
if category_id == '0':
all_jobs = IndeedJob.objects.all()
else:
all_jobs = IndeedJob.objects.filter(category=category_id)
paginator = Paginator(all_jobs, per_page)
page = request.GET.get('page')
try:
jobs = paginator.page(page)
except PageNotAnInteger:
jobs = paginator.page(1)
except EmptyPage:
jobs = paginator.page(paginator.num_pages)
for job in jobs:
html.append(render_to_string('indeed-job.html', {'job': job}))
context = {
'html': u''.join(html),
'page': jobs.number,
}
if jobs.has_next(): context.update({'next_page': jobs.next_page_number()})
return HttpResponse(json.dumps(context), content_type='application/json')
raise Http404
|
12cf21f9ecad672e78715ef9687ad2e69d5ea963
| 3,643,420
|
def iinsertion_sort(arr, order=ASCENDING):
"""Iterative implementation of insertion sort.
:param arr: input list
:param order: sorting order i.e "asc" or "desc"
:return: list sorted in the order defined
"""
operator = SORTING_OPERATORS.get(order.lower(), GREATER_THAN)
for i in range(1, len(arr)):
position = i - 1
value = arr[i]
while position >= 0 and operator(arr[position], value):
arr[position + 1] = arr[position]
position -= 1
arr[position + 1] = value
return arr
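# Hypothetical context sketch: the module presumably defines these
# constants before the function; operator.gt makes "asc" an ascending sort.
import operator
ASCENDING = "asc"
GREATER_THAN = operator.gt
SORTING_OPERATORS = {"asc": operator.gt, "desc": operator.lt}
assert iinsertion_sort([3, 1, 2]) == [1, 2, 3]
assert iinsertion_sort([3, 1, 2], order="desc") == [3, 2, 1]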
|
8698fbb500bfad3cb2e6964112d46ef8151c1e89
| 3,643,421
|
def actor_files_paths():
"""
Returns the file paths that are bundled with the actor. (Path to the content of the actor's file directory).
"""
return current_actor().actor_files_paths
|
2ec9505eceb2da78aee668ff044e565374aa3a1c
| 3,643,422
|
import struct
def parse_table(data: bytes, fields: list) -> dict:
"""Return a Python dictionary created from the bytes *data* of
an ISIS cube table (presumably extracted via read_table_data()),
    and described by the *fields* list.
Please be aware that this does not perform masking of the ISIS
special pixels that may be present in the table, and simply
returns them as the appropriate int or float values.
The *fields* list must be a list of dicts, each of which must
contain the following keys: 'Name', 'Type', and 'Size'. The
'Name' key can be any string (and these will end up being the
keys in the returned dict). 'Size' is the size in bytes of the
field, and 'Type' is a string that must be one of 'Integer',
'Double', 'Real', or 'Text'.
If you are using the pvl library, the get_table() function will
be easier to use.
"""
row_len = 0
for f in fields:
row_len += data_sizes[f["Type"]] * int(f["Size"])
if len(data) % row_len != 0:
raise ValueError(
f"The total sizes of each field ({row_len}) do not evenly divide "
f"into the size of the data ({len(data)}), so something is off."
)
# Parse the binary data
results = {f["Name"]: [] for f in fields}
offset = 0
while offset < len(data):
for f in fields:
if f["Type"] == "Text":
field_data = data[offset : offset + int(f["Size"])].decode(
encoding="latin_1"
)
else:
data_fmt = data_formats[f["Type"]] * int(f["Size"])
f_data = struct.unpack_from(data_fmt, data, offset)
if len(f_data) == 1:
field_data = f_data[0]
else:
field_data = list(f_data)
results[f["Name"]].append(field_data)
offset += data_sizes[f["Type"]] * int(f["Size"])
return results
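# Hypothetical sketch of the two lookup tables the function assumes; the
# names come from the code above, with values matching the ISIS field types
# and the corresponding struct format characters.
data_sizes = {"Integer": 4, "Double": 8, "Real": 4, "Text": 1}
data_formats = {"Integer": "i", "Double": "d", "Real": "f"}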
|
3727a37d619c77c6789e1d11479ecfd67b814766
| 3,643,423
|
import numpy as np
import scipy
from pypeit import msgs, utils
def gridtilts(shape, thismask, slit_cen, coeff2, func2d, spec_order, spat_order, pad_spec=30, pad_spat = 5, method='interp'):
"""
    Parameters
    ----------
    shape: tuple
        Shape (nspec, nspat) of the image.
    thismask: ndarray, bool
        Mask of the pixels that belong to the slit.
    slit_cen: ndarray, float
        Spatial position of the slit center at each spectral pixel.
    coeff2: ndarray, float
        2-d tilt fit coefficients produced by fit_tilts.
    func2d: str
        Name of the 2-d fitting function used to evaluate coeff2.
    spec_order, spat_order: int
        Spectral and spatial polynomial orders of the final 2-d tilts fit.
    pad_spec, pad_spat: int
        Padding in the spectral and spatial directions.
    method: str
        Gridding method, either 'interp' (the default) or 'hist2d'.
    Returns
    -------
    coeff2_tilts: ndarray, float
        Coefficients of the final 2-d tilts fit.
    tilts: ndarray, float
       Image indicating how spectral pixel locations move across the image. This output is used in the pipeline.
"""
# Compute the tilts image
nspec, nspat = shape
xnspecmin1 = float(nspec-1)
xnspatmin1 = float(nspat-1)
spec_vec = np.arange(nspec)
spat_vec = np.arange(nspat)
# JFH This histogram method is not preferred, since it basically does NGP. It is however super fast, so for big images
# it is useful to have it
if 'hist2d' in method:
oversamp_spec=5
oversamp_spat=3
spec_ind, spat_ind = np.where(thismask)
min_spec = spec_ind.min() - pad_spec
max_spec = spec_ind.max() + pad_spec
num_spec = max_spec - min_spec + 1
min_spat = spat_ind.min() - pad_spat
max_spat = spat_ind.max() + pad_spat
num_spat = max_spat - min_spat + 1
spec_lin = np.linspace(min_spec,max_spec,num = int(np.round(num_spec*oversamp_spec)))
spat_lin = np.linspace(min_spat,max_spat,num = int(np.round(num_spat*oversamp_spat)))
spat_img, spec_img = np.meshgrid(spat_lin, spec_lin)
# Normalized spatial offset image (from central trace)
slit_cen_lin = (scipy.interpolate.interp1d(np.arange(nspec),slit_cen,bounds_error=False,fill_value='extrapolate'))(spec_lin)
slit_cen_img = np.outer(slit_cen_lin, np.ones(spat_img.shape[1])) # center of the slit replicated spatially
dspat_img_nrm = (spat_img - slit_cen_img)/xnspatmin1
spec_img_nrm = spec_img/xnspecmin1
# normalized spec image
tracepix = spec_img + xnspecmin1*utils.func_val(coeff2, spec_img_nrm, func2d, x2=dspat_img_nrm,
minx=0.0, maxx=1.0, minx2=-1.0, maxx2=1.0)
norm_img, spec_edges, spat_edges = np.histogram2d(tracepix.flatten(), spat_img.flatten(),
bins=[np.arange(nspec+1), np.arange(nspat+1)], density=False)
weigh_img, spec_edges, spat_edges = np.histogram2d(tracepix.flatten(), spat_img.flatten(),
bins=[np.arange(nspec+1), np.arange(nspat+1)],
weights = spec_img.flatten(),density=False)
piximg =(norm_img > 0.0)*weigh_img/(norm_img + (norm_img == 0.0))
inmask = thismask & (norm_img > 0) & (piximg/xnspecmin1 > -0.2) & (piximg/xnspecmin1 < 1.2)
    # This is the default method, although scipy.interpolate.griddata is a bit slow
elif 'interp' in method:
spec_vec_pad = np.arange(-pad_spec,nspec+pad_spec)
spat_vec_pad = np.arange(-pad_spat,nspat+pad_spat)
spat_img, spec_img = np.meshgrid(spat_vec, spec_vec)
spat_img_pad, spec_img_pad = np.meshgrid(np.arange(-pad_spat,nspat+pad_spat),np.arange(-pad_spec,nspec+pad_spec))
slit_cen_pad = (scipy.interpolate.interp1d(spec_vec,slit_cen,bounds_error=False,fill_value='extrapolate'))(spec_vec_pad)
thismask_pad = np.zeros_like(spec_img_pad,dtype=bool)
ind_spec, ind_spat = np.where(thismask)
slit_cen_img_pad= np.outer(slit_cen_pad, np.ones(nspat + 2*pad_spat)) # center of the slit replicated spatially
# Normalized spatial offset image (from central trace)
dspat_img_nrm = (spat_img_pad - slit_cen_img_pad)/xnspatmin1
# normalized spec image
spec_img_nrm = spec_img_pad/xnspecmin1
# Embed the old thismask in the new larger padded thismask
thismask_pad[ind_spec + pad_spec,ind_spat + pad_spat] = thismask[ind_spec,ind_spat]
# Now grow the thismask_pad
kernel = np.ones((2*pad_spec, 2*pad_spat))/float(4*pad_spec*pad_spat)
thismask_grow = scipy.ndimage.convolve(thismask_pad.astype(float), kernel, mode='nearest') > 0.0
# Evaluate the tilts on the padded image grid
tracepix = spec_img_pad[thismask_grow] + xnspecmin1*utils.func_val(coeff2, spec_img_nrm[thismask_grow], func2d, x2=dspat_img_nrm[thismask_grow],
minx=0.0, maxx=1.0, minx2=-1.0, maxx2=1.0)
## TESTING STARTS
"""
ikeep = np.isfinite(tracepix)
sigma = np.full_like(spec_img_pad[thismask_grow], 10.0)/xnspecmin1
fitxy = [spec_order, spat_order]
fitmask, coeff2_tilts = utils.robust_polyfit_djs(tracepix/xnspecmin1, spec_img_pad[thismask_grow]/xnspecmin1,
fitxy, x2=spat_img_pad[thismask_grow]/xnspatmin1,
sigma=sigma,
upper=5.0, lower=5.0, maxdev=10.0/xnspecmin1,
inmask=ikeep, function=func2d, maxiter=20,
minx=0.0, maxx=1.0, minx2=0.0, maxx2=1.0, use_mad=False)
## TESTING ENDS
# values(points) \equiv spec_pos(tilt,spat_pos) which is the piximg that we want to create via griddata interpolation
"""
ikeep = np.isfinite(tracepix)
points = np.stack((tracepix[ikeep], spat_img_pad[thismask_grow][ikeep]), axis=1)
values =spec_img_pad[thismask_grow][ikeep]
piximg = scipy.interpolate.griddata(points, values, (spec_img, spat_img), method='cubic')
inmask = thismask & np.isfinite(piximg) & (piximg/xnspecmin1 > -0.2) & (piximg/xnspecmin1 < 1.2)
# Now simply do a 2d polynomial fit with just rejection of crazy behavior, i.e. 10 pixels
fitxy = [spec_order, spat_order]
sigma = np.full_like(spec_img,10.0)/xnspecmin1
fitmask, coeff2_tilts = utils.robust_polyfit_djs(spec_img.flatten()/xnspecmin1, piximg.flatten()/xnspecmin1,
fitxy, x2=spat_img.flatten()/xnspatmin1, sigma = sigma.flatten(),
upper=5.0, lower=5.0, maxdev = 10.0/xnspecmin1,
inmask=inmask.flatten(), function=func2d, maxiter=20,
minx=0.0, maxx=1.0, minx2=0.0,maxx2=1.0,use_mad=False)
irej = np.invert(fitmask) & inmask.flatten()
msgs.info('Rejected {:d}/{:d} pixels in final tilts image after gridding'.format(np.sum(irej),np.sum(inmask)))
# normalized tilts image
tilts = utils.func_val(coeff2_tilts, spec_img/xnspecmin1, func2d, x2=spat_img/xnspatmin1,minx=0.0, maxx=1.0, minx2=0.0, maxx2=1.0)
tilts = np.fmax(np.fmin(tilts, 1.2),-0.2)
# Added this to ensure that tilts are never crazy values due to extrapolation of fits which can break
# wavelength solution fitting
return coeff2_tilts, tilts
|
55dd6ddd065e4f4bfefdc30bef27dc6e6541190b
| 3,643,424
|
import functools
import tensorflow as tf
def exp_t(u, t):
"""Compute exp_t for `u`."""
def _internal_exp_t(u, t):
return tf.nn.relu(1.0 + (1.0 - t) * u) ** (1.0 / (1.0 - t))
return tf.cond(
tf.math.equal(t, 1.0), lambda: tf.math.exp(u),
functools.partial(_internal_exp_t, u, t))
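# Usage sketch (assumption: TF2 eager execution). At t == 1 the tempered
# exponential reduces to the ordinary exp; otherwise it computes
# relu(1 + (1 - t) * u) ** (1 / (1 - t)).
print(exp_t(tf.constant(0.5), tf.constant(1.0)))  # ~exp(0.5) = 1.6487
print(exp_t(tf.constant(0.5), tf.constant(2.0)))  # relu(0.5) ** -1 = 2.0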
|
27fe729ea55bc8933d6ccd41c5ae96657b4426ad
| 3,643,425
|
def max_matching(G, method="ilp"):
"""Return a largest matching in *G*.
Parameters
----------
G : NetworkX graph
An undirected graph.
method: string
The method to use for finding the maximum matching. Use
'ilp' for integer linear program or 'bf' for brute force.
Defaults to 'ilp'.
Returns
-------
set
A set of edges comprising a maximum matching in *G*.
See Also
--------
    max_matching_bf, max_matching_ilp
"""
max_matching_func = {"bf": max_matching_bf, "ilp": max_matching_ilp}.get(method, None)
if max_matching_func:
return max_matching_func(G)
raise ValueError('Invalid `method` argument "{}"'.format(method))
|
34407865678e46d7d042fa94852b66ebc22787d6
| 3,643,426
|
import numpy as np
import scarlet
def hasEdgeFlux(source, edgeDistance=1):
"""hasEdgeFlux
Determine whether or not a source has flux within `edgeDistance`
of the edge.
Parameters
----------
source : `scarlet.Component`
The source to check for edge flux
edgeDistance : int
The distance from the edge of the image to consider
a source an edge source. For example if `edgeDistance=3`
then any source within 3 pixels of the edge will be
considered to have edge flux.
If `edgeDistance` is `None` then the edge check is ignored.
Returns
-------
isEdge: `bool`
Whether or not the source has flux on the edge.
"""
if edgeDistance is None:
return False
assert edgeDistance > 0
    # Use the first band that has non-zero flux
    flux = scarlet.measure.flux(source)
    band = np.min(np.where(flux > 0)[0])
model = source.get_model()[band]
for edge in range(edgeDistance):
if (
            np.any(model[edge] > 0)
            or np.any(model[-(edge + 1)] > 0)
            or np.any(model[:, edge] > 0)
            or np.any(model[:, -(edge + 1)] > 0)
):
return True
return False
|
2fd924c20cb89b3728ef3a24f92b89eb0b136fe5
| 3,643,427
|
import numpy as np
import pandas as pd
from scipy import stats
def biswas_robustness(data_scikit, data_mm):
"""
summary stats on consensus peaks
"""
CV = find_CV(th=0.0001, ca=0.5, sd=1)
CV_th001 = find_CV(th=0.001, ca=0.5, sd=1)
CV_th01 = find_CV(th=0.01, ca=0.5, sd=1)
CV_th00001 = find_CV(th=0.00001, ca=0.5, sd=1)
CV_sd15 = find_CV(th=0.0001, ca=0.5, sd=1.5)
CV_sd05 = find_CV(th=0.0001, ca=0.5, sd=0.5)
CV_ca09 = find_CV(th=0.0001, ca=0.9, sd=0.5)
CV_ca01 = find_CV(th=0.0001, ca=0.1, sd=0.5)
biswas_df = pd.DataFrame(columns=['ORF', 'corr_th001', 'corr_th01', 'corr_th00001', 'corr_sd15', 'corr_sd05', 'corr_ca09', 'corr_ca01'])
list_orfs = list( data_scikit.keys() )
for ix, orf in enumerate(list_orfs):
output = np.zeros(( 7 ))
coef = 0
p = 1
current_data = data_scikit[orf]
current_mm = data_mm[orf]
if np.shape(current_data)[1] == len(current_mm):
current_data[:,~current_mm] = 0 # after, for false consensus (i.e. multimapping), set to 0
current_cons, current_peaks = run_mc(current_data, CV)
current_cons_th001, current_peaks_th001 = run_mc(current_data, CV_th001)
current_cons_th01, current_peaks_th01 = run_mc(current_data, CV_th01)
current_cons_th00001, current_peaks_th00001 = run_mc(current_data, CV_th00001)
current_cons_sd15, current_peaks_sd15 = run_mc(current_data, CV_sd15)
current_cons_sd05, current_peaks_sd05 = run_mc(current_data, CV_sd05)
current_cons_ca09, current_peaks_ca09 = run_mc(current_data, CV_ca09)
current_cons_ca01, current_peaks_ca01 = run_mc(current_data, CV_ca01)
output[0], p = stats.spearmanr(current_cons, current_cons_th001)
output[1], p = stats.spearmanr(current_cons, current_cons_th01)
output[2], p = stats.spearmanr(current_cons, current_cons_th00001)
output[3], p = stats.spearmanr(current_cons, current_cons_sd15)
output[4], p = stats.spearmanr(current_cons, current_cons_sd05)
output[5], p = stats.spearmanr(current_cons, current_cons_ca09)
output[6], p = stats.spearmanr(current_cons, current_cons_ca01)
output = np.around(output,3)
biswas_df.loc[len(biswas_df)] = ( orf, output[0], output[1], output[2], output[3], output[4], output[5], output[6] )
print(ix, orf, output[0], output[1], output[2], output[3], output[4], output[5], output[6] )
return biswas_df
|
70ecee0baa60a5b06c785dd172bc9d0719840903
| 3,643,428
|
import numpy as np
def get_click_offset(df):
"""
df[session_key] return a set of session_key
df[session_key].nunique() return the size of session_key set (int)
df.groupby(session_key).size() return the size of each session_id
df.groupby(session_key).size().cumsum() retunn cumulative sum
"""
offsets = np.zeros(df[session_key].nunique() + 1, dtype=np.int32)
offsets[1:] = df.groupby(session_key).size().cumsum()
return offsets
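# Usage sketch (assumption: session_key names the session column and is
# defined elsewhere in the module): two sessions of 3 and 2 clicks yield
# each session's start index plus the total length.
import pandas as pd
session_key = 'session_id'
df = pd.DataFrame({'session_id': [1, 1, 1, 2, 2]})
print(get_click_offset(df))  # [0 3 5]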
|
c8caed25899f71549a9333e64452f8eed9cf1029
| 3,643,429
|
from flask import flash, redirect, url_for
def delete_enrichment():
"""
Controller to delete all existing GO enrichments
:return: Redirect to admin main screen
"""
CoexpressionCluster.delete_enrichment()
flash('Successfully removed GO enrichment for co-expression clusters', 'success')
return redirect(url_for('admin.controls.index'))
|
9cface0783929581f3e6076f43a461ef815c0d2b
| 3,643,430
|
from typing import Sequence
def argmax(sequence: Sequence) -> int:
"""Find the argmax of a sequence."""
return max(range(len(sequence)), key=lambda i: sequence[i])
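# Usage sketch: index of the largest element.
assert argmax([3, 9, 4]) == 1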
|
58cc1d0e952a7f15ff3fca721f43c4c658c41de1
| 3,643,432
|
def read_data_from_device(device, location):
""" Reads text data from device and returns it as output
Args:
        device ('obj'): Device to read the file from
        location ('str'): Path to the text file
Raises:
FileNotFoundError: File Does not Exist
Returns:
Data ('str'): Text data read from the device
"""
# IMPORTANT
# =========
# This API does not require the device to have network connection
# copy_from_device is the other API that behaves similar to this one,
# but it requires network connection since it uses SCP
try:
return device.execute("cat {}".format(location))
except Exception: # Throw file not found error when encounter generic error
raise FileNotFoundError("File {} does not exist.".format(location))
|
f6895d25f9f9e68ec33bb2d8f693999a7e3a2812
| 3,643,433
|
from typing import Dict
def postman_parser(postman_info: dict,
environment_vars: Dict = None) -> APITest:
"""
    Take a Postman collection, in JSON input format, and parse it
:param postman_info: JSON parsed info from Postman
:type postman_info: dict
:param environment_vars: variables to replace
:type environment_vars: dict
:return: a Postman object
:rtype: APITest
    :raise ApitestInvalidFormatError: when an invalid Postman format was received
"""
assert isinstance(postman_info, dict)
assert len(postman_info) > 0
# Try to find Postman variables in the JSON info from Postman Project
variables_from_postman_file = extract_postman_variables(postman_info)
    # If variables were found, replace them with their values
if variables_from_postman_file:
if not environment_vars:
raise ApitestMissingDataError(
"The Postman collections need some environment variables. "
"Please specify these variables and try again: "
",".join(x for x in variables_from_postman_file))
else:
postman_info = replace_postman_variables(postman_info,
variables_from_postman_file,
environment_vars)
collections = []
try:
# Get all collections
for collection in postman_info.get("item"):
end_points = []
# Get each end-point
for endpoint in collection.get("item"):
# --------------------------------------------------------------------------
# APITestRequest info
# --------------------------------------------------------------------------
query_info = endpoint.get("request")
# APITestRequest headers
request_headers = []
for header in query_info.get("header"):
request_headers.append(APITestHeader(key=header.get("key"),
value=header.get("value")))
# APITestRequest body
request_body_content_type = from_http_content_type_get_type(request_headers, query_info.get("body").get("mode"))
request_body = APITestBody(content_type=request_body_content_type,
value=from_raw_body_get_python_object(data_type=request_body_content_type,
data=query_info.get("body").get("formdata")))
# Build request
_request_url = query_info.get("url") \
if query_info.get("url").startswith("http") \
else "http://{}".format(query_info.get("url"))
request = APITestRequest(url=_request_url,
method=query_info.get("method"),
headers=request_headers,
body=request_body)
# --------------------------------------------------------------------------
# APITestResponse info
# --------------------------------------------------------------------------
response_list = endpoint.get("response")
responses = []
if response_list:
for response_info in response_list:
# APITestResponse headers
response_headers = []
for header in response_info.get("header"):
response_headers.append(APITestHeader(key=header.get("key"),
value=header.get("value")))
# APITestResponse APITestBody
response_body_content_type = from_http_content_type_get_type(response_headers, None)
response_body = APITestBody(content_type=response_body_content_type,
value=from_raw_body_get_python_object(data_type=response_body_content_type,
data=response_info.get("body")))
# APITestResponse cookie
response_cookies = []
for cookie in response_info.get("cookie"):
response_cookies.append(APITestCookie(expires=cookie.get("expires"),
host_only=cookie.get("hostOnly"),
http_only=cookie.get("httpOnly"),
domain=cookie.get("domain"),
path=cookie.get("path"),
secure=cookie.get("secure"),
session=cookie.get("session"),
value=cookie.get("value")))
# Build response
responses.append(APITestResponse(code=response_info.get("code"),
status=response_info.get("status"),
headers=response_headers,
body=response_body,
cookies=response_cookies))
end_points.append(APITestEndPoint(name=endpoint.get("name"),
description=endpoint.get("description"),
request=request,
response=responses))
        collections.append(APITestCollection(name=collection.get("name"),
                                             description=collection.get("description"),
end_points=end_points))
except Exception as exc:
raise ApitestInvalidFormatError from exc
data = APITest(title=postman_info.get("info").get("name"),
description=postman_info.get("info").get("description"),
collections=collections)
return data
|
1e3c351c3b7ee37d438edeb9e64e70d67b45e1b9
| 3,643,435
|
def allOPT2 (routes, dists, maxtime=float("inf")):
"""
    A convenience wrapper that runs the 2-OPT optimization on all
    the provided routes.
:param routes: The routes to optimize.
:param dists: The matrix of distances.
:param maxtime: The maximum time the optimization can go on.
    :return: The optimized routes and their total cost.
"""
optimized_routes = [None] * len(routes)
total_cost = 0
for i, route in enumerate(routes):
oproute, cost = OPT2(route, dists, maxtime)
optimized_routes[i] = oproute
total_cost += cost
return optimized_routes, total_cost
|
ec7a2e337371cf806b7fa32661185b7400e774a0
| 3,643,436
|
def getScoreByName(name):
"""
    Search the global `names` list for the given name and,
    if found, return the corresponding entry from `scores`.
"""
for idx, val in enumerate(names):
if val == name:
return scores[idx]
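# Hypothetical context sketch: the module presumably keeps two parallel
# global lists that this helper indexes into.
names = ["ada", "grace"]
scores = [[90, 95], [88, 99]]
print(getScoreByName("grace"))  # [88, 99]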
|
77074b360c2e35ae30053e1b00b3270166f27ada
| 3,643,437
|
def count_dict(dict_):
"""
Count how many levels the dict has
"""
if not isinstance(dict_, dict):
raise Dict_Exception("dict_ must be a dict")
    return max((count_dict(v) if isinstance(v, dict) else 0 for v in dict_.values()), default=0) + 1
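# Usage sketch: nesting depth, counted from 1 for a flat dict.
assert count_dict({"a": 1}) == 1
assert count_dict({"a": {"b": {"c": 1}}}) == 3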
|
b608469d67f050b366cb5b97a7d686bdf8347616
| 3,643,438
|
import altair as alt
import pandas as pd
def __draw_tick_labels(scales, chart_height, chart_width):
"""Draws the numbers in both axes."""
axis_values = [0, 0.25, 0.5, 0.75, 1]
axis_df = pd.DataFrame({"main_axis_values": axis_values, "aux_axis_position": 0})
x_tick_labels = (
alt.Chart(axis_df)
.mark_text(
yOffset=Scatter_Axis.label_font_size * 1.5,
tooltip="",
align="center",
fontSize=Scatter_Axis.label_font_size,
color=Scatter_Axis.label_color,
fontWeight=Scatter_Axis.label_font_weight,
font=FONT,
)
.encode(
text=alt.Text("main_axis_values:Q"),
x=alt.X("main_axis_values:Q", scale=scales["x"], axis=no_axis()),
y=alt.Y("aux_axis_position:Q", scale=scales["y"], axis=no_axis()),
)
)
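    # Drop the 0 entry so the origin label is drawn only once, on the x-axis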
axis_df.drop(0, inplace=True)
y_tick_labels = (
alt.Chart(axis_df)
.mark_text(
baseline="middle",
xOffset=-Scatter_Axis.label_font_size * 1.5,
tooltip="",
align="center",
fontSize=Scatter_Axis.label_font_size,
fontWeight=Scatter_Axis.label_font_weight,
color=Scatter_Axis.label_color,
font=FONT,
)
.encode(
text=alt.Text("main_axis_values:Q"),
x=alt.X("aux_axis_position:Q", scale=scales["x"], axis=no_axis()),
y=alt.Y("main_axis_values:Q", scale=scales["y"], axis=no_axis()),
)
)
return x_tick_labels + y_tick_labels
|
85107e3255953af667e43374927299a5a55b6809
| 3,643,439
|
import numpy as np
def thread_profile(D,P,inset,internal=True,base_pad=0.1):
    """ISO thread profile.
    D: major diameter, P: pitch, inset: radial offset applied to the profile,
    internal: generate the internal (nut) profile if True, else the external
    (bolt) profile, base_pad: extra radial padding at the profile base.
    Returns an array of (axial, radial) points for one thread tooth.
    """
H = P*np.sqrt(3)/2
Dm = D - 2*5*H/8
Dp = D - 2*3*H/8
if internal:
return np.array([
(-P/2,D/2+H/8+base_pad+inset),
(-P/2,D/2+H/8+inset),
(-P/8,Dm/2+inset),
(P/8,Dm/2+inset),
(P/2,D/2+H/8+inset),
(P/2,D/2+H/8+base_pad+inset),
])
else:
return np.array([
(-P/2,Dm/2-H/4-base_pad-inset),
(-P/2,Dm/2-H/4-inset),
(-P/16,D/2-inset),
(P/16,D/2-inset),
(P/2,Dm/2-H/4-inset),
(P/2,Dm/2-H/4-base_pad-inset),
])
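# Usage sketch: one tooth of an M8x1.25 internal thread profile,
# returned as six (axial, radial) points.
pts = thread_profile(8.0, 1.25, inset=0.0, internal=True)
print(pts.shape)  # (6, 2)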
|
abea6e4f234f4176a385b3abc2ca6f1de0c93a1b
| 3,643,442
|
def get_service(hass, config, discovery_info=None):
"""Get the HipChat notification service."""
return HipchatNotificationService(
config[CONF_TOKEN],
config[CONF_ROOM],
config[CONF_COLOR],
config[CONF_NOTIFY],
config[CONF_FORMAT],
config[CONF_HOST])
|
1d6b7e5d53084bd91de307a162c4710aac84be24
| 3,643,444
|