| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
from typing import List, Text
import operator
def find_top_slices(metrics: List[metrics_for_slice_pb2.MetricsForSlice],
metric_key: Text,
statistics: statistics_pb2.DatasetFeatureStatisticsList,
comparison_type: Text = 'HIGHER',
min_num_examples: int = 10,
num_top_slices: int = 10,
rank_by: Text = 'EFFECT_SIZE'):
"""Finds top-k slices.
Args:
    metrics: List of slice metrics protos. We assume that the metrics have the
      MetricValue.confidence_interval field populated. This will be the case when
      the metrics are computed with confidence intervals enabled.
metric_key: Name of the metric based on which significance testing is done.
statistics: Data statistics used to configure AutoSliceKeyExtractor.
comparison_type: Type of comparison indicating if we are looking for slices
whose metric is higher (`HIGHER`) or lower (`LOWER`) than the metric
of the base slice (overall dataset).
min_num_examples: Minimum number of examples that a slice should have.
num_top_slices: Number of top slices to return.
rank_by: Indicates how the slices should be ordered in the result.
Returns:
List of ordered slices.
"""
assert comparison_type in ['HIGHER', 'LOWER']
assert min_num_examples > 0
assert 0 < num_top_slices
assert rank_by in ['EFFECT_SIZE', 'PVALUE']
metrics_dict = {
slicer_lib.deserialize_slice_key(slice_metrics.slice_key): slice_metrics
for slice_metrics in metrics
}
overall_slice_metrics = metrics_dict[()]
del metrics_dict[()]
boundaries = auto_slice_key_extractor._get_bucket_boundaries(statistics) # pylint: disable=protected-access
overall_metrics_dict = _get_metrics_as_dict(overall_slice_metrics)
to_be_sorted_slices = []
for slice_key, slice_metrics in metrics_dict.items():
slice_metrics_dict = _get_metrics_as_dict(slice_metrics)
num_examples = slice_metrics_dict['example_count'].unsampled_value
if num_examples < min_num_examples:
continue
# Prune non-interesting slices.
if np.isnan(slice_metrics_dict[metric_key].unsampled_value):
continue
if comparison_type == 'HIGHER':
comparison_fn = operator.le
else:
comparison_fn = operator.ge
if comparison_fn(slice_metrics_dict[metric_key].unsampled_value,
overall_metrics_dict[metric_key].unsampled_value):
continue
# Only consider statistically significant slices.
is_significant, pvalue = _is_significant_slice(
slice_metrics_dict[metric_key].unsampled_value,
slice_metrics_dict[metric_key].sample_standard_deviation,
slice_metrics_dict['example_count'].unsampled_value,
overall_metrics_dict[metric_key].unsampled_value,
overall_metrics_dict[metric_key].sample_standard_deviation,
overall_metrics_dict['example_count'].unsampled_value, comparison_type)
if not is_significant:
continue
# Format the slice info (feature names, values) in the proto into a
# slice key.
transformed_slice_key = []
for (feature, value) in slice_key:
if feature.startswith(
auto_slice_key_extractor.TRANSFORMED_FEATURE_PREFIX):
feature = feature[len(auto_slice_key_extractor
.TRANSFORMED_FEATURE_PREFIX):]
value = _bucket_to_range(value, boundaries[feature])
transformed_slice_key.append((feature, value))
slice_key = slicer_lib.stringify_slice_key(tuple(transformed_slice_key))
# Compute effect size for the slice.
effect_size = _compute_effect_size(
slice_metrics_dict[metric_key].unsampled_value,
slice_metrics_dict[metric_key].sample_standard_deviation,
overall_metrics_dict[metric_key].unsampled_value,
overall_metrics_dict[metric_key].sample_standard_deviation)
to_be_sorted_slices.append(
SliceComparisonResult(slice_key, num_examples,
slice_metrics_dict[metric_key].unsampled_value,
overall_metrics_dict[metric_key].unsampled_value,
pvalue, effect_size))
# Rank the slices.
ranking_fn, reverse = operator.attrgetter('effect_size'), True
if rank_by == 'PVALUE':
ranking_fn, reverse = operator.attrgetter('pvalue'), False
result = sorted(
to_be_sorted_slices, key=ranking_fn, reverse=reverse)[:num_top_slices]
return result
|
7d71a2a64001e792b4e7cc9467ae99bfe30ebf99
| 3,637,614
|
def parse_texts(texts):
"""
Create a set of parsed documents from a set of texts.
Parsed documents are sequences of tokens whose embedding vectors can be looked up.
:param texts: text documents to parse
:type texts: sequence of strings
:return: parsed documents
:rtype: sequence of spacy.Doc
"""
return _load_text_parser().pipe(texts)
|
05f39ffa453ca448fe1d724d2a5fbb53c52c8ade
| 3,637,615
|
import json
def dict_to_string(d):
"""Return the passed dict of items converted to a json string.
All items should have the same type
Args:
d (dict): Dictionary to convert
Returns:
str: JSON version of dict
"""
j = {}
for key, value in d.items():
if value is None:
j[key] = None
else:
j[key] = value.to_data()
    return json.dumps(j)
|
3a7b3e464fa68b262be7b08bdefa1c35f603b68f
| 3,637,616
|
def load_test_data(path, var, years=slice('2017', '2018')):
"""
Args:
path: Path to nc files
var: variable. Geopotential = 'z', Temperature = 't'
years: slice for time window
Returns:
dataset: Concatenated dataset for 2017 and 2018
"""
assert var in ['z', 't'], 'Test data only for Z500 and T850'
ds = xr.open_mfdataset(f'{path}/*.nc', combine='by_coords')[var]
try:
ds = ds.sel(level=500 if var == 'z' else 850).drop('level')
except ValueError:
pass
return ds.sel(time=years)
|
fa30a9514654bb3f99f74eaed7b87e3e2eb23430
| 3,637,618
|
def encode(state, b=None):
"""
Encode a base-*b* array of integers into a single integer.
This function uses a `big-endian`__ encoding scheme. That is, the most
significant bits of the encoded integer are determined by the left-most
end of the unencoded state.
>>> from pyinform.utils import *
>>> encode([0,0,1], b=2)
1
>>> encode([0,1,0], b=3)
3
>>> encode([1,0,0], b=4)
16
>>> encode([1,0,4], b=5)
29
If *b* is not provided (or is None), the base is inferred from the state
with a minimum value of 2.
>>> from pyinform.utils import *
>>> encode([0,0,2])
2
>>> encode([0,2,0])
6
>>> encode([1,2,1])
16
See also :py:func:`.decode`.
.. __: https://en.wikipedia.org/wiki/Endianness#Examples
:param sequence state: the state to encode
:param int b: the base in which to encode
:return: the encoded state
:rtype: int
:raises ValueError: if the state is empty
:raises InformError: if an error occurs in the ``inform`` C call
"""
xs = np.ascontiguousarray(state, dtype=np.int32)
data = xs.ctypes.data_as(POINTER(c_int))
if xs.size == 0:
raise ValueError("cannot encode an empty array")
if b is None:
b = max(2, np.amax(xs)+1)
e = ErrorCode(0)
encoding = _inform_encode(data, c_ulong(xs.size), c_int(b), byref(e))
error_guard(e)
return encoding
|
09b5e96c3b9238d41f02cad938b6cb370a3a41da
| 3,637,619
|
from typing import Optional
from typing import Set
def get_equivalent(curie: str, cutoff: Optional[int] = None) -> Set[str]:
"""Get equivalent CURIEs."""
canonicalizer = Canonicalizer.get_default()
r = canonicalizer.single_source_shortest_path(curie=curie, cutoff=cutoff)
return set(r or [])
|
af5cc4049af258b7724539e81218ef74dd8a3229
| 3,637,620
|
def _standardize_input(y_true, y_pred, multioutput):
"""
This function check the validation of the input
input should be one of list/tuple/ndarray with same shape and not be None
input will be changed to corresponding 2-dim ndarray
"""
if y_true is None or y_pred is None:
raise ValueError("The input is None.")
if not isinstance(y_true, (list, tuple, np.ndarray, pd.DataFrame)):
raise ValueError("Expected array-like input."
"Only list/tuple/ndarray/pd.DataFrame are supported")
if isinstance(y_true, (list, tuple)):
y_true = np.array(y_true)
if isinstance(y_pred, (list, tuple)):
y_pred = np.array(y_pred)
if isinstance(y_true, pd.DataFrame) and isinstance(y_pred, pd.DataFrame):
y_true = y_true.to_numpy()
y_pred = y_pred.to_numpy()
original_shape = y_true.shape[1:]
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
else:
y_true = y_true.reshape((y_true.shape[0], -1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
else:
y_pred = y_pred.reshape((y_pred.shape[0], -1))
if y_true.shape[0] != y_pred.shape[0]:
raise ValueError("y_true and y_pred have different number of samples "
"({0}!={1})".format(y_true.shape[0], y_pred.shape[0]))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
allowed_multioutput_str = ('raw_values', 'uniform_average',
'variance_weighted')
if isinstance(multioutput, str):
if multioutput not in allowed_multioutput_str:
raise ValueError("Allowed 'multioutput' string values are {}. "
"You provided multioutput={!r}"
.format(allowed_multioutput_str, multioutput))
return y_true, y_pred, original_shape
|
c536e777c40a5ce7c886b20f61fac7f20341c20b
| 3,637,621
|
import logging
def disk_detach(vmdk_path, vm):
"""detach disk (by full path) from a vm and return None or err(msg)"""
device = findDeviceByPath(vmdk_path, vm)
if not device:
# Could happen if the disk attached to a different VM - attach fails
# and docker will insist to sending "unmount/detach" which also fails.
# Or Plugin retrying operation due to socket errors #1076
# Return success since disk is anyway not attached
logging.warning("*** Detach disk={0} not found. VM={1}".format(
vmdk_path, vm.config.uuid))
return None
return disk_detach_int(vmdk_path, vm, device)
|
b0f835c51eec4d97a8a925e12cbd3c7531b13fde
| 3,637,622
|
def url_mapper(url, package):
"""
In a package.json, the "url" field is a redirection to a package download
URL published somewhere else than on the public npm registry.
We map it to a download url.
"""
if url:
package.download_urls.append(url)
return package
|
95d6b67a42cac14110b457b96216a40a5d5430f9
| 3,637,624
|
import random
def electricPotential(n, V_SD_grid, V_G_grid):
"""
Function to compute the electric potential of the QDot.
:param n: the number of electrons in the dot
:param V_SD_grid: the 2d array of source-drain voltage values
:param V_G_grid: the 2d array of gate voltage values
:return: The Electric Potential for adding the nth electron to the dot
"""
    E_N = E_C*(((n)**2-(n-1)**2)/n*5+random.random()/9*n) # arbitrary random formula used to increase diamond width as more electrons are added
return (n - N_0 - 1/2) * E_C - (E_C / e) * (C_S * V_SD_grid + C_G * V_G_grid) + E_N
|
fedc11b23d781d16c786dca213eaa578de8017f6
| 3,637,625
|
def mettre_a_jour_uids(nom_fichier, organisateurs, uids):
""" Met à jour le fichier CSV UID,EMAIL à partir du dictionnaire """
nouveaux_uids = False
for id_reunion in organisateurs:
if organisateurs[id_reunion]["id_organisateur"] not in uids:
uids[organisateurs[id_reunion]["id_organisateur"]] = organisateurs[id_reunion]["email_organisateur"]
nouveaux_uids = True
if nouveaux_uids:
with open(nom_fichier, "w", encoding="utf-8") as fichier:
for uid in uids:
fichier.write(
"{:s},{:s}\n".format(
uid,
uids[uid],
)
)
return uids
|
ac0f61b135c8a7bb9de9bb6b5b8e3f9fd7b176f0
| 3,637,626
|
import warnings
def _spectrogram(signal, dB=True, log_prefix=20, log_reference=1,
yscale='linear', unit=None,
window='hann', window_length=1024, window_overlap_fct=0.5,
cmap=mpl.cm.get_cmap(name='magma'), ax=None):
"""Plot the magnitude spectrum versus time.
See pyfar.line.spectogram for more information.
"""
# check input
if not isinstance(signal, Signal):
raise TypeError('Input data has to be of type: Signal.')
_check_time_unit(unit)
_check_axis_scale(yscale, 'y')
if window_length > signal.n_samples:
raise ValueError("window_length exceeds signal length")
if np.prod(signal.cshape) > 1:
warnings.warn(("Using only the first channel of "
f"{np.prod(signal.cshape)}-channel signal."))
# take only the first channel of time data
first_channel = tuple(np.zeros(len(signal.cshape), dtype='int'))
# get spectrogram
frequencies, times, spectrogram = dsp.spectrogram(
signal[first_channel], window, window_length, window_overlap_fct)
# get magnitude data in dB
if dB:
eps = np.finfo(float).eps
spectrogram = log_prefix*np.log10(
np.abs(spectrogram) / log_reference + eps)
# auto detect the time unit
if unit is None:
unit = _time_auto_unit(times[..., -1])
# set the unit
if unit == 'samples':
times *= signal.sampling_rate
else:
factor, unit = _deal_time_units(unit)
times = times * factor
# plot the data
_, ax = _prepare_plot(ax)
ax.pcolormesh(times, frequencies, spectrogram, cmap=cmap,
shading='gouraud')
# Adjust axes:
ax.set_ylabel('Frequency in Hz')
ax.set_xlabel(f'Time in {unit}')
ax.set_xlim((times[0], times[-1]))
ax.set_ylim((max(20, frequencies[1]), signal.sampling_rate/2))
# color limits
if dB:
for PCM in ax.get_children():
if type(PCM) == mpl.collections.QuadMesh:
break
ymax = np.nanmax(spectrogram)
ymin = ymax - 90
ymax = ymax + 10
PCM.set_clim(ymin, ymax)
if yscale == 'log':
ax.set_yscale('symlog')
ax.yaxis.set_major_locator(LogLocatorITAToolbox())
ax.yaxis.set_major_formatter(LogFormatterITAToolbox())
ax.grid(ls='dotted', color='white')
plt.tight_layout()
return ax, spectrogram
|
3648918524c73dff4427a49119a9926eea317f81
| 3,637,627
|
import pprint
def _merge_cwlinputs(items_by_key, input_order, parallel):
"""Merge multiple cwl records and inputs, handling multiple data items.
Special cases:
- Single record but multiple variables (merging arrayed jobs). Assign lists
of variables to the record.
"""
items_by_key = _maybe_nest_bare_single(items_by_key, parallel)
if parallel == "multi-combined":
items_by_key, input_order = _concat_records(items_by_key, input_order)
var_items = set([_item_count(items_by_key[tuple(k.split("__"))])
for (k, t) in input_order.items() if t == "var"])
rec_items = set([_item_count(items_by_key[k]) for (k, t) in input_order.items() if t == "record"])
if var_items:
num_items = var_items
if len(num_items) == 2 and 1 in num_items:
num_items.remove(1)
items_by_key_test = _check_for_single_nested(num_items.pop(), items_by_key, input_order)
var_items = set([_item_count(items_by_key_test[tuple(k.split("__"))])
for (k, t) in input_order.items() if t == "var"])
num_items = var_items
assert len(num_items) == 1, "Non-consistent variable data counts in CWL input:\n%s" % \
(pprint.pformat(items_by_key))
items_by_key, num_items = _nest_vars_in_rec(var_items, rec_items, input_order, items_by_key, parallel)
else:
num_items = rec_items
assert len(num_items) == 1, "Non-consistent record data counts in CWL input:\n%s" % \
(pprint.pformat(items_by_key))
target_items = num_items.pop()
out = [{} for _ in range(target_items)]
for (cwl_key, cwl_type) in input_order.items():
if cwl_type == "var":
cwl_key = tuple(cwl_key.split("__"))
cur_vals = items_by_key[cwl_key]
if _is_nested_single(cur_vals, target_items):
cur_vals = [[x] for x in cur_vals[0]]
for i, cur_val in enumerate(cur_vals):
if isinstance(cwl_key, (list, tuple)):
# nested batches with records
if (parallel.startswith(("batch", "multi-parallel")) and
isinstance(out[i], (list, tuple))):
for j in range(len(out[i])):
out[i][j] = _update_nested(list(cwl_key), cur_val, out[i][j], allow_overwriting=True)
else:
out[i] = _update_nested(list(cwl_key), cur_val, out[i], allow_overwriting=True)
elif out[i] == {}:
out[i] = cur_val
else:
# Handle single non-batched records
if isinstance(cur_val, (list, tuple)) and len(cur_val) == 1:
cur_val = cur_val[0]
assert isinstance(cur_val, dict), (cwl_key, cur_val)
for k, v in cur_val.items():
out[i] = _update_nested([k], v, out[i], allow_overwriting=True)
return out
|
f78777f391747e964be6d02f77bb5c42db084546
| 3,637,628
|
def polar_distance(x1, x2):
"""
Given two arrays of numbers x1 and x2, pairs the cells that are the
closest and provides the pairing matrix index: x1(index(1,:)) should be as
close as possible to x2(index(2,:)). The function outputs the average of
the absolute value of the differences abs(x1(index(1,:))-x2(index(2,:))).
Parameters
----------
x1:
vector 1
x2:
vector 2
Returns
-------
d:
        the minimum average distance between the paired elements of x1 and x2
index:
the permutation matrix
"""
x1 = np.reshape(x1, (1, -1), order="F")
x2 = np.reshape(x2, (1, -1), order="F")
N1 = x1.size
N2 = x2.size
diffmat = np.arccos(np.cos(x1 - np.reshape(x2, (-1, 1), order="F")))
min_N1_N2 = np.min([N1, N2])
index = np.zeros((min_N1_N2, 2), dtype=int)
if min_N1_N2 > 1:
for k in range(min_N1_N2):
d2 = np.min(diffmat, axis=0)
index2 = np.argmin(diffmat, axis=0)
index1 = np.argmin(d2)
index2 = index2[index1]
index[k, :] = [index1, index2]
diffmat[index2, :] = float("inf")
diffmat[:, index1] = float("inf")
d = np.mean(np.arccos(np.cos(x1[:, index[:, 0]] - x2[:, index[:, 1]])))
else:
d = np.min(diffmat)
index = np.argmin(diffmat)
if N1 == 1:
index = np.array([1, index])
else:
index = np.array([index, 1])
return d, index
|
f3f4f564a6645d183b5d7b1ce700e2ddf40241b7
| 3,637,629
|
def _calc_data_point_locations(num_points, x_values=None):
"""Returns the x-axis location for each of the data points to start at.
Note: A numpy array is returned so that the overloaded "+" operator can be
used on the array.
The x-axis locations are scaled by x_values if it is provided, or else the
x-axis locations are evenly spaced. In either case, the x-axis locations
will always be in the range [1, num_points].
"""
if x_values is None:
# Evenly space the x-axis locations.
x_locs = np.arange(1, num_points + 1)
else:
if len(x_values) != num_points:
raise ValueError("The number of x-axis values must match the "
"number of data points.")
# Scale to the range [1, num_points]. Taken from
# http://www.heatonresearch.com/wiki/Range_Normalization
x_min = min(x_values)
x_max = max(x_values)
x_range = x_max - x_min
n_range = num_points - 1
x_locs = np.array([(((x_val - x_min) * n_range) / float(x_range)) + 1
for x_val in x_values])
return x_locs
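
# Hedged usage sketch (not part of the original snippet): demonstrates the even
# spacing and the range normalization described in the docstring.
import numpy as np

print(_calc_data_point_locations(4))                           # evenly spaced: 1, 2, 3, 4
print(_calc_data_point_locations(4, x_values=[0, 5, 10, 20]))  # scaled: 1.0, 1.75, 2.5, 4.0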
|
645af74a2547e25add5e7d7b0d8292568933c177
| 3,637,630
|
def is_base(base_pattern, str):
"""
base_pattern is a compiled python3 regex.
str is a string object.
    return True if the string matches the base_pattern, False otherwise.
"""
    return bool(base_pattern.match(str, 0, len(str)))
|
d0b0e3291fdbfad49698deffb9f57aefcabdce92
| 3,637,631
|
def stations_by_river(stations):
"""For a list of MonitoringStation objects (stations),
returns a dictionary that maps river names (key) to a list of MonitoringStation objects on a given river."""
# Dictionary containing river names and their corresponding stations
rivers = {}
for station in stations:
# Check if river is already in the dictionary
if station.river in rivers:
# Check if the station has already been added to the list
if station not in rivers[station.river]:
rivers[station.river].append(station)
else:
rivers.update({station.river: [station]})
return rivers
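
# Hedged usage sketch (not part of the original snippet): MonitoringStation is
# stubbed with a hypothetical namedtuple carrying only the attributes used here.
from collections import namedtuple

_Station = namedtuple("_Station", ["name", "river"])
_stations = [_Station("A", "Thames"), _Station("B", "Thames"), _Station("C", "Cam")]
_by_river = stations_by_river(_stations)
print(sorted(_by_river))          # ['Cam', 'Thames']
print(len(_by_river["Thames"]))   # 2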
|
c7fc460aa3e387285abdddfcb216a8ec41d27e06
| 3,637,632
|
def dQ_dY(time):
"""Derivative of transformation matrix for nutation/presession with regards to the Y coordinate of CIP in GCRS
"""
# Rotation matrices
R3_E = R3(E(time))
R3_s = R3(s(time))
R2_md = R2(-d(time))
R3_mE = R3(-E(time))
dR3_s = dR3(s(time))
dR3_E = dR3(E(time))
dR3_mE = dR3(-E(time))
dR2_md = dR2(-d(time))
return (
dR3_mE @ R2_md @ R3_E @ R3_s * (-dE_dY(time))
+ R3_mE @ dR2_md @ R3_E @ R3_s * (-dd_dY(time))
+ R3_mE @ R2_md @ dR3_E @ R3_s * (dE_dY(time))
+ R3_mE @ R2_md @ R3_E @ dR3_s * (ds_dY(time))
)
|
7341de34dccb4134bdc9b3d29e247dcc35b550bb
| 3,637,633
|
def calculate(x: int, y: int = 1, operation: str = None) -> int:
"""Calculates the sum (or difference) of two numbers.
Parameters:
`x` : int
The first number
`y` : int, optional
The second number (default is `1`)
`operation`: str, optional
Pass "subtract" to perform subtraction (default is `None`)
Returns:
int
"""
if operation == "subtract":
return x - y
else:
return x + y
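
# Hedged usage sketch (not part of the original snippet): exercises the default
# addition path and the "subtract" operation described in the docstring.
print(calculate(5))                            # 6  (y defaults to 1)
print(calculate(5, 3))                         # 8
print(calculate(5, 3, operation="subtract"))   # 2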
|
e2f79940c7329895bafe0c5ad2b17953f8276902
| 3,637,634
|
def get_power_state(instance):
"""Return the power state of the received instance.
:param instance: nova.objects.instance.Instance
:return: nova.compute.power_state
"""
instance_info = manage.VBoxManage.show_vm_info(instance)
return instance_info.get(constants.VM_POWER_STATE)
|
407488593d5f29cb4d70387bdab18b5d13db5b23
| 3,637,635
|
def toBoolean(val, default=True):
"""convert strings from CSV to Python bool
if they have an empty string - default to true unless specified otherwise
"""
if default:
trueItems = ["true", "t", "yes", "y", "1", "on", ""]
falseItems = ["false", "f", "no", "n", "none", "0"]
else:
trueItems = ["true", "t", "yes", "y", "1", "on"]
falseItems = ["false", "f", "no", "n", "none", "0", ""]
if str(val).strip().lower() in trueItems:
return True
if str(val).strip().lower() in falseItems:
return False
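
# Hedged usage sketch (not part of the original snippet): shows how the empty
# string flips between True and False depending on the `default` flag.
print(toBoolean("Yes"))               # True
print(toBoolean("0"))                 # False
print(toBoolean(""))                  # True  (empty string defaults to True)
print(toBoolean("", default=False))   # False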
|
d3ca42f73674d0104c2c036462ae421b00501cd3
| 3,637,636
|
from typing import Union
def cache_put(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache (overwriting existing value if any).
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status if a value
is written, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_PUT,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
return query_struct.perform(connection, {
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
})
|
357141ac0cc128ee2cf9ff9db76bec10d947ede0
| 3,637,637
|
def _process_rows(app, sheet_name, rows, names_map, lang=None):
"""
Processes the rows of a worksheet of translations.
This is the complement of get_bulk_app_sheets_by_name() and
get_bulk_app_single_sheet_by_name(), from
corehq/apps/translations/app_translations/download.py, which creates
these worksheets and rows.
:param app: The application being translated
:param sheet_name: The tab name of the sheet being processed.
e.g. "menu1", "menu1_form1", or "Menus_and_forms"
:param rows: The rows in the worksheet
:param names_map: A map of sheet_name to module/form unique_id, used
to fetch a module/form even if it has been moved
since the worksheet was created
:param lang: The language that the app is being translated into
:return: A list of error messages or an empty list
"""
if not sheet_name or not rows:
return []
if is_modules_and_forms_sheet(sheet_name):
updater = BulkAppTranslationModulesAndFormsUpdater(app, names_map, lang=lang)
return updater.update(rows)
if is_module_sheet(sheet_name):
unique_id = names_map.get(sheet_name)
try:
updater = BulkAppTranslationModuleUpdater(app, sheet_name, unique_id, lang=lang)
except ModuleNotFoundException:
return [(
messages.error,
_('Invalid menu in row "%s", skipping row.') % sheet_name
)]
return updater.update(rows)
if is_form_sheet(sheet_name):
unique_id = names_map.get(sheet_name)
try:
updater = BulkAppTranslationFormUpdater(app, sheet_name, unique_id, lang=lang)
except FormNotFoundException:
return [(
messages.error,
_('Invalid form in row "%s", skipping row.') % sheet_name
)]
return updater.update(rows)
return [(
messages.error,
_('Did not recognize "%s", skipping row.') % sheet_name
)]
|
3cb43f813822b1d18ee6afa90412a7a4d6cec7e5
| 3,637,638
|
def parse_line(line, metric):
"""Parses statistics from a line an experiment log file"""
if "top-k" in line:
return f"top-k.{metric}", parse_csv(line)
elif "bottom-k" in line:
return f"bottom-k.{metric}", parse_csv(line)
else:
return f"ml.{metric}", parse_csv(line)
|
9f9f263d3a27256ca98bfc42dbb7426044b8ba42
| 3,637,639
|
from typing import Tuple
def get_adjusted_pvalues(pvals: pd.Series, fdr_thresh: float = 0.05) \
-> Tuple[pd.Series, float]:
"""
Function that controls FDR rate.
Accepts an unsorted list of p-values and an FDR threshold (1).
Returns:
1) the FDR associated with each p-value,
2) the p-value cutoff for the given FDR.
References:
(1) Storey, J. D., & Tibshirani, R. (2003). Statistical significance
for genomewide studies. Proceedings of the National Academy of Sciences,
100(16), 9440-9445. https://doi.org/10.1073/pnas.1530509100
"""
m = pvals.size
# sort list of p-values
sort_ids = np.argsort(pvals) # returns indices for sorting
p_sorted = pvals.values[sort_ids] # sorts the list
adj_p = np.nan * np.zeros(len(p_sorted), dtype=np.float64)
crit_p = 0
# go over all p-values, starting with the largest
crossed = False
adj_p[-1] = p_sorted[-1]
i = m-2
while i >= 0:
FP = m*p_sorted[i] # calculate false positives
FDR = FP / (i+1) # calculate FDR
adj_p[i] = min(FDR, adj_p[i+1])
if FDR <= fdr_thresh and not crossed:
crit_p = p_sorted[i]
crossed = True
i -= 1
# reverse sorting
unsort_ids = np.argsort(sort_ids) # indices for reversing the sort
adj_p = adj_p[unsort_ids]
adj_p = pd.Series(index=pvals.index, data=adj_p, name='adjusted_pval')
return adj_p, crit_p
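
# Hedged usage sketch (not part of the original snippet): a tiny Series of
# hypothetical p-values, showing the adjusted p-values and the FDR cutoff.
import numpy as np
import pandas as pd

_pvals = pd.Series([0.001, 0.01, 0.04, 0.2, 0.9], index=list("abcde"))
_adj, _crit = get_adjusted_pvalues(_pvals, fdr_thresh=0.05)
print(_adj.round(3).to_dict())   # roughly {'a': 0.005, 'b': 0.025, 'c': 0.067, 'd': 0.25, 'e': 0.9}
print(_crit)                     # 0.01 (largest p-value still under the 5% FDR threshold)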
|
d77bb4721eedd6af0797d2e4c7ea3fac8ddc0ab4
| 3,637,640
|
def solve_nonogram(constraints):
"""this function is solving all kinds of boards of the game and returning
the all possible solutions for it""" # BTM
return [solve_easy_nonogram(constraints)]
|
ec4f7a853af8d216ca800cc3aa2bde6a57c07a8b
| 3,637,641
|
def uwid(string):
"""Return the width of a string"""
if not PY3:
string = string.decode('utf-8', 'ignore')
return sum(utf_char_width(c) for c in string)
|
adae434637415293443570f11ba58035eecf7d98
| 3,637,644
|
def check_double_quote(inpstring):
"""
    Check if a string needs to be double-quoted (if it contains a space, it must be wrapped in double quotes). E.g.: --sfmt="TIFF (unstitched, 3D)"
    Input:
    inpstring: input string or list of strings
    Output:
    newstring = new string (or list of strings), quoted where necessary
"""
if type(inpstring) == list:
newstring = []
for index in inpstring:
tmp1 = index.find(" ")
if tmp1 != -1:
tmp2 = index.find('"')
if tmp2 == -1:
dummy = index.find("=")
if dummy != -1:
newstring.append(
index[0 : dummy + 1] + '"' + index[dummy + 1 :] + '"'
)
else:
newstring.append('"' + index + '"')
else:
newstring.append(index)
else:
newstring.append(index)
else:
tmp1 = inpstring.find(" ")
if tmp1 != -1:
tmp2 = inpstring.find('"')
if tmp2 == -1:
dummy = inpstring.find("=")
if dummy != -1:
newstring = (
inpstring[0 : dummy + 1] + '"' + inpstring[dummy + 1 :] + '"'
)
else:
newstring = '"' + inpstring + '"'
else:
newstring = inpstring
else:
newstring = inpstring
return newstring
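
# Hedged usage sketch (not part of the original snippet): demonstrates the
# quoting rules on a flag containing a space and on plain strings.
print(check_double_quote('--sfmt=TIFF (unstitched, 3D)'))
# --sfmt="TIFF (unstitched, 3D)"
print(check_double_quote(['--verbose', 'two words']))
# ['--verbose', '"two words"']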
|
3da3941d9cd8c4c72643f87c533bcfbfbd9b9a79
| 3,637,645
|
from functools import partial
from torch.optim import lr_scheduler
def get_linear_schedule_with_warmup(
num_warmup_steps, num_training_steps, last_epoch=-1
):
"""
Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0,
after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.
Args:
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
function handle to create `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(
0.0,
float(num_training_steps - current_step)
/ float(max(1, num_training_steps - num_warmup_steps)),
)
return partial(lr_scheduler.LambdaLR, lr_lambda=lr_lambda, last_epoch=last_epoch)
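
# Hedged usage sketch (not part of the original snippet, assumes torch is
# installed): attaches the schedule to a toy SGD optimizer and steps through
# the warmup and linear-decay phases.
import torch

_param = torch.nn.Parameter(torch.zeros(1))
_opt = torch.optim.SGD([_param], lr=0.1)
_sched = get_linear_schedule_with_warmup(num_warmup_steps=3, num_training_steps=10)(_opt)
for _ in range(10):
    _opt.step()
    _sched.step()
    print(round(_opt.param_groups[0]["lr"], 4))   # rises to 0.1 over 3 steps, then decays towards 0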
|
10ee7baafd4751c0d578207706653b5c63f192f3
| 3,637,647
|
import base64
def search_image_targets_for_tag(trust_data: dict, image: Image):
"""
Searches in the `trust_data` for a digest, given an `image` with tag.
"""
image_tag = image.tag
if image_tag not in trust_data:
return None
base64_digest = trust_data[image_tag]["hashes"]["sha256"]
return base64.b64decode(base64_digest).hex()
|
11cfb3e0fb985c8730f72f32466e77881a503b8b
| 3,637,648
|
def calculate_intersection(a: BoundingBox, b: BoundingBox) -> int:
"""Calculate the intersection of two bounding boxes.
:param BoundingBox a: The first bounding box.
:param BoundingBox b: The second Bounding box.
    :returns: The area of the intersection of ``a`` and ``b`` (0 if they do not overlap).
:rtype: int
"""
left = max(a.upper_left_corner.x, b.upper_left_corner.x)
right = min(
a.upper_left_corner.x + a.size.width, b.upper_left_corner.x + b.size.width
)
top = max(a.upper_left_corner.y, b.upper_left_corner.y)
bottom = min(
a.upper_left_corner.y + a.size.height, b.upper_left_corner.y + b.size.height
)
    return max(right - left, 0) * max(bottom - top, 0)
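
# Hedged usage sketch (not part of the original snippet): BoundingBox and its
# point/size members are stubbed with hypothetical namedtuples exposing only
# the attributes used above.
from collections import namedtuple

_Point = namedtuple("_Point", ["x", "y"])
_Size = namedtuple("_Size", ["width", "height"])
_Box = namedtuple("_Box", ["upper_left_corner", "size"])

print(calculate_intersection(_Box(_Point(0, 0), _Size(4, 4)),
                             _Box(_Point(2, 2), _Size(4, 4))))   # 4 (a 2x2 overlap)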
|
74fd375f21a26af23208ba96bbd678ce30367b8f
| 3,637,650
|
def build_nn_model(input_shape):
"""Generate NN model
:param: input_shape (tuple): shape of the input
:return model: NN model
"""
model = keras.Sequential([
# input layer
# multi demensional array and flatten it out
# inputs.shape[1]: the intervals
# inputs.shape[2]: the value of the mfcc for that intervals
keras.layers.Flatten(input_shape=input_shape),
# 1st hidden layer
keras.layers.Dense(512, activation="relu", kernel_regularizer=keras.regularizers.l2(0.001)),
keras.layers.Dropout(0.3),
# 2nd hidden layer
keras.layers.Dense(256, activation="relu", kernel_regularizer=keras.regularizers.l2(0.001)),
keras.layers.Dropout(0.3),
# 3rd hidden layer
keras.layers.Dense(64, activation="relu", kernel_regularizer=keras.regularizers.l2(0.001)),
keras.layers.Dropout(0.3),
# output layer
# softmax: the sum of the result of all the labels = 1
# predicting: pick the neuron hav highest value
keras.layers.Dense(10, activation="softmax")
])
return model
|
c4f56369875bb5ae99b2f98ac25a91b9a985f5a8
| 3,637,652
|
from typing import Optional
def getLatestCode(appDbConnStr: str) -> Optional[ICode]:
"""get latest created code from db
Args:
appDbConnStr (str): app db connection string
Returns:
Optional[ICode]: code object
"""
latestIdFetchsql = """
select id
from code_book.op_codes
where code_issue_time=(select max(code_issue_time) from code_book.op_codes where is_deleted=0)
and is_deleted=0
order by id desc
"""
# initialise code object
code: Optional[ICode] = None
colNames = []
dbRows = []
dbConn = None
dbCur = None
try:
# get connection with raw data table
dbConn = cx_Oracle.connect(appDbConnStr)
# get cursor and execute fetch sql
dbCur = dbConn.cursor()
dbCur.execute(latestIdFetchsql)
colNames = [row[0] for row in dbCur.description]
# fetch all rows
dbRows = dbCur.fetchall()
except Exception as err:
dbRows = []
print('Error while fetching latest code id from app db')
print(err)
finally:
# closing database cursor and connection
if dbCur is not None:
dbCur.close()
if dbConn is not None:
dbConn.close()
targetColumns = ["ID"]
if (False in [(col in targetColumns) for col in colNames]):
# all desired columns not fetched, hence return empty
return None
if len(dbRows) == 0:
return None
row = dbRows[0]
latestCodeId: ICode["id"] = row[colNames.index('ID')]
# get latest code by id
code = getCodeById(appDbConnStr=appDbConnStr, codeId=latestCodeId)
return code
|
aba4106f99e1e23cf0127ad45502bd17cde9c33e
| 3,637,654
|
def start_initialization_pd(update: Update, context: CallbackContext) -> str:
"""When touch "Заполнить данные"."""
u = User.get_user(update, context)
current_text = update.effective_message.text
update.effective_message.edit_text(
text=current_text
)
context.bot.send_message(
chat_id=u.user_id,
text=static_text.ABOUT_FILLING_PERSONAL_DATA
)
update.effective_message.reply_text(
text=static_text.ASK_LAST_NAME,
parse_mode=ParseMode.HTML
)
return LAST_NAME
|
8af74466b5dc84ef4ee0b5ff5a2d824a275ee5c9
| 3,637,655
|
def rpm_comments(table=RPMComment, prefix='comment_', relationships=False):
"""Get filters for rpm comments.
:param sqlalchemy.ext.declarative.api.declarativemeta table: database model
:param string prefix: prefix of the name of the filter
:return dict: dict of filters
"""
filters = dict(
**request_parser.equals(
table.id,
name=prefix + 'id',
function=(lambda x: int(x))
),
)
if relationships:
filters.update(dict(
**request_parser.equals(table.id_user, name=prefix + 'id_user'),
**request_parser.equals(
table.id_comp,
name=prefix + 'id_comp',
function=(lambda x: int(x))
),
**request_parser.equals(
table.id_diff,
name=prefix + 'id_diff',
function=(lambda x: int(x))
),
))
return filters
|
b7e34abdc81e3afa2b19ffe118d129068526b315
| 3,637,656
|
def split_line(line) -> list:
"""Split a line from a dmp file"""
return [x.strip() for x in line.split(" |")]
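
# Hedged usage sketch (not part of the original snippet): a hypothetical
# dmp-style line whose fields are separated by " |".
print(split_line("9606 | Homo sapiens | scientific name |"))
# ['9606', 'Homo sapiens', 'scientific name', '']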
|
e9c5fb93bab1007b3deb11b8d71fe0cffd3f5bab
| 3,637,657
|
def translate(text):
"""."""
return text
|
a0732d6a802f9846de5b294863f2c096f72c6c70
| 3,637,658
|
def stream_bytes(data, chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming binary data.
Returns a buffered generator which encodes binary data as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
data : bytes
The data bytes to stream
chunk_size : int
The maximum size of each stream chunk
Returns
-------
(generator, dict)
"""
stream = BytesFileStream(data, chunk_size=chunk_size)
return stream.body(), stream.headers()
|
b329f56cae62122dd4e341dfc80c9c6aaae7ba31
| 3,637,659
|
def helper(n, big):
"""
:param n: int, an integer number
:param big: the current largest digit
:return: int, the final largest digit
"""
n = abs(n)
if n == 0:
return big
else:
# check the last digit of a number
if big < int(n % 10):
big = int(n % 10)
# check the rest of the digits
        return helper(n // 10, big)
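
# Hedged usage sketch (not part of the original snippet): finds the largest
# digit of a number by seeding `big` with 0.
print(helper(48957, 0))   # 9
print(helper(-2083, 0))   # 8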
|
aa7fa862d326d9e3400b58c9c520a10672e7340c
| 3,637,660
|
def getPageNumber(ffile):
"""
Extract the page number from the file name
:param ffile:
:return: image URI as a string
"""
return str(ffile).split('_')[-1].split('.')[0]
|
f78166ae3da8ea234c98436144e6c815f341ff5e
| 3,637,663
|
import colorsys
def colors_stepsort(r,g,b,repetitions=1):
"""
Sort colors in hue steps for more perceptually uniform colormaps
"""
lum = np.sqrt( .241 * r + .691 * g + .068 * b )
h, s, v = colorsys.rgb_to_hsv(r,g,b)
h2 = int(h * repetitions)
lum2 = int(lum * repetitions)
v2 = int(v * repetitions)
if h2 % 2 == 1:
v2 = repetitions - v2
lum = repetitions - lum
return (h2, lum, v2)
|
c9ac5729c4208e681e3a91e09746b3bc8f0b3cc5
| 3,637,664
|
def get_string_hash(string: str, algorithm_name: str):
"""Calculates the hash digest of a string.
Args:
string: str: The string to digest.
algorithm_name: str: The name of the algorithm to hash the string with.
Returns:
A hash digest in string form.
"""
hash_algorithm = _get_algorithm(algorithm_name)
hash_algorithm.update(string.encode('utf-8'))
return hash_algorithm.hexdigest()
|
31eb5504703412ac775ae4344ef1ff2c4176104d
| 3,637,665
|
from tqdm import tqdm
def psd_error(times,rates,errors):
"""
obtain errors for the best frequency estimate of the signal
"""
"""
print(len(times),len(rates),len(errors))
newdatachoice = np.random.choice(len(times),size=int(0.1*len(times)))
newtimes = list(np.array([times[0]])) + list(np.array([times[-1]])) + list(times[np.array(list(set(newdatachoice)))])
newrates = list(np.array([rates[0]])) + list(np.array([rates[-1]])) + list(rates[np.array(list(set(newdatachoice)))])
newerrs = list(np.array([errors[0]])) + list(np.array([errors[-1]])) + list(errors[np.array(list(set(newdatachoice)))])
times = newtimes
rates = newrates
errors = newerrs
print(len(times),len(rates),len(errors))
"""
freqs_list = []
psds_list = []
for j in tqdm(range(1000)):
new_rates = np.zeros(len(rates))
for i in range(len(rates)):
if rates[i] != 0:
new_rates[i] = np.random.normal(loc=rates[i],scale=errors[i])
trunc_times = times-times[0]
newchoice = np.random.choice(len(trunc_times),size=len(trunc_times))
rand_times = trunc_times[np.array(list(set(newchoice)))]
rand_rates = new_rates[np.array(list(set(newchoice)))]
omega,psd,prob3,prob4,prob5 = lsp(rand_times,rand_rates)
nu_reg = omega/(2.0*np.pi)
freq = omega/(2*np.pi)
psds_list.append( np.max(psd[(freq>=8.2e-6)&(freq<=8.4e-6)]) )
freqs_list.append( freq[psd==psds_list[-1]][0])
#plt.figure()
#plt.plot(freq,psd,'rx-')
#plt.show()
return freqs_list,psds_list
|
ecb8dcb297872dae1e9c9a0b491feaf2b2c74490
| 3,637,666
|
def solve(global_step):
"""add solver to losses"""
# learning reate
lr = _configure_learning_rate(82783, global_step)
optimizer = _configure_optimizer(lr)
tf.summary.scalar('learning_rate', lr)
# compute and apply gradient
losses = tf.get_collection(tf.GraphKeys.LOSSES)
regular_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
regular_loss = tf.add_n(regular_losses)
out_loss = tf.add_n(losses)
total_loss = tf.add_n(losses + regular_losses)
tf.summary.scalar('total_loss', total_loss)
tf.summary.scalar('out_loss', out_loss)
tf.summary.scalar('regular_loss', regular_loss)
update_ops = []
variables_to_train = _get_variables_to_train()
# update_op = optimizer.minimize(total_loss)
gradients = optimizer.compute_gradients(total_loss, var_list=variables_to_train)
grad_updates = optimizer.apply_gradients(gradients,
global_step=global_step)
update_ops.append(grad_updates)
# update moving mean and variance
if FLAGS.update_bn:
update_bns = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
update_bn = tf.group(*update_bns)
update_ops.append(update_bn)
return tf.group(*update_ops)
|
085317d679495ab1959106c64fdeb10aeeeeab02
| 3,637,667
|
def get_features_from_policy(env, policy):
"""Represent policies with average feature vector.
This only makes sense for linear reward functions, but it is only used for the
HighwayDriving environment.
"""
assert isinstance(env.unwrapped, HighwayDriving)
assert isinstance(policy, FixedPolicy)
N = 10
features = np.zeros(env.Ndim_repr)
for i in range(N):
obs = env.reset()
done = False
while not done:
act = policy.get_action(obs)
obs, reward, done, info = env.step(act)
features += info["gp_repr"]
features /= N
return features
|
524256c80a8c4dec7b30378e5a377ac02456ffa7
| 3,637,668
|
def predicate(line):
"""
    Filter out lines that contain a "#" character.
"""
if "#" in line:
return False
return True
|
ff7d67c1fd7273b149c5a2148963bf898d6a3591
| 3,637,670
|
def pad_slices(ctvol, max_slices): #Done testing
"""For <ctvol> of shape (slices, side, side) pad the slices to shape
max_slices for output of shape (max_slices, side, side)"""
padding_needed = max_slices - ctvol.shape[0]
    assert (padding_needed >= 0), 'Image slices exceed max_slices by '+str(-1*padding_needed)
if padding_needed > 0:
before_padding = int(padding_needed/2.0)
after_padding = padding_needed - before_padding
ctvol = np.pad(ctvol, pad_width = ((before_padding, after_padding), (0,0), (0,0)),
mode = 'constant', constant_values = np.amin(ctvol))
assert ctvol.shape[0]==max_slices
return ctvol
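
# Hedged usage sketch (not part of the original snippet): pads a 3-slice volume
# up to 5 slices, splitting the padding before and after.
import numpy as np

_vol = np.ones((3, 2, 2))
print(pad_slices(_vol, max_slices=5).shape)   # (5, 2, 2)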
|
01b03094dd66a770cb40a136399486bbc018e969
| 3,637,671
|
import random
def average_img_from_dir(path_data_dir,filepat="*", \
parentaslabel=False,\
labels=[],\
sampling_rate=0.001,\
title="average image") :
"""
    Create and visualize the average image of a given dataset.
    path_data_dir = path to the dataset
    filepat = filename pattern used to select files
    parentaslabel = treat the parent directory name as the label
    labels = labels to keep when parentaslabel is True
    sampling_rate = fraction of the dataset to sample, in [0, 1]
    title = title of the plot
    return
    img_avg = average image of the sampled dataset
"""
data = [ f for lbl, f in gen_find(filepat,path_data_dir,parentaslabel,labels) \
if not parentaslabel or ( parentaslabel and lbl in labels) ]
num_elem = len(data)
ds_size= int(num_elem * sampling_rate)
print ("# sample : {} sampling_rate : {} # of data : {}".format(ds_size,sampling_rate,num_elem))
sampled_data = random.sample(data, ds_size)
img_avg = cv2.imread(sampled_data[0])
h, w = img_avg.shape[:2]
nSum = 1
for i in sampled_data[1:] :
imga = cv2.resize(cv2.imread(i),(w, h), interpolation = cv2.INTER_CUBIC)
weight_avg = float(nSum)/float(nSum+1)
weight_a = float(1)/float(nSum+1)
img_avg = cv2.addWeighted(img_avg,weight_avg,imga,weight_a,0)
#print ("Weight_avg : {} + Weight_a : {} = Total Weight {} " \
# .format(weight_avg,weight_a,weight_avg+weight_a))
nSum+=1
# Make w as 10 - 10 = w : v : h
vr = 10 * h / w
plt.figure(figsize=(10,vr))
plt.title(title)
plt.imshow(img_avg,interpolation='nearest', aspect='auto'),plt.show()
return img_avg
|
98aaf33c35483a9100bb25d667970f9f177955f8
| 3,637,672
|
def get_size(positions):
"""Get the size of bounding rectangle that embodies positions.
Args:
positions (dict of Dendrogram: np.array): positions xy coordinates of dendrograms
Returns:
Tuple of width and height of bounding rectangle.
"""
max_y_list = [dendrogram.height + coords[1] for dendrogram, coords in positions.items()]
coords = np.array(list(positions.values()))
width = np.max(coords[:, 0]) - np.min(coords[:, 0])
height = np.max(max_y_list) - np.min(coords[:, 1])
return width, height
|
2a212541746963d0aa83320d3aa08ddfb5d6f6e0
| 3,637,673
|
def getContactInfo(dic):
"""Returns the Contact info for Chapters.
dic -- Dictionary from the JSON with all values.
"""
return str(dic["content"]["$t"]).split(',')[1].split(':')[1].strip()
|
27ed9bcb1e91db3cf58b82023505cfcffab00bcd
| 3,637,674
|
import torch
import time
def draw_pointcloud(x: torch.Tensor, x_mask: torch.Tensor, grid_on=True):
""" Make point cloud image
:param x: Tensor([B, N, 3])
:param x_mask: Tensor([B, N])
:param grid_on
:return: Tensor([3 * B, W, H])
"""
tic = time.time()
figw, figh = 16., 12.
W, H = 256, int(256 * figh / figw)
imgs = list()
for p, m in zip(x, x_mask):
p = p[~m, :]
p = p.cpu()
fig = plt.figure(figsize=(figw, figh))
ax = fig.gca(projection='3d')
ax.set_facecolor('xkcd:steel')
ax.w_xaxis.set_pane_color((0., 0., 0., 1.0))
ax.w_yaxis.set_pane_color((0., 0., 0., 1.0))
ax.w_zaxis.set_pane_color((0., 0., 0., 1.0))
ax.scatter(-p[:, 2], p[:, 0], p[:, 1], color=(1, 1, 1), marker='o', s=100)
fig.tight_layout()
fig.canvas.draw()
buf = fig.canvas.buffer_rgba()
l, b, w, h = fig.bbox.bounds
img = np.frombuffer(buf, np.uint8).copy()
img.shape = int(h), int(w), 4
img = img[:, :, 0:3]
img = cv2.resize(img, dsize=(W, H), interpolation=cv2.INTER_CUBIC) # [H, W, 3]
imgs.append(torch.tensor(img).transpose(2, 0).transpose(2, 1)) # [3, H, W]
plt.close(fig)
return torch.stack(imgs, dim=0)
|
8828088f8f319f0033c55a4fb5c63705a882f8cd
| 3,637,675
|
def semantic_dsm(word_list, keyed_vectors):
"""Calculate a semantic dissimilarity matrix."""
vectors = np.array([keyed_vectors.word_vec(word) for word in word_list])
dsm = np.clip(pdist(vectors, metric="cosine"), 0, 1)
return dsm
|
05a08b09af0cc95dc647c4a2388824a2f94ed7ec
| 3,637,676
|
def prompt_id_num(message, length=ID_WIDTH):
""" Asks the user to enter a identifier which is a numeric string.
The length is the length of the identifier asked.
:param message: message to ask the input
:param length: the length of the identifier
:return: input
"""
response = input(message)
while len(response) != length:
response = input(f"Entrée incorrecte. Veuillez renseigner"
f" un identifiant contenant {length} nombres: ")
response_is_not_number = True
while response_is_not_number:
try:
int(response)
response_is_not_number = False
except ValueError:
response = input(f"Entrée incorrecte. Veuillez renseigner"
f" un identifiant contenant {length} nombres: ")
return response
|
5cf705e600891bf168ac77ecf3d57637144d7b97
| 3,637,677
|
def click_snr(wl, Spec):
"""Calculate snr in a specific range given by clicks on a plot """
plt.figure()
plt.plot(wl, Spec)
plt.show(block=True)
# points from click
# temp values untill implement above
point2 = np.max(wl)
point1 = np.min(wl)
map2 = wl < point2
map1 = wl > point1
wl_slice = wl[map1 * map2]
Spec_slice = Spec[map1 * map2]
# Calculate SNR on the slice
SNR = snr(Spec_slice)
return SNR
|
b61216780bce3e63687c4fc79b37de8e138fa756
| 3,637,678
|
def rnn_cell_forward(xt, a_prev, parameters):
"""
Implements a single forward step of the RNN-cell
Arguments:
xt -- your input data at timestep "t", numpy array of shape (n_x, m).
a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m)
parameters -- python dictionary containing:
Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
ba -- Bias, numpy array of shape (n_a, 1)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a_next -- next hidden state, of shape (n_a, m)
yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
cache -- tuple of values needed for the backward pass, contains (a_next, a_prev, xt, parameters)
"""
# Retrieve parameters from "parameters"
Wax = parameters["Wax"]
Waa = parameters["Waa"]
Wya = parameters["Wya"]
ba = parameters["ba"]
by = parameters["by"]
# compute next activation state using the formula given above
a_next = np.tanh(np.dot(Waa, a_prev) + np.dot(Wax, xt) + ba)
# compute output of the current cell using the formula given above
yt_pred = softmax(np.dot(Wya, a_next) + by)
# store values you need for backward propagation in cache
cache = (a_next, a_prev, xt, parameters)
return a_next, yt_pred, cache
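
# Hedged usage sketch (not part of the original snippet): runs one RNN step on
# random data. `softmax` is not defined above, so a minimal column-wise softmax
# is supplied here purely for the sketch.
import numpy as np

def softmax(z):
    e = np.exp(z - np.max(z, axis=0, keepdims=True))
    return e / np.sum(e, axis=0, keepdims=True)

np.random.seed(0)
n_x, n_a, n_y, m = 3, 5, 2, 4
_params = {"Wax": np.random.randn(n_a, n_x), "Waa": np.random.randn(n_a, n_a),
           "Wya": np.random.randn(n_y, n_a), "ba": np.zeros((n_a, 1)), "by": np.zeros((n_y, 1))}
_a_next, _yt_pred, _ = rnn_cell_forward(np.random.randn(n_x, m), np.zeros((n_a, m)), _params)
print(_a_next.shape, _yt_pred.shape)   # (5, 4) (2, 4)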
|
891a5ec7a789dbd4e1ee67598c9e350d9eacffee
| 3,637,679
|
def load_song(trainsize=5000, testsize=5000):
""" The million song dataset
Not a good dataset for feature selection or regression
Standard linear regression performs only a little bit better than a random vector.
Additional complex models, such as interesting kernels, are needed
To improve performance
"""
if trainsize + testsize < 5000:
filename = 'datasets/YearPredictionMSD_small.csv'
else:
filename = 'datasets/YearPredictionMSD.csv'
dataset = loadcsv(filename)
trainset, testset = splitdataset(dataset,trainsize, testsize,outputfirst=True)
return trainset,testset
|
ee428f7c34f256ab9eb3e271751932cc4abcdb4c
| 3,637,681
|
def convert_node(node_data: NodeData):
"""
Convenience method for converting NodeData to a packed TLV message.
:param core.emulator.data.NodeData node_data: node data to convert
:return: packed node message
"""
node = node_data.node
services = None
if node.services is not None:
services = "|".join([x.name for x in node.services])
server = None
if node.server is not None:
server = node.server.name
tlv_data = structutils.pack_values(
coreapi.CoreNodeTlv,
[
(NodeTlvs.NUMBER, node.id),
(NodeTlvs.TYPE, node.apitype.value),
(NodeTlvs.NAME, node.name),
(NodeTlvs.MODEL, node.type),
(NodeTlvs.EMULATION_SERVER, server),
(NodeTlvs.X_POSITION, int(node.position.x)),
(NodeTlvs.Y_POSITION, int(node.position.y)),
(NodeTlvs.CANVAS, node.canvas),
(NodeTlvs.SERVICES, services),
(NodeTlvs.LATITUDE, str(node.position.lat)),
(NodeTlvs.LONGITUDE, str(node.position.lon)),
(NodeTlvs.ALTITUDE, str(node.position.alt)),
(NodeTlvs.ICON, node.icon),
],
)
return coreapi.CoreNodeMessage.pack(node_data.message_type.value, tlv_data)
|
bef0f45295325e15c09249152d3252b7ed949b2e
| 3,637,682
|
def parse_null_value(
null_value_node: "NullValueNode", schema: "GraphQLSchema"
) -> None:
"""
Returns the value of an AST null value node.
:param null_value_node: AST null value node to treat
:param schema: the GraphQLSchema instance linked to the engine
:type null_value_node: NullValueNode
:type schema: GraphQLSchema
"""
# pylint: disable=unused-argument
return None
|
ee4d3f544c83d58abaf40b5cc46aa8953a2745bc
| 3,637,683
|
def SetDataTypesFromColInfo(df, tblCI):
"""
Use colinfo dictionaries to set newly-imported (CSV) DataFrame column types and Boolean Flag columns
"""
for col in df.columns:
#If col is a flag column (1/blank), convert to Boolean for memory and feather file size efficiency
if (col in tblCI.dict_isflagcol):
if (tblCI.dict_isflagcol[col]): df = pdutil.ConvertFlagColToBoolean(df, col)
#If the column is in the data type dictionary, set its type using either .to_datetime() or .astype()
if col in tblCI.dict_types:
if tblCI.dict_types[col] == 'dt':
df[col] = pd.to_datetime(df[col])
else:
df[col] = df[col].astype(tblCI.dict_types[col])
return df
|
43b6ac47d760b613be7419a6d1e7910b04c792f8
| 3,637,684
|
def run_and_wait(request, _):
"""Implementation of RunAndWait."""
process_runner = new_process.ProcessRunner(request.executable_path,
request.default_args)
args = {}
protobuf_utils.get_protobuf_field(args, request.popen_args, 'bufsize')
protobuf_utils.get_protobuf_field(args, request.popen_args, 'executable')
protobuf_utils.get_protobuf_field(args, request.popen_args, 'shell')
protobuf_utils.get_protobuf_field(args, request.popen_args, 'cwd')
if request.popen_args.env_is_set:
args['env'] = request.popen_args.env
else:
args['env'] = None
args['additional_args'] = request.additional_args
protobuf_utils.get_protobuf_field(args, request, 'timeout')
protobuf_utils.get_protobuf_field(args, request, 'terminate_before_kill')
protobuf_utils.get_protobuf_field(args, request, 'terminate_wait_time')
protobuf_utils.get_protobuf_field(args, request, 'input_data')
protobuf_utils.get_protobuf_field(args, request, 'max_stdout_len')
logs.log('Running command: %s' % process_runner.get_command())
return untrusted_runner_pb2.RunAndWaitResponse(
result=process_result_to_proto(process_runner.run_and_wait(**args)))
|
a7c505221c44fd40156fa2a17ee31307e82d0a2f
| 3,637,685
|
import random
def fight(player, enemy):
"""
This starts a round of combat between the user and their selected enemy.
It returns a list of information relating to combat, to be used in the
view function to display it, if required.
"""
# Random player damage based on 80-100% of player damage stat.
dmg_range_roll = random.randrange(80, 101) / 100
dmg_roll_player = round(player.damage * dmg_range_roll)
looted_power_crystals = 0
looted_gold = 0
if enemy.hp_max <= dmg_roll_player:
# Randomly generated loot values, added to player object.
resources_range_roll = random.randrange(75, 101) / 100
looted_power_crystals = round(
enemy.power_crystals * resources_range_roll)
player.power_crystals += looted_power_crystals
resources_range_roll = random.randrange(75, 101) / 100
looted_gold = round(enemy.gold * resources_range_roll)
player.gold += looted_gold
dmg_roll_enemy = 0
result = True
else:
result = False
# Random enemy damage, based on 80-100% of their damage stat.
dmg_range_roll = random.randrange(80, 101) / 100
dmg_roll_enemy = round(enemy.damage * dmg_range_roll)
player.hp_current -= dmg_roll_enemy
return [player, dmg_roll_player, dmg_roll_enemy, result, looted_gold, looted_power_crystals]
|
bca739be92ccacb92c90d784cdbf5b4abb2e61c0
| 3,637,687
|
def sort_list_by_list(L1,L2):
"""Sort a list by another list"""
return [x for (y,x) in sorted(zip(L2,L1), key=lambda pair: pair[0])]
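
# Hedged usage sketch (not part of the original snippet): reorders names by
# their associated scores.
print(sort_list_by_list(["a", "b", "c"], [3, 1, 2]))   # ['b', 'c', 'a']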
|
04b7c02121620be6d9344af6f56f1b8bfe75e9f3
| 3,637,689
|
def _to_protobuf_value(value: type_utils.PARAMETER_TYPES) -> struct_pb2.Value:
"""Creates a google.protobuf.struct_pb2.Value message out of a provide
value.
Args:
value: The value to be converted to Value message.
Returns:
A google.protobuf.struct_pb2.Value message.
Raises:
ValueError if the given value is not one of the parameter types.
"""
    if isinstance(value, str):
        return struct_pb2.Value(string_value=value)
    elif isinstance(value, bool):
        # bool must be checked before int/float, since bool is a subclass of int
        return struct_pb2.Value(bool_value=value)
    elif isinstance(value, (int, float)):
        return struct_pb2.Value(number_value=value)
elif isinstance(value, dict):
return struct_pb2.Value(
struct_value=struct_pb2.Struct(
fields={k: _to_protobuf_value(v) for k, v in value.items()}))
elif isinstance(value, list):
return struct_pb2.Value(
list_value=struct_pb2.ListValue(
values=[_to_protobuf_value(v) for v in value]))
else:
raise ValueError('Value must be one of the following types: '
'str, int, float, bool, dict, and list. Got: '
f'"{value}" of type "{type(value)}".')
|
2714aa36c4b2ce98795c32993390853172863010
| 3,637,690
|
from typing import Union
from typing import List
def umap(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in UMAP basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
return embedding(adata, 'umap', **kwargs)
|
454d606a62d783047ce5d09372ed0718cf3f4af4
| 3,637,691
|
def _prepare_grid(times, time_step):
"""Prepares grid of times for path generation.
Args:
times: Rank 1 `Tensor` of increasing positive real values. The times at
which the path points are to be evaluated.
time_step: Scalar real `Tensor`. Maximal distance between time grid points
Returns:
Tuple `(all_times, mask)`.
`all_times` is a 1-D real `Tensor` containing all points from 'times` and
the uniform grid of points between `[0, times[-1]]` with grid size equal to
`time_step`. The `Tensor` is sorted in ascending order and may contain
duplicates.
`mask` is a boolean 1-D `Tensor` of the same shape as 'all_times', showing
which elements of 'all_times' correspond to THE values from `times`.
Guarantees that times[0]=0 and mask[0]=False.
"""
additional_times = tf.range(
start=time_step, limit=times[-1], delta=time_step, dtype=times.dtype)
zeros = tf.constant([0], dtype=times.dtype)
all_times = tf.concat([zeros] + [times] + [additional_times], axis=0)
additional_times_mask = tf.zeros_like(additional_times, dtype=tf.bool)
mask = tf.concat([
tf.cast(zeros, dtype=tf.bool),
tf.ones_like(times, dtype=tf.bool)
] + [additional_times_mask], axis=0)
perm = tf.argsort(all_times, stable=True)
all_times = tf.gather(all_times, perm)
mask = tf.gather(mask, perm)
return all_times, mask
|
7765a473ce6cf91281410006b07421daf6ed24a8
| 3,637,692
|
def unpack_singleton(x):
"""Gets the first element if the iterable has only one value.
Otherwise return the iterable.
# Argument:
x: A list or tuple.
# Returns:
The same iterable or the first element.
"""
if len(x) == 1:
return x[0]
return x
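
# Hedged usage sketch (not part of the original snippet):
print(unpack_singleton([42]))        # 42
print(unpack_singleton([1, 2, 3]))   # [1, 2, 3]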
|
cf551f242c8ea585c1f91eadbd19b8e5f73f0096
| 3,637,693
|
def create_markdown_table(table_info: dict, index_name: str='Id') -> str:
"""
Returns a string for a markdown table, formatted
according to the dictionary passed as `table_info`
Parameters:
table_info: Mapping from index to values
index_name: Name to use for the index column
Returns:
md_str: Markdown formatted table string
Example:
>>> table_info = {
'Apples': {
'Cost': '40p',
'Colour': 'Red/green',
},
'Oranges': {
'Cost': '50p',
'Colour': 'Orange',
},
}
>>> md_str = create_markdown_table(table_info, index_name='Fruit')
>>> print(md_str)
| Fruit | Cost | Colour |
|:--------|:-------|:----------|
| Apples | 40p | Red/green |
| Oranges | 50p | Orange |
"""
df_info = pd.DataFrame(table_info).T
df_info.index.name = index_name
md_str = df_info.to_markdown()
return md_str
|
bcda7ddb9338c3f7e656a0ec74a495f0a677eaeb
| 3,637,696
|
def _parse_sequence(sequence):
"""Get a string which should describe an event sequence. If it is
successfully parsed as one, return a tuple containing the state (as an int),
the event type (as an index of _types), and the detail - None if none, or a
string if there is one. If the parsing is unsuccessful, return None.
"""
if not sequence or sequence[0] != '<' or sequence[-1] != '>':
return None
words = sequence[1:-1].split('-')
modifiers = 0
while words and words[0] in _modifier_names:
modifiers |= 1 << _modifier_names[words[0]]
del words[0]
if words and words[0] in _type_names:
type = _type_names[words[0]]
del words[0]
else:
return None
if _binder_classes[type] is _SimpleBinder:
if modifiers or words:
return None
else:
detail = None
else:
# _ComplexBinder
if type in [_type_names[s] for s in ("KeyPress", "KeyRelease")]:
type_re = _keysym_re
else:
type_re = _button_re
if not words:
detail = None
elif len(words) == 1 and type_re.match(words[0]):
detail = words[0]
else:
return None
return modifiers, type, detail
|
6ba7ed95bd6bf18e24ae6bce47fdc03868ac4a98
| 3,637,697
|
from typing import List
from typing import Dict
def make_car_dict(key: str, data: List[str]) -> Dict:
"""Organize car data for 106 A/B of the debtor
:param key: The section id
:param data: Content extract from car data section
:return: Organized data for automobile of debtor
"""
return {
"key": key,
"make": data[0],
"model": data[1],
"year": data[2],
"mileage": data[3],
"other_information": data[5],
"property_value": data[6],
"your_property_value": data[7],
}
|
671cb2f82f15d14345e34e9823ea390d72cf040a
| 3,637,698
|
import ast
def insert_code(src, dest, kind):
"""Insert code in source into destination file."""
source_text = open(src).read().strip()
destination_text = open(dest).read()
destination_lines = destination_text.split('\n')
destination_tree = ast.parse(destination_text)
if not destination_tree.body:
idx = 0
elif kind == "prefix":
idx = find_prefix_insertion_idx(destination_tree)
elif kind == "postfix":
idx = find_postfix_insertion_idx(destination_tree)
if idx >= len(destination_tree.body):
# Strip blank line before insertion
if destination_lines[-1].strip() == '':
del destination_lines[-1]
# Append to file
destination_lines.append('\n\n' + source_text + '\n')
else:
# Start with index at first line above object definition
line_no = destination_tree.body[idx].lineno - 1 # line numbers count from 1
line_no = get_previous_blank_line_no(destination_lines, line_no)
# Strip blank lines before insertion
if destination_lines[line_no - 1].strip() == '':
del destination_lines[line_no - 1]
line_no -= 1
# perform the insertion
destination_lines.insert(line_no, '\n\n' + source_text + '\n')
all_text = '\n'.join(destination_lines)
return all_text
|
7f07e8741f5354fc78b840c803424bfd70fe8997
| 3,637,699
|
import salt.utils.platform
def is_aix():
"""
Simple function to return if host is AIX or not
"""
return salt.utils.platform.is_aix()
|
e4be83dfefc2a7ce5d97894b7a882808658d470a
| 3,637,701
|
def draw_support_spring(
fig,
support,
orientation="up",
color='orange',
show_values=True,
row=None,
col=None,
units="N/m"):
"""Draw an anchored spring shape on a plotly figure.
Parameters
----------
fig : plotly figure
        plotly figure to append spring shape to.
support : Support instance
support to be represented on figure
    orientation : 'up' or 'right', optional
        direction that the spring faces, by default "up"
color : str, optional
color of spring, by default 'orange'.
    show_values: bool, optional
        If True, annotates the numerical stiffness value next to the spring, by default True.
row : int or None,
Row of subplot to draw line on. If None specified assumes a full plot,
by default None.
col : int or None,
Column of subplot to draw line on. If None specified assumes a full
plot, by default None.
units: str,
The units suffix drawn with the stiffness value. Default is 'N/m'.
Returns
-------
plotly figure
Returns the plotly figure passed into function with the spring shape
appended to it."""
x_sup = support._position
# x0 and y0 initialised so that when loop through each point in the coords
# list will have two points to reference.
x0, y0 = 0, 0
# reduction of 0.8 used on coords specified (simple reduction modification)
reduce = 0.8
if orientation in ['up', 'right']:
# coords are points between lines to be created
# label and stiffness are defined for use as meta data to be added to
# the hovertemplate
if orientation == 'right':
coords = [(5, 0), (7, 5), (12, -5), (14, 0), (19, 0)]
stiffness = support._stiffness[0]
else:
coords = [(0, 5), (-5, 7), (5, 12), (0, 14), (0, 19)]
stiffness = support._stiffness[1]
# x1 and y1 are the ends of the line to be created
for x1, y1 in coords:
x1, y1 = x1 * reduce, y1 * reduce
# Create dictionary for line shape object. Note: multiple lines
# added but reference must always be to the same xanchor
shape = dict(
type="line",
xref="x", yref="y",
x0=x0, y0=y0, x1=x1, y1=y1,
line_color=color,
line_width=2,
xsizemode='pixel',
ysizemode='pixel',
xanchor=x_sup,
yanchor=0
)
# Append line to plot or subplot
if row and col:
fig.add_shape(shape, row=row, col=col)
else:
fig.add_shape(shape)
# set end point to be start point for the next line
x0, y0 = x1, y1
if show_values:
y0 = max(y0, 7)
annotation = dict(
xref="x", yref="y",
x=x_sup,
y=0,
yshift=y0 * 1.5,
xshift=x0 * 2,
text=f"{stiffness:.3f} {units}",
font_color=color,
showarrow=False,
)
# Append shape to plot or subplot
if row and col:
fig.add_annotation(annotation, row=row, col=col)
else:
fig.add_annotation(annotation)
return fig
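# A minimal usage sketch with a hypothetical stand-in for a Support instance;
# only the private attributes the function reads (_position, _stiffness) are provided.
import plotly.graph_objects as go

class _FakeSupport:
    _position = 2.0
    _stiffness = (0.0, 5000.0)

fig = draw_support_spring(go.Figure(), _FakeSupport(), orientation="up")
print(len(fig.layout.shapes), fig.layout.annotations[0].text)  # 5 shapes and the "5000.000 N/m" label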
|
73c546289ac02d9021375f553504991bdaa4ca89
| 3,637,702
|
def _collins_crt(r, R, P, p, K):
"""Wrapper of CRT for Collins's resultant algorithm. """
return gf_int(gf_crt([r, R], [P, p], K), P*p)
|
d84f5ad514872acacc5f7ef626cb05f5df7771f3
| 3,637,703
|
def quantity_remover(my_thing):
"""
removes pint quantities to make json output happy
Parameters
----------
my_thing
Returns
-------
"""
if hasattr(my_thing, 'magnitude'):
return 'QUANTITY', my_thing.magnitude, my_thing.units.format_babel()
elif isinstance(my_thing, dict):
newdict = dict()
for key, item in my_thing.items():
newdict[key] = quantity_remover(item)
return newdict
elif hasattr(my_thing, '__iter__') and not isinstance(my_thing, str):
my_type = type(my_thing)
return my_type([quantity_remover(item) for item in my_thing])
else:
return my_thing
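# A minimal usage sketch, assuming pint is installed (units.format_babel() also
# relies on the babel package); the exact unit strings in the output are illustrative.
import json
import pint

ureg = pint.UnitRegistry()
payload = {"length": 2.5 * ureg.metre, "label": "beam", "loads": [1.0 * ureg.newton, 2.0]}
print(json.dumps(quantity_remover(payload)))
# e.g. {"length": ["QUANTITY", 2.5, "meter"], "label": "beam", "loads": [["QUANTITY", 1.0, "newton"], 2.0]}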
|
54b2db5b638f297ca503513f79eb4eec4ac2afa2
| 3,637,704
|
import numpy as np
from scipy.spatial.distance import cityblock
def sliced_wasserstein(PD1, PD2, M=50):
""" Implementation of Sliced Wasserstein distance as described in
Sliced Wasserstein Kernel for Persistence Diagrams by Mathieu Carriere, Marco Cuturi, Steve Oudot (https://arxiv.org/abs/1706.03358)
Parameters
-----------
PD1: np.array size (m,2)
Persistence diagram
PD2: np.array size (n,2)
Persistence diagram
M: int, default is 50
Iterations to run approximation.
Returns
--------
sw: float
Sliced Wasserstein distance between PD1 and PD2
"""
diag_theta = np.array(
[np.cos(0.25 * np.pi), np.sin(0.25 * np.pi)], dtype=np.float32
)
l_theta1 = [np.dot(diag_theta, x) for x in PD1]
l_theta2 = [np.dot(diag_theta, x) for x in PD2]
if (len(l_theta1) != PD1.shape[0]) or (len(l_theta2) != PD2.shape[0]):
raise ValueError("The projected points and origin do not match")
PD_delta1 = [[np.sqrt(x ** 2 / 2.0)] * 2 for x in l_theta1]
PD_delta2 = [[np.sqrt(x ** 2 / 2.0)] * 2 for x in l_theta2]
# i have the input now to compute the sw
sw = 0
theta = 0.5
step = 1.0 / M
for i in range(M):
l_theta = np.array(
[np.cos(theta * np.pi), np.sin(theta * np.pi)], dtype=np.float32
)
V1 = [np.dot(l_theta, x) for x in PD1] + [np.dot(l_theta, x) for x in PD_delta2]
V2 = [np.dot(l_theta, x) for x in PD2] + [np.dot(l_theta, x) for x in PD_delta1]
sw += step * cityblock(sorted(V1), sorted(V2))
theta += step
return sw
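# A quick usage sketch on two tiny, hypothetical persistence diagrams
# (each row is a (birth, death) pair); M controls the approximation quality.
PD1 = np.array([[0.0, 1.0], [0.2, 0.5]])
PD2 = np.array([[0.0, 0.9], [0.3, 0.6]])
print(sliced_wasserstein(PD1, PD2, M=100))  # a small non-negative float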
|
c8de271435b9b393f7230c13f6eb746e3d566828
| 3,637,705
|
def update_handler(request):
"""Responds to any HTTP request.
Args:
request (flask.Request): HTTP request object.
Returns:
The response text or any set of values that can be turned into a
Response object using
`make_response <https://flask.palletsprojects.com/en/1.1.x/api/#flask.Flask.make_response>`.
"""
print(request)
request_json = request.get_json()
content_type = request.headers['content-type']
if content_type == 'application/json':
request_json = request.get_json(silent=True)
if request_json and 'name' in request_json:
name = request_json['name']
else:
raise ValueError("JSON is invalid, or missing a 'name' property")
elif content_type == 'application/octet-stream':
name = request.data
elif content_type == 'text/plain':
name = request.data
elif content_type == 'application/x-www-form-urlencoded':
name = request.form.get('name')
else:
raise ValueError("Unknown content type: {}".format(content_type))
return 'Hello {}!'.format(escape(name))
# if request.args and 'message' in request.args:
# return request.args.get('message')
# elif request_json and 'message' in request_json:
# return request_json['message']
# else:
# return f'Hello World xxx!'
|
5fa052ddbd9e4016645e0ee81be1d8aeaaca7531
| 3,637,706
|
def usercourse(request, course_code):
"""
    This function is used for the course content page.
"""
user = request.user
extrainfo = ExtraInfo.objects.select_related().get(user=user) # get the type of user
courseid = Courses.objects.select_related().get(code=course_code)
classes = OnlineClasses.objects.select_related().filter(course_id=courseid.id)
if extrainfo.user_type == 'faculty':
if request.method == 'POST':
if 'submiturl' in request.POST:
topic = request.POST.get('topicName')
class_date = request.POST.get('date')
start_time = request.POST.get('StartTime')
end_time = request.POST.get('EndTime')
upload_url = request.POST.get('ClassURL')
OnlineClasses.objects.create(course_id = courseid,
class_date=class_date,
start_time=start_time,
end_time=end_time,
description=topic,
upload_url=upload_url
)
if 'deleteurl' in request.POST:
classid = request.POST.get('delete-id')
OnlineClasses.objects.get(id=classid).delete()
return render(request, "online_cms/course_new.html", {'classes': classes, 'extrainfo': extrainfo})
|
ab5519f211e4c6e2574536f1a1c5a781d3529e7d
| 3,637,707
|
def add_device(config_id, name, device_type_id, device_subtype_id, ip4address, ip6address, properties):
"""Add device to BAM."""
response = get_api()._api_client.service.addDevice(config_id, name, device_type_id, device_subtype_id, ip4address,
ip6address, properties)
return get_api().get_entity_by_id(response)
|
84c66b5ab3951b764a8669bb49438eb6101e1355
| 3,637,708
|
def cleanse_param_name(name):
"""Converts Chainer parameter names to ONNX names.
    Note that an ONNX identifier must be a valid C identifier.
Args:
name (str): A Chainer parameter name (e.g., /l/W).
    Returns:
A valid ONNX name (e.g., param_l_W).
"""
return 'param' + name.replace('/', '_')
|
9b7774aabeeab322f53321b91195333359c8ee7b
| 3,637,710
|
def calc_checksum_for_ip_change(old_ip_packet, new_ip_packet, old_checksum, is_ipv6=False):
""" ip地址改变之后重新获取校检码
:param old_ip_packet:
:param new_ip_packet:
:param old_checksum:
:param is_ipv6:是否是ipv6
:return:
"""
final_checksum = old_checksum
a = 0
b = 1
# tmpcsum = old_checksum
if is_ipv6:
n = 8
else:
n = 2
i = 0
while i < n:
old_field = (old_ip_packet[a] << 8) | old_ip_packet[b]
new_field = (new_ip_packet[a] << 8) | new_ip_packet[b]
# final_checksum = checksum.calc_incre_checksum(final_checksum, old_field, new_field)
final_checksum = fn_utils.calc_incre_csum(final_checksum, old_field, new_field)
a = a + 2
b = b + 2
i += 1
return final_checksum
|
7bcc7d96b6b8eef9c1ef93ca922ec192194785ff
| 3,637,711
|
def get_sender_password():
"""Get sender password
"""
try:
return Setting.objects.get(slug=KEY_SENDER_PASSWORD)
except Setting.DoesNotExist:
return None
|
80e0c0843b02f7a27d62727fc6b104a566cc7442
| 3,637,712
|
def standardize_data(df):
"""Standardizes the data by cleaning string values and standardizing column
names.
df: Pandas dataframe to standardize.
"""
# Clean string values in the dataframe.
df = df.applymap(
lambda x: x.replace('"', '').strip() if isinstance(x, str) else x)
# Standardize column names.
df = df.rename(columns=COL_NAME_MAPPING)
# Add race metadata columns.
if std_col.RACE_CATEGORY_ID_COL in df.columns:
std_col.add_race_columns_from_category_id(df)
return df
|
68a00c00003206e1875ca166de02336fb845fce3
| 3,637,713
|
def MatrixExp6(se3mat):
"""Computes the matrix exponential of an se3 representation of
exponential coordinates
:param se3mat: A matrix in se3
:return: The matrix exponential of se3mat
Example Input:
se3mat = np.array([[0, 0, 0, 0],
[0, 0, -1.57079632, 2.35619449],
[0, 1.57079632, 0, 2.35619449],
[0, 0, 0, 0]])
Output:
np.array([[1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, -1.0, 0.0],
[0.0, 1.0, 0.0, 3.0],
[ 0, 0, 0, 1]])
"""
omgtheta = so3ToVec(se3mat[0: 3, 0: 3])
if omgtheta.norm() == 0:
return (eye(3).row_join(se3mat[0: 3, 3])).col_join(Matrix([[0, 0, 0, 1]]))
else:
theta = AxisAng3(omgtheta)[1]
omgmat = se3mat[0: 3, 0: 3] / theta
R = MatrixExp3(se3mat[0: 3, 0: 3])
p = (eye(3) * theta + (1 - cos(theta)) * omgmat + (theta - sin(theta)) \
* omgmat*omgmat) * se3mat[0: 3, 3] / theta
T = (R.row_join(p)).col_join(Matrix(1,4,[0,0,0,1]))
return T
|
5fb0c8ec0a43410c8bb85e98b3edbd8ab23efea0
| 3,637,714
|
import re
from itertools import chain, compress, tee
def parse_log(content, arg_parser=json_arg_parser):
""" Parse important information from log files.
These log files are small so we are making the logic a little simpler by loading all
the content into memory at once rather than using an iostream.
Args:
content (string): the string content of the file
Returns:
args (dict<string, value>): a dictionary of function arguments of the program that
created the log and the value those arguments were set to.
history (list<(int, float)>): a list of tuples of time (in epoch index) and corresponding
classification loss
runtime (float): runtime of program in seconds
"""
lines = content.split('\n')
# Part 1: parse arguments
arg_pair_lists = exception_safe_map(arg_parser, lines[:20], exception=NotArgLineException)
args = dict(chain.from_iterable(arg_pair_lists))
# parse CV
for l in lines[:10]:
m = re.match(r'subjects (\d+) are held out', l)
if m:
args['held_out'] = m.group(1)
# Part 2: parse history
    history_matches = map(
        lambda l: re.match(r'epoch (\d+), validation accuracy (.*)%', l),
        lines)
history_matches = compress(*tee(history_matches, 2)) # filter out the 'Nones'
history = [(int(h.group(1)), float(h.group(2))) for h in history_matches]
# Part 3: parse run time
runtime = None
for l in lines[-3:]:
m = re.match(r'Code ran for ran for (.+)m', l)
if m:
runtime = float(m.group(1))
break
if runtime is None or len(history) == 0 or len(args) == 0:
raise BadLogFileException('file was not formatted properly')
return args, history, runtime
|
88659c548cdd95bd152ee0a829486301b42956c4
| 3,637,715
|
import pandas as pd
def _ohlc_dict(df_or_figure, open='', high='', low='', close='', volume='',
validate='', **kwargs):
"""
Returns a dictionary with the actual column names that
correspond to each of the OHLCV values.
df_or_figure : DataFrame or Figure
open : string
Column name to be used for OPEN values
high : string
Column name to be used for HIGH values
low : string
Column name to be used for LOW values
close : string
Column name to be used for CLOSE values
volume : string
Column name to be used for VOLUME values
    validate : string
        Validates that the stated columns exist.
        Example:
            validate='ohv' | Will ensure Open, High
            and Volume columns exist.
"""
c_dir = {}
ohlcv = ['open', 'high', 'low', 'close', 'volume']
if type(df_or_figure) == pd.DataFrame:
cnames = df_or_figure.columns
elif type(df_or_figure) == Figure or type(df_or_figure) == dict:
cnames = df_or_figure.axis['ref'].keys()
elif type(df_or_figure) == pd.Series:
cnames = [df_or_figure.name]
c_min = dict([(v.lower(), v) for v in cnames])
for _ in ohlcv:
if _ in c_min.keys():
c_dir[_] = c_min[_]
else:
for c in cnames:
if _ in c.lower():
c_dir[_] = c
if open:
c_dir['open'] = open
if high:
c_dir['high'] = high
if low:
c_dir['low'] = low
if close:
c_dir['close'] = close
if volume:
c_dir['volume'] = volume
for v in list(c_dir.values()):
if v not in cnames:
raise StudyError('{0} is not a valid column name'.format(v))
if validate:
errs = []
val = validate.lower()
s_names = dict([(_[0], _) for _ in ohlcv])
cols = [_[0] for _ in c_dir.keys()]
for _ in val:
if _ not in cols:
errs.append(s_names[_])
if errs:
raise StudyError('Missing Columns: {0}'.format(', '.join(errs)))
return c_dir
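# A brief usage sketch with a hypothetical OHLCV DataFrame; the module-level
# names Figure and StudyError from the original source are not exercised here.
df = pd.DataFrame({"Open": [1.0], "High": [2.0], "Low": [0.5],
                   "Close": [1.5], "Volume": [100]})
print(_ohlc_dict(df, validate="ohlc"))
# {'open': 'Open', 'high': 'High', 'low': 'Low', 'close': 'Close', 'volume': 'Volume'}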
|
e6512a307217cb79b56942aa8c469a56d3cac8fc
| 3,637,716
|
def _s_to_b(value):
"""[string to binary single value]"""
try:
return bytes(value, 'utf-8')
except:
return value
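# Usage sketch covering both branches:
print(_s_to_b("abc"), _s_to_b(7))  # b'abc' 7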
|
bbabffa2fbd2ec62778a19c8ab3e1fe410b4640f
| 3,637,717
|
def get_or_create(
*, db_session, email: str, incident: Incident = None, **kwargs
) -> IndividualContact:
"""Gets or creates an individual."""
# we fetch the individual contact from the database
individual_contact = get_by_email_and_project(
db_session=db_session, email=email, project_id=incident.project.id
)
# we try to fetch the individual's contact information using the contact plugin
contact_plugin = plugin_service.get_active_instance(
db_session=db_session, project_id=incident.project.id, plugin_type="contact"
)
individual_info = {}
if contact_plugin:
individual_info = contact_plugin.instance.get(email, db_session=db_session)
kwargs["email"] = individual_info.get("email", email)
kwargs["name"] = individual_info.get("fullname", "Unknown")
kwargs["weblink"] = individual_info.get("weblink", "")
if not individual_contact:
# we create a new contact
individual_contact_in = IndividualContactCreate(**kwargs, project=incident.project)
individual_contact = create(
db_session=db_session, individual_contact_in=individual_contact_in
)
else:
# we update the existing contact
individual_contact_in = IndividualContactUpdate(**kwargs, project=incident.project)
individual_contact = update(
db_session=db_session,
individual_contact=individual_contact,
individual_contact_in=individual_contact_in,
)
return individual_contact
|
eacc9c048551f430927bdfe8c67a1af5209c0b18
| 3,637,718
|
def stats(request):
"""Return stats as JSON according to different GET query parameters."""
offset = request.GET.get('offset', '0')
limit = request.GET.get('limit', '10')
order_by = request.GET.get('order_by', 'public_backlinks')
return build_stats(offset, limit, order_by)
|
d252e8654a4a70f4de56e937b14cc449b6e477b6
| 3,637,719
|
def zero_crossing(arr, rank=1):
"""Calculates the zero crossing rate"""
if rank == 1:
nzc = tf.cast(tf.count_nonzero(tf_diff_axis(tf.sign(arr))), tf.float32)
else:
nzc = tf.cast(tf.count_nonzero(tf_diff_axis(tf.sign(arr)), axis=rank - 1), tf.float32)
arrlen = tf.cast(arr.shape[rank - 1], tf.float32)
return tf.divide(nzc, arrlen, name='zcr')
|
c7a6271d1cbf299278a06845e753d0e431716df8
| 3,637,721
|
def normalize_command(command):
"""Convert `command` to the string representation.
"""
if isinstance(command, list):
if len(command) == 1:
# This is either a quoted compound shell command or a simple
# one-item command. Pass it as is.
command = command[0]
else:
command = " ".join(shlex_quote(c) for c in command)
return command
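# Usage sketch; shlex_quote is assumed here to be Python 3's shlex.quote
# (the original module presumably imports an equivalent helper).
from shlex import quote as shlex_quote

print(normalize_command(["ls", "-l", "my file"]))  # ls -l 'my file'
print(normalize_command(["echo hello && ls"]))     # echo hello && ls  (single item passed through)
print(normalize_command("echo hi"))                # echo hi  (non-list input returned unchanged)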
|
700559f7b96ba4ea37f639fdc438db5c2ad70c29
| 3,637,722
|
def make_struct(*args, **kwargs):
"""Create a Struct class according to the given format"""
exec _structdef(*args, **kwargs)
return Struct
|
2fece3443e516019492af454f3f4b99bba2bd481
| 3,637,723
|
def link_iterable_by_fields(unlinked, other=None, fields=None, kind=None,
internal=False, relink=False):
"""Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``.
The database to be linked must have uniqueness for each object for the given ``fields``.
If ``kind``, limit objects in ``unlinked`` of type ``kind``.
If ``relink``, link to objects which already have an ``input``. Otherwise, skip already linked objects.
    If ``internal``, link objects in ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``."""
if kind:
kind = {kind} if isinstance(kind, str) else kind
if relink:
filter_func = lambda x: x.get('type') in kind
else:
filter_func = lambda x: x.get('type') in kind and not x.get('input')
else:
if relink:
filter_func = lambda x: True
else:
filter_func = lambda x: not x.get('input')
if internal:
other = unlinked
duplicates, candidates = {}, {}
try:
# Other can be a generator, so a bit convoluted
for ds in other:
key = activity_hash(ds, fields)
if key in candidates:
duplicates.setdefault(key, []).append(ds)
else:
candidates[key] = (ds['database'], ds['code'])
except KeyError:
raise StrategyError("Not all datasets in database to be linked have "
"``database`` or ``code`` attributes")
for container in unlinked:
for obj in filter(filter_func, container.get('exchanges', [])):
key = activity_hash(obj, fields)
if key in duplicates:
raise StrategyError(format_nonunique_key_error(obj, fields, duplicates[key]))
elif key in candidates:
obj['input'] = candidates[key]
return unlinked
|
08abab5fd1db346e2fedfc9e7a9ad7542e6424a7
| 3,637,724
|
def is_conn() -> bool:
"""是否连接核心网"""
return param.parent.ia != utz.IA_INVALID and param.parent.is_conn
|
8e3b06d49473caf43bf97fb133aec49907535777
| 3,637,725
|
def GetConstants():
"""Returns a list of all available constant values used by some Nexpose Criteria"""
return _get_filtered_classes(NexposeCriteriaConstant)
|
24be59ec50dada727efdb394c247435111ab4b5f
| 3,637,726
|
import json
def getEmpiresForUser(user_email):
"""Fetches empires for the given user.
Even though the empires should be in the data store already, we force fetch them from the server. This is
because it could be a new user and it hasn't synced yet, but also this provides a way for the user to force
their empire to update after changing names or shield (otherwise, they'd have to wait for ~3 hours when the
cron job runs)."""
keyname = 'profile:empires-for-user:'+user_email
empires = memcache.get(keyname)
if not empires:
# we fire off an HTTP request to each of the realms to get empire details about this email address
urls = {}
for realm_name,base_url in REALMS.items():
urls[realm_name] = base_url+'empires/search?email=' + user_email
# make simultaneous calls to all the URLs
rpcs = {}
for realm_name,url in urls.items():
rpc = urlfetch.create_rpc()
urlfetch.make_fetch_call(rpc, url, headers = {'Accept': 'text/json'})
rpcs[realm_name] = rpc
empires = {}
for realm_name, rpc in rpcs.items():
result = rpc.get_result()
if result.status_code == 200:
empire = json.loads(result.content)
if empire:
empire = empire["empires"][0]
empires[realm_name] = empire
# while we're here, save it to the data store
model.profile.Empire.Save(realm_name, empire)
memcache.set(keyname, empires, time=3600)
return empires
|
9d2e460c726a36b8071cdf6ea2ebeb8b36e468bc
| 3,637,727
|
import numpy as np
def netflix(es, ps, e0, l=0.0001):
"""Combine predictions with the optimal weights to minimize RMSE.
Ref: Töscher, A., Jahrer, M., & Bell, R. M. (2009). The bigchaos solution to the netflix grand prize.
Args:
es (list of float): RMSEs of predictions
ps (list of np.array): predictions
e0 (float): RMSE of all zero prediction
l (float): lambda as in the ridge regression
Returns:
(tuple):
- (np.array): ensemble predictions
- (np.array): weights for input predictions
"""
m = len(es)
n = len(ps[0])
X = np.stack(ps).T
pTy = 0.5 * (n * e0 ** 2 + (X ** 2).sum(axis=0) - n * np.array(es) ** 2)
w = np.linalg.pinv(X.T.dot(X) + l * n * np.eye(m)).dot(pTy)
return X.dot(w), w
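# A small usage sketch with synthetic data: blend two noisy predictions of y.
rng = np.random.default_rng(0)
y = rng.normal(size=1000)
p1 = y + rng.normal(scale=0.5, size=1000)
p2 = y + rng.normal(scale=1.0, size=1000)
def rmse(p):
    return float(np.sqrt(np.mean((p - y) ** 2)))
blend, weights = netflix([rmse(p1), rmse(p2)], [p1, p2], rmse(np.zeros(1000)))
print(weights, rmse(blend))  # the blend's RMSE should be close to (or below) the best input's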
|
359ca02bb6c7f9a3d4d25fe2b41a4bcac5fd086f
| 3,637,728
|
import fastapi
from fastapi import status
async def create_movie(
*,
session: aio_session.AsyncSession = fastapi.Depends(
dependencies.get_session),
movie_in: movie_model.MovieCreate,
current_patron: patron_model.Patron = fastapi.Depends( # pylint: disable=unused-argument
dependencies.get_current_active_patron),
) -> movie_model.Movie:
"""Creates a new movie."""
movie_db = await movie_crud.MovieCRUD.get_by_title(session,
movie_in.title_en)
if movie_db:
raise fastapi.HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail="An movie with this title already exists in the system.",
)
if current_patron.id != movie_in.proposed_by:
raise fastapi.HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="https://www.youtube.com/watch?v=Z4oDZCJMDeY")
movie = await movie_crud.MovieCRUD.create(session, model_in=movie_in)
return movie
|
88c1acca8980788031e9a64d22dd2ca0e629cc5c
| 3,637,730
|
import time
import math
def project_gdf(gdf, to_crs=None, to_latlong=False, verbose=False):
"""
https://github.com/gboeing/osmnx/blob/v0.9/osmnx/projection.py#L58
Project a GeoDataFrame to the UTM zone appropriate for its geometries'
centroid.
The simple calculation in this function works well for most latitudes, but
won't work for some far northern locations like Svalbard and parts of far
northern Norway.
Parameters
----------
gdf : GeoDataFrame
the gdf to be projected
to_crs : dict
if not None, just project to this CRS instead of to UTM
to_latlong : bool
if True, projects to latlong instead of to UTM
Returns
-------
GeoDataFrame
"""
assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
start_time = time.time()
# if gdf has no gdf_name attribute, create one now
if not hasattr(gdf, 'gdf_name'):
gdf.gdf_name = 'unnamed'
# if to_crs was passed-in, use this value to project the gdf
if to_crs is not None:
projected_gdf = gdf.to_crs(to_crs)
# if to_crs was not passed-in, calculate the centroid of the geometry to
# determine UTM zone
else:
if to_latlong:
# if to_latlong is True, project the gdf to latlong
latlong_crs = default_crs
projected_gdf = gdf.to_crs(latlong_crs)
if verbose:
print('Projected the GeoDataFrame "{}" to default_crs in {:,.2f} seconds'.format(gdf.gdf_name, time.time()-start_time))
else:
# else, project the gdf to UTM
# if GeoDataFrame is already in UTM, just return it
# if (gdf.crs is not None) and ('proj' in gdf.crs) and (gdf.crs['proj'] == 'utm'):
if gdf.crs.is_projected and gdf.crs.coordinate_operation.name.upper().startswith('UTM'):
return gdf
# calculate the centroid of the union of all the geometries in the
# GeoDataFrame
avg_longitude = gdf['geometry'].unary_union.centroid.x
# calculate the UTM zone from this avg longitude and define the UTM
# CRS to project
utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
utm_crs = {'datum': 'WGS84',
'ellps': 'WGS84',
'proj' : 'utm',
'zone' : utm_zone,
'units': 'm'}
# project the GeoDataFrame to the UTM CRS
projected_gdf = gdf.to_crs(utm_crs)
        if verbose:
            print('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} seconds'.format(gdf.gdf_name, utm_zone, time.time()-start_time))
projected_gdf.gdf_name = gdf.gdf_name
return projected_gdf
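# A usage sketch with a hypothetical two-point GeoDataFrame (requires geopandas
# and shapely); passing to_crs keeps the example independent of the module-level
# default_crs used by the to_latlong branch.
import geopandas as gpd
from shapely.geometry import Point

gdf = gpd.GeoDataFrame(geometry=[Point(151.2, -33.9), Point(151.3, -33.8)], crs="EPSG:4326")
projected = project_gdf(gdf, to_crs="EPSG:3857")
print(projected.crs)  # EPSG:3857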
|
aed2c42282301d2623c92dd1516f99d953afc1c2
| 3,637,731
|
from .. import __version__
from ..importer import IMPORTED
from .driver import schema_all_drivers
from .executor import schema_all_executors
from .flow import schema_flow
from .meta import schema_metas
from .request import schema_requests
from .pod import schema_pod
def get_full_schema() -> dict:
"""
Return the full schema for Jina core as a dict.
"""
definitions = {}
for s in [
schema_all_drivers,
schema_all_executors,
schema_flow,
schema_metas,
schema_requests,
schema_pod,
IMPORTED.schema_executors,
IMPORTED.schema_drivers
]:
definitions.update(s)
# fix CompoundExecutor
definitions['Jina::Executors::CompoundExecutor']['properties']['components'] = {
'$ref': '#/definitions/Jina::Executors::All'
}
return {
'$id': f'https://api.jina.ai/schemas/{__version__}.json',
'$schema': 'http://json-schema.org/draft-07/schema#',
'description': 'The YAML schema of Jina objects (Flow, Executor, Drivers).',
'type': 'object',
'oneOf':
[{'$ref': '#/definitions/Jina::Flow'}] +
[{"$ref": f"#/definitions/{k}"} for k in IMPORTED.schema_executors.keys()],
'definitions': definitions
}
|
db31d02fc1ef7ef3ed19cefffc3dcd0cdfdbb237
| 3,637,732
|
from numpy.linalg import norm
def power_method(A, x0, n_iter=1):
    """Compute the leading singular triplet (u, s, v) of A by the power method."""
for i in range(n_iter):
x0 = A.T @ A @ x0
v = x0 / norm(x0)
s = norm(A @ v)
u = A @ v / s
return u, s, v
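# Quick usage sketch: compare against numpy's SVD for a random matrix.
import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(40, 30))
u, s, v = power_method(A, rng.normal(size=30), n_iter=100)
print(s, np.linalg.svd(A, compute_uv=False)[0])  # leading singular values should agree closely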
|
7efc860520535aab42aeda24e15e4d4f5c340901
| 3,637,733
|