| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
from typing import Callable
from typing import Tuple
import numpy as np
def metropolis_hastings(
proposal: Proposal,
state: State,
step_size: float,
ns: int,
unif: float,
inverse_transform: Callable
) -> Tuple[State, Info, np.ndarray, bool]:
"""Computes the Metropolis-Hastings accept-reject criterion given a proposal, a
current state of the chain, a integration step-size, and a number of
itnegration steps. We also provide a uniform random variable for
determining the accept-reject criterion and the inverse transformation
function for transforming parameters from an unconstrained space to a
constrained space.
Args:
proposal: A proposal operator to advance the state of the Markov chain.
state: An augmented state object with the updated position and momentum
and values for the log-posterior and metric and their gradients.
step_size: The integration step-size.
ns: The number of integration steps.
unif: Uniform random number for determining the accept-reject decision.
inverse_transform: Inverse transformation to map samples back to the
original space.
Returns:
state: An augmented state object with the updated position and momentum
and values for the log-posterior and metric and their gradients.
info: An information object with the updated number of fixed point
iterations and boolean indicator for successful integration.
q: The position variable in the constrained space.
accept: Whether or not the proposal was accepted.
"""
ham = hamiltonian(
state.momentum,
state.log_posterior,
state.logdet_metric,
state.inv_metric)
q, fldj = inverse_transform(state.position)
ildj = -fldj
new_state, prop_info = proposal.propose(state, step_size, ns)
new_chol, new_logdet = new_state.sqrtm_metric, new_state.logdet_metric
new_q, new_fldj = inverse_transform(new_state.position)
new_ham = hamiltonian(
new_state.momentum,
new_state.log_posterior,
new_state.logdet_metric,
new_state.inv_metric)
# Notice the relevant choice of sign when the Jacobian determinant of the
# forward or inverse transform is used.
#
# Write this expression as,
# (exp(-new_ham) / exp(new_fldj)) / (exp(-ham) * exp(ildj))
#
# See the following resource for understanding the Metropolis-Hastings
# correction with a Jacobian determinant correction [1].
#
# [1] https://wiki.helsinki.fi/download/attachments/48865399/ch7-rev.pdf
logu = np.log(unif)
metropolis = logu < ham - new_ham - new_fldj - ildj + prop_info.logdet
accept = np.logical_and(metropolis, prop_info.success)
if accept:
state = new_state
q = new_q
ildj = -new_fldj
state.momentum *= -1.0
return state, prop_info, q, accept
|
b5390d8a420ebb3d62c700fe246127935b658b6c
| 3,642,133
|
from datetime import datetime
def Now():
"""Returns a datetime.datetime instance representing the current time.
This is just a wrapper to ease testing against the datetime module.
Returns:
An instance of datetime.datetime.
"""
return datetime.now()
|
9a0657011e10b47eb755a575216944a786218f2e
| 3,642,135
|
import collections
import os
def ndvi_list_hdf(hdf_dir, satellite=None):
"""
List all the available HDF files, grouped by tile
Args:
hdf_dir: directory containing one subdirectory per year which contains
HDF files
satellite: None to select both Tera and Aqua, 'mod13q1' for MODIS,
'myd13q1' for Aqua
Returns:
dict: A dict (keyed by tile name) of lists of (full filepath,
timestamp_ms) tuples, sorted by timestamp_ms
"""
files = collections.defaultdict(lambda: [])
for subdir in os.listdir(hdf_dir):
subdir = os.path.join(hdf_dir, subdir)
if not os.path.isdir(subdir):
continue
for hdf_file in os.listdir(subdir):
if not hdf_file.endswith('.hdf'):
continue
try:
full_fname = os.path.join(subdir, hdf_file)
d = parse_ndvi_filename(hdf_file)
if satellite is not None and satellite != d['satellite']:
continue
files[d['tile_name']].append((full_fname, d['timestamp_ms']))
except ValueError as e:
print(e)
for tile_name in files.keys():
files[tile_name] = sorted(files[tile_name], key=lambda t: t[1])
return files
|
068062bdef503b6652c62c142a1cf80d830fc8db
| 3,642,136
|
def create_provisioned_product_name(account_name: str) -> str:
"""
Replaces all space characters in an Account Name with hyphens,
also removes all trailing and leading whitespace
"""
return account_name.strip().replace(" ", "-")
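# Illustrative usage (not part of the original snippet): leading/trailing
# whitespace is dropped and interior spaces become hyphens.
print(create_provisioned_product_name("  My Dev Account "))  # => My-Dev-Account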
|
743e7438f421d5d42c071d27d1b0fa2a816a9b4d
| 3,642,138
|
def case34():
"""
Create the IEEE 34 bus network from IEEE PES Test Feeders:
"https://site.ieee.org/pes-testfeeders/resources/".
OUTPUT:
**net** - The pandapower format network.
"""
net = pp.create_empty_network()
# Linedata
# CF-300
line_data = {'c_nf_per_km': 3.8250977, 'r_ohm_per_km': 0.69599766,
'x_ohm_per_km': 0.5177677,
'c0_nf_per_km': 1.86976748, 'r0_ohm_per_km': 1.08727498,
'x0_ohm_per_km': 1.47374703,
'max_i_ka': 0.23, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-300', element='line')
# CF-301
line_data = {'c_nf_per_km': 3.66884364, 'r_ohm_per_km': 1.05015841,
'x_ohm_per_km': 0.52265586,
'c0_nf_per_km': 1.82231544, 'r0_ohm_per_km': 1.48350255,
'x0_ohm_per_km': 1.60203942,
'max_i_ka': 0.18, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-301', element='line')
# CF-302
line_data = {'c_nf_per_km': 0.8751182, 'r_ohm_per_km': 0.5798427,
'x_ohm_per_km': 0.30768221,
'c0_nf_per_km': 0.8751182, 'r0_ohm_per_km': 0.5798427,
'x0_ohm_per_km': 0.30768221,
'max_i_ka': 0.14, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-302', element='line')
# CF-303
line_data = {'c_nf_per_km': 0.8751182, 'r_ohm_per_km': 0.5798427,
'x_ohm_per_km': 0.30768221,
'c0_nf_per_km': 0.8751182, 'r0_ohm_per_km': 0.5798427,
'x0_ohm_per_km': 0.30768221,
'max_i_ka': 0.14, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-303', element='line')
# CF-304
line_data = {'c_nf_per_km': 0.90382554, 'r_ohm_per_km': 0.39802955,
'x_ohm_per_km': 0.29436416,
'c0_nf_per_km': 0.90382554, 'r0_ohm_per_km': 0.39802955,
'x0_ohm_per_km': 0.29436416,
'max_i_ka': 0.18, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-304', element='line')
# Buses
# bus0 = pp.create_bus(net, name='Bus 0', vn_kv=69.0, type='n', zone='34_BUS')
bus_800 = pp.create_bus(net, name='Bus 800', vn_kv=24.9, type='n', zone='34_BUS')
bus_802 = pp.create_bus(net, name='Bus 802', vn_kv=24.9, type='n', zone='34_BUS')
bus_806 = pp.create_bus(net, name='Bus 806', vn_kv=24.9, type='n', zone='34_BUS')
bus_808 = pp.create_bus(net, name='Bus 808', vn_kv=24.9, type='n', zone='34_BUS')
bus_810 = pp.create_bus(net, name='Bus 810', vn_kv=24.9, type='n', zone='34_BUS')
bus_812 = pp.create_bus(net, name='Bus 812', vn_kv=24.9, type='n', zone='34_BUS')
bus_814 = pp.create_bus(net, name='Bus 814', vn_kv=24.9, type='n', zone='34_BUS')
bus_850 = pp.create_bus(net, name='Bus 850', vn_kv=24.9, type='n', zone='34_BUS')
bus_816 = pp.create_bus(net, name='Bus 816', vn_kv=24.9, type='n', zone='34_BUS')
bus_818 = pp.create_bus(net, name='Bus 818', vn_kv=24.9, type='n', zone='34_BUS')
bus_820 = pp.create_bus(net, name='Bus 820', vn_kv=24.9, type='n', zone='34_BUS')
bus_822 = pp.create_bus(net, name='Bus 822', vn_kv=24.9, type='n', zone='34_BUS')
bus_824 = pp.create_bus(net, name='Bus 824', vn_kv=24.9, type='n', zone='34_BUS')
bus_826 = pp.create_bus(net, name='Bus 826', vn_kv=24.9, type='n', zone='34_BUS')
bus_828 = pp.create_bus(net, name='Bus 828', vn_kv=24.9, type='n', zone='34_BUS')
bus_830 = pp.create_bus(net, name='Bus 830', vn_kv=24.9, type='n', zone='34_BUS')
bus_854 = pp.create_bus(net, name='Bus 854', vn_kv=24.9, type='n', zone='34_BUS')
bus_852 = pp.create_bus(net, name='Bus 852', vn_kv=24.9, type='n', zone='34_BUS')
bus_832 = pp.create_bus(net, name='Bus 832', vn_kv=24.9, type='n', zone='34_BUS')
bus_858 = pp.create_bus(net, name='Bus 858', vn_kv=24.9, type='n', zone='34_BUS')
bus_834 = pp.create_bus(net, name='Bus 834', vn_kv=24.9, type='n', zone='34_BUS')
bus_842 = pp.create_bus(net, name='Bus 842', vn_kv=24.9, type='n', zone='34_BUS')
bus_844 = pp.create_bus(net, name='Bus 844', vn_kv=24.9, type='n', zone='34_BUS')
bus_846 = pp.create_bus(net, name='Bus 846', vn_kv=24.9, type='n', zone='34_BUS')
bus_848 = pp.create_bus(net, name='Bus 848', vn_kv=24.9, type='n', zone='34_BUS')
bus_860 = pp.create_bus(net, name='Bus 860', vn_kv=24.9, type='n', zone='34_BUS')
bus_836 = pp.create_bus(net, name='Bus 836', vn_kv=24.9, type='n', zone='34_BUS')
bus_840 = pp.create_bus(net, name='Bus 840', vn_kv=24.9, type='n', zone='34_BUS')
bus_862 = pp.create_bus(net, name='Bus 862', vn_kv=24.9, type='n', zone='34_BUS')
bus_838 = pp.create_bus(net, name='Bus 838', vn_kv=24.9, type='n', zone='34_BUS')
bus_864 = pp.create_bus(net, name='Bus 864', vn_kv=24.9, type='n', zone='34_BUS')
bus_888 = pp.create_bus(net, name='Bus 888', vn_kv=4.16, type='n', zone='34_BUS')
bus_890 = pp.create_bus(net, name='Bus 890', vn_kv=4.16, type='n', zone='34_BUS')
bus_856 = pp.create_bus(net, name='Bus 856', vn_kv=24.9, type='n', zone='34_BUS')
# Lines
pp.create_line(net, bus_800, bus_802, length_km=0.786384, std_type='CF-300', name='Line 0')
pp.create_line(net, bus_802, bus_806, length_km=0.527304, std_type='CF-300', name='Line 1')
pp.create_line(net, bus_806, bus_808, length_km=9.823704, std_type='CF-300', name='Line 2')
pp.create_line(net, bus_808, bus_810, length_km=1.769059, std_type='CF-303', name='Line 3')
pp.create_line(net, bus_808, bus_812, length_km=11.43000, std_type='CF-300', name='Line 4')
pp.create_line(net, bus_812, bus_814, length_km=9.061704, std_type='CF-300', name='Line 5')
# pp.create_line(net, bus_814, bus_850, length_km=0.003048, std_type='CF-301', name='Line 6')
pp.create_line(net, bus_816, bus_818, length_km=0.521208, std_type='CF-302', name='Line 7')
pp.create_line(net, bus_816, bus_824, length_km=3.112008, std_type='CF-301', name='Line 8')
pp.create_line(net, bus_818, bus_820, length_km=14.67612, std_type='CF-302', name='Line 9')
pp.create_line(net, bus_820, bus_822, length_km=4.187952, std_type='CF-302', name='Line 10')
pp.create_line(net, bus_824, bus_826, length_km=0.923544, std_type='CF-303', name='Line 11')
pp.create_line(net, bus_824, bus_828, length_km=0.256032, std_type='CF-301', name='Line 12')
pp.create_line(net, bus_828, bus_830, length_km=6.230112, std_type='CF-301', name='Line 13')
pp.create_line(net, bus_830, bus_854, length_km=0.158496, std_type='CF-301', name='Line 14')
pp.create_line(net, bus_832, bus_858, length_km=1.493520, std_type='CF-301', name='Line 15')
pp.create_line(net, bus_834, bus_860, length_km=0.615696, std_type='CF-301', name='Line 16')
pp.create_line(net, bus_834, bus_842, length_km=0.085344, std_type='CF-301', name='Line 17')
pp.create_line(net, bus_836, bus_840, length_km=0.262128, std_type='CF-301', name='Line 18')
pp.create_line(net, bus_836, bus_862, length_km=0.085344, std_type='CF-301', name='Line 19')
pp.create_line(net, bus_842, bus_844, length_km=0.411480, std_type='CF-301', name='Line 20')
pp.create_line(net, bus_844, bus_846, length_km=1.109472, std_type='CF-301', name='Line 21')
pp.create_line(net, bus_846, bus_848, length_km=0.161544, std_type='CF-301', name='Line 22')
pp.create_line(net, bus_850, bus_816, length_km=0.094488, std_type='CF-301', name='Line 23')
# pp.create_line(net, bus_852, bus_832, length_km=0.003048, std_type='CF-301', name='Line 24')
pp.create_line(net, bus_854, bus_856, length_km=7.110984, std_type='CF-303', name='Line 25')
pp.create_line(net, bus_854, bus_852, length_km=11.22578, std_type='CF-301', name='Line 26')
pp.create_line(net, bus_858, bus_864, length_km=0.493776, std_type='CF-302', name='Line 27')
pp.create_line(net, bus_858, bus_834, length_km=1.776984, std_type='CF-301', name='Line 28')
pp.create_line(net, bus_860, bus_836, length_km=0.816864, std_type='CF-301', name='Line 29')
pp.create_line(net, bus_860, bus_838, length_km=1.481328, std_type='CF-304', name='Line 30')
pp.create_line(net, bus_888, bus_890, length_km=3.218688, std_type='CF-300', name='Line 31')
# Regulator 1
pp.create_transformer_from_parameters(net, bus_814, bus_850, sn_mva=2.5, vn_hv_kv=24.9,
vn_lv_kv=24.9, vkr_percent=0.320088*2.5, vk_percent=0.357539*2.5,
pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
tap_side='lv', tap_neutral=0, tap_max=16, tap_min=-16,
tap_step_percent=0.625, tap_pos=0, tap_phase_shifter=False,
name='Regulator 1')
# Regulator 2
pp.create_transformer_from_parameters(net, bus_852, bus_832, sn_mva=2.5, vn_hv_kv=24.9,
vn_lv_kv=24.9, vkr_percent=0.320088*2.5, vk_percent=0.357539*2.5,
pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
tap_side='lv', tap_neutral=0, tap_max=16, tap_min=-16,
tap_step_percent=0.625, tap_pos=0, tap_phase_shifter=False,
name='Regulator 2')
# # Substation
# pp.create_transformer_from_parameters(net, bus0, bus_800, sn_mva=2.5, vn_hv_kv=69.0,
# vn_lv_kv=24.9, vkr_percent=1.0, vk_percent=8.062257,
# pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
# tap_side='lv', tap_neutral=0, tap_max=2, tap_min=-2,
# tap_step_percent=2.5, tap_pos=0, tap_phase_shifter=False,
# name='Substation')
# Transformer
pp.create_transformer_from_parameters(net, bus_832, bus_888, sn_mva=0.5, vn_hv_kv=24.9,
vn_lv_kv=4.16, vkr_percent=1.9, vk_percent=4.5,
pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
name='Transformer 1')
# Loads
pp.create_load(net, bus_806, p_mw=0.055, q_mvar=0.029, name='Load 806')
pp.create_load(net, bus_810, p_mw=0.016, q_mvar=0.008, name='Load 810')
pp.create_load(net, bus_820, p_mw=0.034, q_mvar=0.017, name='Load 820')
pp.create_load(net, bus_822, p_mw=0.135, q_mvar=0.070, name='Load 822')
pp.create_load(net, bus_824, p_mw=0.005, q_mvar=0.002, name='Load 824')
pp.create_load(net, bus_826, p_mw=0.004, q_mvar=0.020, name='Load 826')
pp.create_load(net, bus_828, p_mw=0.004, q_mvar=0.002, name='Load 828')
pp.create_load(net, bus_830, p_mw=0.007, q_mvar=0.003, name='Load 830')
pp.create_load(net, bus_856, p_mw=0.004, q_mvar=0.002, name='Load 856')
pp.create_load(net, bus_858, p_mw=0.015, q_mvar=0.007, name='Load 858')
pp.create_load(net, bus_864, p_mw=0.002, q_mvar=0.001, name='Load 864')
pp.create_load(net, bus_834, p_mw=0.032, q_mvar=0.017, name='Load 834')
pp.create_load(net, bus_860, p_mw=0.029, q_mvar=0.073, name='Load 860')
pp.create_load(net, bus_836, p_mw=0.082, q_mvar=0.043, name='Load 836')
pp.create_load(net, bus_840, p_mw=0.040, q_mvar=0.020, name='Load 840')
pp.create_load(net, bus_838, p_mw=0.028, q_mvar=0.014, name='Load 838')
pp.create_load(net, bus_844, p_mw=0.009, q_mvar=0.005, name='Load 844')
pp.create_load(net, bus_846, p_mw=0.037, q_mvar=0.031, name='Load 846')
pp.create_load(net, bus_848, p_mw=0.023, q_mvar=0.011, name='Load 848')
pp.create_load(net, bus_860, p_mw=0.060, q_mvar=0.048, name='Load 860 spot')
pp.create_load(net, bus_840, p_mw=0.027, q_mvar=0.021, name='Load 840 spot')
pp.create_load(net, bus_844, p_mw=0.405, q_mvar=0.315, name='Load 844 spot')
pp.create_load(net, bus_848, p_mw=0.060, q_mvar=0.048, name='Load 848 spot')
pp.create_load(net, bus_890, p_mw=0.450, q_mvar=0.225, name='Load 890 spot')
pp.create_load(net, bus_830, p_mw=0.045, q_mvar=0.020, name='Load 830 spot')
# External grid
pp.create_ext_grid(net, bus_800, vm_pu=1.0, va_degree=0.0, s_sc_max_mva=10.0,
s_sc_min_mva=10.0, rx_max=1, rx_min=1, r0x0_max=1, x0x_max=1)
# Distributed generators
pp.create_sgen(net, bus_848, p_mw=0.66, q_mvar=0.500, name='DG 1', max_p_mw=0.66, min_p_mw=0, max_q_mvar=0.5, min_q_mvar=0)
pp.create_sgen(net, bus_890, p_mw=0.50, q_mvar=0.375, name='DG 2', max_p_mw=0.50, min_p_mw=0, max_q_mvar=0.375, min_q_mvar=0)
pp.create_sgen(net, bus_822, p_mw=0.1, type='PV', name='PV 1', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_856, p_mw=0.1, type='PV', name='PV 2', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_838, p_mw=0.1, type='PV', name='PV 3', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_822, p_mw=0.1, type='WP', name='WP 1', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_826, p_mw=0.1, type='WP', name='WP 2', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_838, p_mw=0.1, type='WP', name='WP 3', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
# Shunt capacitor bank
pp.create_shunt(net, bus_840, q_mvar=-0.12, name='SCB 1', step=4, max_step=4)
pp.create_shunt(net, bus_864, q_mvar=-0.12, name='SCB 2', step=4, max_step=4)
# storage
pp.create_storage(net, bus_810, p_mw=0.5, max_e_mwh=2, sn_mva=1.0, soc_percent=50, min_e_mwh=0.2, name='Storage')
return net
|
8e04a125df0e0a64008724d419bafe19481f5ac1
| 3,642,139
|
def _hack_namedtuple(cls):
"""Make class generated by namedtuple picklable."""
name = cls.__name__
fields = cls._fields
def reduce(self):
return (_restore, (name, fields, tuple(self)))
cls.__reduce__ = reduce
cls._is_namedtuple_ = True
return cls
|
89468f0ffb5506ef0c9a33fec0d390576638e659
| 3,642,140
|
import tokenize
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
def build_model():
"""
Returns built and tuned model using pipeline
Parameters:
No arguments
Returns:
cv (estimator): tuned model
"""
pipeline = Pipeline([
('Features', FeatureUnion([
('text_pipeline', Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer())
])),
('starting_verb', StartingVerbExtractor())
])),
('clf', MultiOutputClassifier(DecisionTreeClassifier()))
])
# now we can perform another grid search on this new estimator to be sure we have the best parameters
parameters = {
#'Features__text_pipeline__vect__max_df': [0.5,1.0],
'Features__text_pipeline__tfidf__smooth_idf': (True, False)
}
cv = GridSearchCV(pipeline, param_grid=parameters)
return cv
|
94bc0ad8a3eb48531cb6229972099369a9b9cf61
| 3,642,142
|
def INPUT_BTN(**attributes):
"""
Utility function to create a styled button
"""
return SPAN(INPUT(_class = "button-right",
**attributes),
_class = "button-left")
|
6c6610626367795518ca737b53950d3687ae4d91
| 3,642,143
|
from collections import defaultdict
def load_annotations(file_path):
"""Loads a file containing annotations for multiple documents.
The file should contain lines with the following format:
<DOCUMENT ID> <LINES> <SPAN START POSITIONS> <SPAN LENGTHS> <SEVERITY>
Fields are separated by tabs; LINES, SPAN START POSITIONS and SPAN LENGTHS
can have a list of values separated by whitespace.
Args:
file_path: path to the file.
Returns:
A dictionary mapping document IDs to a list of annotations.
"""
annotations = defaultdict(list)
with open(file_path, 'r', encoding='utf8') as f:
for i, line in enumerate(f):
line = line.strip()
if not line:
continue
fields = line.split('\t')
doc_id = fields[0]
try:
annotation = Annotation.from_fields(fields[1:])
except OverlappingSpans:
msg = 'Overlapping spans when reading line %d of file %s '
msg %= (i, file_path)
print(msg)
continue
annotations[doc_id].append(annotation)
return annotations
|
0c674142ae0d99670e63959c3c00ed0ca2c8fac1
| 3,642,144
|
import re
def search(request, template_name='blog/post_search.html'):
"""
Search for blog posts.
This template will allow you to setup a simple search form that will try to return results based on
given search strings. The queries will be put through a stop words filter to remove words like
'the', 'a', or 'have' to help improve the result set.
Template: ``blog/post_search.html``
Context:
object_list
List of blog posts that match given search term(s).
search_term
Given search term.
"""
context = {}
if request.GET:
stop_word_list = re.compile(STOP_WORDS_RE, re.IGNORECASE)
search_term = '%s' % request.GET['q']
cleaned_search_term = stop_word_list.sub('', search_term)
cleaned_search_term = cleaned_search_term.strip()
if len(cleaned_search_term) != 0:
post_list = Post.objects.published().filter(Q(title__icontains=cleaned_search_term) | Q(body__icontains=cleaned_search_term) | Q(tags__icontains=cleaned_search_term) | Q(categories__title__icontains=cleaned_search_term))
context = {'object_list': post_list, 'search_term':search_term}
else:
message = 'Search term was too vague. Please try again.'
context = {'message':message}
return render(request, template_name, context)
|
fdb72279b6ed5fe5e87c888b7a10c8a3ed8f94d0
| 3,642,145
|
import math
import numpy as np
from scipy import ndimage
def orient_data (data, header, header_out=None, MLBG_rot90_flip=False, log=None,
tel=None):
"""Function to remap [data] from the CD matrix defined in [header] to
the CD matrix taken from [header_out]. If the latter is not
provided the output orientation will be North up, East left.
If [MLBG_rot90_flip] is switched on and the data is from MeerLICHT or
BlackGEM, the data will be oriented within a few degrees from
North up, East left while preserving the pixel values in the new,
*remapped* reference, D and Scorr images.
"""
# rotation matrix:
# R = [[dx * cos(theta), dy * -sin(theta)],
# [dx * sin(theta), dy * cos(theta)]]
# with theta=0: North aligned with positive y-axis
# and East with the positive x-axis (RA increases to the East)
#
# N.B.: np.dot(R, [[x], [y]]) = np.dot([x,y], R.T)
#
# matrices below are defined using the (WCS) header keywords
# CD?_?:
#
# [ CD1_1 CD2_1 ]
# [ CD1_2 CD2_2 ]
#
# orient [data] with its orientation defined in [header] to the
# orientation defined in [header_out]. If the latter is not
# provided, the output orientation will be North up, East left.
# check if input data is square; if it is not, the transformation
# will not be done properly.
assert data.shape[0] == data.shape[1]
# define data CD matrix, assumed to be in [header]
CD_data = read_CD_matrix (header, log=log)
# determine output CD matrix, either from [header_out] or North
# up, East left
if header_out is not None:
CD_out = read_CD_matrix (header_out, log=log)
else:
# define the CD matrix with North up and East left, using the
# pixel scale from the input [header]
pixscale = read_header(header, ['pixscale'])
cdelt = pixscale/3600
CD_out = np.array([[-cdelt, 0], [0, cdelt]])
# check if values of CD_data and CD_out are similar
CD_close = [math.isclose(CD_data[i,j], CD_out[i,j], rel_tol=1e-3)
for i in range(2) for j in range(2)]
#if log is not None:
# log.info ('CD_close: {}'.format(CD_close))
if np.all(CD_close):
#if log is not None:
# log.info ('data CD matrix already similar to CD_out matrix; '
# 'no need to remap data')
# if CD matrix values are all very similar, do not bother to
# do the remapping
data2return = data
elif MLBG_rot90_flip and tel in ['ML1', 'BG2', 'BG3', 'BG4']:
#if log is not None:
# log.info ('for ML/BG: rotating data by exactly 90 degrees and for '
# 'ML also flip left/right')
# rotate data by exactly 90 degrees counterclockwise (when
# viewing data with y-axis increasing to the top!) and for ML1
# also flip in the East-West direction; for ML/BG this will
# result in an image within a few degrees of the North up,
# East left orientation while preserving the original pixel
# values of the new, *remapped* reference, D and Scorr images.
data2return = np.rot90(data, k=-1)
if tel=='ML1':
data2return = np.fliplr(data2return)
# equivalent operation: data2return = np.flipud(np.rot90(data))
else:
#if log is not None:
# log.info ('remapping data from input CD matrix: {} to output CD '
# 'matrix: {}'.format(CD_data, CD_out))
# transformation matrix, which is the dot product of the
# output CD matrix and the inverse of the data CD matrix
CD_data_inv = np.linalg.inv(CD_data)
CD_trans = np.dot(CD_out, CD_data_inv)
# transpose and flip because [affine_transform] performs
# np.dot(matrix, [[y],[x]]) rather than np.dot([x,y], matrix)
matrix = np.flip(CD_trans.T)
# offset, calculated from
#
# [xi - dxi, yo - dyo] = np.dot( [xo - dxo, yo - dyo], CD_trans )
#
# where xi, yi are the input coordinates corresponding to the
# output coordinates xo, yo in data and dxi/o, dyi/o are the
# corresponding offsets from the point of
# rotation/transformation, resulting in
#
# [xi, yi] = np.dot( [xo, yo], CD_trans ) + offset
# with
# offset = -np.dot( [dxo, dyo], CD_trans ) + [dxi, dyi]
# setting [dx0, dy0] and [dxi, dyi] to the center
center = (np.array(data.shape)-1)/2
offset = -np.dot(center, np.flip(CD_trans)) + center
# infer transformed data
data2return = ndimage.affine_transform(data, matrix, offset=offset,
mode='nearest')
return data2return
|
6ef27074692f46de56e5decd7d6b315e11c4d686
| 3,642,146
|
def _batchnorm_to_groupnorm(module: nn.modules.batchnorm._BatchNorm) -> nn.Module:
"""
Converts a BatchNorm ``module`` to GroupNorm module.
This is a helper function.
Args:
module: BatchNorm module to be replaced
Returns:
GroupNorm module that can replace the BatchNorm module provided
Notes:
A default value of 32 is chosen for the number of groups based on the
paper *Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour*
https://arxiv.org/pdf/1706.02677.pdf
"""
return nn.GroupNorm(min(32, module.num_features), module.num_features, affine=True)
|
1b923a28a4727b72768acf0fc00d93d9012c5349
| 3,642,147
|
import numpy as np
from sklearn.mixture import GaussianMixture
def _compute_bic(
data: np.ndarray,
n_clusters: int
) -> BICResult:
"""Compute the BIC statistic.
Parameters
----------
data: np.ndarray
The data to cluster.
n_clusters: int
Number of clusters to test.
Returns
-------
results: BICResult
The results as a BICResult object.
"""
gm = GaussianMixture(n_clusters)
gm.fit(data)
return BICResult(gm.bic(data), n_clusters)
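# Illustrative usage (not part of the original snippet): sweep candidate cluster
# counts and keep the model with the lowest BIC. The field names `bic` and
# `n_clusters` on BICResult are assumptions about that helper class.
# results = [_compute_bic(data, k) for k in range(1, 6)]
# best = min(results, key=lambda r: r.bic)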
|
3eb1a759a60f834f4e4fb7e364c9bce4ffb61230
| 3,642,148
|
def release_branch_name(config):
"""
build expected release branch name from current config
"""
branch_name = "{0}{1}".format(
config.gitflow_release_prefix(),
config.package_version()
)
return branch_name
|
0d97c515aca8412882c8b260405a63d20b4b0f63
| 3,642,149
|
def torch2numpy(data):
""" Transfer data from the torch tensor (on CPU) to the numpy array (on CPU). """
return data.numpy()
|
c7ca4123743c4f054d809f0e307a4de079b0af10
| 3,642,150
|
def new_schema(name, public_name, is_active=True, **options):
"""
This function adds a schema in schema model and creates physical schema.
"""
try:
schema = Schema(name=name, public_name=public_name, is_active=is_active)
schema.save()
except IntegrityError:
raise Exception('Schema already exists.')
create_schema(name, **options)
return schema
|
efd6ed2737c6a25e8beeaef9f7fffebdb9592f10
| 3,642,151
|
def find_appropriate_timestep(simulation_factory,
equilibrium_samples,
M,
midpoint_operator,
temperature,
timestep_range,
DeltaF_neq_threshold=1.0,
max_samples=10000,
batch_size=1000,
verbose=True
):
"""Perform binary search* over the timestep range, trying to find
the maximum timestep that results in DeltaF_neq that doesn't exceed threshold
or have gross instability problems.
(*Not-quite-binary-search: instead of deterministic comparisons,
it performs hypothesis tests at regular intervals.)
Sketch
------
* Maintain an interval (min_timestep, max_timestep)
* At each iteration:
* timestep <- (min_timestep + max_timestep) / 2
* Only simulate long enough to be confident that DeltaF_neq(timestep) != threshold.
* If we're confident DeltaF_neq(timestep) > threshold, reduce max_timestep to current timestep.
* If we're confident DeltaF_neq(timestep) < threshold, increase min_timestep to current timestep
Parameters
----------
simulation_factory: function
accepts a timestep argument and returns a simulation equipped with an integrator with that
timestep
equilibrium_samples: list
list of samples from the configuration distribution at equilibrium
M: int
protocol length
midpoint_operator: function
accepts a simulation as an argument, doesn't return anything
temperature: unit'd quantity
temperature used to resample velocities
timestep_range: iterable
(min_timestep, max_timestep)
DeltaF_neq_threshold: double, default=1.0
maximum allowable DeltaF_neq
max_samples: int
maximum number of protocol samples to collect per timestep
batch_size: int
number of protocol samples to collect between hypothesis tests
verbose: boolean
if True, print a bunch of stuff to the command prompt
Returns
-------
timestep: unit'd quantity
Maximum timestep tested that doesn't exceed the DeltaF_neq_threshold
"""
max_iter = 10
alpha = 1.96 # for now hard-coded confidence level
min_timestep, max_timestep = timestep_range[0], timestep_range[-1]
for i in range(max_iter):
timestep = (min_timestep + max_timestep) / 2
if verbose:
print("Current feasible range: [{:.3f}fs, {:.3f}fs]".format(
min_timestep.value_in_unit(unit.femtosecond),
max_timestep.value_in_unit(unit.femtosecond)
))
print("Testing: {:.3f}fs".format(timestep.value_in_unit(unit.femtosecond)))
simulation = simulation_factory(timestep)
simulation_crashed = False
changed_timestep_range = False
W_shads_F, W_shads_R, W_midpoints = [], [], []
def update_lists(W_shad_F, W_midpoint, W_shad_R):
W_shads_F.append(W_shad_F)
W_midpoints.append(W_midpoint)
W_shads_R.append(W_shad_R)
# collect up to max_samples protocol samples, making a decision about whether to proceed
# every batch_size samples
for _ in range(max_samples // batch_size):
# collect another batch_size protocol samples
for _ in range(batch_size):
# draw equilibrium sample
#x, v = equilibrium_sampler()
#simulation.context.setPositions(x)
#simulation.context.setVelocities(v)
simulation.context.setPositions(equilibrium_samples[np.random.randint(len(equilibrium_samples))])
simulation.context.setVelocitiesToTemperature(temperature)
# collect and store measurements
# if the simulation crashes, set simulation_crashed flag
try:
update_lists(*apply_protocol(simulation, M, midpoint_operator))
except Exception:
simulation_crashed = True
if verbose: print("A simulation crashed! Considering this timestep unstable...")
# if we didn't crash, update estimate of DeltaF_neq upper and lower confidence bounds
DeltaF_neq, sq_uncertainty = estimate_nonequilibrium_free_energy(np.array(W_shads_F)[:,-1], np.array(W_shads_R)[:,-1])
if np.isnan(DeltaF_neq + sq_uncertainty):
if verbose:
print("A simulation encountered NaNs!")
simulation_crashed = True
bound = alpha * np.sqrt(sq_uncertainty)
DeltaF_neq_lcb, DeltaF_neq_ucb = DeltaF_neq - bound, DeltaF_neq + bound
out_of_bounds = (DeltaF_neq_lcb > DeltaF_neq_threshold) or (DeltaF_neq_ucb < DeltaF_neq_threshold)
if verbose and (out_of_bounds or simulation_crashed):
print("After collecting {} protocol samples, DeltaF_neq is likely in the following interval: "
"[{:.3f}, {:.3f}]".format(len(W_shads_F), DeltaF_neq_lcb, DeltaF_neq_ucb))
# if (DeltaF_neq_lcb > threshold) or (nans are encountered), then we're pretty sure this timestep is too big,
# and we can move on to try a smaller one
if simulation_crashed or (DeltaF_neq_lcb > DeltaF_neq_threshold):
if verbose:
print("This timestep is probably too big!\n")
max_timestep = timestep
changed_timestep_range = True
break
# else, if (DeltaF_neq_ucb < threshold), then we're pretty sure we can get
# away with a larger timestep
elif (DeltaF_neq_ucb < DeltaF_neq_threshold):
if verbose:
print("We can probably get away with a larger timestep!\n")
min_timestep = timestep
changed_timestep_range = True
break
# else, the threshold is within the upper and lower confidence bounds, and we keep going
if (not changed_timestep_range):
timestep = (min_timestep + max_timestep) / 2
if verbose:
print("\nTerminating early: found the following timestep: ".format(timestep.value_in_unit(unit.femtosecond)))
return timestep
if verbose:
timestep = (min_timestep + max_timestep) / 2
print("\nTerminating: found the following timestep: ".format(timestep.value_in_unit(unit.femtosecond)))
return timestep
|
94000a07cfc8cf00e3440ff242c63da5c8be5d00
| 3,642,152
|
import numpy as np
def critical_bands():
"""
Compute the Critical bands as defined in the book:
Psychoacoustics by Zwicker and Fastl. Table 6.1 p. 159
"""
# center frequencies
fc = [
50,
150,
250,
350,
450,
570,
700,
840,
1000,
1170,
1370,
1600,
1850,
2150,
2500,
2900,
3400,
4000,
4800,
5800,
7000,
8500,
10500,
13500,
]
# boundaries of the bands (e.g. the first band is from 0Hz to 100Hz
# with center 50Hz, fb[0] to fb[1], center fc[0]
fb = [
0,
100,
200,
300,
400,
510,
630,
770,
920,
1080,
1270,
1480,
1720,
2000,
2320,
2700,
3150,
3700,
4400,
5300,
6400,
7700,
9500,
12000,
15500,
]
# now just make pairs
bands = [[fb[j], fb[j + 1]] for j in range(len(fb) - 1)]
return np.array(bands), fc
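# Illustrative usage (not part of the original snippet): 25 band edges yield 24
# [low, high] pairs, matched one-to-one with the 24 center frequencies.
bands, fc = critical_bands()
# bands.shape == (24, 2); bands[0] == [0, 100]; fc[0] == 50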
|
6301a6ee86d0ea3fb588213aa8b9453b14fb7036
| 3,642,153
|
def repackage(r, amo_id, amo_file, target_version=None, sdk_dir=None):
"""Pull amo_id/amo_file.xpi, schedule xpi creation, return hashtag
"""
# validate entries
# prepare data
hashtag = get_random_string(10)
sdk = SDK.objects.all()[0]
# if (when?) choosing sdk_dir will be possible
# sdk = SDK.objects.get(dir=sdk_dir) if sdk_dir else SDK.objects.all()[0]
sdk_source_dir = sdk.get_source_dir()
# extract packages
tasks.repackage.delay(
amo_id, amo_file, sdk_source_dir, hashtag, target_version)
# call build xpi task
# respond with a hashtag which will identify downloadable xpi
# URL to check if XPI is ready:
# /xpi/check_download/{hashtag}/
# URL to download:
# /xpi/download/{hashtag}/{desired_filename}/
return HttpResponse('{"hashtag": "%s"}' % hashtag,
mimetype='application/json')
|
9527f2fbe6077e25eee72a570f2e9702cbf3b510
| 3,642,154
|
def edges_to_adj_list(edges):
"""
Transforms a set of edges into an adjacency list (represented as a dictionary).
For UNDIRECTED graphs, i.e. if v2 in adj_list[v1], then v1 in adj_list[v2]
INPUT:
- edges : a set or list of edges
OUTPUT:
- adj_list: a dictionary with the vertices as keys, each with
a set of adjacent vertices.
"""
adj_list = {} # store in dictionary
for v1, v2 in edges:
if v1 in adj_list: # edge already in it
adj_list[v1].add(v2)
else:
adj_list[v1] = set([v2])
if v2 in adj_list: # edge already in it
adj_list[v2].add(v1)
else:
adj_list[v2] = set([v1])
return adj_list
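# Illustrative usage (not part of the original snippet): every edge is recorded
# in both directions, so the adjacency list is symmetric.
example_edges = [(1, 2), (2, 3), (1, 3)]
print(edges_to_adj_list(example_edges))
# => {1: {2, 3}, 2: {1, 3}, 3: {1, 2}}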
|
683f10e9a0a9b8a29d63b276b2e550ebe8287a05
| 3,642,155
|
from typing import Optional
def _get_lookups(
name: str,
project: interface.Project,
base: Optional[str] = None) -> list[str]:
"""[summary]
Args:
name (str): [description]
design (Optional[str]): [description]
kind (Optional[str]): [description]
Returns:
list[str]: [description]
"""
lookups = [name]
if name in project.outline.designs:
lookups.append(project.outline.designs[name])
if name in project.outline.kinds:
lookups.append(project.outline.kinds[name])
if base is not None:
lookups.append(base)
return lookups
|
c8f700a19bbae0167c8474f033d625a763743db8
| 3,642,156
|
def unwrap(value):
"""
Unwraps the given Document or DocumentList as applicable.
"""
if isinstance(value, Document):
return value.to_dict()
elif isinstance(value, DocumentList):
return value.to_list()
else:
return value
|
7e25c2935ff0a467e51097c4291e8d5f751c34db
| 3,642,157
|
def home_all():
"""Home page view.
On this page a summary campaign manager view will shown with all campaigns.
"""
context = dict(
oauth_consumer_key=OAUTH_CONSUMER_KEY,
oauth_secret=OAUTH_SECRET,
all=True,
map_provider=map_provider()
)
# noinspection PyUnresolvedReferences
return render_template('index.html', **context)
|
d987486f30cc5a8f6e697d9ccb92741b2d2067e4
| 3,642,158
|
import math
import numpy as np
def _sqrt(x):
"""_sqrt."""
isnumpy = isinstance(x, np.ndarray)
isscalar = np.isscalar(x)
return np.sqrt(x) if isnumpy else math.sqrt(x) if isscalar else x.sqrt()
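# Illustrative usage (not part of the original snippet): NumPy arrays go through
# np.sqrt, plain scalars through math.sqrt, and any other object is expected to
# provide its own .sqrt() method.
print(_sqrt(np.array([1.0, 4.0, 9.0])))  # => [1. 2. 3.]
print(_sqrt(16))                         # => 4.0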
|
16f566493deeaaf35841548e6db89408ca686bfe
| 3,642,159
|
def update_subnet(context, id, subnet):
"""Update values of a subnet.
: param context: neutron api request context
: param id: UUID representing the subnet to update.
: param subnet: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for 'allow_put'
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py.
"""
LOG.info("update_subnet %s for tenant %s" %
(id, context.tenant_id))
with context.session.begin():
subnet_db = db_api.subnet_find(context, id=id, scope=db_api.ONE)
if not subnet_db:
raise exceptions.SubnetNotFound(id=id)
s = subnet["subnet"]
always_pop = ["_cidr", "cidr", "first_ip", "last_ip", "ip_version",
"segment_id", "network_id"]
admin_only = ["do_not_use", "created_at", "tenant_id",
"next_auto_assign_ip", "enable_dhcp"]
utils.filter_body(context, s, admin_only, always_pop)
dns_ips = utils.pop_param(s, "dns_nameservers", [])
host_routes = utils.pop_param(s, "host_routes", [])
gateway_ip = utils.pop_param(s, "gateway_ip", None)
allocation_pools = utils.pop_param(s, "allocation_pools", None)
if not CONF.QUARK.allow_allocation_pool_update:
if allocation_pools:
raise exceptions.BadRequest(
resource="subnets",
msg="Allocation pools cannot be updated.")
alloc_pools = allocation_pool.AllocationPools(
subnet_db["cidr"],
policies=models.IPPolicy.get_ip_policy_cidrs(subnet_db))
else:
alloc_pools = allocation_pool.AllocationPools(subnet_db["cidr"],
allocation_pools)
if gateway_ip:
alloc_pools.validate_gateway_excluded(gateway_ip)
default_route = None
for route in host_routes:
netaddr_route = netaddr.IPNetwork(route["destination"])
if netaddr_route.value == routes.DEFAULT_ROUTE.value:
default_route = route
break
if default_route is None:
route_model = db_api.route_find(
context, cidr=str(routes.DEFAULT_ROUTE), subnet_id=id,
scope=db_api.ONE)
if route_model:
db_api.route_update(context, route_model,
gateway=gateway_ip)
else:
db_api.route_create(context,
cidr=str(routes.DEFAULT_ROUTE),
gateway=gateway_ip, subnet_id=id)
if dns_ips:
subnet_db["dns_nameservers"] = []
for dns_ip in dns_ips:
subnet_db["dns_nameservers"].append(db_api.dns_create(
context,
ip=netaddr.IPAddress(dns_ip)))
if host_routes:
subnet_db["routes"] = []
for route in host_routes:
subnet_db["routes"].append(db_api.route_create(
context, cidr=route["destination"], gateway=route["nexthop"]))
if CONF.QUARK.allow_allocation_pool_update:
if isinstance(allocation_pools, list):
cidrs = alloc_pools.get_policy_cidrs()
ip_policies.ensure_default_policy(cidrs, [subnet_db])
subnet_db["ip_policy"] = db_api.ip_policy_update(
context, subnet_db["ip_policy"], exclude=cidrs)
subnet = db_api.subnet_update(context, subnet_db, **s)
return v._make_subnet_dict(subnet)
|
f1ac159f612d3b8a5459ee3b70c440cf7cf84cd5
| 3,642,160
|
import rospy
def validate_params():
"""@rtype bool"""
def validate_single_param(param_name, required_type):
"""@rtype bool"""
inner_result = True
if not rospy.has_param(param_name):
rospy.logfatal('Parameter {} is not defined but needed'.format(param_name))
inner_result = False
else:
if type(required_type) is list and len(required_type) > 0:
if type(rospy.get_param(param_name)) not in required_type:
rospy.logfatal('Parameter {} is not any of type {}'.format(param_name, required_type))
inner_result = False
else:
if type(rospy.get_param(param_name)) is not required_type:
rospy.logfatal('Parameter {} is not of type {}'.format(param_name, required_type))
inner_result = False
return inner_result
result = True
result = result and validate_single_param('~update_frequency', int)
result = result and validate_single_param('~do_cpu', bool)
result = result and validate_single_param('~do_memory', bool)
result = result and validate_single_param('~do_network', bool)
return result
|
8734d9db7e29b8b6c30dc8a3ae72b0cf18c85310
| 3,642,161
|
def user_exists(username):
"""Return True if the username exists, or False if it doesn't."""
try:
adobe_api.AdobeAPIObject(username)
except adobe_api.AdobeAPINoUserException:
return False
return True
|
3767bec38c8058e7bd193e5532e4150ca501a96a
| 3,642,163
|
def bags_containing_bag(bag: str, rules: dict[str, list]) -> set[str]:
"""Returns the set of bags that have ``bag`` in their rules."""
return {r_bag
for r_bag, r_rule in rules.items()
for _, r_color in r_rule
if bag in r_color}
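# Illustrative usage (not part of the original snippet), assuming each rule value
# is a list of (count, colour) pairs as in the Advent of Code 2020 day 7 puzzle.
example_rules = {
    "light red": [(1, "bright white"), (2, "muted yellow")],
    "bright white": [(1, "shiny gold")],
    "faded blue": [],
}
print(bags_containing_bag("shiny gold", example_rules))  # => {'bright white'}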
|
f9e67a4ade4dd9bdf25e05669741c71270007215
| 3,642,164
|
def default_mutable_arguments():
"""Explore default mutable arguments, which are a dangerous game in themselves.
Why do mutable default arguments suffer from this apparent problem? A function's
default values are evaluated at the point of function definition in the defining
scope. In particular, we can examine these bindings by printing
append_twice.__defaults__ after append_twice has been defined. For this function,
we have
print(append_twice.__defaults__) # ([],)
If a binding for `lst` is not supplied, then the `lst` name inside append_twice
falls back to the array object that lives inside append_twice.__defaults__.
In particular, if we update `lst` in place during one function call, we have changed
the value of the default argument. That is,
print(append_twice.__defaults__) # ([], )
append_twice(1)
print(append_twice.__defaults__) # ([1, 1], )
append_twice(2)
print(append_twice.__defaults__) # ([1, 1, 2, 2], )
In each case where a user-supplied binding for `lst` is not given, we modify the
single (mutable) default value, which leads to this crazy behavior.
"""
def append_twice(a, lst=[]):
"""Append a value to a list twice."""
lst.append(a)
lst.append(a)
return lst
print(append_twice(1, lst=[4])) # => [4, 1, 1]
print(append_twice(11, lst=[2, 3, 5, 7])) # => [2, 3, 5, 7, 11, 11]
print(append_twice(1)) # => [1, 1]
print(append_twice(2)) # => [1, 1, 2, 2]
print(append_twice(3))
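# A minimal sketch (not part of the original snippet) of the usual fix: use an
# immutable sentinel such as None so a fresh list is created on every call and
# __defaults__ never holds shared mutable state.
def append_twice_fixed(a, lst=None):
    """Append a value to a list twice without sharing a default list."""
    if lst is None:
        lst = []
    lst.append(a)
    lst.append(a)
    return lst
print(append_twice_fixed(1))  # => [1, 1]
print(append_twice_fixed(2))  # => [2, 2]; no state leaks between calls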
|
a58a8c2807e29af68d501aa5ad4b33ad1aa80252
| 3,642,165
|
def is_text_file(file_):
"""
detect if file is of type text
:param file_: file to be tested
:returns: `bool` of whether the file is text
"""
with open(file_, 'rb') as ff:
data = ff.read(1024)
return not is_binary_string(data)
|
d064b51ea239f34ed97d47416b1f411650ce8a1a
| 3,642,166
|
from typing import Union
from datetime import datetime
from typing import List
import pytz
def soft_update_datetime_field(
model_inst: models.Model,
field_name: str,
warehouse_field_value: Union[datetime, None],
) -> List[str]:
"""
Uses Django ORM to update DateTime field of model instance if the field value is null and the warehouse data is non-null.
"""
model_name: str = model_inst.__class__.__name__
current_field_value: Union[datetime, None] = getattr(model_inst, field_name)
# Skipping update if the field already has a value, provided by a previous cron run or administrator
if current_field_value is not None:
logger.info(
f'Skipped update of {field_name} for {model_name} instance ({model_inst.id}); existing value was found')
else:
if warehouse_field_value:
warehouse_field_value = warehouse_field_value.replace(tzinfo=pytz.UTC)
setattr(model_inst, field_name, warehouse_field_value)
logger.info(f'Updated {field_name} for {model_name} instance ({model_inst.id})')
return [field_name]
return []
|
33034a548ee572706cd1e6e696d5a9249ad0b528
| 3,642,167
|
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score
def plot_confusion_matrix(
y_true,
y_pred,
normalize=False,
cmap=plt.cm.Blues,
label_list = None,
visible=True,
savepath=None):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
cm = confusion_matrix(y_true, y_pred)
acc = accuracy_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred, average="micro")
title = f"Confusion Matrix, Acc: {acc:.2f}, F1: {f1:.2f}"
if label_list is None:
classes = range(0, max(y_true) + 1)
else:
classes = label_list
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
plt.figure(figsize=(13,13))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
if savepath is not None:
plt.savefig(savepath)
if visible:
plt.show()
return acc, f1
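# Illustrative usage (not part of the original snippet): plot the matrix for a
# small toy prediction without saving it to disk.
# y_true = [0, 0, 1, 1, 2, 2]
# y_pred = [0, 1, 1, 1, 2, 0]
# acc, f1 = plot_confusion_matrix(y_true, y_pred, label_list=["a", "b", "c"], visible=False)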
|
f15d2170ba0e869cb47e554ea374f93b05dbcab8
| 3,642,168
|
def _test_pressure_reconstruction(self, g, recon_p, point_val, point_coo):
"""
Testing pressure reconstruction. This function uses the reconstructed
pressure local polynomial and perform an evaluation at the Lagrangian
points, and checks if the those values are equal to the point_val array.
Parameters
----------
g : PorePy object
Grid.
recon_p : NumPy nd-Array
Reconstructed pressure polynomial.
point_val : NumPy nd-Array
Pressure values at the Lagrangian nodes.
point_coo : NumPy array
Coordinates at the Lagrangian nodes.
Returns
-------
None.
"""
def assert_reconp(eval_poly, point_val):
np.testing.assert_allclose(
eval_poly,
point_val,
rtol=1e-6,
atol=1e-3,
err_msg="Pressure reconstruction has failed"
)
eval_poly = utils.eval_P1(recon_p, point_coo)
assert_reconp(eval_poly, point_val)
return None
|
b70b202cc21ba632f18af2f5fcf72f7b6d509e91
| 3,642,169
|
def logout():
"""Logout
:return: Function used to log out the current user
"""
logout_user()
return redirect(url_for('index'))
|
f5e2ef30b47c645ba5671395a115eb6d6c9425f1
| 3,642,170
|
def get_user_messages(user, index=0, number=0):
"""
Return `number` messages for the given user starting at index `index`, in reverse chronological order.
"""
if not user or user.is_anonymous or index < 0 or number < 0:
return tuple()
# noinspection PyBroadException
try:
if index == 0 and number == 0:
all_message = user.messages.all()
else:
all_message = user.messages.all()[index:index+number]
except Exception as e:
all_message = tuple()
return all_message
|
bb0c499e5ca8ec650d2ebca12852d2345733e882
| 3,642,172
|
def third_party_apps_default_dc_modules_and_settings(klass):
"""
Decorator for DefaultDcSettingsSerializer class.
Updates modules and settings fields defined in installed third party apps.
"""
logger.info('Loading third party apps DEFAULT DC modules and settings.')
for third_party_app, app_dc_settings in get_third_party_apps_serializer_settings():
try:
app_dc_settings.DEFAULT_DC_MODULES
except AttributeError:
logger.info('Skipping app: %s does not have any DEFAULT DC modules defined.', third_party_app)
else:
_update_serializer_modules(third_party_app, app_dc_settings.DEFAULT_DC_MODULES, klass, default_dc=True)
try:
app_dc_settings.DEFAULT_DC_SETTINGS
except AttributeError:
logger.info('Skipping app: %s does not have any DEFAULT DC settings defined.', third_party_app)
else:
_update_serializer_settings(third_party_app, app_dc_settings, klass, default_dc=True)
return klass
|
59be03a271e60352b429d45ecff647100388f9ab
| 3,642,173
|
from typing import Union
from pathlib import Path
def split_lvis(
n_experiences: int,
train_transform=None,
eval_transform=None,
shuffle=True,
root_path: Union[str, Path] = None,
):
"""
Creates the example Split LVIS benchmark.
This is a toy benchmark created only to show how a detection benchmark can
be created. It was not meant to be used for research purposes!
:param n_experiences: The number of train experiences to create.
:param train_transform: The train transformation.
:param eval_transform: The eval transformation.
:param shuffle: If True, the dataset will be split randomly
:param root_path: The root path of the dataset. Defaults to None,
which means that the default path will be used.
:return: A :class:`DetectionScenario` instance.
"""
train_dataset = LvisDataset(root=root_path, train=True)
val_dataset = LvisDataset(root=root_path, train=False)
all_cat_ids = set(train_dataset.lvis_api.get_cat_ids())
all_cat_ids.update(val_dataset.lvis_api.get_cat_ids())
return split_detection_benchmark(
n_experiences=n_experiences,
train_dataset=train_dataset,
test_dataset=val_dataset,
n_classes=len(all_cat_ids),
train_transform=train_transform,
eval_transform=eval_transform,
shuffle=shuffle,
)
|
efece586ec6bfbc45911ed9f4f2ad5ead2cfd88b
| 3,642,174
|
import numpy as np
def compute_log_ksi_normalized(log_edge_pot, #'(t-1,t)',
log_node_pot, # '(t, label)',
T,
n_labels,
log_alpha,
log_beta,
temp_array_1,
temp_array_2):
""" to obtain the two-slice posterior marginals p(y_t = i, y_t+1 = j| X_1:T) = normalized ksi_t,t+1(i,j) """
# in the following, will index log_ksi only with t, to stand for log_ksi[t,t+1]. including i,j: log_ksi[t,i,j]
log_alpha = compute_log_alpha(log_edge_pot, log_node_pot, T, n_labels, log_alpha, temp_array_1, temp_array_2)
log_beta = compute_log_beta(log_edge_pot, log_node_pot, T, n_labels, log_beta, temp_array_1, temp_array_2)
log_ksi = np.empty((T-1, n_labels, n_labels))
for t in range(T-1):
psi_had_beta = log_node_pot[t+1,:] + log_beta[t+1, :] # represents psi_t+1 \hadamard beta_t+1 in MLAPP eq 17.67
log_ksi[t,:,:] = log_edge_pot
for c in range(n_labels):
for d in range(n_labels):
log_ksi[t,c,d] += log_alpha[t,d] + psi_had_beta[c]
# Normalize the current ksi[t,:,:] over both dimensions. Strictly speaking,
# ksi itself need not be normalized, but the output of this function must be,
# and it is cheaper to normalize in place than to allocate a fresh array.
log_ksi[t,:,:] -= lse_numba_2d(log_ksi[t,:,:])
return log_ksi
|
e4e6ea464851ba64d640e14fd7c88e9c52f28f50
| 3,642,175
|
from typing import Any
from typing import Type
def _deserialize_union(x: Any, field_type: Type) -> Any:
"""Deserialize values for Union typed fields
Args:
x (Any): value to be deserialized.
field_type (Type): field type.
Returns:
[Any]: desrialized value.
"""
for arg in field_type.__args__:
# stop after first matching type in Union
try:
x = _deserialize(x, arg)
break
except ValueError:
pass
return x
|
01793983a0a82fc16c03adbe57f52de9be5c81ea
| 3,642,177
|
def read_simplest_expandable(expparams, config):
"""
Read expandable parameters of the form `param_1` from the config file.
Parameters
----------
expparams : dict, dict.keys, set, or alike
The parameter names that should be considered as expandable.
Usually, this is a module subdictionary of `type_simplest_ep`.
config : dict, dict.keys, set, or alike
The user configuration file.
Returns
-------
set of str
The parameters in `config` that comply with `expparams`.
"""
new = set()
for param in config:
try:
name, idx = param.split("_")
except ValueError:
continue
if idx.isdigit() and name in expparams:
new.add(param)
return new
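# Illustrative usage (not part of the original snippet): only keys of the form
# '<name>_<digit>' whose <name> appears in expparams are kept.
example_expparams = {"param", "other"}
example_config = {"param_1": 1, "param_2": 2, "other_x": 3, "unrelated": 4}
print(read_simplest_expandable(example_expparams, example_config))
# => {'param_1', 'param_2'}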
|
4e2068e4a6cbca050da6a33a24b5fb0d2477e4e3
| 3,642,178
|
from typing import Callable
from typing import Iterable
from typing import Any
def rec_map_reduce_array_container(
reduce_func: Callable[[Iterable[Any]], Any],
map_func: Callable[[Any], Any],
ary: ArrayOrContainerT) -> "DeviceArray":
"""Perform a map-reduce over array containers recursively.
:param reduce_func: callable used to reduce over the components of *ary*
(and those of its sub-containers) if *ary* is a
:class:`~arraycontext.ArrayContainer`. Must be associative.
:param map_func: callable used to map a single array of type
:class:`arraycontext.ArrayContext.array_types`. Returns an array of the
same type or a scalar.
.. note::
The traversal order is unspecified. *reduce_func* must be associative in
order to guarantee a sensible result. This is because *reduce_func* may be
called on subsets of the component arrays, and then again (potentially
multiple times) on the results. As an example, consider a container made up
of two sub-containers, *subcontainer0* and *subcontainer1*, that each
contain two component arrays, *array0* and *array1*. The same result must be
computed whether traversing recursively::
reduce_func([
reduce_func([
map_func(subcontainer0.array0),
map_func(subcontainer0.array1)]),
reduce_func([
map_func(subcontainer1.array0),
map_func(subcontainer1.array1)])])
reducing all of the arrays at once::
reduce_func([
map_func(subcontainer0.array0),
map_func(subcontainer0.array1),
map_func(subcontainer1.array0),
map_func(subcontainer1.array1)])
or any other such traversal.
"""
def rec(_ary: ArrayOrContainerT) -> ArrayOrContainerT:
try:
iterable = serialize_container(_ary)
except NotAnArrayContainerError:
return map_func(_ary)
else:
return reduce_func([
rec(subary) for _, subary in iterable
])
return rec(ary)
|
885862371ece1e1f041a44693704300945d8d4a0
| 3,642,179
|
import json
def load_augmentations_config(
placeholder_params: dict, path_to_config: str = "configs/augmentations.json"
) -> dict:
"""Load the json config with params of all transforms
Args:
placeholder_params (dict): dict with values of placeholders
path_to_config (str): path to the json config file
"""
with open(path_to_config, "r") as config_file:
augmentations = json.load(config_file)
for name, params in augmentations.items():
augmentations[name] = [fill_placeholders(param, placeholder_params) for param in params]
return augmentations
|
49f3170033411418e7e5468aecdcdc612a677e66
| 3,642,181
|
import numpy
def simplify_mask(mask, r_ids, r_p_zip, replace=True):
"""Simplify the mask by replacing all `region_ids` with their `root_parent_id`
The `region_ids` and `parent_ids` are paired from which a tree is inferred. The root
of this tree is value `0`. `region_ids` that have a corresponding `parent_id` of 0
are penultimate roots. This method replaces each `region_id` with its penultimate `parent_id`.
It *simplifies* the volume.
:param mask: a 3D volume
:type mask: `numpy.array`
:param r_ids: sequence of `region_id`
:type r_ids: iterable
:param r_p_zip: sequence of 2-tuples with `region_id` and `parent_id`
:type r_p_zip: iterable
:param bool replace: if `True` then the returned `mask` will have its values replaced; `False` will leave the `mask` unchanged (useful for running tests to speed things up)
:return: `simplified_mask`, `segment_ids`
:rtype: tuple
"""
simplified_mask = numpy.ndarray(mask.shape, dtype=int) # @UnusedVariable
simplified_mask = 0
# group regions_ids by parent_id
root_parent_id_group = dict()
for r in r_ids:
p = get_root(r_p_zip, r)
if p not in root_parent_id_group:
root_parent_id_group[p] = [r]
else:
root_parent_id_group[p] += [r]
if replace:
# It is vastly faster to use multiple array-wide comparisons than to do
# comparisons element-wise. Therefore, we generate a string to be executed
# that will do hundreds of array-wide comparisons at a time.
# Each comparison is for all region_ids for a parent_id which will
# then get assigned the parent_id.
for parent_id, region_id_list in root_parent_id_group.items():
# check whether any element in the mask has a value == r0 OR r1 ... OR rN
# e.g. (mask == r0) | (mask == r1) | ... | (mask == rN)
comp = ' | '.join(['( mask == %s )' % r for r in region_id_list])
# set those that satisfy the above to have the parent_id
# Because parent_ids are non-overlapping (i.e. no region_id has two parent_ids)
# we can do successive summation instead of assignments.
full_op = 'simplified_mask += (' + comp + ') * %s' % parent_id
exec(full_op)
else:
simplified_mask = mask
segment_ids = root_parent_id_group.keys()
# segment_colors = [r_c_zip[s] for s in segment_ids]
return simplified_mask, segment_ids
|
b8344a893319ad7a26f931b2edbc6ef452b82c24
| 3,642,182
|
from urllib.request import urlopen
from urllib.error import HTTPError
from xml.dom.minidom import parseString
def getStops(ll):
"""
getStops
Returns a list of stops based off of a lat long pair
:param: ll { lat : float, lng : float }
:return: list
"""
if not ll:
return None
url = "%sstops?appID=%s&ll=%s,%s" % (BASE_URI, APP_ID, ll['lat'], ll['lng'])
try:
f = urlopen(url)
except HTTPError:
return None
response = f.read()
dom = parseString(response)
stopElems = dom.getElementsByTagName("location")
stops = []
for se in stopElems:
locid = se.getAttribute("locid")
desc = se.getAttribute("desc")
direction = se.getAttribute("dir")
stops.append("ID: %s, %s on %s" % (locid, direction, desc))
return stops
|
eedfc49a02ab6c2ccf45e241262236804007a156
| 3,642,183
|
import warnings
import six
import tensorflow as tf
def rws(log_joint, observed, latent, axis=None):
"""
Implements Reweighted Wake-sleep from (Bornschein, 2015). This works for
both continuous and discrete latent `StochasticTensor` s.
:param log_joint: A function that accepts a dictionary argument of
``(string, Tensor)`` pairs, which are mappings from all
`StochasticTensor` names in the model to their observed values. The
function should return a Tensor, representing the log joint likelihood
of the model.
:param observed: A dictionary of ``(string, Tensor)`` pairs. Mapping from
names of observed `StochasticTensor` s to their values.
:param latent: A dictionary of ``(string, (Tensor, Tensor))``) pairs.
Mapping from names of latent `StochasticTensor` s to their samples and
log probabilities.
:param axis: The sample dimension(s) to reduce when computing the
outer expectation in log likelihood and in the cost for adapting
proposals. If `None`, no dimension is reduced.
:return: A Tensor. The surrogate cost to minimize.
:return: A Tensor. Estimated log likelihoods.
"""
warnings.warn("rws(): This function will be deprecated in the coming "
"version (0.3.1). Variational utilities are moving to "
"`zs.variational`. Features of the original rws() can be "
"achieved by two new variational objectives. For learning "
"model parameters, please use the importance weighted "
"objective: `zs.variational.iw_objective()`. For adapting "
"the proposal, the new rws gradient estimator can be "
"accessed by first constructing the inclusive KL divergence "
"objective using `zs.variational.klpq` and then calling "
"its rws() method.", category=FutureWarning)
latent_k, latent_v = map(list, zip(*six.iteritems(latent)))
latent_outputs = dict(zip(latent_k, map(lambda x: x[0], latent_v)))
latent_logpdfs = map(lambda x: x[1], latent_v)
joint_obs = merge_dicts(observed, latent_outputs)
log_joint_value = log_joint(joint_obs)
entropy = -sum(latent_logpdfs)
log_w = log_joint_value + entropy
if axis is not None:
log_w_max = tf.reduce_max(log_w, axis, keep_dims=True)
w_u = tf.exp(log_w - log_w_max)
w_tilde = tf.stop_gradient(
w_u / tf.reduce_sum(w_u, axis, keep_dims=True))
log_likelihood = log_mean_exp(log_w, axis)
fake_log_joint_cost = -tf.reduce_sum(w_tilde * log_joint_value, axis)
fake_proposal_cost = tf.reduce_sum(w_tilde * entropy, axis)
cost = fake_log_joint_cost + fake_proposal_cost
else:
cost = log_w
log_likelihood = log_w
return cost, log_likelihood
|
eb6278919dd484884b3110681680e67d3ee17d2f
| 3,642,184
|
from sklearn.svm import LinearSVR
def fit_svr(X, y, kernel: str = 'rbf') -> LinearSVR:
    """
    Fit a support vector regression for the given input X and expected labels y.
    :param X: Feature data
    :param y: Labels that should be correctly computed
    :param kernel: type of kernel {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'}, default='rbf'.
        Note that LinearSVR ignores this argument and always fits a linear model.
    :return: LinearSVR that is fitted to X and y
    """
svr = LinearSVR()
svr.fit(X=X, y=y)
return svr
|
10a38fca990c4ab058d582fbe38bd05df7456660
| 3,642,185
|
import typing
def process_get_namespaces_from_accounts(
status: int,
json: list,
network_type: models.NetworkType,
) -> typing.Sequence[models.NamespaceInfo]:
"""
Process the "/account/namespaces" HTTP response.
    :param status: Status code for HTTP response.
    :param json: JSON data for response message.
    :param network_type: Network type used when creating the namespace info models.
    :return: Sequence of NamespaceInfo models parsed from the response.
    """
assert status == 200
return [models.NamespaceInfo.create_from_dto(i, network_type) for i in json]
|
748bdca72db0640e75f8a0c063f7968ea9583e94
| 3,642,186
|
def _hexsplit(string):
""" Split a hex string into 8-bit/2-hex-character groupings separated by spaces"""
return ' '.join([string[i:i+2] for i in range(0, len(string), 2)])
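# Illustrative usage: each pair of hex characters becomes one space-separated group.
# >>> _hexsplit("deadbeef")
# 'de ad be ef'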
|
672e475edeaafaa08254845e620b0a771b294fa8
| 3,642,188
|
import numpy as np
def get_analysis_id(analysis_id):
"""
Get the new analysis id
:param analysis_id: analysis_index DataFrame
:return: new analysis_id
"""
if analysis_id.size == 0:
analysis_id = 0
else:
analysis_id = np.nanmax(analysis_id.values) + 1
return int(analysis_id)
|
3318764daadca6c1e1921847f623fcac169e2cb5
| 3,642,189
|
from typing import Union
def get_station_pqr(station_name: str, rcu_mode: Union[str, int], db):
"""
Get PQR coordinates for the relevant subset of antennas in a station.
Args:
station_name: Station name, e.g. 'DE603LBA' or 'DE603'
rcu_mode: RCU mode (0 - 6, can be string)
db: instance of LofarAntennaDatabase from lofarantpos
Example:
>>> from lofarantpos.db import LofarAntennaDatabase
>>> db = LofarAntennaDatabase()
>>> pqr = get_station_pqr("DE603", "outer", db)
>>> pqr.shape
(96, 3)
>>> pqr[0, 0]
1.7434713
>>> pqr = get_station_pqr("LV614", "5", db)
>>> pqr.shape
(96, 3)
"""
full_station_name = get_full_station_name(station_name, rcu_mode)
station_type = get_station_type(full_station_name)
if 'LBA' in station_name or str(rcu_mode) in ('1', '2', '3', '4', 'inner', 'outer'):
# Get the PQR positions for an individual station
station_pqr = db.antenna_pqr(full_station_name)
# Exception: for Dutch stations (sparse not yet accommodated)
if (station_type == 'core' or station_type == 'remote') and int(rcu_mode) in (3, 4):
station_pqr = station_pqr[0:48, :]
elif (station_type == 'core' or station_type == 'remote') and int(rcu_mode) in (1, 2):
station_pqr = station_pqr[48:, :]
elif 'HBA' in station_name or str(rcu_mode) in ('5', '6', '7', '8'):
selected_dipole_config = {
'intl': GENERIC_INT_201512, 'remote': GENERIC_REMOTE_201512, 'core': GENERIC_CORE_201512
}
selected_dipoles = selected_dipole_config[station_type] + \
np.arange(len(selected_dipole_config[station_type])) * 16
station_pqr = db.hba_dipole_pqr(full_station_name)[selected_dipoles]
else:
raise RuntimeError("Station name did not contain LBA or HBA, could not load antenna positions")
return station_pqr.astype('float32')
|
d796639866421876bc58a7621d37bbe7239da6df
| 3,642,190
|
from typing import List
def hello_world(cities: List[str] = ["Berlin", "Paris"]) -> bool:
"""
Hello world function.
Arguments:
- cities: List of cities in which 'hello world' is posted.
Return:
- success: Whether or not function completed successfully.
"""
try:
[print("Hello {}!".format(c)) for c in cities] # for loop one-liner
return True
except KeyboardInterrupt:
return False
finally:
pass
|
a24f0f47c9b44c97f46524d354fff0ed9a735fe3
| 3,642,191
|
import random
import numpy as np
def random_samples(traj_obs, expert, num_sample):
"""Randomly sample a subset of states to collect expert feedback.
Args:
traj_obs: observations from a list of trajectories.
expert: an expert policy.
num_sample: the number of samples to collect.
Returns:
new expert data.
"""
expert_data = []
for i in range(len(traj_obs)):
obs = traj_obs[i]
random.shuffle(obs)
new_expert_data = []
chosen = np.random.choice(range(len(obs)),
size=min(num_sample, len(obs)),
replace=False)
for ch in chosen:
state = obs[ch].observation
action_step = expert.action(obs[ch])
action = action_step.action
new_expert_data.append((state, action))
expert_data.extend(new_expert_data)
return expert_data
|
55aa4312c095ce97b8cf2840ff9ca61e393dff63
| 3,642,193
|
def get_p2_vector(img):
"""
Returns a p2 vector.
We calculate the p2 vector by taking the radial mean of
the autocorrelation of the input image.
"""
radvars = []
dimX = img.shape[0]
dimY = img.shape[1]
fftimage = np.fft.fft2(img)
final_image = np.fft.ifft2(fftimage*np.conj(fftimage))
finImg = np.abs(final_image)/(dimX*dimY)
centrdImg = np.fft.fftshift(finImg)
center = [int(dimX/2), int(dimY/2)]
radvar, _ = radial_profile(centrdImg, center, (dimX, dimY))
radvars.append(radvar)
p2_vec = np.array(radvars)
return p2_vec[0]
|
7544751bf268d6eea432e21efe3d3a7703b16c1b
| 3,642,195
|
from queue import Queue
from threading import Thread
def multiplex(n, q, **kwargs):
""" Convert one queue into several equivalent Queues
>>> q1, q2, q3 = multiplex(3, in_q)
"""
out_queues = [Queue(**kwargs) for i in range(n)]
def f():
while True:
x = q.get()
for out_q in out_queues:
out_q.put(x)
t = Thread(target=f)
t.daemon = True
t.start()
return out_queues
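# Usage sketch (illustrative): fan one input queue out to three consumers so that
# each consumer sees every item exactly once.
# >>> in_q = Queue()
# >>> q1, q2, q3 = multiplex(3, in_q)
# >>> in_q.put("job-1")   # "job-1" now appears on q1, q2 and q3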
|
4de6fa4fd495c2b320c4cdf28aa56df4411b7aa9
| 3,642,197
|
def stack(arrays, axis=0):
"""
Join a sequence of arrays along a new axis.
The `axis` parameter specifies the index of the new axis in the dimensions
of the result. For example, if ``axis=0`` it will be the first dimension
and if ``axis=-1`` it will be the last dimension.
.. versionadded:: 1.10.0
Parameters
----------
arrays : sequence of array_like
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
split : Split array into a list of multiple sub-arrays of equal size.
Examples
--------
>>> arrays = [np.random.randn(3, 4) for _ in range(10)]
>>> np.stack(arrays, axis=0).shape
(10, 3, 4)
>>> np.stack(arrays, axis=1).shape
(3, 10, 4)
>>> np.stack(arrays, axis=2).shape
(3, 4, 10)
>>> a = np.array_create.array([1, 2, 3])
>>> b = np.array_create.array([2, 3, 4])
>>> np.stack((a, b))
array_create.array([[1, 2, 3],
[2, 3, 4]])
>>> np.stack((a, b), axis=-1)
array_create.array([[1, 2],
[2, 3],
[3, 4]])
"""
arrays = [array_create.array(arr) for arr in arrays]
if not arrays:
raise ValueError('need at least one array to stack')
shapes = set(arr.shape for arr in arrays)
if len(shapes) != 1:
raise ValueError('all input arrays must have the same shape')
result_ndim = arrays[0].ndim + 1
if not -result_ndim <= axis < result_ndim:
msg = 'axis {0} out of bounds [-{1}, {1})'.format(axis, result_ndim)
raise IndexError(msg)
if axis < 0:
axis += result_ndim
sl = (slice(None),) * axis + (None,)
expanded_arrays = [arr[sl] for arr in arrays]
return concatenate(expanded_arrays, axis=axis)
|
ba8a2b514c32a1dc7a15215e5e26a90f2ace9a26
| 3,642,198
|
def verifica_cc(numero):
"""verifica_cc(numero): int -> tuple
Funcao que verifica o numero do cartao, indicando a categoria e a rede emissora"""
numero_final = str(numero)
if luhn_verifica(numero_final) == True:
categor = categoria(numero_final)
rede_cartao = valida_iin(numero_final)
if rede_cartao == "":
return "cartao invalido"
else:
return (categor, rede_cartao)
else:
return "cartao invalido"
|
f6d3501b8154c05058006575f8aa33c228b9ade6
| 3,642,200
|
def create_security_group(stack, name, rules=()):
"""Add EC2 Security Group Resource."""
ingress_rules = []
for rule in rules:
ingress_rules.append(
SecurityGroupRule(
"{0}".format(rule['name']),
CidrIp=rule['cidr'],
FromPort=rule['from_port'],
ToPort=rule['to_port'],
IpProtocol=rule['protocol'],
)
)
return stack.stack.add_resource(
SecurityGroup(
'{0}SecurityGroup'.format(name),
GroupDescription="{0} Security Group".format(name),
SecurityGroupIngress=ingress_rules,
VpcId=Ref(stack.vpc),
))
|
e4d2b81fc1c3b0b3231725aa8757ea644d2efdf6
| 3,642,201
|
from typing import List
from tqdm import tqdm
def features_targets_and_externals(
df: pd.DataFrame,
region_ordering: List[str],
id_col: str,
time_col: str,
time_encoder: OneHotEncoder,
weather: Weather_container,
time_interval: str,
latitude: str,
longitude: str,
):
"""
Function that computes the node features (outflows), target values (next step prediction)
and external data such as time_encoding and weather information
Args:
        df (pd.DataFrame): event data, one row per observation, containing the id, time and lat/lng columns
        region_ordering (List[str]): ordered list of region/node identifiers
        id_col (str): name of the column holding the region/node identifier
        time_col (str): name of the timestamp column
        time_encoder (OneHotEncoder): fitted encoder for (hour, weekday, month) features
        weather (Weather_container): source of hourly weather observations
        time_interval (str): pandas frequency string used to bin timestamps
        latitude (str): name of the latitude column
        longitude (str): name of the longitude column
    Returns:
        X, lat_vals, lng_vals, targets, time_external, weather_external, feature_scaler, target_scaler
"""
id_grouped_df = df.groupby(id_col)
lat_dict = dict()
lng_dict = dict()
for node in region_ordering:
grid_group_df = id_grouped_df.get_group(node)
lat_dict[node] = grid_group_df[latitude].mean()
lng_dict[node] = grid_group_df[longitude].mean()
grouped_df = df.groupby([time_col, id_col])
dt_range = pd.date_range(df[time_col].min(), df[time_col].max(), freq=time_interval)
node_inflows = np.zeros((len(dt_range), len(region_ordering), 1))
lat_vals = np.zeros((len(dt_range), len(region_ordering)))
lng_vals = np.zeros((len(dt_range), len(region_ordering)))
targets = np.zeros((len(dt_range) - 1, len(region_ordering)))
# arrays for external data
weather_external = np.zeros((len(dt_range), 4))
num_cats = 0
for cats in time_encoder.categories_:
num_cats += len(cats)
time_external = np.zeros((len(dt_range), num_cats))
# Loop through every (timestep, node) pair in dataset. For each find number of outflows and set as feature
# also set the next timestep for the same node as the target.
for t, starttime in tqdm(enumerate(dt_range), total=len(dt_range)):
for i, node in enumerate(region_ordering):
query = (starttime, node)
try:
group = grouped_df.get_group(query)
node_inflows[t, i] = len(group)
except KeyError:
node_inflows[t, i] = 0
lat_vals[t, i] = lat_dict[node]
lng_vals[t, i] = lng_dict[node]
# current solution:
# The target to predict, is the number of inflows at next timestep.
if t > 0:
targets[t - 1, i] = node_inflows[t, i]
        # Use the bin start time directly; relying on `group` here is fragile because
        # it may be stale (or undefined) when the last lookup raised KeyError.
        time_obj = pd.Timestamp(starttime)
time_external[t, :] = time_encoder.transform(
np.array([[time_obj.hour, time_obj.weekday(), time_obj.month]])
).toarray()
start_time_dt = pd.Timestamp(starttime).to_pydatetime()
weather_dat = weather.get_weather_df(start=start_time_dt, end=start_time_dt + timedelta(hours=1))
weather_dat = np.nan_to_num(weather_dat, copy=False, nan=0.0)
weather_external[t, :] = weather_dat
time_external = time_external[:-1, :]
# normalize weather features
weather_external = (weather_external - weather_external.mean(axis=0)) / (weather_external.std(axis=0) + 1e-6)
weather_external = weather_external[:-1, :]
X = node_inflows[:-1, :, :]
lng_vals = lng_vals[:-1, :]
lat_vals = lat_vals[:-1, :]
feature_scaler = StandardScaler()
feature_scaler.fit(X[:, :, 0])
target_scaler = StandardScaler()
target_scaler.fit(targets)
return X, lat_vals, lng_vals, targets, time_external, weather_external, feature_scaler, target_scaler
|
c9fc9fc210407ec596facda1bc43952ce9c6b98a
| 3,642,202
|
def transform_child_joint_frame_to_parent_inertial_frame(child_body):
"""Return the homogeneous transform from the child joint frame to the parent inertial frame."""
parent_joint = child_body.parent_joint
parent = child_body.parent_body
if parent_joint is not None and parent.inertial is not None:
h_p_c = parent_joint.homogeneous # from parent to child link/joint frame
h_c_p = get_inverse_homogeneous(h_p_c) # from child to parent link/joint frame
h_p_pi = parent.inertial.homogeneous # from parent link/joint frame to inertial frame
h_c_pi = h_c_p.dot(h_p_pi) # from child link/joint frame to parent inertial frame
return h_c_pi
|
0ab8761ef40101368fb3f2b657c329cd8cf5cf2b
| 3,642,204
|
def team_to_repos(api, no_repos, organization):
"""Create a team_to_repos mapping for use in _add_repos_to_teams, anc create
each team and repo. Return the team_to_repos mapping.
"""
num_teams = 10
# arrange
team_names = ["team-{}".format(i) for i in range(num_teams)]
repo_names = ["some-repo-{}".format(i) for i in range(num_teams)]
for name in team_names:
organization.create_team(name, permission="pull")
for name in repo_names:
organization.create_repo(name)
team_to_repos = {
team_name: [repo_name]
for team_name, repo_name in zip(team_names, repo_names)
}
return team_to_repos
|
390da146c3f96c554f9194f8551a066eec535533
| 3,642,205
|
def box_minus(plus_transform: pin.SE3, minus_transform: pin.SE3) -> np.ndarray:
"""
Compute the box minus between two transforms:
.. math::
T_1 \\boxminus T_2 = \\log(T_1 \\cdot T_2^{-1})
This operator allows us to think about orientation "differences" as
similarly as possible to position differences, but mind the frames! Its
formula has two use cases, depending on whether the common frame :math:`C`
between the two transforms is their source or their target.
When the common frame is the target, denoting by :math:`T_{CP}` the
transform from frame :math:`P` (source) to frame :math:`C` (target), the
resulting twist is expressed in the target frame:
.. math::
{}_C \\xi_{CM} = T_{CP} \\boxminus T_{CM}
When the common frame is the source frame, denoting by :math:`T_{MC}` the
transform from frame :math:`C` (source) to frame :math:`M` (target), the
resulting twist is expressed in the target frame of the transform on the
right-hand side of the operator:
.. math::
        -{}_M \\xi_{CM} = T_{PC} \\boxminus T_{MC}
Args:
plus_transform: Transform :math:`T_1` on the left-hand side of the box
minus operator.
minus_transform: Transform :math:`T_2` on the right-hand side of the
box minus operator.
Returns:
In the first case :math:`T_{CP} \\boxminus T_{CM}`, the outcome is a
spatial twist :math:`{}_C \\xi_{CM}` expressed in the common frame
:math:`C`.
In the second case :math:`T_{PC} \\boxminus T_{MC}`, the outcome is a
body twist :math:`-{}_M \\xi_{CM}` (mind the unitary minus).
Note:
Prefer using :func:`pink.tasks.utils.body_box_minus` to calling this
function in the second use case :math:`T_{PC} \\boxminus T_{MC}`.
"""
diff_array = plus_transform.act(minus_transform.inverse())
twist: np.ndarray = pin.log(diff_array).vector
return twist
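# Minimal sanity check (sketch; assumes pinocchio as `pin` and numpy as `np` from
# this module): the box minus of a transform with itself is the zero twist.
# >>> T = pin.SE3.Random()
# >>> np.allclose(box_minus(T, T), np.zeros(6))
# True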
|
838f5e8b4f91450c311c72d4526e4c8fd3c9d6f7
| 3,642,206
|
import struct
def padandsplit(message):
"""
returns a two-dimensional array X[i][j] of 32-bit integers, where j ranges
from 0 to 16.
First pads the message to length in bytes is congruent to 56 (mod 64),
by first adding a byte 0x80, and then padding with 0x00 bytes until the
message length is congruent to 56 (mod 64). Then adds the little-endian
64-bit representation of the original length. Finally, splits the result
up into 64-byte blocks, which are further parsed as 32-bit integers.
"""
origlen = len(message)
padlength = 64 - ((origlen - 56) % 64) # minimum padding is 1!
message += b"\x80"
message += b"\x00" * (padlength - 1)
message += struct.pack("<Q", origlen * 8)
assert (len(message) % 64 == 0)
return [
[
struct.unpack("<L", message[i + j:i + j + 4])[0]
for j in range(0, 64, 4)
]
for i in range(0, len(message), 64)
]
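# Illustrative check: a 3-byte message pads out to a single 64-byte block,
# i.e. one list of sixteen 32-bit words.
# >>> blocks = padandsplit(b"abc")
# >>> (len(blocks), len(blocks[0]))
# (1, 16)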
|
ea06a3fc91e19ed0dbea6ddcc2ee6d554fb5a40f
| 3,642,207
|
import requests
def base_put(url_path, content):
"""
Do a PUT to the REST API
"""
response = requests.put(url=settings.URL_API + url_path, json=content)
return response
|
dde94c1dba0d8a931a0eae0e8f5ce63d1f5a62a1
| 3,642,208
|
import numpy as np
def inverse_rotation(theta: float) -> np.ndarray:
    """
    Compute the inverse of the 2d rotation matrix that rotates a
    given vector by theta, without use of numpy.linalg.inv and numpy.linalg.solve.
    Arguments:
        theta: rotation angle (assumed to be in radians)
    Return:
        Inverse of the rotation matrix
    """
    # For a pure rotation the determinant is cos^2(theta) + sin^2(theta) = 1,
    # so the analytic inverse is simply the transpose, i.e. a rotation by -theta.
    m = np.zeros((2, 2))
    m[0, 0] = np.cos(theta)
    m[0, 1] = np.sin(theta)
    m[1, 0] = -np.sin(theta)
    m[1, 1] = np.cos(theta)
    return m
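# Sanity check (sketch; assumes `rotation_matrix(theta)` elsewhere in this module
# returns the standard 2-D rotation matrix and that theta is in radians):
# >>> np.allclose(rotation_matrix(0.3) @ inverse_rotation(0.3), np.eye(2))
# True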
|
732183f7577969a1ecbbd0ee5ed86342c65991fc
| 3,642,209
|
import functools
def _config_validation_decorator(func):
"""A decorator used to easily run validations on configs loaded into dicts.
Add this decorator to any method that returns the config as a dict.
Raises:
ValueError: If the configuration fails validation
"""
@functools.wraps(func)
def validation_wrapper(*args, **kwargs):
config_dict = func(*args, **kwargs)
validate_dict(config_dict)
return config_dict
return validation_wrapper
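# Usage sketch (illustrative; `load_yaml_config` is a hypothetical loader and
# `validate_dict` is assumed to be defined in this module):
# @_config_validation_decorator
# def load_yaml_config(path):
#     with open(path) as fh:
#         return yaml.safe_load(fh)
# Calling load_yaml_config(...) now raises ValueError whenever validation fails.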
|
1a63254e43c2920d6952105d9860138c395cbf2b
| 3,642,210
|
import functools
from PIL import Image
def image_transpose_exif(im):
"""
https://stackoverflow.com/questions/4228530/pil-thumbnail-is-rotating-my-image
Apply Image.transpose to ensure 0th row of pixels is at the visual
top of the image, and 0th column is the visual left-hand side.
Return the original image if unable to determine the orientation.
As per CIPA DC-008-2012, the orientation field contains an integer,
1 through 8. Other values are reserved.
Parameters
----------
im: PIL.Image
The image to be rotated.
"""
exif_orientation_tag = 0x0112
exif_transpose_sequences = [ # Val 0th row 0th col
[], # 0 (reserved)
[], # 1 top left
[Image.FLIP_LEFT_RIGHT], # 2 top right
[Image.ROTATE_180], # 3 bottom right
[Image.FLIP_TOP_BOTTOM], # 4 bottom left
[Image.FLIP_LEFT_RIGHT, Image.ROTATE_90], # 5 left top
[Image.ROTATE_270], # 6 right top
[Image.FLIP_TOP_BOTTOM, Image.ROTATE_90], # 7 right bottom
[Image.ROTATE_90], # 8 left bottom
]
try:
seq = exif_transpose_sequences[im._getexif()[exif_orientation_tag]]
except Exception:
return im
else:
return functools.reduce(type(im).transpose, seq, im)
|
4f166ea59c097e4306bd43db7165e56e8d289b6a
| 3,642,211
|
def opt_pore_diameter(elements, coordinates, bounds=None, com=None, **kwargs):
"""Return optimised pore diameter and it's COM."""
args = elements, coordinates
if com is not None:
pass
else:
com = center_of_mass(elements, coordinates)
if bounds is None:
pore_r = pore_diameter(elements, coordinates, com=com)[0] / 2
bounds = (
(com[0]-pore_r, com[0]+pore_r),
(com[1]-pore_r, com[1]+pore_r),
(com[2]-pore_r, com[2]+pore_r)
)
minimisation = minimize(
correct_pore_diameter, x0=com, args=args, bounds=bounds)
pored = pore_diameter(elements, coordinates, com=minimisation.x)
return (pored[0], pored[1], minimisation.x)
|
f75a7c4246bc2ad096de309795d61afea78f7c3e
| 3,642,213
|
def animate_operators(operators, date):
"""Main."""
results = []
failures = []
length = len(operators)
count = 1
for i in operators:
try:
i = i.encode('utf-8')
except:
i = unicode(i, 'utf-8')
i = i.encode('utf-8')
print(i, count, "/", length)
try:
output = animate_one_day(i, date)
results.append(output)
print("success!")
output.to_csv("sketches/{}/{}/data/indiv_operators/{}.csv".format(OUTPUT_NAME, DATE, i))
except Exception:
failures.append(i)
print("failed:")
count += 1
return results, failures
|
d8dd6afdd4a13ab62a4c821bb43050af07fdc455
| 3,642,214
|
def add_stocks(letter, page, get_last_page=False):
"""
goes through each row in table and adds to df if it is a stock
returns the appended df
"""
df = pd.DataFrame()
res = req.get(BASE_LINK.format(letter, page))
soup = bs(res.content, 'lxml')
table = soup.find('table', {'id': 'CompanylistResults'})
stks = table.findAll('tr')
stocks_on_page = (len(stks) - 1) / 2
for stk in stks[1:]:
deets = stk.findAll('td')
if len(deets) != 7:
continue
company_name = deets[0].text.strip()
ticker = deets[1].text.strip()
market_cap = deets[2].text.strip()
# 4th entry is blank
country = deets[4].text.strip()
ipo_year = deets[5].text.strip()
subsector = deets[6].text.strip()
df = df.append(pd.Series({'company_name': company_name,
'market_cap': market_cap,
'country': country,
'ipo_year': ipo_year,
'subsector': subsector},
name=ticker))
if get_last_page:
# get number of pages
lastpage_link = soup.find('a', {'id': 'two_column_main_content_lb_LastPage'})
last_page_num = int(lastpage_link['href'].split('=')[-1])
        return df, stocks_on_page, last_page_num
return df, stocks_on_page
|
ce86ef68a107fbae8d0028486bef8567dc24c43e
| 3,642,217
|
def available_parent_amount_rule(model, pr):
"""
Each parent has a limited resource budget; it cannot allocate more than that.
:param ConcreteModel model:
:param int pr: parent resource
:return: boolean indicating whether pr is staying within budget
"""
if model.parent_possible_allocations[pr]:
return sum(model.PARENT_AMT[pr, i] for i in model.parent_possible_allocations[pr]) <= model.avail_parent_amt[pr]
else:
return Constraint.Skip
|
e1ccc7e9ad4941bfffebefd34217cd58c5bc18e5
| 3,642,218
|
def extract_coords(filename):
"""Extract J2000 coordinates from filename or filepath
Parameters
----------
filename : str
name or path of file
Returns
-------
str
J2000 coordinates
"""
# in case path is entered as argument
filename = filename.split("/")[-1] if "/" in filename else filename
# to check whether declination is positive or negative
plus_minus = "+" if "+" in filename else "-"
# extracting right acesnsion (ra) and declination(dec) from filename
filename = filename.split("_")[0].strip("J").split(plus_minus)
ra_extracted = [
"".join(filename[0][0:2]),
"".join(filename[0][2:4]),
"".join(filename[0][4:]),
]
dec_extracted = [
"".join(filename[1][0:2]),
"".join(filename[1][2:4]),
"".join(filename[1][4:]),
]
coordinates = " ".join(ra_extracted) + " " + plus_minus + " ".join(dec_extracted)
# return coordinates as a string in HH MM SS.SSS format
return coordinates
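# Illustrative example (hypothetical filename): the RA and Dec digits are regrouped
# into "HH MM SS.SSS" / "DD MM SS.SSS" form.
# >>> extract_coords("J012345.67+891011.1_spec.fits")
# '01 23 45.67 +89 10 11.1'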
|
57f0ca79223116caa770a1dbea2eda84df146855
| 3,642,219
|
import numpy as np
def exponential(mantissa, base, power, left, right):
"""Return the exponential signal.
The signal's value will be `mantissa * base ^ (power * time)`.
Parameters:
mantissa: The mantissa, i.e. the scale of the signal
base: The exponential base
power: The exponential power
left: Left bound of the signal
        right: Right bound of the signal
Returns:
ndarray[float]: The values of the signal
ndarray[int]: The interval of the signal from left bound
to right bound
"""
n = np.arange(left, right+1, 1)
x = mantissa * (base ** (power * n))
return x, n
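# Illustrative example: 2 * e^(0.5 * n) sampled on the interval n = -2..2.
# >>> x, n = exponential(2.0, np.e, 0.5, -2, 2)
# >>> n
# array([-2, -1,  0,  1,  2])
# >>> x[2]   # value at n = 0 is just the mantissa
# 2.0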
|
a2fbd76b6426f600d19eb9caeb4edac88dea9a9c
| 3,642,220
|
def get_features(features, featurestore=None, featuregroups_version_dict={}, join_key=None, online=False):
"""
Gets a list of features (columns) from the featurestore. If no featuregroup is specified it will query hopsworks
metastore to find where the features are stored. It will try to construct the query first from the cached metadata,
if that fails it will re-try after reloading the cache
Example usage:
>>> # The API will default to version 1 for feature groups and the project's feature store
>>> features = featurestore.get_features(["pagerank", "triangle_count", "avg_trx"],
>>> featurestore=featurestore.project_featurestore())
>>> #You can also explicitly define feature group, version, feature store, and join-key:
>>> features = featurestore.get_features(["pagerank", "triangle_count", "avg_trx"],
>>> featurestore=featurestore.project_featurestore(),
>>> featuregroups_version_dict={"trx_graph_summary_features": 1,
>>> "trx_summary_features": 1}, join_key="cust_id")
Args:
:features: a list of features to get from the featurestore
:featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
        :featuregroups_version_dict: (Optional) a dict with (featuregroup --> version) for all the featuregroups
            where the features reside; versions default to 1
:join_key: (Optional) column name to join on
:online: a boolean flag whether to fetch the online feature group or the offline one (assuming that the
feature group has online serving enabled)
Returns:
A dataframe with all the features
"""
# try with cached metadata
try:
return core._do_get_features(features,
core._get_featurestore_metadata(featurestore,
update_cache=update_cache_default),
featurestore=featurestore,
featuregroups_version_dict=featuregroups_version_dict,
join_key=join_key,
online=online)
# Try again after updating cache
except:
return core._do_get_features(features, core._get_featurestore_metadata(featurestore, update_cache=True),
featurestore=featurestore,
featuregroups_version_dict=featuregroups_version_dict,
join_key=join_key,
online=online)
|
03cfc250bd921b291ac38fce5beddac3144e65ba
| 3,642,221
|
import numpy as np
def get_flex_bounds(x, samples, nsig=1):
"""
Here, we wish to report the distribution of the subchunks 'sample'
along with the value of the full sample 'x'
So this function will return x, x_lower_bound, x_upper_bound,
where the range of the lower and upper bound expresses
the standard deviation of the sample distribution, the mean
of which is often not aligned with x.
"""
    mean = np.mean(samples)
    sig = np.std(samples)
return [x, nsig*sig+x-mean, nsig*sig+mean-x]
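# Illustrative example: subchunk samples [8, 12] have mean 10 and std 2, so with
# x = 11 and nsig = 1 the returned bounds are [11, 2 + 11 - 10, 2 + 10 - 11].
# >>> get_flex_bounds(11, [8, 12])
# [11, 3.0, 1.0]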
|
0fb4120307f61aafce902e92a32c66fd9aad91bf
| 3,642,222
|
def _parse_multi_header(headers):
"""
Parse out and return the data necessary for generating ZipkinAttrs.
Returns a dict with the following keys:
'trace_id': str or None
'span_id': str or None
'parent_span_id': str or None
'sampled_str': '0', '1', 'd', or None (defer)
"""
parsed = {
"trace_id": headers.get("X-B3-TraceId", None),
"span_id": headers.get("X-B3-SpanId", None),
"parent_span_id": headers.get("X-B3-ParentSpanId", None),
"sampled_str": headers.get("X-B3-Sampled", None),
}
# Normalize X-B3-Flags and X-B3-Sampled to None, '0', '1', or 'd'
if headers.get("X-B3-Flags") == "1":
parsed["sampled_str"] = "d"
if parsed["sampled_str"] == "true":
parsed["sampled_str"] = "1"
elif parsed["sampled_str"] == "false":
parsed["sampled_str"] = "0"
if parsed["sampled_str"] not in (None, "1", "0", "d"):
raise ValueError("Got invalid X-B3-Sampled: %s" % parsed["sampled_str"])
for k in ("trace_id", "span_id", "parent_span_id"):
if parsed[k] == "":
raise ValueError("Got empty-string %r" % k)
if parsed["trace_id"] and not parsed["span_id"]:
raise ValueError("Got X-B3-TraceId but not X-B3-SpanId")
elif parsed["span_id"] and not parsed["trace_id"]:
raise ValueError("Got X-B3-SpanId but not X-B3-TraceId")
# Handle the common case of no headers at all
if not parsed["trace_id"] and not parsed["sampled_str"]:
raise ValueError() # won't trigger a log message
return parsed
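# Illustrative example (hypothetical B3 header values): the sampled flag is
# normalised to one of None, '0', '1' or 'd'.
# >>> _parse_multi_header({
# ...     "X-B3-TraceId": "6e0c63257de34c92",
# ...     "X-B3-SpanId": "005da52bd8a9b97b",
# ...     "X-B3-Sampled": "true",
# ... })
# {'trace_id': '6e0c63257de34c92', 'span_id': '005da52bd8a9b97b', 'parent_span_id': None, 'sampled_str': '1'}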
|
2ac3d0cbee196385e970bcc85827c1a467b5bb3b
| 3,642,223
|
import numpy
def get_tgimg(img):
"""
    Process the hint image and extract the hint characters.
    :param img: hint image
    :type img:
    :return: original image with the character contours drawn, the hint characters
        re-laid out in order and boxed in different colours, and a list of per-character templates
    :rtype: img original image, out feature image of the characters (one box per character),
        templets list of angle-corrected template images
"""
imgBW = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
h, w = imgBW.shape
_, imgBW = cv2.threshold(imgBW, 0, 255,
cv2.THRESH_BINARY + cv2.THRESH_OTSU)
img2 = cv2.erode(imgBW, None, iterations=3)
img2 = cv2.dilate(img2, None, iterations=3)
out = numpy.full((20 + h, 20 + w), 255, numpy.uint8)
copy_image(out, 10, 10, img2)
out, cnts, hierarchy = cv2.findContours(out, cv2.RETR_LIST,
cv2.CHAIN_APPROX_NONE)
rects = []
    # cnts[-1] is the outer border contour
for cnt in cnts[:-1]:
cnt -= 10
x1 = cnt[:, :, 0].min()
y1 = cnt[:, :, 1].min()
x2 = cnt[:, :, 0].max()
y2 = cnt[:, :, 1].max()
x1 = 0 if x1 < 0 else x1
y1 = 0 if y1 < 0 else y1
x2 = w - 1 if x2 > w - 1 else x2
y2 = h - 1 if y2 > h - 1 else y2
rects.append((x1, y1, x2, y2))
cv2.drawContours(img, cnt, -1, [0, 0, 255])
# cv2.rectangle(img, (x1, y1), (x2, y2), [0, 0, 255])
rects.sort()
out = numpy.full(imgBW.shape, 255, numpy.uint8)
x0 = spacing = 3
templets = []
for x1, y1, x2, y2 in rects:
imgchar = numpy.full((30, 30), 255, numpy.uint8)
tmpl = imgBW[y1:y2 + 1, x1:x2 + 1]
        # NOTE: value2 and max_value2 are not defined in this snippet; they appear to
        # come from the enclosing module (rotation-correction parameters).
        if value2 != (max_value2 // 2):
            tmpl = rotate_image(tmpl, (max_value2 // 2 - value2) * 10)
templets.append(tmpl)
copy_image(imgchar, 0, (30 - y2 + y1 - 1) // 2, tmpl)
copy_image(out, x0, 0, imgchar)
x0 += x2 - x1 + 1 + spacing
out = cv2.cvtColor(out, cv2.COLOR_GRAY2BGR)
i = 0
x0 = spacing
for x1, y1, x2, y2 in rects:
cv2.rectangle(out, (x0, 0), (x0 + x2 - x1 + 1, 29), COLORS[i])
x0 += x2 - x1 + 1 + spacing
i += 1
return img, out, templets
|
5f48e2b639dd3027e6463b1a99a8b7c13c043f88
| 3,642,224
|
def brand_profitsharing_order_query(self, transaction_id, out_order_no, sub_mchid):
"""查询连锁品牌分账结果
:param transaction_id: 微信支付订单号,示例值:'4208450740201411110007820472'
:param out_order_no: 商户分账单号,只能是数字、大小写字母_-|*@,示例值:'P20150806125346'
:param sub_mchid: 子商户的商户号,由微信支付生成并下发。示例值:'1900000109'
"""
if sub_mchid:
path = '/v3/brand/profitsharing/orders?sub_mchid=%s' % sub_mchid
else:
raise Exception('sub_mchid is not assigned.')
if transaction_id and out_order_no:
            path = '%s&transaction_id=%s&out_order_no=%s' % (path, transaction_id, out_order_no)
else:
raise Exception('transaction_id or out_order_no is not assigned.')
return self._core.request(path)
|
cb1af072f2b4f94f632817baff6cdfea66110873
| 3,642,225
|
def get_controller_from_module(module, cname):
"""
Extract classes that inherit from BaseController
"""
if hasattr(module, '__controller__'):
controller_classname = module.__controller__
else:
controller_classname = cname[0].upper() + cname[1:].lower() + 'Controller'
controller_class = module.__dict__.get(controller_classname, None)
return controller_class
|
b450105f6ec38a03fe461c5d9c07c4652da0efd3
| 3,642,226
|
def exp(d: D) -> NumDict:
"""Compute the base-e exponential of d."""
return d.exp()
|
a4d5baf6bdfadb48add80096bb4d167f01572b69
| 3,642,227
|
def Main(operation, args):
"""Supports 2 operations
1. Consulting the existing data (get)
> get ["{address}"]
2. Inserting data about someone else (certify)
> certify ["{address}","{hash}"]
"""
if len(args) == 0:
Log('You need to provide at least 1 parameter - [address]')
return 'Error: You need to provide at least 1 parameter - [address]'
address = args[0]
if len(address) != 20:
Log('Wrong address size')
return 'Error: Wrong address size'
if operation == 'get':
return get_certs(address)
elif operation == 'certify':
# Caller cannot add certifications to his address
if CheckWitness(address):
Log('You cannot add certifications for yourself')
return 'Error: You cannot add certifications for yourself'
if 3 != len(args):
Log('Certify requires 3 parameters - [address] [caller_address] [hash]')
return 'Error: Certify requires 3 parameters - [address] [caller_address] [hash]'
caller_address = args[1]
# To make sure the address is from the caller
if not CheckWitness(caller_address):
Log('You need to provide your own address')
return 'Error: You need to provide your own address'
content = args[2]
return add_certification(address, caller_address, content)
else:
Log('Invalid Operation')
return 'Error": "Invalid Operation'
|
0dac2ddb4dc3d259e30f5a3c100a39ff8d7b940d
| 3,642,228
|
def get_latest_file_list_orig1(input_list, start_time, num_files):
"""
Return a list of file names, trying to get one from each index file in input_list.
The starting time is start_time and the number of days to investigate is num_days.
"""
out = []
for rind in input_list:
        # Build the list of times to search (do not shadow the time_list() helper)
        times = time_list(start_time, rind.get_hours() * 3600, num_files)
        # print "rind: dir", rind.get_base_dir(), rind.get_index_date()
        line_list, index_date_list = rind.readlines_list_rev(times, 1)
flist = get_files(line_list)
if flist != []:
out.append("%s/%s/%s" % (rind.get_base_dir(), index_date_list[0], flist[0]))
else:
out.append("None")
    print(out)
return out
|
744b5392d136129a1135cea3ad577817798ef582
| 3,642,229
|
def get_ogheader(blob, url=None):
"""extract Open Graph markup into a dict
The OG header section is delimited by a line of only `---`.
Note that the page title is not provided as Open Graph metadata if
the image metadata is not specified.
"""
found = False
ogheader = dict()
for line in blob.split('\n'):
if line == '---':
found = True
break
if line.startswith('image: '):
toks = line.split()
assert len(toks) == 2
ogheader['image'] = toks[1]
if not found:
ogheader = dict() # Ignore any matches as false positives
return ogheader
if url is not None:
assert 'url' not in ogheader
ogheader['url'] = url
for line in blob.split('\n'):
if line.startswith('# '):
ogheader['title'] = line[2:]
return ogheader
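# Illustrative example (hypothetical blob): the image line and the first heading
# are extracted, and the caller-supplied URL is attached.
# >>> blob = "image: /img/cover.png\n---\n# My Post\nbody text"
# >>> get_ogheader(blob, url="https://example.com/post")
# {'image': '/img/cover.png', 'url': 'https://example.com/post', 'title': 'My Post'}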
|
4edd7c5545ddef241ee2bfd5e316e47a336aaa3f
| 3,642,230
|
def list_ingredient():
"""List all ingredients currently in the database"""
ingredients = IngredientCollection()
ingredients.load_all()
return jsonify(ingredients=[x.to_dict() for x in ingredients.models])
|
d3275dba18922b9f4558f23eedda3ae25d8a25d9
| 3,642,231
|
import re
def ParseSavedQueries(cnxn, post_data, project_service, prefix=''):
"""Parse form data for the Saved Queries part of an admin form."""
saved_queries = []
for i in xrange(1, MAX_QUERIES + 1):
if ('%ssavedquery_name_%s' % (prefix, i)) not in post_data:
continue # skip any entries that are blank or have no predicate.
name = post_data['%ssavedquery_name_%s' % (prefix, i)].strip()
if not name:
continue # skip any blank entries
if '%ssavedquery_id_%s' % (prefix, i) in post_data:
query_id = int(post_data['%ssavedquery_id_%s' % (prefix, i)])
else:
query_id = None # a new query_id will be generated by the DB.
project_names_str = post_data.get(
'%ssavedquery_projects_%s' % (prefix, i), '')
project_names = [pn.strip().lower()
for pn in re.split('[],;\s]+', project_names_str)
if pn.strip()]
project_ids = project_service.LookupProjectIDs(
cnxn, project_names).values()
base_id = int(post_data['%ssavedquery_base_%s' % (prefix, i)])
query = post_data['%ssavedquery_query_%s' % (prefix, i)].strip()
subscription_mode_field = '%ssavedquery_sub_mode_%s' % (prefix, i)
if subscription_mode_field in post_data:
subscription_mode = post_data[subscription_mode_field].strip()
else:
subscription_mode = None
saved_queries.append(tracker_bizobj.MakeSavedQuery(
query_id, name, base_id, query, subscription_mode=subscription_mode,
executes_in_project_ids=project_ids))
return saved_queries
|
5db4ecdf22eb61c1c43914f00042862142664590
| 3,642,232
|
def label_anchors(anchors, anchor_is_untruncated, gt_classes, gt_bboxes, background_id, iou_low_threshold=0.41, iou_high_threshold=0.61):
""" Get the labels of the anchors. Each anchor can be labeled as positive (1), negative (0) or ambiguous (-1). Truncated anchors are always labeled as ambiguous. """
n = anchors.shape[0]
k = gt_bboxes.shape[0]
# Compute the IoUs of the anchors and ground truth boxes
tiled_anchors = np.tile(np.expand_dims(anchors, 1), (1, k, 1))
tiled_gt_bboxes = np.tile(np.expand_dims(gt_bboxes, 0), (n, 1, 1))
tiled_anchors = tiled_anchors.reshape((-1, 4))
tiled_gt_bboxes = tiled_gt_bboxes.reshape((-1, 4))
ious, ioas, iogs = iou_bbox(tiled_anchors, tiled_gt_bboxes)
ious = ious.reshape(n, k)
ioas = ioas.reshape(n, k)
iogs = iogs.reshape(n, k)
# Label each anchor based on its max IoU
max_ious = np.max(ious, axis=1)
max_ioas = np.max(ioas, axis=1)
max_iogs = np.max(iogs, axis=1)
best_gt_bbox_ids = np.argmax(ious, axis=1)
labels = -np.ones((n), np.int32)
positive_idx = np.where(max_ious >= iou_high_threshold)[0]
negative_idx = np.where(max_ious < iou_low_threshold)[0]
labels[positive_idx] = 1
labels[negative_idx] = 0
# Truncated anchors are always ambiguous
ignore_idx = np.where(anchor_is_untruncated==0)[0]
labels[ignore_idx] = -1
bboxes = gt_bboxes[best_gt_bbox_ids]
classes = gt_classes[best_gt_bbox_ids]
classes[np.where(labels<1)[0]] = background_id
max_ious[np.where(anchor_is_untruncated==0)[0]] = -1
max_ioas[np.where(anchor_is_untruncated==0)[0]] = -1
max_iogs[np.where(anchor_is_untruncated==0)[0]] = -1
return labels, bboxes, classes, max_ious, max_ioas, max_iogs
|
39dc4d29f5a2491c2f818e7af2c01e1824afff56
| 3,642,233
|
import hashlib
def make_hash_md5(obj):
"""make_hash_md5
Args:
obj (any): anything that can be hashed.
Returns:
hash (str): hash from object.
"""
hasher = hashlib.md5()
hasher.update(repr(make_hashable(obj)).encode())
return hasher.hexdigest()
|
c8c0f0202f171e2557eba6a3824ac2f9a07dada9
| 3,642,234
|
def fbx_data_bindpose_element(root, me_obj, me, scene_data, arm_obj=None, mat_world_arm=None, bones=[]):
"""
Helper, since bindpose are used by both meshes shape keys and armature bones...
"""
if arm_obj is None:
arm_obj = me_obj
# We assume bind pose for our bones are their "Editmode" pose...
# All matrices are expected in global (world) space.
bindpose_key = get_blender_bindpose_key(arm_obj.bdata, me)
fbx_pose = elem_data_single_int64(root, b"Pose", get_fbx_uuid_from_key(bindpose_key))
fbx_pose.add_string(fbx_name_class(me.name.encode(), b"Pose"))
fbx_pose.add_string(b"BindPose")
elem_data_single_string(fbx_pose, b"Type", b"BindPose")
elem_data_single_int32(fbx_pose, b"Version", FBX_POSE_BIND_VERSION)
elem_data_single_int32(fbx_pose, b"NbPoseNodes", 1 + (1 if (arm_obj != me_obj) else 0) + len(bones))
# First node is mesh/object.
mat_world_obj = me_obj.fbx_object_matrix(scene_data, global_space=True)
fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
elem_data_single_int64(fbx_posenode, b"Node", me_obj.fbx_uuid)
elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(mat_world_obj))
# Second node is armature object itself.
if arm_obj != me_obj:
fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
elem_data_single_int64(fbx_posenode, b"Node", arm_obj.fbx_uuid)
elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(mat_world_arm))
# And all bones of armature!
mat_world_bones = {}
for bo_obj in bones:
bomat = bo_obj.fbx_object_matrix(scene_data, rest=True, global_space=True)
mat_world_bones[bo_obj] = bomat
fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
elem_data_single_int64(fbx_posenode, b"Node", bo_obj.fbx_uuid)
elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(bomat))
return mat_world_obj, mat_world_bones
|
9d205cd3c7a0242dbfaad42d1e7f0b9b3b81eb75
| 3,642,235
|
def partitioned_rml_estimator(y, sigma2i, iterations=50):
"""
Implementation of the robust maximum likelihood estimator.
Parameters
----------
y : :py:class:`~numpy.ndarray`, (n_replicates, n_variants)
The variant scores matrix
sigma2i : :py:class:`~numpy.ndarray`, (n_replicates, n_variants)
The score variance matrix
iterations : `int`
Number of iterations to perform.
Returns
-------
`tuple`
Tuple of :py:class:`~numpy.ndarray` objects, corresponding to
``betaML``, ``var_betaML``, ``eps``.
Notes
-----
@book{demidenko2013mixed,
title={Mixed models: theory and applications with R},
author={Demidenko, Eugene},
year={2013},
publisher={John Wiley \& Sons}
}
"""
# Initialize each array to be have len number of variants
max_replicates = y.shape[0]
betaML = np.zeros(shape=(y.shape[1],)) * np.nan
var_betaML = np.zeros(shape=(y.shape[1],)) * np.nan
eps = np.zeros(shape=(y.shape[1],)) * np.nan
nreps = np.zeros(shape=(y.shape[1],)) * np.nan
y_num_nans = np.sum(np.isnan(y), axis=0)
for k in range(0, max_replicates - 1, 1):
# Partition y based on the number of NaNs a column has,
# corresponding to the number of replicates a variant has
# across selections.
selector = y_num_nans == k
if np.sum(selector) == 0:
continue
y_k = np.apply_along_axis(lambda col: col[~np.isnan(col)], 0, y[:, selector])
sigma2i_k = np.apply_along_axis(
lambda col: col[~np.isnan(col)], 0, sigma2i[:, selector]
)
betaML_k, var_betaML_k, eps_k = rml_estimator(y_k, sigma2i_k, iterations)
# Handles the case when SE is 0 resulting in NaN values.
betaML_k[np.isnan(betaML_k)] = 0.0
var_betaML_k[np.isnan(var_betaML_k)] = 0.0
eps_k[np.isnan(eps_k)] = 0.0
betaML[selector] = betaML_k
var_betaML[selector] = var_betaML_k
eps[selector] = eps_k
nreps[selector] = max_replicates - k
return betaML, var_betaML, eps, nreps
|
b4ec6ad8af85cdf29470fa132d7f6008617b3a66
| 3,642,236
|
import math
def inv_kinema_cal_3(JOINT_ANGLE_OFFSET, L, H, position_to_move):
"""逆運動学を解析的に解く関数.
指先のなす角がηになるようなジョイント角度拘束条件を追加して逆運動学問題を解析的に解く
引数1:リンク長さの配列.nd.array(6).単位は[m]
引数2:リンク高さの配列.nd.array(1).単位は[m]
引数3:目標位置(直交座標系)行列.nd.array((3, 1)).単位は[m]
戻り値(成功したとき):ジョイント角度配列.nd.array((6)).単位は[°]
戻り値(失敗したとき):引数に関係なくジョイント角度配列(90, 90, 90, 90, 90, 0).nd.array((6)).単位は[°]を返す
※戻り値のq_3,q_4はサーボの定義と異なる
"""
final_offset = 0.012
#final_offset = 0
    # Cylindrical-coordinate representation of position_to_move (target position)
r_before = math.sqrt(position_to_move[0, 0] ** 2 + position_to_move[1, 0] ** 2) + 0.03
r_to_move = math.sqrt(r_before ** 2 + final_offset ** 2) # [m]
#r_to_move = math.sqrt(r_before ** 2) # [m]
#theta_to_move = np.arctan2(position_to_move[1, 0], position_to_move[0, 0]) # [rad]
theta_to_move = np.arctan2(position_to_move[1, 0], position_to_move[0, 0]) - np.arcsin(final_offset / r_before) # [rad]
#theta_to_move = np.arccos(position_to_move[0, 0] / r_to_move) - np.arcsin(final_offset / r_before) # [rad]
z_to_move = position_to_move[2, 0] # [m]
    print('Target position in cylindrical coordinates:\n', r_to_move, '[m]\n', int(theta_to_move * 180 / np.pi), '[deg]\n', z_to_move, '[m]')
    # Constants defined for the calculation
A = L[2]
B = L[3]
    # Analytic inverse kinematics calculation
#old1 = time.time()
    deta = np.pi / 180 # eta step size: evaluate every 1 [deg]
    eta = np.arange(0, np.pi + deta, deta, dtype = 'float64') # array of all eta values
    print('shape of eta:', eta.shape)
    # Pattern a
q_2_a = np.arcsin((A ** 2 - B ** 2 + (r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2) \
/ (2 * A * np.sqrt((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2))) \
- np.arctan((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) / (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta))) # [rad]
qlist_a_1 = np.concatenate([[eta], [q_2_a]], 0) # 縦に連結
qlist_a_2 = np.delete(qlist_a_1, np.where((np.isnan(qlist_a_1)) | (qlist_a_1 < 0) | ((np.pi * (1 - JOINT_ANGLE_OFFSET[1] / 180)) < qlist_a_1))[1], 1) # q_2_aがNAN,またはジョイント制限外の列を削除
q_3_a = np.arcsin((r_to_move - (L[4] + L[5] + L[6]) * np.cos(qlist_a_2[0, :]) - H[0] * np.sin(qlist_a_2[0, :])- A * np.cos(qlist_a_2[1, :]) + z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(qlist_a_2[0, :]) - H[0] * np.cos(qlist_a_2[0, :]) - A * np.sin(qlist_a_2[1, :])) \
/ (np.sqrt(2) * B)) - qlist_a_2[1, :] + np.pi / 4 # [rad]
qlist_a_3 = np.concatenate([qlist_a_2, [q_3_a]], 0) # 縦に連結
qlist_a_4 = np.delete(qlist_a_3, np.where((np.isnan(qlist_a_3)) | (qlist_a_3 < (np.pi * (JOINT_ANGLE_OFFSET[2] / 180))) | (np.pi < qlist_a_3))[1], 1) # q_3_aがNAN,またはジョイント制限外の列を削除
q_4_a = -qlist_a_4[0, :] + np.pi - qlist_a_4[1, :] - qlist_a_4[2, :]
qlist_a_5 = np.concatenate([qlist_a_4, [q_4_a]], 0) # 縦に連結
qlist_a_6 = np.delete(qlist_a_5, np.where((qlist_a_5 < (np.pi * (JOINT_ANGLE_OFFSET[3] / 180))) | (np.pi < qlist_a_5))[1], 1) # q_4_aがジョイント制限外の列を削除
#print('qlist_a_6の形は', qlist_a_6.shape)
#print('qlist_a_6 = ', qlist_a_6)
    # Pattern b
q_2_b = np.arcsin((A ** 2 - B ** 2 + (r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2) \
/ (2 * A * np.sqrt((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2))) \
- np.arctan((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) / (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta))) # [rad]
qlist_b_1 = np.concatenate([[eta], [q_2_b]], 0) # 縦に連結
    qlist_b_2 = np.delete(qlist_b_1, np.where((np.isnan(qlist_b_1)) | (qlist_b_1 < 0) | ((np.pi * (1 - JOINT_ANGLE_OFFSET[1] / 180)) < qlist_b_1))[1], 1) # drop columns where q_2_b is NaN or outside the joint limits
q_3_b = np.pi - np.arcsin((r_to_move - (L[4] + L[5] + L[6]) * np.cos(qlist_b_2[0, :]) - H[0] * np.sin(qlist_b_2[0, :])- A * np.cos(qlist_b_2[1, :]) + z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(qlist_b_2[0, :]) - H[0] * np.cos(qlist_b_2[0, :]) - A * np.sin(qlist_b_2[1, :])) \
/ (np.sqrt(2) * B)) - qlist_b_2[1, :] + np.pi / 4 # [rad]
qlist_b_3 = np.concatenate([qlist_b_2, [q_3_b]], 0) # 縦に連結
qlist_b_4 = np.delete(qlist_b_3, np.where((np.isnan(qlist_b_3)) | (qlist_b_3 < (np.pi * (JOINT_ANGLE_OFFSET[2] / 180))) | (np.pi < qlist_b_3))[1], 1) # q_3_bがNAN,またはジョイント制限外の列を削除
q_4_b = -qlist_b_4[0, :] + np.pi - qlist_b_4[1, :] - qlist_b_4[2, :]
qlist_b_5 = np.concatenate([qlist_b_4, [q_4_b]], 0) # 縦に連結
qlist_b_6 = np.delete(qlist_b_5, np.where((qlist_b_5 < (np.pi * (JOINT_ANGLE_OFFSET[3] / 180))) | (np.pi < qlist_b_5))[1], 1) # q_3_bがジョイント制限外の列を削除
#print('qlist_b_6の形は', qlist_b_6.shape)
#print('qlist_b_6 = ', qlist_b_6)
    # Pattern c
q_2_c = np.pi - np.arcsin((A ** 2 - B ** 2 + (r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2) \
/ (2 * A * np.sqrt((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2))) \
- np.arctan((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) / (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta))) # [rad]
qlist_c_1 = np.concatenate([[eta], [q_2_c]], 0) # 縦に連結
    qlist_c_2 = np.delete(qlist_c_1, np.where((np.isnan(qlist_c_1)) | (qlist_c_1 < 0) | ((np.pi * (1 - JOINT_ANGLE_OFFSET[1] / 180)) < qlist_c_1))[1], 1) # drop columns where q_2_c is NaN or outside the joint limits
q_3_c = np.arcsin((r_to_move - (L[4] + L[5] + L[6]) * np.cos(qlist_c_2[0, :]) - H[0] * np.sin(qlist_c_2[0, :])- A * np.cos(qlist_c_2[1, :]) + z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(qlist_c_2[0, :]) - H[0] * np.cos(qlist_c_2[0, :]) - A * np.sin(qlist_c_2[1, :])) \
/ (np.sqrt(2) * B)) - qlist_c_2[1, :] + np.pi / 4 # [rad]
qlist_c_3 = np.concatenate([qlist_c_2, [q_3_c]], 0) # 縦に連結
qlist_c_4 = np.delete(qlist_c_3, np.where((np.isnan(qlist_c_3)) | (qlist_c_3 < (np.pi * (JOINT_ANGLE_OFFSET[2] / 180))) | (np.pi < qlist_c_3))[1], 1) # q_3_cがNAN,またはジョイント制限外の列を削除
q_4_c = -qlist_c_4[0, :] + np.pi - qlist_c_4[1, :] - qlist_c_4[2, :]
qlist_c_5 = np.concatenate([qlist_c_4, [q_4_c]], 0) # 縦に連結
qlist_c_6 = np.delete(qlist_c_5, np.where((qlist_c_5 < (np.pi * (JOINT_ANGLE_OFFSET[3] / 180))) | (np.pi < qlist_c_5))[1], 1) # q_3_cがジョイント制限外の列を削除
#print('qlist_c_6の形は', qlist_c_6.shape)
#print('qlist_c_6 = ', (qlist_c_6 * 180 / np.pi).astype('int64'))
    # Pattern d
q_2_d = np.pi - np.arcsin((A ** 2 - B ** 2 + (r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2) \
/ (2 * A * np.sqrt((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2))) \
- np.arctan((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) / (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta))) # [rad]
qlist_d_1 = np.concatenate([[eta], [q_2_d]], 0) # 縦に連結
    qlist_d_2 = np.delete(qlist_d_1, np.where((np.isnan(qlist_d_1)) | (qlist_d_1 < 0) | ((np.pi * (1 - JOINT_ANGLE_OFFSET[1] / 180)) < qlist_d_1))[1], 1) # drop columns where q_2_d is NaN or outside the joint limits
q_3_d = np.pi - np.arcsin((r_to_move - (L[4] + L[5] + L[6]) * np.cos(qlist_d_2[0, :]) - H[0] * np.sin(qlist_d_2[0, :])- A * np.cos(qlist_d_2[1, :]) + z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(qlist_d_2[0, :]) - H[0] * np.cos(qlist_d_2[0, :]) - A * np.sin(qlist_d_2[1, :])) \
/ (np.sqrt(2) * B)) - qlist_d_2[1, :] + np.pi / 4 # [rad]
qlist_d_3 = np.concatenate([qlist_d_2, [q_3_d]], 0) # 縦に連結
qlist_d_4 = np.delete(qlist_d_3, np.where((np.isnan(qlist_d_3)) | (qlist_d_3 < (np.pi * (JOINT_ANGLE_OFFSET[2] / 180))) | (np.pi < qlist_d_3))[1], 1) # q_3_dがNAN,またはジョイント制限外の列を削除
q_4_d = -qlist_d_4[0, :] + np.pi - qlist_d_4[1, :] - qlist_d_4[2, :]
qlist_d_5 = np.concatenate([qlist_d_4, [q_4_d]], 0) # 縦に連結
qlist_d_6 = np.delete(qlist_d_5, np.where((qlist_d_5 < (np.pi * (JOINT_ANGLE_OFFSET[3] / 180))) | (np.pi < qlist_d_5))[1], 1) # q_3_dがジョイント制限外の列を削除
#print('qlist_d_6の形は', qlist_d_6.shape)
#print('qlist_d_6 = ', qlist_d_6)
#print('ベクトル化で計算', time.time() - old1,'[s]')
    qlist_abcd_6 = np.concatenate([qlist_a_6, qlist_b_6, qlist_c_6, qlist_d_6], 1) # concatenate the results of patterns a, b, c and d horizontally
print(qlist_abcd_6)
    qlist_q2norm = np.abs(np.pi / 2 - qlist_abcd_6[1, :]) # absolute value of (pi/2 - q_2)
print(qlist_q2norm)
    qlist_abcd_62 = np.concatenate([qlist_abcd_6, [qlist_q2norm]], 0) # concatenate vertically
print(qlist_abcd_62)
    k = np.where(qlist_abcd_62[4, :] == np.min(qlist_abcd_62[4, :])) # index tuple of the column whose q_2 is closest to pi/2
print(k)
print(qlist_abcd_62[:, k])
    # Convert to servo command angles and cast to int (pyFirmata PWM only accepts integer commands)
q_1_command = int(np.round(theta_to_move * 180 / np.pi)) # [°]
q_2_command = int(np.round(qlist_abcd_62[1, k] * 180 / np.pi)) # [°]
q_3_command = int(np.round(qlist_abcd_62[2, k] * 180 / np.pi)) # [°]
q_4_command = int(np.round(qlist_abcd_62[3, k] * 180 / np.pi)) # [°]
q_5_command = int(np.round(np.pi / 2 * 180 / np.pi)) # [°]
q_6_command = int(np.round(0 * 180 / np.pi)) # [°]
z = np.array([q_1_command, q_2_command, q_3_command, q_4_command, q_5_command, q_6_command])
print(z)
return z
|
4368c847b9918f3682e2ca0336008af49f0823cf
| 3,642,237
|
from django.contrib.auth import authenticate, login
def http_basic_auth(func):
"""
Attempts to login user with u/p provided in HTTP_AUTHORIZATION header.
If successful, returns the view, otherwise returns a 401.
If PING_BASIC_AUTH is False, then just return the view function
Modified code by:
http://djangosnippets.org/users/bthomas/
from
http://djangosnippets.org/snippets/1304/
"""
@wraps(func)
def _decorator(request, *args, **kwargs):
if getattr(settings, 'PING_BASIC_AUTH', PING_BASIC_AUTH):
if request.META.has_key('HTTP_AUTHORIZATION'):
authmeth, auth = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
if authmeth.lower() == 'basic':
auth = auth.strip().decode('base64')
username, password = auth.split(':', 1)
user = authenticate(username=username, password=password)
if user:
login(request, user)
return func(request, *args, **kwargs)
else:
return HttpResponse("Invalid Credentials", status=401)
else:
return HttpResponse("No Credentials Provided", status=401)
else:
return func(request, *args, **kwargs)
return _decorator
|
fd99ce1464acb88bd9f68b6b85233dd44cb81bfd
| 3,642,241
|
def stats_to_df(stats_data):
""" Transform Statistical API response into a pandas.DataFrame
"""
df_data = []
for single_data in stats_data['data']:
df_entry = {}
is_valid_entry = True
df_entry['interval_from'] = parse_time(
single_data['interval']['from']).date()
df_entry['interval_to'] = parse_time(
single_data['interval']['to']).date()
for output_name, output_data in single_data['outputs'].items():
for band_name, band_values in output_data['bands'].items():
band_stats = band_values['stats']
if band_stats['sampleCount'] == band_stats['noDataCount']:
is_valid_entry = False
break
for stat_name, value in band_stats.items():
col_name = f'{output_name}_{band_name}_{stat_name}'
if stat_name == 'percentiles':
for perc, perc_val in value.items():
perc_col_name = f'{col_name}_{perc}'
df_entry[perc_col_name] = perc_val
else:
df_entry[col_name] = value
if is_valid_entry:
df_data.append(df_entry)
return pd.DataFrame(df_data)
|
d77d3ee46c68c737ce8274458d8564256f8121a7
| 3,642,242
|
def make_risk_metrics(
stocks,
weights,
start_date,
end_date
):
"""
Parameters:
stocks: List of tickers compatiable with the yfinance module
weights: List of weights, probably going to be evenly distributed
"""
if mlfinlabExists:
Var, VaR, CVaR, CDaR = generate_risk_stats(
stocks,
weights,
start_date=start_date,
end_date=end_date
)
else:
Var, VaR, CVaR, CDaR = 0,0,0,0
return [
{
"value": Var,
"name": "Variance",
"description": "This measure can be used to compare portfolios" \
" based on estimations of the volatility of returns."
},
{
"value": VaR,
"name": "Value at Risk",
"description": "This measure can be used to compare portfolios" \
" based on the amount of investments that can be lost in the next observation, assuming the returns for assets follow a multivariate normal distribution."
},
{
"value": CVaR,
"name": "Expected Shortfall",
"description": "This measure can be used to compare portfolios" \
" based on the average amount of investments that can be lost in a worst-case scenario, assuming the returns for assets follow a multivariate normal distribution."
},
{
"value": CDaR,
"name": "Conditional Drawdown at Risk",
"description": "This measure can be used to compare portfolios"
" based on the average amount of a portfolio drawdown in a worst-case scenario, assuming the drawdowns follow a normal distribution."
}
]
|
8a24d542a8b7475a66c0c914866ee4225564b8ed
| 3,642,243
|
import Crypto.Cipher.DES
def decrypt(bin_k, bin_cipher):
"""decrypt w/ DES"""
return Crypto.Cipher.DES.new(bin_k).decrypt(bin_cipher)
|
fa8331b792ae4003c2fc14fd84b2ac82306bc7b2
| 3,642,244
|
from ucscsdk.mometa.vnic.VnicIScsiLCP import VnicIScsiLCP
from ucscsdk.mometa.vnic.VnicVlan import VnicVlan
def lcp_iscsi_vnic_add(handle, name, parent_dn, addr="derived",
admin_host_port="ANY",
admin_vcon="any", stats_policy_name="global-default",
admin_cdn_name=None, cdn_source="vnic-name",
switch_id="A", pin_to_group_name=None, vnic_name=None,
qos_policy_name=None,
adaptor_profile_name="global-default",
ident_pool_name=None, order="unspecified",
nw_templ_name=None, vlan_name="default",
**kwargs):
"""
Adds iSCSI vNIC to LAN Connectivity Policy
Args:
handle (UcscHandle)
parent_dn (string) : Dn of LAN connectivity policy name
name (string) : Name of iscsi vnic
admin_host_port (string) : Admin host port placement for vnic
admin_vcon (string) : Admin vcon for vnic
stats_policy_name (string) : Stats policy name
cdn_source (string) : CDN source ['vnic-name', 'user-defined']
admin_cdn_name (string) : CDN name
switch_id (string): Switch id
pin_to_group_name (string) : Pinning group name
vnic_name (string): Overlay vnic name
qos_policy_name (string): Qos policy name
adaptor_profile_name (string): Adaptor profile name
ident_pool_name (string) : Identity pool name
order (string) : Order of the vnic
nw_templ_name (string) : Network template name
addr (string) : Address of the vnic
vlan_name (string): Name of the vlan
**kwargs: Any additional key-value pair of managed object(MO)'s
property and value, which are not part of regular args.
This should be used for future version compatibility.
Returns:
VnicIScsiLCP : Managed Object
Example:
lcp_iscsi_vnic_add(handle, "test_iscsi",
"org-root/lan-conn-pol-samppol2",
nw_ctrl_policy_name="test_nwpol", switch_id= "A",
vnic_name="vnic1",
adaptor_profile_name="global-SRIOV")
"""
mo = handle.query_dn(parent_dn)
if not mo:
raise UcscOperationError("lcp_iscsi_vnic_add",
"LAN connectivity policy '%s' does not exist"
% parent_dn)
if cdn_source not in ['vnic-name', 'user-defined']:
raise UcscOperationError("lcp_iscsi_vnic_add",
"Invalid CDN source name")
admin_cdn_name = "" if cdn_source == "vnic-name" else admin_cdn_name
mo_1 = VnicIScsiLCP(parent_mo_or_dn=mo,
addr=addr,
admin_host_port=admin_host_port,
admin_vcon=admin_vcon,
stats_policy_name=stats_policy_name,
cdn_source=cdn_source,
admin_cdn_name=admin_cdn_name,
switch_id=switch_id,
pin_to_group_name=pin_to_group_name,
vnic_name=vnic_name,
qos_policy_name=qos_policy_name,
adaptor_profile_name=adaptor_profile_name,
ident_pool_name=ident_pool_name,
order=order,
nw_templ_name=nw_templ_name,
name=name)
mo_1.set_prop_multiple(**kwargs)
VnicVlan(parent_mo_or_dn=mo_1, name="", vlan_name=vlan_name)
handle.add_mo(mo_1)
handle.commit()
return mo_1
|
6d87f8f3adebaa56850dfb14137fe049ff6e01ee
| 3,642,246
|
def fixture_ecomax_with_data(ecomax: EcoMAX) -> EcoMAX:
"""Return ecoMAX instance with test data."""
ecomax.product = ProductInfo(model="test_model")
ecomax.set_data(_test_data)
ecomax.set_parameters(_test_parameters)
return ecomax
|
4f496342d461eb39e4689ed266471b11bdf3f1f5
| 3,642,247
|
async def request_get_stub(url: str, stub_for: str, status_code: int = 200):
"""Returns an object with stub response.
Args:
url (str): A request URL.
stub_for (str): Type of stub required.
Returns:
StubResponse: A StubResponse object.
"""
return StubResponse(stub_for=stub_for, status_code=status_code)
|
f4c4f9a0610e8d95f920ddee76c4264e23c08283
| 3,642,249
|
import torch
def single_gpu_test(model, data_loader, rescale=True, show=False, out_dir=None):
"""Test with single GPU.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
show (bool): Whether show results during infernece. Default: False.
out_dir (str, optional): If specified, the results will be dumped
into the directory to save output results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
seg_targets = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
if 'gt_semantic_seg' in data:
target = data.pop('gt_semantic_seg')
for gt in target:
gt = gt.cpu().numpy()[0] # 1*h*w ==> h*w
seg_targets.append(gt)
with torch.no_grad():
result = model(return_loss=False, rescale=rescale, **data)
if isinstance(result, list):
results.extend(result)
else:
results.append(result)
if show or out_dir:
img_tensor = data['img'][0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
model.module.show_result(
img_show,
result,
palette=dataset.PALETTE,
show=show,
out_file=out_file)
batch_size = data['img'][0].size(0)
for _ in range(batch_size):
prog_bar.update()
if seg_targets:
return [results, seg_targets]
return results
|
0e548186e5909b1a7b72d6fd6ed16c80e233e0b6
| 3,642,250
|
def readAllCarts():
"""
This function responds to a request for /api/people
    with the complete list of carts
    :return: json-serialisable list of carts, sorted by key
    """
    # Create the list of carts from our data
    return [CART[key] for key in sorted(CART.keys())]
|
7ec9b25b36c238a6bfae3963482d610ed09d1d75
| 3,642,251
|
import random
import logging
def build_encapsulated_packet(select_test_interface, ptfadapter, tor, tunnel_traffic_monitor):
"""Build the encapsulated packet sent from T1 to ToR."""
_, server_ipv4 = select_test_interface
config_facts = tor.get_running_config_facts()
try:
peer_ipv4_address = [_["address_ipv4"] for _ in config_facts["PEER_SWITCH"].values()][0]
except IndexError:
raise ValueError("Failed to get peer ToR address from CONFIG_DB")
tor_ipv4_address = [_ for _ in config_facts["LOOPBACK_INTERFACE"]["Loopback0"]
if is_ipv4_address(_.split("/")[0])][0]
tor_ipv4_address = tor_ipv4_address.split("/")[0]
inner_dscp = random.choice(range(0, 33))
inner_ttl = random.choice(range(3, 65))
inner_packet = testutils.simple_ip_packet(
ip_src="1.1.1.1",
ip_dst=server_ipv4,
ip_dscp=inner_dscp,
ip_ttl=inner_ttl
)[IP]
packet = testutils.simple_ipv4ip_packet(
eth_dst=tor.facts["router_mac"],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_src=peer_ipv4_address,
ip_dst=tor_ipv4_address,
ip_dscp=inner_dscp,
ip_ttl=255,
inner_frame=inner_packet
)
logging.info("the encapsulated packet to send:\n%s", tunnel_traffic_monitor._dump_show_str(packet))
return packet
|
e7776a602eeb0dbe9bcd8707b71dacfe4ac36338
| 3,642,252
|
def index():
"""
Renders the index page.
"""
return render_template("index.html")
|
cc7630c3bbaf32c3be705a7205df715f959a5683
| 3,642,253
|