| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
def chi2Significant(pair, unigrams, bigrams):
    """Return True if the two tokens in `pair` are significantly co-occurring,
    False otherwise. The test used is the chi-squared (χ²) test.
    Parameters:
    pair: tuple of tokens (token1, token2)
    unigrams: unigrams dictionary data structure
    bigrams: bigrams dictionary data structure
    """
    yes_yes = bigrams.get(pair, 0)
    yes_not = unigrams.get(pair[0], 0) - yes_yes
    not_yes = unigrams.get(pair[1], 0) - yes_yes
    not_not = sum(bigrams.values()) - 1 - yes_not - not_yes + yes_yes
    chi2score = chi2Score((yes_yes, yes_not, not_yes, not_not),
                          expectationFromObservationDF1((yes_yes, yes_not, not_yes, not_not)))
    return bool(chi2score and chi2score > df1chi2sigscore)
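# A minimal usage sketch, assuming `chi2Score`, `expectationFromObservationDF1`
# and the threshold `df1chi2sigscore` are defined elsewhere in the module
# (for one degree of freedom, 3.841 corresponds to p < 0.05). The toy counts
# below are hypothetical.
unigrams = {"new": 15, "york": 10, "city": 12}
bigrams = {("new", "york"): 8, ("york", "city"): 3}
if chi2Significant(("new", "york"), unigrams, bigrams):
    print("'new york' is a significant collocation")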
|
79c55bb581ed4714c6e455d8ee85d923604fe6b6
| 3,647,421
|
def excel2schema(schema_excel_filename, schema_urlprefix, options, schema_dir=None):
    """ Given an Excel filename, convert it into an in-memory object
    and output a JSON representation based on options.
    params:
       schema_excel_filename -- string, Excel filename
       schema_urlprefix -- string, URL prefix for downloading the schema's JSON-LD,
          i.e. the JSON-LD version of the schema can be obtained from the URL
          <code><schema_urlprefix><schema_release_identifier>.jsonld</code>,
          e.g. http://localhost:8080/getschema/cns_top_v2.0.jsonld
          {  schema_urlprefix = http://localhost:8080/getschema/
             schema_release_identifier = cns_top_v2.0
             schema_name = cns_top
             schema_version = v2.0
          }
       options -- string, comma-separated strings, each defining an expected output component, see <code>mem4export</code>
    return json dict, see mem4export
    """
schema_excel_json = excel2json2018(schema_excel_filename)
return table2schema(schema_excel_json, schema_urlprefix, options, schema_dir)
|
9d19fef99f9ca56f463a3559d7b5f5342f12067b
| 3,647,422
|
def assign_id_priority(handle):
    """
    Assign priority according to agent id (a lower id means a higher priority).
    :param handle: the agent id
    :return: the priority value (the id itself)
    """
    return handle
|
8e1b22748d263fc12749790e601a4197b5d6370e
| 3,647,423
|
def learning_rate_with_decay(
batch_size, batch_denom, num_images, boundary_epochs, decay_rates):
"""Get a learning rate that decays step-wise as training progresses.
Args:
batch_size: the number of examples processed in each training batch.
batch_denom: this value will be used to scale the base learning rate.
`0.1 * batch size` is divided by this number, such that when
batch_denom == batch_size, the initial learning rate will be 0.1.
num_images: total number of images that will be used for training.
boundary_epochs: list of ints representing the epochs at which we
decay the learning rate.
decay_rates: list of floats representing the decay rates to be used
for scaling the learning rate. It should have one more element
than `boundary_epochs`, and all elements should have the same type.
Returns:
Returns a function that takes a single argument - the number of batches
trained so far (global_step)- and returns the learning rate to be used
for training the next batch.
"""
initial_learning_rate = 0.1 * batch_size / batch_denom
batches_per_epoch = num_images / batch_size
    # Scale the learning rate by the given decay rate at each boundary epoch.
boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
vals = [initial_learning_rate * decay for decay in decay_rates]
def learning_rate_fn(global_step):
global_step = tf.cast(global_step, tf.int32)
return tf.train.piecewise_constant(global_step, boundaries, vals)
return learning_rate_fn
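# A hedged illustration (not part of the original module): the same piecewise
# schedule computed in plain NumPy, to show what `learning_rate_fn` returns at
# a given global step. The batch/epoch numbers below are hypothetical.
import numpy as np

def piecewise_constant(step, boundaries, vals):
    # vals[i] applies while step <= boundaries[i]; past the last boundary,
    # vals[-1] applies, mirroring tf.train.piecewise_constant semantics.
    return vals[np.searchsorted(boundaries, step, side='left')]

boundaries = [int(50000 / 128 * e) for e in [100, 150]]   # batches_per_epoch * epoch
vals = [0.1 * d for d in [1, 0.1, 0.01]]                  # initial lr 0.1
print(piecewise_constant(0, boundaries, vals))       # 0.1
print(piecewise_constant(60000, boundaries, vals))   # 0.001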
|
8d33c30e47c27e6de974a342638e0017026be15d
| 3,647,424
|
def stepedit_SignType(*args):
"""
* Returns a SignType fit for STEP (creates the first time)
:rtype: Handle_IFSelect_Signature
"""
return _STEPEdit.stepedit_SignType(*args)
|
d22a33dab4764924f7bd6645deee70403e6f3b39
| 3,647,425
|
def create_heatmap(out, data, row_labels, col_labels, title, colormap, vmax, ax=None,
                   cbar_kw={}, cbarlabel="", **kwargs):
    """
    Create a heatmap from a numpy array and two lists of labels, and save it to a file.
    Arguments:
        out        : Path of the output image file passed to plt.savefig
        data       : A 2D numpy array of shape (N, M)
        row_labels : A list or array of length N with the labels for the rows
        col_labels : A list or array of length M with the labels for the columns
        title      : Title of the plot
        colormap   : Colormap name or object passed to imshow
        vmax       : Upper bound of the color scale (vmin is fixed at 0)
    Optional arguments:
        ax         : A matplotlib.axes.Axes instance to which the heatmap
                     is plotted. If not provided, use current axes or
                     create a new one.
        cbar_kw    : A dictionary with arguments to
                     :meth:`matplotlib.Figure.colorbar`.
        cbarlabel  : The label for the colorbar
    All other arguments are directly passed on to the imshow call.
    """
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, cmap=colormap, vmin=0, vmax=vmax, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
plt.gcf().subplots_adjust(bottom=0.25)
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
ax.tick_params(axis='both', which='major', labelsize=6)
ax.tick_params(axis='both', which='minor', labelsize=6)
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=90, ha="right")
plt.title(title)
    # Create a white grid on minor ticks (spine removal is intentionally left disabled).
    ax.set_xticks(np.arange(data.shape[1] + 1) - .6, minor=True)
    ax.set_yticks(np.arange(data.shape[0] + 1) - .6, minor=True)
    ax.grid(which="minor", color="k", linestyle='-', linewidth=0.5)
    ax.tick_params(which="minor", bottom=False, left=False)
    plt.savefig(out)
    plt.clf()
return im, cbar
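# A minimal usage sketch (assumes numpy and matplotlib.pyplot are imported at
# module level as np/plt, as the function body implies); the file name is arbitrary.
import numpy as np

demo = np.random.rand(4, 6)
create_heatmap("heatmap.png", demo,
               row_labels=[f"row {i}" for i in range(4)],
               col_labels=[f"col {j}" for j in range(6)],
               title="Random demo", colormap="viridis", vmax=1.0,
               cbarlabel="intensity")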
|
c060951e0c830444665c0a3c5a9b6e244ae32bb4
| 3,647,426
|
def ccw(a: complex, b: complex, c: complex) -> int:
    """The sign of the counter-clockwise angle of points abc.
    Args:
        a (complex): First point.
        b (complex): Second point.
        c (complex): Third point.
    Returns:
        int: If the three points are not collinear, returns the sign of the
            counter-clockwise angle of abc. That is, if the points abc make a
            counter-clockwise turn, it returns +1. If a clockwise turn, it
            returns -1.
            If they are collinear, returns one of +2, -2, or 0, depending on
            the order of the points on the line.
    """
    b -= a
    c -= a
    cr = cross(b, c)
    if cr > 0:
        # counter-clockwise
        return +1
    elif cr < 0:
        # clockwise
        return -1
    elif dot(b, c) < 0:
        # c--a--b on line
        return +2
    elif abs(b) < abs(c):
        # a--b--c on line
        return -2
    else:
        # b--c--a on line
        return 0
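# A hedged sketch of the helpers this function assumes: for points stored as
# complex numbers, the 2-D cross and dot products can be read off conj(b)*c.
def cross(b: complex, c: complex) -> float:
    return (b.conjugate() * c).imag

def dot(b: complex, c: complex) -> float:
    return (b.conjugate() * c).real

# Example: (0,0), (1,0), (1,1) make a counter-clockwise turn.
assert ccw(0 + 0j, 1 + 0j, 1 + 1j) == +1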
|
d6fac5f560b26299e2bf7aefef0ebabf33672323
| 3,647,427
|
def _tree_selector(X, leaf_size=40, metric='minkowski'):
"""
Selects the better tree approach for given data
Parameters
----------
X : {array-like, pandas dataframe} of shape (n_samples, n_features)
The input data.
    leaf_size : int, default=40
        Number of points at which the tree switches to brute-force search of neighbors
metric : str or DistanceMetric object, default='minkowski'
The distance metric to use for the neighborhood tree. Refer
to the DistanceMetric class documentation from sklearn for a list
of available metrics
Returns
-------
tree : {KDTree or BallTree}
The best tree to be used to find neighbors given data
"""
    # Low-dimensional spaces are fit to a KD-tree
    if X.shape[1] < 30:
        return KDTree(X, leaf_size=leaf_size, metric=metric)
    # High-dimensional spaces are fit to a ball tree
    return BallTree(X, leaf_size=leaf_size, metric=metric)
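# A minimal usage sketch, assuming the sklearn tree classes are imported at
# module level:
from sklearn.neighbors import KDTree, BallTree
import numpy as np

X = np.random.rand(100, 5)          # 5 features -> KDTree branch
tree = _tree_selector(X)
dist, ind = tree.query(X[:1], k=3)  # 3 nearest neighbours of the first point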
|
ad0d175c17585009fb92e6f4a813b1a1ef19e535
| 3,647,428
|
def get_diff_objects(diff_object_mappings, orig_datamodel_object_list):
"""获取diff_objects
:param diff_object_mappings: 变更对象内容mapping
:param orig_datamodel_object_list: 操作前/上一次发布内容列表
:return: diff_objects: diff_objects列表
"""
# 1)从diff_object_mappings中获取diff_objects
diff_objects = []
field_diff_objects = []
model_relation_diff_objects = []
master_table_diff_object = None
for key, value in list(diff_object_mappings.items()):
if value['object_type'] == DataModelObjectType.FIELD.value:
if value not in field_diff_objects:
field_diff_objects.append(value)
elif value['object_type'] == DataModelObjectType.MODEL_RELATION.value:
if value not in model_relation_diff_objects:
model_relation_diff_objects.append(value)
elif value['object_type'] == DataModelObjectType.MASTER_TABLE.value:
master_table_diff_object = value
elif value not in diff_objects:
diff_objects.append(value)
    # 2) Put field_diff_objects into the master table's diff_objects
    # If any field or model relation has a diff
if field_diff_objects or model_relation_diff_objects:
        # If fields have diffs, master_table_diff_object may still be None
if master_table_diff_object is None:
for datamodel_object_dict in orig_datamodel_object_list:
if datamodel_object_dict['object_type'] == DataModelObjectType.MASTER_TABLE.value:
master_table_diff_object = {
'diff_type': DataModelObjectOperationType.UPDATE.value,
'object_id': datamodel_object_dict['object_id'],
'object_type': datamodel_object_dict['object_type'],
}
break
        # Put the contents of field_diff_objects into the object corresponding to the master table
master_table_diff_object['diff_objects'] = field_diff_objects + model_relation_diff_objects
diff_objects.append(master_table_diff_object)
    # No field diffs, but the master table itself has a non-update change
elif (
master_table_diff_object is not None
and master_table_diff_object['diff_type'] != DataModelObjectOperationType.UPDATE.value
):
diff_objects.append(master_table_diff_object)
return diff_objects
|
d6619b289944dba9f541b1f893d0f908b9ee8b48
| 3,647,429
|
def find_lcs(s1, s2):
    """Find the longest common substring (contiguous) of s1 and s2; despite the
    name, this computes the longest common substring, not subsequence.
    Returns (substring, its length)."""
m = [[0 for i in range(len(s2) + 1)] for j in range(len(s1) + 1)]
max_len = 0
p = 0
for i in range(len(s1)):
for j in range(len(s2)):
if s1[i] == s2[j]:
m[i + 1][j + 1] = m[i][j] + 1
if m[i + 1][j + 1] > max_len:
max_len = m[i + 1][j + 1]
p = i + 1
return s1[p - max_len:p], max_len
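# A quick sanity check of the substring behaviour described above:
assert find_lcs("abcdef", "zcdem") == ("cde", 3)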
|
3b35307c6ab287d2088d25bb3826589d7d62be8b
| 3,647,430
|
def calculate_vertical_vorticity_cost(u, v, w, dx, dy, dz, Ut, Vt,
coeff=1e-5):
"""
Calculates the cost function due to deviance from vertical vorticity
equation. For more information of the vertical vorticity cost function,
see Potvin et al. (2012) and Shapiro et al. (2009).
Parameters
----------
u: 3D array
Float array with u component of wind field
v: 3D array
Float array with v component of wind field
w: 3D array
Float array with w component of wind field
dx: float array
Spacing in x grid
dy: float array
Spacing in y grid
dz: float array
Spacing in z grid
    Ut: float
        U component of storm motion
    Vt: float
        V component of storm motion
    coeff: float
        Weighting coefficient
Returns
-------
Jv: float
Value of vertical vorticity cost function.
References
----------
Potvin, C.K., A. Shapiro, and M. Xue, 2012: Impact of a Vertical Vorticity
Constraint in Variational Dual-Doppler Wind Analysis: Tests with Real and
Simulated Supercell Data. J. Atmos. Oceanic Technol., 29, 32–49,
https://doi.org/10.1175/JTECH-D-11-00019.1
Shapiro, A., C.K. Potvin, and J. Gao, 2009: Use of a Vertical Vorticity
Equation in Variational Dual-Doppler Wind Analysis. J. Atmos. Oceanic
Technol., 26, 2089–2106, https://doi.org/10.1175/2009JTECHA1256.1
"""
    # Arrays are ordered (z, y, x); take each derivative along the matching axis.
    dvdz = np.gradient(v, dz, axis=0)
    dudz = np.gradient(u, dz, axis=0)
    dvdx = np.gradient(v, dx, axis=2)
    dwdy = np.gradient(w, dy, axis=1)
    dwdx = np.gradient(w, dx, axis=2)
    dudx = np.gradient(u, dx, axis=2)
    dvdy = np.gradient(v, dy, axis=1)
    dudy = np.gradient(u, dy, axis=1)
zeta = dvdx - dudy
dzeta_dx = np.gradient(zeta, dx, axis=2)
dzeta_dy = np.gradient(zeta, dy, axis=1)
dzeta_dz = np.gradient(zeta, dz, axis=0)
jv_array = ((u - Ut) * dzeta_dx + (v - Vt) * dzeta_dy +
w * dzeta_dz + (dvdz * dwdx - dudz * dwdy) +
zeta * (dudx + dvdy))
return np.sum(coeff*jv_array**2)
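# A hedged usage sketch with synthetic (z, y, x)-ordered fields and uniform
# grid spacing; the spacings and storm motion below are hypothetical.
import numpy as np

nz, ny, nx = 8, 16, 16
rng = np.random.default_rng(0)
u = rng.standard_normal((nz, ny, nx))
v = rng.standard_normal((nz, ny, nx))
w = rng.standard_normal((nz, ny, nx))
Jv = calculate_vertical_vorticity_cost(u, v, w, dx=1000.0, dy=1000.0,
                                       dz=500.0, Ut=5.0, Vt=5.0)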
|
6abc76df0f75f827b1048cdb07d71bbb311d7ef5
| 3,647,431
|
import httpx
def fetch_hero_stats() -> list:
"""Retrieves hero win/loss statistics from OpenDotaAPI."""
r = httpx.get("https://api.opendota.com/api/heroStats")
heroes = r.json()
    # Rename pro_<stat> to f"{Bracket.PRO.value}_{stat}" so it's easier to work with our enum
for hero in heroes:
for stat in ["win", "pick", "ban"]:
hero[f"{Bracket.PRO.value}_{stat}"] = hero.pop(f"pro_{stat}")
return heroes
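# A hedged sketch of the `Bracket` enum this function assumes; only the PRO
# member (value 8, matching OpenDota's bracket numbering) is needed here.
from enum import Enum

class Bracket(Enum):
    HERALD = 1
    GUARDIAN = 2
    CRUSADER = 3
    ARCHON = 4
    LEGEND = 5
    ANCIENT = 6
    DIVINE = 7
    PRO = 8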
|
1362beaa82eeb29859df4fe1280d6ac7b1073be1
| 3,647,432
|
def test_source_locations_are_within_correct_range(tokamak_source):
"""Tests that each source has RZ locations within the expected range.
As the function converting (a,alpha) coordinates to (R,Z) is not bijective,
we cannot convert back to validate each individual point. However, we can
determine whether the generated points are contained within the shell of
the last closed magnetic surface. See "Tokamak D-T neutron source models
for different plasma physics confinement modes", C. Fausser et al., Fusion
Engineering and Design, 2012 for more info.
"""
R_0 = tokamak_source.major_radius
A = tokamak_source.minor_radius
El = tokamak_source.elongation
delta = tokamak_source.triangularity
def get_R_on_LCMS(alpha):
"""Gets R on the last closed magnetic surface for a given alpha"""
return R_0 + A * np.cos(alpha + delta * np.sin(alpha))
approx_lt = lambda x, y: x < y or np.isclose(x, y)
approx_gt = lambda x, y: x > y or np.isclose(x, y)
for source in tokamak_source.sources:
R, Z = source.space.r.x[0], source.space.z.x[0]
        # First test that the point is contained within a simple box with
        # lower left (r_min, -z_max) and upper right (r_max, z_max)
assert approx_gt(R, R_0 - A)
assert approx_lt(R, R_0 + A)
assert approx_lt(abs(Z), A * El)
        # For a given Z, we can determine the two values of alpha where
        # a = minor_radius, and from there determine the upper and
        # lower bounds for R.
alpha_1 = np.arcsin(abs(Z) / (El * A))
alpha_2 = np.pi - alpha_1
R_max, R_min = get_R_on_LCMS(alpha_1), get_R_on_LCMS(alpha_2)
assert approx_lt(R_max, R_0 + A)
assert approx_gt(R_min, R_0 - A)
assert approx_lt(R, R_max)
assert approx_gt(R, R_min)
|
5c7c668c73403e1c5d37b852e110fcdf8a36023e
| 3,647,433
|
def GetTDryBulbFromEnthalpyAndHumRatio(MoistAirEnthalpy: float, HumRatio: float) -> float:
"""
Return dry bulb temperature from enthalpy and humidity ratio.
Args:
        MoistAirEnthalpy : Moist air enthalpy in Btu lb⁻¹ [IP] or J kg⁻¹ [SI]
        HumRatio : Humidity ratio in lb_H₂O lb_Air⁻¹ [IP] or kg_H₂O kg_Air⁻¹ [SI]
Returns:
Dry-bulb temperature in °F [IP] or °C [SI]
Reference:
ASHRAE Handbook - Fundamentals (2017) ch. 1 eqn 30
Notes:
Based on the `GetMoistAirEnthalpy` function, rearranged for temperature.
"""
if HumRatio < 0:
raise ValueError("Humidity ratio is negative")
BoundedHumRatio = max(HumRatio, MIN_HUM_RATIO)
if isIP():
TDryBulb = (MoistAirEnthalpy - 1061.0 * BoundedHumRatio) / (0.240 + 0.444 * BoundedHumRatio)
else:
TDryBulb = (MoistAirEnthalpy / 1000.0 - 2501.0 * BoundedHumRatio) / (1.006 + 1.86 * BoundedHumRatio)
return TDryBulb
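# A hedged round-trip check in SI units (assumes isIP() reports False and
# MIN_HUM_RATIO is defined, as in psychrolib-style modules). The enthalpy of
# moist air at 25 °C with W = 0.010 kg/kg, from the same equation rearranged:
h = 1000.0 * (1.006 * 25.0 + 0.010 * (2501.0 + 1.86 * 25.0))
assert abs(GetTDryBulbFromEnthalpyAndHumRatio(h, 0.010) - 25.0) < 1e-9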
|
3ea565eb338913f9c87e2e4d260606e437d30f8c
| 3,647,434
|
def calibration_runs(instr, exper, runnum=None):
"""
Return the information about calibrations associated with the specified run
(or all runs of the experiment if no specific run number is provided).
The result will be packaged into a dictionary of the following type:
<runnum> : { 'calibrations' : [<calibtype1>, <calibtype2>, ... ] ,
'comment' : <text>
}
Where:
<runnum> : the run number
<calibtype*> : the name of the calibration ('dark', 'flat', 'geometry', etc.)
<text> : an optional comment for the run
    PARAMETERS:
    @param instr: the name of the instrument
    @param exper: the name of the experiment
    @param runnum: the run number (optional)
    """
run_numbers = []
if runnum is None:
run_numbers = [run['num'] for run in experiment_runs(instr, exper)]
else:
run_numbers = [runnum]
result = {}
for runnum in run_numbers:
run_info = {'calibrations': [], 'comment':''}
for attr in run_attributes(instr, exper, runnum, 'Calibrations'):
if attr['name'] == 'comment': run_info['comment'] = attr['val']
elif attr['val'] : run_info['calibrations'].append(attr['name'])
result[runnum] = run_info
return result
|
7f5f3d274c03664e87a13946ea21978ecdd30d74
| 3,647,435
|
def fetch_eia(api_key, plant_id, file_path):
"""
Read in EIA data of wind farm of interest
- from EIA API for monthly productions, return monthly net energy generation time series
- from local Excel files for wind farm metadata, return dictionary of metadata
Args:
api_key(:obj:`string`): 32-character user-specific API key, obtained from EIA
plant_id(:obj:`string`): 5-character EIA power plant code
file_path(:obj:`string`): directory with EIA metadata .xlsx files in 2017
Returns:
:obj:`pandas.Series`: monthly net energy generation in MWh
:obj:`dictionary`: metadata of the wind farm with 'plant_id'
"""
# EIA metadata
plant_var_list = [
"City",
"Latitude",
"Longitude",
"Balancing Authority Name",
"Transmission or Distribution System Owner",
]
wind_var_list = [
"Utility Name",
"Plant Name",
"State",
"County",
"Nameplate Capacity (MW)",
"Operating Month",
"Operating Year",
"Number of Turbines",
"Predominant Turbine Manufacturer",
"Predominant Turbine Model Number",
"Turbine Hub Height (Feet)",
]
def meta_dic_fn(metafile, sheet, var_list):
all_plant = pd.read_excel(file_path + metafile, sheet_name=sheet, skiprows=1)
        eia_plant = all_plant.loc[all_plant["Plant Code"] == int(plant_id)]  # specific wind farm
if eia_plant.shape[0] == 0: # Couldn't locate EIA ID in database
raise Exception("Plant ID not found in EIA database")
eia_info = eia_plant[var_list] # select column
eia_info = eia_info.reset_index(drop=True) # reset index to 0
eia_dic = eia_info.T.to_dict() # convert to dictionary
out_dic = eia_dic[0] # remove extra level of dictionary, "0" in this case
return out_dic
# file path with 2017 EIA metadata files
plant_dic = meta_dic_fn("2___Plant_Y2017.xlsx", "Plant", plant_var_list)
wind_dic = meta_dic_fn("3_2_Wind_Y2017.xlsx", "Operable", wind_var_list)
# convert feet to meter
hubheight_meter = np.round(
unit_conversion.convert_feet_to_meter(wind_dic["Turbine Hub Height (Feet)"])
)
wind_dic.update({"Turbine Hub Height (m)": hubheight_meter})
wind_dic.pop("Turbine Hub Height (Feet)", None) # delete hub height in feet
out_dic = plant_dic.copy()
out_dic.update(wind_dic) # append dictionary
# EIA monthly energy production data
api = eia.API(api_key) # get data from EIA
series_search_m = api.data_by_series(series="ELEC.PLANT.GEN.%s-ALL-ALL.M" % plant_id)
eia_monthly = pd.DataFrame(series_search_m) # net monthly energy generation of wind farm in MWh
eia_monthly.columns = ["eia_monthly_mwh"] # rename column
eia_monthly = eia_monthly.set_index(
pd.DatetimeIndex(eia_monthly.index)
) # convert to DatetimeIndex
return eia_monthly, out_dic
|
cb7543b0ceeacacfd699c94f677dd9c1200c8714
| 3,647,436
|
def calc_dist_mat(e: Extractor, indices: list) -> np.ndarray:
    """
    Calculates the distance matrix among the threads with the specified indices.
    Arguments:
        e : Extractor
            extractor object
        indices : list of ints
            indices corresponding to which threads are present for the distance matrix calculation
    Returns:
        dmat : np.ndarray
            symmetric matrix of mean pairwise distances between threads
    """
# initialize distance matrix
dmat = np.zeros((len(indices), len(indices)))
# calculate dmat, non-diagonals only
for i in range(len(indices)):
for j in range(i+1, len(indices)):
pos1 = e.spool.threads[indices[i]].positions
pos2 = e.spool.threads[indices[j]].positions
            dmat[i, j] = np.linalg.norm(pos1 - pos2, axis=1).mean()
dmat = dmat + dmat.T
return dmat
|
9398990dd0444b6a1d3a00a9c09a08f88d752b83
| 3,647,437
|
def spitzer_conductivity2(nele, tele, znuc, zbar):
"""
Compute the Spitzer conductivity
Parameters:
-----------
    - nele: electron number density [cm⁻³]
- tele [eV]
- znuc: nuclear charge
- zbar: mean ionization
Returns:
--------
- Spitzer conductivity [cm².s⁻¹]
"""
lnLam = coulomb_logarithm(nele, znuc, tele)
return 2e21*tele**(5./2)/(lnLam*nele*(zbar+1))
|
3337515bbb989d8a7fb4994ab9e654781b2a7216
| 3,647,438
|
import re
from collections import defaultdict
def parse_benchmark_results(benchmark_output, min_elements=None, max_elements=None):
"""
:type benchmark_output list[str]
:type min_elements int|None
:type max_elements int|None
:rtype BenchmarkResults
:return The parsed benchmark results file. The data member dict looks like this:
{
benchmark_function_str: {
data_size_int: {
container_type_str: {
num_elements_int: cpu_time_nanoseconds
}
}
}
}
While the sizes_in_bytes and cardinalities members are sorted lists.
"""
def data_type_to_size(data_type):
if data_type == "int":
return 4
elif data_type == "size_16":
return 16
elif data_type == "size_64":
return 64
raise Exception("Unknown type " + data_type)
# Regex for individual iterations of the benchmark
# Group 1: benchmark function name, e.g., BM_vector_sequential_read
# Group 2: container type, e.g., FixedArray<size_16>
# Group 3: data type, e.g., int or size_16
# Group 4: number of elements, between 4 and 16384
# Group 5: clock time in ns
# Group 6: CPU time in ns
# Group 7: iteration count
benchmark_re = re.compile(r"^(\w+)<([\w<>:, ]+), (\w+)>\/(\d+)\s+(\d+) ns\s+(\d+) ns\s+(\d+)$")
data = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(int))))
data_sizes = set()
cardinalities = set()
for line in benchmark_output:
match = benchmark_re.match(line)
if match:
benchmark_fn = match.group(1)
container_type = match.group(2)
if container_type.startswith('std_'):
container_type = container_type.replace('std_', 'std::')
data_size = data_type_to_size(match.group(3))
num_elements = int(match.group(4))
cpu_time = int(match.group(6))
meets_min_requirements = not min_elements or num_elements >= min_elements
meets_max_requirements = not max_elements or num_elements <= max_elements
if meets_min_requirements and meets_max_requirements:
data[benchmark_fn][data_size][container_type][num_elements] = cpu_time
data_sizes.add(data_size)
cardinalities.add(num_elements)
return BenchmarkResults(data=data, sizes_in_bytes=sorted(data_sizes), cardinalities=sorted(cardinalities))
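# A hedged usage sketch. `BenchmarkResults` is external to this snippet, so a
# minimal stand-in is defined here; the sample line follows the Google
# Benchmark console format the regex expects.
from collections import namedtuple

BenchmarkResults = namedtuple("BenchmarkResults",
                              ["data", "sizes_in_bytes", "cardinalities"])

sample = ["BM_vector_sequential_read<std_vector<int>, size_16>/64"
          "        105 ns        104 ns      6400000"]
results = parse_benchmark_results(sample)
# results.data["BM_vector_sequential_read"][16]["std::vector<int>"][64] == 104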
|
e4994e77e61ca67ee47677ba75573ab65199c1d4
| 3,647,439
|
def read_float_with_comma(num):
"""Helper method to parse a float string representation that has
a comma as decimal separator.
Can't use locale as the page being parsed could not be in the
same locale as the python running environment
Args:
num (str): the float string to parse
Returns:
float: the parsed float
"""
return float(num.replace(",", "."))
|
ff2e65ef35ba1fded06d8abb5ed252a6bffdceaa
| 3,647,441
|
def remote_repr(arg):
"""Return the `repr()` rendering of the supplied `arg`."""
return arg
|
d284a0f3a6d08ceae198aacf68554da9cc264b1b
| 3,647,442
|
def log(pathOrURL, limit=None, verbose=False, searchPattern=None, revision=None, userpass=None):
    """
    :param pathOrURL: working copy path or remote url
    :param limit: when the revision is a range, limit the record count
    :param verbose: if True, also return the changed paths of each revision
    :param searchPattern:
        - searches within the limited records (see param limit)
        - matches any of the author, date, or log message text; if verbose is True, also a changed path
        - the search pattern uses "glob syntax" wildcards:
            ?      matches any single character
            *      matches a sequence of arbitrary characters
            [abc]  matches any of the characters listed inside the brackets
    :param revision: single revision number or revision range tuple/list
        - if a range is specified, formats such as (5, 10) and (10, 5) are both supported
        - for (5, 10): the returned list is ordered 5 -> 10
        - for (10, 5): the returned list is ordered 10 -> 5
        - the bound revisions (5 and 10) are also included
        - combined with limit, e.g.:
            revision=(5, 10), limit=2 -> output: 5, 6
            revision=(10, 5), limit=2 -> output: 10, 9
    :param userpass: credentials passed through to the svn command line
    """
cmd = 'log'
cmd += ' ' + pathOrURL
cmd += ' --xml'
if limit is not None:
cmd += ' -l %s' % limit
if verbose:
cmd += ' -v'
if searchPattern is not None:
cmd += ' --search %s' % searchPattern
cmd += ' ' + makeRevisionOptionStr(revision)
cmd += ' ' + makeUserPassOptionStr(userpass)
result = execOutputSubCommand(cmd)
root = ET.fromstring(result)
ret = []
for logentryNode in root.iterfind('logentry'):
logentry = {}
ret.append(logentry)
logentry['#revision'] = logentryNode.attrib['revision']
logentry['author'] = logentryNode.find('author').text
logentry['date'] = logentryNode.find('date').text
logentry['msg'] = logentryNode.find('msg').text
pathsNode = logentryNode.find('paths')
if pathsNode is not None:
paths = []
logentry['paths'] = paths
for path_node in pathsNode.iterfind('path'):
path = {}
paths.append(path)
path['#'] = path_node.text
                path['#prop-mods'] = path_node.attrib['prop-mods'] == 'true'
                path['#text-mods'] = path_node.attrib['text-mods'] == 'true'
path['#kind'] = path_node.attrib['kind']
path['#action'] = path_node.attrib['action']
return ret
|
ba489018ea9e1cdaec62620711421df2aa2c3617
| 3,647,443
|
def value_cards(cards: [Card], trump: Suite, lead_suite: Suite) -> list:
    """Return a list of (card, point value) tuples ranking each card in a hand; the absolute point values do not matter, only their ordering."""
card_values = []
for card in cards:
if vm.is_trump(card, trump):
card_values.append((card, vm.trump_value(card, trump) + 20))
elif card.suite == lead_suite:
card_values.append((card, card.face_card.value + 10))
else:
card_values.append((card, card.face_card.value))
return card_values
|
3d89e1db3dee8a7af881a236c1328b70eb7ef2c7
| 3,647,444
|
import atexit
def mount_raw_image(path):
"""Mount raw image using OS specific methods, returns pathlib.Path."""
loopback_path = None
if PLATFORM == 'Darwin':
loopback_path = mount_raw_image_macos(path)
elif PLATFORM == 'Linux':
loopback_path = mount_raw_image_linux(path)
# Check
if not loopback_path:
std.print_error(f'Failed to mount image: {path}')
# Register unmount atexit
atexit.register(unmount_loopback_device, loopback_path)
# Done
return loopback_path
|
a3923bbebb0ec20a0ed380af54942f9c69071ea0
| 3,647,445
|
import math
import numpy as np
def calculate_weights_indices(in_length, out_length, scale, kernel_width, antialiasing):
"""
Get weights and indices
"""
if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias (requires a larger kernel width)
kernel_width = kernel_width / scale
# Output-space coordinates
x = np.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5+scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
# What is the left-most pixel that can be involved in the computation?
left = np.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
P = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = np.repeat(left.reshape(out_length, 1), P).reshape(out_length, P) + \
np.broadcast_to(np.linspace(
0, P - 1, P).reshape(1, P), (out_length, P))
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = np.repeat(
u.reshape(out_length, 1), P).reshape(out_length, P) - indices
# apply cubic kernel
if (scale < 1) and (antialiasing):
weights = scale * cubic(distance_to_center * scale)
else:
weights = cubic(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = np.sum(weights, 1).reshape(out_length, 1)
weights = weights / np.repeat(weights_sum, P).reshape(out_length, P)
# If a column in weights is all zero, get rid of it. only consider the first and last column.
weights_zero_tmp = np.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
#indices = indices.narrow(1, 1, P - 2)
indices = indices[:, 1:P-1]
#weights = weights.narrow(1, 1, P - 2)
weights = weights[:, 1:P-1]
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
#indices = indices.narrow(1, 0, P - 2)
indices = indices[:, 0:P-1]
#weights = weights.narrow(1, 0, P - 2)
weights = weights[:, 0:P-1]
weights = np.ascontiguousarray(weights)
indices = np.ascontiguousarray(indices)
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
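# A hedged sketch of the `cubic` kernel this function assumes: the standard
# Keys bicubic kernel (a = -0.5) used by MATLAB-style imresize ports.
def cubic(x):
    absx = np.abs(x)
    absx2 = absx ** 2
    absx3 = absx ** 3
    return ((1.5 * absx3 - 2.5 * absx2 + 1) * (absx <= 1) +
            (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2) *
            ((absx > 1) & (absx <= 2)))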
|
bdbbe2ed1b10bad70c116c99524691a450626a8d
| 3,647,446
|
from statsmodels.tsa.stattools import adfuller
def adfuller_test(series, signif=0.05, name='', verbose=False):
    """Perform the Augmented Dickey-Fuller test for stationarity of the given series, print a report (if verbose) and return whether the series is stationary."""
r = adfuller(series, autolag='AIC')
output = {'test_statistic': round(r[0], 4), 'pvalue': round(r[1], 4), 'n_lags': round(r[2], 4), 'n_obs': r[3]}
p_value = output['pvalue']
def adjust(val, length=6):
return str(val).ljust(length)
# Print Summary
if verbose:
print(' Augmented Dickey-Fuller Test on "{}"'.format(name), "\n ", '-' * 47)
print(' Null Hypothesis: Data has unit root. Non-Stationary.')
print(' Significance Level = {}'.format(signif))
print(' Test Statistic = {}'.format(output["test_statistic"]))
print(' No. Lags Chosen = {}'.format(output["n_lags"]))
for key, val in r[4].items():
print(' Critical value {} = {}'.format(adjust(key), round(val, 3)))
if p_value <= signif:
if verbose:
print(" => P-Value = {}. Rejecting Null Hypothesis.".format(p_value))
print(" => Series is Stationary.")
return True
else:
if verbose:
print(" => P-Value = {}. Weak evidence to reject the Null Hypothesis.".format(p_value))
print(" => Series is Non-Stationary.")
return False
|
03c91c771b6f514bf614af69c3f9db607b256498
| 3,647,447
|
def datetime_to_timestring(dt_):
"""
Returns a pretty formatting string from a datetime object.
    For example,
    >>> datetime_to_timestring(datetime.time(hour=9, minute=10, second=30))
    '09:10:30'
:param dt_: :class:`datetime.datetime` or :class:`datetime.time`
:returns: :class:`str`
"""
return pad(dt_.hour)+':'+pad(dt_.minute)+':'+pad(dt_.second)
|
541adb72ee7c8cf1dc2f9755a37c90d6120189e2
| 3,647,448
|
import importlib
from typing import Type
def get_class_for_name(name: str, module_name: str = __name__) -> Type:
"""Gets a class from a module based on its name.
Tread carefully with this. Personally I feel like it's only safe to use
with dataclasses with known interfaces.
Parameters
----------
name : str
Name of the class we're trying to get the class object for.
    module_name: str, optional
        Which module to get a class from, by default __name__.
    Returns
    -------
    Type
        The class object registered under `name` in `module_name`.
"""
this_module = importlib.import_module(module_name)
this_class = getattr(this_module, name)
return this_class
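# A minimal usage sketch: resolve a dataclass defined in this module by name.
# `Point` is hypothetical and only exists for the demonstration.
from dataclasses import dataclass

@dataclass
class Point:
    x: int
    y: int

cls = get_class_for_name("Point", module_name=__name__)
assert cls(1, 2) == Point(1, 2)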
|
73058c179187aac277221b33f4e1e65934a49a6a
| 3,647,449
|
def get_cache_file_static():
"""
Helper function to get the path to the VCR cache file for requests
that must be updated by hand in cases where regular refreshing is
infeasible, i.e. limited access to the real server.
To update this server recording:
1) delete the existing recording
2) re-run all tests (with API keys for telescopes in place)
3) replace any secret information (such as API keys) with dummy values
4) commit recording
"""
return "data/tests/test_server_recordings_static.yaml"
|
44649f243322230a1a750e038d66cef725fbbc9b
| 3,647,450
|
def get_FAAM_mineral_dust_calibration(instrument='PCASP', rtn_values=True):
"""
Retrieve FAAM mineral dust calibration
"""
# Location and name of calibration files?
folder = '{}/FAAM/'.format(get_local_folder('ARNA_data'))
if instrument == 'PCASP':
# NOTE: range ~0.1-4 microns
filename = 'PCASP1_faam_20200128_v001_r000_cal.nc'
# NOTE: dust values are a nc subgroup!
# group = 'bin_cal'
group = 'bin_cal/mineral_dust'
# group = 'flow_cal'
        # The real part of the refractive index was taken as 1.53, which is a
        # common value and is in the OPAC database. It is quite a bit smaller
        # than the 1.547 reported by Weinzierl et al. [2011] but has been shown
        # to have a relatively weak effect on the instrument response. The
        # values of the imaginary part were based on references in Ryder et al.
        # [2019] along with the frequency distribution of k(550nm) presented in
        # fig 9 of Ryder et al. [2013]. So the minimum value was extended from
        # 0.0015i to 0.001i. Calculating the bin boundaries with these multiple
        # Mie curves was done with Gaussian centre-weighted averaging, with
        # 0.001i and 0.0024i being the +/- 2 sigma extreme values.
elif instrument == 'CDP':
# NOTE: range ~4-120 microns
filename = 'CDP1_faam_20200208_v001_r000_cal.nc'
# NOTE: dust values are a nc subgroup!
group = 'master_cal/mineral_dust'
    # Open the dataset and extract the bin widths and centres
ds = xr.open_dataset(folder+filename, group=group)
# Get values for bin centres and widths in microns (1E-6 metres)
BinWidths = ds['dia_width'].values.flatten()
BinCentres = ds['dia_centre'].values.flatten()
d = {'BinWidths': BinWidths, 'BinCentres': BinCentres}
if rtn_values:
return d
else:
return ds
|
e9d7d9241ea7afab00d29e44404904e494141faa
| 3,647,452
|
import joblib
def load_classifier(path=None):
"""
Load the ALLSorts classifier from a pickled file.
...
Parameters
__________
path : str
Path to a pickle object that holds the ALLSorts model.
Default: "/models/allsorts/allsorts.pkl.gz"
Returns
__________
allsorts_clf : ALLSorts object
ALLSorts object, unpacked, ready to go.
"""
if not path:
path = str(root_dir()) + "/models/allsorts/allsorts.pkl.gz"
message("Loading classifier...")
allsorts_clf = joblib.load(path)
return allsorts_clf
|
f74402cea1cb329036b9e95c8c6264ee15584c65
| 3,647,453
|
import requests
import time
def get_response(url: str, *, max_attempts=5) -> requests.Response:
"""Return the response.
Tries to get response max_attempts number of times, otherwise return None
Args:
url (str): url string to be retrieved
        max_attempts (int): number of request attempts for the same url
E.g.,
r = get_response(url)
r = xmltodict.parse(r.text)
# or
r = json.load(r.text)
"""
    for _ in range(max_attempts):
        try:
            return requests.get(url, timeout=10)
        except requests.RequestException:
            time.sleep(0.01)
    # all attempts failed
    return None
|
7d5a01cd3535fbdae9bc0e502409300dd05be76c
| 3,647,454
|
def hamming_distance(lhs, rhs):
    """Return the Hamming distance of two strings, compared position-wise up to
    the length of the shorter string.
    Usage
    >>> nt.hamming_distance('Pear', 'Pearls')
    0
    """
    return sum(1 for x, y in zip(lhs, rhs) if x != y)
|
8bf24f47c829169cfaa89af755b7722eb26155d9
| 3,647,455
|
def get_uleb128(byte_str):
"""
Gets a unsigned leb128 number from byte sting
:param byte_str: byte string
:return: byte string, integer
"""
uleb_parts = []
while byte_str[0] >= 0x80:
uleb_parts.append(byte_str[0] - 0x80)
byte_str = byte_str[1:]
uleb_parts.append(byte_str[0])
byte_str = byte_str[1:]
uleb_parts = uleb_parts[::-1]
integer = 0
for i in range(len(uleb_parts) - 1):
integer = (integer + uleb_parts[i]) << 7
integer += uleb_parts[-1]
return byte_str, integer
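# A hedged round-trip check with a minimal ULEB128 encoder (the encoder is
# not part of the original snippet):
def encode_uleb128(value):
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)

rest, n = get_uleb128(encode_uleb128(624485) + b"\x2a")
assert n == 624485 and rest == b"\x2a"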
|
1e9c02dc7c191686e7d7a19d8b8c82f95044c845
| 3,647,456
|
def expired_response():
"""
Expired token callback.
Author:
Lucas Antognoni
Arguments:
Response:
json
{
'error': (boolean),
'message': (str)
}
Response keys:
- 'error': True.
- 'message': Error message.
"""
return jsonify({
'error': True,
'message': 'Token has expired'
}), 401
|
1cf4ecc4ea0ee9ca51379d0990ff957f558f1557
| 3,647,457
|
def check_shots_vs_bounds(shot_dict, mosaic_bounds, max_out_of_bounds = 3):
"""Checks whether all but *max_out_of_bounds* shots are within mosaic bounds
Parameters
----------
shot_dict : dict
A dictionary (see czd_utils.scancsv_to_dict()) with coordinates of all
shots in a .scancsv file:
{shot: [x_coords, y_coords], ...}
mosaic_bounds : list
A list of bounds to a .Align file (see get_mos_bounds()):
[min_x, max_x, min_y, max_y]
    max_out_of_bounds : int, optional
        Max number of out-of-bounds shots allowed for a 'match' between
        mosaic and .scancsv. The default is 3.
    Returns
    -------
    bool
        True or False, depending on whether all but *max_out_of_bounds*
        shots are within mosaic bounds
"""
total_out_of_bounds = 0
min_x, max_x, min_y, max_y = mosaic_bounds
for eachcoords in shot_dict.values():
if not min_x <= eachcoords[0] <= max_x or not min_y <= eachcoords[1] <= max_y:
total_out_of_bounds += 1
return total_out_of_bounds <= max_out_of_bounds
|
de36f7f2a32a2a7120236d0bd5e43520de0c7ea5
| 3,647,458
|
import numpy as np
import torch
def wrap(func, *args, unsqueeze=False):
    """
    Wrap a torch function so it can be called with NumPy arrays.
    Input and return types are seamlessly converted.
    :param func: torch function to call
    :param args: positional arguments; np.ndarray values are converted to tensors
    :param unsqueeze: if True, add a leading batch dimension to converted inputs
        and strip it from the outputs
    :return: the result of func, with tensors converted back to NumPy arrays
    """
# Convert input types where applicable
args = list(args)
for i, arg in enumerate(args):
if type(arg) == np.ndarray:
args[i] = torch.from_numpy(arg)
if unsqueeze:
args[i] = args[i].unsqueeze(0)
result = func(*args)
# Convert output types where applicable
if isinstance(result, tuple):
result = list(result)
for i, res in enumerate(result):
if type(res) == torch.Tensor:
if unsqueeze:
res = res.squeeze(0)
result[i] = res.numpy()
return tuple(result)
elif type(result) == torch.Tensor:
if unsqueeze:
result = result.squeeze(0)
return result.numpy()
else:
return result
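# A minimal usage sketch: call a torch op on a NumPy array.
out = wrap(torch.sigmoid, np.array([0.0, 1.0], dtype=np.float32))
# out is a np.ndarray: array([0.5, 0.7310586], dtype=float32)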
|
a611458daea9b0ec780237a102b00f126370ffc4
| 3,647,459
|
from typing import Iterable
from typing import Tuple
from typing import Any
def iter_schemas(schema: Schema, strict_enums: bool = True) -> Iterable[Tuple[str, Any]]:
"""
Build zero or more JSON schemas for a marshmallow schema.
Generates: name, schema pairs.
"""
builder = Schemas(build_parameter=build_parameter, strict_enums=strict_enums)
return builder.iter_schemas(schema)
|
a0f203d00caa74562d0ff6fa077b236b23a2946b
| 3,647,460
|
import dill
def deserializer(serialized):
"""Example deserializer function with extra sanity checking.
:param serialized: Serialized byte string.
:type serialized: bytes
:return: Deserialized job object.
:rtype: kq.Job
"""
    assert isinstance(serialized, bytes), "Expecting bytes"
return dill.loads(serialized)
|
8895a1c40eaf5e30dd10015b87a0b94da0edf9ac
| 3,647,461
|
def sym_auc_score(X, y):
"""Compute the symmetric auroc score for the provided sample.
symmetric auroc score is defined as 2*abs(auroc-0.5)
Parameters
----------
    X : {array-like, sparse matrix} shape = [n_samples, n_features]
        The data matrix whose features will be scored.
    y : array of shape(n_samples)
        The binary target labels.
Returns
-------
F : array, shape = [n_features,]
The set of auroc scores.
"""
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'])
scores = np.apply_along_axis(_auc_score, 0, X, y)
return np.abs(scores - 0.5) * 2.0
|
77427e57fc737a0daffe8b966b51ad0ae3602ceb
| 3,647,462
|
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def visibility_of_element_wait(driver, xpath, timeout=10):
"""Checking if element specified by xpath is visible on page
:param driver: webdriver instance
:param xpath: xpath of web element
:param timeout: time after looking for element will be stopped (default: 10)
:return: first element in list of found elements
"""
timeout_message = f"Element for xpath: '{xpath}' and url: {driver.current_url} not found in {timeout} seconds"
locator = (By.XPATH, xpath)
element_located = EC.visibility_of_element_located(locator)
wait = WebDriverWait(driver, timeout)
return wait.until(element_located, timeout_message)
|
964c2254af36361fb2390e4192208ec3e5f02a2d
| 3,647,463
|
def _read_byte(stream):
"""Read byte from stream"""
read_byte = stream.read(1)
if not read_byte:
        raise EOFError('No more bytes!')
return ord(read_byte)
|
767766ef0d7a52c41b7686f994a503bc8cc7fe8d
| 3,647,464
|
from collections import OrderedDict
from copy import deepcopy
from operator import itemgetter
import calendar
import datetime
import json

import openpyxl
import pytz
import xlwt

import directions.models as d
from directions.models import Issledovaniya
from django.utils.text import Truncator
def statistic_xls(request):
"""Генерация XLS"""
wb = xlwt.Workbook(encoding='utf-8')
response = HttpResponse(content_type='application/ms-excel')
request_data = request.POST if request.method == "POST" else request.GET
pk = request_data.get("pk", "")
tp = request_data.get("type", "")
date_start_o = request_data.get("date-start", "")
date_end_o = request_data.get("date-end", "")
users_o = request_data.get("users", "[]")
user_o = request_data.get("user")
date_values_o = request_data.get("values", "{}")
date_type = request_data.get("date_type", "d")
depart_o = request_data.get("department")
if tp == 'lab' and pk == '0':
tp = 'all-labs'
symbols = (u"абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ", u"abvgdeejzijklmnoprstufhzcss_y_euaABVGDEEJZIJKLMNOPRSTUFHZCSS_Y_EUA") # Словарь для транслитерации
tr = {ord(a): ord(b) for a, b in zip(*symbols)} # Перевод словаря для транслита
borders = xlwt.Borders()
borders.left = xlwt.Borders.THIN
borders.right = xlwt.Borders.THIN
borders.top = xlwt.Borders.THIN
borders.bottom = xlwt.Borders.THIN
if "-" in date_start_o:
date_start_o = normalize_date(date_start_o)
date_end_o = normalize_date(date_end_o)
date_start, date_end = try_parse_range(date_start_o, date_end_o)
if date_start and date_end and tp not in ["lab_sum", "covid_sum", "lab_details"]:
delta = date_end - date_start
if abs(delta.days) > 60:
slog.Log(key=tp, type=101, body=json.dumps({"pk": pk, "date": {"start": date_start_o, "end": date_end_o}}), user=request.user.doctorprofile).save()
return JsonResponse({"error": "period max - 60 days"})
if date_start_o != "" and date_end_o != "":
slog.Log(key=tp, type=100, body=json.dumps({"pk": pk, "date": {"start": date_start_o, "end": date_end_o}}), user=request.user.doctorprofile).save()
    # Report on the dynamics of test results
if tp == "directions_list_dynamic":
pk = json.loads(pk)
dn = Napravleniya.objects.filter(pk__in=pk)
cards = {}
napr_client = set()
depart_napr = OrderedDict()
depart_fraction = OrderedDict()
one_param = "one_param"
for d in dn:
if d.department() is None or d.department().p_type != 2:
continue
c = d.client
napr_client.add(c.pk)
            # Check that all referrals belong to one card, and that the type is "Laboratory"
if len(napr_client) > 1:
response['Content-Disposition'] = str.translate("attachment; filename=\"Назначения.xls\"", tr)
ws = wb.add_sheet("Вакцинация")
row_num = 0
row = [
("Пациент", 7000),
("Карта", 6000),
("Направление", 4000),
("Дата", 4000),
("Назначение", 7000),
]
wb.save(response)
return response
            # Distribute referrals across departments: "depart_napr"
            # {BIO: [ref1, ref2, ref3], KDL: [ref11, ref21, ref31], IMM: [ref41, ref42, ref43]}
tmp_num_dir = []
department_title = d.department().id
department_id = d.department().id
if department_title in depart_napr.keys():
tmp_num_dir = depart_napr.get(department_title)
tmp_num_dir.append(d.pk)
depart_napr[department_title] = tmp_num_dir
else:
tmp_num_dir.append(d.pk)
depart_napr[department_title] = tmp_num_dir
            # From the studies we build the structure "depart_fraction":
            # the future Excel headers. Studies with a single fraction go into a shared sub-dict;
            # those with more than one parameter (fraction) get their own sub-dicts.
            # Taken from the reference catalogue, NOT from "Results".
            # Example structure: {biochem: {single-fraction services: [fr1-svc1, fr2-svc2, fr3-svc3],
            #                    svc1: [fr1, fr2, fr3], svc2: [fr1, fr2, fr3],
            #                    svc2: [fr1, fr2, fr3], svc2: [fr1, fr2, fr3]}
            # Fractions are ordered "by weight".
one_param_temp = OrderedDict()
for i in Issledovaniya.objects.filter(napravleniye=d):
dict_research_fraction = OrderedDict()
research_iss = i.research_id
dict_research_fraction = {
p: str(t) + ',' + str(u) for p, t, u in directory.Fractions.objects.values_list('pk', 'title', 'units').filter(research=i.research).order_by("sort_weight")
}
if depart_fraction.get(department_id) is not None:
if len(dict_research_fraction.keys()) == 1:
one_param_temp = depart_fraction[department_id][one_param]
one_param_temp.update(dict_research_fraction)
depart_fraction[department_id].update({one_param: one_param_temp})
else:
depart_fraction[department_id].update({research_iss: dict_research_fraction})
else:
depart_fraction.update({department_id: {}})
if len(dict_research_fraction) == 1:
depart_fraction[department_id].update({one_param: dict_research_fraction})
else:
depart_fraction[department_id].update({research_iss: dict_research_fraction})
depart_fraction[department_id].update({one_param: {}})
        # All possible tests in the referrals - structure A
        # referrals by laboratory (laboratory type, [referral numbers])
obj = []
for type_lab, l_napr in depart_napr.items():
a = [
[p, r, n, datetime.datetime.strftime(utils.localtime(t), "%d.%m.%y")]
for p, r, n, t in Issledovaniya.objects.values_list('pk', 'research_id', 'napravleniye_id', 'time_confirmation').filter(napravleniye_id__in=l_napr)
]
obj.append(a)
for i in obj:
for j in i:
result_k = {fr_id: val for fr_id, val in Result.objects.values_list('fraction', 'value').filter(issledovaniye_id=j[0])}
j.append(result_k)
finish_obj = []
for i in obj:
for j in i:
j.pop(0)
finish_obj.append(j)
        # Build the structure {lab type: {test id: {(referral, date): {fraction id: result, fraction id: result}}}}
finish_ord = OrderedDict()
for t_lab, name_iss in depart_fraction.items():
finish_ord[t_lab] = {}
for iss_id, fract_dict in name_iss.items():
if fract_dict:
frac = True
else:
frac = False
finish_ord[t_lab][iss_id] = {}
opinion_dict = {
(
'напр',
'дата',
): fract_dict
}
val_dict = fract_dict.copy()
finish_ord[t_lab][iss_id].update(opinion_dict)
for k, v in fract_dict.items():
val_dict[k] = ''
                # Build the structure {test id: {(referral, date,): {fraction id: result, fraction id: result}}}
                # for tests that have several parameter fractions (CBC, urinalysis), i.e. everything except one_param
if iss_id != 'one_param' or iss_id != '' or iss_id is not None:
for d in finish_obj:
tmp_dict = {}
if iss_id == d[0]:
for i, j in d[3].items():
val_dict[i] = j
tmp_dict[
(
d[1],
d[2],
)
] = deepcopy(val_dict)
finish_ord[t_lab][iss_id].update(tmp_dict)
                # Build the structure {one_param: {(referral, date,): {fraction id: result, fraction id: result}}}
                # one_param holds the tests with only one parameter fraction (cholesterol, glucose, etc.)
key_tuple = (
(
0,
0,
),
)
if iss_id == 'one_param' and frac:
tmp_dict = {}
for d in finish_obj:
if key_tuple != (
d[1],
d[2],
):
for k, v in fract_dict.items():
val_dict[k] = ''
for u, s in val_dict.items():
if d[3].get(u):
val_dict[u] = d[3].get(u)
tmp_dict[
(
d[1],
d[2],
)
] = deepcopy(val_dict)
key_tuple = (
d[1],
d[2],
)
finish_ord[t_lab][iss_id].update(tmp_dict)
response['Content-Disposition'] = str.translate("attachment; filename=\"Назначения.xls\"", tr)
font_style = xlwt.XFStyle()
font_style.alignment.wrap = 1
font_style.borders = borders
font_style_b = xlwt.XFStyle()
font_style_b.alignment.wrap = 1
font_style_b.font.bold = True
font_style_b.borders = borders
ws = wb.add_sheet("Динамика")
row_num = 0
for k, v in finish_ord.items():
col_num = 0
ws.write(row_num, 0, label=Podrazdeleniya.objects.values_list('title').get(pk=k))
row_num += 1
col_num = 0
for name_iss, fr_id in v.items():
if name_iss != 'one_param':
ws.write(row_num, 0, label=Researches.objects.values_list('title').get(pk=name_iss))
else:
ws.write(row_num, 0, label=name_iss)
row_num += 1
a, b = '', ''
for i, j in fr_id.items():
col_num = 0
a, b = i
ws.write(row_num, col_num, label=a)
col_num += 1
ws.write(row_num, col_num, label=b)
ss = ''
for g, h in j.items():
col_num += 1
ss = str(h)
ws.write(row_num, col_num, label=ss)
row_num += 1
col_num += 1
row_num += 1
row_num += 1
if tp == "directions_list":
pk = json.loads(pk)
dn = Napravleniya.objects.filter(pk__in=pk)
cards = {}
for d in dn:
c = d.client
if c.pk not in cards:
cards[c.pk] = {
"card": c.number_with_type(),
"fio": c.individual.fio(),
"bd": c.individual.bd(),
"hn": d.history_num,
"d": {},
}
cards[c.pk]["d"][d.pk] = {
"r": [],
"dn": str(dateformat.format(d.data_sozdaniya.date(), settings.DATE_FORMAT)),
}
for i in Issledovaniya.objects.filter(napravleniye=d):
cards[c.pk]["d"][d.pk]["r"].append(
{
"title": i.research.title,
}
)
response['Content-Disposition'] = str.translate("attachment; filename=\"Назначения.xls\"", tr)
font_style = xlwt.XFStyle()
font_style.alignment.wrap = 1
font_style.borders = borders
font_style_b = xlwt.XFStyle()
font_style_b.alignment.wrap = 1
font_style_b.font.bold = True
font_style_b.borders = borders
ws = wb.add_sheet("Вакцинация")
row_num = 0
row = [
("Пациент", 7000),
("Карта", 6000),
("Направление", 4000),
("Дата", 4000),
("Назначение", 7000),
]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num][0], font_style_b)
ws.col(col_num).width = row[col_num][1]
row_num += 1
for ck in cards.keys():
c = cards[ck]
started = False
for dk in c["d"].keys():
if not started:
row = [
"{} {}".format(c["fio"], c["bd"]),
c["card"],
]
started = True
else:
row = ["", ""]
s2 = False
for r in c["d"][dk]["r"]:
if not s2:
s2 = True
row.append(str(dk))
row.append(c["d"][dk]["dn"])
else:
row.append("")
row.append("")
row.append("")
row.append("")
row.append(r["title"])
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
row_num += 1
row = []
if tp == "statistics-visits":
date_start, date_end = try_parse_range(date_start_o, date_end_o)
t = request.GET.get("t", "sum")
fio = request.user.doctorprofile.get_full_fio()
dep = request.user.doctorprofile.podrazdeleniye.get_title()
dirs = Napravleniya.objects.filter(
visit_date__range=(
date_start,
date_end,
),
visit_who_mark=request.user.doctorprofile,
).order_by("visit_date")
if t == "sum":
response['Content-Disposition'] = str.translate("attachment; filename=\"Суммарный отчёт по посещениям.xls\"", tr)
font_style = xlwt.XFStyle()
font_style.alignment.wrap = 1
font_style.borders = borders
font_style_b = xlwt.XFStyle()
font_style_b.alignment.wrap = 1
font_style_b.font.bold = True
font_style_b.borders = borders
ws = wb.add_sheet("Посещения")
row_num = 0
row = [
(fio, 7000),
(dep, 7000),
("", 3000),
]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num][0], font_style)
ws.col(col_num).width = row[col_num][1]
row_num += 1
row = [
date_start_o + " - " + date_end_o,
"",
"",
]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
row_num += 1
row = [
"",
"",
"",
]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style if col_num > 0 else font_style_b)
row_num += 1
row = [
"Услуга",
"Источник финансирования",
"Количество",
]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style_b)
row_num += 1
iss = {}
for d in dirs:
for i in Issledovaniya.objects.filter(napravleniye=d).order_by("research__title").order_by("napravleniye__istochnik_f"):
rt = i.research.title
istf = i.napravleniye.istochnik_f.base.title + " - " + i.napravleniye.fin_title
if rt not in iss:
iss[rt] = {}
if istf not in iss[rt]:
iss[rt][istf] = 0
iss[rt][istf] += 1
for k in iss:
for istf in iss[k]:
row = [
k,
istf,
iss[k][istf],
]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
row_num += 1
elif tp == "vac":
date_start, date_end = try_parse_range(date_start_o, date_end_o)
response['Content-Disposition'] = str.translate("attachment; filename=\"Вакцинация.xls\"", tr)
font_style = xlwt.XFStyle()
font_style.alignment.wrap = 1
font_style.borders = borders
font_style_b = xlwt.XFStyle()
font_style_b.alignment.wrap = 1
font_style_b.font.bold = True
font_style_b.borders = borders
ts = ["Название", "Доза", "Серия", "Срок годности", "Способ введения", "Дата постановки вакцины"]
ws = wb.add_sheet("Вакцинация")
row_num = 0
row = [("Исполнитель", 6000), ("Подтверждено", 5000), ("RMIS UID", 5000), ("Вакцина", 5000), ("Код", 4000)]
for t in ts:
row.append((t, 4000))
row.append(("Этап", 2500))
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num][0], font_style_b)
ws.col(col_num).width = row[col_num][1]
row_num += 1
for i in Issledovaniya.objects.filter(
research__podrazdeleniye__vaccine=True,
time_confirmation__range=(
date_start,
date_end,
),
).order_by("time_confirmation"):
if i.napravleniye:
row = [
i.doc_confirmation_fio,
i.time_confirmation.astimezone(pytz.timezone(settings.TIME_ZONE)).strftime("%d.%m.%Y %X"),
i.napravleniye.client.individual.get_rmis_uid_fast(),
i.research.title,
i.research.code,
]
else:
continue
v = {}
for p in ParaclinicResult.objects.filter(issledovaniye=i):
field_type = p.get_field_type()
if p.field.get_title(force_type=field_type) in ts:
if field_type == 1:
v_date = p.value.replace("-", ".")
v[p.field.get_title(force_type=field_type)] = v_date
else:
v[p.field.get_title(force_type=field_type)] = p.value
for t in ts:
row.append(v.get(t, ""))
row.append("V")
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
row_num += 1
elif tp == "statistics-tickets-print":
data_date = request_data.get("date_values")
data_date = json.loads(data_date)
if request_data.get("date_type") == 'd':
d1 = datetime.datetime.strptime(data_date['date'], '%d.%m.%Y')
d2 = datetime.datetime.strptime(data_date['date'], '%d.%m.%Y')
month_obj = ''
else:
month_obj = int(data_date['month']) + 1
_, num_days = calendar.monthrange(int(data_date['year']), month_obj)
d1 = datetime.date(int(data_date['year']), month_obj, 1)
d2 = datetime.date(int(data_date['year']), month_obj, num_days)
type_fin = request_data.get("fin")
title_fin = IstochnikiFinansirovaniya.objects.filter(pk=type_fin).first()
if title_fin.title == 'ОМС' and title_fin.base.internal_type:
can_null = 1
else:
can_null = 0
users_o = json.loads(user_o)
us_o = None
if users_o != -1:
us = int(users_o)
us_o = [DoctorProfile.objects.get(pk=us)]
elif depart_o != -1:
depart = Podrazdeleniya.objects.get(pk=depart_o)
us_o = DoctorProfile.objects.filter(podrazdeleniye=depart)
wb = openpyxl.Workbook()
wb.remove(wb.get_sheet_by_name('Sheet'))
styles_obj = structure_sheet.style_sheet()
wb.add_named_style(styles_obj[0])
start_date = datetime.datetime.combine(d1, datetime.time.min)
end_date = datetime.datetime.combine(d2, datetime.time.max)
        # Check that the object's role is Physician-Lab Technician, Lab Technician, Paraclinical Physician, or Attending Physician
if us_o:
for i in us_o:
if i.is_member(["Лечащий врач", "Врач-лаборант", "Врач параклиники", "Лаборант", "Врач консультаций"]):
res_oq = sql_func.direct_job_sql(i.pk, start_date, end_date, type_fin, can_null)
res_job = sql_func.indirect_job_sql(i.pk, start_date, end_date)
if res_job:
ws = wb.create_sheet(f'{i.get_fio()}-Косвенные')
ws = structure_sheet.inderect_job_base(ws, i, d1, d2)
dict_job = {}
for r_j in res_job:
key_type_job = r_j[1]
key_date = utils.strfdatetime(r_j[0], "%d.%m.%Y")
value_total = r_j[2]
temp_dict = dict_job.get(key_date, {})
temp_dict.update({key_type_job: value_total})
dict_job[key_date] = temp_dict
structure_sheet.inderect_job_data(ws, dict_job)
ws = wb.create_sheet(i.get_fio())
ws = structure_sheet.statistics_tickets_base(ws, i, type_fin, d1, d2, styles_obj[0], styles_obj[1])
ws = structure_sheet.statistics_tickets_data(ws, res_oq, i, styles_obj[2])
if month_obj:
                    # issledovaniye_id(0), research_id(1), date_confirm(2), doc_confirmation_id(3), def_uet(4),
                    # co_executor_id(5), co_executor_uet(6), co_executor2_id(7), co_executor2_uet(8), research_id(9),
                    # research_title(10), research - co_executor_2_title(11)
                    # build the structure {date: {test name: UET for that date, CO2: UET for that date}}
total_report_dict = OrderedDict()
r_sql = sql_func.total_report_sql(i.pk, start_date, end_date, type_fin)
titles_set = OrderedDict()
for n in r_sql:
titles_set[n[10]] = ''
titles_set[n[11]] = ''
temp_uet, temp_uet2 = 0, 0
if i.pk == n[3]:
temp_uet = n[4] if n[4] else 0
if i.pk == n[5] and n[5] != n[3]:
temp_uet = n[6] if n[6] else 0
if i.pk == n[7]:
temp_uet2 = n[8] if n[8] else 0
                        # try to get the values for this date
if total_report_dict.get(n[2]):
temp_d = total_report_dict.get(n[2])
                            # try to get the same tests
current_uet = temp_d.get(n[10], 0)
current_uet2 = temp_d.get(n[11], 0)
current_uet = current_uet + temp_uet
current_uet2 = current_uet2 + temp_uet2
temp_dict = {n[10]: current_uet, n[11]: current_uet2}
total_report_dict[int(n[2])].update(temp_dict)
else:
total_report_dict[int(n[2])] = {n[10]: temp_uet, n[11]: temp_uet2}
titles_list = list(titles_set.keys())
ws = wb.create_sheet(i.get_fio() + ' - Итог')
ws = structure_sheet.job_total_base(ws, month_obj, type_fin)
ws, cell_research = structure_sheet.jot_total_titles(ws, titles_list)
ws = structure_sheet.job_total_data(ws, cell_research, total_report_dict)
response['Content-Disposition'] = str.translate("attachment; filename=\"Статталоны.xlsx\"", tr)
wb.save(response)
return response
elif tp == "statistics-passed":
d_s = request_data.get("date-start")
d_e = request_data.get("date-end")
d1 = datetime.datetime.strptime(d_s, '%d.%m.%Y')
d2 = datetime.datetime.strptime(d_e, '%d.%m.%Y')
start_date = datetime.datetime.combine(d1, datetime.time.min)
end_date = datetime.datetime.combine(d2, datetime.time.max)
passed_oq = sql_func.passed_research(start_date, end_date)
wb = openpyxl.Workbook()
wb.remove(wb.get_sheet_by_name('Sheet'))
ws = wb.create_sheet(f'{d_s}-{d_e}')
ws = structure_sheet.passed_research_base(ws, d_s)
ws = structure_sheet.passed_research_data(ws, passed_oq)
response['Content-Disposition'] = str.translate("attachment; filename=\"Движения.xlsx\"", tr)
wb.save(response)
return response
elif tp == "call-patient":
return call_patient.call_patient(request_data, response, tr, COVID_QUESTION_ID)
elif tp == "swab-covidt":
return swab_covid.swab_covid(request_data, response, tr, COVID_QUESTION_ID)
elif tp == "cert-not-workt":
return cert_notwork.cert_notwork(request_data, response, tr, COVID_QUESTION_ID)
elif tp == "statistics-onco":
d_s = request_data.get("date-start")
d_e = request_data.get("date-end")
d1 = datetime.datetime.strptime(d_s, '%d.%m.%Y')
d2 = datetime.datetime.strptime(d_e, '%d.%m.%Y')
start_date = datetime.datetime.combine(d1, datetime.time.min)
end_date = datetime.datetime.combine(d2, datetime.time.max)
onco_query = sql_func.disp_diagnos('U999', start_date, end_date)
wb = openpyxl.Workbook()
wb.remove(wb.get_sheet_by_name('Sheet'))
ws = wb.create_sheet(f'{d_s}-{d_e}')
ws = structure_sheet.onco_base(ws, d_s, d_e)
ws = structure_sheet.passed_onco_data(ws, onco_query)
response['Content-Disposition'] = str.translate("attachment; filename=\"Онкоподозрения.xlsx\"", tr)
wb.save(response)
return response
elif tp == "statistics-research":
response['Content-Disposition'] = str.translate("attachment; filename=\"Услуги.xlsx\"", tr)
pk = request_data.get("research")
user_groups = request.user.groups.values_list('name', flat=True)
research_id = int(pk)
data_date = request_data.get("date_values")
data_date = json.loads(data_date)
if request_data.get("date_type") == 'd':
d1 = datetime.datetime.strptime(data_date['date'], '%d.%m.%Y')
d2 = datetime.datetime.strptime(data_date['date'], '%d.%m.%Y')
month_obj = ''
else:
month_obj = int(data_date['month']) + 1
_, num_days = calendar.monthrange(int(data_date['year']), month_obj)
d1 = datetime.date(int(data_date['year']), month_obj, 1)
d2 = datetime.date(int(data_date['year']), month_obj, num_days)
wb = openpyxl.Workbook()
wb.remove(wb.get_sheet_by_name('Sheet'))
ws = wb.create_sheet("Отчет")
research_title = Researches.objects.values_list('title').get(pk=research_id)
start_date = datetime.datetime.combine(d1, datetime.time.min)
end_date = datetime.datetime.combine(d2, datetime.time.max)
hospital_id = request.user.doctorprofile.hospital_id
if 'Статистика-все МО' in user_groups:
hospital_id = -1
if research_id == DEATH_RESEARCH_PK:
if 'Свидетельство о смерти-доступ' not in user_groups:
return JsonResponse({"error": "Нет доступа к данному отчету"})
if 'Статистика свидетельство о смерти-все МО' in user_groups:
hospital_id = -1
researches_sql = sql_func.statistics_death_research(research_id, start_date, end_date, hospital_id)
unique_issledovaniya = get_unique_directions(researches_sql)
child_iss = get_expertis_child_iss_by_issledovaniya(unique_issledovaniya) if unique_issledovaniya else None
expertise_final_data = {}
if child_iss:
data = {i.child_iss: i.parent_id for i in child_iss}
child_iss_tuple = tuple(set([i.child_iss for i in child_iss]))
result_expertise = get_expertis_results_by_issledovaniya(child_iss_tuple)
result_val = {}
for i in result_expertise:
if not result_val.get(i.issledovaniye_id, ""):
result_val[i.issledovaniye_id] = "Экспертиза;"
if i.value.lower() == "да":
result_val[i.issledovaniye_id] = f"{result_val[i.issledovaniye_id]} {i.title};"
for k, v in result_val.items():
if not expertise_final_data.get(data.get(k, "")):
expertise_final_data[data.get(k)] = ""
expertise_final_data[data.get(k)] = f"{expertise_final_data[data.get(k)]} {v}"
data_death = death_form_result_parse(researches_sql, reserved=False)
wb.remove(wb.get_sheet_by_name('Отчет'))
ws = wb.create_sheet("По документам")
ws = structure_sheet.statistic_research_death_base(ws, d1, d2, research_title[0])
ws = structure_sheet.statistic_research_death_data(ws, data_death, expertise_final_data)
reserved_researches_sql = sql_func.statistics_reserved_number_death_research(research_id, start_date, end_date, hospital_id)
data_death_reserved = death_form_result_parse(reserved_researches_sql, reserved=True)
ws2 = wb.create_sheet("Номера в резерве")
ws2 = structure_sheet.statistic_reserved_research_death_base(ws2, d1, d2, research_title[0])
ws2 = structure_sheet.statistic_reserved_research_death_data(ws2, data_death_reserved)
card_has_death_date = sql_func.card_has_death_date(research_id, start_date, end_date)
card_tuple = tuple(set([i.id for i in card_has_death_date]))
if card_tuple:
temp_data = sql_func.statistics_death_research_by_card(research_id, card_tuple, hospital_id)
prev_card = None
prev_direction = None
final_data = []
count = 0
for k in temp_data:
if k.client_id == prev_card and prev_direction != k.napravleniye_id and count != 0:
continue
else:
final_data.append(k)
prev_card = k.client_id
prev_direction = k.napravleniye_id
count += 1
data_death_card = death_form_result_parse(final_data, reserved=False)
ws3 = wb.create_sheet("По людям")
ws3 = structure_sheet.statistic_research_death_base_card(ws3, d1, d2, research_title[0])
ws3 = structure_sheet.statistic_research_death_data_card(ws3, data_death_card)
else:
ws = structure_sheet.statistic_research_base(ws, d1, d2, research_title[0])
researches_sql = sql_func.statistics_research(research_id, start_date, end_date, hospital_id)
ws = structure_sheet.statistic_research_data(ws, researches_sql)
elif tp == "journal-get-material":
access_to_all = 'Просмотр статистики' in request.user.groups.values_list('name', flat=True) or request.user.is_superuser
users = [x for x in json.loads(users_o) if (access_to_all or (x.isdigit() and int(x) == request.user.doctorprofile.pk)) and DoctorProfile.objects.filter(pk=x).exists()]
date_values = json.loads(date_values_o)
monthes = {
"0": "Январь",
"1": "Февраль",
"2": "Март",
"3": "Апрель",
"4": "Май",
"5": "Июнь",
"6": "Июль",
"7": "Август",
"8": "Сентябрь",
"9": "Октябрь",
"10": "Ноябрь",
"11": "Декабрь",
}
date_values["month_title"] = monthes[date_values["month"]]
response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Забор_биоматериала.xls\"", tr)
font_style = xlwt.XFStyle()
font_style.alignment.wrap = 1
font_style.borders = borders
font_style_b = xlwt.XFStyle()
font_style_b.alignment.wrap = 1
font_style_b.font.bold = True
font_style_b.borders = borders
for user_pk in users:
user_row = DoctorProfile.objects.get(pk=user_pk)
ws = wb.add_sheet("{} {}".format(user_row.get_fio(dots=False), user_pk))
row_num = 0
row = [("Исполнитель: ", 4000), (user_row.get_full_fio(), 7600)]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num][0], font_style)
ws.col(col_num).width = row[col_num][1]
row_num += 1
row = ["Подразделение: ", user_row.podrazdeleniye.title]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
row_num += 1
row = ["Дата: ", date_values["date"] if date_type == "d" else "{month_title} {year}".format(**date_values)]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
daterow = row_num
row_num += 3
row = [
("№", 4000),
("ФИО", 7600),
("Возраст", 3000),
("Карта", 6000),
("Число направлений", 5000),
("Номера направлений", 6000),
("Наименования исследований", 20000),
]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num][0], font_style_b)
ws.col(col_num).width = row[col_num][1]
row_num += 1
if date_type == "d":
day = date_values.get("date", "01.01.2015")
day1 = datetime.date(int(day.split(".")[2]), int(day.split(".")[1]), int(day.split(".")[0]))
day2 = day1 + datetime.timedelta(days=1)
elif date_type == "m":
month = int(date_values.get("month", "0")) + 1
next_m = month + 1 if month < 12 else 1
year = int(date_values.get("year", "2015"))
next_y = year + 1 if next_m == 1 else year
day1 = datetime.date(year, month, 1)
day2 = datetime.date(next_y, next_m, 1)
else:
day1 = day2 = timezone.now()
iss_list = (
Issledovaniya.objects.filter(tubes__doc_get=user_row, tubes__time_get__isnull=False, tubes__time_get__range=(day1, day2))
.order_by("napravleniye__client__individual__patronymic", "napravleniye__client__individual__name", "napravleniye__client__individual__family")
.distinct()
)
patients = {}
for iss in iss_list:
k = iss.napravleniye.client.individual_id
if k not in patients:
client = iss.napravleniye.client.individual
patients[k] = {"fio": client.fio(short=True, dots=True), "age": client.age_s(direction=iss.napravleniye), "directions": [], "researches": [], "cards": []}
if iss.napravleniye_id not in patients[k]["directions"]:
patients[k]["directions"].append(iss.napravleniye_id)
kn = iss.napravleniye.client.number_with_type()
if kn not in patients[k]["cards"]:
patients[k]["cards"].append(kn)
patients[k]["researches"].append(iss.research.title)
n = 0
for p_pk in patients:
n += 1
row = [
str(n),
patients[p_pk]["fio"],
patients[p_pk]["age"],
", ".join(patients[p_pk]["cards"]),
len(patients[p_pk]["directions"]),
", ".join([str(x) for x in patients[p_pk]["directions"]]),
", ".join(patients[p_pk]["researches"]),
]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
row_num += 1
row = ["Число пациентов: ", str(len(patients))]
for col_num in range(len(row)):
ws.write(daterow + 1, col_num, row[col_num], font_style)
elif tp == "lab":
lab = Podrazdeleniya.objects.get(pk=int(pk))
response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Лаборатория_{}_{}-{}.xls\"".format(lab.title.replace(" ", "_"), date_start_o, date_end_o), tr)
date_start, date_end = try_parse_range(date_start_o, date_end_o)
for card_base in list(CardBase.objects.filter(hide=False)) + [None]:
cb_title = "Все базы" if not card_base else card_base.short_title
for finsource in list(IstochnikiFinansirovaniya.objects.filter(base=card_base)) + [False]:
finsource_title = "Все источники"
if isinstance(finsource, IstochnikiFinansirovaniya):
finsource_title = finsource.title
ws = wb.add_sheet(cb_title + " " + finsource_title + " выполн.")
font_style = xlwt.XFStyle()
font_style.borders = borders
row_num = 0
row = ["Период: ", "{0} - {1}".format(date_start_o, date_end_o)]
for col_num in range(len(row)):
if col_num == 0:
ws.write(row_num, col_num, row[col_num], font_style)
else:
ws.write_merge(row_num, row_num, col_num, col_num + 2, row[col_num], style=font_style)
row_num += 1
font_style = xlwt.XFStyle()
font_style.borders = borders
row = [(lab.title, 16000)]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num][0], font_style)
ws.col(col_num).width = row[col_num][1]
ws.write(row_num, col_num + 1, "", font_style)
row_num = 2
row = ["Выполнено исследований", cb_title + " " + finsource_title]
for col_num in range(len(row)):
if col_num == 0:
ws.write(row_num, col_num, row[col_num], font_style)
else:
ws.write_merge(row_num, row_num, col_num, col_num + 1, row[col_num], style=font_style)
font_style = xlwt.XFStyle()
font_style.alignment.wrap = 1
font_style.borders = borders
pki = int(pk)
otds = {pki: defaultdict(lambda: 0)}
otds_pat = {pki: defaultdict(lambda: 0)}
ns = 0
for obj in directory.Researches.objects.filter(podrazdeleniye__pk=lab.pk):
if finsource is not False:
iss_list = Issledovaniya.objects.filter(
research__pk=obj.pk, time_confirmation__isnull=False, time_confirmation__range=(date_start, date_end), napravleniye__istochnik_f=finsource
)
elif card_base:
iss_list = Issledovaniya.objects.filter(
research__pk=obj.pk, time_confirmation__isnull=False, time_confirmation__range=(date_start, date_end), napravleniye__istochnik_f__base=card_base
)
else:
iss_list = Issledovaniya.objects.filter(research__pk=obj.pk, time_confirmation__isnull=False, time_confirmation__range=(date_start, date_end))
iss_list = iss_list.filter(napravleniye__isnull=False)
for researches in iss_list:
n = False
for x in d.Result.objects.filter(issledovaniye=researches):
x = x.value.lower().strip()
n = any([y in x for y in ["забор", "тест", "неправ", "ошибк", "ошибочный", "кров", "брак", "мало", "недостаточно", "реактив"]]) or x == "-"
if n:
break
if n:
continue
if researches.napravleniye:
otd_pk = "external-" + str(researches.napravleniye.imported_org_id) if not researches.napravleniye.doc else researches.napravleniye.doc.podrazdeleniye_id
else:
otd_pk = "empty"
if otd_pk not in otds:
otds[otd_pk] = defaultdict(lambda: 0)
otds[otd_pk][obj.pk] += 1
otds[pki][obj.pk] += 1
if any([x.get_is_norm()[0] == "normal" for x in researches.result_set.all()]):
continue
if otd_pk not in otds_pat:
otds_pat[otd_pk] = defaultdict(lambda: 0)
otds_pat[otd_pk][obj.pk] += 1
otds_pat[pki][obj.pk] += 1
style = xlwt.XFStyle()
style.borders = borders
font = xlwt.Font()
font.bold = True
style.font = font
otd_local_keys = [x for x in otds.keys() if isinstance(x, int)]
otd_external_keys = [int(x.replace("external-", "")) for x in otds.keys() if isinstance(x, str) and "external-" in x and x != "external-None"]
for otdd in (
list(Podrazdeleniya.objects.filter(pk=pki))
+ list(Podrazdeleniya.objects.filter(pk__in=[x for x in otd_local_keys if x != pki]))
+ list(RMISOrgs.objects.filter(pk__in=otd_external_keys))
):
row_num += 2
row = [
otdd.title if otdd.pk != pki else "Сумма по всем отделениям",
"" if otdd.pk != pki else "Итого",
]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], style=style)
rows = []
ok = otds.get(otdd.pk, otds.get("external-{}".format(otdd.pk), {}))
for obj in directory.Researches.objects.filter(pk__in=[x for x in ok.keys()]):
row = [
obj.title,
ok[obj.pk],
]
rows.append(row)
ns += 1
for row in sorted(rows, key=itemgetter(0)):
row_num += 1
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
ws_pat = wb.add_sheet(cb_title + " " + finsource_title + " паталог.")
row_num = 0
row = ["Период: ", "{0} - {1}".format(date_start_o, date_end_o)]
for col_num in range(len(row)):
if col_num == 0:
ws_pat.write(row_num, col_num, row[col_num], font_style)
else:
ws_pat.write_merge(row_num, row_num, col_num, col_num + 2, row[col_num], style=font_style)
row_num = 1
row = [
(lab.title, 16000),
]
for col_num in range(len(row)):
ws_pat.write(row_num, col_num, row[col_num][0], font_style)
ws_pat.col(col_num).width = row[col_num][1]
ws_pat.write(row_num, col_num + 1, "", font_style)
font_style = xlwt.XFStyle()
font_style.borders = borders
row_num = 2
row = ["Паталогии", cb_title + " " + finsource_title]
for col_num in range(len(row)):
if col_num == 0:
ws_pat.write(row_num, col_num, row[col_num], font_style)
else:
ws_pat.write_merge(row_num, row_num, col_num, col_num + 1, row[col_num], style=font_style)
otd_local_keys = [x for x in otds_pat.keys() if isinstance(x, int)]
otd_external_keys = [int(x.replace("external-", "")) for x in otds_pat.keys() if isinstance(x, str) and "external-" in x]
for otdd in (
list(Podrazdeleniya.objects.filter(pk=pki))
+ list(Podrazdeleniya.objects.filter(pk__in=[x for x in otd_local_keys if x != pki]))
+ list(RMISOrgs.objects.filter(pk__in=otd_external_keys))
):
row_num += 2
row = [
otdd.title,
"" if otdd.pk != pki else "Итого",
]
for col_num in range(len(row)):
ws_pat.write(row_num, col_num, row[col_num], style=style)
rows = []
ok = otds_pat.get(otdd.pk, otds_pat.get("external-{}".format(otdd.pk), {}))
                for obj in directory.Researches.objects.filter(pk__in=[x for x in ok.keys()]):
row = [
obj.title,
ok[obj.pk],
]
rows.append(row)
for row in sorted(rows, key=itemgetter(0)):
row_num += 1
for col_num in range(len(row)):
ws_pat.write(row_num, col_num, row[col_num], font_style)
if ns == 0:
ws.sheet_visible = False
ws_pat.sheet_visible = False
elif tp == "lab_sum":
response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Лаборатория_Колво_{}-{}.xls\"".format(date_start_o, date_end_o), tr)
wb = openpyxl.Workbook()
wb.remove(wb.get_sheet_by_name('Sheet'))
ws = wb.create_sheet("Кол-во по лаборатории")
d1 = datetime.datetime.strptime(date_start_o, '%d.%m.%Y')
d2 = datetime.datetime.strptime(date_end_o, '%d.%m.%Y')
start_date = datetime.datetime.combine(d1, datetime.time.min)
end_date = datetime.datetime.combine(d2, datetime.time.max)
lab_podr = get_lab_podr()
lab_podr = tuple([i[0] for i in lab_podr])
researches_by_sum = sql_func.statistics_sum_research_by_lab(lab_podr, start_date, end_date)
ws = structure_sheet.statistic_research_by_sum_lab_base(ws, d1, d2, "Кол-во по лабораториям")
ws = structure_sheet.statistic_research_by_sum_lab_data(ws, researches_by_sum)
elif tp == "lab_details":
response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Лаборатория_детали_{}-{}.xls\"".format(date_start_o, date_end_o), tr)
wb = openpyxl.Workbook()
wb.remove(wb.get_sheet_by_name('Sheet'))
ws = wb.create_sheet("Детали по лаборатории")
d1 = datetime.datetime.strptime(date_start_o, '%d.%m.%Y')
d2 = datetime.datetime.strptime(date_end_o, '%d.%m.%Y')
start_date = datetime.datetime.combine(d1, datetime.time.min)
end_date = datetime.datetime.combine(d2, datetime.time.max)
lab_podr = get_lab_podr()
lab_podr = tuple([i[0] for i in lab_podr])
researches_deatails = sql_func.statistics_details_research_by_lab(lab_podr, start_date, end_date)
ws = structure_sheet.statistic_research_by_details_lab_base(ws, d1, d2, "Детали по лаборатории")
ws = structure_sheet.statistic_research_by_details_lab_data(ws, researches_deatails)
elif tp == "covid_sum":
response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Лаборатория_Колво_{}-{}.xls\"".format(date_start_o, date_end_o), tr)
wb = openpyxl.Workbook()
wb.remove(wb.get_sheet_by_name('Sheet'))
ws = wb.create_sheet("Кол-во по Ковид")
pk = request_data.get("research")
d1 = datetime.datetime.strptime(date_start_o, '%d.%m.%Y')
d2 = datetime.datetime.strptime(date_end_o, '%d.%m.%Y')
start_date = datetime.datetime.combine(d1, datetime.time.min)
end_date = datetime.datetime.combine(d2, datetime.time.max)
result_patient = sql_get_result_by_direction(pk, start_date, end_date)
cards = tuple(set([i.client_id for i in result_patient]))
document_card = sql_get_documents_by_card_id(cards)
patient_docs = {}
document_type = {4: "снилс", 5: "рождение", 1: "паспорт", 3: "полис"}
for doc in document_card:
data = None
if doc.document_type_id in [4, 3]:
data = {document_type.get(doc.document_type_id): doc.number}
elif doc.document_type_id in [1, 5]:
data = {document_type.get(doc.document_type_id): f"{doc.serial}@{doc.number}"}
if patient_docs.get(doc.card_id, None):
temp_docs = patient_docs.get(doc.card_id)
temp_docs.append(data)
patient_docs[doc.card_id] = temp_docs
else:
if data:
patient_docs[doc.card_id] = [data]
ws = structure_sheet.statistic_research_by_covid_base(ws, d1, d2, "Кол-во по ковид")
ws = structure_sheet.statistic_research_by_covid_data(ws, result_patient, patient_docs)
elif tp == "lab-staff":
lab = Podrazdeleniya.objects.get(pk=int(pk))
researches = list(directory.Researches.objects.filter(podrazdeleniye=lab, hide=False).order_by('title').order_by("sort_weight").order_by("direction_id"))
pods = list(Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.DEPARTMENT).order_by("title"))
response['Content-Disposition'] = str.translate(
"attachment; filename=\"Статистика_Исполнители_Лаборатория_{0}_{1}-{2}.xls\"".format(lab.title.replace(" ", "_"), date_start_o, date_end_o), tr
)
date_start, date_end = try_parse_range(date_start_o, date_end_o)
iss = Issledovaniya.objects.filter(research__podrazdeleniye=lab, time_confirmation__isnull=False, time_confirmation__range=(date_start, date_end))
font_style_wrap = xlwt.XFStyle()
font_style_wrap.alignment.wrap = 1
font_style_wrap.borders = borders
font_style_vertical = xlwt.easyxf('align: rotation 90')
font_style_vertical.borders = borders
def val(v):
return "" if v == 0 else v
def nl(v):
return v + ("" if len(v) > 19 else "\n")
for executor in DoctorProfile.objects.filter(user__groups__name__in=("Врач-лаборант", "Лаборант"), podrazdeleniye__p_type=Podrazdeleniya.LABORATORY).order_by("fio").distinct():
cnt_itogo = {}
ws = wb.add_sheet(executor.get_fio(dots=False) + " " + str(executor.pk))
row_num = 0
row = [("Исполнитель", 5500), ("Отделение", 5000)]
for research in researches:
row.append(
(
Truncator(research.title).chars(30),
1300,
)
)
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num][0], font_style_wrap if col_num < 2 else font_style_vertical)
ws.col(col_num).width = row[col_num][1]
row_num += 1
itogo_row = [executor.get_fio(dots=True), nl("Итого")]
empty_row = ["", ""]
cnt_local_itogo = {}
for pod in pods:
row = [executor.get_fio(dots=True), nl(pod.title)]
cnt = {}
for research in researches:
if research.title not in cnt.keys():
cnt[research.title] = 0
if research.title not in cnt_local_itogo.keys():
cnt_local_itogo[research.title] = 0
if research.title not in cnt_itogo.keys():
cnt_itogo[research.title] = 0
for i in iss.filter(doc_confirmation=executor, napravleniye__doc__podrazdeleniye=pod, research=research):
isadd = False
allempty = True
for r in Result.objects.filter(issledovaniye=i):
value = r.value.lower().strip()
if value != "":
allempty = False
n = any([y in value for y in ["забор", "тест", "неправ", "ошибк", "ошибочный", "кров", "брак", "мало", "недостаточно", "реактив"]])
if not n:
isadd = True
if not isadd or allempty:
continue
cnt[research.title] += 1
cnt_itogo[research.title] += 1
cnt_local_itogo[research.title] += 1
for research in researches:
row.append(val(cnt[research.title]))
# data["otds"][pod.title] += 1
# data["all"][pod.title] += 1
# cnt_all[pod.title] += 1
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style_wrap)
row_num += 1
for research in researches:
itogo_row.append(val(cnt_local_itogo[research.title]))
empty_row.append("")
for col_num in range(len(itogo_row)):
ws.write(row_num, col_num, itogo_row[col_num], font_style_wrap)
row_num += 1
elif tp == "otd":
otd = Podrazdeleniya.objects.get(pk=int(pk))
response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Отделение_{0}_{1}-{2}.xls\"".format(otd.title.replace(" ", "_"), date_start_o, date_end_o), tr)
ws = wb.add_sheet("Выписано направлений")
font_style = xlwt.XFStyle()
row_num = 0
row = ["За период: ", "{0} - {1}".format(date_start_o, date_end_o)]
date_start_o, date_end_o = try_parse_range(date_start_o, date_end_o)
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
row_num += 1
font_style = xlwt.XFStyle()
row = [otd.title]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
font_style = xlwt.XFStyle()
row_num += 1
row = [
(u"Всего выписано", 6000),
(str(Napravleniya.objects.filter(doc__podrazdeleniye=otd, data_sozdaniya__range=(date_start_o, date_end_o)).count()), 3000),
]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num][0], font_style)
ws.col(col_num).width = row[col_num][1]
row_num += 1
researches = Issledovaniya.objects.filter(napravleniye__doc__podrazdeleniye=otd, napravleniye__data_sozdaniya__range=(date_start_o, date_end_o), time_confirmation__isnull=False)
naprs = len(set([v.napravleniye_id for v in researches]))
row = [u"Завершенных", str(naprs)]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
elif tp == "list-users":
response['Content-Disposition'] = str.translate("attachment; filename=\"Список_пользователей.xls\"", tr)
ws = wb.add_sheet("Пользователи")
row_num = 0
font_style = xlwt.XFStyle()
for p in Podrazdeleniya.objects.filter(hide=False).order_by("title"):
has = False
for u in DoctorProfile.objects.filter(podrazdeleniye=p).exclude(user__username="admin").order_by("fio"):
has = True
row = [("ID отделения %s" % p.pk, 9000), (p.title, 9000), ("ID пользователя %s" % u.pk, 9000), (u.user.username, 5000), (u.fio, 10000)]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num][0], font_style)
ws.col(col_num).width = row[col_num][1]
row_num += 1
if has:
row_num += 1
elif tp == "lab-receive":
lab = Podrazdeleniya.objects.get(pk=int(pk))
response['Content-Disposition'] = str.translate(
"attachment; filename=\"Статистика_Принято_емкостей_{0}_{1}-{2}.xls\"".format(lab.title.replace(" ", "_"), date_start_o, date_end_o), tr
)
date_start, date_end = try_parse_range(date_start_o, date_end_o)
ws = wb.add_sheet(lab.title)
font_style_wrap = xlwt.XFStyle()
font_style_wrap.alignment.wrap = 1
font_style_wrap.borders = borders
font_style = xlwt.XFStyle()
font_style.borders = borders
row_num = 0
row = [
(lab.title + ", принято емкостей за {0}-{1}".format(date_start_o, date_end_o), 16000),
]
replace = [{"from": "-", "to": " "}, {"from": ".", "to": " "}, {"from": " и ", "to": " "}]
n = len(row) - 1
pods = Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.DEPARTMENT).order_by("title")
for pod in pods:
n += 1
title = pod.title
for rep in replace:
title = title.replace(rep["from"], rep["to"])
tmp = title.split()
title = []
nx = 0
for x in tmp:
x = x.strip()
if len(x) == 0:
continue
title.append(x if x.isupper() else x[0].upper() + ("" if nx > 0 else x[1:7]))
nx += 1
row.append(
(
"".join(title),
3700,
)
)
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num][0], font_style)
ws.col(col_num).width = row[col_num][1]
row_num += 1
for tube in directory.Tubes.objects.filter(releationsft__fractions__research__podrazdeleniye=lab).distinct().order_by("title"):
row = [tube.title]
for pod in pods:
gets = (
d.TubesRegistration.objects.filter(issledovaniya__research__podrazdeleniye=lab, type__tube=tube, time_recive__range=(date_start, date_end), doc_get__podrazdeleniye=pod)
.filter(Q(notice="") | Q(notice__isnull=True))
.distinct()
)
row.append("" if not gets.exists() else str(gets.count()))
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
row_num += 1
elif tp == "all-labs":
labs = Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.LABORATORY).exclude(title="Внешние организации")
response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Все_Лаборатории_{0}-{1}.xls\"".format(date_start_o, date_end_o), tr)
ws = wb.add_sheet("Выполненых анализов")
font_style = xlwt.XFStyle()
row_num = 0
row = ["За период: ", "{0} - {1}".format(date_start_o, date_end_o)]
date_start_o, date_end_o = try_parse_range(date_start_o, date_end_o)
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
row_num += 1
font_style = xlwt.XFStyle()
font_style.font.bold = True
columns = [
(u"Лаборатория", 9000),
(u"Выполнено анализов", 8000),
]
for col_num in range(len(columns)):
ws.write(row_num, col_num, columns[col_num][0], font_style)
ws.col(col_num).width = columns[col_num][1]
font_style = xlwt.XFStyle()
font_style.alignment.wrap = 1
all = 0
for lab in labs:
row_num += 1
c = Issledovaniya.objects.filter(research__podrazdeleniye=lab, time_confirmation__isnull=False, time_confirmation__range=(date_start_o, date_end_o)).count()
row = [lab.title, c]
all += c
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
row_num += 1
row = [
"",
"Всего: " + str(all),
]
font_style = xlwt.XFStyle()
font_style.alignment.wrap = 3
font_style.alignment.horz = 3
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
elif tp == "tubes-using":
response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Использование_Емкостей_{0}-{1}.xls\"".format(date_start_o, date_end_o), tr)
per = "{0} - {1}".format(date_start_o, date_end_o)
ws = wb.add_sheet("Общее использование емкостей")
font_style = xlwt.XFStyle()
row_num = 0
row = ["За период: ", per]
date_start_o, date_end_o = try_parse_range(date_start_o, date_end_o)
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
row_num += 1
font_style = xlwt.XFStyle()
font_style.font.bold = True
columns = [
(u"Тип емкости", 9000),
(u"Материал взят в процедурном каб", 9000),
(u"Принято лабораторией", 8000),
(u"Не принято лабораторией", 8000),
(u"Потеряны", 4000),
]
for col_num in range(len(columns)):
ws.write(row_num, col_num, columns[col_num][0], font_style)
ws.col(col_num).width = columns[col_num][1]
font_style = xlwt.XFStyle()
font_style.alignment.wrap = 1
all_get = 0
all_rec = 0
all_nrec = 0
all_lost = 0
for tube in Tubes.objects.all():
row_num += 1
c_get = TubesRegistration.objects.filter(type__tube=tube, time_get__isnull=False, time_get__range=(date_start_o, date_end_o)).count()
c_rec = TubesRegistration.objects.filter(type__tube=tube, time_recive__isnull=False, notice="", time_get__range=(date_start_o, date_end_o)).count()
c_nrec = TubesRegistration.objects.filter(type__tube=tube, time_get__isnull=False, time_get__range=(date_start_o, date_end_o)).exclude(notice="").count()
str1 = ""
str2 = ""
if c_nrec > 0:
str1 = str(c_nrec)
            # lost = taken minus received minus rejected for this tube type
            if c_get - c_rec - c_nrec > 0:
                str2 = str(c_get - c_rec - c_nrec)
                all_lost += c_get - c_rec - c_nrec
row = [tube.title, c_get, c_rec, str1, str2]
all_get += c_get
all_rec += c_rec
all_nrec += c_nrec
for col_num in range(len(row)):
font_style.alignment.wrap = 1
font_style.alignment.horz = 1
if col_num > 0:
font_style.alignment.wrap = 3
font_style.alignment.horz = 3
ws.write(row_num, col_num, row[col_num], font_style)
labs = Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.LABORATORY).exclude(title="Внешние организации")
for lab in labs:
ws = wb.add_sheet(lab.title)
font_style = xlwt.XFStyle()
row_num = 0
row = ["За период: ", per]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
row_num += 1
font_style = xlwt.XFStyle()
font_style.font.bold = True
columns = [
(u"Тип емкости", 9000),
(u"Материал взят в процедурном каб", 9000),
(u"Принято лабораторией", 8000),
(u"Не принято лабораторией", 8000),
(u"Потеряны", 4000),
]
for col_num in range(len(columns)):
ws.write(row_num, col_num, columns[col_num][0], font_style)
ws.col(col_num).width = columns[col_num][1]
font_style = xlwt.XFStyle()
font_style.alignment.wrap = 1
all_get = 0
all_rec = 0
all_nrec = 0
all_lost = 0
for tube in Tubes.objects.all():
row_num += 1
c_get = TubesRegistration.objects.filter(
issledovaniya__research__podrazdeleniye=lab, type__tube=tube, time_get__isnull=False, time_get__range=(date_start_o, date_end_o)
).count()
c_rec = TubesRegistration.objects.filter(
issledovaniya__research__podrazdeleniye=lab, type__tube=tube, time_recive__isnull=False, notice="", time_get__range=(date_start_o, date_end_o)
).count()
c_nrec = (
TubesRegistration.objects.filter(issledovaniya__research__podrazdeleniye=lab, type__tube=tube, time_get__isnull=False, time_get__range=(date_start_o, date_end_o))
.exclude(notice="")
.count()
)
str1 = ""
str2 = ""
if c_nrec > 0:
str1 = str(c_nrec)
                # lost = taken minus received minus rejected for this tube type
                if c_get - c_rec - c_nrec > 0:
                    str2 = str(c_get - c_rec - c_nrec)
                    all_lost += c_get - c_rec - c_nrec
row = [tube.title, c_get, c_rec, str1, str2]
all_get += c_get
all_rec += c_rec
all_nrec += c_nrec
for col_num in range(len(row)):
font_style.alignment.wrap = 1
font_style.alignment.horz = 1
if col_num > 0:
font_style.alignment.wrap = 3
font_style.alignment.horz = 3
ws.write(row_num, col_num, row[col_num], font_style)
elif tp == "uets":
usrs = DoctorProfile.objects.filter(podrazdeleniye__p_type=Podrazdeleniya.LABORATORY).order_by("podrazdeleniye__title")
response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_УЕТс_{0}-{1}.xls\"".format(date_start_o, date_end_o), tr)
ws = wb.add_sheet("УЕТы")
font_style = xlwt.XFStyle()
row_num = 0
row = ["За период: ", "{0} - {1}".format(date_start_o, date_end_o)]
date_start_o, date_end_o = try_parse_range(date_start_o, date_end_o)
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
font_style = xlwt.XFStyle()
font_style.font.bold = True
row_num += 1
row = [
(u"Лаборатория", 8000),
(u"ФИО", 8000),
(u"УЕТы", 2500),
]
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num][0], font_style)
ws.col(col_num).width = row[col_num][1]
font_style = xlwt.XFStyle()
for usr in usrs:
researches_uets = {}
researches = Issledovaniya.objects.filter(doc_save=usr, time_save__isnull=False, time_save__range=(date_start_o, date_end_o))
for issledovaniye in researches:
if usr.labtype == 1:
uet_tmp = sum([v.uet_doc for v in directory.Fractions.objects.filter(research=issledovaniye.research)])
else:
uet_tmp = sum([v.uet_lab for v in directory.Fractions.objects.filter(research=issledovaniye.research)])
researches_uets[issledovaniye.pk] = {"uet": uet_tmp}
researches = Issledovaniya.objects.filter(doc_confirmation=usr, time_confirmation__isnull=False, time_confirmation__range=(date_start_o, date_end_o))
for issledovaniye in researches:
if usr.labtype == 1:
uet_tmp = sum([v.uet_doc for v in directory.Fractions.objects.filter(research=issledovaniye.research)])
else:
uet_tmp = sum([v.uet_lab for v in directory.Fractions.objects.filter(research=issledovaniye.research)])
researches_uets[issledovaniye.pk] = {"uet": uet_tmp}
uets = sum([researches_uets[v]["uet"] for v in researches_uets.keys()])
row_num += 1
row = [
usr.podrazdeleniye.title,
usr.get_full_fio(),
uets,
]
for col_num in range(len(row)):
font_style.alignment.wrap = 1
font_style.alignment.horz = 1
if col_num > 2:
font_style.alignment.wrap = 3
font_style.alignment.horz = 3
ws.write(row_num, col_num, row[col_num], font_style)
elif tp == "message-ticket":
filters = {'pk': int(request_data.get("hospital"))}
any_hospital = request.user.doctorprofile.all_hospitals_users_control
if not any_hospital:
filters['pk'] = request.user.doctorprofile.get_hospital_id()
response['Content-Disposition'] = str.translate(f"attachment; filename=\"Обращения {date_start_o.replace('.', '')} {date_end_o.replace('.', '')} {filters['pk']}.xlsx\"", tr)
wb = openpyxl.Workbook()
wb.remove(wb.get_sheet_by_name('Sheet'))
ws = wb.create_sheet("Обращения")
styles_obj = structure_sheet.style_sheet()
wb.add_named_style(styles_obj[0])
if int(filters['pk']) == -1 and any_hospital:
filters = {}
rows_hosp = list(Hospitals.objects.values_list('pk', flat=True).filter(hide=False, **filters))
d1 = datetime.datetime.strptime(date_start_o, '%d.%m.%Y')
d2 = datetime.datetime.strptime(date_end_o, '%d.%m.%Y')
ws = structure_sheet.statistic_message_ticket_base(ws, date_start_o, date_end_o, styles_obj[3])
start_date = datetime.datetime.combine(d1, datetime.time.min)
end_date = datetime.datetime.combine(d2, datetime.time.max)
message_ticket_sql = sql_func.message_ticket(rows_hosp, start_date, end_date)
ws = structure_sheet.statistic_message_ticket_data(ws, message_ticket_sql, styles_obj[3])
ws = wb.create_sheet("Итоги-Обращения")
message_total_purpose_sql = sql_func.message_ticket_purpose_total(rows_hosp, start_date, end_date)
ws = structure_sheet.statistic_message_purpose_total_data(ws, message_total_purpose_sql, date_start_o, date_end_o, styles_obj[3])
wb.save(response)
return response
|
62ce2d5ab3e036fa74783e1b96169ff477bc6abd
| 3,647,465
|
# `function` and `readability` are assumed to be checker modules defined in
# the enclosing package.
def get_checkers():
"""Get default checkers to run on code.
:returns: List of default checkers to run.
"""
return [function, readability]
|
c4a7668e1f2ca0d8d9dc673b43274065551023b5
| 3,647,467
|
def get_token():
"""
Get or create token.
"""
try:
token = Token.objects.get(name=settings.TOKEN_NAME)
except Token.DoesNotExist:
        client_id = input("Client id:")
        client_secret = input("Client secret:")
token = Token.objects.create(
name=settings.TOKEN_NAME,
scope=settings.TOKEN_SCOPE,
client_id=client_id,
client_secret=client_secret
)
if not (token.access_token and token.refresh_token):
authorize_token(token)
return token
|
107f0dfc7148d4964f181e2f7ff14038860a56ab
| 3,647,468
|
def get_labels_from_sample(sample):
"""
    Build split labels for a sample of Chinese words: a sample covering N
    characters in total yields N-1 interstice labels.
    Parameters
    ----------
    sample : list of words covering N characters in total
    Returns
    -------
    list of N-1 ints in {0, 1} (0 represents no split)
"""
labels = []
for word in sample:
if len(word) > 1:
for _ in range(len(word)-1):
labels.append(0) # within a word, append a '0' for each interstice
labels.append(1) # at the end of a word, append a '1'
else:
labels.append(1)
labels = labels[:-1] # Throw away the last value, it doesn't represent an interstice
return labels
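# Illustrative check (not part of the original snippet): two words covering
# N = 3 characters yield N - 1 = 2 interstice labels.
assert get_labels_from_sample(["ab", "c"]) == [0, 1]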
|
4b21b878d1ae23b08569bda1f3c3b91e7a6c48b9
| 3,647,469
|
import warnings

import numpy as np

# `_NoValue` and `VisibleDeprecationWarning` are assumed to come from the
# enclosing module (nipy's compatibility shims); numpy provides equivalents
# as np._NoValue and np.VisibleDeprecationWarning.
def _recarray_from_array(arr, names, drop_name_dim=_NoValue):
""" Create recarray from input array `arr`, field names `names`
"""
if not arr.dtype.isbuiltin: # Structured array as input
# Rename fields
dtype = np.dtype([(n, d[1]) for n, d in zip(names, arr.dtype.descr)])
return arr.view(dtype)
# Can drop name axis for > 1D arrays or row vectors (scalar per name).
can_name_drop = arr.ndim > 1 or len(names) > 1
if can_name_drop and drop_name_dim is _NoValue:
warnings.warn(
'Default behavior of make_recarray and > 1D arrays will '
'change in next Nipy release. Current default returns\n'
'array with same number of dimensions as input, with '
'axis corresponding to the field names having length 1\n; '
'Future default will be to drop this length 1 axis. Please '
'change your code to use explicit True or False for\n'
'compatibility with future Nipy.',
VisibleDeprecationWarning,
stacklevel=2)
# This default will change to True in next version of Nipy
drop_name_dim = False
dtype = np.dtype([(n, arr.dtype) for n in names])
# At least for numpy <= 1.7.1, the dimension that numpy applies the names
# to depends on the memory layout (C or F). Ensure C layout for consistent
# application of names to last dimension.
rec_arr = np.ascontiguousarray(arr).view(dtype)
if can_name_drop and drop_name_dim:
rec_arr.shape = arr.shape[:-1]
return rec_arr
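# Hypothetical usage sketch (array values are illustrative): name the three
# columns of a 2x3 float array and drop the length-1 name axis.
# arr = np.arange(6.).reshape(2, 3)
# rec = _recarray_from_array(arr, ['x', 'y', 'z'], drop_name_dim=True)
# rec['x']  -> array([0., 3.])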
|
7e041dac3f0e74f82bd36a02174edc39950030d3
| 3,647,470
|
import numpy as np

# `TriangleMesh` and `simple_triangulate` are assumed to be provided by the
# enclosing package (a nanomesh-style API); they are not defined in this
# snippet.
def pad(mesh: TriangleMesh,
*,
side: str,
width: int,
opts: str = '',
label: int = None) -> TriangleMesh:
"""Pad a triangle mesh.
Parameters
----------
mesh : TriangleMesh
The mesh to pad.
side : str
Side to pad, must be one of `left`, `right`, `top`, `bottom`.
width : int
Width of the padded area.
opts : str, optional
Optional arguments passed to `triangle.triangulate`.
label : int, optional
The label to assign to the padded area. If not defined, generates the
next unique label based on the existing ones.
Returns
-------
new_mesh : TriangleMesh
Padded triangle mesh.
Raises
------
ValueError
When the value of `side` is invalid.
"""
if label is None:
label = mesh.unique_labels.max() + 1
if width == 0:
return mesh
top_edge, right_edge = mesh.points.max(axis=0)
bottom_edge, left_edge = mesh.points.min(axis=0)
if side == 'bottom':
is_edge = mesh.points[:, 0] == bottom_edge
corners = np.array([[bottom_edge - width, right_edge],
[bottom_edge - width, left_edge]])
elif side == 'left':
is_edge = mesh.points[:, 1] == left_edge
corners = np.array([[bottom_edge, left_edge - width],
[top_edge, left_edge - width]])
elif side == 'top':
is_edge = mesh.points[:, 0] == top_edge
corners = np.array([[top_edge + width, right_edge],
[top_edge + width, left_edge]])
elif side == 'right':
is_edge = mesh.points[:, 1] == right_edge
corners = np.array([[bottom_edge, right_edge + width],
[top_edge, right_edge + width]])
else:
        raise ValueError('Side must be one of `right`, `left`, `bottom`, '
                         f'`top`. Got {side=}')
edge_coords = mesh.points[is_edge]
coords = np.vstack([edge_coords, corners])
pad_mesh = simple_triangulate(points=coords, opts=opts)
mesh_edge_index = np.argwhere(is_edge).flatten()
pad_edge_index = np.arange(len(mesh_edge_index))
edge_mapping = np.vstack([pad_edge_index, mesh_edge_index])
n_verts = len(mesh.points)
n_edge_verts = len(edge_coords)
n_pad_verts = len(pad_mesh.points) - n_edge_verts
mesh_index = np.arange(n_verts, n_verts + n_pad_verts)
pad_index = np.arange(n_edge_verts, n_edge_verts + n_pad_verts)
pad_mapping = np.vstack([pad_index, mesh_index])
    # mapping from the cell indices in `pad_mesh` to those in the source mesh
mapping = np.hstack([edge_mapping, pad_mapping])
shape = pad_mesh.cells.shape
pad_cells = pad_mesh.cells.copy().ravel()
mask = np.in1d(pad_cells, mapping[0, :])
pad_cells[mask] = mapping[1,
np.searchsorted(mapping[0, :], pad_cells[mask])]
pad_cells = pad_cells.reshape(shape)
pad_verts = pad_mesh.points[n_edge_verts:]
pad_labels = np.ones(len(pad_cells)) * label
# append values to source mesh
points = np.vstack([mesh.points, pad_verts])
cells = np.vstack([mesh.cells, pad_cells])
labels = np.hstack([mesh.labels, pad_labels])
new_mesh = TriangleMesh(points=points, cells=cells, labels=labels)
return new_mesh
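# Hypothetical usage sketch (mesh construction follows the nanomesh-style API
# assumed above): pad the left side by 2 units and label the new region 30.
# padded = pad(mesh, side='left', width=2, label=30)
# The padded strip is triangulated separately, stitched onto the existing
# edge vertices, and appended to the points/cells/labels of the source mesh.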
|
7da2a20b060a6243cd3d1c4ec3192cfba833fd27
| 3,647,471
|
from desimodel import footprint
from desitarget import io as dtio
from time import time

# numpy (np), healpy (hp), fitsio, desiutil's get_logger, and the targeting
# masks (desi_mask, bgs_mask, mws_mask) are assumed to be imported in the
# enclosing module.
def make_qa_plots(targs, qadir='.', targdens=None, max_bin_area=1.0, weight=True,
imaging_map_file=None, truths=None, objtruths=None, tcnames=None,
cmx=False, bit_mask=None, mocks=False):
"""Make DESI targeting QA plots given a passed set of targets.
Parameters
----------
targs : :class:`~numpy.array` or `str`
An array of targets in the DESI data model format. If a string is passed then the
targets are read from the file with the passed name (supply the full directory path).
qadir : :class:`str`, optional, defaults to the current directory
The output directory to which to write produced plots.
targdens : :class:`dictionary`, optional, set automatically by the code if not passed
A dictionary of DESI target classes and the goal density for that class. Used to
label the goal density on histogram plots.
max_bin_area : :class:`float`, optional, defaults to 1 degree
The bin size in the passed coordinates is chosen automatically to be as close as
possible to this value without exceeding it.
weight : :class:`boolean`, optional, defaults to True
If this is set, weight pixels using the ``DESIMODEL`` HEALPix footprint file to
ameliorate under dense pixels at the footprint edges.
imaging_map_file : :class:`str`, optional, defaults to no weights
        If `weight` is set, then this file contains the location of the imaging HEALPixel
        map (e.g. made by :func:`desitarget.randoms.pixmap`). If this is not
        sent, then the weights default to 1 everywhere (i.e. no weighting).
truths : :class:`~numpy.array` or `str`
The truth objects from which the targs were derived in the DESI data model format.
If a string is passed then read from that file (supply the full directory path).
objtruths : :class:`dict`
Object type-specific truth metadata.
tcnames : :class:`list`, defaults to None
        A list of strings, e.g. ['QSO', 'LRG', 'ALL']. If passed, return only the QA pages
for those specific bits. A useful speed-up when testing.
cmx : :class:`boolean`, defaults to ``False``
Pass as ``True`` to operate on commissioning bits instead of SV or main survey
bits. Commissioning files have no MWS or BGS columns.
bit_mask : :class:`~numpy.array`, optional, defaults to ``None``
Load the bit names from this passed mask (with zero density constraints)
instead of the main survey bits.
mocks : :class:`boolean`, optional, default=False
If ``True``, add plots that are only relevant to mocks at the bottom of the webpage.
Returns
-------
:class:`float`
The total area of the survey used to make the QA plots.
Notes
-----
- The ``DESIMODEL`` environment variable must be set to find the default expected
target densities.
- On execution, a set of .png plots for target QA are written to `qadir`.
"""
# ADM set up the default logger from desiutil.
log = get_logger()
start = time()
log.info('Start making targeting QA plots...t = {:.1f}s'.format(time()-start))
    if mocks and truths is None and objtruths is None:
        if isinstance(targs, str):
            mockdata = collect_mock_data(targs)
            if mockdata == 0:
                mocks = False
            else:
                targs, truths, objtruths = mockdata
        else:
            log.warning('To make mock-related plots, targs must be a directory+file-location string...')
            log.warning('...will proceed by only producing the non-mock plots...')
else:
# ADM if a filename was passed, read in the targets from that file.
if isinstance(targs, str):
targs = fitsio.read(targs)
log.info('Read in targets...t = {:.1f}s'.format(time()-start))
truths, objtruths = None, None
# ADM determine the nside for the passed max_bin_area.
for n in range(1, 25):
nside = 2 ** n
bin_area = hp.nside2pixarea(nside, degrees=True)
if bin_area <= max_bin_area:
break
# ADM calculate HEALPixel numbers once, here, to avoid repeat calculations
# ADM downstream.
pix = footprint.radec2pix(nside, targs["RA"], targs["DEC"])
log.info('Calculated HEALPixel for each target...t = {:.1f}s'
.format(time()-start))
# ADM set up the weight of each HEALPixel, if requested.
weights = np.ones(len(targs))
# ADM a count of the uniq pixels that are covered, useful for area calculations.
uniqpixset = np.array(list(set(pix)))
# ADM the total pixel weight assuming none of the areas are fractional
# ADM or need rewighted (i.e. each pixel's weight is 1).
totalpixweight = len(uniqpixset)
if weight:
# ADM load the imaging weights file.
if imaging_map_file is not None:
pixweight = dtio.load_pixweight_recarray(imaging_map_file, nside)["FRACAREA"]
# ADM determine what HEALPixels each target is in, to set the weights.
fracarea = pixweight[pix]
# ADM weight by 1/(the fraction of each pixel that is in the DESI footprint)
# ADM except for zero pixels, which are all outside of the footprint.
w = np.where(fracarea == 0)
fracarea[w] = 1 # ADM to guard against division by zero warnings.
weights = 1./fracarea
weights[w] = 0
# ADM if we have weights, then redetermine the total pix weight.
totalpixweight = np.sum(pixweight[uniqpixset])
log.info('Assigned weights to pixels based on DESI footprint...t = {:.1f}s'
.format(time()-start))
# ADM calculate the total area (useful for determining overall average densities
# ADM from the total number of targets/the total area).
pixarea = hp.nside2pixarea(nside, degrees=True)
totarea = pixarea*totalpixweight
# ADM Current goal target densities for DESI.
if targdens is None:
targdens = _load_targdens(tcnames=tcnames, bit_mask=bit_mask, mocks=mocks)
if mocks:
dndz = _load_dndz()
# ADM clip the target densities at an upper density to improve plot edges
# ADM by rejecting highly dense outliers.
upclipdict = {k: 5000. for k in targdens}
if bit_mask is not None:
main_mask = bit_mask
else:
main_mask = desi_mask
upclipdict = {'ELG': 4000, 'LRG': 1200, 'QSO': 400, 'ALL': 8000,
'STD_FAINT': 300, 'STD_BRIGHT': 300,
# 'STD_FAINT': 200, 'STD_BRIGHT': 50,
'LRG_1PASS': 1000, 'LRG_2PASS': 500,
'BGS_FAINT': 2500, 'BGS_BRIGHT': 2500, 'BGS_WISE': 2500, 'BGS_ANY': 5000,
'MWS_ANY': 2000, 'MWS_BROAD': 2000, 'MWS_WD': 50, 'MWS_NEARBY': 50,
'MWS_MAIN_RED': 2000, 'MWS_MAIN_BLUE': 2000}
for objtype in targdens:
if 'ALL' in objtype:
w = np.arange(len(targs))
else:
if ('BGS' in objtype) and not('ANY' in objtype) and not(cmx):
w = np.where(targs["BGS_TARGET"] & bgs_mask[objtype])[0]
elif ('MWS' in objtype) and not('ANY' in objtype) and not(cmx):
w = np.where(targs["MWS_TARGET"] & mws_mask[objtype])[0]
else:
w = np.where(targs["DESI_TARGET"] & main_mask[objtype])[0]
if len(w) > 0:
# ADM make RA/Dec skymaps.
qaskymap(targs[w], objtype, qadir=qadir, upclip=upclipdict[objtype],
weights=weights[w], max_bin_area=max_bin_area)
log.info('Made sky map for {}...t = {:.1f}s'
.format(objtype, time()-start))
# ADM make histograms of densities. We already calculated the correctly
# ADM ordered HEALPixels and so don't need to repeat that calculation.
qahisto(pix[w], objtype, qadir=qadir, targdens=targdens, upclip=upclipdict[objtype],
weights=weights[w], max_bin_area=max_bin_area, catispix=True)
log.info('Made histogram for {}...t = {:.1f}s'
.format(objtype, time()-start))
# ADM make color-color plots
qacolor(targs[w], objtype, targs[w], qadir=qadir, fileprefix="color")
log.info('Made color-color plot for {}...t = {:.1f}s'
.format(objtype, time()-start))
# ADM make magnitude histograms
qamag(targs[w], objtype, qadir=qadir, fileprefix="nmag", area=totarea)
log.info('Made magnitude histogram plot for {}...t = {:.1f}s'
.format(objtype, time()-start))
if truths is not None:
# ADM make noiseless color-color plots
qacolor(truths[w], objtype, targs[w], qadir=qadir, mocks=True,
fileprefix="mock-color", nodustcorr=True)
log.info('Made (mock) color-color plot for {}...t = {:.1f}s'
.format(objtype, time()-start))
# ADM make N(z) plots
mock_qanz(truths[w], objtype, qadir=qadir, area=totarea, dndz=dndz,
fileprefixz="mock-nz", fileprefixzmag="mock-zvmag")
log.info('Made (mock) redshift plots for {}...t = {:.1f}s'
.format(objtype, time()-start))
# # ADM plot what fraction of each selected object is actually a contaminant
# mock_qafractype(truths[w], objtype, qadir=qadir, fileprefix="mock-fractype")
# log.info('Made (mock) classification fraction plots for {}...t = {:.1f}s'.format(objtype, time()-start))
# ADM make Gaia-based plots if we have Gaia columns
if "PARALLAX" in targs.dtype.names and np.sum(targs['PARALLAX'] != 0) > 0:
qagaia(targs[w], objtype, qadir=qadir, fileprefix="gaia")
log.info('Made Gaia-based plots for {}...t = {:.1f}s'
.format(objtype, time()-start))
log.info('Made QA plots...t = {:.1f}s'.format(time()-start))
return totarea
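# Hypothetical invocation (the path is illustrative):
# totarea = make_qa_plots('targets-dr8.fits', qadir='qa', max_bin_area=1.0,
#                         tcnames=['QSO', 'LRG', 'ALL'])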
|
a7dffff1273456ac387fe68e71e154f385610ac5
| 3,647,472
|
import itertools

import numpy as np

# Single-qubit Pauli matrices, assumed to be the intended I, X, Y, Z operands
# (regex flags from `re` could not have been the intent, and Y, Z were never
# imported).
I = np.eye(2, dtype=complex)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)
def krauss_basis(qubits):
"""
    Helper function to return the Kraus operator basis formed by the Cartesian
    product of [I, X, Y, Z] for n qubits.
:param qubits: number of qubits
:type qubits: int
    :return: list of 4**n tuples, each containing n single-qubit Pauli matrices
    :rtype: list of tuple of np.ndarray
"""
return [i for i in itertools.product([I, X, Y, Z], repeat=qubits)]
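# Quick sanity check (illustrative, not part of the original snippet): the
# n-qubit basis has 4**n elements.
assert len(krauss_basis(1)) == 4
assert len(krauss_basis(2)) == 16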
|
471ddb5dc1840f162cd8a0f64789b1d0afa2d712
| 3,647,473
|
def choose_fun_cov(str_cov: str) -> constants.TYPING_CALLABLE:
"""
It chooses a covariance function.
:param str_cov: the name of covariance function.
:type str_cov: str.
:returns: covariance function.
:rtype: callable
:raises: AssertionError
"""
assert isinstance(str_cov, str)
if str_cov in ('eq', 'se'):
fun_cov = cov_se
elif str_cov == 'matern32':
fun_cov = cov_matern32
elif str_cov == 'matern52':
fun_cov = cov_matern52
else:
        raise NotImplementedError('choose_fun_cov: allowed str_cov condition, '
                                  'but it is not implemented.')
return fun_cov
|
1c0fd2d06456ec0765186694a9cb0e78a511859e
| 3,647,474
|
import numpy as np
def log_sum_exp(x):
    """Utility function for computing log_sum_exp in a numerically stable way.
    This will be used to determine the unaveraged confidence loss across
    all examples in a batch.
    Args:
        x (ndarray): conf_preds from the conf layers
    """
    x_max = x.max()
    return np.log(np.sum(np.exp(x - x_max), 1, keepdims=True)) + x_max
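# Numerical-stability sketch (illustrative): the naive log(sum(exp(x)))
# overflows here, while the shifted form stays finite.
x_demo = np.array([[1000.0, 1000.0]])
assert np.isclose(log_sum_exp(x_demo), 1000.0 + np.log(2.0))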
|
dc18d31b85c0c29dab39874ba4d4148fef868106
| 3,647,476
|
# `SessionLocal`, `exc` (sqlalchemy.exc) and `CustomException` are assumed to
# be defined in the enclosing module.
def session_hook(func):
    """
    Decorator that opens a database session around `func` (for reads or
    writes) and closes the connection after the call.
    func: function that communicates with the database (e.g. func(db, *args))
    Returns:
        data: the return value of func; raises CustomException on integrity
        errors.
    """
    def run(*args, **kwargs):
        db = SessionLocal()
        try:
            data = func(db, *args, **kwargs)
            return data
        except exc.IntegrityError as e:
            print(e._message())
            db.rollback()
            raise CustomException(error='Operation fail')
        except Exception as e:
            raise Exception(e)
        finally:
            db.close()
    return run
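# Hypothetical usage (model and query are illustrative): the decorator
# injects the session as the first positional argument and always closes it.
# @session_hook
# def get_user(db, user_id):
#     return db.query(User).get(user_id)
#
# user = get_user(42)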
|
74e22a5adbfc470c3dbc068eb4b190608b2b426e
| 3,647,477
|
from datetime import datetime

import pandas as pd
def float_index_to_time_index(df):
"""Convert a dataframe float indices to `datetime64['us']` indices."""
df.index = df.index.map(datetime.utcfromtimestamp)
df.index = pd.to_datetime(df.index, unit="us", utc=True)
return df
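# Sketch (illustrative): a frame indexed by float epoch seconds gains
# tz-aware datetime indices.
# df = pd.DataFrame({"v": [1, 2]}, index=[0.0, 1.5])
# float_index_to_time_index(df).index[1]
# -> Timestamp('1970-01-01 00:00:01.500000+0000', tz='UTC')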
|
5e7d1aa8430afd22ad4e3f931dc39b8a480c3ffa
| 3,647,478
|
import numpy as np

# `cov2corr` is assumed to come from the enclosing module; statsmodels ships
# an equivalent as statsmodels.stats.moment_helpers.cov2corr.
def correlated_hybrid_matrix(data_covmat, theory_covmat=None, theory_corr=None, cap=True, cap_off=0.99):
"""
Given a diagonal matrix data_covmat,
and a theory matrix theory_covmat or its correlation matrix theory_corr,
produce a hybrid non-diagonal matrix that has the same diagonals as the data matrix
but has correlation coefficient given by theory.
"""
if theory_corr is None:
assert theory_covmat is not None
theory_corr = cov2corr(theory_covmat)
r = theory_corr
    def _cap(imat, cval, csel):
        imat[imat > 1] = 1
        imat[imat < -1] = -1
        # fancy indexing with index arrays returns a copy, so cap the
        # off-diagonal entries on a copy and write it back
        off = imat[csel]
        off[off > cval] = cval
        off[off < -cval] = -cval
        imat[csel] = off
d = data_covmat.copy()
sel = np.where(~np.eye(d.shape[0],dtype=bool))
d[sel] = 1
dcorr = 1./cov2corr(d)
if cap: _cap(r,cap_off,sel)
fcorr = dcorr * r
d[sel] = fcorr[sel]
return d
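# Worked sketch (requires the assumed cov2corr): diag(4, 9) data with a
# theory correlation of 0.5 yields off-diagonals 0.5 * sqrt(4 * 9) = 3.
# data = np.diag([4.0, 9.0])
# corr = np.array([[1.0, 0.5], [0.5, 1.0]])
# correlated_hybrid_matrix(data, theory_corr=corr)
# -> array([[4., 3.], [3., 9.]])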
|
0438f7bc0d5aa34506af59b74d990b21713e5e6d
| 3,647,479
|
from collections import OrderedDict
def prepare_config(self, config=None):
"""Set defaults and check fields.
Config is a dictionary of values. Method creates new config using
default class config. Result config keys are the same as default config keys.
Args:
self: object with get_default_config method.
config: User-provided config.
Returns:
Config dictionary with defaults set.
"""
default_config = self.get_default_config()
if config is None:
config = {}
elif isinstance(config, str):
config = read_config(config)
elif not isinstance(config, dict):
raise ConfigError("Config dictionary or filename expected, got {}".format(type(config)))
# Check type.
if CONFIG_TYPE in config:
cls_name = type(self).__name__
if cls_name != config[CONFIG_TYPE]:
raise ConfigError("Type mismatch: expected {}, got {}".format(
config[CONFIG_TYPE], cls_name))
# Sample hyperparameters.
_propagate_hyper_names(config)
if CONFIG_HYPER in config:
# Type of config[CONFIG_HYPER] is checked in _propagate_hyper_names.
config = config.copy()
for key, hopt in config[CONFIG_HYPER].items():
# There can be unexpected hyperparameters for another implementation.
# Skip them.
if key not in default_config:
continue
config[key] = Hyper(**hopt).sample()
# Merge configs.
for key in config:
if key in {CONFIG_TYPE, CONFIG_HYPER}:
continue
if key not in default_config:
value = config[key]
if isinstance(value, dict) and _is_hyper_only_config(value):
# Subconfigs can contain hyper parameters for alternative configurations.
pass
else:
raise ConfigError("Unknown parameter {}".format(key))
new_config = OrderedDict()
for key, value in default_config.items():
new_config[key] = config.get(key, value)
return new_config
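# Hypothetical usage (the class is illustrative): any object exposing
# get_default_config() works; unknown keys raise ConfigError.
# class Trainer:
#     def get_default_config(self):
#         return OrderedDict([("lr", 0.1), ("depth", 3)])
#
# prepare_config(Trainer(), {"lr": 0.01})
# -> OrderedDict([('lr', 0.01), ('depth', 3)])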
|
4876ac8900857cb3962d22f0afe99e6426d1ff5c
| 3,647,480
|
import re
import math
def number_to_block(number, block_number=0):
"""
Given an address number, normalizes it to the block number.
>>> number_to_block(1)
'0'
>>> number_to_block(10)
'0'
>>> number_to_block(100)
'100'
>>> number_to_block(5)
'0'
>>> number_to_block(53)
'0'
>>> number_to_block(153)
'100'
>>> number_to_block(1000)
'1000'
>>> number_to_block(1030)
'1000'
>>> number_to_block(1359)
'1300'
>>> number_to_block(13593)
'13500'
>>> number_to_block('01')
'0'
>>> number_to_block('00')
'0'
>>> number_to_block('foo')
'foo'
>>> number_to_block('3xx')
'300'
>>> number_to_block('3XX')
'300'
>>> number_to_block('3pp')
'3pp'
>>> number_to_block('XX')
'0'
>>> number_to_block('X')
'X'
block_number lets you customize the "XX" of "3XX block".
>>> number_to_block(234, 99)
'299'
>>> number_to_block(12345, 99)
'12399'
"""
number = re.sub('(?i)xx', '00', str(number))
try:
number = int(number)
except (TypeError, ValueError):
return number
return str(int(math.floor(number / 100.0)) * 100 + block_number)
|
1504d79469dccc06e867fbf5a80507566efb5019
| 3,647,481
|
import math

import numpy as np

# `rotate_point_ccw(point, angle)` and `line_dist(a, b, p)` are assumed
# helpers from the enclosing module: the former rotates a 2-D point and
# returns an np.array, the latter returns (distance, t) from point p to the
# segment a-b, with t the projection parameter.
def distance_between_vehicles(self_vhc_pos, self_vhc_orientation, self_vhc_front_length, self_vhc_rear_length,
self_vhc_width, ext_vhc_pos, ext_vhc_orientation, ext_vhc_width, ext_vhc_rear_length,
ext_vhc_front_length):
"""Only in 2-D space (no z-axis in positions)"""
ext_vhc_frnt_left = rotate_point_ccw([-ext_vhc_width, ext_vhc_front_length], -ext_vhc_orientation) + ext_vhc_pos
ext_vhc_frnt_right = rotate_point_ccw([ext_vhc_width, ext_vhc_front_length], -ext_vhc_orientation) + ext_vhc_pos
ext_vhc_rear_left = rotate_point_ccw([-ext_vhc_width, -ext_vhc_rear_length], -ext_vhc_orientation) + ext_vhc_pos
ext_vhc_rear_right = rotate_point_ccw([ext_vhc_width, -ext_vhc_rear_length], -ext_vhc_orientation) + ext_vhc_pos
ext_vhc_frnt_left_in_vhc_coord = rotate_point_ccw(ext_vhc_frnt_left - self_vhc_pos, -self_vhc_orientation)
ext_vhc_frnt_right_in_vhc_coord = rotate_point_ccw(ext_vhc_frnt_right - self_vhc_pos, -self_vhc_orientation)
ext_vhc_rear_left_in_vhc_coord = rotate_point_ccw(ext_vhc_rear_left - self_vhc_pos, -self_vhc_orientation)
ext_vhc_rear_right_in_vhc_coord = rotate_point_ccw(ext_vhc_rear_right - self_vhc_pos, -self_vhc_orientation)
ext_vehicle_lines = [[ext_vhc_frnt_left_in_vhc_coord, ext_vhc_frnt_right_in_vhc_coord],
[ext_vhc_frnt_right_in_vhc_coord, ext_vhc_rear_right_in_vhc_coord],
[ext_vhc_rear_right_in_vhc_coord, ext_vhc_rear_left_in_vhc_coord],
[ext_vhc_rear_left_in_vhc_coord, ext_vhc_frnt_left_in_vhc_coord]]
ext_vehicle_corners = [ext_vhc_frnt_left_in_vhc_coord, ext_vhc_frnt_right_in_vhc_coord,
ext_vhc_rear_right_in_vhc_coord, ext_vhc_rear_left_in_vhc_coord]
    # ego corners in the ego coordinate frame; the rear corners sit at -rear_length
    ego_points = [np.array([-self_vhc_width, self_vhc_front_length]),
                  np.array([self_vhc_width, self_vhc_front_length]),
                  np.array([-self_vhc_width, -self_vhc_rear_length]),
                  np.array([self_vhc_width, -self_vhc_rear_length])]
distance = math.inf
# Compute the minimum distance from each corner of the external vehicle to the edges of the ego vehicle:
# This is easier because the external vehicle is already represented in the ego vehicle's coordinate system.
for ext_vehicle_corner in ext_vehicle_corners:
if -self_vhc_width < ext_vehicle_corner[0] < self_vhc_width:
x_dist = 0.0
elif ext_vehicle_corner[0] > self_vhc_width:
x_dist = ext_vehicle_corner[0] - self_vhc_width
else:
x_dist = -self_vhc_width - ext_vehicle_corner[0]
if -self_vhc_rear_length < ext_vehicle_corner[1] < self_vhc_front_length:
y_dist = 0.0
elif ext_vehicle_corner[1] > self_vhc_front_length:
y_dist = ext_vehicle_corner[1] - self_vhc_front_length
else:
y_dist = -self_vhc_rear_length - ext_vehicle_corner[1]
temp_dist = math.sqrt(x_dist**2 + y_dist**2)
distance = min(distance, temp_dist)
# Compute the minimum distance from each corner of the ego vehicle to the edges of the external vehicle:
for ego_point in ego_points:
num_inside_pts = 0
for ext_vehicle_line in ext_vehicle_lines:
(temp_dist, t) = line_dist(ext_vehicle_line[0], ext_vehicle_line[1], ego_point)
if 0.0001 < t < 0.9999: # NOT (on a line or outside one of the lines).
# When the closest point on the line is one end of the line (t==0 or t==1),
# then the point is outside the rectangle.
num_inside_pts += 1
else:
distance = min(distance, temp_dist)
if num_inside_pts == len(ext_vehicle_lines):
distance = 0.0
if distance == 0.0:
break
return distance
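# The two geometry helpers used above are not shown in the source; the sketch
# below is one plausible implementation matching how they are called
# (assumptions: rotate_point_ccw rotates a 2-D point counter-clockwise by an
# angle in radians, and line_dist returns the point-to-segment distance
# together with the raw projection parameter t).
def rotate_point_ccw(point, angle):
    """Rotate a 2-D point counter-clockwise by `angle` radians."""
    c, s = np.cos(angle), np.sin(angle)
    return np.array([c * point[0] - s * point[1],
                     s * point[0] + c * point[1]])
def line_dist(a, b, p):
    """Distance from point `p` to segment a-b, plus the projection parameter t."""
    a, b, p = np.asarray(a, float), np.asarray(b, float), np.asarray(p, float)
    ab = b - a
    t = np.dot(p - a, ab) / np.dot(ab, ab)  # unclamped, so t in (0, 1) means "between the endpoints"
    closest = a + np.clip(t, 0.0, 1.0) * ab
    return np.linalg.norm(p - closest), t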
|
bcd10598ff83d2ca1e2a03eb759649346151d475
| 3,647,483
|
def searchlight_dictdata(faces, nrings, vertex_list):
"""
Function to generate neighbor vertex relationship for searchlight analysis
The format of dictdata is [label]:[vertices]
Parameters:
-----------
    faces: triangle faces of the surface mesh, as passed to get_n_ring_neighbor
    nrings: neighborhood order, i.e. the number of rings around each vertex
    vertex_list: vertex-index relationship, e.g. vertex_list[29696] = 32492
Returns:
--------
output_vx
"""
output_vx = {}
vertex_list = list(vertex_list)
index_dict = dict((value, idx) for idx,value in enumerate(vertex_list))
for i, vl in enumerate(vertex_list):
print('{0}:{1}'.format(i+1, vl))
neighbor_vxidx = surf_tools.get_n_ring_neighbor(int(vl), faces, n=nrings)[0]
neighbor_vxidx.intersection_update(set(index_dict.keys()))
neighbor_vx = [index_dict[nv] for nv in neighbor_vxidx]
output_vx[i] = neighbor_vx
return output_vx
|
10f89bf6981b474a202e836be0aeeb13afa5f873
| 3,647,484
|
def parse_resource_uri(resource_uri):
"""
Parse a resource uri (like /api/v1/prestataires/1/) and return
the resource type and the object id.
"""
match = resource_pattern.search(resource_uri)
if not match:
raise ValueError("Value %s is not a resource uri." % resource_uri)
return match.group(1), match.group(2)
|
f5c6ef26b1546a5b51c290701863f60c6f518e60
| 3,647,485
|
from typing import Callable, List
def foldl(func: Callable, acc, lst: List):
"""
>>> foldl(lambda x, y: x + y, 0, Nil())
0
>>> foldl(lambda x, y: x + y, 2, from_seq([1, 2, 3]))
8
>>> foldl(lambda x, y: x - y, 1, from_seq([3, 2, 1]))
-5
"""
return acc if null(lst) else foldl(func, func(acc, head(lst)), tail(lst))
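# foldl relies on list helpers (Nil, from_seq, null, head, tail) that are not
# shown in the source; the minimal sketch below is one assumed implementation
# under which the doctests above pass.
def Nil():
    """An empty list."""
    return []
def from_seq(seq):
    """Build a list from any iterable."""
    return list(seq)
def null(lst):
    """True if the list is empty."""
    return len(lst) == 0
def head(lst):
    """First element of the list."""
    return lst[0]
def tail(lst):
    """Everything but the first element."""
    return lst[1:]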
|
397582c1fbdcad4b46f8d64960fc1562aefa9ff8
| 3,647,486
|
def generate_rules(F, support_data, min_confidence=0.5, verbose=True):
"""Generates a set of candidate rules from a list of frequent itemsets.
For each frequent itemset, we calculate the confidence of using a
particular item as the rule consequent (right-hand-side of the rule). By
testing and merging the remaining rules, we recursively create a list of
pruned rules.
Parameters
----------
F : list
A list of frequent itemsets.
support_data : dict
The corresponding support data for the frequent itemsets (L).
    min_confidence : float
        The minimum confidence threshold. Defaults to 0.5.
    verbose : bool
        Passed to the rule-evaluation helpers to control printing. Defaults to True.
Returns
-------
rules : list
The list of candidate rules above the minimum confidence threshold.
"""
rules = []
for i in range(1, len(F)):
for freq_set in F[i]:
H1 = [frozenset([itemset]) for itemset in freq_set]
if (i > 1):
rules_from_conseq(freq_set, H1, support_data, rules, min_confidence, verbose)
else:
calc_confidence(freq_set, H1, support_data, rules, min_confidence, verbose)
return rules
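# A hypothetical input shape for generate_rules (values illustrative): F[i]
# holds the frequent itemsets of size i+1 and support_data maps each frozenset
# to its support, as produced by an Apriori-style frequent-itemset pass.
# F = [
#     [frozenset({'beer'}), frozenset({'diapers'})],   # frequent 1-itemsets
#     [frozenset({'beer', 'diapers'})],                # frequent 2-itemsets
# ]
# support_data = {
#     frozenset({'beer'}): 0.6,
#     frozenset({'diapers'}): 0.7,
#     frozenset({'beer', 'diapers'}): 0.5,
# }
# rules = generate_rules(F, support_data, min_confidence=0.6)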
|
687b2158d5460d9993c10bbded91d01eda4cbfec
| 3,647,487
|
def cond_model(model1, model2):
"""Conditional.
Arguments:
model1 {MentalModel} -- antecedent
model2 {MentalModel} -- consequent
Returns:
MentalModel -- the conditional model
"""
mental = and_model(model1, model2)
mental.ell += 1
fully = merge_fullex(
and_model(model1, model2),
and_model(not_model(model1), not_model(model2)),
and_model(not_model(model1), model2)
)
return merge_mental_and_full(mental, fully)
|
d4d923b10f6140defc59dbf10b682422ff1014a0
| 3,647,488
|
def get_pages():
"""Select all pages and order them by page_order."""
pages = query_db("SELECT page_order, name, shortname, available FROM pages ORDER BY page_order")
return pages
|
b0b3f934c0c7133a798f3d78e195c4d26dcf590b
| 3,647,489
|
def set_default_interface(etree):
"""
Sets the default interface that PyAMF will use to deal with XML entities
(both objects and blobs).
"""
global types, ET, modules
t = _get_etree_type(etree)
_types = set(types or [])
_types.update([t])
types = tuple(_types)
modules[t] = etree
old, ET = ET, etree
return old
|
ed2aee2bb029a3a07d18cfea1b6887d236d5c48c
| 3,647,490
|
from collections import Counter
import base64
from io import BytesIO
import folium
from folium.plugins import Fullscreen, HeatMap, MarkerCluster
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
from branca.element import IFrame
from geopandas import GeoDataFrame
from shapely.geometry import Point, box
from wordcloud import STOPWORDS, WordCloud
# compute_clusters, cluster_shapes and __prepare_popup are project-local
# helpers not shown here.
def return_var_plot(result, attr_name, attr_type, option=0):
"""Method that generates the corresponding plot for each attribute, based
on the type and the selection of the user."""
aval = f'{attr_name}_value'
if attr_type == 'NUMBER' or attr_type == 'DATE_TIME':
if aval not in result[0].keys():
return None
vals = [r[aval] for r in result]
if option == 0:
fig = px.histogram(x=vals)
fig.update_yaxes(title='Frequency')
elif option == 1:
fig = px.box(x=vals)
fig.update_xaxes(title=attr_name.capitalize())
return fig
elif attr_type == 'GEOLOCATION': #location
if aval not in result[0].keys():
return None
pois = [tuple(map(float, r[aval][7:-1].split(' '))) for r in result]
x, y = zip(*pois)
minx, miny, maxx, maxy = min(x), min(y), max(x), max(y)
bb = box(minx, miny, maxx, maxy)
map_center = [bb.centroid.y, bb.centroid.x]
m = folium.Map(location=map_center, tiles='OpenStreetMap', width='100%', height='100%')
m.fit_bounds(([bb.bounds[1], bb.bounds[0]], [bb.bounds[3], bb.bounds[2]]))
m.add_child(Fullscreen())
if option == 0:
coords, popups = [], []
poi_layer = folium.FeatureGroup(name='pois')
for r, yy, xx in zip(result, y, x):
coords.append([yy, xx])
popups.append(__prepare_popup(r))
poi_layer.add_child(MarkerCluster(locations=coords, popups=popups))
m.add_child(poi_layer)
folium.GeoJson(bb).add_to(m)
elif option == 1:
scores = [r['score'] for r in result]
HeatMap(zip(y,x,scores), radius=10).add_to(m)
elif option == 2:
if 'keywords_value' not in result[0].keys():
return None
kwds = [r['keywords_value'] for r in result]
scores = [r['score'] for r in result]
labels, eps = compute_clusters(pois)
pois = [Point(poi) for poi in pois]
d = {'geometry': pois, 'kwd': kwds, 'score': scores, 'cluster_id': labels}
gdf = GeoDataFrame(d, crs="EPSG:4326")
gdf = gdf[gdf.cluster_id >= 0]
aois = cluster_shapes(gdf, eps).set_index('cluster_id')
means = gdf.groupby('cluster_id').agg({'score': 'mean', 'kwd': lambda x: ' '.join(x)})
clustered_keys = pd.concat([aois, means], axis=1).reset_index(drop=False)
bins = list(clustered_keys['score'].quantile([0, 0.25, 0.5, 0.75, 1]))
folium.Choropleth(geo_data=clustered_keys, data=clustered_keys,
columns=['cluster_id','score'], bins=bins,
key_on='feature.properties.cluster_id',
fill_color='YlOrRd', fill_opacity=0.6,
line_opacity=0.5).add_to(m)
wc = WordCloud(width = 200, height = 150, random_state=1,
background_color='salmon', colormap='Pastel1',
collocations=False, stopwords = STOPWORDS)
for index, row in clustered_keys.iterrows():
c = Counter(row['kwd'])
s = wc.generate_from_frequencies(c)
plt.imshow(s, interpolation='bilinear')
plt.axis("off")
buf = BytesIO()
plt.savefig(buf, format='png', bbox_inches='tight')
# Include image popup to the marker
html = '<img src="data:image/PNG;base64,{}" style="width:100%; height:100%; display:block">'.format
encoded = base64.b64encode(buf.getvalue()).decode()
iframe = IFrame(html(encoded), width=300, height=150)
popup = folium.Popup(iframe, min_width=300, max_width=300, parse_html=True) # max_width=2650
buf.close()
folium.GeoJson(row['geometry']).add_child(popup).add_to(m)
return m.get_root().render()
elif attr_type == 'KEYWORD_SET':
if aval not in result[0].keys():
return None
wc = WordCloud(width = 400, height = 300, random_state=1,
background_color='salmon', colormap='Pastel1',
collocations=False, stopwords = STOPWORDS)
c = Counter()
for r in result:
c.update(r[aval])
s = wc.generate_from_frequencies(c)
if option == 0:
fig = px.imshow(s, labels={})
fig.update_xaxes(showticklabels=False)
fig.update_yaxes(showticklabels=False)
fig.update_traces(hovertemplate=None, hoverinfo='skip' )
return fig
elif option == 1:
df = pd.DataFrame(c.most_common(10), columns=['Word', 'Frequency'])
df = df.sort_values('Frequency', ascending=True)
fig = px.bar(df, x="Frequency", y="Word", orientation='h')
fig.update_yaxes(title=None)
return fig
|
e1046bd3b41c9e7827ebf379578cc1d85396345e
| 3,647,492
|
def get_inequivalent_sites(sub_lattice, lattice):
"""Given a sub lattice, returns symmetry unique sites for substitutions.
Args:
sub_lattice (list of lists): array containing Cartesian coordinates
of the sub-lattice of interest
lattice (ASE crystal): the total lattice
Returns:
List of sites
"""
sg = get_sg(lattice)
inequivalent_sites = []
for site in sub_lattice:
new_site = True
# Check against the existing members of the list of inequivalent sites
if len(inequivalent_sites) > 0:
            for inequiv_site in inequivalent_sites:
                if smact.are_eq(site, inequiv_site):
                    new_site = False
                # Check against symmetry related members of the list of inequivalent sites
                equiv_inequiv_sites, _ = sg.equivalent_sites(inequiv_site)
                for equiv_inequiv_site in equiv_inequiv_sites:
                    if smact.are_eq(site, equiv_inequiv_site):
                        new_site = False
        if new_site:
            inequivalent_sites.append(site)
return inequivalent_sites
|
39d8c827cde10053dc5508cb96f0a7d0c8b9d00e
| 3,647,493
|
import torch
from torch.distributions import Normal, kl_divergence
def kld(means, var):
"""KL divergence"""
mean = torch.zeros_like(means)
scale = torch.ones_like(var)
return kl_divergence(Normal(means, torch.sqrt(var)), Normal(mean, scale)).sum(dim=1)
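# Quick sanity check (illustrative): standard-normal posteriors have zero KL
# against the standard-normal prior.
# means = torch.zeros(8, 16); var = torch.ones(8, 16)
# kld(means, var)  # -> tensor of shape (8,), all zeros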
|
43652b302131efc8fa97940bec9918eeb8c97bf3
| 3,647,495
|
def add(vec_1, vec_2):
"""
This function performs vector addition. This is a good place
    to play around with different collection types (list, tuple, set...).
:param vec_1: a subscriptable collection of length 3
:param vec_2: a subscriptable collection of length 3
:return vec_3: a subscriptable collection of length 3
"""
# add two vectors
vec_3 = [float(vec_1[0]) + float(vec_2[0]), float(vec_1[1]) + float(vec_2[1]), float(vec_1[2]) + float(vec_2[2])]
return vec_3
|
4a17a82422cef472decb37c376e8bf5259ade60a
| 3,647,496
|
from typing import Union
from typing import List
from typing import Any
import random
def generateOrnament(fromMIDINote:int, key:Key, mode:ModeNames, bpm:float) -> Union[List[Any],None]:
"""
Generate OSC arguments describing ornaments, with the form:
[ <ornamentName> <BPM> <beatSubdivision> [<listOfOrnamentNoteMIDIOffsets...>] ]
ASSUME This function is called on every beat, or with some organic
regularity so output over time is roughly consistent with itself.
Maintain module internal state to govern frequency and type of ornaments produced.
    Random filters to manage internal state are arbitrary, specific and experimental. YMMV.
Call generateOrnamentReset() to reset ornament module internal state.
"""
ornamentChoice :str = None
ornamentBlob :List[Any] = None
oscArgs :List[Any] = []
fourCount :int = 4
global ornamentState
#
if ornamentState["sixteenthTripletTurnaround"] > 0: # Check existing state.
ornamentState["sixteenthTripletTurnaround"] -= 1
if ornamentState["sixteenthTripletTurnaround"] == 2:
if z.percentTrue(35):
ornamentChoice = "sixteenthPop"
if not ornamentChoice:
if z.percentTrue(70): return None # Frequency to bypass ornaments.
ornamentChoice = random.choice(list(Ornaments.keys()))
#
if "sixteenthLeadIn" == ornamentChoice:
pass
elif "sixteenthPop" == ornamentChoice:
if ornamentState["sixteenthTripletTurnaround"] > 0 \
and ornamentState["sixteenthTripletTurnaround"] != 2:
return None
elif "sixteenthTripletTurnaround" == ornamentChoice:
# Generate no more often than once every fourCount.
# Optionally generate "sixteenthPop" at half-way (above).
#
if ornamentState["sixteenthTripletTurnaround"] > 0:
return None
ornamentState["sixteenthTripletTurnaround"] = fourCount
else:
log.error(f"UNRECOGNIZED ornament choice. ({ornamentChoice})")
return None
#
ornamentBlob = _translateOrnamentScaleToMIDI(ornamentChoice, fromMIDINote, key, mode)
if not ornamentBlob: return None
oscArgs = [ornamentChoice, bpm, ornamentBlob[0]]
oscArgs += ornamentBlob[1]
return oscArgs
|
6bbaa53cd42322474b6a8cf40c698e4edfd32497
| 3,647,497
|
def ms_to_samples(ms, sampling_rate):
"""
Convert a duration in milliseconds into samples.
Arguments:
ms (float):
Duration in ms.
sampling_rate (int):
            Sampling rate of the signal.
Returns:
int: Duration in samples.
"""
return int((ms / 1000) * sampling_rate)
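# Quick check (illustrative): half a second of audio at 16 kHz.
# ms_to_samples(500, 16000)  # -> 8000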
|
a2bf63ad8cca580ae3307c33daa82bb1382d742c
| 3,647,498
|
def flatten(L):
"""Flatten a list recursively
Inspired by this fun discussion: https://stackoverflow.com/questions/12472338/flattening-a-list-recursively
np.array.flatten did not work for irregular arrays
and itertools.chain.from_iterable cannot handle arbitrarily nested lists
:param L: A list to flatten
:return: the flattened list
"""
if L == []:
return L
if isinstance(L[0], list):
return flatten(L[0]) + flatten(L[1:])
return L[:1] + flatten(L[1:])
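# Example (illustrative):
# flatten([1, [2, [3, 4]], 5])  # -> [1, 2, 3, 4, 5]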
|
c554a01a8308341d1c9620edc0783689e75fb526
| 3,647,499
|
def chi2(observed, expected):
"""
    Return the chi2-like sum of the provided observed and expected values,
    where each squared residual is normalized by the squared expected value.
    Returns 0.0 if any expected value is zero.
:param observed: list of floats.
:param expected: list of floats.
:return: chi2 (float).
"""
if 0 in expected:
return 0.0
return sum((_o - _e) ** 2 / _e ** 2 for _o, _e in zip(observed, expected))
|
6050e98a823671de4a518d584a6e39bc519fa610
| 3,647,502
|
import math
def range_bearing(p1: LatLon, p2: LatLon, R: float = NM) -> tuple[float, Angle]:
"""Rhumb-line course from :py:data:`p1` to :py:data:`p2`.
See :ref:`calc.range_bearing`.
This is the equirectangular approximation.
Without even the minimal corrections for non-spherical Earth.
:param p1: a :py:class:`LatLon` starting point
:param p2: a :py:class:`LatLon` ending point
:param R: radius of the earth in appropriate units;
default is nautical miles.
Values include :py:data:`KM` for kilometers,
:py:data:`MI` for statute miles and :py:data:`NM` for nautical miles.
:returns: 2-tuple of range and bearing from p1 to p2.
"""
d_NS = R * (p2.lat.radians - p1.lat.radians)
d_EW = (
R
* math.cos((p2.lat.radians + p1.lat.radians) / 2)
* (p2.lon.radians - p1.lon.radians)
)
d = math.hypot(d_NS, d_EW)
tc = math.atan2(d_EW, d_NS) % (2 * math.pi)
theta = Angle(tc)
return d, theta
|
68860efbea6d8f1b36ff9e7b91a2a3779a57e611
| 3,647,503
|
import json
import logging
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# wd (the model code) and JMP_CONF (the configuration dict) are project-level
# imports not shown here.
def cf_model_to_life(first_best, update_prod=False, pr_cache=False):
"""
We simulate the response of several variables to a shock to z and x.
    We fix the cross-section distribution of (X,Z) and set rho to rho_start.
    We apply a permanent shock to either X or Z, and fix the employment relationship, as well as (X,Z).
    We then simulate forward the rho and the wage, and report several different variables of interest.
"""
nt = 20*4
np.random.seed(JMP_CONF['seeds']['model_to_life'])
# we load the model
model = wd.FullModel.load("res_main_model.pkl")
p = model.p
p.tax_expost_tau = p.tax_tau
p.tax_expost_lambda = p.tax_lambda
# we simulate from the model to get a cross-section
sim = wd.Simulator(model, p)
sdata = sim.simulate().get_sdata()
# we construct the different starting values
tm = sdata['t'].max()
d0 = sdata.query('e==1 & t==@tm')[['x','z','h','r']]
# we start at target rho
R0 = model.target_rho[ (d0['z'],d0['x']) ]
# starting with X and Z shocks
def get_z_pos(pr):
Z1_pos = np.minimum(sdata['z'].max(), d0['z'] + 1)
Z1_pos = np.where(np.random.uniform(size=len(Z1_pos)) > pr, Z1_pos, d0['z'] )
return(Z1_pos)
def get_z_neg(pr):
Z1_neg = np.maximum(0, d0['z'] - 1)
Z1_neg = np.where(np.random.uniform(size=len(Z1_neg)) > pr, Z1_neg, d0['z'] )
return(Z1_neg)
def get_x_pos(pr):
Xtrans_pos = np.array([1,2,3,4,4,6,7,8,9,9,11,12,13,14,14],int)
X1_pos = Xtrans_pos[d0['x']]
X1_pos = np.where(np.random.uniform(size=len(X1_pos)) > pr, X1_pos, d0['x'] )
return(X1_pos)
def get_x_neg(pr):
Xtrans_neg = np.array([0,0,1,2,3, 5,5,6,7,8, 10,10,11,12,13],int)
X1_neg = Xtrans_neg[d0['x']]
X1_neg = np.where( np.random.uniform(size=len(X1_neg)) > pr, X1_neg, d0['x'] )
return(X1_neg)
# simulate a control group
var_name = {'x':r'worker productivity $x$',
'w':r'log earnings $\log w$',
'W1':'worker promised value $V$',
'lceq':'worker cons. eq.',
'Pi':r'firm present value $J(x,z,V)$',
'y':r'log match output $\log f(x,z)$',
'pr_j2j':'J2J probability',
'pr_e2u':'E2U probability',
'target_wage':r'log of target wage $\log w^*(x,z)$',
'vs':'worker search decision $v_1$',
'effort':'effort cost $c(e)$'}
var_list = { k:'mean' for k in var_name.keys() }
def sim_agg(dd):
# compute consumption equivalent for W1
dd['lceq'] = model.pref.log_consumption_eq(dd['W1'])
dd['lpeq'] = model.pref.log_profit_eq(dd['W1'])
return(dd.groupby('t').agg(var_list))
if first_best:
model_fb = wd.FullModel.load("res_main_model_fb.pkl")
for iz in range(model_fb.p.num_z):
for ix in range(model_fb.p.num_x):
model_fb.rho_star[iz,:,ix] = model_fb.rho_grid
sim.model = model_fb
# let's find rho_star for the first best model
I=range(p.num_v)[::-1]
R0_fb = np.zeros((p.num_z,p.num_x))
for ix in range(p.num_x):
for iz in range(p.num_z):
R0_fb[iz,ix] = np.interp( 0.0,
model_fb.Vf_J[iz,I,ix],
model_fb.rho_grid[I])
R0 = R0_fb[ (d0['z'],d0['x']) ]
sdata0 = sim_agg(sim.simulate_force_ee(d0['x'],d0['z'],d0['h'],R0, nt, update_x=False, update_z=False, pb=True))
# we run for a grid of probabilities
if pr_cache:
with open("res_cf_pr_fb{}.json".format(first_best)) as f:
all = json.load(f)
else:
all = []
vec = np.linspace(0,1,10)
for i in range(len(vec)):
logging.info("simulating {}/{}".format(i, len(vec)))
res = {}
res['pr'] = vec[i]
pr = vec[i]
res['x_pos'] = sim.simulate_force_ee(
get_x_pos(pr), d0['z'],d0['h'],R0, nt,
update_x=False, update_z=False, pb=True)['y'].mean()
res['x_neg'] = sim.simulate_force_ee(
get_x_neg(pr), d0['z'],d0['h'],R0, nt,
update_x=False, update_z=False, pb=True)['y'].mean()
res['z_pos'] = sim.simulate_force_ee(
d0['x'], get_z_pos(pr), d0['h'],R0, nt,
update_x=False, update_z=False, pb=True)['y'].mean()
res['z_neg'] = sim.simulate_force_ee(
d0['x'], get_z_neg(pr), d0['h'],R0, nt,
update_x=False, update_z=False, pb=True)['y'].mean()
all.append(res)
# save to file!
# with open("res_cf_pr_fb{}.json".format(first_best), 'w') as fp:
# json.dump(all, fp)
df = pd.DataFrame(all)
df = df.sort_values(['x_pos'])
pr_x_pos = np.interp( sdata0['y'].mean() + 0.1, df['x_pos'] , df['pr'] )
df = df.sort_values(['x_neg'])
pr_x_neg = np.interp( sdata0['y'].mean() - 0.1, df['x_neg'] , df['pr'] )
df = df.sort_values(['z_pos'])
pr_z_pos = np.interp( sdata0['y'].mean() + 0.1, df['z_pos'] , df['pr'] )
df = df.sort_values(['z_neg'])
pr_z_neg = np.interp( sdata0['y'].mean() - 0.1, df['z_neg'] , df['pr'] )
logging.info(" chosen probability x pos:{}".format(pr_x_pos))
logging.info(" chosen probability x neg:{}".format(pr_x_neg))
logging.info(" chosen probability z pos:{}".format(pr_z_pos))
logging.info(" chosen probability z neg:{}".format(pr_z_neg))
sdata0 = sim_agg(sim.simulate_force_ee(d0['x'],d0['z'],d0['h'],R0, nt, update_x=update_prod, update_z=update_prod, pb=True))
    # finally we simulate at the probabilities that we have chosen.
sdata_x_pos = sim_agg(sim.simulate_force_ee(
get_x_pos(pr_x_pos),d0['z'],d0['h'],R0, nt,
update_x=update_prod, update_z=update_prod,pb=True))
sdata_x_neg = sim_agg(sim.simulate_force_ee(
get_x_neg(pr_x_neg),d0['z'],d0['h'],R0, nt,
update_x=update_prod, update_z=update_prod,pb=True))
sdata_z_pos = sim_agg(sim.simulate_force_ee(
d0['x'],get_z_pos(pr_z_pos),d0['h'],R0, nt,
update_x=update_prod, update_z=update_prod,pb=True))
sdata_z_neg = sim_agg(sim.simulate_force_ee(
d0['x'],get_z_neg(pr_z_neg),d0['h'],R0, nt,
update_x=update_prod, update_z=update_prod,pb=True))
# preparing the lead and lag plots
pp0 = lambda v : np.concatenate([ np.zeros(5), v ])
ppt = lambda v : np.concatenate([ [-4,-3,-2,-1,0], v ])
to_plot = {'w','pr_j2j','pr_e2u','vs','effort','Pi','y','W1','target_wage'}
to_plot = {k:v for k,v in var_name.items() if k in to_plot}
# Z shock response
plt.clf()
# plt.rcParams["figure.figsize"]=12,12
plt.figure(figsize=(12, 12), dpi=80)
for i,name in enumerate(to_plot.keys()):
plt.subplot(3, 3, i+1)
plt.plot( ppt (sdata0.index/4) , pp0(sdata_z_pos[name] - sdata0[name]) )
plt.plot( ppt (sdata0.index/4) , pp0(sdata_z_neg[name] - sdata0[name]), linestyle='--')
#plt.plot( ppt (sdata0.index/4) , pp0(sdata_z_pos_fb[name] - sdata0[name]), linestyle='dashdot')
#plt.plot( ppt (dd0.index/4) , pp0(sdata_x_pos[name] - sdata0[name]) )
#plt.plot( ppt (dd0.index/4) , pp0(sdata_x_neg[name] - sdata0[name]) )
plt.axhline(0,linestyle=':',color="black")
plt.xlabel(var_name[name])
#plt.xlabel('years')
plt.xticks(range(0,21,5))
plt.ticklabel_format(axis="y", style="sci", scilimits=(-3,5))
plt.subplots_adjust(hspace=0.3, wspace=0.3)
if first_best:
plt.savefig('../figures/figurew6-ir-zshock-fb.pdf', bbox_inches='tight')
else:
plt.savefig('../figures/figure4-ir-zshock.pdf', bbox_inches='tight')
plt.clf()
# plt.rcParams["figure.figsize"]=12,12
plt.figure(figsize=(12, 12), dpi=80)
for i,name in enumerate(to_plot.keys()):
plt.subplot(3, 3, i+1)
plt.plot( ppt (sdata0.index/4) , pp0(sdata_x_pos[name] - sdata0[name]) )
plt.plot( ppt (sdata0.index/4) , pp0(sdata_x_neg[name] - sdata0[name]) ,ls='--')
#plt.plot( ppt (dd0.index/4) , pp0(sdata_x_pos[name] - sdata0[name]) )
#plt.plot( ppt (dd0.index/4) , pp0(sdata_x_neg[name] - sdata0[name]) )
plt.axhline(0,linestyle=':',color="black")
plt.xlabel(var_name[name])
plt.xticks(range(0,21,5))
plt.ticklabel_format(axis="y", style="sci", scilimits=(-3,5))
plt.subplots_adjust(hspace=0.3, wspace=0.3)
if first_best:
plt.savefig('../figures/figurew5-ir-xshock-fb.pdf', bbox_inches='tight')
else:
plt.savefig('../figures/figure3-ir-xshock.pdf', bbox_inches='tight')
|
131fd2a0edb202adacafd9a6416fecb7a1f77dc7
| 3,647,504
|
import geopandas as gpd
import numpy as np
from scipy import stats
# make_gridpoints and contour_geojson are package-local helpers not shown here.
def kde_interpolation(poi, bw='scott', grid=None, resolution=1, area=None, return_contour_geojson=False):
"""Applies kernel density estimation to a set points-of-interest
measuring the density estimation on a grid of places (arbitrary points
regularly spaced).
Parameters
----------
poi : GeoDataFrame.
Corresponds to input data.
bw : 'scott', 'silverman' or float.
        The bandwidth for kernel density estimation. See the `scipy docs`_ on the bw_method parameter of gaussian_kde.
grid : GeoDataFrame or None, default is None.
If a grid is not given, then it is provided according to the area parameter
and resolution.
resolution : float, default is 1.
Space in kilometers between the arbitrary points of resulting grid.
area : GeoDataFrame or None, default is None.
        If area is given, the grid will be bounded by the GeoDataFrame passed.
return_contour_geojson : bool, default is False.
If True, it returns the result of the kde as a contourplot in the geojson format.
Returns
-------
GeoDataFrame with a grid of points regularly spaced with the respective
density values for the input points-of-interest given.
Example
-------
>>> import geohunter as gh
>>> poi = gh.osm.Eagle().get(bbox='(-5.91,-35.29,-5.70,-35.15)',
amenity=['hospital' , 'police'], natural='*')
>>> neighborhood = gh.osm.Eagle().get(bbox='(-5.91,-35.29,-5.70,-35.15)',
largest_geom=True,
name='Ponta Negra')
>>> result = kde_interpolation(poi, bw='scott', area=neighborhood, resolution=0.5)
>>> ax = area.plot(edgecolor='black', color='white')
>>> result.plot(column='density', ax=ax)
.. _scipy docs:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.html
"""
lonv, latv = None, None
if grid is None and area is None:
raise ValueError('grid or area must be given.')
if grid is None and isinstance(area, gpd.GeoDataFrame):
grid, lonv, latv = make_gridpoints(area, resolution, return_coords=True)
assert isinstance(poi, gpd.GeoDataFrame)
kernel = stats.gaussian_kde(np.vstack([poi.centroid.x, poi.centroid.y]),
bw_method=bw)
grid_ = grid[:]
grid_['density'] = kernel(grid[['lon', 'lat']].values.T)
if return_contour_geojson:
assert lonv is not None and latv is not None, \
"grid should not be passed for this operation. Try to pass area and pick a resolution level."
return contour_geojson(grid_['density'], lonv, latv,
cmin=grid_['density'].min(),
cmax=grid_['density'].max())
else:
return grid_
|
f0473e459e42075a3ad4070325aecb229b6b2d89
| 3,647,505
|
def nums2tcrs(nums):
    """Converts a list containing lists of numbers to amino acid sequences. Each number is considered to be an index of the alphabet."""
    tcrs_letter = []
    for num in nums:
        tcr = ''.join(alphabet[j] for j in num)
        tcrs_letter.append(tcr)
    return tcrs_letter
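# Illustrative use, assuming `alphabet` is a module-level amino-acid string
# such as 'ACDEFGHIKLMNPQRSTVWY' (the actual alphabet is not shown here):
# nums2tcrs([[0, 2, 1], [4]])  # -> ['ADC', 'F']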
|
3f366e0bd593b799c7e88c84d583e7c6aeee066f
| 3,647,506
|
def extract_columns(data):
""" EXTRACTS COLUMNS TO USE IN `DictWriter()` """
columns = []
column_headers = data[0]
for key in column_headers:
columns.append(key)
return columns
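# Example (illustrative): the keys of the first row become the column headers.
# extract_columns([{'name': 'Ada', 'year': 1842}])  # -> ['name', 'year']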
|
6df143107612d311ab3c8870b9eccd3528ac3802
| 3,647,507
|
import numpy
def cylindric_grid(dr, dz, origin_z=None, layer=False, material="dfalt"):
"""
Generate a cylindric mesh as a radial XZ structured grid.
Parameters
----------
dr : array_like
Grid spacing along X axis.
dz : array_like
Grid spacing along Z axis.
origin_z : scalar, optional, default None
Depth of origin point.
layer : bool, optional, default False
If `True`, mesh will be generated by layers.
material : str, optional, default 'dfalt'
Default material name.
Returns
-------
toughio.Mesh
Output cylindric mesh.
"""
    if not isinstance(dr, (list, tuple, numpy.ndarray)):
        raise TypeError("dr must be a list, tuple or numpy.ndarray")
    if not isinstance(dz, (list, tuple, numpy.ndarray)):
        raise TypeError("dz must be a list, tuple or numpy.ndarray")
    if not (origin_z is None or isinstance(origin_z, (int, float))):
        raise TypeError("origin_z must be a scalar or None")
    if not isinstance(material, str):
        raise TypeError("material must be a str")
    dr = numpy.asarray(dr)
    dz = numpy.asarray(dz)
    if not (dr > 0.0).all():
        raise ValueError("dr must be strictly positive")
    if not (dz > 0.0).all():
        raise ValueError("dz must be strictly positive")
origin_z = origin_z if origin_z is not None else -dz.sum()
mesh = structured_grid(
dr,
[1.0],
dz,
origin=[0.0, -0.5, origin_z],
layer=layer,
material=material,
)
return CylindricMesh(
dr,
dz,
layer,
points=mesh.points,
cells=mesh.cells,
point_data=mesh.point_data,
cell_data=mesh.cell_data,
field_data=mesh.field_data,
)
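# Illustrative call (values made up): a 10 m radius, 10 m deep cylinder
# discretized into 1.0 m x 0.5 m cells.
# mesh = cylindric_grid(dr=[1.0] * 10, dz=[0.5] * 20)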
|
bf710bc212068ec76eb19edce3e8493689535697
| 3,647,508
|
import urllib.parse
from decimal import Decimal
from boto3.dynamodb.conditions import Key
# ddb_resource and CLIP_PREVIEW_FEEDBACK_TABLE_NAME are module-level globals
# not shown here.
def get_clip_preview_feedback(program, event, classifier, start_time, audio_track, reviewer):
"""
Gets the feedback provided by a user for a Segment's clip
Returns:
        Feedback if present. Empty dictionary if no feedback exists.
"""
event = urllib.parse.unquote(event)
program = urllib.parse.unquote(program)
classifier = urllib.parse.unquote(classifier)
start_time = Decimal(urllib.parse.unquote(start_time))
tracknumber = urllib.parse.unquote(audio_track)
clip_preview_table = ddb_resource.Table(CLIP_PREVIEW_FEEDBACK_TABLE_NAME)
response = clip_preview_table.query(
KeyConditionExpression=Key("PK").eq(
f"{program}#{event}#{classifier}#{str(start_time)}#{str(tracknumber)}#{reviewer}")
)
if "Items" not in response or len(response["Items"]) == 0:
return {}
return response["Items"][0]
|
578952869606951057b8b8797698c320a02d1d00
| 3,647,509
|
import ast
import numpy
def interp(specStr, t):
"""Return the current value of t using linear interpolation.
<specStr> is a string containing a list of pairs e.g. '[[0,20],[30,65],[60,50],[90,75]]'
The first element of each pair is DAYS. The second is a NUMBER.
<t> is time in seconds"""
specList = ast.literal_eval(specStr)
X = [i[0] for i in specList]
Y = [i[1] for i in specList]
day = t/(60*60*24.0)
return numpy.interp(day,X,Y)
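# Example (illustrative): the value 15 days in, halfway between day 0 and day 30.
# interp('[[0,20],[30,65],[60,50],[90,75]]', 15 * 24 * 60 * 60)  # -> 42.5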
|
bc60affe122f2d17044e01a01509231e71eda47d
| 3,647,510
|
from bs4 import BeautifulSoup
def time_is(location):
"""
    Retrieves the time in a location by parsing the time element in the HTML from Time.is.
:param location: str location of the place you want to find time (works for small towns as well).
:return: time str or None on failure.
"""
if BeautifulSoup:
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/51.0.2704.106 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
'Referrer': 'http://time.is/',
}
post_url = 'http://time.is/' + str(location)
time_data = util.web.http_get(post_url, header=header)
time_html = time_data['content']
soup = BeautifulSoup(time_html, "html.parser")
time = ''
try:
for hit in soup.findAll(attrs={'id': 'twd'}):
time = hit.contents[0].strip()
except KeyError:
pass
return time
else:
return None
|
e8f6675199f070fcad7eead98187683b48417757
| 3,647,511
|
import logging
def _generate_template_context(arguments: PackagingResourceArguments,
manifest: OdahuProjectManifest,
output_folder: str) -> DockerTemplateContext:
"""
Generate Docker packager context for templates
"""
logging.info('Building context for template')
return DockerTemplateContext(
model_name=manifest.model.name,
model_version=manifest.model.version,
odahuflow_version=manifest.odahuflowVersion,
timeout=arguments.timeout,
host=arguments.host,
port=arguments.port,
workers=arguments.workers,
threads=arguments.threads,
pythonpath=output_folder,
wsgi_handler=f'{HANDLER_MODULE}:{HANDLER_APP}',
model_location=ODAHU_SUB_PATH_NAME,
entrypoint_target=ENTRYPOINT_TEMPLATE,
handler_file=f'{HANDLER_MODULE}.py',
base_image=arguments.dockerfileBaseImage,
conda_file_name=CONDA_FILE_NAME,
conda_server_file_name=CONDA_SERVER_FILE_NAME,
entrypoint_docker=ENTRYPOINT_TEMPLATE
)
|
e973a44949d6d2df8bfcbf0be42b8214d1c95352
| 3,647,512
|
def get_records(fname):
"""
Read the records of an IRAF database file into a python list
Parameters
----------
fname : str
name of an IRAF database file
Returns
-------
A list of records
"""
f = open(fname)
dtb = f.read()
f.close()
recs = dtb.split('begin')[1:]
records = [Record(r) for r in recs]
return records
|
a1eb4500afcd1379db1efe8674c1ff256f2861b5
| 3,647,513
|
from typing import List
def get_all_clips_matching_filter(fid: int) -> List[Clip]:
"""
    Gets all the clips that are part of the project and match the filter.
    :param fid: The id of the filter the clips should match
    :return: A list of all clips that are part of the project and match the filter
"""
filter = get_filter_by_id(fid)
assert filter is not None
clips = get_all_clips_in_project(filter.project.id)
res = []
for clip in clips:
if clip.clip_match_filter(filter):
res.append(clip)
return res
|
eb69bf40ad397e970d85b425d4c2c0b25ee345fc
| 3,647,514
|
def get_gushim():
"""
get gush_id metadata
"""
detailed = request.args.get('detailed', '') == 'true'
gushim = helpers._get_gushim(fields={'gush_id': True, 'last_checked_at': True, '_id': False})
if detailed:
# Flatten list of gushim into a dict
g_flat = dict((g['gush_id'], {"gush_id": g['gush_id'],
"last_checked_at": g['last_checked_at'],
"plan_stats": {}}) for g in gushim)
# Get plan statistics from DB
stats = helpers._get_plan_statistics()
# Merge stats into gushim dict
for g in stats['result']:
try:
gush_id = g['_id']['gush_id']
status = g['_id']['status']
g_flat[gush_id]['plan_stats'][status] = g['count']
            except KeyError as e:
# Gush has plans but is missing from list of gushim?
app.logger.warn("Gush #%d has plans but is not listed in the Gushim list", gush_id)
app.log_exception(e)
# De-flatten our dict
gushim = g_flat.values()
return helpers._create_response_json(gushim)
|
93a941090f515bb726e305856ec6e0ea644b5a34
| 3,647,515
|
def dump_source(buf, id):
"""Dump BASIC source."""
if id == ID_SP5030:
line_end_code = 0x0d
src_end_code = 0x0000
kind = "SP-5030"
elif id == ID_SBASIC:
line_end_code = 0x00
src_end_code = 0x0000
kind = "S-BASIC"
elif id == ID_HUBASIC:
line_end_code = 0x00
src_end_code = 0x0000
kind = "Hu-BASIC"
else:
return 1
if not found_word_endcode(buf, src_end_code):
print("Not found %s end code (0x%04X)" % (kind, src_end_code))
return 1
p = 0
while True:
line_length = get_word(buf, p)
if line_length == src_end_code:
# Found Source end code
break
# get 1 line data
line = buf[p:p + line_length]
if get_last_byte(line) != line_end_code:
print("Not found %s line end code (0x%02X)" % (kind, line_end_code))
return 1
line_number = get_word(line, 2)
if id == ID_SP5030:
lstr = get_line_sp5030(line, 4, line_end_code)
elif id == ID_SBASIC:
lstr = get_line_sbasic(line, 4, line_end_code)
elif id == ID_HUBASIC:
lstr = get_line_hubasic(line, 4, line_end_code)
if jp_flag:
# print("%d %s" % (line_number, lstr.encode('utf-8')))
print("%d %s" % (line_number, lstr.encode('cp932')))
else:
print("%d %s" % (line_number, lstr))
p += line_length
return 0
|
598fe1d9dd4be6f1c651be4f81bc9f8290496c3a
| 3,647,516
|
import tensorflow as tf  # TF1-style API (tf.layers, tf.variable_scope); FLAGS is a module-level config object
def dense_layers(sequences, training, regularizer, initializer,
num_layers=3, activation=tf.nn.relu):
"""
Create a chain of dense (fully-connected) neural network layers.
Args:
sequences (tf.Tensor): Input sequences.
training (bool): Whether the mode is training or not.
regularizer: TF weight reqularizer.
initializer: TF weight initializer.
        num_layers (int): Number of dense layers to stack. Defaults to 3.
activation (function): TF activation function.
Returns:
tf.Tensor: Output tensor.
"""
with tf.variable_scope('dense'):
output = sequences
for _ in range(num_layers):
output = tf.layers.dense(output, FLAGS.num_units_dense,
activation=activation,
kernel_initializer=initializer,
kernel_regularizer=regularizer)
output = tf.minimum(output, FLAGS.relu_cutoff)
output = tf.layers.dropout(output, rate=FLAGS.dense_dropout_rate, training=training)
# output = [batch_size, time, num_units_dense]
return output
|
72cebd7eb6487555c3efe8e6c14954dc2886e0c3
| 3,647,517
|
def apply_cst(im, cst):
    """ Applies CST matrix to image.
    Args:
      im: input ndarray image (height x width x channel).
      cst: a 3x3 CST matrix.
    Returns:
      transformed image.
    """
    result = im.copy()  # copy, so channel 0 is not overwritten before it is read
    for c in range(3):
        result[:, :, c] = (cst[c, 0] * im[:, :, 0] + cst[c, 1] * im[:, :, 1] +
                           cst[c, 2] * im[:, :, 2])
    return result
|
7c63d07413bad5fcebf2dfe5f83f205d16280957
| 3,647,518
|
from typing import Tuple
import torch
import torch.nn as nn
import torch.utils.data as data_utils
from sklearn.metrics import precision_recall_curve, roc_auc_score
def show_binary_classification_accuracy(best_m: nn.Module, local_loader: data_utils.DataLoader, chatty = False) -> Tuple:
"""
Given the model and dataloader, calculate the classification accuracy.
    Returns accuracy, true_positives, true_negatives, false_positives, false_negatives, roc_auc, pr, pred_list, lab_list for use elsewhere.
:param best_m:
:param local_loader:
:return:
"""
correct = 0; total = 0
false_positives = 0
false_negatives = 0
true_positives = 0
true_negatives = 0
pred_list = []
lab_list = []
with torch.no_grad():
for data, labels in local_loader:
outputs = best_m(data)
predicted = torch.argmax(outputs, dim=1)
#print(predicted)
#print(labels.shape[0])
total += labels.shape[0]
#print(labels.shape[0])
#print(labels)
correct += int((predicted == labels).sum())
pred_list.extend(predicted.detach().flatten().numpy())
lab_list.extend(labels.detach().flatten().numpy())
            #Calculate false positives, etc.
            for kt in zip(predicted, labels):
                if kt[0] == kt[1] == 1:
                    true_positives += 1
                elif kt[0] == kt[1] == 0:
                    true_negatives += 1
                elif kt[0] == 1 and kt[1] == 0:
                    false_positives += 1  # predicted positive, label negative
                elif kt[0] == 0 and kt[1] == 1:
                    false_negatives += 1  # predicted negative, label positive
accuracy = correct/total
print("Accuracy: %f" % (accuracy))
    auc = roc_auc_score(lab_list, pred_list)  # sklearn expects (y_true, y_score)
    pr = precision_recall_curve(lab_list, pred_list)  # likewise (y_true, probas_pred)
if chatty:
print("True Positives", true_positives, " False Positives", false_positives, f" at {false_positives/(total-correct):.2f}")
print("True Negatives", true_negatives, " False Negatives", false_negatives, f" at {false_negatives/(total-correct):.2f}")
return accuracy, true_positives, true_negatives, false_positives, false_negatives, auc, pr, pred_list, lab_list
|
7743c51a8f64c46c625ccc3b8737b9553f79334f
| 3,647,519
|
def block_device_mapping_destroy(context, bdm_id):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy(context, bdm_id)
|
2ad0fcc50721fe30e4d48f691420393748bf9df3
| 3,647,522
|
def feedback(olsys,H=1):
"""Calculate the closed-loop transfer function
olsys
cltf = --------------
1+H*olsys
where olsys is the transfer function of the open loop
system (Gc*Gp) and H is the transfer function in the feedback
loop (H=1 for unity feedback)."""
clsys=olsys/(1.0+H*olsys)
return clsys
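# Quick check (illustrative): unity feedback around a static gain of 4 gives a
# closed-loop gain of 4 / (1 + 4) = 0.8. The same expression works for transfer
# function objects that overload the arithmetic operators.
# feedback(4.0)  # -> 0.8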
|
ca78d05196068746a225038c0f401faad24c5f65
| 3,647,523
|
from typing import List
import numpy as np
def get_sigma_grid(
init_sigma: float = 1.0, factor: int = 2, n_grid_points: int = 20
) -> List[float]:
"""Get a standard parameter grid for the cross validation strategy.
Parameters
----------
init_sigma : float, default=1.0
The initial sigma to use to populate the grid points.
factor : int, default=2
The log scale factor to use for both the beginning and end of the grid.
n_grid_points : int, default=20
The number of grid points to use.
Returns
-------
param_grid : List[float]
The parameter grid as per the specifications
Example
-------
    >> param_grid = get_sigma_grid()
    >> param_grid = get_sigma_grid(10.0, 3, 1_000)
"""
# create bounds for search space (logscale)
init_space = 10 ** (-factor)
end_space = 10 ** (factor)
# create param grid
param_grid = np.logspace(
np.log10(init_sigma * init_space),
np.log10(init_sigma * end_space),
n_grid_points,
)
return param_grid
|
33e49127bb2e116b8c209446ad1f614c44e5e128
| 3,647,524
|
import tensorflow as tf  # TF1-style API; DEFAULTS, ALL_COLUMNS and LABEL_COLUMN are module-level constants
def parse_csv(value_column):
"""Parses a CSV file based on the provided column types."""
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(ALL_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
classes = tf.cast(label, tf.int32) - 1
return features, classes
|
11d0f0508fd369ab50df45f71340d8336da676c0
| 3,647,525
|
def on_over_limit():
""" This is called when the rate limit is reached """
return jsonify(status='error', error=[_('Whoa, calm down and wait a bit before posting again.')])
|
f954abb1de5746ca49bbdff02894c1fe75fed106
| 3,647,526
|
def comment(strng,indent=''):
"""return an input string, commented out"""
template = indent + '# %s'
lines = [template % s for s in strng.splitlines(True)]
return ''.join(lines)
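# Example (illustrative):
# comment("a\nb", indent="  ")  # -> '  # a\n  # b'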
|
42386b7ed8de9127d7224481a5f5315d39b6ae97
| 3,647,527
|
def square(number):
"""
    Calculates how many grains were on a given chessboard square
    :param number: the square number, from 1 to 64
    :return: the number of grains on that square, i.e. 2 ** (number - 1)
"""
if number <= 0 or not number or number > 64:
raise ValueError(ERROR)
return 2**(number - 1)
|
dd8d6f9dc95632effaf7bc8a705ffddd1de6c825
| 3,647,528
|
def health_check() -> ControllerResponse:
"""
Retrieve the current health of service integrations.
Returns
-------
dict
Response content.
int
HTTP status code.
dict
Response headers.
"""
status = {}
for name, obj in _getServices():
logger.info('Getting status of %s' % name)
status[name] = _healthy_session(obj)
return status, 200, {}
|
1915deb5283aac2c0ced935c66dbd3d1f5564e33
| 3,647,530
|
def _get_script():
    """Get path to the image sequence script"""
    try:
        from openpype.scripts import publish_filesequence
    except Exception:
        raise RuntimeError("Expected module 'publish_filesequence' "
                           "to be available")
    module_path = publish_filesequence.__file__
    if module_path.endswith(".pyc"):
        module_path = module_path[:-len(".pyc")] + ".py"
    return module_path
|
8efa4f24ed070b859a8e406275feb1c989d6fb6c
| 3,647,532
|
import mxnet as mx
def residual_unit(data, nchw_inshape, num_filter, stride, dim_match, name, bottle_neck=True,
workspace=256, memonger=False, conv_layout='NCHW', batchnorm_layout='NCHW',
verbose=False, cudnn_bn_off=False, bn_eps=2e-5, bn_mom=0.9, conv_algo=-1,
fuse_bn_relu=False, fuse_bn_add_relu=False, cudnn_tensor_core_only=False):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
    nchw_inshape : tuple of int
        Input minibatch shape in (n, c, h, w) format independent of actual layout
    num_filter : int
        Number of output channels
    bottle_neck : Boolean
        If True, use the 1x1-3x3-1x1 bottleneck unit; otherwise the plain 3x3-3x3 unit
stride : tuple
Stride used in convolution
dim_match : Boolean
True means channel number between input and output is the same, otherwise means differ
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
Returns
-------
(sym, nchw_outshape)
sym : the model symbol (up to this point)
nchw_outshape : tuple
(batch_size, features, height, width)
"""
nchw_shape = nchw_inshape
act = 'relu' if fuse_bn_relu else None
if bottle_neck:
conv1 = mx.sym.Convolution(data=data, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0))
bn1 = batchnorm(data=conv1, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn1', cudnn_off=cudnn_bn_off, act_type=act)
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1') if not fuse_bn_relu else bn1
conv2 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1))
bn2 = batchnorm(data=conv2, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn2', cudnn_off=cudnn_bn_off, act_type=act)
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2') if not fuse_bn_relu else bn2
conv3 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0))
if dim_match:
shortcut = data
else:
conv1sc = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_conv1sc', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
sc_nchw_shape = conv_nchw_out_shape(nchw_inshape, num_filter=num_filter, kernel=(1,1), stride=stride)
shortcut = batchnorm(data=conv1sc, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_sc', cudnn_off=cudnn_bn_off)
if memonger:
shortcut._set_attr(mirror_stage='True')
if fuse_bn_add_relu:
return (batchnorm_add_relu(data=conv3, addend=shortcut, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn3', cudnn_off=cudnn_bn_off),
nchw_shape)
else:
bn3 = batchnorm(data=conv3, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn3', cudnn_off=cudnn_bn_off)
return (mx.sym.Activation(data=bn3 + shortcut, act_type='relu', name=name + '_relu3'),
nchw_shape)
else:
conv1 = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1))
bn1 = batchnorm(data=conv1, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, momentum=bn_mom, eps=bn_eps, name=name + '_bn1', cudnn_off=cudnn_bn_off, act_type=act)
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1') if not fuse_bn_relu else bn1
conv2 = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1))
if dim_match:
shortcut = data
else:
conv1sc = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_conv1sc', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
sc_nchw_shape = conv_nchw_out_shape(nchw_inshape, num_filter=num_filter, kernel=(1,1), stride=stride)
shortcut = batchnorm(data=conv1sc, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, momentum=bn_mom, eps=bn_eps, name=name + '_sc', cudnn_off=cudnn_bn_off)
if memonger:
shortcut._set_attr(mirror_stage='True')
if fuse_bn_add_relu:
return (batchnorm_add_relu(data=conv2, addend=shortcut, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, momentum=bn_mom, eps=bn_eps, name=name + '_bn2', cudnn_off=cudnn_bn_off),
nchw_shape)
else:
bn2 = batchnorm(data=conv2, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, momentum=bn_mom, eps=bn_eps, name=name + '_bn2', cudnn_off=cudnn_bn_off)
return (mx.sym.Activation(data=bn2 + shortcut, act_type='relu', name=name + '_relu2'),
nchw_shape)
|
a67edaf2a40a75619b389a6de8e8d20397b4df20
| 3,647,533
|