Schema (column name and dtype). Each record that follows lists its metadata fields, its `content` field, and its remaining numeric columns in schema order.

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
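The dump does not say how these rows are physically stored. As a rough sketch, assuming the records were exported to a Parquet file (the file name `code_sample.parquet` below is hypothetical), the schema and a few of the per-file statistics could be inspected with pandas like this:

import pandas as pd

# Hypothetical file name; the actual storage format of this dump is not stated.
df = pd.read_parquet("code_sample.parquet")

print(df.dtypes)  # should mirror the schema table above

# Look at a few metadata columns and surface statistics for the Python files.
python_files = df[df["lang"] == "Python"]
print(python_files[["max_stars_repo_name", "size", "avg_line_length", "alphanum_fraction"]].head())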
hexsha: 91b5d5a9da8d21cc54215371e88cbf75203f4ad6 | size: 374 | ext: py | lang: Python
max_stars_repo_path: tut2.py | max_stars_repo_name: ankit98040/TKINTER-JIS | max_stars_repo_head_hexsha: 8b650138bf8ab2449da83e910ee33c0caee69a8d | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tut2.py | max_issues_repo_name: ankit98040/TKINTER-JIS | max_issues_repo_head_hexsha: 8b650138bf8ab2449da83e910ee33c0caee69a8d | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tut2.py | max_forks_repo_name: ankit98040/TKINTER-JIS | max_forks_repo_head_hexsha: 8b650138bf8ab2449da83e910ee33c0caee69a8d | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from tkinter import *
from PIL import Image, ImageTk
#python image library
#imagetk supports jpg image
a1 = Tk()
a1.geometry("455x244")
#for png image
#photo = PhotoImage(file="filename.png")
#a2 = Label(image = photo)
#a2.pack()
image = Image.open("PJXlVd.jpg")
photo = ImageTk.PhotoImage(image)
a2 = Label(image = photo)
a2.pack()
a1.mainloop()
avg_line_length: 17 | max_line_length: 41 | alphanum_fraction: 0.671123
quality signals (qsc_*_quality_signal columns, in schema order): 51 | 374 | 4.921569 | 0.509804 | 0.119522 | 0.095618 | 0.135458 | 0.183267 | 0.183267 | 0 | 0 | 0 | 0 | 0 | 0.043046 | 0.192513 | 374 | 22 | 42 | 17 | 0.788079 | 0.352941 | 0 | 0 | 0 | 0 | 0.078704 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0
remaining qsc_* columns (without the _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
hexsha: 91b7b2d421c1a0795b99655b4fa4a8c0503e4114 | size: 1,056 | ext: py | lang: Python
max_stars_repo_path: design_patterns/chapter5/mymath.py | max_stars_repo_name: FeliciaMJ/PythonLearningJourney | max_stars_repo_head_hexsha: ae1bfac872ee29256e69df6e0e8e507321404cba | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: design_patterns/chapter5/mymath.py | max_issues_repo_name: FeliciaMJ/PythonLearningJourney | max_issues_repo_head_hexsha: ae1bfac872ee29256e69df6e0e8e507321404cba | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: design_patterns/chapter5/mymath.py | max_forks_repo_name: FeliciaMJ/PythonLearningJourney | max_forks_repo_head_hexsha: ae1bfac872ee29256e69df6e0e8e507321404cba | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2021-04-04T00:27:29.000Z | max_forks_repo_forks_event_max_datetime: 2021-06-05T03:26:53.000Z
content:
# coding: utf-8
import functools


def memoize(fn):
    known = dict()

    @functools.wraps(fn)
    def memoizer(*args):
        if args not in known:
            known[args] = fn(*args)
        return known[args]
    return memoizer


@memoize
def nsum(n):
    '''Return the sum of the first n numbers.'''
    assert(n >= 0), 'n must be >= 0'
    return 0 if n == 0 else n + nsum(n-1)


@memoize
def fibonacci(n):
    '''Return the nth number of the Fibonacci sequence.'''
    assert(n >= 0), 'n must be >= 0'
    return n if n in (0, 1) else fibonacci(n-1) + fibonacci(n-2)


if __name__ == '__main__':
    from timeit import Timer
    measure = [{'exec': 'fibonacci(100)', 'import': 'fibonacci',
                'func': fibonacci}, {'exec': 'nsum(200)', 'import': 'nsum',
                'func': nsum}]
    for m in measure:
        t = Timer('{}'.format(m['exec']), 'from __main__ import \
{}'.format(m['import']))
        print('name: {}, doc: {}, executing: {}, time: \
{}'.format(m['func'].__name__, m['func'].__doc__,
           m['exec'], t.timeit()))
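Not part of the original file, but a quick sketch of what the decorator buys (assuming the file above is importable as a module named `mymath`, per its path): with the cache in place, the recursive calls each compute a value once, so `fibonacci(100)` finishes immediately, whereas the uncached recursion would be exponential.

# Hypothetical interactive usage of the memoized functions defined above.
from mymath import fibonacci, nsum

print(nsum(200))       # 20100
print(fibonacci(100))  # returns instantly; intermediate results are cached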
avg_line_length: 25.142857 | max_line_length: 75 | alphanum_fraction: 0.507576
quality signals (qsc_*_quality_signal columns, in schema order): 131 | 1,056 | 3.938931 | 0.358779 | 0.011628 | 0.031008 | 0.034884 | 0.085271 | 0.085271 | 0.085271 | 0.085271 | 0 | 0 | 0 | 0.024523 | 0.304924 | 1,056 | 41 | 76 | 25.756098 | 0.678474 | 0.035985 | 0 | 0.142857 | 0 | 0 | 0.142999 | 0 | 0 | 0 | 0 | 0 | 0.071429 | 1 | 0.142857 | false | 0 | 0.214286 | 0 | 0.5 | 0.035714
remaining qsc_* columns (without the _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
hexsha: 91b88e3d926b20d74b8739d087b18e11fc2bf047 | size: 343 | ext: py | lang: Python
max_stars_repo_path: pyhsms/core/connectionstate.py | max_stars_repo_name: cherish-web/pyhsms | max_stars_repo_head_hexsha: 83a88b8b45bf1aba30cb7572f44a02478009052b | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2021-05-01T12:02:12.000Z | max_stars_repo_stars_event_max_datetime: 2021-05-03T14:37:27.000Z
max_issues_repo_path: pyhsms/core/connectionstate.py | max_issues_repo_name: cherish-web/pyhsms | max_issues_repo_head_hexsha: 83a88b8b45bf1aba30cb7572f44a02478009052b | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: pyhsms/core/connectionstate.py | max_forks_repo_name: cherish-web/pyhsms | max_forks_repo_head_hexsha: 83a88b8b45bf1aba30cb7572f44a02478009052b | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# _*_ coding: utf-8 _*_
#@Time : 2020/7/29 09:49 AM
#@Author : cherish_peng
#@Email : 1058386071@qq.com
#@File : connectionstate.py
#@Software : PyCharm
from enum import Enum


class ConnectionState(Enum):
    '''
    ConnectionState enum
    '''
    DisConnected = 0
    Connecting = 1
    Connected = 2
    Selected = 3
    Retry = 4
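A minimal usage sketch (not part of the original file), assuming the package is importable under the path shown in the record's metadata; in HSMS terms a driver would typically walk the states from DisConnected up to Selected before exchanging data messages:

# Hypothetical usage of the enum defined above.
from pyhsms.core.connectionstate import ConnectionState

state = ConnectionState.Connecting
# ... once the TCP connection and the HSMS Select procedure succeed ...
state = ConnectionState.Selected
if state is ConnectionState.Selected:
    print(state.name, state.value)  # prints: Selected 3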
avg_line_length: 21.4375 | max_line_length: 32 | alphanum_fraction: 0.626822
quality signals (qsc_*_quality_signal columns, in schema order): 41 | 343 | 5.121951 | 0.878049 | 0.180952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105882 | 0.25656 | 343 | 16 | 33 | 21.4375 | 0.717647 | 0.524781 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 1 | 0
remaining qsc_* columns (without the _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
hexsha: 91b96455218c552cfb88f8804f7f9440605930b5 | size: 84,787 | ext: py | lang: Python
max_stars_repo_path: lifelines/fitters/coxph_fitter.py | max_stars_repo_name: msanpe/lifelines | max_stars_repo_head_hexsha: a73d441f6347332ca870bf2ec32eeeca410dc6de | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: lifelines/fitters/coxph_fitter.py | max_issues_repo_name: msanpe/lifelines | max_issues_repo_head_hexsha: a73d441f6347332ca870bf2ec32eeeca410dc6de | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: lifelines/fitters/coxph_fitter.py | max_forks_repo_name: msanpe/lifelines | max_forks_repo_head_hexsha: a73d441f6347332ca870bf2ec32eeeca410dc6de | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-
import time
from datetime import datetime
import warnings
from textwrap import dedent, fill
import numpy as np
import pandas as pd
from numpy.linalg import norm, inv
from scipy.linalg import solve as spsolve, LinAlgError
from scipy.integrate import trapz
from scipy import stats
from lifelines.fitters import BaseFitter, Printer
from lifelines.plotting import set_kwargs_drawstyle
from lifelines.statistics import chisq_test, proportional_hazard_test, TimeTransformers, StatisticalResult
from lifelines.utils.lowess import lowess
from lifelines.utils.concordance import _concordance_summary_statistics, _concordance_ratio
from lifelines.utils import (
_get_index,
_to_list,
_to_tuple,
_to_1d_array,
inv_normal_cdf,
normalize,
qth_survival_times,
coalesce,
check_for_numeric_dtypes_or_raise,
check_low_var,
check_complete_separation,
check_nans_or_infs,
StatError,
ConvergenceWarning,
StatisticalWarning,
StepSizer,
ConvergenceError,
string_justify,
interpolate_at_times_and_return_pandas,
CensoringType,
interpolate_at_times,
format_p_value,
)
__all__ = ["CoxPHFitter"]
class BatchVsSingle:
@staticmethod
def decide(batch_mode, n_unique, n_total, n_vars):
frac_dups = n_unique / n_total
if batch_mode or (
# https://github.com/CamDavidsonPilon/lifelines/issues/591 for original issue.
# new values from the perf/batch_vs_single script.
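# Rough reading of the heuristic (my interpretation): the polynomial below,
# fit offline in the perf script, estimates the batch algorithm's runtime
# relative to the single-pass one from n_total, frac_dups and n_vars;
# "batch" is chosen when the predicted ratio is below 1.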
(batch_mode is None)
and (
(
6.876218e-01
+ -1.796993e-06 * n_total
+ -1.204271e-11 * n_total ** 2
+ 1.912500e00 * frac_dups
+ -8.121036e-01 * frac_dups ** 2
+ 4.916605e-06 * n_total * frac_dups
+ -5.888875e-03 * n_vars
+ 5.473434e-09 * n_vars * n_total
)
< 1
)
):
return "batch"
return "single"
class CoxPHFitter(BaseFitter):
r"""
This class implements fitting Cox's proportional hazard model:
.. math:: h(t|x) = h_0(t) \exp((x - \overline{x})' \beta)
Parameters
----------
alpha: float, optional (default=0.05)
the level in the confidence intervals.
tie_method: string, optional
specify how the fitter should deal with ties. Currently only
'Efron' is available.
penalizer: float, optional (default=0.0)
Attach an L2 penalizer to the size of the coefficients during regression. This improves
stability of the estimates and controls for high correlation between covariates.
For example, this shrinks the absolute value of :math:`\beta_i`.
The penalty is :math:`\frac{1}{2} \text{penalizer} ||\beta||^2`.
strata: list, optional
specify a list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
Examples
--------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>> rossi = load_rossi()
>>> cph = CoxPHFitter()
>>> cph.fit(rossi, 'week', 'arrest')
>>> cph.print_summary()
Attributes
----------
params_ : Series
The estimated coefficients. Changed in version 0.22.0: used to be ``.hazards_``
hazard_ratios_ : Series
The exp(coefficients)
confidence_intervals_ : DataFrame
The lower and upper confidence intervals for the hazard coefficients
durations: Series
The durations provided
event_observed: Series
The event_observed variable provided
weights: Series
The weights provided
variance_matrix_ : numpy array
The variance matrix of the coefficients
strata: list
the strata provided
standard_errors_: Series
the standard errors of the estimates
score_: float
the concordance index of the model.
baseline_hazard_: DataFrame
baseline_cumulative_hazard_: DataFrame
baseline_survival_: DataFrame
"""
_KNOWN_MODEL = True
def __init__(self, alpha=0.05, tie_method="Efron", penalizer=0.0, strata=None):
super(CoxPHFitter, self).__init__(alpha=alpha)
if penalizer < 0:
raise ValueError("penalizer parameter must be >= 0.")
if tie_method != "Efron":
raise NotImplementedError("Only Efron is available at the moment.")
self.alpha = alpha
self.tie_method = tie_method
self.penalizer = penalizer
self.strata = strata
@CensoringType.right_censoring
def fit(
self,
df,
duration_col=None,
event_col=None,
show_progress=False,
initial_point=None,
strata=None,
step_size=None,
weights_col=None,
cluster_col=None,
robust=False,
batch_mode=None,
):
"""
Fit the Cox proportional hazard model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights, strata).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' event was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights. In that case, use `robust=True` to get more accurate standard errors.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence
diagnostics. Useful if convergence is failing.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
strata: list or string, optional
specify a column or list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
step_size: float, optional
set an initial step size for the fitting algorithm. Setting to 1.0 may improve performance, but could also hurt convergence.
robust: boolean, optional (default=False)
Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
ties, so if there are high number of ties, results may significantly differ. See
"The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074- 1078
cluster_col: string, optional
specifies what column has unique identifiers for clustering covariances. Using this forces the sandwich estimator (robust variance estimator) to
be used.
batch_mode: bool, optional
enabling batch_mode can be faster for datasets with a large number of ties. If left as None, lifelines will choose the best option.
Returns
-------
self: CoxPHFitter
self with additional new properties: ``print_summary``, ``hazards_``, ``confidence_intervals_``, ``baseline_survival_``, etc.
Note
----
Tied survival times are handled using Efron's tie-method.
Examples
--------
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E')
>>> cph.print_summary()
>>> cph.predict_median(df)
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'weights': [1.1, 0.5, 2.0, 1.6, 1.2, 4.3, 1.4, 4.5, 3.0, 3.2, 0.4, 6.2],
>>> 'month': [10, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E', strata=['month', 'age'], robust=True, weights_col='weights')
>>> cph.print_summary()
>>> cph.predict_median(df)
"""
if duration_col is None:
raise TypeError("duration_col cannot be None.")
self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC"
self.duration_col = duration_col
self.event_col = event_col
self.robust = robust
self.cluster_col = cluster_col
self.weights_col = weights_col
self._n_examples = df.shape[0]
self._batch_mode = batch_mode
self.strata = coalesce(strata, self.strata)
X, T, E, weights, original_index, self._clusters = self._preprocess_dataframe(df)
self.durations = T.copy()
self.event_observed = E.copy()
self.weights = weights.copy()
if self.strata is not None:
self.durations.index = original_index
self.event_observed.index = original_index
self.weights.index = original_index
self._norm_mean = X.mean(0)
self._norm_std = X.std(0)
X_norm = normalize(X, self._norm_mean, self._norm_std)
params_ = self._fit_model(
X_norm, T, E, weights=weights, initial_point=initial_point, show_progress=show_progress, step_size=step_size
)
self.params_ = pd.Series(params_, index=X.columns, name="coef") / self._norm_std
self.hazard_ratios_ = pd.Series(np.exp(self.params_), index=X.columns, name="exp(coef)")
self.variance_matrix_ = -inv(self._hessian_) / np.outer(self._norm_std, self._norm_std)
self.standard_errors_ = self._compute_standard_errors(X_norm, T, E, weights)
self.confidence_intervals_ = self._compute_confidence_intervals()
self._predicted_partial_hazards_ = (
self.predict_partial_hazard(X)
.rename(columns={0: "P"})
.assign(T=self.durations.values, E=self.event_observed.values, W=self.weights.values)
.set_index(X.index)
)
self.baseline_hazard_ = self._compute_baseline_hazards()
self.baseline_cumulative_hazard_ = self._compute_baseline_cumulative_hazard()
self.baseline_survival_ = self._compute_baseline_survival()
if hasattr(self, "_concordance_score_"):
# we have already fit the model.
del self._concordance_score_
return self
def _preprocess_dataframe(self, df):
# this should be a pure function
df = df.copy()
if self.strata is not None:
df = df.sort_values(by=_to_list(self.strata) + [self.duration_col])
original_index = df.index.copy()
df = df.set_index(self.strata)
else:
df = df.sort_values(by=self.duration_col)
original_index = df.index.copy()
# Extract time and event
T = df.pop(self.duration_col)
E = (
df.pop(self.event_col)
if (self.event_col is not None)
else pd.Series(np.ones(self._n_examples), index=df.index, name="E")
)
W = (
df.pop(self.weights_col)
if (self.weights_col is not None)
else pd.Series(np.ones((self._n_examples,)), index=df.index, name="weights")
)
_clusters = df.pop(self.cluster_col).values if self.cluster_col else None
X = df.astype(float)
T = T.astype(float)
# we check nans here because converting to bools maps NaNs to True..
check_nans_or_infs(E)
E = E.astype(bool)
self._check_values(X, T, E, W)
return X, T, E, W, original_index, _clusters
def _check_values(self, X, T, E, W):
check_for_numeric_dtypes_or_raise(X)
check_nans_or_infs(T)
check_nans_or_infs(X)
check_low_var(X)
check_complete_separation(X, E, T, self.event_col)
# check to make sure their weights are okay
if self.weights_col:
if (W.astype(int) != W).any() and not self.robust:
warnings.warn(
"""It appears your weights are not integers, possibly propensity or sampling scores then?
It's important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the call to `fit`, or b) use Monte Carlo to
estimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis"
""",
StatisticalWarning,
)
if (W <= 0).any():
raise ValueError("values in weight column %s must be positive." % self.weights_col)
def _fit_model(
self,
X,
T,
E,
weights=None,
initial_point=None,
step_size=None,
precision=1e-07,
show_progress=True,
max_steps=50,
): # pylint: disable=too-many-statements,too-many-branches
"""
Newton-Raphson algorithm for fitting the CPH model.
Note
----
The data is assumed to be sorted on T!
Parameters
----------
X: (n,d) Pandas DataFrame of observations.
T: (n) Pandas Series representing observed durations.
E: (n) Pandas Series representing death events.
weights: (n) an iterable representing weights per observation.
initial_point: (d,) numpy array of initial starting point for
NR algorithm. Default 0.
step_size: float, optional
> 0.001 to determine a starting step size in NR algorithm.
precision: float, optional
the convergence halts if the norm of delta between
successive positions is less than epsilon.
show_progress: boolean, optional
since the fitter is iterative, show convergence
diagnostics.
max_steps: int, optional
the maximum number of iterations of the Newton-Raphson algorithm.
Returns
-------
beta: (1,d) numpy array.
"""
self.path = []
assert precision <= 1.0, "precision must be less than or equal to 1."
_, d = X.shape
# make sure betas are correct size.
if initial_point is not None:
assert initial_point.shape == (d,)
beta = initial_point
else:
beta = np.zeros((d,))
step_sizer = StepSizer(step_size)
step_size = step_sizer.next()
# Method of choice is just efron right now
if self.tie_method == "Efron":
decision = BatchVsSingle.decide(self._batch_mode, T.nunique(), X.shape[0], X.shape[1])
get_gradients = getattr(self, "_get_efron_values_%s" % decision)
self._batch_mode = decision == "batch"
else:
raise NotImplementedError("Only Efron is available.")
i = 0
converging = True
ll, previous_ll = 0, 0
start = time.time()
while converging:
self.path.append(beta.copy())
i += 1
if self.strata is None:
h, g, ll = get_gradients(X.values, T.values, E.values, weights.values, beta)
else:
g = np.zeros_like(beta)
h = np.zeros((beta.shape[0], beta.shape[0]))
ll = 0
for _h, _g, _ll in self._partition_by_strata_and_apply(X, T, E, weights, get_gradients, beta):
g += _g
h += _h
ll += _ll
if i == 1 and np.all(beta == 0):
# this is a neat optimization, the null partial likelihood
# is the same as the full partial but evaluated at zero.
# if the user supplied a non-trivial initial point, we need to delay this.
self._ll_null_ = ll
if self.penalizer > 0:
# add the gradient and hessian of the l2 term
g -= self.penalizer * beta
h.flat[:: d + 1] -= self.penalizer
# reusing a piece to make g * inv(h) * g.T faster later
try:
inv_h_dot_g_T = spsolve(-h, g, assume_a="pos", check_finite=False)
except ValueError as e:
if "infs or NaNs" in str(e):
raise ConvergenceError(
"""Hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
e,
)
else:
# something else?
raise e
except LinAlgError as e:
raise ConvergenceError(
"""Convergence halted due to matrix inversion problems. Suspicion is high collinearity. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
e,
)
delta = inv_h_dot_g_T
if np.any(np.isnan(delta)):
raise ConvergenceError(
"""delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
"""
)
# Save these as pending result
hessian, gradient = h, g
norm_delta = norm(delta)
# reusing an above piece to make g * inv(h) * g.T faster.
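# The quantity g' (-H)^{-1} g / 2 computed next is the Newton decrement: an
# estimate of how far the current log-likelihood is from its maximum, checked
# as one of the stopping rules below.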
newton_decrement = g.dot(inv_h_dot_g_T) / 2
if show_progress:
print(
"\rIteration %d: norm_delta = %.5f, step_size = %.4f, ll = %.5f, newton_decrement = %.5f, seconds_since_start = %.1f"
% (i, norm_delta, step_size, ll, newton_decrement, time.time() - start),
end="",
)
# convergence criteria
if norm_delta < precision:
converging, completed = False, True
elif previous_ll != 0 and abs(ll - previous_ll) / (-previous_ll) < 1e-09:
# this is what R uses by default
converging, completed = False, True
elif newton_decrement < precision:
converging, completed = False, True
elif i >= max_steps:
# 50 iteration steps with N-R is a lot.
# Expected convergence is ~10 steps
converging, completed = False, False
elif step_size <= 0.00001:
converging, completed = False, False
elif abs(ll) < 0.0001 and norm_delta > 1.0:
warnings.warn(
"The log-likelihood is getting suspiciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. \
See https://stats.stackexchange.com/q/11109/11867 for more.\n",
ConvergenceWarning,
)
converging, completed = False, False
beta += step_size * delta
previous_ll = ll
step_size = step_sizer.update(norm_delta).next()
self._hessian_ = hessian
self._score_ = gradient
self.log_likelihood_ = ll
if show_progress and completed:
print("Convergence completed after %d iterations." % (i))
elif show_progress and not completed:
print("Convergence failed. See any warning messages.")
# report to the user problems that we detect.
if completed and norm_delta > 0.1:
warnings.warn(
"Newton-Rhaphson convergence completed but norm(delta) is still high, %.3f. This may imply non-unique solutions to the maximum likelihood. Perhaps there is collinearity or complete separation in the dataset?\n"
% norm_delta,
ConvergenceWarning,
)
elif not completed:
warnings.warn(
"Newton-Rhaphson failed to converge sufficiently in %d steps.\n" % max_steps, ConvergenceWarning
)
return beta
def _get_efron_values_single(self, X, T, E, weights, beta):
"""
Calculates the first and second order vector differentials, with respect to beta.
Note that X, T, E are assumed to be sorted on T!
A good way to understand Efron's handling of ties: consider three of five subjects who fail at the same time.
Since it is not known a priori which of them fails first, one-third of
(φ1 + φ2 + φ3) is subtracted from sum_{j=1}^{5} φj after the first failure. Similarly, two-thirds
of (φ1 + φ2 + φ3) is subtracted after the first two individuals fail, etc.
From https://cran.r-project.org/web/packages/survival/survival.pdf:
"Setting all weights to 2 for instance will give the same coefficient estimate but halve the variance. When
the Efron approximation for ties (default) is employed replication of the data will not give exactly the same coefficients as the
weights option, and in this case the weighted fit is arguably the correct one."
Parameters
----------
X: array
(n,d) numpy array of observations.
T: array
(n) numpy array representing observed durations.
E: array
(n) numpy array representing death events.
weights: array
(n) an array representing weights per observation.
beta: array
(1, d) numpy array of coefficients.
Returns
-------
hessian:
(d, d) numpy array,
gradient:
(1, d) numpy array
log_likelihood: float
"""
n, d = X.shape
hessian = np.zeros((d, d))
gradient = np.zeros((d,))
log_lik = 0
# Init risk and tie sums to zero
x_death_sum = np.zeros((d,))
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
# Init number of ties and weights
weight_count = 0.0
tied_death_counts = 0
scores = weights * np.exp(np.dot(X, beta))
phi_x_is = scores[:, None] * X
phi_x_x_i = np.empty((d, d))
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
xi = X[i]
w = weights[i]
# Calculate phi values
phi_i = scores[i]
phi_x_i = phi_x_is[i]
# https://stackoverflow.com/a/51481295/1895939
phi_x_x_i = np.multiply.outer(xi, phi_x_i)
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
risk_phi_x = risk_phi_x + phi_x_i
risk_phi_x_x = risk_phi_x_x + phi_x_x_i
# Calculate sums of Ties, if this is an event
if ei:
x_death_sum = x_death_sum + w * xi
tie_phi = tie_phi + phi_i
tie_phi_x = tie_phi_x + phi_x_i
tie_phi_x_x = tie_phi_x_x + phi_x_x_i
# Keep track of count
tied_death_counts += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tied_death_counts == 0:
# Only censored with current time, move on
continue
# There was at least one event and no more ties remain. Time to sum.
# This code is near identical to the _batch algorithm below. In fact, see _batch for comments.
weighted_average = weight_count / tied_death_counts
if tied_death_counts > 1:
increasing_proportion = np.arange(tied_death_counts) / tied_death_counts
denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)
a1 = np.einsum("ab,i->ab", risk_phi_x_x, denom) - np.einsum(
"ab,i->ab", tie_phi_x_x, increasing_proportion * denom
)
else:
denom = 1.0 / np.array([risk_phi])
numer = risk_phi_x
a1 = risk_phi_x_x * denom
summand = numer * denom[:, None]
a2 = summand.T.dot(summand)
gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
log_lik = log_lik + np.dot(x_death_sum, beta) + weighted_average * np.log(denom).sum()
hessian = hessian + weighted_average * (a2 - a1)
# reset tie values
tied_death_counts = 0
weight_count = 0.0
x_death_sum = np.zeros((d,))
tie_phi = 0
tie_phi_x = np.zeros((d,))
tie_phi_x_x = np.zeros((d, d))
return hessian, gradient, log_lik
@staticmethod
def _trivial_log_likelihood_batch(T, E, weights):
# used for log-likelihood test
n = T.shape[0]
log_lik = 0
_, counts = np.unique(-T, return_counts=True)
risk_phi = 0
pos = n
for count_of_removals in counts:
slice_ = slice(pos - count_of_removals, pos)
weights_at_t = weights[slice_]
phi_i = weights_at_t
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i.sum()
# Calculate the sums of Tie set
deaths = E[slice_]
tied_death_counts = deaths.astype(int).sum()
if tied_death_counts == 0:
# no deaths, can continue
pos -= count_of_removals
continue
weights_deaths = weights_at_t[deaths]
weight_count = weights_deaths.sum()
if tied_death_counts > 1:
tie_phi = phi_i[deaths].sum()
factor = np.log(risk_phi - np.arange(tied_death_counts) * tie_phi / tied_death_counts).sum()
else:
factor = np.log(risk_phi)
log_lik = log_lik - weight_count / tied_death_counts * factor
pos -= count_of_removals
return log_lik
@staticmethod
def _trivial_log_likelihood_single(T, E, weights):
# assumes sorted on T!
log_lik = 0
n = T.shape[0]
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
# Init number of ties and weights
weight_count = 0.0
tied_death_counts = 0
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
# Calculate phi values
phi_i = weights[i]
w = weights[i]
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
# Calculate sums of Ties, if this is an event
if ei:
tie_phi = tie_phi + phi_i
# Keep track of count
tied_death_counts += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tied_death_counts == 0:
# Only censored with current time, move on
continue
if tied_death_counts > 1:
factor = np.log(risk_phi - np.arange(tied_death_counts) * tie_phi / tied_death_counts).sum()
else:
factor = np.log(risk_phi)
log_lik = log_lik - weight_count / tied_death_counts * factor
# reset tie values
tied_death_counts = 0
weight_count = 0.0
tie_phi = 0
return log_lik
def _get_efron_values_batch(self, X, T, E, weights, beta): # pylint: disable=too-many-locals
"""
Assumes sorted on ascending on T
Calculates the first and second order vector differentials, with respect to beta.
A good way to understand how Efron handles ties: consider three of five subjects who fail at the same time.
Since it is not known a priori which of them fails first, one-third of
(φ1 + φ2 + φ3) is subtracted from sum_{j=1}^{5} φj after the first failure. Similarly, two-thirds
of (φ1 + φ2 + φ3) is subtracted after the first two individuals fail, etc.
Returns
-------
hessian: (d, d) numpy array,
gradient: (1, d) numpy array
log_likelihood: float
"""
n, d = X.shape
hessian = np.zeros((d, d))
gradient = np.zeros((d,))
log_lik = 0
# weights = weights[:, None]
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
# counts are sorted by -T
_, counts = np.unique(-T, return_counts=True)
scores = weights * np.exp(np.dot(X, beta))
pos = n
ZERO_TO_N = np.arange(counts.max())
for count_of_removals in counts:
slice_ = slice(pos - count_of_removals, pos)
X_at_t = X[slice_]
weights_at_t = weights[slice_]
deaths = E[slice_]
phi_i = scores[slice_, None]
phi_x_i = phi_i * X_at_t
phi_x_x_i = np.dot(X_at_t.T, phi_x_i)
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i.sum()
risk_phi_x = risk_phi_x + (phi_x_i).sum(0)
risk_phi_x_x = risk_phi_x_x + phi_x_x_i
# Calculate the sums of Tie set
tied_death_counts = deaths.sum()
if tied_death_counts == 0:
# no deaths, can continue
pos -= count_of_removals
continue
"""
I think there is another optimization that can be made if we sort on
T and E. Using some accounting, we can skip all the [death] indexing below.
"""
xi_deaths = X_at_t[deaths]
weights_deaths = weights_at_t[deaths]
x_death_sum = np.einsum("a,ab->b", weights_deaths, xi_deaths)
weight_count = weights_deaths.sum()
weighted_average = weight_count / tied_death_counts
if tied_death_counts > 1:
# a lot of this is now in Einstein notation for performance, but see original "expanded" code here
# https://github.com/CamDavidsonPilon/lifelines/blob/e7056e7817272eb5dff5983556954f56c33301b1/lifelines/fitters/coxph_fitter.py#L755-L789
# it's faster if we can skip computing these when we don't need to.
phi_x_i_deaths = phi_x_i[deaths]
tie_phi = phi_i[deaths].sum()
tie_phi_x = (phi_x_i_deaths).sum(0)
tie_phi_x_x = np.dot(xi_deaths.T, phi_x_i_deaths)
increasing_proportion = ZERO_TO_N[:tied_death_counts] / tied_death_counts
denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)
# computes outer products and sums them together.
# Naive approach is to
# 1) broadcast tie_phi_x_x and increasing_proportion into a (tied_death_counts, d, d) matrix
# 2) broadcast risk_phi_x_x and denom into a (tied_death_counts, d, d) matrix
# 3) subtract them, and then sum to (d, d)
# Alternatively, we can sum earlier without having to explicitly create (_, d, d) matrices. This is used here.
#
a1 = np.einsum("ab,i->ab", risk_phi_x_x, denom) - np.einsum(
"ab,i->ab", tie_phi_x_x, increasing_proportion * denom
)
else:
# no tensors here, but do some casting to make it easier in the converging step next.
denom = 1.0 / np.array([risk_phi])
numer = risk_phi_x
a1 = risk_phi_x_x * denom
summand = numer * denom[:, None]
# This is a batch outer product.
# given a matrix t, for each row, m, compute its outer product: m.dot(m.T), and stack these new matrices together.
# which would be: np.einsum("Bi, Bj->Bij", t, t)
a2 = summand.T.dot(summand)
gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
log_lik = log_lik + np.dot(x_death_sum, beta) + weighted_average * np.log(denom).sum()
hessian = hessian + weighted_average * (a2 - a1)
pos -= count_of_removals
return hessian, gradient, log_lik
def _partition_by_strata(self, X, T, E, weights, as_dataframes=False):
for stratum, stratified_X in X.groupby(self.strata):
stratified_E, stratified_T, stratified_W = (E.loc[[stratum]], T.loc[[stratum]], weights.loc[[stratum]])
if not as_dataframes:
yield (stratified_X.values, stratified_T.values, stratified_E.values, stratified_W.values), stratum
else:
yield (stratified_X, stratified_T, stratified_E, stratified_W), stratum
def _partition_by_strata_and_apply(self, X, T, E, weights, function, *args):
for (stratified_X, stratified_T, stratified_E, stratified_W), _ in self._partition_by_strata(X, T, E, weights):
yield function(stratified_X, stratified_T, stratified_E, stratified_W, *args)
def _compute_martingale(self, X, T, E, _weights, index=None):
# TODO: _weights unused
partial_hazard = self.predict_partial_hazard(X)[0].values
if not self.strata:
baseline_at_T = self.baseline_cumulative_hazard_.loc[T, "baseline cumulative hazard"].values
else:
baseline_at_T = np.empty(0)
for name, T_ in T.groupby(by=self.strata):
baseline_at_T = np.append(baseline_at_T, self.baseline_cumulative_hazard_[name].loc[T_])
martingale = E - (partial_hazard * baseline_at_T)
return pd.DataFrame(
{self.duration_col: T.values, self.event_col: E.values, "martingale": martingale.values}, index=index
)
def _compute_deviance(self, X, T, E, weights, index=None):
df = self._compute_martingale(X, T, E, weights, index)
rmart = df.pop("martingale")
with np.warnings.catch_warnings():
np.warnings.filterwarnings("ignore")
log_term = np.where((E.values - rmart.values) <= 0, 0, E.values * np.log(E.values - rmart.values))
deviance = np.sign(rmart) * np.sqrt(-2 * (rmart + log_term))
df["deviance"] = deviance
return df
def _compute_scaled_schoenfeld(self, X, T, E, weights, index=None):
r"""
Let s_k be the kth schoenfeld residuals. Then E[s_k] = 0.
For tests of proportionality, we want to test if \beta_i(t) is \beta_i (constant) or not.
Let V_k be the contribution to the information matrix at time t_k. A main result from Grambsch and Therneau is that
\beta(t) = E[s_k*V_k^{-1} + \hat{\beta}]
so define s_k^* = s_k*V_k^{-1} + \hat{\beta} as the scaled schoenfeld residuals.
We can approximate V_k with Hessian/d, so the inverse of Hessian/d is (d * variance_matrix_)
Notes
-------
lifelines does not add the coefficients to the final results, but R does when you call residuals(c, "scaledsch")
"""
n_deaths = self.event_observed.sum()
scaled_schoenfeld_resids = n_deaths * self._compute_schoenfeld(X, T, E, weights, index).dot(
self.variance_matrix_
)
scaled_schoenfeld_resids.columns = self.params_.index
return scaled_schoenfeld_resids
def _compute_schoenfeld(self, X, T, E, weights, index=None):
# TODO: should the index by times, i.e. T[E]?
# Assumes sorted on T and on strata
# cluster does nothing to this, as expected.
_, d = X.shape
if self.strata is not None:
schoenfeld_residuals = np.empty((0, d))
for schoenfeld_residuals_in_strata in self._partition_by_strata_and_apply(
X, T, E, weights, self._compute_schoenfeld_within_strata
):
schoenfeld_residuals = np.append(schoenfeld_residuals, schoenfeld_residuals_in_strata, axis=0)
else:
schoenfeld_residuals = self._compute_schoenfeld_within_strata(X.values, T.values, E.values, weights.values)
# schoenfeld residuals are only defined for subjects with a non-zero event.
df = pd.DataFrame(schoenfeld_residuals[E, :], columns=self.params_.index, index=index[E])
return df
def _compute_schoenfeld_within_strata(self, X, T, E, weights):
"""
A positive value of the residual shows an X value that is higher than expected at that death time.
"""
# TODO: the diff_against is gross
# This uses Efron ties.
n, d = X.shape
if not np.any(E):
# sometimes strata have no deaths. This means nothing is returned
# in the below code.
return np.zeros((n, d))
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((1, d)), np.zeros((1, d))
# Init number of ties and weights
weight_count = 0.0
tie_count = 0
scores = weights * np.exp(np.dot(X, self.params_))
diff_against = []
schoenfeld_residuals = np.empty((0, d))
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
xi = X[i : i + 1]
score = scores[i : i + 1]
w = weights[i]
# Calculate phi values
phi_i = score
phi_x_i = phi_i * xi
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
risk_phi_x = risk_phi_x + phi_x_i
# Calculate sums of Ties, if this is an event
diff_against.append((xi, ei))
if ei:
tie_phi = tie_phi + phi_i
tie_phi_x = tie_phi_x + phi_x_i
# Keep track of count
tie_count += 1 # aka death counts
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tie_count == 0:
for _ in diff_against:
schoenfeld_residuals = np.append(schoenfeld_residuals, np.zeros((1, d)), axis=0)
diff_against = []
continue
# There was at least one event and no more ties remain. Time to sum.
weighted_mean = np.zeros((1, d))
for l in range(tie_count):
numer = risk_phi_x - l * tie_phi_x / tie_count
denom = risk_phi - l * tie_phi / tie_count
weighted_mean += numer / (denom * tie_count)
for xi, ei in diff_against:
schoenfeld_residuals = np.append(schoenfeld_residuals, ei * (xi - weighted_mean), axis=0)
# reset tie values
tie_count = 0
weight_count = 0.0
tie_phi = 0
tie_phi_x = np.zeros((1, d))
diff_against = []
return schoenfeld_residuals[::-1]
def _compute_delta_beta(self, X, T, E, weights, index=None):
"""
approximate change in betas as a result of excluding ith row. Good for finding outliers / specific
subjects that influence the model disproportionately. Good advice: don't drop these outliers, model them.
"""
score_residuals = self._compute_score(X, T, E, weights, index=index)
d = X.shape[1]
scaled_variance_matrix = self.variance_matrix_ * np.tile(self._norm_std.values, (d, 1)).T
delta_betas = score_residuals.dot(scaled_variance_matrix)
delta_betas.columns = self.params_.index
return delta_betas
def _compute_score(self, X, T, E, weights, index=None):
_, d = X.shape
if self.strata is not None:
score_residuals = np.empty((0, d))
for score_residuals_in_strata in self._partition_by_strata_and_apply(
X, T, E, weights, self._compute_score_within_strata
):
score_residuals = np.append(score_residuals, score_residuals_in_strata, axis=0)
else:
score_residuals = self._compute_score_within_strata(X.values, T, E.values, weights.values)
return pd.DataFrame(score_residuals, columns=self.params_.index, index=index)
def _compute_score_within_strata(self, X, _T, E, weights):
# https://www.stat.tamu.edu/~carroll/ftp/gk001.pdf
# lin1989
# https://www.ics.uci.edu/~dgillen/STAT255/Handouts/lecture10.pdf
# Assumes X already sorted by T with strata
# TODO: doesn't handle ties.
# TODO: _T unused
n, d = X.shape
# we already unnormalized the betas in `fit`, so we need normalize them again since X is
# normalized.
beta = self.params_.values * self._norm_std
E = E.astype(int)
score_residuals = np.zeros((n, d))
phi_s = np.exp(np.dot(X, beta))
# need to store these histories, as we access them often
# this is a reverse cumulative sum. See original code in https://github.com/CamDavidsonPilon/lifelines/pull/496/files#diff-81ee0759dbae0770e1a02cf17f4cfbb1R431
risk_phi_x_history = (X * (weights * phi_s)[:, None])[::-1].cumsum(0)[::-1]
risk_phi_history = (weights * phi_s)[::-1].cumsum()[::-1][:, None]
# Iterate forwards
for i in range(0, n):
xi = X[i : i + 1]
phi_i = phi_s[i]
score = -phi_i * (
(
E[: i + 1] * weights[: i + 1] / risk_phi_history[: i + 1].T
).T # this is constant-ish, and could be cached
* (xi - risk_phi_x_history[: i + 1] / risk_phi_history[: i + 1])
).sum(0)
if E[i]:
score = score + (xi - risk_phi_x_history[i] / risk_phi_history[i])
score_residuals[i, :] = score
return score_residuals * weights[:, None]
def compute_residuals(self, training_dataframe, kind):
"""
Parameters
----------
training_dataframe : pandas DataFrame
the same training DataFrame given in `fit`
kind : string
{'schoenfeld', 'score', 'delta_beta', 'deviance', 'martingale', 'scaled_schoenfeld'}
"""
ALLOWED_RESIDUALS = {"schoenfeld", "score", "delta_beta", "deviance", "martingale", "scaled_schoenfeld"}
assert kind in ALLOWED_RESIDUALS, "kind must be in %s" % ALLOWED_RESIDUALS
warnings.filterwarnings("ignore", category=ConvergenceWarning)
X, T, E, weights, shuffled_original_index, _ = self._preprocess_dataframe(training_dataframe)
resids = getattr(self, "_compute_%s" % kind)(X, T, E, weights, index=shuffled_original_index)
return resids
def _compute_confidence_intervals(self):
ci = 100 * (1 - self.alpha)
z = inv_normal_cdf(1 - self.alpha / 2)
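# e.g. with the default alpha=0.05 this is the 0.975 normal quantile, roughly 1.96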
se = self.standard_errors_
hazards = self.params_.values
return pd.DataFrame(
np.c_[hazards - z * se, hazards + z * se],
columns=["%g%% lower-bound" % ci, "%g%% upper-bound" % ci],
index=self.params_.index,
)
def _compute_standard_errors(self, X, T, E, weights):
if self.robust or self.cluster_col:
se = np.sqrt(self._compute_sandwich_estimator(X, T, E, weights).diagonal())
else:
se = np.sqrt(self.variance_matrix_.diagonal())
return pd.Series(se, name="se", index=self.params_.index)
def _compute_sandwich_estimator(self, X, T, E, weights):
delta_betas = self._compute_delta_beta(X, T, E, weights)
if self.cluster_col:
delta_betas = delta_betas.groupby(self._clusters).sum()
sandwich_estimator = delta_betas.T.dot(delta_betas)
return sandwich_estimator.values
def _compute_z_values(self):
return self.params_ / self.standard_errors_
def _compute_p_values(self):
U = self._compute_z_values() ** 2
return stats.chi2.sf(U, 1)
@property
def summary(self):
"""Summary statistics describing the fit.
Set alpha property in the object before calling.
Returns
-------
df : DataFrame
Contains columns coef, np.exp(coef), se(coef), z, p, lower, upper"""
ci = 100 * (1 - self.alpha)
z = inv_normal_cdf(1 - self.alpha / 2)
with np.errstate(invalid="ignore", divide="ignore", over="ignore", under="ignore"):
df = pd.DataFrame(index=self.params_.index)
df["coef"] = self.params_
df["exp(coef)"] = self.hazard_ratios_
df["se(coef)"] = self.standard_errors_
df["coef lower %g%%" % ci] = self.confidence_intervals_["%g%% lower-bound" % ci]
df["coef upper %g%%" % ci] = self.confidence_intervals_["%g%% upper-bound" % ci]
df["exp(coef) lower %g%%" % ci] = self.hazard_ratios_ * np.exp(-z * self.standard_errors_)
df["exp(coef) upper %g%%" % ci] = self.hazard_ratios_ * np.exp(z * self.standard_errors_)
df["z"] = self._compute_z_values()
df["p"] = self._compute_p_values()
df["-log2(p)"] = -np.log2(df["p"])
return df
def print_summary(self, decimals=2, **kwargs):
"""
Print summary statistics describing the fit, the coefficients, and the error bounds.
Parameters
-----------
decimals: int, optional (default=2)
specify the number of decimal places to show
kwargs:
print additional metadata in the output (useful to provide model names, dataset names, etc.) when comparing
multiple outputs.
"""
# Print information about data first
justify = string_justify(25)
headers = []
headers.append(("duration col", "'%s'" % self.duration_col))
if self.event_col:
headers.append(("event col", "'%s'" % self.event_col))
if self.weights_col:
headers.append(("weights col", "'%s'" % self.weights_col))
if self.cluster_col:
headers.append(("cluster col", "'%s'" % self.cluster_col))
if self.penalizer > 0:
headers.append(("penalizer", self.penalizer))
if self.robust or self.cluster_col:
headers.append(("robust variance", True))
if self.strata:
headers.append(("strata", self.strata))
headers.extend(
[
("number of observations", "{:g}".format(self.weights.sum())),
("number of events observed", "{:g}".format(self.weights[self.event_observed > 0].sum())),
("partial log-likelihood", "{:.{prec}f}".format(self.log_likelihood_, prec=decimals)),
("time fit was run", self._time_fit_was_called),
]
)
p = Printer(headers, self, justify, decimals, kwargs)
p.print()
def log_likelihood_ratio_test(self):
"""
This function computes the likelihood ratio test for the Cox model. We
compare the existing model (with all the covariates) to the trivial model
of no covariates.
"""
if hasattr(self, "_ll_null_"):
ll_null = self._ll_null_
else:
if self._batch_mode:
ll_null = self._trivial_log_likelihood_batch(
self.durations.values, self.event_observed.values, self.weights.values
)
else:
ll_null = self._trivial_log_likelihood_single(
self.durations.values, self.event_observed.values, self.weights.values
)
ll_alt = self.log_likelihood_
test_stat = 2 * ll_alt - 2 * ll_null
degrees_freedom = self.params_.shape[0]
p_value = chisq_test(test_stat, degrees_freedom=degrees_freedom)
return StatisticalResult(
p_value,
test_stat,
name="log-likelihood ratio test",
null_distribution="chi squared",
degrees_freedom=degrees_freedom,
)
def predict_partial_hazard(self, X):
r"""
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
partial_hazard: DataFrame
Returns the partial hazard for the individuals, partial since the
baseline hazard is not included. Equal to :math:`\exp{(x - mean(x_{train}))'\beta}`
Notes
-----
If X is a DataFrame, the order of the columns do not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
"""
return np.exp(self.predict_log_partial_hazard(X))
def predict_log_partial_hazard(self, X):
r"""
This is equivalent to R's linear.predictors.
Returns the log of the partial hazard for the individuals, partial since the
baseline hazard is not included. Equal to :math:`(x - \text{mean}(x_{\text{train}})) \beta`
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
log_partial_hazard: DataFrame
Notes
-----
If X is a DataFrame, the order of the columns do not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
"""
hazard_names = self.params_.index
if isinstance(X, pd.Series) and ((X.shape[0] == len(hazard_names) + 2) or (X.shape[0] == len(hazard_names))):
X = X.to_frame().T
return self.predict_log_partial_hazard(X)
elif isinstance(X, pd.Series):
assert len(hazard_names) == 1, "Series not the correct argument"
X = X.to_frame().T
return self.predict_log_partial_hazard(X)
index = _get_index(X)
if isinstance(X, pd.DataFrame):
order = hazard_names
X = X.reindex(order, axis="columns")
X = X.astype(float)
X = X.values
X = X.astype(float)
X = normalize(X, self._norm_mean.values, 1)
return pd.DataFrame(np.dot(X, self.params_), index=index)
def predict_cumulative_hazard(self, X, times=None, conditional_after=None):
"""
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: iterable, optional
an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
conditional_after: iterable, optional
Must be equal in size to X.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the
subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
:math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
The new timeline is the remaining duration of the subject, i.e. reset back to starting at 0.
Returns
-------
cumulative_hazard_ : DataFrame
the cumulative hazard of individuals over the timeline
"""
if isinstance(X, pd.Series):
return self.predict_cumulative_hazard(X.to_frame().T, times=times, conditional_after=conditional_after)
n = X.shape[0]
if times is not None:
times = np.atleast_1d(times).astype(float)
if conditional_after is not None:
conditional_after = _to_1d_array(conditional_after).reshape(n, 1)
if self.strata:
cumulative_hazard_ = pd.DataFrame()
for stratum, stratified_X in X.groupby(self.strata):
try:
strata_c_0 = self.baseline_cumulative_hazard_[[stratum]]
except KeyError:
raise StatError(
dedent(
"""The stratum %s was not found in the original training data. For example, try
the following on the original dataset, df: `df.groupby(%s).size()`. Expected is that %s is not present in the output."""
% (stratum, self.strata, stratum)
)
)
col = _get_index(stratified_X)
v = self.predict_partial_hazard(stratified_X)
times_ = coalesce(times, self.baseline_cumulative_hazard_.index)
n_ = stratified_X.shape[0]
if conditional_after is not None:
times_to_evaluate_at = np.tile(times_, (n_, 1)) + conditional_after
c_0_ = interpolate_at_times(strata_c_0, times_to_evaluate_at)
c_0_conditional_after = interpolate_at_times(strata_c_0, conditional_after)
c_0_ = np.clip((c_0_ - c_0_conditional_after).T, 0, np.inf)
else:
times_to_evaluate_at = np.tile(times_, (n_, 1))
c_0_ = interpolate_at_times(strata_c_0, times_to_evaluate_at).T
cumulative_hazard_ = cumulative_hazard_.merge(
pd.DataFrame(c_0_ * v.values[:, 0], columns=col, index=times_),
how="outer",
right_index=True,
left_index=True,
)
else:
v = self.predict_partial_hazard(X)
col = _get_index(v)
times_ = coalesce(times, self.baseline_cumulative_hazard_.index)
if conditional_after is not None:
times_to_evaluate_at = np.tile(times_, (n, 1)) + conditional_after
c_0 = interpolate_at_times(self.baseline_cumulative_hazard_, times_to_evaluate_at)
c_0_conditional_after = interpolate_at_times(self.baseline_cumulative_hazard_, conditional_after)
c_0 = np.clip((c_0 - c_0_conditional_after).T, 0, np.inf)
else:
times_to_evaluate_at = np.tile(times_, (n, 1))
c_0 = interpolate_at_times(self.baseline_cumulative_hazard_, times_to_evaluate_at).T
cumulative_hazard_ = pd.DataFrame(c_0 * v.values[:, 0], columns=col, index=times_)
return cumulative_hazard_
def predict_survival_function(self, X, times=None, conditional_after=None):
"""
Predict the survival function for individuals, given their covariates. This assumes that the individual
just entered the study (that is, we do not condition on how long they have already lived for.)
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: iterable, optional
an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
conditional_after: iterable, optional
Must be equal in size to X.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the
subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
:math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.
Returns
-------
survival_function : DataFrame
the survival probabilities of individuals over the timeline
"""
return np.exp(-self.predict_cumulative_hazard(X, times=times, conditional_after=conditional_after))
def predict_percentile(self, X, p=0.5, conditional_after=None):
"""
Returns the median lifetimes for the individuals, by default. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
http://stats.stackexchange.com/questions/102986/percentile-loss-functions
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
p: float, optional (default=0.5)
the percentile, must be between 0 and 1.
conditional_after: iterable, optional
Must be equal in size to X.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the
subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
:math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.
Returns
-------
percentiles: DataFrame
See Also
--------
predict_median
"""
subjects = _get_index(X)
return qth_survival_times(p, self.predict_survival_function(X, conditional_after=conditional_after)[subjects]).T
def predict_median(self, X, conditional_after=None):
"""
Predict the median lifetimes for the individuals. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
percentiles: DataFrame
the median lifetimes for the individuals. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
See Also
--------
predict_percentile
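Examples
---------
A minimal sketch, assuming the Rossi dataset bundled with lifelines (a value of ``inf``
means the predicted survival curve never drops below 0.5):
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.predict_median(rossi.drop(columns=['week', 'arrest']))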
"""
return self.predict_percentile(X, 0.5, conditional_after=conditional_after)
def predict_expectation(self, X):
r"""
Compute the expected lifetime, :math:`E[T]`, using covariates X. To compute the expectation, we use the fact
that :math:`E[T] = \int_0^\infty P(T > t) dt = \int_0^\infty S(t) dt`, and approximate the integral with the trapezoidal rule.
Caution
--------
However, if the survival function doesn't converge to 0, then the expectation is really infinity and the returned
values are meaningless/too large. In that case, using ``predict_median`` or ``predict_percentile`` would be better.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
expectations : DataFrame
Notes
-----
If X is a DataFrame, the order of the columns do not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
See Also
--------
predict_median
predict_percentile
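Examples
---------
A minimal sketch, assuming the Rossi dataset bundled with lifelines. Per the caution above,
the integral only covers the observed timeline, so treat the values as rough estimates:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.predict_expectation(rossi.drop(columns=['week', 'arrest']))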
"""
subjects = _get_index(X)
v = self.predict_survival_function(X)[subjects]
return pd.DataFrame(trapz(v.values.T, v.index), index=subjects)
def _compute_baseline_hazard(self, partial_hazards, name):
# https://stats.stackexchange.com/questions/46532/cox-baseline-hazard
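# Breslow-type estimate: at each observed duration t, the baseline hazard is the
# (weighted) number of events at t divided by the (weighted) sum of partial hazards
# of subjects still at risk at t; the reversed cumulative sum below builds that
# risk-set denominator.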
ind_hazards = partial_hazards.copy()
ind_hazards["P"] *= ind_hazards["W"]
ind_hazards["E"] *= ind_hazards["W"]
ind_hazards_summed_over_durations = ind_hazards.groupby("T")[["P", "E"]].sum()
ind_hazards_summed_over_durations["P"] = ind_hazards_summed_over_durations["P"].loc[::-1].cumsum()
baseline_hazard = pd.DataFrame(
ind_hazards_summed_over_durations["E"] / ind_hazards_summed_over_durations["P"], columns=[name]
)
baseline_hazard.index.name = None
return baseline_hazard
def _compute_baseline_hazards(self):
if self.strata:
index = self.durations.unique()
baseline_hazards_ = pd.DataFrame(index=index).sort_index()
for name, stratum_predicted_partial_hazards_ in self._predicted_partial_hazards_.groupby(self.strata):
baseline_hazards_ = baseline_hazards_.merge(
self._compute_baseline_hazard(stratum_predicted_partial_hazards_, name),
left_index=True,
right_index=True,
how="left",
)
return baseline_hazards_.fillna(0)
return self._compute_baseline_hazard(self._predicted_partial_hazards_, name="baseline hazard")
def _compute_baseline_cumulative_hazard(self):
cumulative = self.baseline_hazard_.cumsum()
if not self.strata:
cumulative = cumulative.rename(columns={"baseline hazard": "baseline cumulative hazard"})
return cumulative
def _compute_baseline_survival(self):
"""
Importantly, this agrees with what the KaplanMeierFitter produces. Ex:
Example
-------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter, KaplanMeierFitter
>>> rossi = load_rossi()
>>> kmf = KaplanMeierFitter()
>>> kmf.fit(rossi['week'], rossi['arrest'])
>>> rossi2 = rossi[['week', 'arrest']].copy()
>>> rossi2['var1'] = np.random.randn(432)
>>> cph = CoxPHFitter()
>>> cph.fit(rossi2, 'week', 'arrest')
>>> ax = cph.baseline_survival_.plot()
>>> kmf.plot(ax=ax)
"""
survival_df = np.exp(-self.baseline_cumulative_hazard_)
if not self.strata:
survival_df = survival_df.rename(columns={"baseline cumulative hazard": "baseline survival"})
return survival_df
def plot(self, columns=None, hazard_ratios=False, ax=None, **errorbar_kwargs):
"""
Produces a visual representation of the coefficients (i.e. log hazard ratios), including their standard errors and magnitudes.
Parameters
----------
columns : list, optional
specify a subset of the columns to plot
hazard_ratios: bool, optional
by default, `plot` will present the log-hazard ratios (the coefficients). However, by turning this flag to True, the hazard ratios are presented instead.
errorbar_kwargs:
pass in additional plotting commands to matplotlib errorbar command
Examples
---------
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.plot(hazard_ratios=True)
Returns
-------
ax: matplotlib axis
the matplotlib axis that can be edited.
"""
from matplotlib import pyplot as plt
if ax is None:
ax = plt.gca()
errorbar_kwargs.setdefault("c", "k")
errorbar_kwargs.setdefault("fmt", "s")
errorbar_kwargs.setdefault("markerfacecolor", "white")
errorbar_kwargs.setdefault("markeredgewidth", 1.25)
errorbar_kwargs.setdefault("elinewidth", 1.25)
errorbar_kwargs.setdefault("capsize", 3)
z = inv_normal_cdf(1 - self.alpha / 2)
user_supplied_columns = True
if columns is None:
user_supplied_columns = False
columns = self.params_.index
yaxis_locations = list(range(len(columns)))
log_hazards = self.params_.loc[columns].values.copy()
order = list(range(len(columns) - 1, -1, -1)) if user_supplied_columns else np.argsort(log_hazards)
if hazard_ratios:
exp_log_hazards = np.exp(log_hazards)
upper_errors = exp_log_hazards * (np.exp(z * self.standard_errors_[columns].values) - 1)
lower_errors = exp_log_hazards * (1 - np.exp(-z * self.standard_errors_[columns].values))
ax.errorbar(
exp_log_hazards[order],
yaxis_locations,
xerr=np.vstack([lower_errors[order], upper_errors[order]]),
**errorbar_kwargs
)
ax.set_xlabel("HR (%g%% CI)" % ((1 - self.alpha) * 100))
else:
symmetric_errors = z * self.standard_errors_[columns].values
ax.errorbar(log_hazards[order], yaxis_locations, xerr=symmetric_errors[order], **errorbar_kwargs)
ax.set_xlabel("log(HR) (%g%% CI)" % ((1 - self.alpha) * 100))
best_ylim = ax.get_ylim()
ax.vlines(1 if hazard_ratios else 0, -2, len(columns) + 1, linestyles="dashed", linewidths=1, alpha=0.65)
ax.set_ylim(best_ylim)
tick_labels = [columns[i] for i in order]
ax.set_yticks(yaxis_locations)
ax.set_yticklabels(tick_labels)
return ax
def plot_covariate_groups(self, covariates, values, plot_baseline=True, **kwargs):
"""
Produces a plot comparing the baseline survival curve of the model versus
what happens when a covariate(s) is varied over values in a group. This is useful to compare
subjects' survival as we vary covariate(s), all else being held equal. The baseline survival
curve is the predicted survival curve with all covariates held at their average values in the original dataset.
Parameters
----------
covariates: string or list
a string (or list of strings) of the covariate(s) in the original dataset that we wish to vary.
values: 1d or 2d iterable
an iterable of the specific values we wish the covariate(s) to take on.
plot_baseline: bool
also display the baseline survival, defined as the survival at the mean of the original dataset.
kwargs:
pass in additional plotting commands.
Returns
-------
ax: matplotlib axis, or list of axis'
the matplotlib axis that can be edited.
Examples
---------
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.plot_covariate_groups('prio', values=np.arange(0, 15, 3), cmap='coolwarm')
.. image:: images/plot_covariate_example1.png
>>> # multiple variables at once
>>> cph.plot_covariate_groups(['prio', 'paro'], values=[
>>> [0, 0],
>>> [5, 0],
>>> [10, 0],
>>> [0, 1],
>>> [5, 1],
>>> [10, 1]
>>> ], cmap='coolwarm')
.. image:: images/plot_covariate_example2.png
>>> # if you have categorical variables, you can do the following to see the
>>> # effect of all the categories on one plot.
>>> cph.plot_covariate_groups(['dummy1', 'dummy2', 'dummy3'], values=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> # same as:
>>> cph.plot_covariate_groups(['dummy1', 'dummy2', 'dummy3'], values=np.eye(3))
"""
from matplotlib import pyplot as plt
covariates = _to_list(covariates)
n_covariates = len(covariates)
values = np.asarray(values)
if len(values.shape) == 1:
values = values[None, :].T
if n_covariates != values.shape[1]:
raise ValueError("The number of covariates must equal to second dimension of the values array.")
for covariate in covariates:
if covariate not in self.params_.index:
raise KeyError("covariate `%s` is not present in the original dataset" % covariate)
set_kwargs_drawstyle(kwargs, "steps-post")
if self.strata is None:
axes = kwargs.pop("ax", None) or plt.figure().add_subplot(111)
x_bar = self._norm_mean.to_frame().T
X = pd.concat([x_bar] * values.shape[0])
if np.array_equal(np.eye(n_covariates), values):
X.index = ["%s=1" % c for c in covariates]
else:
X.index = [", ".join("%s=%g" % (c, v) for (c, v) in zip(covariates, row)) for row in values]
for covariate, value in zip(covariates, values.T):
X[covariate] = value
self.predict_survival_function(X).plot(ax=axes, **kwargs)
if plot_baseline:
self.baseline_survival_.plot(ax=axes, ls=":", color="k", drawstyle="steps-post")
else:
axes = []
for stratum, baseline_survival_ in self.baseline_survival_.iteritems():
ax = plt.figure().add_subplot(1, 1, 1)
x_bar = self._norm_mean.to_frame().T
for name, value in zip(_to_list(self.strata), _to_tuple(stratum)):
x_bar[name] = value
X = pd.concat([x_bar] * values.shape[0])
if np.array_equal(np.eye(len(covariates)), values):
X.index = ["%s=1" % c for c in covariates]
else:
X.index = [", ".join("%s=%g" % (c, v) for (c, v) in zip(covariates, row)) for row in values]
for covariate, value in zip(covariates, values.T):
X[covariate] = value
self.predict_survival_function(X).plot(ax=ax, **kwargs)
if plot_baseline:
baseline_survival_.plot(
ax=ax, ls=":", label="stratum %s baseline survival" % str(stratum), drawstyle="steps-post"
)
plt.legend()
axes.append(ax)
return axes
def check_assumptions(
self, training_df, advice=True, show_plots=False, p_value_threshold=0.01, plot_n_bootstraps=10, columns=None
):
"""
Use this function to test the proportional hazards assumption. See usage example at
https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
Parameters
-----------
training_df: DataFrame
the original DataFrame used in the call to ``fit(...)`` or a sub-sampled version.
advice: boolean, optional
display advice as output to the user's screen
show_plots: boolean, optional
display plots of the scaled Schoenfeld residuals and loess curves. This is an eyeball test for violations.
This will slow down the function significantly.
p_value_threshold: float, optional
the threshold to use to alert the user of violations. See note below.
plot_n_bootstraps:
in the plots displayed, also display plot_n_bootstraps bootstrapped loess curves. This will slow down
the function significantly.
columns: list, optional
specify a subset of columns to test.
Examples
----------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>>
>>> rossi = load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>>
>>> cph.check_assumptions(rossi)
Notes
-------
The ``p_value_threshold`` is arbitrarily set at 0.01. Under the null, some covariates
will be below the threshold (i.e. by chance). This is compounded when there are many covariates.
Similarly, when there are lots of observations, even minor deviations from the proportional hazard
assumption will be flagged.
With that in mind, it's best to use a combination of statistical tests and eyeball tests to
determine the most serious violations.
References
-----------
section 5 in https://socialsciences.mcmaster.ca/jfox/Books/Companion/appendices/Appendix-Cox-Regression.pdf,
http://www.mwsug.org/proceedings/2006/stats/MWSUG-2006-SD08.pdf,
http://eprints.lse.ac.uk/84988/1/06_ParkHendry2015-ReassessingSchoenfeldTests_Final.pdf
"""
if not training_df.index.is_unique:
raise IndexError(
"`training_df` index should be unique for this exercise. Please make it unique or use `.reset_index(drop=True)` to force a unique index"
)
residuals = self.compute_residuals(training_df, kind="scaled_schoenfeld")
test_results = proportional_hazard_test(
self, training_df, time_transform=["rank", "km"], precomputed_residuals=residuals
)
residuals_and_duration = residuals.join(training_df[self.duration_col])
counter = 0
n = residuals_and_duration.shape[0]
for variable in self.params_.index.intersection(columns or self.params_.index):
minimum_observed_p_value = test_results.summary.loc[variable, "p"].min()
if np.round(minimum_observed_p_value, 2) > p_value_threshold:
continue
counter += 1
if counter == 1:
if advice:
print(
fill(
"""The ``p_value_threshold`` is set at %g. Even under the null hypothesis of no violations, some covariates will be below the threshold by chance. This is compounded when there are many covariates. Similarly, when there are lots of observations, even minor deviances from the proportional hazard assumption will be flagged."""
% p_value_threshold,
width=100,
)
)
print()
print(
fill(
"""With that in mind, it's best to use a combination of statistical tests and visual tests to determine the most serious violations. Produce visual plots using ``check_assumptions(..., show_plots=True)`` and looking for non-constant lines. See link [A] below for a full example.""",
width=100,
)
)
print()
test_results.print_summary()
print()
print()
print(
"%d. Variable '%s' failed the non-proportional test: p-value is %s."
% (counter, variable, format_p_value(4)(minimum_observed_p_value)),
end="\n\n",
)
if advice:
values = training_df[variable]
value_counts = values.value_counts()
n_uniques = value_counts.shape[0]
# Arbitrarily chosen thresholds (at most 10 unique values, each appearing at least 5 times)
# to check whether the variable can be used as a strata column.
# This should capture dichotomous / low-cardinality values.
if n_uniques <= 10 and value_counts.min() >= 5:
print(
fill(
" Advice: with so few unique values (only {0}), you can include `strata=['{1}', ...]` in the call in `.fit`. See documentation in link [E] below.".format(
n_uniques, variable
),
width=100,
)
)
else:
print(
fill(
""" Advice 1: the functional form of the variable '{var}' might be incorrect. That is, there may be non-linear terms missing. The proportional hazard test used is very sensitive to incorrect functional forms. See documentation in link [D] below on how to specify a functional form.""".format(
var=variable
),
width=100,
),
end="\n\n",
)
print(
fill(
""" Advice 2: try binning the variable '{var}' using pd.cut, and then specify it in `strata=['{var}', ...]` in the call in `.fit`. See documentation in link [B] below.""".format(
var=variable
),
width=100,
),
end="\n\n",
)
print(
fill(
""" Advice 3: try adding an interaction term with your time variable. See documentation in link [C] below.""",
width=100,
),
end="\n\n",
)
if show_plots:
from matplotlib import pyplot as plt
fig = plt.figure()
# plot variable against all time transformations.
for i, (transform_name, transformer) in enumerate(TimeTransformers().iter(["rank", "km"]), start=1):
p_value = test_results.summary.loc[(variable, transform_name), "p"]
ax = fig.add_subplot(1, 2, i)
y = residuals_and_duration[variable]
tt = transformer(self.durations, self.event_observed, self.weights)[self.event_observed.values]
ax.scatter(tt, y, alpha=0.75)
y_lowess = lowess(tt.values, y.values)
ax.plot(tt, y_lowess, color="k", alpha=1.0, linewidth=2)
# bootstrap some possible other lowess lines, to give a rough visual sense of the variability of the lowess fit
for _ in range(plot_n_bootstraps):
ix = sorted(np.random.choice(n, n))
tt_ = tt.values[ix]
y_lowess = lowess(tt_, y.values[ix])
ax.plot(tt_, y_lowess, color="k", alpha=0.30)
best_xlim = ax.get_xlim()
ax.hlines(0, 0, tt.max(), linestyles="dashed", linewidths=1)
ax.set_xlim(best_xlim)
ax.set_xlabel("%s-transformed time\n(p=%.4f)" % (transform_name, p_value), fontsize=10)
fig.suptitle("Scaled Schoenfeld residuals of '%s'" % variable, fontsize=14)
plt.tight_layout()
plt.subplots_adjust(top=0.90)
if advice and counter > 0:
print(
dedent(
r"""
---
[A] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
[B] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it
[C] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates
[D] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form
[E] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification
"""
)
)
if counter == 0:
print("Proportional hazard assumption looks okay.")
@property
def score_(self):
"""
The concordance score (also known as the c-index) of the fit. The c-index is a generalization of the ROC AUC
to survival data, including censoring.
For this purpose, the ``score_`` is a measure of the predictive accuracy of the fitted model
onto the training dataset.
References
----------
https://stats.stackexchange.com/questions/133817/stratified-concordance-index-survivalsurvconcordance
"""
# pylint: disable=access-member-before-definition
if not hasattr(self, "_concordance_score_"):
if self.strata:
# https://stats.stackexchange.com/questions/133817/stratified-concordance-index-survivalsurvconcordance
num_correct, num_tied, num_pairs = 0, 0, 0
for _, _df in self._predicted_partial_hazards_.groupby(self.strata):
if _df.shape[0] == 1:
continue
_num_correct, _num_tied, _num_pairs = _concordance_summary_statistics(
_df["T"].values, -_df["P"].values, _df["E"].values
)
num_correct += _num_correct
num_tied += _num_tied
num_pairs += _num_pairs
else:
df = self._predicted_partial_hazards_
num_correct, num_tied, num_pairs = _concordance_summary_statistics(
df["T"].values, -df["P"].values, df["E"].values
)
self._concordance_score_ = _concordance_ratio(num_correct, num_tied, num_pairs)
return self._concordance_score_
return self._concordance_score_
hexsha: 91ba64e37706ae1e4223523b060a3928b5d8e678 | size: 393 | ext: py | lang: Python | path: nlp_server/config/test/test_config.py | repo: asevans48/NLPServer | repo_head: 6feb1d89748165f9efea40d0777d355044c48176 | licenses: ["Apache-2.0"]
"""
Test configuration loading
@author aevans
"""
import os
from nlp_server.config import load_config
def test_load_config():
"""
Test loading a configuration
"""
current_dir = os.path.curdir
test_path = os.path.sep.join([current_dir, 'data', 'test_config.json'])
cfg = load_config.load_config(test_path)
assert cfg is not None
assert cfg.use_gpu is False
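# Note: this test assumes a JSON fixture at ./data/test_config.json (relative to the
# current working directory) containing at least {"use_gpu": false}.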
hexsha: 91bc729480a0e69ec82630c25580e01aa1aa5937 | size: 4469 | ext: py | lang: Python | path: frappe/utils/safe_exec.py | repo: ektai/frappe3 | repo_head: 44aa948b4d5a0d729eacfb3dabdc9c8894ae1799 | licenses: ["MIT"]
import os, json, inspect
import mimetypes
from html2text import html2text
from RestrictedPython import compile_restricted, safe_globals
import RestrictedPython.Guards
import frappe
import frappe.utils
import frappe.utils.data
from frappe.website.utils import (get_shade, get_toc, get_next_link)
from frappe.modules import scrub
from frappe.www.printview import get_visible_columns
import frappe.exceptions
class ServerScriptNotEnabled(frappe.PermissionError): pass
def safe_exec(script, _globals=None, _locals=None):
# server scripts must be enabled via site_config.json
if not frappe.conf.server_script_enabled:
frappe.msgprint('Please Enable Server Scripts')
raise ServerScriptNotEnabled
# build globals
exec_globals = get_safe_globals()
if _globals:
exec_globals.update(_globals)
# execute script compiled by RestrictedPython
exec(compile_restricted(script), exec_globals, _locals) # pylint: disable=exec-used
def get_safe_globals():
datautils = frappe._dict()
if frappe.db:
date_format = frappe.db.get_default("date_format") or "yyyy-mm-dd"
time_format = frappe.db.get_default("time_format") or "HH:mm:ss"
else:
date_format = "yyyy-mm-dd"
time_format = "HH:mm:ss"
add_module_properties(frappe.utils.data, datautils, lambda obj: hasattr(obj, "__call__"))
if "_" in getattr(frappe.local, 'form_dict', {}):
del frappe.local.form_dict["_"]
user = getattr(frappe.local, "session", None) and frappe.local.session.user or "Guest"
out = frappe._dict(
# make available limited methods of frappe
json=json,
dict=dict,
frappe=frappe._dict(
_=frappe._,
_dict=frappe._dict,
flags=frappe.flags,
format=frappe.format_value,
format_value=frappe.format_value,
date_format=date_format,
time_format=time_format,
format_date=frappe.utils.data.global_date_format,
form_dict=getattr(frappe.local, 'form_dict', {}),
get_meta=frappe.get_meta,
get_doc=frappe.get_doc,
get_cached_doc=frappe.get_cached_doc,
get_list=frappe.get_list,
get_all=frappe.get_all,
get_system_settings=frappe.get_system_settings,
utils=datautils,
get_url=frappe.utils.get_url,
render_template=frappe.render_template,
msgprint=frappe.msgprint,
user=user,
get_fullname=frappe.utils.get_fullname,
get_gravatar=frappe.utils.get_gravatar_url,
full_name=frappe.local.session.data.full_name if getattr(frappe.local, "session", None) else "Guest",
request=getattr(frappe.local, 'request', {}),
session=frappe._dict(
user=user,
csrf_token=frappe.local.session.data.csrf_token if getattr(frappe.local, "session", None) else ''
),
socketio_port=frappe.conf.socketio_port,
get_hooks=frappe.get_hooks,
),
style=frappe._dict(
border_color='#d1d8dd'
),
get_toc=get_toc,
get_next_link=get_next_link,
_=frappe._,
get_shade=get_shade,
scrub=scrub,
guess_mimetype=mimetypes.guess_type,
html2text=html2text,
dev_server=1 if os.environ.get('DEV_SERVER', False) else 0
)
add_module_properties(frappe.exceptions, out.frappe, lambda obj: inspect.isclass(obj) and issubclass(obj, Exception))
if not frappe.flags.in_setup_help:
out.get_visible_columns = get_visible_columns
out.frappe.date_format = date_format
out.frappe.time_format = time_format
out.frappe.db = frappe._dict(
get_list = frappe.get_list,
get_all = frappe.get_all,
get_value = frappe.db.get_value,
set_value = frappe.db.set_value,
get_single_value = frappe.db.get_single_value,
get_default = frappe.db.get_default,
escape = frappe.db.escape,
)
if frappe.response:
out.frappe.response = frappe.response
out.update(safe_globals)
# default writer allows write access
out._write_ = _write
out._getitem_ = _getitem
# allow iterators and list comprehension
out._getiter_ = iter
out._iter_unpack_sequence_ = RestrictedPython.Guards.guarded_iter_unpack_sequence
out.sorted = sorted
return out
def _getitem(obj, key):
# guard function for RestrictedPython
# allow any key to be accessed as long as it does not start with underscore
if isinstance(key, str) and key.startswith('_'):
raise SyntaxError('Key starts with _')
return obj[key]
def _write(obj):
# guard function for RestrictedPython
# allow writing to any object
return obj
def add_module_properties(module, data, filter_method):
for key, obj in module.__dict__.items():
if key.startswith("_"):
# ignore
continue
if filter_method(obj):
# only include objects accepted by the filter (e.g. callables or Exception subclasses)
data[key] = obj
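if __name__ == "__main__":
    # Minimal illustrative sketch: running a restricted script requires an
    # initialised Frappe site with `server_script_enabled` set in site_config.json.
    script_locals = {}
    safe_exec("result = 1 + 1", _locals=script_locals)
    print(script_locals.get("result"))  # -> 2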
hexsha: 91bccefcfa09a20e9fc27c2975179329c5876dd6 | size: 2461 | ext: py | lang: Python | path: simplejson/ordered_dict.py | repo: BarracudaPff/code-golf-data-pythpn | repo_head: 42e8858c2ebc6a061012bcadb167d29cebb85c5e | licenses: ["MIT"]
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin  # Python 2 mixin providing the dict helper methods used below
try:
all
except NameError:
def all(seq):
for elem in seq:
if not elem:
return False
return True
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError("expected at most 1 arguments, got %d" % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end]
self.__map = {}
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError("dictionary is empty")
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return "%s()" % (self.__class__.__name__,)
return "%s(%r)" % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
return len(self) == len(other) and all(p == q for p, q in zip(self.items(), other.items()))
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
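if __name__ == "__main__":
    # Small sanity-check sketch (Python 2, like the module itself): insertion order
    # is preserved and popitem() removes the most recently added key.
    d = OrderedDict([("b", 1), ("a", 2)])
    assert d.keys() == ["b", "a"]
    d.popitem()
    assert d.keys() == ["b"]
    print d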
hexsha: 91be89ca5480384018cb3e20d95ea9abdcb4c1bb | size: 4136 | ext: py | lang: Python | path: baselines/bc.py | repo: bgalbraith/minerl-haiku-baselines | repo_head: c33b14699af14c904394d9c4e30dee680a8718d6 | licenses: ["Apache-2.0"] | stars: 2
import dill
import haiku as hk
import jax
from jax.experimental import optix
import jax.numpy as jnp
from dataset import load_data
MINERL_ENV = 'MineRLTreechopVectorObf-v0'
PARAMS_FILENAME = 'bc_params_treechop.pkl'
class PovStack(hk.Module):
""" PovStack is a module for processing the point-of-view image data that
comes from the agent's viewport. This input is in NHWC format for a shape
of (N, 64, 64, 3).
This model is inspired by
https://github.com/minerllabs/baselines/blob/master/general/chainerrl/baselines/behavioral_cloning.py
"""
def __init__(self, name=None):
super().__init__(name=name)
conv_0 = hk.Conv2D(output_channels=32,
kernel_shape=(8, 8),
stride=4,
padding='SAME',
name='conv_0')
layer_0 = (conv_0, jax.nn.relu)
conv_1 = hk.Conv2D(output_channels=64,
kernel_shape=(4, 4),
stride=2,
padding='SAME',
name='conv_1')
layer_1 = (conv_1, jax.nn.relu)
conv_2 = hk.Conv2D(output_channels=64,
kernel_shape=(3, 3),
stride=1,
padding='SAME',
name='conv_2')
layer_2 = (conv_2, jax.nn.relu)
layer_3 = (hk.Flatten(),
hk.Linear(512, name='fc_0'), jax.nn.relu,
hk.Linear(128, name='fc_1'), jax.nn.relu)
self.layers = layer_0 + layer_1 + layer_2 + layer_3
def __call__(self, x):
for layer in self.layers:
x = layer(x)
return x
class VectorStack(hk.Module):
""" VectorStack is a module for processing the obfuscated "vector" data that
is included in the agent's observation. This is a densely encoded form of
the discrete information regarding the state of the agent other than the
viewport, e.g. current inventory. The input is of shape (N, 64)
"""
def __init__(self, name=None):
super().__init__(name=name)
layer_0 = (hk.Linear(32, name='fc_0'), jax.nn.relu)
self.layers = layer_0
def __call__(self, x):
for layer in self.layers:
x = layer(x)
return x
def behavioral_cloning(batch):
""" The full forward model definition """
x_0 = PovStack(name='pov_stack')(batch[0])
x_1 = VectorStack(name='vector_stack')(batch[1])
x = jnp.concatenate((x_0, x_1), axis=1)
return jnp.tanh(hk.Linear(64)(x))
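# Shape sketch (illustrative): for a batch of N samples laid out as (pov, vector, action),
# PovStack maps (N, 64, 64, 3) -> (N, 128), VectorStack maps (N, 64) -> (N, 32), and the
# concatenated (N, 160) features are projected to an (N, 64) action embedding in [-1, 1].
# For example:
#   net = hk.transform(behavioral_cloning)
#   dummy = (jnp.zeros((2, 64, 64, 3)), jnp.zeros((2, 64)), jnp.zeros((2, 64)))
#   params = net.init(jax.random.PRNGKey(0), dummy)
#   net.apply(params, None, dummy).shape  # (2, 64)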
@jax.jit
def mse_loss(logits, labels):
""" Mean Squared Error loss """
return jnp.mean(jnp.power(logits - labels, 2))
def main():
net = hk.transform(behavioral_cloning)
opt = optix.adam(0.001)
@jax.jit
def loss(params, batch):
""" The loss criterion for our model """
logits = net.apply(params, None, batch)
return mse_loss(logits, batch[2])
@jax.jit
def update(opt_state, params, batch):
grads = jax.grad(loss)(params, batch)
updates, opt_state = opt.update(grads, opt_state)
params = optix.apply_updates(params, updates)
return params, opt_state
@jax.jit
def accuracy(params, batch):
""" Simply report the loss for the current batch """
logits = net.apply(params, None, batch)
return mse_loss(logits, batch[2])
train_dataset, val_dataset = load_data(MINERL_ENV,
batch_size=32, epochs=100)
rng = jax.random.PRNGKey(2020)
batch = next(train_dataset)
params = net.init(rng, batch)
opt_state = opt.init(params)
for i, batch in enumerate(train_dataset):
params, opt_state = update(opt_state, params, batch)
if i % 1000 == 0:
print(accuracy(params, val_dataset))
if i % 10000 == 0:
with open(PARAMS_FILENAME, 'wb') as fh:
dill.dump(params, fh)
with open(PARAMS_FILENAME, 'wb') as fh:
dill.dump(params, fh)
if __name__ == '__main__':
main()
hexsha: 91c49a7dc1b6f619c4919335c93fb67b97477b88 | size: 7917 | ext: py | lang: Python | path: decatt/model.py | stars_repo: achyudh/castor (132 stars, head d7a02ce03f2b71ef1fa490122dd4bbc8214b8b19) | issues/forks_repo: sudipta90/castor (111 issues, 53 forks, head fa2f59535c71a0fb4586afbe543b81ba812c8630) | licenses: ["Apache-2.0"]
import sys
import math
import numpy as np
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class DecAtt(nn.Module):
def __init__(self, num_units, num_classes, embedding_size, dropout, device=0,
training=True, project_input=True,
use_intra_attention=False, distance_biases=10, max_sentence_length=30):
"""
Create the model based on MLP networks.
:param num_units: size of the networks
:param num_classes: number of classes in the problem
:param embedding_size: size of each word embedding
:param use_intra_attention: whether to use intra-attention model
:param training: whether to create training tensors (optimizer)
:param project_input: whether to project input embeddings to a
different dimensionality
:param distance_biases: number of different distances with biases used
in the intra-attention model
"""
super().__init__()
self.arch = "DecAtt"
self.num_units = num_units
self.num_classes = num_classes
self.project_input = project_input
self.embedding_size = embedding_size
self.distance_biases = distance_biases
self.intra_attention = False
self.max_sentence_length = max_sentence_length
self.device = device
self.bias_embedding = nn.Embedding(max_sentence_length,1)
self.linear_layer_project = nn.Linear(embedding_size, num_units, bias=False)
#self.linear_layer_intra = nn.Sequential(nn.Linear(num_units, num_units), nn.ReLU(), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_attend = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_compare = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units*2, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_aggregate = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units*2, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU(),
nn.Linear(num_units, num_classes), nn.LogSoftmax())
self.init_weight()
def init_weight(self):
self.linear_layer_project.weight.data.normal_(0, 0.01)
self.linear_layer_attend[1].weight.data.normal_(0, 0.01)
self.linear_layer_attend[1].bias.data.fill_(0)
self.linear_layer_attend[4].weight.data.normal_(0, 0.01)
self.linear_layer_attend[4].bias.data.fill_(0)
self.linear_layer_compare[1].weight.data.normal_(0, 0.01)
self.linear_layer_compare[1].bias.data.fill_(0)
self.linear_layer_compare[4].weight.data.normal_(0, 0.01)
self.linear_layer_compare[4].bias.data.fill_(0)
self.linear_layer_aggregate[1].weight.data.normal_(0, 0.01)
self.linear_layer_aggregate[1].bias.data.fill_(0)
self.linear_layer_aggregate[4].weight.data.normal_(0, 0.01)
self.linear_layer_aggregate[4].bias.data.fill_(0)
#self.word_embedding.weight.data.copy_(torch.from_numpy(self.pretrained_emb))
def attention_softmax3d(self, raw_attentions):
reshaped_attentions = raw_attentions.view(-1, raw_attentions.size(2))
out = nn.functional.softmax(reshaped_attentions, dim=1)
return out.view(raw_attentions.size(0),raw_attentions.size(1),raw_attentions.size(2))
def _transformation_input(self, embed_sent):
embed_sent = self.linear_layer_project(embed_sent)
result = embed_sent
if self.intra_attention:
f_intra = self.linear_layer_intra(embed_sent)
f_intra_t = torch.transpose(f_intra, 1, 2)
raw_attentions = torch.matmul(f_intra, f_intra_t)
time_steps = embed_sent.size(1)
r = torch.arange(0, time_steps)
r_matrix = r.view(1,-1).expand(time_steps,time_steps)
raw_index = r_matrix-r.view(-1,1)
clipped_index = torch.clamp(raw_index,0,self.distance_biases-1)
clipped_index = Variable(clipped_index.long())
if torch.cuda.is_available():
clipped_index = clipped_index.to(self.device)
bias = self.bias_embedding(clipped_index)
bias = torch.squeeze(bias)
raw_attentions += bias
attentions = self.attention_softmax3d(raw_attentions)
attended = torch.matmul(attentions, embed_sent)
result = torch.cat([embed_sent,attended],2)
return result
def attend(self, sent1, sent2, lsize_list, rsize_list):
"""
Compute inter-sentence attention. This is step 1 (attend) in the paper
:param sent1: tensor in shape (batch, time_steps, num_units),
the projected sentence 1
:param sent2: tensor in shape (batch, time_steps, num_units)
:return: a tuple of 3-d tensors, alpha and beta.
"""
repr1 = self.linear_layer_attend(sent1)
repr2 = self.linear_layer_attend(sent2)
repr2 = torch.transpose(repr2,1,2)
raw_attentions = torch.matmul(repr1, repr2)
#self.mask = generate_mask(lsize_list, rsize_list)
# masked = mask(self.raw_attentions, rsize_list)
#masked = raw_attentions * self.mask
att_sent1 = self.attention_softmax3d(raw_attentions)
beta = torch.matmul(att_sent1, sent2) #input2_soft
raw_attentions_t = torch.transpose(raw_attentions,1,2).contiguous()
#self.mask_t = torch.transpose(self.mask, 1, 2).contiguous()
# masked = mask(raw_attentions_t, lsize_list)
#masked = raw_attentions_t * self.mask_t
att_sent2 = self.attention_softmax3d(raw_attentions_t)
alpha = torch.matmul(att_sent2,sent1) #input1_soft
return alpha, beta
def compare(self, sentence, soft_alignment):
"""
Apply a feed forward network to compare one sentence to its
soft alignment with the other.
:param sentence: embedded and projected sentence,
shape (batch, time_steps, num_units)
:param soft_alignment: tensor with shape (batch, time_steps, num_units)
:return: a tensor (batch, time_steps, num_units)
"""
sent_alignment = torch.cat([sentence, soft_alignment],2)
out = self.linear_layer_compare(sent_alignment)
#out, (state, _) = self.lstm_compare(out)
return out
def aggregate(self, v1, v2):
"""
Aggregate the representations induced from both sentences and their
representations
:param v1: tensor with shape (batch, time_steps, num_units)
:param v2: tensor with shape (batch, time_steps, num_units)
:return: logits over classes, shape (batch, num_classes)
"""
v1_sum = torch.sum(v1,1)
v2_sum = torch.sum(v2,1)
out = self.linear_layer_aggregate(torch.cat([v1_sum,v2_sum],1))
return out
def forward(self, sent1, sent2, ext_feats=None, word_to_doc_count=None, raw_sent1=None, raw_sent2=None):
lsize_list = [len(s.split(" ")) for s in raw_sent1]
rsize_list = [len(s.split(" ")) for s in raw_sent2]
sent1 = sent1.permute(0, 2, 1)
sent2 = sent2.permute(0, 2, 1)
sent1 = self._transformation_input(sent1)
sent2 = self._transformation_input(sent2)
alpha, beta = self.attend(sent1, sent2, lsize_list, rsize_list)
v1 = self.compare(sent1, beta)
v2 = self.compare(sent2, alpha)
logits = self.aggregate(v1, v2)
return logits
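if __name__ == "__main__":
    # Illustrative smoke test with assumed shapes: 5 sentence pairs, 30 tokens each,
    # 300-dim embeddings, passed in (batch, embedding, time) order as forward() expects.
    model = DecAtt(num_units=100, num_classes=2, embedding_size=300, dropout=0.1)
    sent1 = torch.randn(5, 300, 30)
    sent2 = torch.randn(5, 300, 30)
    raw = ["word " * 30] * 5
    logits = model(sent1, sent2, raw_sent1=raw, raw_sent2=raw)
    print(logits.shape)  # expected: torch.Size([5, 2])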
hexsha: 91c545eedebfe63072291f5498dba2aca85beda1 | size: 8738 | ext: py | lang: Python | path: basic_code/networks.py | repo: J-asy/Emotion-FAN | repo_head: 30c1e24a31b2a05c0810a17eb533096a7baaeeef | licenses: ["MIT"] | stars: 275 | issues: 34 | forks: 69
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import torch
import numpy as np
import cv2
import pdb
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def norm_angle(angle):
norm_angle = sigmoid(10 * (abs(angle) / 0.7853975 - 1))
return norm_angle
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU()
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
out = self.relu(out)
return out
###''' self-attention; relation-attention '''
class ResNet_AT(nn.Module):
def __init__(self, block, layers, num_classes=1000, end2end=True, at_type=''):
self.inplanes = 64
self.end2end = end2end
super(ResNet_AT, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.dropout = nn.Dropout(0.5)
self.dropout2 = nn.Dropout(0.6)
self.alpha = nn.Sequential(nn.Linear(512, 1),
nn.Sigmoid())
self.beta = nn.Sequential(nn.Linear(1024, 1),
nn.Sigmoid())
self.pred_fc1 = nn.Linear(512, 7)
self.pred_fc2 = nn.Linear(1024, 7)
self.at_type = at_type
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x='', phrase='train', AT_level='first_level',vectors='',vm='',alphas_from1='',index_matrix=''):
vs = []
alphas = []
assert phrase == 'train' or phrase == 'eval'
assert AT_level == 'first_level' or AT_level == 'second_level' or AT_level == 'pred'
if phrase == 'train':
num_pair = 3
for i in range(num_pair):
f = x[:, :, :, :, i] # x[128,3,224,224]
f = self.conv1(f)
f = self.bn1(f)
f = self.relu(f)
f = self.maxpool(f)
f = self.layer1(f)
f = self.layer2(f)
f = self.layer3(f)
f = self.layer4(f)
f = self.avgpool(f)
f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512]
# MN_MODEL(first Level)
vs.append(f)
alphas.append(self.alpha(self.dropout(f)))
vs_stack = torch.stack(vs, dim=2)
alphas_stack = torch.stack(alphas, dim=2)
if self.at_type == 'self-attention':
vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2))
if self.at_type == 'self_relation-attention':
vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2))
betas = []
for i in range(len(vs)):
vs[i] = torch.cat([vs[i], vm1], dim=1)
betas.append(self.beta(self.dropout(vs[i])))
cascadeVs_stack = torch.stack(vs, dim=2)
betas_stack = torch.stack(betas, dim=2)
output = cascadeVs_stack.mul(betas_stack * alphas_stack).sum(2).div((betas_stack * alphas_stack).sum(2))
if self.at_type == 'self-attention':
vm1 = self.dropout(vm1)
pred_score = self.pred_fc1(vm1)
if self.at_type == 'self_relation-attention':
output = self.dropout2(output)
pred_score = self.pred_fc2(output)
return pred_score
if phrase == 'eval':
if AT_level == 'first_level':
f = self.conv1(x)
f = self.bn1(f)
f = self.relu(f)
f = self.maxpool(f)
f = self.layer1(f)
f = self.layer2(f)
f = self.layer3(f)
f = self.layer4(f)
f = self.avgpool(f)
f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512]
# MN_MODEL(first Level)
alphas = self.alpha(self.dropout(f))
return f, alphas
if AT_level == 'second_level':
assert self.at_type == 'self_relation-attention'
vms = index_matrix.permute(1, 0).mm(vm) # [381, 21783] -> [21783,381] * [381,512] --> [21783, 512]
vs_cate = torch.cat([vectors, vms], dim=1)
betas = self.beta(self.dropout(vs_cate))
''' keywords: mean_fc ; weight_sourcefc; sum_alpha; weightmean_sourcefc '''
''' alpha * beta '''
weight_catefc = vs_cate.mul(alphas_from1) # [21570,512] * [21570,1] --->[21570,512]
alpha_beta = alphas_from1.mul(betas)
sum_alphabetas = index_matrix.mm(alpha_beta) # [380,21570] * [21570,1] -> [380,1]
weightmean_catefc = index_matrix.mm(weight_catefc).div(sum_alphabetas)
weightmean_catefc = self.dropout2(weightmean_catefc)
pred_score = self.pred_fc2(weightmean_catefc)
return pred_score
if AT_level == 'pred':
if self.at_type == 'self-attention':
pred_score = self.pred_fc1(self.dropout(vm))
return pred_score
''' self-attention; relation-attention '''
def resnet18_at(pretrained=False, **kwargs):
# Constructs a base ResNet-18 model.
model = ResNet_AT(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
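if __name__ == '__main__':
    # Illustrative smoke test with an assumed input layout: a batch of 2 samples, each a
    # stack of 3 RGB frames of size 224x224, with the frame index on the last axis as
    # expected by the training branch of forward().
    model = resnet18_at(at_type='self-attention')
    frames = torch.randn(2, 3, 224, 224, 3)
    scores = model(frames, phrase='train')
    print(scores.shape)  # expected: torch.Size([2, 7])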
hexsha: 91c7cf66ad6751a13ba5162d5a7e62b526efecd6 | size: 2693 | ext: py | lang: Python | path: project/scripts/clausecat/evaluate_clausecat.py | stars_repo: explosion/healthsea (60 stars, head 4481488ed9fc85b89844ee872d0a8412a33f0b15) | issues/forks_repo: zhinoos-adibi/healthsea (3 issues, 9 forks) | licenses: ["MIT"]
import spacy
from spacy.scorer import PRFScore
import typer
from pathlib import Path
from wasabi import Printer, table
import operator
import benepar
import clausecat_component
import clausecat_model
import clausecat_reader
import clause_segmentation
import clause_aggregation
msg = Printer()
def main(model_path: Path, eval_path: Path):
"""This script is used to evaluate the clausecat component"""
nlp = spacy.load(model_path)
reader = clausecat_reader.ClausecatCorpus(eval_path)
examples = reader(nlp)
clausecat = nlp.get_pipe("clausecat")
scorer = {
"POSITIVE": PRFScore(),
"NEGATIVE": PRFScore(),
"NEUTRAL": PRFScore(),
"ANAMNESIS": PRFScore(),
}
for i, example in enumerate(examples):
prediction = example.predicted
reference = example.reference
# Prediction
prediction = clausecat(prediction)
# Iterate through prediction and references
for pred_clause, ref_clause in zip(prediction._.clauses, reference._.clauses):
prediction_cats = pred_clause["cats"]
reference_cats = ref_clause["cats"]
prediction_class = max(prediction_cats.items(), key=operator.itemgetter(1))[
0
]
# Update the per-label PRF counts (tp / fp / fn)
for label in prediction_cats:
if label != prediction_class:
prediction = 0
else:
prediction = 1
if prediction == 0 and reference_cats[label] != 0:
scorer[label].fn += 1
elif prediction == 1 and reference_cats[label] != 1:
scorer[label].fp += 1
elif prediction == 1 and reference_cats[label] == 1:
scorer[label].tp += 1
# Printing
textcat_data = []
avg_fscore = 0
avg_recall = 0
avg_precision = 0
for label in scorer:
textcat_data.append(
(
label,
round(scorer[label].fscore, 2),
round(scorer[label].recall, 2),
round(scorer[label].precision, 2),
)
)
avg_fscore += scorer[label].fscore
avg_recall += scorer[label].recall
avg_precision += scorer[label].precision
textcat_data.append(
(
"AVERAGE",
round(avg_fscore / len(scorer), 2),
round(avg_recall / len(scorer), 2),
round(avg_precision / len(scorer), 2),
)
)
header = ("Label", "F-Score", "Recall", "Precision")
print(table(textcat_data, header=header, divider=True))
if __name__ == "__main__":
typer.run(main)
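# Example invocation (illustrative paths; the positional arguments are the trained
# pipeline directory and the evaluation corpus consumed by ClausecatCorpus):
#
#   python evaluate_clausecat.py training/model-best assets/eval.spacy
#
# The script prints one F-score/recall/precision row per label plus an AVERAGE row.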
hexsha: 91c97df0fae07bca6b5ed203a6e4102faddf3f12 | size: 4534 | ext: py | lang: Python | path: keras_cv_attention_models/resnest/resnest.py | repo: dcleres/keras_cv_attention_models | repo_head: 264876673e369f23eff49b3b589b72f908a9625b | licenses: ["MIT"] | stars: 140 | issues: 12 | forks: 20
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from keras_cv_attention_models.aotnet import AotNet
from keras_cv_attention_models.download_and_load import reload_model_weights
from keras_cv_attention_models.attention_layers import batchnorm_with_activation, conv2d_no_bias
PRETRAINED_DICT = {
"resnest101": {"imagenet": "63f9ebdcd32529cbc4b4fbbec3d1bb2f"},
"resnest200": {"imagenet": "8e211dcb089b588e18d36ba7cdf92ef0"},
"resnest269": {"imagenet": "4309ed1b0a8ae92f2b1143dc3512c5c7"},
"resnest50": {"imagenet": "eee7b20a229821f730ab205b6afeb369"},
}
def rsoftmax(inputs, groups):
if groups > 1:
nn = tf.reshape(inputs, [-1, 1, groups, inputs.shape[-1] // groups])
# nn = tf.transpose(nn, [0, 2, 1, 3])
nn = tf.nn.softmax(nn, axis=2)
nn = tf.reshape(nn, [-1, 1, 1, inputs.shape[-1]])
else:
nn = keras.layers.Activation("sigmoid")(inputs)
return nn
def split_attention_conv2d(inputs, filters, kernel_size=3, strides=1, downsample_first=False, groups=2, activation="relu", name=""):
h_axis, w_axis = [2, 3] if K.image_data_format() == "channels_first" else [1, 2]
in_channels = inputs.shape[-1]
conv_strides = strides if downsample_first else 1
if groups == 1:
logits = conv2d_no_bias(inputs, filters, kernel_size, strides=conv_strides, padding="same", name=name and name + "1_")
else:
# Using groups=2 is slow in `mixed_float16` policy
# logits = conv2d_no_bias(inputs, filters * groups, kernel_size, padding="same", groups=groups, name=name and name + "1_")
logits = []
splitted_inputs = tf.split(inputs, groups, axis=-1)
for ii in range(groups):
conv_name = name and name + "1_g{}_".format(ii + 1)
logits.append(conv2d_no_bias(splitted_inputs[ii], filters, kernel_size, strides=conv_strides, padding="same", name=conv_name))
logits = tf.concat(logits, axis=-1)
logits = batchnorm_with_activation(logits, activation=activation, name=name and name + "1_")
if groups > 1:
splited = tf.split(logits, groups, axis=-1)
gap = tf.reduce_sum(splited, axis=0)
else:
gap = logits
gap = tf.reduce_mean(gap, [h_axis, w_axis], keepdims=True)
reduction_factor = 4
inter_channels = max(in_channels * groups // reduction_factor, 32)
atten = keras.layers.Conv2D(inter_channels, kernel_size=1, name=name and name + "2_conv")(gap)
atten = batchnorm_with_activation(atten, activation=activation, name=name and name + "2_")
atten = keras.layers.Conv2D(filters * groups, kernel_size=1, name=name and name + "3_conv")(atten)
atten = rsoftmax(atten, groups)
out = keras.layers.Multiply()([atten, logits])
if groups > 1:
out = tf.split(out, groups, axis=-1)
out = tf.reduce_sum(out, axis=0)
if not downsample_first and strides > 1:
out = keras.layers.ZeroPadding2D(padding=1, name=name and name + "pool_pad")(out)
out = keras.layers.AveragePooling2D(3, strides=2, name=name and name + "pool")(out)
return out
def ResNest(input_shape=(224, 224, 3), stem_type="deep", attn_types="sa", bn_after_attn=False, shortcut_type="avg", pretrained="imagenet", **kwargs):
kwargs.pop("kwargs", None)
model = AotNet(**locals(), **kwargs)
reload_model_weights(model, pretrained_dict=PRETRAINED_DICT, sub_release="resnest", pretrained=pretrained)
return model
def ResNest50(input_shape=(224, 224, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 4, 6, 3], stem_width=64, model_name="resnest50", **locals(), **kwargs)
def ResNest101(input_shape=(256, 256, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 4, 23, 3], stem_width=128, model_name="resnest101", **locals(), **kwargs)
def ResNest200(input_shape=(320, 320, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 24, 36, 3], stem_width=128, model_name="resnest200", **locals(), **kwargs)
def ResNest269(input_shape=(416, 416, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 30, 48, 8], stem_width=128, model_name="resnest269", **locals(), **kwargs)
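# --- Hedged usage sketch (not part of the original file) ---
# Illustrates how the constructors above are meant to be called; it assumes the
# AotNet backbone, the conv2d_no_bias / batchnorm_with_activation helpers and the
# pretrained-weight loader defined elsewhere in this package are available, and
# that TensorFlow/Keras is installed.
if __name__ == "__main__":
    import numpy as np

    model = ResNest50(input_shape=(224, 224, 3), pretrained="imagenet")
    dummy = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
    preds = model(dummy)  # classifier_activation="softmax" -> class probabilities
    print(model.name, preds.shape)  # expected: resnest50 (1, 1000)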
| 50.377778
| 155
| 0.696074
| 612
| 4,534
| 4.980392
| 0.238562
| 0.023622
| 0.03248
| 0.044291
| 0.32185
| 0.250328
| 0.194882
| 0.177822
| 0.177822
| 0.145013
| 0
| 0.065789
| 0.161888
| 4,534
| 89
| 156
| 50.94382
| 0.736316
| 0.045214
| 0
| 0.089552
| 0
| 0
| 0.095491
| 0.029595
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104478
| false
| 0
| 0.089552
| 0.059701
| 0.298507
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91cb094ac7602563246a111f9c1326b917365ed1
| 10,652
|
py
|
Python
|
cluster.py
|
Birfy/Endlinking
|
cc87a5528498e1733111d302437aeb1142b0a47f
|
[
"MIT"
] | 1
|
2020-02-20T03:46:10.000Z
|
2020-02-20T03:46:10.000Z
|
cluster.py
|
Birfy/Endlinking
|
cc87a5528498e1733111d302437aeb1142b0a47f
|
[
"MIT"
] | null | null | null |
cluster.py
|
Birfy/Endlinking
|
cc87a5528498e1733111d302437aeb1142b0a47f
|
[
"MIT"
] | null | null | null |
import numpy as np
import random
import sys
chainlength = int(sys.argv[1])
dfname = sys.argv[2]
outfl = 'result.data'
cluster_size = int(sys.argv[3])
def readsize(dfname):
with open(dfname, 'r') as df:
lines = df.readlines()
for line in lines:
content = line.split()
if content and content[-1] == 'xhi':
return 2*float(content[1])
def readdata(dfname, chainlen):
X=[]
Xi=[]
with open(dfname, 'r') as df:
lines = df.readlines()
for line in lines:
content = line.split()
if len(content) == 9:
# print(content)
if (int(content[0]) % chainlen == 0 or int(content[0]) % chainlen == 1) and int(content[2]) != 3 and int(content[2]) != 4 :
X.append([float(content[i]) for i in range(3,6)])
Xi.append(int(content[0]))
return np.array(X), np.array(Xi)
def initmeans(n):
M=[]
for i in range(n):
M.append([size*(random.random()-0.5),size*(random.random()-0.5),size*(random.random()-0.5)])
return np.array(M)
def SetDistMat(X, means):
distmat_dtype = [('key',int), ('dist',float)]
distmat = np.empty((n,k),dtype=distmat_dtype)
for i in range(n):
distmat[i,:] = [(c[0], GetDist(X[i], c[1])) for c in enumerate(means)]
distmat[i,:] = np.sort(distmat[i,:], order='dist')
return distmat
def GetDist(x, c):
dist = np.linalg.norm(x-c-boxl*np.around((x-c)/boxl))
return dist
def Get_plst(assigned, distmat, full):
plst = []
for i in range(n):
if (i not in assigned):
j = 0
while j<k:
if (not full[distmat[i,j][0]]):
bestkey = distmat[i,j][0]
mindist = distmat[i,j][1]
break
else:
j += 1
for j in range(k-1,-1,-1):
if (not full[distmat[i,j][0]]):
maxdist = distmat[i,j][1]
break
plst.append((i, bestkey, maxdist-mindist))
plst.sort(key=lambda t:t[2])
return plst
def InitialAssignment(distmat):
clusters = {}
full = np.zeros(k,dtype=bool) # a boolean array that records which clusters are full
assigned = [] # a list of objects that have been assigned to a cluster
plst = Get_plst(assigned, distmat, full)
while (len(plst)):
temp = plst.pop()
try:
if (len(clusters[temp[1]])<cluster_size):
clusters[temp[1]].append(temp[0])
assigned.append(temp[0])
else:
full[temp[1]] = True
plst = Get_plst(assigned, distmat, full)
except KeyError:
clusters[temp[1]] = [temp[0]]
assigned.append(temp[0])
return clusters
def CalcMeans(X, oldmeans, clusters):
means = np.zeros((k,3))
keys = sorted(clusters.keys())
for key in keys:
for i in clusters[key]:
means[key] += X[i]-boxl*np.around((X[i]-oldmeans[key])/boxl)
means[key] /= len(clusters[key])
means[key] -= boxl*np.around(means[key]/boxl)
return means
def SortObj(X, clusters, means, distmat):
objlst = [] # list of objects ordered in ascending delta of the current
# assignment and the best possible alternate assignment
keys = sorted(clusters.keys())
for key in keys:
for i in clusters[key]:
currdist = GetDist(X[i],means[key])
mindist = distmat[i,0][1]
objlst.append((i, key, currdist-mindist))
objlst.sort(key=lambda t:t[2], reverse=True)
return objlst
def Transfer(obj, clufrom, cluto, clusters):
clusters[clufrom].remove(obj)
clusters[cluto].append(obj)
return clusters
def WriteResult(file, X, means, clusters):
with open(file, 'w') as fl:
# keys = sorted(clusters.keys())
# i = 1
# for key in keys:
# for obj in clusters[key]:
# fl.write("%d\t%d\t%f\t%f\t%f\t%d\n"\
# %(obj,Xi[obj], X[obj][0], X[obj][1], X[obj][2], key))
# i = i + 1
for c in enumerate(means):
fl.write("%d\t%f\t%f\t%f"%(c[0], c[1][0], c[1][1], c[1][2]))
for obj in clusters[c[0]]:
fl.write("\t%d"%(Xi[obj]))
fl.write('\n')
# i = i + 1
return
# This function will perform statistical analysis to the clustering results
def ClusterStat(X, means, clusters):
# Average distance between means
means_avg = 0.
for i in range(k-1):
for j in range(i+1,k):
means_avg += GetDist(means[i], means[j])
means_avg /= (k*(k-1)/2.)
# Average distance between obj and mean in a cluster
obj2mean_avg = np.zeros(k)
# Variance of the distances between obj and mean in a cluster
obj2mean_var = np.zeros(k)
keys = sorted(clusters.keys())
for key in keys:
for i in clusters[key]:
obj2mean = GetDist(X[i], means[key])
obj2mean_avg[key] += obj2mean
obj2mean_var[key] += obj2mean*obj2mean
obj2mean_avg[key] /= len(clusters[key])
obj2mean_var[key] /= len(clusters[key])
obj2mean_var[key] = np.sqrt(obj2mean_var[key])
# Average within cluster distances between objects
winclu_avg = np.zeros(k)
# Average of within cluster distances of all clusters
winclu_grandavg = 0.
for key in keys:
for i in clusters[key]:
x = X[i]
for j in clusters[key]:
if j>i:
winclu_avg[key] += GetDist(x, X[j])
s = len(clusters[key])
winclu_avg[key] /= (s*(s-1)/2)
winclu_grandavg += winclu_avg[key]
winclu_grandavg /= k
# write the summary
print("average distance among means: %f"%means_avg)
#print("average distance from objects to the mean of a cluster:")
#for i in range(k):
# print("cluster %i: %f"%(i, obj2mean_avg[i]))
#print("variance of distances from objects to the mean of a cluster:")
#for i in range(k):
# print("cluster %i: %f"%(i, obj2mean_var[i]))
#print("within-cluster average distances:")
#for i in range(k):
# print("cluster %i: %f"%(i, winclu_avg[i]))
print("grand average of within-cluster average distances: %f"%winclu_grandavg)
return
X, Xi = readdata(dfname, chainlength)
size = readsize(dfname)
boxl = np.array([size, size, size])
n = len(X)
k = int(len(X)/cluster_size)
# Set up the database of objects
# X = readdata(dfname, chainlength)
# Choose initial means with K-means
means = initmeans(k)
# Set up initial clusters
distmat = SetDistMat(X, means)
clusters = InitialAssignment(distmat)
## debug code
#keys = sorted(clusters.keys())
#for key in keys:
# print("cluster %i:"%key)
# print(clusters[key])
## end of debug
# Iteration step
for iter in range(100):
active = 0 # indicate the number of transfers in the current iteration
tranlst = (-1)*np.ones(k, dtype='int') # set up transfer list for each cluster
# Compute the cluster means
oldmeans = means.copy()
means = CalcMeans(X, oldmeans, clusters)
# Get statistics about the clustering
#ClusterStat(X, means, clusters)
## debug code
#print("old means:")
#print(oldmeans)
#print("new means:")
#print(means)
## end of debug
# For each object, compute the distances to the cluster means
distmat = SetDistMat(X, means)
# Sort objects based on the delta of the current assignment and the best
# possible alternate assignment
objlst = SortObj(X, clusters, means, distmat)
##debug code
#print(objlst)
##return
#end of debug
# For each element by priority:
while (len(objlst)):
(i, key, temp) = objlst.pop()
obj2key = GetDist(X[i], means[key])
transferred = False # record whether any transfer has occurred for object i
if (key == distmat[i,0][0]):
##debug
#print("%i is already the opt cluster for obj %i. no transfer"%(clu, i))
##end of debug
continue
# For each other clusters by element gain:
else:
for j in range(k):
clu = distmat[i,j][0] # the key of another cluster
objgain = obj2key - distmat[i,j][1] # gain by transferring i from cluster key to clu
if (clu==key): # already in the cluster
continue
if (len(clusters[clu]) < cluster_size):
active += 1
transferred = True
clusters = Transfer(i, key, clu, clusters)
##debug
#print("cluster %i not full. transfer obj %i from cluster %i to it."%(clu, i, key))
##end of debug
break
elif (tranlst[clu] != -1): # if the tranlst of another cluster is not empty
# distance between the obj in the tranlst and the current cluster
tran2key = GetDist(X[tranlst[clu]], means[key])
tran2clu = GetDist(X[tranlst[clu]], means[clu])
# gain by transferring the obj in tranlst from cluster clu to key
trangain = tran2clu - tran2key
if (objgain + trangain > 0): # transfer if the sum of gains is positive, i.e. a net gain
active += 2
transferred = True
clusters = Transfer(i, key, clu, clusters)
clusters = Transfer(tranlst[clu], clu, key, clusters)
##debug
#print("obj %i is transfered from cluster %i to %i"%(i, key, clu))
#print("obj %i is transfered from cluster %i to %i"%(tranlst[clu], clu, key))
#print("objgain: %f, trangain: %f"%(objgain, trangain))
##end of debug
tranlst[clu] = -1 # reset the tranlst to empty
break
if (not transferred):
tranlst[key] = i
##debug
#print("add obj %i in cluster %i to the transfer list"%(i, key))
##end of debug
# if nothing was transferred during this iteration, stop and keep the current clustering
if (not active):
break
#debug code
print("number of transfers in iter %i: %i\n"%(iter+1, active))
#end of debug
print("K-means clustering converged in %d iterations!\n"%(iter+1))
# Output the clustering results
WriteResult(outfl, X, means, clusters)
ClusterStat(X, means, clusters)
# print(X)
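# --- Hedged usage sketch (not part of the original script) ---
# The script appears to expect a LAMMPS-style data file (it scans for the 'xhi'
# box bound) and is invoked as:
#     python cluster.py <chainlength> <datafile> <cluster_size>
# The standalone snippet below re-implements the minimum-image distance used by
# GetDist on a made-up periodic box, purely to illustrate the periodic-boundary
# handling; the box size and points are illustrative only.
import numpy as np

box = np.array([10.0, 10.0, 10.0])
a = np.array([4.9, 0.0, 0.0])
b = np.array([-4.9, 0.0, 0.0])
d = a - b
d -= box * np.around(d / box)  # wrap into the nearest periodic image
print(np.linalg.norm(d))       # 0.2 rather than 9.8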
| 36.986111
| 135
| 0.557548
| 1,438
| 10,652
| 4.107789
| 0.161335
| 0.006602
| 0.012189
| 0.014898
| 0.268834
| 0.205857
| 0.175047
| 0.156255
| 0.123074
| 0.118165
| 0
| 0.014646
| 0.314119
| 10,652
| 287
| 136
| 37.114983
| 0.793868
| 0.279666
| 0
| 0.25
| 0
| 0
| 0.029062
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.015625
| 0
| 0.140625
| 0.020833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91cc3e617eabbbaa426a11dc2dc6c376ad5cab95
| 740
|
py
|
Python
|
ituro/accounts/tests.py
|
kayduemre/ituro
|
eb5bb0655c2d85eed212d28c1d154006c57a4f03
|
[
"MIT"
] | 9
|
2015-03-18T01:59:24.000Z
|
2022-03-09T06:36:21.000Z
|
ituro/accounts/tests.py
|
kayduemre/ituro
|
eb5bb0655c2d85eed212d28c1d154006c57a4f03
|
[
"MIT"
] | 29
|
2015-03-18T01:59:49.000Z
|
2021-06-10T20:39:03.000Z
|
ituro/accounts/tests.py
|
kayduemre/ituro
|
eb5bb0655c2d85eed212d28c1d154006c57a4f03
|
[
"MIT"
] | 10
|
2016-01-31T05:44:46.000Z
|
2019-10-15T06:12:27.000Z
|
from django.test import TestCase
from django.utils import timezone
from accounts.models import CustomUser, CustomUserManager
class UserCreateTestCase(TestCase):
def test_create_user_correctly(self):
"Creating users correctly"
new_user = CustomUser.objects.create(
email="participant@gmail.com",
name="Participant Name",
phone="09876543210",
school="Some University",
is_staff=False,
is_active=True,
date_joined=timezone.now())
self.assertTrue(isinstance(new_user, CustomUser))
self.assertEqual(new_user.get_full_name(), "Participant Name")
self.assertEqual(new_user.get_short_name(), "Participant Name")
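# --- Hedged companion sketch (not part of the original file) ---
# A further check written in the same style, assuming CustomUser exposes the
# fields used above; the module runs with `python manage.py test accounts` as usual.
class UserFlagsTestCase(TestCase):
    def test_create_user_flags(self):
        user = CustomUser.objects.create(
            email="someone@example.com",
            name="Some Name",
            phone="01234567890",
            school="Some University",
            is_staff=False,
            is_active=True,
            date_joined=timezone.now())
        self.assertEqual(user.email, "someone@example.com")
        self.assertTrue(user.is_active)
        self.assertFalse(user.is_staff)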
| 33.636364
| 71
| 0.671622
| 79
| 740
| 6.113924
| 0.582278
| 0.057971
| 0.118012
| 0.091097
| 0.10352
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019366
| 0.232432
| 740
| 21
| 72
| 35.238095
| 0.830986
| 0.032432
| 0
| 0
| 0
| 0
| 0.172973
| 0.028378
| 0
| 0
| 0
| 0
| 0.176471
| 1
| 0.058824
| false
| 0
| 0.176471
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91ce005123b48bec43dd6a96411c6f2b6ba102be
| 2,284
|
py
|
Python
|
continuum/datasets/dtd.py
|
oleksost/continuum
|
682d66540bfbfa171ac73281ed2989f9338e88bf
|
[
"MIT"
] | 282
|
2020-05-09T21:35:22.000Z
|
2022-03-20T11:29:41.000Z
|
continuum/datasets/dtd.py
|
oleksost/continuum
|
682d66540bfbfa171ac73281ed2989f9338e88bf
|
[
"MIT"
] | 180
|
2020-05-03T09:31:48.000Z
|
2022-03-30T12:12:48.000Z
|
continuum/datasets/dtd.py
|
oleksost/continuum
|
682d66540bfbfa171ac73281ed2989f9338e88bf
|
[
"MIT"
] | 34
|
2020-06-13T14:09:29.000Z
|
2022-03-14T14:05:07.000Z
|
import os
from typing import List
import numpy as np
from torchvision import datasets as torchdata
from continuum.datasets import ImageFolderDataset
from continuum import download
from continuum.tasks import TaskType
class DTD(ImageFolderDataset):
"""Describable Textures Dataset (DTD)
Reference:
* Describing Textures in the Wild
M. Cimpoi and S. Maji and I. Kokkinos and S. Mohamed and and A. Vedaldi
CVPR 2014
"""
url = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"
def __init__(self, data_path: str, train: bool = True, download: bool = True, split: int = 1):
super().__init__(data_path=data_path, train=train, download=download, data_type=TaskType.IMAGE_PATH)
if not (1 <= int(split) <= 10):
raise ValueError(f"Available splits are [1, ..., 10], not {split}")
self.split = split
def _download(self):
archive_path = os.path.join(self.data_path, "dtd-r1.0.1.tar.gz")
if not os.path.exists(archive_path):
print("Downloading DTD dataset...")
download.download(self.url, self.data_path)
if not os.path.exists(os.path.join(self.data_path, "dtd")):
print("Uncompressing images...")
download.untar(archive_path)
def get_data(self):
x, y, t = self._format(torchdata.ImageFolder(os.path.join(self.data_path, "dtd", "images")).imgs)
if self.train:
index_files = [
os.path.join(self.data_path, "dtd", "labels", f"train{str(self.split)}.txt"),
os.path.join(self.data_path, "dtd", "labels", f"val{str(self.split)}.txt")
]
else:
index_files = [
os.path.join(self.data_path, "dtd", "labels", f"test{str(self.split)}.txt")
]
valid_paths = set()
for index_file in index_files:
with open(index_file) as f:
valid_paths.update(
map(lambda p: os.path.join(self.data_path, "dtd", "images", p.strip()),
f.readlines()
)
)
valid_paths = np.array(list(valid_paths))
indexes = np.isin(x, valid_paths)
return x[indexes], y[indexes], None
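# --- Hedged usage sketch (not part of the original file) ---
# Minimal use of the DTD class above; the data_path is an example value and the
# call assumes the archive can be downloaded and extracted under it.
if __name__ == "__main__":
    dataset = DTD("/tmp/dtd_data", train=True, download=True, split=1)
    x, y, t = dataset.get_data()  # image paths, labels, and (here) t is None
    print(len(x), y.min(), y.max())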
| 35.6875
| 108
| 0.595009
| 302
| 2,284
| 4.377483
| 0.377483
| 0.066566
| 0.081694
| 0.07413
| 0.21407
| 0.188351
| 0.172466
| 0.134644
| 0.087746
| 0.06354
| 0
| 0.010284
| 0.27627
| 2,284
| 63
| 109
| 36.253968
| 0.789474
| 0.077933
| 0
| 0.045455
| 0
| 0.022727
| 0.145813
| 0.036092
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068182
| false
| 0
| 0.159091
| 0
| 0.295455
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91d00c668e9c3c29e1e078f088b136cfebc103ca
| 1,727
|
py
|
Python
|
intro.py
|
Ebenazer-2002/library-management
|
8c1ededc7167d2221a3947abfeec4773da39dca9
|
[
"Apache-2.0"
] | null | null | null |
intro.py
|
Ebenazer-2002/library-management
|
8c1ededc7167d2221a3947abfeec4773da39dca9
|
[
"Apache-2.0"
] | null | null | null |
intro.py
|
Ebenazer-2002/library-management
|
8c1ededc7167d2221a3947abfeec4773da39dca9
|
[
"Apache-2.0"
] | 1
|
2021-09-22T22:08:15.000Z
|
2021-09-22T22:08:15.000Z
|
#Intro Page
from tkinter import *
from PIL import Image, ImageTk
import cv2
#----------------------------Start Function--------------------------#
def start(event):
label1.destroy()
import log
win.destroy()
log.main()
#------------------------Main Window---------------------------------#
def main_window():
global win
global label1
win = Tk()
win.title('Library Management System')
win.iconbitmap("images/main_icon.ico")
win.bind('<Key>', start) # start function on pressing any key
win.state('zoomed')
# opens video
cap = cv2.VideoCapture("images/vid.MP4")
global n
n = 0
#-----------------------------------------------------------------
# defining show function
def show():
global n # frame count
n = n+1
if n <= 30:
ret, frame = cap.read()  # ret is the frame-read success flag (unused here)
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image).resize((1600, 850))
imgtk = ImageTk.PhotoImage(image=img)
label1.imgtk = imgtk
label1.configure(image=imgtk)
win.after(10, show)
else:
label1.destroy()
frm = Frame(win, bg='black')
frm.place(relx=0, rely=0, relwidth=1, relheight=1)
label = Label(frm, text='Press any Key to continue',
bg='black', fg='white')
label.place(relx=0.45, rely=0.5)
#-----------------------------------------------------------------
label1 = Label(win)
label1.place(relx=0, rely=0, relheight=1, relwidth=1)
show()
win.mainloop()
#-----------------------------------------------------------------
main_window()
| 28.311475
| 72
| 0.466126
| 179
| 1,727
| 4.47486
| 0.486034
| 0.037453
| 0.037453
| 0.034956
| 0.037453
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031907
| 0.255935
| 1,727
| 60
| 73
| 28.783333
| 0.59144
| 0.246091
| 0
| 0.095238
| 0
| 0
| 0.085271
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.095238
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91d02ed15b88e5d9e5da4c1c6b0a923344ec181d
| 16,740
|
py
|
Python
|
notebooks/week4_help.py
|
hugh9876/04-multivariate-analysis
|
0541962842df8844aa323c368f8a4e44999c2d7f
|
[
"MIT"
] | null | null | null |
notebooks/week4_help.py
|
hugh9876/04-multivariate-analysis
|
0541962842df8844aa323c368f8a4e44999c2d7f
|
[
"MIT"
] | null | null | null |
notebooks/week4_help.py
|
hugh9876/04-multivariate-analysis
|
0541962842df8844aa323c368f8a4e44999c2d7f
|
[
"MIT"
] | null | null | null |
"""
This module provides helper functions to support exercises during AM1
with outliers, robust regression and template regression in the CORE
data analytics workshop series, week 4.
"""
import numpy as np
import pandas as pd
import math
from collections import namedtuple
def recovery_sulphur_dataframe_with_outliers(outlier_probability):
"""Return dataframe representing recovery as a function of sulphur.
Parameters:
----------
outlier_probability:
This floating point parameter should range between 0 and 1
and is probability of an observation being an outlier.
Returns:
-------
Pandas dataframe:
A dataframe is returned with two series, the first being observed
recovery, and the second being sulphur %. The data may be sampled
from the true underlying relationship, plus gaussian noise, or
may be an outlier value taken from a non-gaussian distribution.
The proportion of outliers to non-outliers will depend on
the outlier_probability parameter.
"""
# Check that the outlier_probability is an ordinary number.
assert isinstance(outlier_probability, (float, int))
# As it's a probability, ensure that it ranges between 0 and 1.
assert outlier_probability >= 0.0
assert outlier_probability <= 1.0
# If no exceptions have been thrown then we likely have a valid input.
# Get 50 pairs of sulphur features and recovery labels
sulphur_percent = _draw_sulphur_observations(50)
recovery_percent = _observe_recovery(sulphur_percent,
outlier_probability)
return pd.DataFrame({'metal_recovery_percent': recovery_percent,
'feed_sulphur_percent': sulphur_percent})
def _initialise_randomstate(seed):
""" Use RandomState object with seed set."""
return np.random.RandomState(seed)
def _draw_sulphur_observations(count):
rs = _initialise_randomstate(7)
# draw "count" sulphur observations from a uniform distribution of
# sulphur percentages between 0.15% and 1.35%
sulphur_percent = rs.uniform(0.15, 1.35, count)
return sulphur_percent
def _draw_dilithium_observations(count):
rs = _initialise_randomstate(8)
return rs.uniform(25, 35, count)
def _draw_kryptonite_observations(count):
rs = _initialise_randomstate(9)
return rs.uniform(20, 25, count)
def _draw_unobtainium_observations(count):
rs = _initialise_randomstate(10)
return rs.uniform(0, 7, count)
def _draw_quartz_observations(count):
rs = _initialise_randomstate(11)
return rs.uniform(25, 35, count)
def _observe_recovery(sulphur_percent, outlier_probability):
"""Returns an array of metal recoveries.
This method returns an array of metal recoveries given both
an array of sulphur percentages and the probability of an
outlier being observed.
"""
recovery_percent = np.zeros_like(sulphur_percent)
is_outlier = _is_outlier(outlier_probability, len(sulphur_percent))
for index in range(0, len(recovery_percent)):
if is_outlier[index]:
recovery_percent [index]= _return_outlier_model_of_recovery(sulphur_percent[index])
else:
recovery_percent [index]=_noise_free_model_of_recovery(sulphur_percent[index])
return recovery_percent
def _noise_free_model_of_recovery(sulphur):
"""This method returns a metal recovery for a given sulphur %."""
return 74.81 - 6.81/sulphur
def _return_outlier_model_of_recovery(sulphur):
return (74.81 - 6.81/sulphur)/3
def _is_outlier(outlier_probability, how_many):
"""Return true/false numpy array
"""
rs = _initialise_randomstate(5)
uniformly_distributed = rs.uniform(0, 1, how_many)
is_outlier = np.zeros_like(uniformly_distributed)
for index in range(0, len(is_outlier)):
is_outlier[index]=uniformly_distributed[index]>(1-outlier_probability)
return is_outlier
def add_gaussian_noise(noise_free_input, mean, sigma):
"""Adds gaussian noise to vector, given mean and sigma
"""
bins = len(noise_free_input)
noise = np.random.normal(mean, sigma, bins)
return noise_free_input + noise
def gaussian_fwhm_pdf(X, height, x_position, fwhm):
"""Returns guassian probability distribution function, given FWHM
This computes a gaussian probability density function (pdf) given a
Full Width at Half Maximum (FWHM) instead of standard deviation, and
scales it by the height parameters. If the height is one, then the
area of the gaussian will also be unity, as required for a pdf, and
for preserving area when used as an impulse response function in
convolution operations.
Note, this returns the function, it does not sample from the
distribution.
"""
return gaussian_pdf(X, height, x_position, fwhm / (2 * math.sqrt(2 * math.log(2))))
def gaussian_pdf(X, area, x_position, standard_deviation):
"""Returns gaussian probability distribution function multiplied by area.
This computes a gaussian with unit area and multiplies it
by the area parameter. It is translated to be centered
on x_position and has the width specified by standard_deviation.
Unit area gaussians are used as probability distributions functions,
and are also important in convolutions, as area of the convolution
of two functions is the product of their areas. If it is important
for the convolution to preserve area of a function when convolved
with a gaussian then that gaussian needs to have unit area. Preserving
area also implies conservation of energy in many physical models.
It can be shown that the integral of the gaussian function is unity
when the gaussian's height is scaled as a function of standard_deviation
as:
height_scaling = 1/(standard_deviation*sqrt(2*pi))
So this function multiplies the height of the gaussian by this factor and
then multiplies this result by the area parameter that is passed in.
If the area parameter is 1, then the area of this gaussian will also
be 1 for all standard deviations, otherwise the area will be set by the
area parameter. The relationship between height and area, and the scaling
of height by the second parameter below, will be made clearer by
also studying the gaussian function.
"""
return gaussian(X, area / (standard_deviation * math.sqrt(2 * math.pi)), x_position,
standard_deviation)
def gaussian(X, height, x_position, standard_deviation):
"""Return standard gaussian function
This is the unnormalised gaussian function
f(x)=height*exp(-(x-x_position)^2/(2*standard_deviation^2))
Parameters
----------
height:
This is the maximum of the gaussian peak.
This function does not normalise to constant area, the caller
must do this if this is what they want.
x_position:
This is the x position of the centre of the gaussian. If the
gaussian is being used to apply the impulse response of an
instrument applied to an XRD reflection, then this will be the
two-theta position of the peak.
standard_deviation:
The standard deviation of the gaussian curve.
If this function is being applied in spectroscopy, optics or
electrical engineering, it is common for gaussians to be
defined in terms of Full Width at Half Maximum (FWHM), which
is the width of the peak when the height drops to half
of the peak height, specified by the height parameter. If
the x-axis represents frequency, and the function height
is proportional to energy or power, then this will be the
gaussian's bandwidth, that is, the width between the -3db points.
To convert from FWHM to standard deviation use the relationship:
FWHM = 2*sqrt(2*log(2)) * standard_deviation
Returns
-------
double:
Evaluated gaussian function.
"""
return height * math.e**(-(X - x_position)**2 / 2 / standard_deviation**2)
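# --- Hedged illustration (not part of the original module) ---
# Quick numeric check of the relationships documented above: gaussian_pdf with
# area=1 integrates to ~1, and gaussian_fwhm_pdf drops to half its peak value at
# x_position +/- FWHM/2. The helper below is illustrative only.
def _self_check_gaussians():
    x = np.linspace(-10.0, 10.0, 20001)
    pdf = gaussian_pdf(x, area=1.0, x_position=0.0, standard_deviation=1.5)
    print(pdf.sum() * (x[1] - x[0]))              # ~1.0, i.e. unit area
    peak = gaussian_fwhm_pdf(0.0, 1.0, 0.0, 2.0)  # value at the centre
    half = gaussian_fwhm_pdf(1.0, 1.0, 0.0, 2.0)  # value at FWHM/2 = 1.0
    print(half / peak)                            # ~0.5 by definition of FWHM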
class MultichannelXAxis:
"""Set up an X axis for isntrument
This object is set up with three inputs, min_x is the minimum value
on the axis. In the example I've chosen 5. The max_x
value is the highest value on the x axis, and spacing is
the x spacing between channels. In the example I've chosen
a max_x of 90 and spacing of 0.2. The unit is two-theta
degrees, and this unit (and the axis values) come from the
world of x-ray diffraction (XRD). We're describing the x-axis
of a low resolution XRD instrument.
The object's as_vector method can return the x_axis as an array
of numbers using numpy's linspace method, which we've already used
for plotting and other purposes.
"""
def __init__(self, min_x, max_x, spacing):
self._min = min_x
self._max = max_x
self._spacing = spacing
self._channel_count = \
round((self.max - self.min) / self.spacing + 1)
self._label = "r'$2\theta$ (degrees)"
@property
def min(self):
"""Return minimum two-theta for diffractogram x-axis."""
return self._min
@property
def max(self):
"""Return maximum two-theta for diffractogram x-axis."""
return self._max
@property
def spacing(self):
"""Return channel spacing in two-theta for diffractogram x-axis."""
return self._spacing
@property
def channel_count(self):
"""Return the count of channels in this diffractogram."""
return self._channel_count
@property
def label(self):
"""Return the x-axis label, for use with plot and report generation."""
return self._label
@property
def as_vector(self):
"""Return a numpy vector containing two-theta values for each channel."""
x_axis_vector = np.linspace(self.min, self.max, self.channel_count)
return x_axis_vector
def _apply_convolution_kernals(x_axis_vector, intensity, two_theta_angle,
instrument_broadening_fwhm,
reflection_broadening_fwhm):
"""Apply gaussian kernel for instrument broadening only."""
def _add_gaussian_fwhms(fwhm1, fwhm2):
sigma_fwhm_conversion_constant = 2*math.sqrt(2*math.log(2))
sigma_1 = fwhm1/sigma_fwhm_conversion_constant
sigma_2 = fwhm2/sigma_fwhm_conversion_constant
#squares of std_dev (ie sigma^2 which is variance) are additive
sigma_summed = math.sqrt(sigma_1*sigma_1 + sigma_2*sigma_2)
return sigma_summed*sigma_fwhm_conversion_constant
fwhm = _add_gaussian_fwhms (instrument_broadening_fwhm,
reflection_broadening_fwhm)
return gaussian_fwhm_pdf(x_axis_vector, intensity, two_theta_angle,
fwhm)
def create_templates_matrix():
"""Create templates for four test pure components.
This creates templates for quartz, dilithium, kryptonite and
unobtainium, in that order. The templates are returned
in an array where the first column is quartz, and the last is
unobtainium. If you plot them, you'll see gently varying
squiggly lines.
"""
# Create a templates matrix containing space for four templates, plus
# a column of ones.
x_axis = MultichannelXAxis(5, 90, 0.2)
template_count = 4
templates_matrix = np.zeros((x_axis.channel_count, template_count+1))
# set 4 two-theta units of instrument broadening
instrument_broadening = 4
# create a tuple for each reflection, and add it to a list. The loop
# then grabs each reflection from the list and then adds it to the
# template. The first value in the tuple is intensity, the second
# two-theta angle and the third is how much broadening to apply.
Reflection = namedtuple('Reflection', ('intensity', 'two_theta', 'broadening'))
quartz_reflections = []
quartz_reflections.append (Reflection(intensity=10.0, two_theta=25.0, broadening=3.0))
quartz_reflections.append (Reflection(13.0, 38.0, 6.0))
quartz_reflections.append (Reflection(10.0, 43.0, 2.0))
quartz_reflections.append (Reflection(25.0, 60, 2.0))
dilithium_reflections = []
dilithium_reflections.append (Reflection(25.0, 80, 1.0))
kryptonite_reflections = []
#kryptonite_reflections.append (Reflection(intensity=12.0, two_theta=25.0, broadening=9.0))
kryptonite_reflections.append (Reflection(17.0, 12.0, 1.0))
kryptonite_reflections.append (Reflection(19.0, 43.0, 12.0))
#kryptonite_reflections.append (Reflection(4.0, 70, 2.0))
#kryptonite_reflections.append (Reflection(32.0, 74, 2.0))
unobtainium_reflections = []
#unobtainium_reflections.append (Reflection(intensity=4.0, two_theta=25.0, broadening=12.0))
unobtainium_reflections.append (Reflection(5.0, 18.0, 6.0))
unobtainium_reflections.append (Reflection(1.0, 23.0, 1.0))
unobtainium_reflections.append (Reflection(5.0, 31.0, 2.0))
unobtainium_reflections.append (Reflection(3.0, 55.0, 6.0))
unobtainium_reflections.append (Reflection(7.0, 58.0, 1.0))
#unobtainium_reflections.append (Reflection(5.0, 80, 2.0))
phases=[]
# create four phases
phases.append(quartz_reflections)
phases.append(dilithium_reflections)
phases.append(kryptonite_reflections)
phases.append(unobtainium_reflections)
for phase_idx in range(0, template_count):
for a_reflection in phases[phase_idx]:
contribution_of_this_reflection = \
_apply_convolution_kernals(
x_axis.as_vector,
a_reflection.intensity,
a_reflection.two_theta,
instrument_broadening,
a_reflection.broadening)
templates_matrix[:, phase_idx] += \
contribution_of_this_reflection
# set the last column to be all ones
templates_matrix[:, template_count] = \
np.ones(x_axis.channel_count)
return templates_matrix
def create_composition_dataframe(observations_count):
"""Create a dataframe of observations of drilling samples
Returns:
Pandas DataFrame with observations_count observations.
The dataframe has four columns representing the amount
of quartz, dilithium, kryptonite and unobtainium present.
These values are drawn from uniform distributions."""
unobtainium = _draw_unobtainium_observations (observations_count)
dilithium = _draw_dilithium_observations(observations_count)
kryptonite = _draw_kryptonite_observations(observations_count)
quartz = _draw_quartz_observations(observations_count)
# Create clusters by imposing a relationship between quartz
# and dilithium.
for observation_idx in range(0, observations_count):
if quartz[observation_idx] > 30:
dilithium[observation_idx] = 5
if dilithium[observation_idx] > 30:
quartz[observation_idx] = 5
return pd.DataFrame({'Quartz': quartz,
'Dilithium': dilithium,
'Kryptonite': kryptonite,
'Unobtainium': unobtainium})
def create_observations(compositions_dataframe, templates):
"""Create a new array containing synthetic observations"""
observations_count = len(compositions_dataframe)
channels_count = len(templates[:,0])
observations_matrix = np.zeros((channels_count, observations_count))
for observation_idx in range (0, observations_count):
observations_matrix[:, observation_idx] = \
templates[:,0]*compositions_dataframe['Quartz'][observation_idx] + \
templates[:,1]*compositions_dataframe['Dilithium'][observation_idx] + \
templates[:,2]*compositions_dataframe['Kryptonite'][observation_idx] + \
templates[:,3]*compositions_dataframe['Unobtainium'][observation_idx]
# add gaussian noise. If you have time, try increasing this and watch
# prediction performance fall over.
observations_matrix[:, observation_idx] = \
add_gaussian_noise(observations_matrix[:, observation_idx], 10, 3)
return observations_matrix
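# --- Hedged usage sketch (not part of the original module) ---
# End-to-end use of the helpers above, roughly as the workshop notebooks would
# call them; the observation count and outlier probability are example values.
if __name__ == "__main__":
    recovery_df = recovery_sulphur_dataframe_with_outliers(0.2)
    print(recovery_df.describe())

    templates = create_templates_matrix()             # (channels, 4 phases + 1)
    compositions = create_composition_dataframe(25)   # 25 synthetic drill samples
    observations = create_observations(compositions, templates)
    print(templates.shape, observations.shape)        # e.g. (426, 5) and (426, 25)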
| 41.435644
| 97
| 0.683871
| 2,165
| 16,740
| 5.130254
| 0.196305
| 0.007653
| 0.041325
| 0.023949
| 0.198974
| 0.125777
| 0.061223
| 0.031242
| 0.007743
| 0
| 0
| 0.020416
| 0.245102
| 16,740
| 403
| 98
| 41.538462
| 0.858511
| 0.445938
| 0
| 0.070588
| 0
| 0
| 0.020801
| 0.002645
| 0
| 0
| 0
| 0
| 0.017647
| 1
| 0.158824
| false
| 0
| 0.023529
| 0.005882
| 0.341176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91d0d1d94cdf45c4bcbd44fd68f0bba0ecae92c7
| 2,671
|
py
|
Python
|
tests/actions/test_mutable_token_action.py
|
0xOmarA/RadixLib
|
85d75a47d4c4df4c1a319b74857ae2c513933623
|
[
"MIT"
] | 32
|
2022-01-12T16:52:28.000Z
|
2022-03-24T18:05:47.000Z
|
tests/actions/test_mutable_token_action.py
|
0xOmarA/RadixLib
|
85d75a47d4c4df4c1a319b74857ae2c513933623
|
[
"MIT"
] | 3
|
2022-01-12T17:01:55.000Z
|
2022-02-12T15:14:16.000Z
|
tests/actions/test_mutable_token_action.py
|
0xOmarA/RadixLib
|
85d75a47d4c4df4c1a319b74857ae2c513933623
|
[
"MIT"
] | 1
|
2022-01-21T04:28:07.000Z
|
2022-01-21T04:28:07.000Z
|
from radixlib.actions import CreateTokenDefinition
from typing import Dict, Any
import unittest
class TestMutableTokenAction(unittest.TestCase):
""" Unit tests for the CreateTokenDefinition action of mutable tokens """
ActionDict: Dict[str, Any] = {
"token_properties": {
"name": "MutableTest",
"description": "An amazing new token with great utility!",
"icon_url": "https://www.google.com/",
"url": "https://www.google.com/",
"symbol": "mutable",
"is_supply_mutable": True,
"granularity": "1",
"owner": {
"address": "tdx1qspqqecwh3tgsgz92l4d4f0e4egmfe86049dj75pgq347fkkfmg84pgx9um0v"
}
},
"token_supply": {
"value": "0",
"token_identifier": {
"rri": "mutable_tr1q06dd0ut3qmyp4pqkvmeu2dvkwg5f7vm8yeslwvpkt9qcl5vqu"
}
},
"type": "CreateTokenDefinition"
}
def test_from_dict(self):
""" Tests the derivation of the mainnet wallet addresses from the public key """
# The action loaded from the dictionary
creation: CreateTokenDefinition = CreateTokenDefinition.from_dict(self.ActionDict)
# Asserting that the CreateTokenDefinition object understood the content of the dictionary
self.assertEqual(creation.name, self.ActionDict['token_properties']['name'])
self.assertEqual(creation.description, self.ActionDict['token_properties']['description'])
self.assertEqual(creation.icon_url, self.ActionDict['token_properties']['icon_url'])
self.assertEqual(creation.url, self.ActionDict['token_properties']['url'])
self.assertEqual(creation.symbol, self.ActionDict['token_properties']['symbol'])
self.assertEqual(creation.is_supply_mutable, self.ActionDict['token_properties']['is_supply_mutable'])
self.assertEqual(creation.granularity, int(self.ActionDict['token_properties']['granularity']))
self.assertEqual(creation.owner.address, self.ActionDict['token_properties']['owner']['address'])
self.assertEqual(creation.token_supply, int(self.ActionDict['token_supply']['value']))
self.assertEqual(creation.token_rri, self.ActionDict['token_supply']['token_identifier']['rri'])
self.assertEqual(creation.to_account, None)
def test_to_dict(self):
""" Tests the conversion of the token account to a dictionary """
# The account loaded from the dictionary
account: CreateTokenDefinition = CreateTokenDefinition.from_dict(self.ActionDict)
self.assertEqual(account.to_dict(), self.ActionDict)
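# --- Hedged usage sketch (not part of the original tests) ---
# The same from_dict / to_dict round trip outside of unittest, reusing the
# dictionary defined on the test case above.
if __name__ == "__main__":
    action = CreateTokenDefinition.from_dict(TestMutableTokenAction.ActionDict)
    assert action.to_dict() == TestMutableTokenAction.ActionDict
    print(action.symbol, action.token_rri)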
| 47.696429
| 110
| 0.675028
| 260
| 2,671
| 6.803846
| 0.3
| 0.102883
| 0.143019
| 0.131148
| 0.131148
| 0.072357
| 0
| 0
| 0
| 0
| 0
| 0.017005
| 0.207413
| 2,671
| 56
| 111
| 47.696429
| 0.818611
| 0.137027
| 0
| 0
| 0
| 0
| 0.275274
| 0.064333
| 0
| 0
| 0
| 0
| 0.292683
| 1
| 0.04878
| false
| 0
| 0.073171
| 0
| 0.170732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91d380ce2b1e14c5b063e9056626bb2c1ea92f55
| 6,869
|
py
|
Python
|
src/python/pants/backend/native/subsystems/xcode_cli_tools.py
|
StephanErb/pants
|
a368267b6b4cf50138ba567f582409ed31bf5db9
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/native/subsystems/xcode_cli_tools.py
|
StephanErb/pants
|
a368267b6b4cf50138ba567f582409ed31bf5db9
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/native/subsystems/xcode_cli_tools.py
|
StephanErb/pants
|
a368267b6b4cf50138ba567f582409ed31bf5db9
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.backend.native.config.environment import Assembler, CCompiler, CppCompiler, Linker
from pants.engine.rules import rule
from pants.engine.selectors import Select
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import is_readable_dir
from pants.util.memo import memoized_method, memoized_property
MIN_OSX_SUPPORTED_VERSION = '10.11'
MIN_OSX_VERSION_ARG = '-mmacosx-version-min={}'.format(MIN_OSX_SUPPORTED_VERSION)
class XCodeCLITools(Subsystem):
"""Subsystem to detect and provide the XCode command line developer tools.
This subsystem exists to give a useful error message if the tools aren't
installed, and because the install location may not be on the PATH when Pants
is invoked.
"""
options_scope = 'xcode-cli-tools'
_REQUIRED_FILES = {
'bin': [
'as',
'cc',
'c++',
'clang',
'clang++',
'ld',
'lipo',
],
# Any of the entries that would be here are not directly below the 'include' or 'lib' dirs, and
# we haven't yet encountered an invalid XCode/CLI tools installation which has the include dirs,
# but incorrect files. These would need to be updated if such an issue arises.
'include': [],
'lib': [],
}
INSTALL_PREFIXES_DEFAULT = [
# Prefer files from this installation directory, if available. This doesn't appear to be
# populated with e.g. header files on travis.
'/usr',
# Populated by the XCode CLI tools.
'/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr',
# Populated by the XCode app. These are derived from using the -v or -H switches invoking the
# osx clang compiler.
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/9.1.0',
'/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr',
]
class XCodeToolsUnavailable(Exception):
"""Thrown if the XCode CLI tools could not be located."""
class XCodeToolsInvalid(Exception):
"""Thrown if a method within this subsystem requests a nonexistent tool."""
@classmethod
def register_options(cls, register):
super(XCodeCLITools, cls).register_options(register)
register('--install-prefixes', type=list, default=cls.INSTALL_PREFIXES_DEFAULT,
fingerprint=True, advanced=True,
help='Locations to search for resources from the XCode CLI tools, including a '
'compiler, linker, header files, and some libraries. '
'Under this directory should be some selection of these subdirectories: {}.'
.format(cls._REQUIRED_FILES.keys()))
@memoized_property
def _all_existing_install_prefixes(self):
return [pfx for pfx in self.get_options().install_prefixes if is_readable_dir(pfx)]
# NB: We use @memoized_method in this file for methods which may raise.
@memoized_method
def _get_existing_subdirs(self, subdir_name):
maybe_subdirs = [os.path.join(pfx, subdir_name) for pfx in self._all_existing_install_prefixes]
existing_dirs = [existing_dir for existing_dir in maybe_subdirs if is_readable_dir(existing_dir)]
required_files_for_dir = self._REQUIRED_FILES.get(subdir_name)
if required_files_for_dir:
for fname in required_files_for_dir:
found = False
for subdir in existing_dirs:
full_path = os.path.join(subdir, fname)
if os.path.isfile(full_path):
found = True
continue
if not found:
raise self.XCodeToolsUnavailable(
"File '{fname}' in subdirectory '{subdir_name}' does not exist at any of the specified "
"prefixes. This file is required to build native code on this platform. You may need "
"to install the XCode command line developer tools from the Mac App Store.\n\n"
"If the XCode tools are installed and you are still seeing this message, please file "
"an issue at https://github.com/pantsbuild/pants/issues/new describing your "
"OSX environment and which file could not be found.\n"
"The existing install prefixes were: {pfxs}. These can be extended with "
"--{scope}-install-prefixes."
.format(fname=fname,
subdir_name=subdir_name,
pfxs=self._all_existing_install_prefixes,
scope=self.get_options_scope_equivalent_flag_component()))
return existing_dirs
@memoized_method
def path_entries(self):
return self._get_existing_subdirs('bin')
@memoized_method
def lib_dirs(self):
return self._get_existing_subdirs('lib')
@memoized_method
def include_dirs(self):
base_inc_dirs = self._get_existing_subdirs('include')
all_inc_dirs = base_inc_dirs
for d in base_inc_dirs:
# TODO: figure out what this directory does and why it's not already found by this compiler.
secure_inc_dir = os.path.join(d, 'secure')
if is_readable_dir(secure_inc_dir):
all_inc_dirs.append(secure_inc_dir)
return all_inc_dirs
@memoized_method
def assembler(self):
return Assembler(
path_entries=self.path_entries(),
exe_filename='as',
library_dirs=[])
@memoized_method
def linker(self):
return Linker(
path_entries=self.path_entries(),
exe_filename='ld',
library_dirs=[],
linking_library_dirs=[],
extra_args=[MIN_OSX_VERSION_ARG])
@memoized_method
def c_compiler(self):
return CCompiler(
path_entries=self.path_entries(),
exe_filename='clang',
library_dirs=self.lib_dirs(),
include_dirs=self.include_dirs(),
extra_args=[MIN_OSX_VERSION_ARG])
@memoized_method
def cpp_compiler(self):
return CppCompiler(
path_entries=self.path_entries(),
exe_filename='clang++',
library_dirs=self.lib_dirs(),
include_dirs=self.include_dirs(),
extra_args=[MIN_OSX_VERSION_ARG])
@rule(Assembler, [Select(XCodeCLITools)])
def get_assembler(xcode_cli_tools):
return xcode_cli_tools.assembler()
@rule(Linker, [Select(XCodeCLITools)])
def get_ld(xcode_cli_tools):
return xcode_cli_tools.linker()
@rule(CCompiler, [Select(XCodeCLITools)])
def get_clang(xcode_cli_tools):
return xcode_cli_tools.c_compiler()
@rule(CppCompiler, [Select(XCodeCLITools)])
def get_clang_plusplus(xcode_cli_tools):
return xcode_cli_tools.cpp_compiler()
def create_xcode_cli_tools_rules():
return [
get_assembler,
get_ld,
get_clang,
get_clang_plusplus,
]
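# --- Hedged usage sketch (not part of the original module) ---
# In a pants backend's register.py, the rules defined above would typically be
# exposed like this (the register.py layout is assumed, not taken from this file).
def rules():
    return create_xcode_cli_tools_rules()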
| 34.345
| 105
| 0.7084
| 915
| 6,869
| 5.103825
| 0.300546
| 0.023983
| 0.038972
| 0.013705
| 0.208137
| 0.165096
| 0.137259
| 0.094004
| 0.094004
| 0.062741
| 0
| 0.002553
| 0.201776
| 6,869
| 199
| 106
| 34.517588
| 0.84917
| 0.174989
| 0
| 0.137681
| 0
| 0.014493
| 0.217584
| 0.068028
| 0
| 0
| 0
| 0.005025
| 0
| 1
| 0.108696
| false
| 0
| 0.057971
| 0.086957
| 0.311594
| 0.014493
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91d43878e8db19b2ac8a4228dcc70b222e3033cf
| 11,998
|
py
|
Python
|
improver_tests/regrid/test_RegridWithLandSeaMask.py
|
yzhaobom/improver
|
47f9e103c63f890bfbb24d5e08d9d01d041514f7
|
[
"BSD-3-Clause"
] | 77
|
2017-04-26T07:47:40.000Z
|
2022-03-31T09:40:49.000Z
|
improver_tests/regrid/test_RegridWithLandSeaMask.py
|
yzhaobom/improver
|
47f9e103c63f890bfbb24d5e08d9d01d041514f7
|
[
"BSD-3-Clause"
] | 1,440
|
2017-03-29T10:04:15.000Z
|
2022-03-28T10:11:29.000Z
|
improver_tests/regrid/test_RegridWithLandSeaMask.py
|
MoseleyS/improver
|
ca028e3a1c842e3ff00b188c8ea6eaedd0a07149
|
[
"BSD-3-Clause"
] | 72
|
2017-03-17T16:53:45.000Z
|
2022-02-16T09:41:37.000Z
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the RegridWithLandSeaMask class"""
# set up a special data set and corresponding land-sea mask info
# set up target grid and its land-sea mask info
# it is designed to cover different scenarios for regridding with land-sea
# the regridding reference results are manually checked for different methods
# not using "set_up_variable_cube" because of different spacing at lat/lon
import numpy as np
from improver.regrid.bilinear import basic_indexes
from improver.regrid.grid import calculate_input_grid_spacing, latlon_from_cube
from improver.regrid.landsea import RegridLandSea
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
def modify_cube_coordinate_value(cube, coord_x, coord_y):
"""modify x(longitude) & y(latitude) andcoordinates for a cube"""
cube.coord(axis="x").points = coord_x
cube.coord(axis="x").bounds = None
cube.coord(axis="x").guess_bounds()
cube.coord(axis="y").points = coord_y
cube.coord(axis="y").bounds = None
cube.coord(axis="y").guess_bounds()
return cube
def define_source_target_grid_data():
""" define cube_in, cube_in_mask,cube_out_mask using assumed data """
# source (input) grid
in_lats = np.linspace(0, 15, 4)
in_lons = np.linspace(0, 40, 5)
# target (output) grid
out_lats = np.linspace(0, 14, 8)
out_lons = np.linspace(5, 35, 11)
# assume a set of nwp data
data = np.arange(20).reshape(4, 5).astype(np.float32)
# input grid mask info
in_mask = np.empty((4, 5), dtype=int)  # np.int is removed in recent NumPy
in_mask[:, :] = 1
in_mask[0, 2] = 0
in_mask[2, 2:4] = 0
in_mask[3, 2:4] = 0
# output grid mask info
out_mask = np.empty((8, 11), dtype=int)
out_mask[:, :] = 1
out_mask[0, 4:7] = 0
out_mask[1, 5] = 0
out_mask[5:9, 4:10] = 0
out_mask[6, 6] = 1
out_mask[7, 6] = 1
out_mask[1, 0] = 0
# create cube with default spacing
cube_in = set_up_variable_cube(data, "air_temperature", "Celsius")
cube_in_mask = set_up_variable_cube(in_mask, "Land_Binary_Mask", "1")
cube_out_mask = set_up_variable_cube(out_mask, "Land_Binary_Mask", "1")
# modify cube coordinates to the designed value
cube_in = modify_cube_coordinate_value(cube_in, in_lons, in_lats)
cube_in_mask = modify_cube_coordinate_value(cube_in_mask, in_lons, in_lats)
cube_out_mask = modify_cube_coordinate_value(cube_out_mask, out_lons, out_lats)
return cube_in, cube_out_mask, cube_in_mask
def define_source_target_grid_data_same_domain():
""" define cube_in, cube_in_mask,cube_out_mask, assume the same domain """
# source (input) grid
in_lats = np.linspace(0, 15, 4)
in_lons = np.linspace(0, 40, 5)
# target (output) grid
out_lats = np.linspace(0, 15, 7)
out_lons = np.linspace(5, 40, 9)
# assume a set of nwp data
data = np.arange(20).reshape(4, 5).astype(np.float32)
# input grid mask info
in_mask = np.empty((4, 5), dtype=int)
in_mask[:, :] = 1
in_mask[0, 2] = 0
in_mask[2, 2:4] = 0
in_mask[3, 2:4] = 0
# output grid mask info
out_mask = np.empty((7, 9), dtype=int)
out_mask[:, :] = 1
out_mask[0, 3:6] = 0
out_mask[1, 4] = 0
out_mask[4:9, 4:8] = 0
out_mask[6, 6] = 1
out_mask[1, 0] = 0
# create cube with default spacing
cube_in = set_up_variable_cube(data, "air_temperature", "Celsius")
cube_in_mask = set_up_variable_cube(in_mask, "Land_Binary_Mask", "1")
cube_out_mask = set_up_variable_cube(out_mask, "Land_Binary_Mask", "1")
# modify cube coordinates to the designed value
cube_in = modify_cube_coordinate_value(cube_in, in_lons, in_lats)
cube_in_mask = modify_cube_coordinate_value(cube_in_mask, in_lons, in_lats)
cube_out_mask = modify_cube_coordinate_value(cube_out_mask, out_lons, out_lats)
return cube_in, cube_out_mask, cube_in_mask
def test_basic_indexes():
"""Test basic_indexes for identical source and target domain case """
cube_in, cube_out_mask, _ = define_source_target_grid_data_same_domain()
in_latlons = latlon_from_cube(cube_in)
out_latlons = latlon_from_cube(cube_out_mask)
in_lons_size = cube_in.coord(axis="x").shape[0]
lat_spacing, lon_spacing = calculate_input_grid_spacing(cube_in)
indexes = basic_indexes(
out_latlons, in_latlons, in_lons_size, lat_spacing, lon_spacing
)
test_results = indexes[58:63, :]
expected_results = np.array(
[
[12, 17, 18, 13],
[12, 17, 18, 13],
[13, 18, 19, 14],
[13, 18, 19, 14],
[13, 18, 19, 14],
]
)
np.testing.assert_array_equal(test_results, expected_results)
def test_regrid_nearest_2():
"""Test nearest neighbour regridding option 'nearest-2'"""
cube_in, cube_out_mask, _ = define_source_target_grid_data()
regrid_nearest = RegridLandSea(regrid_mode="nearest-2",)(cube_in, cube_out_mask)
expected_results = np.array(
[
[0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3],
[0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3],
[5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8],
[5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8],
[10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13],
[10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13],
[10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13],
[15, 16, 16, 16, 17, 17, 17, 18, 18, 18, 18],
]
)
np.testing.assert_allclose(regrid_nearest.data, expected_results, atol=1e-3)
def test_regrid_bilinear_2():
"""Test bilinear regridding option 'bilinear-2'"""
cube_in, cube_out_mask, _ = define_source_target_grid_data()
regrid_bilinear = RegridLandSea(regrid_mode="bilinear-2",)(cube_in, cube_out_mask)
expected_results = np.array(
[
[0.5, 0.8, 1.1, 1.4, 1.7, 2.0, 2.3, 2.6, 2.9, 3.2, 3.5],
[2.5, 2.8, 3.1, 3.4, 3.7, 4.0, 4.3, 4.6, 4.9, 5.2, 5.5],
[4.5, 4.8, 5.1, 5.4, 5.7, 6.0, 6.3, 6.6, 6.9, 7.2, 7.5],
[6.5, 6.8, 7.1, 7.4, 7.7, 8.0, 8.3, 8.6, 8.9, 9.2, 9.5],
[8.5, 8.8, 9.1, 9.4, 9.7, 10.0, 10.3, 10.6, 10.9, 11.2, 11.5],
[10.5, 10.8, 11.1, 11.4, 11.7, 12.0, 12.3, 12.6, 12.9, 13.2, 13.5],
[12.5, 12.8, 13.1, 13.4, 13.7, 14.0, 14.3, 14.6, 14.9, 15.2, 15.5],
[14.5, 14.8, 15.1, 15.4, 15.7, 16.0, 16.3, 16.6, 16.9, 17.2, 17.5],
]
)
np.testing.assert_allclose(regrid_bilinear.data, expected_results, atol=1e-3)
def test_regrid_nearest_with_mask_2():
"""Test nearest-with-mask-2 regridding"""
cube_in, cube_out_mask, cube_in_mask = define_source_target_grid_data()
regrid_nearest_with_mask = RegridLandSea(
regrid_mode="nearest-with-mask-2",
landmask=cube_in_mask,
landmask_vicinity=250000000,
)(cube_in, cube_out_mask)
expected_results = np.array(
[
[0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3],
[0, 1, 1, 1, 7, 2, 7, 3, 3, 3, 3],
[5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8],
[5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9],
[10, 11, 11, 11, 7, 7, 7, 8, 8, 8, 14],
[10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 14],
[10, 11, 11, 11, 12, 12, 7, 13, 13, 13, 14],
[15, 16, 16, 16, 17, 17, 7, 18, 18, 18, 19],
]
)
np.testing.assert_allclose(
regrid_nearest_with_mask.data, expected_results, atol=1e-3
)
# consider constant field
cube_in.data = np.repeat(1.0, 20).reshape(4, 5).astype(np.float32)
regrid_nearest_with_mask = RegridLandSea(
regrid_mode="nearest-with-mask-2",
landmask=cube_in_mask,
landmask_vicinity=250000000,
)(cube_in, cube_out_mask)
expected_results = np.repeat(1.0, 88).reshape(8, 11).astype(np.float32)
np.testing.assert_allclose(
regrid_nearest_with_mask.data, expected_results, atol=1e-3
)
def test_regrid_bilinear_with_mask_2():
"""Test bilinear-with-mask-2 regridding """
cube_in, cube_out_mask, cube_in_mask = define_source_target_grid_data()
regrid_bilinear_with_mask = RegridLandSea(
regrid_mode="bilinear-with-mask-2",
landmask=cube_in_mask,
landmask_vicinity=250000000,
)(cube_in, cube_out_mask)
expected_results = np.array(
[
[0.5, 0.8, 1.40096, 3.2916, 2.0, 2.0, 2.0, 4.94333, 3.25586, 3.2, 3.5],
[2.5, 2.8, 3.1, 3.4, 5.48911, 2.76267, 6.32926, 4.6, 4.9, 5.2, 5.5],
[4.5, 4.8, 5.1, 5.4, 5.7, 7.0154, 6.3, 6.6, 6.9, 7.2, 7.5],
[6.5, 6.8, 7.1, 7.4, 7.7, 7.0, 7.19033, 7.6681, 7.6618, 9.2, 9.5],
[
8.5,
8.8,
9.1,
9.4,
8.10633,
7.0,
7.0,
7.62915,
7.21672,
9.11434,
10.52363,
],
[
10.5,
10.8,
11.00012,
11.01183,
13.15439,
12.0,
12.3,
12.6,
12.9,
13.71286,
15.74504,
],
[
12.5,
12.8,
12.23411,
13.25881,
14.14155,
14.0,
8.07328,
14.6,
14.9,
14.96332,
16.3334,
],
[
14.5,
14.8,
15.0997,
14.22659,
15.50905,
16.0,
9.8733,
16.6,
16.9,
16.91114,
17.03773,
],
]
)
np.testing.assert_allclose(
regrid_bilinear_with_mask.data, expected_results, atol=1e-3
)
# consider constant field
cube_in.data = np.repeat(1.0, 20).reshape(4, 5).astype(np.float32)
regrid_bilinear_with_mask = RegridLandSea(
regrid_mode="bilinear-with-mask-2",
landmask=cube_in_mask,
landmask_vicinity=250000000,
)(cube_in, cube_out_mask)
expected_results = np.repeat(1.0, 88).reshape(8, 11).astype(np.float32)
np.testing.assert_allclose(
regrid_bilinear_with_mask.data, expected_results, atol=1e-3
)
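# --- Hedged usage sketch (not part of the original tests) ---
# Direct use of the plugin exercised above, outside of pytest: regrid the
# synthetic input cube onto the target grid with the plain bilinear option.
if __name__ == "__main__":
    cube_in, cube_out_mask, cube_in_mask = define_source_target_grid_data()
    regridded = RegridLandSea(regrid_mode="bilinear-2")(cube_in, cube_out_mask)
    print(regridded.shape)  # (8, 11), i.e. the target grid defined above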
| 35.602374
| 86
| 0.596516
| 1,892
| 11,998
| 3.593552
| 0.156977
| 0.038829
| 0.038829
| 0.024857
| 0.589646
| 0.553905
| 0.53449
| 0.520959
| 0.511252
| 0.484042
| 0
| 0.123426
| 0.272045
| 11,998
| 336
| 87
| 35.708333
| 0.655026
| 0.236039
| 0
| 0.425439
| 0
| 0
| 0.02382
| 0
| 0
| 0
| 0
| 0
| 0.030702
| 1
| 0.035088
| false
| 0
| 0.02193
| 0
| 0.070175
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91d54e85fa9e683a691056ba3de4c8a49958c847
| 3,723
|
py
|
Python
|
test/test_workflow.py
|
asnramos/asv
|
8a0979b532d06c7c352826e2acf0dd872922260e
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_workflow.py
|
asnramos/asv
|
8a0979b532d06c7c352826e2acf0dd872922260e
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_workflow.py
|
asnramos/asv
|
8a0979b532d06c7c352826e2acf0dd872922260e
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import sys
import json
from os.path import join, isfile
import pytest
from asv import util
from . import tools
def test_run_publish(capfd, basic_conf_2):
tmpdir, local, conf, machine_file = basic_conf_2
tmpdir = util.long_path(tmpdir)
conf.matrix = {
"req": dict(conf.matrix),
"env": {"SOME_TEST_VAR": ["1"]},
}
# Tests a typical complete run/publish workflow
ret = tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
'--quick', '--show-stderr', '--profile',
'-a', 'warmup_time=0',
'--durations=5',
_machine_file=machine_file)
assert ret is None
text, err = capfd.readouterr()
assert len(os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))) == 5
assert len(os.listdir(join(tmpdir, 'results_workflow'))) == 2
assert 'asv: benchmark timed out (timeout 0.1s)' in text
assert 'total duration' in text
tools.run_asv_with_conf(conf, 'publish')
assert isfile(join(tmpdir, 'html', 'index.html'))
assert isfile(join(tmpdir, 'html', 'index.json'))
assert isfile(join(tmpdir, 'html', 'asv.js'))
assert isfile(join(tmpdir, 'html', 'asv.css'))
# Check parameterized test json data format
filename = glob.glob(join(tmpdir, 'html', 'graphs', 'arch-x86_64',
'asv_dummy_test_package_1',
'asv_dummy_test_package_2-' + tools.DUMMY2_VERSIONS[1],
'branch-master',
'cpu-Blazingly fast',
'env-SOME_TEST_VAR-1',
'machine-orangutan',
'os-GNU_Linux', 'python-*', 'ram-128GB',
'params_examples.time_skip.json'))[0]
with open(filename, 'r') as fp:
data = json.load(fp)
assert len(data) == 2
assert isinstance(data[0][0], int) # revision
assert len(data[0][1]) == 3
assert len(data[1][1]) == 3
assert isinstance(data[0][1][0], float)
assert isinstance(data[0][1][1], float)
assert data[0][1][2] is None
# Check that the skip options work
capfd.readouterr()
tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
'--quick', '--skip-existing-successful',
'--bench=time_secondary.track_value',
'--skip-existing-failed',
_machine_file=join(tmpdir, 'asv-machine.json'))
tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
'--bench=time_secondary.track_value',
'--quick', '--skip-existing-commits',
_machine_file=join(tmpdir, 'asv-machine.json'))
text, err = capfd.readouterr()
assert 'Running benchmarks.' not in text
# Check EXISTING and --environment work
python = "{0[0]}.{0[1]}".format(sys.version_info)
env_type = tools.get_default_environment_type(conf, python)
env_spec = ("-E", env_type + ":" + python)
tools.run_asv_with_conf(conf, 'run', "EXISTING", '--quick',
'--bench=time_secondary.track_value',
*env_spec,
_machine_file=machine_file)
# Remove the benchmarks.json file and check publish fails
os.remove(join(tmpdir, "results_workflow", "benchmarks.json"))
with pytest.raises(util.UserError):
tools.run_asv_with_conf(conf, 'publish')
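# --- Added note (not part of the original test file): this is a standard pytest
# test, so in a typical asv development checkout it could be run on its own with
# something like `python -m pytest test/test_workflow.py -k test_run_publish`
# (the exact invocation is an assumption).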
| 39.189474
| 85
| 0.552243
| 434
| 3,723
| 4.573733
| 0.331797
| 0.050378
| 0.033249
| 0.04534
| 0.349622
| 0.244836
| 0.184383
| 0.105793
| 0.062469
| 0.062469
| 0
| 0.018772
| 0.313188
| 3,723
| 94
| 86
| 39.606383
| 0.757528
| 0.076551
| 0
| 0.183099
| 0
| 0
| 0.229738
| 0.073469
| 0
| 0
| 0
| 0
| 0.239437
| 1
| 0.014085
| false
| 0
| 0.112676
| 0
| 0.126761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91d673a77f43b00da4523b7edc231f25e64c3f72
| 5,750
|
py
|
Python
|
trainer.py
|
Metro1998/P-DQN
|
6ab2ac6991d2685f10887c16f854ebba6144b306
|
[
"MIT"
] | 5
|
2021-12-13T15:25:07.000Z
|
2022-03-29T12:42:37.000Z
|
trainer.py
|
Metro1998/P-DQN
|
6ab2ac6991d2685f10887c16f854ebba6144b306
|
[
"MIT"
] | null | null | null |
trainer.py
|
Metro1998/P-DQN
|
6ab2ac6991d2685f10887c16f854ebba6144b306
|
[
"MIT"
] | null | null | null |
# @author Metro
# @time 2021/11/24
import os.path
import gym
from agents.pdqn import P_DQN
from utilities.memory import ReplayBuffer
from utilities.utilities import *
from utilities.route_generator import generate_routefile
class Train_and_Evaluate(object):
def __init__(self, config):
# Environment
generate_routefile(seed=config.seed, demand=config.demand)
self.env = gym.make(config.environment)
# Agent
self.agent = P_DQN(config, self.env)
# Memory
self.replay_memory_size = config.hyperparameters['replay_memory_size']
self.batch_size = config.hyperparameters['batch_size']
self.updates_per_step = config.hyperparameters['updates_per_step']
self.memory = ReplayBuffer(self.replay_memory_size)
self.total_steps = 0
self.total_updates = 0
self.save_freq = config.save_freq
self.file_to_save = config.file_to_save
self.maximum_episodes = config.hyperparameters['maximum_episodes']
self.train = config.train
self.evaluate = config.evaluate
self.evaluate_internal = config.evaluate_internal
self.agent_to_color_dictionary = config.agent_to_color_dictionary
self.standard_deviation_results = config.standard_deviation_results
self.colors = ['red', 'blue', 'green', 'orange', 'yellow', 'purple']
self.color_idx = 0
self.rolling_score_window = config.rolling_score_window
self.runs_per_agent = config.runs_per_agent
self.agent_name = config.agent_name
self.ceil = config.ceil
# Training Loop
def train_agent(self):
"""
:return:
"""
rolling_scores_for_diff_runs = []
file_to_save_actor = os.path.join(self.file_to_save, 'actor/')
file_to_save_actor_param = os.path.join(self.file_to_save, 'actor_param/')
file_to_save_runs = os.path.join(self.file_to_save, 'runs_1/')
file_to_save_rolling_scores = os.path.join(self.file_to_save, 'rolling_scores/')
os.makedirs(file_to_save_actor, exist_ok=True)
os.makedirs(file_to_save_actor_param, exist_ok=True)
os.makedirs(file_to_save_runs, exist_ok=True)
os.makedirs(file_to_save_rolling_scores, exist_ok=True)
for run in range(self.runs_per_agent):
game_full_episodes_scores = []
game_full_episodes_rolling_scores = []
for i_episode in range(self.maximum_episodes):
if self.save_freq > 0 and i_episode % self.save_freq == 0:
actor_path = os.path.join(file_to_save_actor, 'episode{}'.format(i_episode))
actor_param_path = os.path.join(file_to_save_actor_param, 'episode{}'.format(i_episode))
self.agent.save_models(actor_path, actor_param_path)
episode_score = []
episode_steps = 0
done = 0
state = self.env.reset() # n_steps
while not done:
if len(self.memory) > self.batch_size:
action, action_params = self.agent.select_action(state, self.train)
if self.ceil:
action_params = np.ceil(action_params).squeeze(0)
action_for_env = [action, int(action_params[action])]
for i in range(self.updates_per_step):
self.agent.update(self.memory)
self.total_updates += 1
else:
action_params = np.random.randint(low=10, high=31, size=8)
action = np.random.randint(7, size=1)[0]
action_for_env = [action, action_params[action]]
next_state, reward, done, info = self.env.step(action_for_env)
print(reward)
episode_steps += 1
episode_score.append(info)
self.total_steps += 1
self.memory.push(state, action, action_params, reward, next_state, done)
state = next_state
episode_score_so_far = np.mean(episode_score)
game_full_episodes_scores.append(episode_score_so_far)
game_full_episodes_rolling_scores.append(
np.mean(game_full_episodes_scores[-1 * self.rolling_score_window:]))
print("Episode: {}, total steps:{}, episode steps:{}, scores:{}".format(
i_episode, self.total_steps, episode_steps, episode_score_so_far))
self.env.close()
file_path_for_pic = os.path.join(file_to_save_runs, 'episode{}_run{}.jpg'.format(i_episode, run))
visualize_results_per_run(agent_results=game_full_episodes_scores,
agent_name=self.agent_name,
save_freq=1,
file_path_for_pic=file_path_for_pic)
rolling_scores_for_diff_runs.append(game_full_episodes_rolling_scores)
file_path_for_pic = os.path.join(file_to_save_rolling_scores, 'rolling_scores.jpg')
visualize_overall_agent_results(agent_results=rolling_scores_for_diff_runs,
agent_name=self.agent_name,
show_mean_and_std_range=True,
agent_to_color_dictionary=self.agent_to_color_dictionary,
standard_deviation_results=1,
file_path_for_pic=file_path_for_pic
)
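# --- Hedged usage sketch (added, not part of the original file). Judging from the
# attributes read in __init__, the `config` object is expected to provide at least:
#   config.seed, config.demand, config.environment, config.train, config.evaluate,
#   config.evaluate_internal, config.save_freq, config.file_to_save, config.ceil,
#   config.agent_to_color_dictionary, config.standard_deviation_results,
#   config.rolling_score_window, config.runs_per_agent, config.agent_name, and
#   config.hyperparameters with the keys 'replay_memory_size', 'batch_size',
#   'updates_per_step' and 'maximum_episodes'. With such a config object:
# trainer = Train_and_Evaluate(config)
# trainer.train_agent()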
| 42.592593
| 113
| 0.598609
| 677
| 5,750
| 4.70901
| 0.200886
| 0.033877
| 0.056462
| 0.037641
| 0.266939
| 0.140841
| 0.120138
| 0.105082
| 0.039523
| 0.02133
| 0
| 0.007918
| 0.31913
| 5,750
| 134
| 114
| 42.910448
| 0.806386
| 0.01513
| 0
| 0.021277
| 0
| 0
| 0.042791
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0
| 0.06383
| 0
| 0.095745
| 0.021277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91d9d1d9ae07a637595f6f1be3521d0ea393c068
| 1,468
|
py
|
Python
|
algorithms/maths/chinese_remainder_theorem.py
|
hbqdev/algorithms
|
65cc8551d86d7e065069d165dd8bf9baf10345a0
|
[
"MIT"
] | 22,426
|
2017-01-17T04:01:44.000Z
|
2022-03-31T12:06:16.000Z
|
algorithms/maths/chinese_remainder_theorem.py
|
Shubhanshu156/algorithms
|
d8f1428cee7f66376929f72c524b6e0325bf3492
|
[
"MIT"
] | 523
|
2017-04-18T12:05:11.000Z
|
2022-03-20T11:10:41.000Z
|
algorithms/maths/chinese_remainder_theorem.py
|
AmandaStromdahl/algorithms
|
1652835c3aef9aa670b67a5459e51dd3a8e6a71c
|
[
"MIT"
] | 4,900
|
2017-01-19T23:47:05.000Z
|
2022-03-31T10:00:47.000Z
|
from algorithms.maths.gcd import gcd
from typing import List
def solve_chinese_remainder(num : List[int], rem : List[int]):
"""
Computes the smallest x that satisfies the chinese remainder theorem
for a system of equations.
The system of equations has the form:
x % num[0] = rem[0]
x % num[1] = rem[1]
...
x % num[k - 1] = rem[k - 1]
Where k is the number of elements in num and rem, k > 0.
All numbers in num need to be pairwise coprime, otherwise an exception is raised
returns x: the smallest value for x that satisfies the system of equations
"""
if not len(num) == len(rem):
raise Exception("num and rem should have equal length")
if not len(num) > 0:
raise Exception("Lists num and rem need to contain at least one element")
for n in num:
if not n > 1:
raise Exception("All numbers in num needs to be > 1")
if not _check_coprime(num):
raise Exception("All pairs of numbers in num are not coprime")
k = len(num)
x = 1
while True:
i = 0
while i < k:
if x % num[i] != rem[i]:
break
i += 1
if i == k:
return x
else:
x += 1
def _check_coprime(l : List[int]):
for i in range(len(l)):
for j in range(len(l)):
if i == j:
continue
if gcd(l[i], l[j]) != 1:
return False
return True
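# Example check (added, not in the original file): for the classic system
# x % 3 == 2, x % 5 == 3, x % 7 == 2 the smallest solution is 23.
assert solve_chinese_remainder([3, 5, 7], [2, 3, 2]) == 23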
| 31.234043
| 84
| 0.559264
| 228
| 1,468
| 3.574561
| 0.350877
| 0.030675
| 0.062577
| 0.041718
| 0.058896
| 0.058896
| 0.058896
| 0
| 0
| 0
| 0
| 0.015707
| 0.349455
| 1,468
| 46
| 85
| 31.913043
| 0.837696
| 0.284741
| 0
| 0
| 0
| 0
| 0.167335
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.0625
| 0
| 0.21875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91dad0ab0f33fc6693bf8cc4e9a065c0be985607
| 19,086
|
py
|
Python
|
apphelper/image.py
|
caiyueliang/chineseocr
|
4495598f938936c6bcb2222fa44f840a7919212c
|
[
"MIT"
] | null | null | null |
apphelper/image.py
|
caiyueliang/chineseocr
|
4495598f938936c6bcb2222fa44f840a7919212c
|
[
"MIT"
] | null | null | null |
apphelper/image.py
|
caiyueliang/chineseocr
|
4495598f938936c6bcb2222fa44f840a7919212c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
## Image-related helper functions
@author: lywen
"""
import sys
import six
import os
import base64
import requests
import numpy as np
import cv2
from PIL import Image
import traceback
import uuid
from glob import glob
from bs4 import BeautifulSoup
def sort_box_(box):
x1,y1,x2,y2,x3,y3,x4,y4 = box[:8]
pts = (x1,y1),(x2,y2),(x3,y3),(x4,y4)
pts = np.array(pts, dtype="float32")
(x1,y1),(x2,y2),(x3,y3),(x4,y4) = _order_points(pts)
"""
newBox = [[x1,y1],[x2,y2],[x3,y3],[x4,y4]]
## sort x
newBox = sorted(newBox,key=lambda x:x[0])
x1,y1 = sorted(newBox[:2],key=lambda x:x[1])[0]
index = newBox.index([x1,y1])
newBox.pop(index)
newBox = sorted(newBox,key=lambda x:-x[1])
x4,y4 = sorted(newBox[:2],key=lambda x:x[0])[0]
index = newBox.index([x4,y4])
newBox.pop(index)
newBox = sorted(newBox,key=lambda x:-x[0])
x2,y2 = sorted(newBox[:2],key=lambda x:x[1])[0]
index = newBox.index([x2,y2])
newBox.pop(index)
newBox = sorted(newBox,key=lambda x:-x[1])
x3,y3 = sorted(newBox[:2],key=lambda x:x[0])[0]
"""
return x1,y1,x2,y2,x3,y3,x4,y4
import numpy as np
from scipy.spatial import distance as dist
def _order_points(pts):
# sort the points by their x-coordinates
"""
---------------------
Author: Tong_T
Source: CSDN
Original post: https://blog.csdn.net/Tong_T/article/details/81907132
Copyright notice: this is the author's original article; please include a link to the post when reposting.
"""
x_sorted = pts[np.argsort(pts[:, 0]), :]
# take the left-most and right-most points from the x-sorted
# coordinates
left_most = x_sorted[:2, :]
right_most = x_sorted[2:, :]
# now sort the left-most points by their y-coordinates so we can grab the top-left and bottom-left corners
left_most = left_most[np.argsort(left_most[:, 1]), :]
(tl, bl) = left_most
# with the top-left point as an anchor, compute the Euclidean distances to the right-most points;
# by the Pythagorean theorem, the point with the largest distance is the bottom-right corner
distance = dist.cdist(tl[np.newaxis], right_most, "euclidean")[0]
(br, tr) = right_most[np.argsort(distance)[::-1], :]
# return the coordinates ordered top-left, top-right, bottom-right, bottom-left
return np.array([tl, tr, br, bl], dtype="float32")
def solve(box):
"""
Corner coordinates of a w x h box rotated by angle about the point (cx, cy):
x = cx-w/2
y = cy-h/2
x1-cx = -w/2*cos(angle) +h/2*sin(angle)
y1 -cy= -w/2*sin(angle) -h/2*cos(angle)
h(x1-cx) = -wh/2*cos(angle) +hh/2*sin(angle)
w(y1 -cy)= -ww/2*sin(angle) -hw/2*cos(angle)
(hh+ww)/2sin(angle) = h(x1-cx)-w(y1 -cy)
"""
x1,y1,x2,y2,x3,y3,x4,y4= box[:8]
cx = (x1+x3+x2+x4)/4.0
cy = (y1+y3+y4+y2)/4.0
w = (np.sqrt((x2-x1)**2+(y2-y1)**2)+np.sqrt((x3-x4)**2+(y3-y4)**2))/2
h = (np.sqrt((x2-x3)**2+(y2-y3)**2)+np.sqrt((x1-x4)**2+(y1-y4)**2))/2
#x = cx-w/2
#y = cy-h/2
sinA = (h*(x1-cx)-w*(y1 -cy))*1.0/(h*h+w*w)*2
if abs(sinA)>1:
angle = None
else:
angle = np.arcsin(sinA)
return angle,w,h,cx,cy
def read_singLine_for_yolo(p):
"""
Single-line text image.
"""
im = Image.open(p).convert('RGB')
w,h = im.size
boxes = [{'cx':w/2,'cy':h/2,'w':w,'h':h,'angle':0.0}]
return im,boxes
def read_voc_xml(p):
## read a VOC XML annotation file
boxes = []
if os.path.exists(p):
with open(p) as f:
xmlString = f.read()
xmlString = BeautifulSoup(xmlString,'lxml')
objList = xmlString.findAll('object')
for obj in objList:
robndbox = obj.find('robndbox')
bndbox = obj.find('bndbox')
if robndbox is not None and bndbox is None:
cx = np.float(robndbox.find('cx').text)
cy = np.float(robndbox.find('cy').text)
w = np.float(robndbox.find('w').text)
h = np.float(robndbox.find('h').text)
angle = robndbox.find('angle').text
if angle=='nan' or h==0 or w==0:
#boxes = []
continue
angle = np.float(angle)
if abs(angle)>np.pi/2:
w,h = h,w
angle = abs(angle)%(np.pi/2)*np.sign(angle)
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx,cy,w,h,angle)
x1,y1,x2,y2,x3,y3,x4,y4 = sort_box_([x1,y1,x2,y2,x3,y3,x4,y4])
"""
if abs(angle)>np.pi/2:
##lableImg bug
x1,y1,x2,y2,x3,y3,x4,y4 = sort_box_([x1,y1,x2,y2,x3,y3,x4,y4])
"""
angle,w,h,cx,cy = solve([x1,y1,x2,y2,x3,y3,x4,y4])
else:
xmin = np.float(bndbox.find('xmin').text)
xmax = np.float(bndbox.find('xmax').text)
ymin = np.float(bndbox.find('ymin').text)
ymax = np.float(bndbox.find('ymax').text)
cx = (xmin+xmax)/2.0
cy = (ymin+ymax)/2.0
w = (-xmin+xmax)#/2.0
h = (-ymin+ymax)#/2.0
angle =0.0
boxes.append({'cx':cx,'cy':cy,'w':w,'h':h,'angle':angle})
return boxes
def xy_rotate_box(cx,cy,w,h,angle):
"""
Corner coordinates of a w x h box rotated by angle about the point (cx, cy):
x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle)+cx
y_new = (x-cx)*sin(angle) + (y-cy)*cos(angle)+cy
"""
cx = float(cx)
cy = float(cy)
w = float(w)
h = float(h)
angle = float(angle)
x1,y1 = rotate(cx-w/2,cy-h/2,angle,cx,cy)
x2,y2 = rotate(cx+w/2,cy-h/2,angle,cx,cy)
x3,y3 = rotate(cx+w/2,cy+h/2,angle,cx,cy)
x4,y4 = rotate(cx-w/2,cy+h/2,angle,cx,cy)
return x1,y1,x2,y2,x3,y3,x4,y4
from numpy import cos,sin,pi,tan
def rotate(x,y,angle,cx,cy):
"""
Rotate the point (x, y) around the point (cx, cy).
"""
#angle = angle*pi/180
x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle)+cx
y_new = (x-cx)*sin(angle) + (y-cy)*cos(angle)+cy
return x_new,y_new
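# Worked example (added, not in the original file): with the formula above,
# rotating the point (1, 0) by pi/2 radians about the origin maps it to (0, 1),
# i.e. rotate(1, 0, np.pi / 2, 0, 0) returns approximately (0.0, 1.0).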
def resize_box(boxes,scale):
newBoxes = []
for box in boxes:
cx = box['cx']*scale
cy = box['cy']*scale
w = box['w']*scale
h = box['h']*scale
angle = box['angle']
newBoxes.append({'cx':cx,'cy':cy,'w':w,'h':h,'angle':angle})
return newBoxes
def resize_im(w,h, scale=416, max_scale=608):
f=float(scale)/min(h, w)
if max_scale is not None:
if f*max(h, w)>max_scale:
f=float(max_scale)/max(h, w)
newW,newH = int(w*f),int(h*f)
return newW-(newW%32),newH-(newH%32)
def get_rorate(boxes,im,degree=0):
"""
Rotate the image by the given degree and return it together with the boxes transformed into the rotated frame.
"""
imgW,imgH = im.size
newBoxes = []
for line in boxes:
cx0,cy0 = imgW/2.0,imgH/2.0
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(**line)
x1,y1 = rotate(x1,y1,-degree/180*np.pi,cx0,cy0)
x2,y2 = rotate(x2,y2,-degree/180*np.pi,cx0,cy0)
x3,y3 = rotate(x3,y3,-degree/180*np.pi,cx0,cy0)
x4,y4 = rotate(x4,y4,-degree/180*np.pi,cx0,cy0)
box = (x1,y1,x2,y2,x3,y3,x4,y4)
degree_,w_,h_,cx_,cy_ = solve(box)
newLine = {'angle':degree_,'w':w_,'h':h_,'cx':cx_,'cy':cy_}
newBoxes.append(newLine)
return im.rotate(degree,center=(imgW/2.0,imgH/2.0 )),newBoxes
def letterbox_image(image, size,fillValue=[128,128,128]):
'''
resize image with unchanged aspect ratio using padding
'''
image_w, image_h = image.size
w, h = size
new_w = int(image_w * min(w*1.0/image_w, h*1.0/image_h))
new_h = int(image_h * min(w*1.0/image_w, h*1.0/image_h))
resized_image = image.resize((new_w,new_h), Image.BICUBIC)
if fillValue is None:
fillValue = [int(x.mean()) for x in cv2.split(np.array(image))]
boxed_image = Image.new('RGB', size, tuple(fillValue))
boxed_image.paste(resized_image,)
return boxed_image,new_w/image_w
def box_split(boxes,splitW = 15):
newBoxes = []
for box in boxes:
w = box['w']
h = box['h']
cx = box['cx']
cy=box['cy']
angle = box['angle']
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx,cy,w,h,angle)
splitBoxes =[]
i = 1
tanAngle = tan(-angle)
while True:
flag = 0 if i==1 else 1
xmin = x1+(i-1)*splitW
ymin = y1-tanAngle*splitW*i
xmax = x1+i*splitW
ymax = y4-(i-1)*tanAngle*splitW +flag*tanAngle*(x4-x1)
if xmax>max(x2,x3) and xmin>max(x2,x3):
break
splitBoxes.append([int(xmin),int(ymin),int(xmax),int(ymax)])
i+=1
newBoxes.append(splitBoxes)
return newBoxes
def get_box_spilt(boxes,im,sizeW,SizeH,splitW=8,isRoate=False,rorateDegree=0):
"""
isRoate: whether to rotate the boxes/image first
"""
size = sizeW,SizeH
if isRoate:
## rotate the boxes
im,boxes = get_rorate(boxes,im,degree=rorateDegree)
newIm,f = letterbox_image(im, size)
newBoxes = resize_box(boxes,f)
newBoxes = sum(box_split(newBoxes,splitW),[])
newBoxes = [box+[1] for box in newBoxes]
return newBoxes,newIm
def box_rotate(box,angle=0,imgH=0,imgW=0):
"""
Rotate the box coordinates counter-clockwise by 0/90/180/270 degrees.
"""
x1,y1,x2,y2,x3,y3,x4,y4 = box[:8]
if angle==90:
x1_,y1_ = y2,imgW-x2
x2_,y2_ = y3,imgW-x3
x3_,y3_ = y4,imgW-x4
x4_,y4_ = y1,imgW-x1
elif angle==180:
x1_,y1_ = imgW-x3,imgH-y3
x2_,y2_ = imgW-x4,imgH-y4
x3_,y3_ = imgW-x1,imgH-y1
x4_,y4_ = imgW-x2,imgH-y2
elif angle==270:
x1_,y1_ = imgH-y4,x4
x2_,y2_ = imgH-y1,x1
x3_,y3_ = imgH-y2,x2
x4_,y4_ = imgH-y3,x3
else:
x1_,y1_,x2_,y2_,x3_,y3_,x4_,y4_ = x1,y1,x2,y2,x3,y3,x4,y4
return (x1_,y1_,x2_,y2_,x3_,y3_,x4_,y4_)
def solve(box):
"""
Corner coordinates of a w x h box rotated by angle about the point (cx, cy):
x = cx-w/2
y = cy-h/2
x1-cx = -w/2*cos(angle) +h/2*sin(angle)
y1 -cy= -w/2*sin(angle) -h/2*cos(angle)
h(x1-cx) = -wh/2*cos(angle) +hh/2*sin(angle)
w(y1 -cy)= -ww/2*sin(angle) -hw/2*cos(angle)
(hh+ww)/2sin(angle) = h(x1-cx)-w(y1 -cy)
"""
x1,y1,x2,y2,x3,y3,x4,y4= box[:8]
cx = (x1+x3+x2+x4)/4.0
cy = (y1+y3+y4+y2)/4.0
w = (np.sqrt((x2-x1)**2+(y2-y1)**2)+np.sqrt((x3-x4)**2+(y3-y4)**2))/2
h = (np.sqrt((x2-x3)**2+(y2-y3)**2)+np.sqrt((x1-x4)**2+(y1-y4)**2))/2
sinA = (h*(x1-cx)-w*(y1 -cy))*1.0/(h*h+w*w)*2
angle = np.arcsin(sinA)
return angle,w,h,cx,cy
from numpy import cos,sin,pi
def rotate(x,y,angle,cx,cy):
angle = angle#*pi/180
x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle)+cx
y_new = (x-cx)*sin(angle) + (y-cy)*cos(angle)+cy
return x_new,y_new
def xy_rotate_box(cx,cy,w,h,angle):
"""
Corner coordinates of a w x h box rotated by angle about the point (cx, cy):
x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle)+cx
y_new = (x-cx)*sin(angle) + (y-cy)*cos(angle)+cy
"""
cx = float(cx)
cy = float(cy)
w = float(w)
h = float(h)
angle = float(angle)
x1,y1 = rotate(cx-w/2,cy-h/2,angle,cx,cy)
x2,y2 = rotate(cx+w/2,cy-h/2,angle,cx,cy)
x3,y3 = rotate(cx+w/2,cy+h/2,angle,cx,cy)
x4,y4 = rotate(cx-w/2,cy+h/2,angle,cx,cy)
return x1,y1,x2,y2,x3,y3,x4,y4
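# Worked example (added, not in the original file): with angle = 0 the box is
# axis-aligned, so a 4 x 2 box centred at (10, 10) keeps its corners in place:
# xy_rotate_box(10, 10, 4, 2, 0.0) -> (8.0, 9.0, 12.0, 9.0, 12.0, 11.0, 8.0, 11.0)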
# def rotate_cut_img(im, degree, box, w, h, leftAdjust=False, rightAdjust=False, alph=0.2):
# x1, y1, x2, y2, x3, y3, x4, y4 = box[:8]
# # print('rotate_cut_img', x1, y1, x2, y2, x3, y3, x4, y4)
#
# x_center, y_center = np.mean([x1, x2, x3, x4]), np.mean([y1, y2, y3, y4])
# right = 0
# left = 0
# if rightAdjust:
# right = 1
# if leftAdjust:
# left = 1
#
# # print(im.shape)
# box = (max(1, x_center - w / 2 - left * alph * (w / 2)), # xmin
# y_center - h / 2, # ymin
# min(x_center + w / 2 + right * alph * (w / 2), im.shape[1] - 1), # xmax
# y_center + h / 2) # ymax
# # print('box', box)
#
# newW = int(box[2] - box[0])
# newH = int(box[3] - box[1])
#
# # =====================================================
# # remap_points = np.array([[0, 0], [164, 0], [164, 48], [0, 48]], dtype=np.float32)
# remap_points = np.array([[0, 0], [newW, 0], [newW, newH], [0, newH]], dtype=np.float32)
# old_points = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]], dtype=np.float32)
# # perspective transform using the OpenCV functions
# M = cv2.getPerspectiveTransform(old_points, remap_points)
# tmpImg = cv2.warpPerspective(im, M, (newW, newH))
# # cv2.imshow('rotate_cut_img', tmpImg)
# # cv2.waitKey(0)
#
# return tmpImg, newW, newH
def rotate_cut_img(im, degree, box, w, h, leftAdjust=False, rightAdjust=False, alph=0.2):
x1, y1, x2, y2, x3, y3, x4, y4 = box[:8]
x_center, y_center = np.mean([x1, x2, x3, x4]), np.mean([y1, y2, y3, y4])
degree_ = degree * 180.0 / np.pi
right = 0
left = 0
if rightAdjust:
right = 1
if leftAdjust:
left = 1
box = (max(1, x_center - w / 2 - left * alph * (w / 2)), # xmin
y_center - h / 2, # ymin
min(x_center + w / 2 + right * alph * (w / 2), im.size[0] - 1), # xmax
y_center + h / 2) # ymax
newW = box[2] - box[0]
newH = box[3] - box[1]
tmpImg = im.rotate(degree_, center=(x_center, y_center)).crop(box)
return tmpImg, newW, newH
def letterbox_image(image, size, fillValue=[128, 128, 128]):
'''resize image with unchanged aspect ratio using padding'''
image_w, image_h = image.size
w, h = size
new_w = int(image_w * min(w*1.0/image_w, h*1.0/image_h))
new_h = int(image_h * min(w*1.0/image_w, h*1.0/image_h))
resized_image = image.resize((new_w,new_h), Image.BICUBIC)
if fillValue is None:
fillValue = [int(x.mean()) for x in cv2.split(np.array(image))]
boxed_image = Image.new('RGB', size, tuple(fillValue))
boxed_image.paste(resized_image, (0,0))
return boxed_image,new_w/image_w
from scipy.ndimage import filters,interpolation,morphology,measurements,minimum
#from pylab import amin, amax
from numpy import amin, amax
def estimate_skew_angle(raw):
"""
Estimate the skew angle of the text in the image.
"""
def resize_im(im, scale, max_scale=None):
f=float(scale)/min(im.shape[0], im.shape[1])
if max_scale!=None and f*max(im.shape[0], im.shape[1])>max_scale:
f=float(max_scale)/max(im.shape[0], im.shape[1])
return cv2.resize(im, (0, 0), fx=f, fy=f)
raw = resize_im(raw, scale=600, max_scale=900)
image = raw-amin(raw)
image = image/amax(image)
m = interpolation.zoom(image,0.5)
m = filters.percentile_filter(m,80,size=(20,2))
m = filters.percentile_filter(m,80,size=(2,20))
m = interpolation.zoom(m,1.0/0.5)
w,h = min(image.shape[1],m.shape[1]),min(image.shape[0],m.shape[0])
flat = np.clip(image[:h,:w]-m[:h,:w]+1,0,1)
d0,d1 = flat.shape
o0,o1 = int(0.1*d0),int(0.1*d1)
flat = amax(flat)-flat
flat -= amin(flat)
est = flat[o0:d0-o0,o1:d1-o1]
angles = range(-15,15)
estimates = []
for a in angles:
roest =interpolation.rotate(est,a,order=0,mode='constant')
v = np.mean(roest,axis=1)
v = np.var(v)
estimates.append((v,a))
_,a = max(estimates)
return a
def sort_box(box):
"""
Sort the boxes so the page is laid out in reading order.
box[index, 0] = x1
box[index, 1] = y1
box[index, 2] = x2
box[index, 3] = y2
box[index, 4] = x3
box[index, 5] = y3
box[index, 6] = x4
box[index, 7] = y4
"""
box = sorted(box,key=lambda x:sum([x[1],x[3],x[5],x[7]]))
return list(box)
def get_boxes( bboxes):
"""
boxes: bounding boxes
"""
text_recs=np.zeros((len(bboxes), 8), np.int)
index = 0
for box in bboxes:
b1 = box[6] - box[7] / 2
b2 = box[6] + box[7] / 2
x1 = box[0]
y1 = box[5] * box[0] + b1
x2 = box[2]
y2 = box[5] * box[2] + b1
x3 = box[0]
y3 = box[5] * box[0] + b2
x4 = box[2]
y4 = box[5] * box[2] + b2
disX = x2 - x1
disY = y2 - y1
width = np.sqrt(disX*disX + disY*disY)
fTmp0 = y3 - y1
fTmp1 = fTmp0 * disY / width
x = np.fabs(fTmp1*disX / width)
y = np.fabs(fTmp1*disY / width)
if box[5] < 0:
x1 -= x
y1 += y
x4 += x
y4 -= y
else:
x2 += x
y2 += y
x3 -= x
y3 -= y
text_recs[index, 0] = x1
text_recs[index, 1] = y1
text_recs[index, 2] = x2
text_recs[index, 3] = y2
text_recs[index, 4] = x3
text_recs[index, 5] = y3
text_recs[index, 6] = x4
text_recs[index, 7] = y4
index = index + 1
return text_recs
def union_rbox(result,alpha=0.1):
"""
Merge boxes line by line.
"""
def diff(box1,box2):
"""
Compute the normalized vertical distance between box1 and box2.
"""
cy1 = box1['cy']
cy2 = box2['cy']
h1 = box1['h']
h2 = box2['h']
return abs(cy1-cy2)/max(0.01,min(h1/2,h2/2))
def sort_group_box(boxes):
"""
Sort the boxes within a line and merge them into a single box.
"""
N = len(boxes)
boxes = sorted(boxes,key=lambda x:x['cx'])
text = ' '.join([bx['text'] for bx in boxes])
box4 = np.zeros((N,8))
for i in range(N):
cx =boxes[i]['cx']
cy = boxes[i]['cy']
degree =boxes[i]['degree']
w = boxes[i]['w']
h = boxes[i]['h']
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx, cy, w, h, degree/180*np.pi)
box4[i] = [x1,y1,x2,y2,x3,y3,x4,y4]
x1 = box4[:,0].min()
y1 = box4[:,1].min()
x2 = box4[:,2].max()
y2 = box4[:,3].min()
x3 = box4[:,4].max()
y3 = box4[:,5].max()
x4 = box4[:,6].min()
y4 = box4[:,7].max()
angle,w,h,cx,cy = solve([x1,y1,x2,y2,x3,y3,x4,y4])
return {'text':text,'cx':cx,'cy':cy,'w':w,'h':h,'degree':angle/np.pi*180}
newBox = []
for line in result:
if len(newBox)==0:
newBox.append([line])
else:
check=False
for box in newBox[-1]:
if diff(line,box)>alpha:
check = True
if not check:
newBox[-1].append(line)
else:
newBox.append([line])
newBox = [sort_group_box(bx) for bx in newBox]
return newBox
def adjust_box_to_origin(img,angle, result):
"""
Map the boxes back to the original image coordinates.
"""
h,w = img.shape[:2]
if angle in [90,270]:
imgW,imgH = img.shape[:2]
else:
imgH,imgW= img.shape[:2]
newresult = []
for line in result:
cx =line['box']['cx']
cy = line['box']['cy']
degree =line['box']['angle']
w = line['box']['w']
h = line['box']['h']
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx, cy, w, h, degree/180*np.pi)
x1,y1,x2,y2,x3,y3,x4,y4 = box_rotate([x1,y1,x2,y2,x3,y3,x4,y4],angle=(360-angle)%360,imgH=imgH,imgW=imgW)
box = x1,y1,x2,y2,x3,y3,x4,y4
newresult.append({'name':line['name'],'text':line['text'],'box':box})
return newresult
| 29.40832
| 113
| 0.515561
| 3,071
| 19,086
| 3.132204
| 0.112341
| 0.017465
| 0.020584
| 0.027446
| 0.450463
| 0.429566
| 0.404616
| 0.376027
| 0.363967
| 0.339952
| 0
| 0.074795
| 0.297391
| 19,086
| 649
| 114
| 29.40832
| 0.642506
| 0.155035
| 0
| 0.257732
| 0
| 0
| 0.016641
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069588
| false
| 0
| 0.046392
| 0
| 0.185567
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91dbd76ebb4a6ee074d9e41d9b7337c54be487ec
| 1,748
|
py
|
Python
|
data_structure/stack_and_queue/494. Target Sum_ Medium.py
|
JunzhongLin/leetcode_practice
|
47b2f5cc3c87de004ae21a94024e751b40b8f559
|
[
"MIT"
] | null | null | null |
data_structure/stack_and_queue/494. Target Sum_ Medium.py
|
JunzhongLin/leetcode_practice
|
47b2f5cc3c87de004ae21a94024e751b40b8f559
|
[
"MIT"
] | null | null | null |
data_structure/stack_and_queue/494. Target Sum_ Medium.py
|
JunzhongLin/leetcode_practice
|
47b2f5cc3c87de004ae21a94024e751b40b8f559
|
[
"MIT"
] | null | null | null |
'''
You are given an integer array nums and an integer target.
You want to build an expression out of nums by adding one of the symbols '+' and '-' before each integer in nums and then concatenating all the integers.
For example, if nums = [2, 1], you can add a '+' before 2 and a '-' before 1 and concatenate them to build the expression "+2-1".
Return the number of different expressions that you can build that evaluate to target.
'''
from collections import defaultdict
class Solution:
def findTargetSumWays(self, nums, target) -> int:
count = 0
target_depth = len(nums) - 1
stack = [(0, -1, 0)]
cache = defaultdict(int)
while stack:
# print(stack)
# count += 1
# if count == 10:
# break
curr_sum, depth, visited = stack.pop()
if visited:
if depth == target_depth:
if curr_sum == target:
cache[(curr_sum, depth, visited)] = 1
else:
l = cache[(curr_sum + nums[depth + 1], depth + 1, 1)]
r = cache[(curr_sum - nums[depth + 1], depth + 1, 1)]
cache[(curr_sum, depth, visited)] = l + r
continue
else:
if (curr_sum, depth, 1) in cache:
continue
stack.append((curr_sum, depth, 1))
if depth < target_depth:
stack.append((curr_sum + nums[depth + 1], depth + 1, 0))
stack.append((curr_sum - nums[depth + 1], depth + 1, 0))
return cache[(0, -1, 1)]
input_val, target = [1,1,1,1,1], 3
res = Solution().findTargetSumWays(input_val, target)
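# Sanity check (added, not in the original file): for nums = [1, 1, 1, 1, 1] and
# target = 3 there are 5 valid sign assignments, so res should equal 5.
assert res == 5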
| 34.96
| 151
| 0.529748
| 223
| 1,748
| 4.085202
| 0.340807
| 0.076839
| 0.065862
| 0.070252
| 0.193194
| 0.140505
| 0.140505
| 0.140505
| 0.140505
| 0.076839
| 0
| 0.034234
| 0.364989
| 1,748
| 50
| 152
| 34.96
| 0.786486
| 0.276316
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.035714
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91dd1a3b5de5801e9e8baf1d02a035b6853b1ad3
| 3,733
|
py
|
Python
|
fixtrack/frontend/pickable_markers.py
|
os-gabe/fixtrack
|
a0af4dfa9342acc0ba05c0249a32806c825b74b2
|
[
"MIT"
] | null | null | null |
fixtrack/frontend/pickable_markers.py
|
os-gabe/fixtrack
|
a0af4dfa9342acc0ba05c0249a32806c825b74b2
|
[
"MIT"
] | null | null | null |
fixtrack/frontend/pickable_markers.py
|
os-gabe/fixtrack
|
a0af4dfa9342acc0ba05c0249a32806c825b74b2
|
[
"MIT"
] | 1
|
2022-03-25T04:26:36.000Z
|
2022-03-25T04:26:36.000Z
|
import numpy as np
from fixtrack.frontend.pickable_base import PickableBase
from vispy import scene
class PickableMarkers(PickableBase):
"""
Markers that can highlight on hover and be selected
"""
class State(PickableBase.State):
def __init__(self, **kwargs):
super(PickableMarkers.State, self).__init__(**kwargs)
self.sizes_raw = None
self.sizes = None
class Config(PickableBase.Config):
def __init__(self, select_scale=1.0, hover_scale=1.0, **kwargs):
super(PickableMarkers.Config, self).__init__(**kwargs)
self.select_scale = select_scale
self.hover_scale = hover_scale
_kwargs_ignore = ["size", "color_select", "color_hover"]
def __init__(self, parent=None, data=np.zeros((0, 3)), select_scale=2.0, **kwargs):
super(PickableMarkers, self).__init__(
scene.visuals.Markers(pos=data, parent=parent), data=data, parent=parent, **kwargs
)
self.visual.set_gl_state("translucent", depth_test=False, blend=True)
self._cfg.select_scale = select_scale
self._cfg.hover_scale = select_scale * 1.15
self.multi_sel = None
@property
def marker_size(self):
return self._cfg.vis_args["size"]
@marker_size.setter
def marker_size(self, s):
self._cfg.vis_args["size"] = max(1, s)
self._init_data()
self.set_data()
def _selected_idxs(self):
sel = []
if self.multi_sel is None:
if self._state.idx_selected >= 0:
sel = [self._state.idx_selected]
else:
sel = self.multi_sel
return sel
def _init_data(self):
super(PickableMarkers, self)._init_data()
n = len(self._state.data)
self._state.sizes_raw = np.full((n, ), self._cfg.vis_args["size"])
self._state.sizes = self._state.sizes_raw.copy()
def _highlight(self):
self._state.sizes = self._state.sizes_raw.copy()
super(PickableMarkers, self)._highlight()
def _highlight_selected(self):
super(PickableMarkers, self)._highlight_selected()
cfg = self._cfg
state = self._state
if (state.idx_selected >= 0) and cfg.pickable:
state.sizes[self._selected_idxs()] = cfg.vis_args["size"] * cfg.select_scale
def _highlight_hovered(self):
super(PickableMarkers, self)._highlight_hovered()
cfg = self._cfg
state = self._state
if (state.idx_hover >= 0) and cfg.hoverable:
state.sizes[self._hover_idxs()] = cfg.vis_args["size"] * cfg.hover_scale
def _set_data(self):
if len(self._state.data) > 0:
kwargs = {
k: v
for k, v in self._cfg.vis_args.items() if k not in self._kwargs_ignore
}
self._state.edge_colors[:, 3] = self._state.colors[:, 3]
self.visual.set_data(
pos=self._state.data,
size=self._state.sizes,
face_color=self._state.colors,
edge_color=self._state.edge_colors,
edge_width=3,
**kwargs
)
else:
self.visual.set_data(np.zeros((0, 3)))
def _set_data_false(self):
if len(self._state.data) > 0:
colors = self._pa.unique_colors(id(self)) / 255.0
colors[self._state.colors[:, 3] < 1.0e-3] = 0.0
self.visual.set_data(
pos=self._state.data,
size=self._state.sizes,
face_color=colors,
edge_color=colors,
edge_width=0,
)
else:
self.visual.set_data(np.zeros((0, 3)))
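# --- Hedged usage sketch (added, not part of the original file; the canvas/view
# setup below is an assumption). PickableMarkers attaches a vispy Markers visual
# to the given parent scene node:
# canvas = scene.SceneCanvas(keys="interactive", show=True)
# view = canvas.central_widget.add_view()
# markers = PickableMarkers(parent=view.scene, data=np.random.rand(100, 3), select_scale=2.0)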
| 33.936364
| 94
| 0.587731
| 462
| 3,733
| 4.450216
| 0.203463
| 0.091926
| 0.047665
| 0.034047
| 0.291342
| 0.197471
| 0.177043
| 0.154669
| 0.120623
| 0.058366
| 0
| 0.013303
| 0.295205
| 3,733
| 109
| 95
| 34.247706
| 0.768149
| 0.013662
| 0
| 0.213483
| 0
| 0
| 0.015821
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.134831
| false
| 0
| 0.033708
| 0.011236
| 0.235955
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91dedad5ac38b05af586adadc029baeb5dbdb36c
| 2,242
|
py
|
Python
|
examples/blocking_subscribe.py
|
FFY00/jeepney
|
293241a54fbb73581755e97191720ed1603aed34
|
[
"MIT"
] | null | null | null |
examples/blocking_subscribe.py
|
FFY00/jeepney
|
293241a54fbb73581755e97191720ed1603aed34
|
[
"MIT"
] | null | null | null |
examples/blocking_subscribe.py
|
FFY00/jeepney
|
293241a54fbb73581755e97191720ed1603aed34
|
[
"MIT"
] | null | null | null |
"""
Example of subscribing to a D-Bus signal using blocking I/O.
This subscribes to the signal for a desktop notification being closed.
To try it, start this script, then trigger a desktop notification, and close it
somehow to trigger the signal. Use Ctrl-C to stop the script.
This example relies on the ``org.freedesktop.Notifications.NotificationClosed``
signal; some desktops may not support it. See the notification spec for more
details:
https://people.gnome.org/~mccann/docs/notification-spec/notification-spec-latest.html
Match rules are defined in the D-Bus specification:
https://dbus.freedesktop.org/doc/dbus-specification.html#message-bus-routing-match-rules
"""
from jeepney.bus_messages import MatchRule, message_bus
from jeepney.integrate.blocking import connect_and_authenticate, Proxy
from jeepney.wrappers import DBusAddress
noti = DBusAddress('/org/freedesktop/Notifications',
bus_name='org.freedesktop.Notifications',
interface='org.freedesktop.Notifications')
connection = connect_and_authenticate(bus="SESSION")
match_rule = MatchRule(
type="signal",
sender=noti.bus_name,
interface=noti.interface,
member="NotificationClosed",
path=noti.object_path,
)
# This defines messages for talking to the D-Bus bus daemon itself:
session_bus = Proxy(message_bus, connection)
# Tell the session bus to pass us matching signal messages:
print("Match added?", session_bus.AddMatch(match_rule) == ())
reasons = {1: 'expiry', 2: 'dismissal', 3: 'dbus', 4: 'undefined'}
def notification_closed(data):
"""Callback for when we receive a notification closed signal"""
nid, reason_no = data
reason = reasons.get(reason_no, 'unknown')
print('Notification {} closed by: {}'.format(nid, reason))
# Connect the callback to the relevant signal
connection.router.subscribe_signal(
callback=notification_closed,
path=noti.object_path,
interface=noti.interface,
member="NotificationClosed"
)
# Using dbus-send or d-feet or blocking_notify.py, send a notification and
# manually close it or call ``.CloseNotification`` after a beat.
try:
while True:
connection.recv_messages()
except KeyboardInterrupt:
pass
connection.close()
| 34.492308
| 88
| 0.752007
| 298
| 2,242
| 5.583893
| 0.459732
| 0.033654
| 0.064904
| 0.033654
| 0.055288
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002101
| 0.150758
| 2,242
| 64
| 89
| 35.03125
| 0.871849
| 0.460303
| 0
| 0.121212
| 0
| 0
| 0.17938
| 0.073764
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0.030303
| 0.090909
| 0
| 0.121212
| 0.060606
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91e1834b8771a7ae37346ead4e29d9b3101da09b
| 917
|
py
|
Python
|
setup.py
|
Kuba77/Xian-DB
|
2f15ef1b9b7a96c21bd46e9fb8481de6feb713b7
|
[
"MIT"
] | 1
|
2016-10-22T21:04:09.000Z
|
2016-10-22T21:04:09.000Z
|
setup.py
|
Kuba77/Xian-DB
|
2f15ef1b9b7a96c21bd46e9fb8481de6feb713b7
|
[
"MIT"
] | null | null | null |
setup.py
|
Kuba77/Xian-DB
|
2f15ef1b9b7a96c21bd46e9fb8481de6feb713b7
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='xiandb',
version='0.2.0',
description='A database model for Xian',
long_description=long_description,
url='https://github.com/Kuba77/Xian-DB',
author='Jakub Chronowski',
author_email='jakub@chronow.ski',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: XIAN Collaborators',
'Topic :: Software Development :: Database',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7'
],
keywords='xian database db',
packages=['xiandb', 'xiandb.models'],
install_requires=['mongokat', 'pyyaml', 'bcrypt'],
extras_require={}
)
| 21.325581
| 64
| 0.641221
| 106
| 917
| 5.45283
| 0.698113
| 0.077855
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012465
| 0.21265
| 917
| 42
| 65
| 21.833333
| 0.788089
| 0
| 0
| 0
| 0
| 0
| 0.393675
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91e197a6aad024d05c47c68f4923bef335ff491f
| 4,993
|
py
|
Python
|
yolo3/focal_loss.py
|
ashishpatel26/tf2-yolo3
|
38814178643eb8e1f8b5e4fe8d448faed44ad574
|
[
"Apache-2.0"
] | 43
|
2019-12-08T15:05:53.000Z
|
2022-03-20T13:38:07.000Z
|
yolo3/focal_loss.py
|
1911590204/tf2-yolo3
|
38814178643eb8e1f8b5e4fe8d448faed44ad574
|
[
"Apache-2.0"
] | 3
|
2020-05-18T11:20:15.000Z
|
2021-02-26T01:11:04.000Z
|
yolo3/focal_loss.py
|
1911590204/tf2-yolo3
|
38814178643eb8e1f8b5e4fe8d448faed44ad574
|
[
"Apache-2.0"
] | 15
|
2019-12-25T01:44:29.000Z
|
2022-01-18T08:45:49.000Z
|
from functools import partial
import tensorflow as tf
_EPSILON = tf.keras.backend.epsilon()
def register_keras_custom_object(cls):
tf.keras.utils.get_custom_objects()[cls.__name__] = cls
return cls
def binary_focal_loss(y_true, y_pred, gamma, *, pos_weight=None, from_logits=False, label_smoothing=None):
y_pred = tf.convert_to_tensor(y_pred)
if not y_pred.dtype.is_floating:
y_pred = tf.dtypes.cast(y_pred, dtype=tf.float32)
if from_logits:
return _binary_focal_loss_from_logits(labels=y_true,
logits=y_pred,
gamma=gamma,
pos_weight=pos_weight,
label_smoothing=label_smoothing)
else:
return _binary_focal_loss_from_probs(labels=y_true,
p=y_pred,
gamma=gamma,
pos_weight=pos_weight,
label_smoothing=label_smoothing)
@register_keras_custom_object
class BinaryFocalLoss(tf.keras.losses.Loss):
def __init__(self, gamma, *, pos_weight=None, from_logits=False, label_smoothing=None, **kwargs):
super().__init__(**kwargs)
self.gamma = gamma
self.pos_weight = pos_weight
self.from_logits = from_logits
self.label_smoothing = label_smoothing
def get_config(self):
config = super().get_config()
config.update(gamma=self.gamma,
pos_weight=self.pos_weight,
from_logits=self.from_logits,
label_smoothing=self.label_smoothing)
return config
def call(self, y_true, y_pred):
return binary_focal_loss(y_true=y_true,
y_pred=y_pred,
gamma=self.gamma,
pos_weight=self.pos_weight,
from_logits=self.from_logits,
label_smoothing=self.label_smoothing)
# Helper functions below
def _process_labels(labels, label_smoothing, dtype):
labels = tf.dtypes.cast(labels, dtype=dtype)
if label_smoothing is not None:
labels = (1 - label_smoothing) * labels + label_smoothing * 0.5
return labels
def _binary_focal_loss_from_logits(labels, logits, gamma, pos_weight, label_smoothing):
labels = _process_labels(labels=labels, label_smoothing=label_smoothing, dtype=logits.dtype)
# Compute probabilities for the positive class
p = tf.math.sigmoid(logits)
if label_smoothing is None:
labels_shape = labels.shape
logits_shape = logits.shape
if not labels_shape.is_fully_defined() or labels_shape != logits_shape:
labels_shape = tf.shape(labels)
logits_shape = tf.shape(logits)
shape = tf.broadcast_dynamic_shape(labels_shape, logits_shape)
labels = tf.broadcast_to(labels, shape)
logits = tf.broadcast_to(logits, shape)
if pos_weight is None:
loss_func = tf.nn.sigmoid_cross_entropy_with_logits
else:
loss_func = partial(tf.nn.weighted_cross_entropy_with_logits, pos_weight=pos_weight)
loss = loss_func(labels=labels, logits=logits)
modulation_pos = (1 - p)**gamma
modulation_neg = p**gamma
mask = tf.dtypes.cast(labels, dtype=tf.bool)
modulation = tf.where(mask, modulation_pos, modulation_neg)
return modulation * loss
# Terms for the positive and negative class components of the loss
pos_term = labels * ((1 - p)**gamma)
neg_term = (1 - labels) * (p**gamma)
# Term involving the log and ReLU
log_weight = pos_term
if pos_weight is not None:
log_weight *= pos_weight
log_weight += neg_term
log_term = tf.math.log1p(tf.math.exp(-tf.math.abs(logits)))
log_term += tf.nn.relu(-logits)
log_term *= log_weight
# Combine all the terms into the loss
loss = neg_term * logits + log_term
return loss
def _binary_focal_loss_from_probs(labels, p, gamma, pos_weight, label_smoothing):
q = 1 - p
# For numerical stability (so we don't inadvertently take the log of 0)
p = tf.math.maximum(p, _EPSILON)
q = tf.math.maximum(q, _EPSILON)
# Loss for the positive examples
pos_loss = -(q**gamma) * tf.math.log(p)
if pos_weight is not None:
pos_loss *= pos_weight
# Loss for the negative examples
neg_loss = -(p**gamma) * tf.math.log(q)
# Combine loss terms
if label_smoothing is None:
labels = tf.dtypes.cast(labels, dtype=tf.bool)
loss = tf.where(labels, pos_loss, neg_loss)
else:
labels = _process_labels(labels=labels, label_smoothing=label_smoothing, dtype=p.dtype)
loss = labels * pos_loss + (1 - labels) * neg_loss
return loss
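# --- Hedged usage sketch (added, not part of the original file; the toy model is
# an assumption). BinaryFocalLoss follows the tf.keras.losses.Loss interface, so
# it can be passed directly to model.compile:
# model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
# model.compile(optimizer="adam", loss=BinaryFocalLoss(gamma=2.0, from_logits=True))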
| 36.985185
| 106
| 0.616263
| 631
| 4,993
| 4.59271
| 0.183835
| 0.115942
| 0.038647
| 0.048309
| 0.360594
| 0.304693
| 0.204969
| 0.184955
| 0.184955
| 0.184955
| 0
| 0.003441
| 0.301622
| 4,993
| 135
| 107
| 36.985185
| 0.827646
| 0.070298
| 0
| 0.216495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082474
| false
| 0
| 0.020619
| 0.010309
| 0.206186
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91e197bc72174a007b45ebf73223d69beb79eca0
| 13,808
|
py
|
Python
|
characters/models/characters.py
|
Sult/evetool
|
155db9f3b0ecc273fe3c75daf8f9c6f37cb3e47f
|
[
"MIT"
] | null | null | null |
characters/models/characters.py
|
Sult/evetool
|
155db9f3b0ecc273fe3c75daf8f9c6f37cb3e47f
|
[
"MIT"
] | null | null | null |
characters/models/characters.py
|
Sult/evetool
|
155db9f3b0ecc273fe3c75daf8f9c6f37cb3e47f
|
[
"MIT"
] | null | null | null |
import time
from collections import OrderedDict
from datetime import datetime, timedelta
from django.db import models
from django.conf import settings
from django.utils.timezone import utc
from .skills import Skill, SkillGroup
from metrics.models import Corporation
from tasks.models import EveApiCache, Task
from evetool.storage import OverwriteStorage
import utils
class CharacterApi(models.Model):
""" charactertype apis """
api = models.ForeignKey("apis.Api")
characterid = models.BigIntegerField()
charactername = models.CharField(max_length=254)
corporationid = models.BigIntegerField()
corporationname = models.CharField(max_length=254)
def __unicode__(self):
return self.charactername
#get right icon for characters view
def view_icon(self):
try:
icon = self.characterapiicon_set.get(size=128, relation=self)
return icon.icon
except CharacterApiIcon.DoesNotExist:
return None
# character sheet image
def sheet_icon(self):
try:
icon = self.characterapiicon_set.get(size=200, relation=self)
return icon.icon
except CharacterApiIcon.DoesNotExist:
return None
def current_balance(self):
if self.api.access_to("CharacterInfo"):
sheet = utils.connection.api_request(
"CharacterInfoAuth", obj=self
)
if sheet.accountBalance:
return round(float(sheet.accountBalance), 2)
return 0
def sheet_cache_key(self):
key = "CharacterInfo"
category = EveApiCache.EVE
kwargs = {"characterID": self.characterid}
if self.api.access_to("CharacterInfo"):
return utils.connection.generate_cache_key(
category, key, api=self.api, **kwargs
)
else:
return utils.connection.generate_cache_key(category, key)
def sheet_set_cache_job(self):
key = "CharacterInfo"
category = EveApiCache.EVE
kwargs = {"characterID": self.characterid}
if self.api.access_to("CharacterInfo"):
api = self.api
else:
api = None
EveApiCache.objects.create(
priority=Task.VERY_HIGH,
api=api,
category=category,
key=key,
kwargs=kwargs,
)
#get the data for landing page after character selection
def character_sheet(self):
sheet = utils.connection.get_cache(self.sheet_cache_key())
employment = self.employment_history(sheet)
return sheet, employment
#employment history of a player
@staticmethod
def employment_history(sheet):
cache_key = "employment_history_%d" % int(sheet.characterID)
#result = utils.connection.get_cache(cache_key)
result = None
if not result:
cache_timer = 60 * 60
result = []
for corp_data in sheet.employmentHistory:
result.append({
"corporation": Corporation.find_corporation(
corp_data.corporationID
),
"startdate": utils.common.convert_timestamp(
corp_data.startDate
)
})
utils.connection.set_cache(cache_key, result, cache_timer)
return result
#get skill in training
def skill_in_training(self):
training_skill = None
if self.api.access_to("SkillInTraining"):
in_training = utils.connection.api_request(
"SkillInTraining", obj=self
)
try:
training_skill = {
"skill": Skill.objects.get(
typeid=int(in_training.trainingTypeID)
).typename,
"to_level": int(in_training.trainingToLevel),
"finnished": utils.common.convert_timestamp(
in_training.trainingEndTime
)
}
except AttributeError:
training_skill = {"skill": "No skill in training"}
return training_skill
#character's trained skills
def trained_skills(self):
cache_key = "trained_skills_%d" % self.pk
result = utils.connection.get_cache(cache_key)
if not result:
cache_timer = 60 * 5
sheet = utils.connection.api_request("CharacterSheet", obj=self)
groups = SkillGroup.objects.exclude(
groupname="Fake Skills"
).order_by("groupname")
skills = Skill.objects.order_by("typename")
all_skills = OrderedDict()
skillpoints = {}
for group in groups:
all_skills[group.groupname] = list()
skillpoints[group.groupname] = 0
for skill in skills:
trained = sheet.skills.Get(skill.typeid, False)
if trained:
all_skills[skill.skillgroup.groupname].append(
{
"skill": skill,
"level": int(trained.level)
}
)
skillpoints[skill.skillgroup.groupname] += \
trained.skillpoints
result = {
"all_skills": all_skills,
"skillpoints": skillpoints,
}
utils.connection.set_cache(cache_key, result, cache_timer)
return result
#get skillqueue
def skill_queue(self):
queue = None
if self.api.access_to("SkillQueue"):
queue = {}
skills = utils.connection.api_request(
"SkillQueue", obj=self
).skillqueue
queue["skills"] = skills
queue["total"] = self.total_skillpoints(skills)
now = datetime.now().replace(tzinfo=utc)
try:
trainingtime = utils.common.convert_timestamp(
skills[-1].endTime
) - now
trainingtime -= timedelta(
microseconds=trainingtime.microseconds
)
queue["trainingtime"] = trainingtime
except TypeError:
pass
return queue
#get total skillpoints for skills in queue
@staticmethod
def total_skillpoints(skills):
total = 0
for skill in skills:
total += int(skill.endSP - skill.startSP)
return total
#walletjournal
def wallet_journal(self):
cache_key = "walletjournal_character_%d" % self.pk
result = utils.connection.get_cache(cache_key)
if not result:
self.update_journal()
cache_timer = 60 * 10
utils.connection.set_cache(cache_key, True, cache_timer)
return CharacterJournal.objects.filter(characterapi=self)
#updates journal to current moment
def update_journal(self):
fromid = 0
transactions = utils.connection.api_request(
"WalletJournal", obj=self, rowcount=2500
).transactions
while True:
for trans in transactions:
date = utils.common.convert_timestamp(trans.date)
#check for duplicate
if CharacterJournal.objects.filter(
characterapi=self,
balance=trans.balance,
date=date,
).exists():
continue
else:
CharacterJournal.create_entry(self, trans)
if int(trans.refID) < fromid or fromid == 0:
fromid = int(trans.refID)
if len(transactions) < 2500:
break
else:
time.sleep(1)
transactions = utils.connection.api_request(
"WalletJournal", obj=self, rowcount=2500, fromid=fromid
).transactions
class CharacterApiIcon(models.Model):
""" images related to characters """
relation = models.ForeignKey("characters.CharacterApi")
size = models.IntegerField(choices=settings.IMAGE_SIZES)
typeid = models.IntegerField()
icon = models.ImageField(
upload_to="images/characters/",
storage=OverwriteStorage(),
blank=True,
null=True
)
class Meta:
unique_together = ["size", "relation"]
def __unicode__(self):
return "Character Image %s" % self.relation.charactername
# def save(self, *args, **kwargs):
# try:
# temp = CharacterApiIcon.objects.get(pk=self.pk)
# if temp.icon != self.icon:
# temp.icon.delete()
# except ObjectDoesNotExist:
# pass
# super(CharacterApiIcon, self).save(*args, **kwargs)
#get list of wanted character icon sizes
@staticmethod
def icon_sizes():
return [128, 200]
class Transaction(models.Model):
reftypeid = models.SmallIntegerField()
ownername1 = models.CharField(max_length=254)
ownerid1 = models.IntegerField()
ownername2 = models.CharField(max_length=254)
ownerid2 = models.IntegerField()
argname1 = models.CharField(max_length=254)
argid1 = models.IntegerField()
amount = models.FloatField(null=True)
reason = models.TextField(blank=True)
taxreceiverid = models.IntegerField(null=True)
taxamount = models.FloatField(null=True)
class Meta:
abstract = True
class CharacterJournal(Transaction):
"""
Wallet transcations of a player. Saved to database so data can
be filtered, and metadata can be created.
Like balance graphs, see how much you paid in taxes and more.
"""
characterapi = models.ForeignKey(CharacterApi)
date = models.DateTimeField()
balance = models.FloatField()
class Meta:
unique_together = ["characterapi", "date", "balance"]
ordering = ["-date", "-reftypeid"]
def __unicode__(self):
return "%s's transaction" % self.characterapi.charactername
@staticmethod
def create_entry(characterapi, transaction):
if transaction.taxReceiverID == "":
taxreceiverid = None
else:
taxreceiverid = int(transaction.taxReceiverID)
if transaction.taxAmount == "":
taxamount = None
else:
taxamount = round(float(transaction.taxAmount), 2)
date = utils.common.convert_timestamp(transaction.date)
CharacterJournal.objects.create(
characterapi=characterapi,
date=date,
balance=round(float(transaction.balance), 2),
reftypeid=int(transaction.refTypeID),
ownername1=str(transaction.ownerName1),
ownerid1=int(transaction.ownerID1),
ownername2=str(transaction.ownerName2),
ownerid2=int(transaction.ownerID2),
argname1=str(transaction.argName1),
argid1=int(transaction.argID1),
amount=round(float(transaction.amount), 2),
reason=str(transaction.reason),
taxreceiverid=taxreceiverid,
taxamount=taxamount,
)
@staticmethod
def monthly_balance(characterapi):
last_restart = utils.common.last_server_restart()
days = last_restart - timedelta(days=31)
entries = CharacterJournal.objects.filter(
characterapi=characterapi,
date__range=[days, last_restart]
)
balance = []
for days in range(31):
first = entries.first()
date = (last_restart - timedelta(days=days))
#make timestamp in milliseconds
timestamp = int(time.mktime(date.timetuple()) * 1000)
if first:
isk = first.balance
else:
try:
isk = balance[-1][1]
except IndexError:
isk = characterapi.current_balance()
balance.append([timestamp, isk])
entries = entries.filter(date__lt=(date - timedelta(days=1)))
#return reversed list
return balance[::-1]
@staticmethod
def weekly_balance(characterapi):
now = datetime.now().replace(tzinfo=utc)
entries = CharacterJournal.objects.filter(
characterapi=characterapi,
date__range=[
now.replace(hour=23, minute=59, second=0) - timedelta(days=9),
now
]
)
balance = []
for days in range(8):
date = now.replace(
hour=0, minute=0, second=0
) - timedelta(days=days)
day_entries = entries.filter(
date__lt=now.replace(
hour=23, minute=59, second=59
) - timedelta(days=days),
date__gt=date
)
if not day_entries.count() > 0:
try:
isk = balance[-1][1]
except IndexError:
isk = characterapi.current_balance()
timestamp = int(time.mktime(date.timetuple()) * 1000)
balance.append([timestamp, isk])
else:
for entry in day_entries:
timestamp = int(time.mktime(entry.date.timetuple()) * 1000)
balance.append([timestamp, entry.balance])
#add last value for date on xaxis
date = now.replace(hour=23, minute=59, second=59) - timedelta(days=8)
isk = balance[-1][1]
timestamp = int(time.mktime(date.timetuple()) * 1000)
balance.append([timestamp, isk])
return balance[::-1]
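# --- Added note (not part of the original file): monthly_balance and
# weekly_balance both return a chronologically ordered list of
# [timestamp_in_milliseconds, isk_balance] pairs, a format suitable for feeding
# directly into a time-series charting library (which library is used is an
# assumption not visible here).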
| 33.596107
| 79
| 0.571915
| 1,308
| 13,808
| 5.930428
| 0.212538
| 0.029006
| 0.013923
| 0.019337
| 0.284388
| 0.223153
| 0.1926
| 0.178935
| 0.148769
| 0.137166
| 0
| 0.014466
| 0.339151
| 13,808
| 410
| 80
| 33.678049
| 0.835616
| 0.069308
| 0
| 0.253086
| 0
| 0
| 0.042214
| 0.005472
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061728
| false
| 0.003086
| 0.033951
| 0.012346
| 0.253086
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91e4a887944adf9f3a04214dd378ac72dc05e86a
| 2,100
|
py
|
Python
|
biomaj2galaxy/commands/init.py
|
genouest/biomaj2galaxy
|
8c76f3cc96902d9401a03e7b1a6cd8f4a7ba17bd
|
[
"MIT"
] | 1
|
2015-05-11T00:08:24.000Z
|
2015-05-11T00:08:24.000Z
|
biomaj2galaxy/commands/init.py
|
genouest/biomaj2galaxy
|
8c76f3cc96902d9401a03e7b1a6cd8f4a7ba17bd
|
[
"MIT"
] | 5
|
2019-04-15T16:09:50.000Z
|
2020-11-24T10:35:21.000Z
|
biomaj2galaxy/commands/init.py
|
genouest/biomaj2galaxy
|
8c76f3cc96902d9401a03e7b1a6cd8f4a7ba17bd
|
[
"MIT"
] | 3
|
2015-06-14T08:33:49.000Z
|
2020-10-16T09:07:21.000Z
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from bioblend import galaxy
from biomaj2galaxy import config, pass_context
from biomaj2galaxy.io import info, warn
import click
CONFIG_TEMPLATE = """## BioMAJ2Galaxy: Global Configuration File.
# Each stanza should contain a single Galaxy server to interact with.
#
# You can set the key __default to the name of a default instance
__default: local
local:
url: "%(url)s"
apikey: "%(apikey)s"
"""
SUCCESS_MESSAGE = (
"Ready to go! Type `biomaj2galaxy` to get a list of commands you can execute."
)
@click.command()
@pass_context
def init(ctx, url=None, api_key=None, admin=False, **kwds):
"""Help initialize global configuration (in home directory)
"""
click.echo("""Welcome to BioMAJ2Galaxy""")
if os.path.exists(config.global_config_path()):
info("Your biomaj2galaxy configuration already exists. Please edit it instead: %s" % config.global_config_path())
return 0
while True:
# Check environment
url = click.prompt("url")
apikey = click.prompt("apikey")
info("Testing connection...")
try:
instance = galaxy.GalaxyInstance(url=url, key=apikey)
instance.libraries.get_libraries()
# We do a connection test during startup.
info("Ok! Everything looks good.")
break
except Exception as e:
warn("Error, we could not access the configuration data for your instance: %s", e)
should_break = click.prompt("Continue despite inability to contact this instance? [y/n]")
if should_break in ('Y', 'y'):
break
config_path = config.global_config_path()
if os.path.exists(config_path):
warn("File %s already exists, refusing to overwrite." % config_path)
return -1
with open(config_path, "w") as f:
f.write(CONFIG_TEMPLATE % {
'url': url,
'apikey': apikey,
})
info(SUCCESS_MESSAGE)
| 29.166667
| 121
| 0.65381
| 264
| 2,100
| 5.056818
| 0.484848
| 0.052434
| 0.035955
| 0.049438
| 0.029963
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005703
| 0.248571
| 2,100
| 71
| 122
| 29.577465
| 0.840304
| 0.06381
| 0
| 0.039216
| 0
| 0
| 0.342186
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0.039216
| 0.156863
| 0
| 0.215686
| 0.019608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91e5db16db7c305afa819a65e2ba7480fc9d4276
| 4,700
|
py
|
Python
|
preprocessing/convert_formats/msmarco_doc_create_train_input.py
|
PranjaliJain/matchmaker
|
b7e22eb8b70cccabf0729076df7cbab3f4ba4a1f
|
[
"Apache-2.0"
] | 97
|
2021-07-11T14:34:40.000Z
|
2022-03-31T14:17:25.000Z
|
preprocessing/convert_formats/msmarco_doc_create_train_input.py
|
PranjaliJain/matchmaker
|
b7e22eb8b70cccabf0729076df7cbab3f4ba4a1f
|
[
"Apache-2.0"
] | 12
|
2021-07-11T13:03:23.000Z
|
2022-03-02T16:07:11.000Z
|
preprocessing/convert_formats/msmarco_doc_create_train_input.py
|
PranjaliJain/matchmaker
|
b7e22eb8b70cccabf0729076df7cbab3f4ba4a1f
|
[
"Apache-2.0"
] | 16
|
2019-12-23T01:22:35.000Z
|
2021-06-23T12:54:36.000Z
|
#
# msmarco doc: create the train.tsv triples
# -------------------------------
import random
random.seed(42)
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.getcwd())
from matchmaker.evaluation.msmarco_eval import *
from collections import defaultdict
from matchmaker.dataloaders.bling_fire_tokenizer import BlingFireTokenizer
#
# config
#
parser = argparse.ArgumentParser()
parser.add_argument('--out-file', action='store', dest='out_file',
help='training output text file location', required=True)
parser.add_argument('--out-file-ids', action='store', dest='out_file_ids',
help='training output ids file location', required=True)
parser.add_argument('--candidate-file', action='store', dest='candidate_file',
help='trec ranking file location (lucene output)', required=True)
parser.add_argument('--collection-file', action='store', dest='collection_file',
help='collection.tsv location', required=True)
parser.add_argument('--query-file', action='store', dest='query_file',
help='query.tsv location', required=True)
parser.add_argument('--qrel', action='store', dest='qrel_file',
help='qrel location', required=True)
args = parser.parse_args()
max_triples = 10_000_000
max_doc_char_length = 150_000
max_doc_token_length = 10000
#
# load data
# -------------------------------
#
collection = {}
#collection_length = {}
tokenizer = BlingFireTokenizer()
with open(args.collection_file,"r",encoding="utf8") as collection_file:
for line in tqdm(collection_file):
ls = line.split("\t") # id<\t>text ....
_id = ls[0]
max_char_doc = ls[1].rstrip()[:max_doc_char_length]
collection[_id] = max_char_doc
#collection_length[_id] = len(tokenizer.tokenize(max_char_doc))
queries = {}
with open(args.query_file,"r",encoding="utf8") as query_file:
for line in tqdm(query_file):
ls = line.split("\t") # id<\t>text ....
_id = ls[0]
queries[_id] = ls[1].rstrip()
qrels = load_reference(args.qrel_file)
#
# produce output
# -------------------------------
#
triples = []
stats = defaultdict(int)
with open(args.candidate_file,"r",encoding="utf8") as candidate_file:
for line in tqdm(candidate_file):
#if random.random() <= 0.5: continue #skip some entries for faster processing
[topicid, _ , unjudged_docid, rank, _ , _ ] = line.split()
#if int(rank) <= 100:
# #if random.random() < 0.7: continue # skip 70% of candidates to speed up things...
# #else:
# stats['< 100 sampling count'] += 1
#else:
#    if random.random() <= 0.9: continue # skip 90% of candidates assuming top1k -> same number of samples from 0-100 as 101 - 1000
# else:
# stats['> 100 sampling count'] += 1
if topicid not in queries or topicid not in qrels: # added: because we carved out the validation qrels from the train -> so there are some missing
stats['skipped'] += 1
continue
#assert topicid in qrels
assert unjudged_docid in collection
# Use topicid to get our positive_docid
positive_docid = random.choice(qrels[topicid])
assert positive_docid in collection
if unjudged_docid in qrels[topicid]:
stats['docid_collision'] += 1
continue
stats['kept'] += 1
#if collection_length[positive_docid] > max_doc_token_length and collection_length[unjudged_docid] > max_doc_token_length:
# stats['both_to_long'] += 1
# continue
#if collection_length[positive_docid] > max_doc_token_length:
# stats['pos_to_long'] += 1
# continue
#if collection_length[unjudged_docid] > max_doc_token_length:
# stats['unjuged_to_long'] += 1
# continue
triples.append((topicid,positive_docid,unjudged_docid))
# important: shuffle the train data
random.shuffle(triples)
with open(args.out_file,"w",encoding="utf8") as out_file_text ,\
open(args.out_file_ids,"w",encoding="utf8") as out_file_ids:
for i,(topicid, positive_docid, unjudged_docid) in tqdm(enumerate(triples)):
if i == max_triples:
break
if collection[positive_docid].strip() != "" and collection[unjudged_docid].strip() != "":
out_file_ids.write(str(topicid)+"\t"+positive_docid+"\t"+unjudged_docid+"\n")
out_file_text.write(queries[topicid]+"\t"+collection[positive_docid]+"\t"+collection[unjudged_docid]+"\n")
for key, val in stats.items():
print(f"{key}\t{val}")
| 33.098592
| 154
| 0.636809
| 592
| 4,700
| 4.866554
| 0.275338
| 0.024297
| 0.035404
| 0.036446
| 0.287053
| 0.190559
| 0.155502
| 0.088164
| 0.088164
| 0.019438
| 0
| 0.018806
| 0.219362
| 4,700
| 142
| 155
| 33.098592
| 0.766421
| 0.268298
| 0
| 0.086957
| 0
| 0
| 0.122275
| 0
| 0
| 0
| 0
| 0
| 0.028986
| 1
| 0
| false
| 0
| 0.115942
| 0
| 0.115942
| 0.014493
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91e914734fc05c34e408967e2c372a75de766234
| 1,207
|
py
|
Python
|
sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/models/search_get_schema_response.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/models/search_get_schema_response.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/models/search_get_schema_response.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SearchGetSchemaResponse(Model):
"""The get schema operation response.
:param metadata: The metadata from search results.
:type metadata: ~azure.mgmt.loganalytics.models.SearchMetadata
:param value: The array of result values.
:type value: list[~azure.mgmt.loganalytics.models.SearchSchemaValue]
"""
_attribute_map = {
'metadata': {'key': 'metadata', 'type': 'SearchMetadata'},
'value': {'key': 'value', 'type': '[SearchSchemaValue]'},
}
def __init__(self, **kwargs):
super(SearchGetSchemaResponse, self).__init__(**kwargs)
self.metadata = kwargs.get('metadata', None)
self.value = kwargs.get('value', None)
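# Construction sketch (placeholder values; real instances would carry
# SearchMetadata / SearchSchemaValue objects from the same models package).
if __name__ == '__main__':
    response = SearchGetSchemaResponse(metadata=None, value=[])
    print(response.metadata, response.value)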
| 36.575758
| 76
| 0.610605
| 122
| 1,207
| 5.959016
| 0.631148
| 0.024759
| 0.057772
| 0.074278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00099
| 0.163215
| 1,207
| 32
| 77
| 37.71875
| 0.718812
| 0.591549
| 0
| 0
| 0
| 0
| 0.188184
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91ebeac4c8302d86c1514c58ecbae0f104ee5904
| 1,332
|
py
|
Python
|
python/ds/spiralprint.py
|
unhingedporter/DataStructureMustKnow
|
3c5b3225afa2775d37a2ff90121f73208717640a
|
[
"MIT"
] | 3
|
2019-11-23T08:43:58.000Z
|
2019-11-23T08:52:53.000Z
|
python/ds/spiralprint.py
|
unhingedpotter/DSMustKnow
|
64958cbbbb3f4cdb1104c2255e555233554503f9
|
[
"MIT"
] | null | null | null |
python/ds/spiralprint.py
|
unhingedpotter/DSMustKnow
|
64958cbbbb3f4cdb1104c2255e555233554503f9
|
[
"MIT"
] | null | null | null |
# Python3 program to print
# given matrix in spiral form
def spiralPrint(m, n, a):
start_row_index = 0
start_col_index = 0
l = 0
''' start_row_index - starting row index
m - ending row index
start_col_index - starting column index
n - ending column index
i - iterator '''
while (start_row_index < m and start_col_index < n):
# Print the first row from
# the remaining rows
for i in range(start_col_index, n):
print(a[start_row_index][i], end=" ")
start_row_index += 1
# Print the last column from
# the remaining columns
for i in range(start_row_index, m):
print(a[i][n - 1], end=" ")
n -= 1
# Print the last row from
# the remaining rows
if (start_row_index < m):
for i in range(n - 1, (start_col_index - 1), -1):
print(a[m - 1][i], end=" ")
m -= 1
# Print the first column from
# the remaining columns
if (start_col_index < n):
for i in range(m - 1, start_row_index - 1, -1):
print(a[i][start_col_index], end=" ")
start_col_index += 1
# Driver Code
a = [[1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12],
[13, 14, 15, 16, 17, 18]]
R = 3
C = 6
spiralPrint(R, C, a)
| 22.965517
| 61
| 0.534535
| 203
| 1,332
| 3.349754
| 0.295567
| 0.117647
| 0.152941
| 0.064706
| 0.286765
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052144
| 0.352102
| 1,332
| 57
| 62
| 23.368421
| 0.735805
| 0.187688
| 0
| 0
| 0
| 0
| 0.004367
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0
| 0
| 0.04
| 0.16
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91ed3db43e489e433ff783f8e76e26a52b78a6d5
| 568
|
py
|
Python
|
rest-api/routers/authorization.py
|
marintrace/backend
|
ad34bd50bd5e3f90be1ac16a74d39a0a9342fa33
|
[
"MIT"
] | 2
|
2021-12-14T03:14:41.000Z
|
2022-01-17T18:36:31.000Z
|
rest-api/routers/authorization.py
|
marintrace/backend
|
ad34bd50bd5e3f90be1ac16a74d39a0a9342fa33
|
[
"MIT"
] | 1
|
2021-03-29T08:06:42.000Z
|
2021-03-29T08:06:42.000Z
|
rest-api/routers/authorization.py
|
tracing-app/backend
|
ad34bd50bd5e3f90be1ac16a74d39a0a9342fa33
|
[
"MIT"
] | null | null | null |
"""
Authorization Utilities
"""
from shared.models.user_entities import User
from shared.service.jwt_auth_wrapper import JWTAuthManager
manager = JWTAuthManager(oidc_vault_secret="oidc/rest",
object_creator=lambda claims, assumed_role, user_roles: User(
first_name=claims["given_name"],
last_name=claims["family_name"],
school=assumed_role,
email=claims['email']
))
AUTH_USER = manager.auth_header()
| 35.5
| 86
| 0.572183
| 54
| 568
| 5.740741
| 0.611111
| 0.064516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.34507
| 568
| 15
| 87
| 37.866667
| 0.833333
| 0.040493
| 0
| 0
| 0
| 0
| 0.065177
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91ee06a1881d10f22e7c8d7c219f9ef37412d52d
| 1,365
|
py
|
Python
|
photonpy/tests/psf_g2d_sigma.py
|
qnano/photonpy
|
9c03a1c9f4c2177c9c6fb3f2f16dfec2306006d4
|
[
"MIT"
] | 5
|
2021-04-29T21:06:05.000Z
|
2022-03-23T03:45:25.000Z
|
photonpy/tests/psf_g2d_sigma.py
|
qnano/photonpy
|
9c03a1c9f4c2177c9c6fb3f2f16dfec2306006d4
|
[
"MIT"
] | null | null | null |
photonpy/tests/psf_g2d_sigma.py
|
qnano/photonpy
|
9c03a1c9f4c2177c9c6fb3f2f16dfec2306006d4
|
[
"MIT"
] | 1
|
2021-06-18T12:39:28.000Z
|
2021-06-18T12:39:28.000Z
|
import matplotlib.pyplot as plt
import numpy as np
from photonpy.cpp.context import Context
import photonpy.cpp.gaussian as gaussian
from photonpy.smlm.util import imshow_hstack
from photonpy.cpp.estimator import Estimator
def CheckDeriv(psf:Estimator, theta):
nderiv,ev=psf.NumDeriv(theta,eps=1e-6)
deriv,ev=psf.Derivatives(theta)
maxerr = np.max( np.abs(deriv-nderiv), (-1,-2) )
print(f"PSF {psf.ParamFormat()}, max {np.max(deriv)}, min: {np.min(deriv)}: Deriv-NumDeriv: {maxerr}")
plt.figure()
imshow_hstack(deriv[0] - nderiv[0])
with Context() as ctx:
g = gaussian.Gaussian(ctx)
for cuda in [False]:
print(f"CUDA = {cuda}")
sigma=2
roisize=12
psf = g.CreatePSF_XYIBg(roisize, sigma, cuda)
theta = [[4, 4, 1000, 3]]
img = psf.ExpectedValue(theta)
plt.figure()
plt.set_cmap('inferno')
smp = np.random.poisson(img)
plt.imshow(smp[0])
psf_sigma = g.CreatePSF_XYIBgSigma(roisize, sigma, cuda)
theta_s = [[4,4,1000,3,sigma]]
img2 = psf_sigma.ExpectedValue(theta_s)
CheckDeriv(psf, theta)
# CheckDeriv(psf_sigma)
print(f"PSF Sigma crlb: {psf_sigma.CRLB(theta_s)}")
theta = psf_sigma.Estimate(smp)[0]
print(theta)
| 26.764706
| 106
| 0.606593
| 181
| 1,365
| 4.502762
| 0.375691
| 0.058896
| 0.03681
| 0.051534
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025896
| 0.264469
| 1,365
| 51
| 107
| 26.764706
| 0.785857
| 0.015385
| 0
| 0.060606
| 0
| 0.030303
| 0.114865
| 0.018769
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.181818
| 0
| 0.212121
| 0.121212
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91ee64a13c556aefe5259d2a930de14c6c79472f
| 2,018
|
py
|
Python
|
tests/tools_tests/helpers_tests.py
|
Gautierhyp/tespy
|
d44ae41874baeff77619e560faea59dd0cb84c7c
|
[
"MIT"
] | null | null | null |
tests/tools_tests/helpers_tests.py
|
Gautierhyp/tespy
|
d44ae41874baeff77619e560faea59dd0cb84c7c
|
[
"MIT"
] | null | null | null |
tests/tools_tests/helpers_tests.py
|
Gautierhyp/tespy
|
d44ae41874baeff77619e560faea59dd0cb84c7c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8
"""Module for testing helper functions.
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location
tests/tools_tests/helpers_tests.py
SPDX-License-Identifier: MIT
"""
from nose.tools import eq_
from tespy.tools.helpers import newton
def func(params, x):
return x ** 2 + x - 20
def deriv(params, x):
return 2 * x + 1
def test_newton_bounds():
"""
Test newton algorithm value limit handling.
Try to calculate a zero crossing of a quadratic function in four
tries.
- zero crossing within limits, starting value near 4
- zero crossing within limits, starting value near -5
- zero crossing below minimum
- zero crossing above maximum
The function is x^2 + x - 20; its zero crossings are -5 and 4.
"""
result = newton(func, deriv, [], 0, valmin=-10, valmax=10, val0=0)
msg = ('The newton algorithm should find the zero crossing at 4.0. ' +
str(round(result, 1)) + ' was found instead.')
eq_(4.0, result, msg)
result = newton(func, deriv, [], 0, valmin=-10, valmax=10, val0=-10)
msg = ('The newton algorithm should find the zero crossing at -5.0. ' +
str(round(result, 1)) + ' was found instead.')
eq_(-5.0, result, msg)
result = newton(func, deriv, [], 0, valmin=-4, valmax=-2, val0=-3)
msg = ('The newton algorithm should not be able to find a zero crossing. '
'The value ' + str(round(result, 1)) + ' was found, but the '
'algorithm should have found the lower boundary of -4.0.')
eq_(-4.0, result, msg)
result = newton(func, deriv, [], 0, valmin=-20, valmax=-10, val0=-10)
msg = ('The newton algorithm should not be able to find a zero crossing. '
'The value ' + str(round(result, 1)) + ' was found, but the '
'algorithm should have found the upper boundary of -10.0.')
eq_(-10.0, result, msg)
| 32.548387
| 78
| 0.646184
| 305
| 2,018
| 4.245902
| 0.357377
| 0.083398
| 0.049421
| 0.064865
| 0.518919
| 0.518919
| 0.518919
| 0.455598
| 0.455598
| 0.364479
| 0
| 0.040883
| 0.236373
| 2,018
| 61
| 79
| 33.081967
| 0.799481
| 0.32557
| 0
| 0.24
| 0
| 0
| 0.349352
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.08
| 0.08
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91eed42dd8cd7828f31d4494c0f4f389955bf685
| 8,960
|
py
|
Python
|
utils/dynamo.py
|
OnRails-IN/backend
|
5f5c9703fcda282ed54f2e6315680fb30fd91a6f
|
[
"MIT"
] | null | null | null |
utils/dynamo.py
|
OnRails-IN/backend
|
5f5c9703fcda282ed54f2e6315680fb30fd91a6f
|
[
"MIT"
] | null | null | null |
utils/dynamo.py
|
OnRails-IN/backend
|
5f5c9703fcda282ed54f2e6315680fb30fd91a6f
|
[
"MIT"
] | null | null | null |
"""
Dynamo Utils
============
All utility functions for interactions with DynamoDB
Functions
- ensure_json
- create_user_table
- create_train_table
- create_or_update_record
- list_tables
- list_records
- get_record
- delete_table
- delete_record
- check_active
"""
import boto3
from decimal import Decimal
from constants import AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, DYNAMO_URL
ddb = boto3.resource(
'dynamodb',
aws_access_key_id = AWS_ACCESS_KEY,
aws_secret_access_key = AWS_SECRET_KEY,
endpoint_url = DYNAMO_URL,
region_name = AWS_REGION
)
client = boto3.client(
'dynamodb',
aws_access_key_id = AWS_ACCESS_KEY,
aws_secret_access_key = AWS_SECRET_KEY,
endpoint_url = DYNAMO_URL,
region_name = AWS_REGION
)
def ensure_json(obj):
"""
Function to ensure that a python object is JSON serializable
Params:
obj::dict|[dict]
Object to be JSON serializable
Returns:
obj::dict|[dict]
Returns the JSON serializable object
"""
if isinstance(obj, list):
for i in range(len(obj)):
obj[i] = ensure_json(obj[i])
return obj
elif isinstance(obj, dict):
for k in obj.keys():
obj[k] = ensure_json(obj[k])
return obj
elif isinstance(obj, Decimal):
if obj % 1 == 0:
return int(obj)
else:
return float(obj)
else:
return obj
def create_user_table():
"""
Function to create the "users" table in DynamoDB
Returns:
bool
If the table was created or not
"""
try:
table = ddb.create_table(
TableName = "users",
KeySchema = [
{
"AttributeName": "username",
"KeyType": "HASH" # Partition key
},
{
"AttributeName": "index",
"KeyType": "RANGE" # Sort key
}
],
AttributeDefinitions = [
{
"AttributeName": "username",
"AttributeType": "S"
},
{
"AttributeName": "index",
"AttributeType": "S"
}
],
ProvisionedThroughput = {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10
}
)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ create_user_table\n{}".format(e))
return None
def create_train_table():
"""
Function to create the "trains" table in DynamoDB
Returns:
bool
If the table was created or not
"""
try:
table = ddb.create_table(
TableName = "trains",
KeySchema = [
{
"AttributeName": "train_name",
"KeyType": "HASH" # Partition key
},
{
"AttributeName": "train_type",
"KeyType": "RANGE" # Sort key
}
],
AttributeDefinitions = [
{
"AttributeName": "train_name",
"AttributeType": "N"
},
{
"AttributeName": "train_type",
"AttributeType": "S"
}
],
ProvisionedThroughput = {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10
}
)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ create_user_table\n{}".format(e))
return None
def create_or_update_record(tableName, record):
"""
Function to create or update a record in DynamoDB
Params:
tableName::str
The table name to get the record
record::dict
The object to store
Returns:
bool
If the record was inserted or not
"""
if not tableName or not record:
return False
if not {'username', 'index'}.issubset(record):
return False
try:
res = ddb.Table(tableName).get_item(
Key = {
"username": record['username'],
"index": record['index']
}
)
record = { **res['Item'], **record } if 'Item' in res else record
ddb.Table(tableName).put_item(
Item = record
)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ create_or_update_record\n{}".format(e))
return None
def list_tables():
"""
Function to list all tables in DynamoDB
Returns:
tables::[str]
The list of tables
"""
try:
return client.list_tables()['TableNames']
except client.exceptions.ResourceNotFoundException:
print("Tables do not exist")
return False
except Exception as e:
print("Exception @ list_tables\n{}".format(e))
return None
def list_records(tableName):
"""
Function to list all records from a DynamoDB table
Params:
tableName::str
The table name to get the records
Returns:
records::[dict]
The list of records stored in the table
"""
if not tableName:
return False
try:
table = ddb.Table(tableName)
res = table.scan()
docs = ensure_json(res['Items'])
while 'LastEvaluatedKey' in res:
res = table.scan(ExclusiveStartKey = res['LastEvaluatedKey'])
docs.extend(ensure_json(res['Items']))
return docs
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ list_records\n{}".format(e))
return None
def get_record(tableName, query):
"""
Function to retrieve one record from DynamoDB table
Params:
tableName::str
The table name to get the record
query::dict
The query to fetch the record
Returns:
doc::dict
The record retrieved from the table
"""
if not tableName or not query or not isinstance(query, dict):
return False
try:
res = ddb.Table(tableName).get_item(
Key = query
)
doc = ensure_json(res['Item']) if 'Item' in res else None
return doc
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ get_record\n{}".format(e))
return None
def delete_table(tableName):
"""
Function to delete a DynamoDB table
Params:
tableName::str
The table name to delete
Returns:
bool
If the table was deleted or not
"""
if not tableName:
return False
try:
ddb.Table(tableName).delete()
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ delete_table\n{}".format(e))
return None
def delete_record(tableName, query):
"""
Function to delete a record from a DynamoDB table
Params:
tableName::str
The table name to get the record
query::dict
The query to fetch the record
Returns:
bool
If the record was deleted or not
"""
if not tableName or not query:
return False
try:
res = ddb.Table(tableName).delete_item(
Key = query
)
print(res)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ delete_record\n{}".format(e))
return None
def check_active(tableName):
"""
Function to check if a table is ACTIVE
Params:
tableName::str
The table name to check
Returns:
bool
If the table is active or not
"""
if not tableName:
return False
try:
if ddb.Table(tableName).table_status == "ACTIVE":
return True
return False
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ check_status\n{}".format(e))
return None
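# Usage sketch (kept as comments: it assumes a reachable DynamoDB endpoint
# configured through the constants imported above; field values are illustrative).
#
#   if __name__ == "__main__":
#       create_user_table()
#       create_or_update_record("users", {"username": "alice", "index": "profile", "points": 10})
#       print(get_record("users", {"username": "alice", "index": "profile"}))
#       print(list_records("users"))
#       print(check_active("users"))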
| 24.888889
| 76
| 0.547098
| 930
| 8,960
| 5.174194
| 0.147312
| 0.038861
| 0.035328
| 0.087905
| 0.633001
| 0.54946
| 0.504988
| 0.446384
| 0.432668
| 0.432668
| 0
| 0.0023
| 0.369196
| 8,960
| 360
| 77
| 24.888889
| 0.84908
| 0.226674
| 0
| 0.583732
| 0
| 0
| 0.142486
| 0.010549
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047847
| false
| 0
| 0.014354
| 0
| 0.253589
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37cd97b1c214ca81d9e46e1e2c07bc9bb82f06f0
| 340
|
py
|
Python
|
Source/Git/Experiments/git_annotate.py
|
cadappl/scm-workbench
|
302cdb8e36bb755f4977062e8977c37e7f4491f9
|
[
"Apache-2.0"
] | 24
|
2017-03-23T06:24:02.000Z
|
2022-03-19T13:35:44.000Z
|
Source/Git/Experiments/git_annotate.py
|
cadappl/scm-workbench
|
302cdb8e36bb755f4977062e8977c37e7f4491f9
|
[
"Apache-2.0"
] | 14
|
2016-06-21T10:06:27.000Z
|
2020-07-25T11:56:23.000Z
|
Source/Git/Experiments/git_annotate.py
|
barry-scott/git-workbench
|
9f352875ab097ce5e45f85bf255b1fa02a196807
|
[
"Apache-2.0"
] | 11
|
2016-12-25T12:36:16.000Z
|
2022-03-23T14:25:25.000Z
|
#!/usr/bin/python3
import sys
import git
r = git.Repo( sys.argv[1] )
num = 0
for info in r.blame( 'HEAD', sys.argv[2] ):
num += 1
commit = info[0]
all_lines = info[1]
print( '%s %6d:%s' % (commit, num, all_lines[0]) )
for line in all_lines[1:]:
num += 1
print( '%*s %6d:%s' % (40, '', num, line) )
| 17
| 54
| 0.517647
| 57
| 340
| 3.035088
| 0.45614
| 0.138728
| 0.080925
| 0.104046
| 0.115607
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0.279412
| 340
| 19
| 55
| 17.894737
| 0.64898
| 0.05
| 0
| 0.166667
| 0
| 0
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37cf805b2f12051ac4eca05f7ae1c89c1a8dc059
| 544
|
py
|
Python
|
configs/global_configs.py
|
HansikaPH/time-series-forecasting
|
23be319a190489bc1464653a3d672edd70ab110b
|
[
"MIT"
] | 67
|
2019-09-09T14:53:35.000Z
|
2022-02-21T08:51:15.000Z
|
configs/global_configs.py
|
HansikaPH/time-series-forecasting
|
23be319a190489bc1464653a3d672edd70ab110b
|
[
"MIT"
] | 6
|
2019-09-09T06:11:51.000Z
|
2019-12-16T04:31:11.000Z
|
configs/global_configs.py
|
HansikaPH/time-series-forecasting
|
23be319a190489bc1464653a3d672edd70ab110b
|
[
"MIT"
] | 18
|
2019-09-12T02:49:58.000Z
|
2022-02-16T11:15:57.000Z
|
# configs for the model training
class model_training_configs:
VALIDATION_ERRORS_DIRECTORY = 'results/validation_errors/'
INFO_FREQ = 1
# configs for the model testing
class model_testing_configs:
RNN_FORECASTS_DIRECTORY = 'results/rnn_forecasts/'
RNN_ERRORS_DIRECTORY = 'results/errors'
PROCESSED_RNN_FORECASTS_DIRECTORY = '/results/processed_rnn_forecasts/'
# configs for hyperparameter tuning (SMAC3)
class hyperparameter_tuning_configs:
SMAC_RUNCOUNT_LIMIT = 50
class gpu_configs:
log_device_placement = False
| 30.222222
| 75
| 0.799632
| 66
| 544
| 6.212121
| 0.439394
| 0.156098
| 0.063415
| 0.087805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008565
| 0.141544
| 544
| 17
| 76
| 32
| 0.869379
| 0.185662
| 0
| 0
| 0
| 0
| 0.216401
| 0.18451
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37cfc9903bdf3148211aecc7d83461d403271fff
| 3,967
|
py
|
Python
|
webium/controls/select.py
|
kejkz/webium
|
ccb09876a201e75f5c5810392d4db7a8708b90cb
|
[
"Apache-2.0"
] | 152
|
2015-01-16T11:26:56.000Z
|
2022-01-22T12:11:28.000Z
|
webium/controls/select.py
|
goblinintree/webium
|
ccb09876a201e75f5c5810392d4db7a8708b90cb
|
[
"Apache-2.0"
] | 13
|
2015-03-05T14:36:44.000Z
|
2018-08-08T09:43:39.000Z
|
webium/controls/select.py
|
goblinintree/webium
|
ccb09876a201e75f5c5810392d4db7a8708b90cb
|
[
"Apache-2.0"
] | 57
|
2015-01-27T12:53:49.000Z
|
2022-03-26T23:02:36.000Z
|
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.remote.webelement import WebElement
class Select(WebElement):
"""
Implements logic to work with Web List UI elements
"""
@property
def is_multiple(self):
value = self.get_attribute('multiple')
return value is not None and not value == 'false'
def select_option(self, option):
"""
Performs selection of provided item from Web List
@params option - string item name
"""
items_list = self.get_options()
for item in items_list:
if item.get_attribute("value") == option:
item.click()
break
def get_options(self):
"""
Performs search for provided item in Web List
"""
return self.find_elements_by_tag_name('option')
def get_attribute_selected(self, attribute):
"""
Performs search of selected item from Web List
Return attribute of selected item
@params attribute - string attribute name
"""
items_list = self.get_options()
return next(iter([item.get_attribute(attribute) for item in items_list if item.is_selected()]), None)
def get_value_selected(self):
"""
Performs search of selected item from Web List
Return value of selected item
"""
return self.get_attribute_selected('value')
def get_text_selected(self):
"""
Performs search of selected item from Web List
Return text of selected item
"""
return self.get_attribute_selected('text')
def select_by_visible_text(self, text):
"""
Performs search of selected item from Web List
@params text - string visible text
"""
xpath = './/option[normalize-space(.) = {0}]'.format(self._escape_string(text))
opts = self.find_elements_by_xpath(xpath)
matched = False
for opt in opts:
self._set_selected(opt)
if not self.is_multiple:
return
matched = True
# in case the target option isn't found by xpath
# attempt to find it by direct comparison among options which contain at least the longest token from the text
if len(opts) == 0 and ' ' in text:
sub_string_without_space = self._get_longest_token(text)
if sub_string_without_space == "":
candidates = self.get_options()
else:
xpath = ".//option[contains(.,{0})]".format(self._escape_string(sub_string_without_space))
candidates = self.find_elements_by_xpath(xpath)
for candidate in candidates:
if text == candidate.text:
self._set_selected(candidate)
if not self.is_multiple:
return
matched = True
if not matched:
raise NoSuchElementException("Could not locate element with visible text: " + str(text))
@staticmethod
def _escape_string(value):
if '"' in value and "'" in value:
substrings = value.split('"')
result = ['concat(']
for substring in substrings:
result.append('"{0}"'.format(substring))
result.append(', \'"\', ')
result.pop()
if value.endswith('"'):
result.append(', \'"\'')
return ''.join(result) + ')'
if '"' in value:
return "'{0}'".format(value)
return '"{0}"'.format(value)
@staticmethod
def _get_longest_token(value):
items = value.split(' ')
longest = ''
for item in items:
if len(item) > len(longest):
longest = item
return longest
@staticmethod
def _set_selected(option):
if not option.is_selected():
option.click()
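# Usage sketch (hypothetical: assumes `dropdown` is an instance of this Select
# class located on a page, e.g. through webium's element lookup on a page object):
#
#   dropdown.select_option('blue')
#   print(dropdown.get_text_selected())
#   dropdown.select_by_visible_text('Navy blue')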
| 33.058333
| 118
| 0.57575
| 441
| 3,967
| 5.027211
| 0.244898
| 0.022102
| 0.044204
| 0.03383
| 0.313938
| 0.262517
| 0.18313
| 0.161479
| 0.071719
| 0.051421
| 0
| 0.002258
| 0.330224
| 3,967
| 119
| 119
| 33.336134
| 0.832142
| 0.175952
| 0
| 0.150685
| 0
| 0
| 0.057404
| 0.017613
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136986
| false
| 0
| 0.027397
| 0
| 0.328767
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37d1196ce920fb2354298f73f3de4a4a984c7332
| 12,564
|
py
|
Python
|
mc/cookies/CookieManager.py
|
zy-sunshine/falkon-pyqt5
|
bc2b60aa21c9b136439bd57a11f391d68c736f99
|
[
"MIT"
] | 1
|
2021-04-29T05:36:44.000Z
|
2021-04-29T05:36:44.000Z
|
mc/cookies/CookieManager.py
|
zy-sunshine/falkon-pyqt5
|
bc2b60aa21c9b136439bd57a11f391d68c736f99
|
[
"MIT"
] | 1
|
2020-03-28T17:43:18.000Z
|
2020-03-28T17:43:18.000Z
|
mc/cookies/CookieManager.py
|
zy-sunshine/falkon-pyqt5
|
bc2b60aa21c9b136439bd57a11f391d68c736f99
|
[
"MIT"
] | 1
|
2021-01-15T20:09:24.000Z
|
2021-01-15T20:09:24.000Z
|
from PyQt5.QtWidgets import QDialog
from PyQt5 import uic
from PyQt5.Qt import Qt
from PyQt5.Qt import QShortcut
from PyQt5.Qt import QKeySequence
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QInputDialog
from PyQt5.Qt import QDateTime
from PyQt5.Qt import QStyle
from PyQt5.Qt import QNetworkCookie
from PyQt5.QtWidgets import QTreeWidgetItem
from mc.common.globalvars import gVar
from mc.app.Settings import Settings
from mc.common import const
from mc.tools.TreeWidget import TreeWidget
from mc.tools.IconProvider import IconProvider
class HashableTreeWidgetItem(QTreeWidgetItem):
def __hash__(self):
return id(self)
class CookieManager(QDialog):
def __init__(self, parent=None):
'''
@param parent QWidget
'''
super().__init__(parent)
self._ui = uic.loadUi('mc/cookies/CookieManager.ui', self)
self._domainHash = {} # QHash<QString, QTreeWidgetItem>
self._itemHash = {} # QHash<QTreeWidgetItem, QNetworkCookie>
self.setAttribute(Qt.WA_DeleteOnClose)
gVar.appTools.centerWidgetOnScreen(self)
if self.isRightToLeft():
self._ui.cookieTree.headerItem().setTextAlignment(0, Qt.AlignRight | Qt.AlignVCenter)
self._ui.cookieTree.headerItem().setTextAlignment(1, Qt.AlignRight | Qt.AlignVCenter)
self._ui.cookieTree.setLayoutDirection(Qt.LeftToRight)
self._ui.whiteList.setLayoutDirection(Qt.LeftToRight)
self._ui.blackList.setLayoutDirection(Qt.LeftToRight)
# Stored Cookies
self._ui.cookieTree.currentItemChanged.connect(self._currentItemChanged)
self._ui.removeAll.clicked.connect(self._removeAll)
self._ui.removeOne.clicked.connect(self._remove)
self._ui.close.clicked.connect(lambda: self._close())
self._ui.close2.clicked.connect(lambda: self._close())
self._ui.close3.clicked.connect(lambda: self._close())
self._ui.search.textChanged.connect(self._filterString)
# Cookie Filtering
self._ui.whiteAdd.clicked.connect(self._addWhitelist)
self._ui.whiteRemove.clicked.connect(self._removeWhitelist)
self._ui.blackAdd.clicked.connect(self._addBlacklist)
self._ui.blackRemove.clicked.connect(self._removeBlacklist)
# Cookie Settings
settings = Settings()
settings.beginGroup('Cookie-Settings')
self._ui.saveCookies.setChecked(settings.value('allCookies', True))
self._ui.filter3rdParty.setChecked(settings.value('filterThirdPartyCookies', False))
self._ui.filterTracking.setChecked(settings.value('filterTrackingCookie', False))
self._ui.deleteCookiesOnClose.setChecked(settings.value('deleteCookiesOnClose', False))
self._ui.whiteList.addItems(settings.value('whitelist', []))
self._ui.blackList.addItems(settings.value('blacklist', []))
settings.endGroup()
if const.QTWEBENGINEWIDGETS_VERSION < const.QT_VERSION_CHECK(5, 11, 0):
self._ui.filter3rdParty.hide()
self._ui.search.setPlaceholderText(_('Search'))
self._ui.cookieTree.setDefaultItemShowMode(TreeWidget.ItemsCollapsed)
self._ui.cookieTree.sortItems(0, Qt.AscendingOrder)
self._ui.cookieTree.header().setDefaultSectionSize(220)
self._ui.cookieTree.setFocus()
self._ui.whiteList.setSortingEnabled(True)
self._ui.blackList.setSortingEnabled(True)
self._removeShortcut = QShortcut(QKeySequence('Del'), self)
self._removeShortcut.activated.connect(self._deletePressed)
self._ui.search.textChanged.connect(self._filterString)
cookieJar = gVar.app.cookieJar()
cookieJar.cookieAdded.connect(self._addCookie)
cookieJar.cookieRemoved.connect(self._removeCookie)
# Load cookies
for cookie in cookieJar.getAllCookies():
self._addCookie(cookie)
gVar.appTools.setWmClass('Cookies', self)
def _close(self):
super().close()
# private Q_SLOTS:
def _currentItemChanged(self, current, parent):
'''
@param: current QTreeWidgetItem
@param: parent QTreeWidgetItem
'''
if not current:
return
if not current.text(1):
self._ui.name.setText(_('<cookie not selected>'))
self._ui.value.setText(_("<cookie not selected>"))
self._ui.server.setText(_("<cookie not selected>"))
self._ui.path.setText(_("<cookie not selected>"))
self._ui.secure.setText(_("<cookie not selected>"))
self._ui.expiration.setText(_("<cookie not selected>"))
self._ui.removeOne.setText(_("Remove cookies"))
return
cookie = current.data(0, Qt.UserRole + 10)
self._ui.name.setText(cookie.name().data().decode())
self._ui.value.setText(cookie.value().data().decode())
self._ui.server.setText(cookie.domain())
self._ui.path.setText(cookie.path())
if cookie.isSecure():
self._ui.secure.setText(_('Secure only'))
else:
self._ui.secure.setText(_('All connections'))
if cookie.isSessionCookie():
self._ui.expiration.setText(_('Session cookie'))
else:
self._ui.expiration.setText(
QDateTime(cookie.expirationDate()).toString('hh:mm:ss dddd d. MMMM yyyy')
)
self._ui.removeOne.setText(_('Remove cookie'))
def _remove(self):
current = self._ui.cookieTree.currentItem()
if not current:
return
cookies = [] # QList<QNetworkCookie>
if current.childCount():
for idx in range(current.childCount()):
# QTreeWidgetItem
item = current.child(idx)
if item and item in self._itemHash:
cookies.append(self._itemHash[item])
elif current in self._itemHash:
cookies.append(self._itemHash[current])
cookieJar = gVar.app.cookieJar()
for cookie in cookies:
cookieJar.deleteCookie(cookie)
def _removeAll(self):
button = QMessageBox.warning(self, _('Confirmation'),
_('Are you sure you want to delete all cookies on your computer?'),
QMessageBox.Yes | QMessageBox.No)
if button != QMessageBox.Yes:
return
gVar.app.cookieJar().deleteAllCookies()
self._itemHash.clear()
self._domainHash.clear()
self._ui.cookieTree.clear()
def _addWhitelist(self):
server, ok = QInputDialog.getText(self, _('Add to whitelist'),
_('Server:'))
if not server:
return
if self._ui.blackList.findItems(server, Qt.MatchFixedString):
QMessageBox.information(self, _('Already blacklisted!'),
_("The server \"%s\" is already in blacklist, please remove it first.") % server)
return
if not self._ui.whiteList.findItems(server, Qt.MatchFixedString):
self._ui.whiteList.addItem(server)
def _removeWhitelist(self):
item = self._ui.whiteList.currentItem()
self._removeTreeItem(self._ui.whiteList, item)
def _addBlacklist(self):
server, ok = QInputDialog.getText(self, _('Add to blacklist'),
_('Server:'))
self._addBlacklistByServer(server)
def _removeBlacklist(self):
item = self._ui.blackList.currentItem()
self._removeTreeItem(self._ui.blackList, item)
def _deletePressed(self):
if self._ui.cookieTree.hasFocus():
self._remove()
elif self._ui.whiteList.hasFocus():
self._removeWhitelist()
elif self._ui.blackList.hasFocus():
self._removeBlacklist()
def _filterString(self, string):
'''
@param: string QString
'''
print('=====>', string)
if not string:
for idx in range(self._ui.cookieTree.topLevelItemCount()):
item = self._ui.cookieTree.topLevelItem(idx)
item.setHidden(False)
item.setExpanded(self._ui.cookieTree.defaultItemShowMode() == TreeWidget.ItemsExpanded)
else:
strLower = string.lower()
for idx in range(self._ui.cookieTree.topLevelItemCount()):
item = self._ui.cookieTree.topLevelItem(idx)
text = '.' + item.text(0)
item.setHidden(text.lower() not in strLower)
item.setExpanded(True)
def _addCookie(self, cookie):
'''
@param: cookie QNetworkCookie
'''
item = None # QTreeWidgetItem
domain = self._cookieDomain(cookie)
findParent = self._domainHash.get(domain)
if findParent:
item = HashableTreeWidgetItem(findParent)
else:
newParent = HashableTreeWidgetItem(self._ui.cookieTree)
newParent.setText(0, domain)
newParent.setIcon(0, IconProvider.standardIcon(QStyle.SP_DirIcon))
newParent.setData(0, Qt.UserRole + 10, cookie.domain())
self._ui.cookieTree.addTopLevelItem(newParent)
self._domainHash[domain] = newParent
item = HashableTreeWidgetItem(newParent)
cookie = QNetworkCookie(cookie)
item.setText(0, '.' + domain)
item.setText(1, cookie.name().data().decode())
item.setData(0, Qt.UserRole + 10, cookie)
self._ui.cookieTree.addTopLevelItem(item)
self._itemHash[item] = cookie
def _removeCookie(self, cookie):
'''
@param: cookie QNetworkCookie
'''
# QTreeWidgetItem
item = self._cookieItem(cookie)
if not item:
return
self._itemHash.pop(item, None)
itemParent = item.parent()
if itemParent and itemParent.childCount() == 1:
self._domainHash.pop(self._cookieDomain(cookie), None)
self._removeTreeItem(self._ui.cookieTree, itemParent)
item = None
if item:
self._removeTreeItem(self._ui.cookieTree, item)
def _removeTreeItem(self, tree, item):
if not item: return
(item.parent() or tree.invisibleRootItem()).removeChild(item)
# private:
# override
def closeEvent(self, event):
'''
@param event QCloseEvent
'''
whitelist = []
blacklist = []
for idx in range(self._ui.whiteList.count()):
item = self._ui.whiteList.item(idx)
whitelist.append(item.text())
for idx in range(self._ui.blackList.count()):
item = self._ui.blackList.item(idx)
blacklist.append(item.text())
settings = Settings()
settings.beginGroup('Cookie-Settings')
settings.setValue('allowCookies', self._ui.saveCookies.isChecked())
settings.setValue('filterThirdPartyCookies', self._ui.filter3rdParty.isChecked())
settings.setValue('filterTrackingCookie', self._ui.filterTracking.isChecked())
settings.setValue('deleteCookiesOnClose', self._ui.deleteCookiesOnClose.isChecked())
settings.setValue('whitelist', whitelist)
settings.setValue('blacklist', blacklist)
settings.endGroup()
gVar.app.cookieJar().loadSettings()
event.accept()
# override
def keyPressEvent(self, event):
'''
@param event QKeyEvent
'''
if event.key() == Qt.Key_Escape:
self._close()
super().keyPressEvent(event)
def _addBlacklistByServer(self, server):
'''
@param: server QString
'''
if not server:
return
if self._ui.whiteList.findItems(server, Qt.MatchFixedString):
QMessageBox.information(self, _('Already whitelisted!'),
_("The server \"%s\" is already in whitelist, please remove it first.") % server)
return
if not self._ui.blackList.findItems(server, Qt.MatchFixedString):
self._ui.blackList.addItem(server)
def _cookieDomain(self, cookie):
'''
@param: cookie QNetworkCookie
@return: QString
'''
domain = cookie.domain()
domain = domain.lstrip('.')
return domain
def _cookieItem(self, cookie):
'''
@param: cookie QNetworkCookie
@return: QTreeWidgetItem
'''
for key, val in self._itemHash.items():
if val == cookie:
return key
return None
| 35.897143
| 103
| 0.630372
| 1,240
| 12,564
| 6.240323
| 0.206452
| 0.062807
| 0.043422
| 0.013182
| 0.256526
| 0.189325
| 0.124451
| 0.059188
| 0.031791
| 0.031791
| 0
| 0.004501
| 0.257243
| 12,564
| 349
| 104
| 36
| 0.824689
| 0.046721
| 0
| 0.142857
| 0
| 0
| 0.06542
| 0.006235
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0
| 0.065306
| 0.004082
| 0.208163
| 0.004082
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37d161d2ab9998ed2955dcc68be64d87474fc1ce
| 1,803
|
py
|
Python
|
.circleci/process_submitted_data.py
|
dongbohu/cimr-d
|
7d8f7f7319cff0092946a28d1416d38c06e085d7
|
[
"CC-BY-4.0"
] | null | null | null |
.circleci/process_submitted_data.py
|
dongbohu/cimr-d
|
7d8f7f7319cff0092946a28d1416d38c06e085d7
|
[
"CC-BY-4.0"
] | null | null | null |
.circleci/process_submitted_data.py
|
dongbohu/cimr-d
|
7d8f7f7319cff0092946a28d1416d38c06e085d7
|
[
"CC-BY-4.0"
] | 2
|
2019-05-22T16:05:54.000Z
|
2019-05-23T14:29:10.000Z
|
#!/usr/bin/env python3
import os
import sys
import logging
import subprocess
logging.basicConfig(level=logging.INFO)
root_dir = 'submitted_data'
submitted_file_split = set()
for dir_, _, files in os.walk(root_dir):
for file_name in files:
rel_dir = os.path.relpath(dir_, root_dir)
rel_file = os.path.join(root_dir, rel_dir, file_name)
submitted_file_split.add(rel_file)
for submitted_file in submitted_file_split:
if submitted_file.startswith('submitted_data'):
dir_name, data_type, file_name = submitted_file.split('/')
out_dir_name = 'processed_data'
if not os.path.isdir(out_dir_name):
os.makedirs(out_dir_name, exist_ok=True)
if not os.path.isdir(out_dir_name + '/' + data_type):
os.makedirs(out_dir_name + '/' + data_type, exist_ok=True)
outfile = submitted_file.replace(dir_name, out_dir_name)
if not os.path.isfile(outfile):
if not data_type == 'tad':
from cimr.processor.utils import Infiler
infile = Infiler(
data_type,
submitted_file,
genome_build='b38',
update_rsid=False,
outfile=str(outfile),
chunksize=700000
)
infile.read_file()
if data_type == 'eqtl':
from cimr.processor.query import Querier
genes = list(infile.list_genes())
queried = Querier(genes)
queried.form_query()
else:
logging.info(f' processed file already exists for {submitted_file}')
logging.info(f' if reprocessing, delete {outfile} and file a new pull request')
| 31.086207
| 95
| 0.585136
| 217
| 1,803
| 4.603687
| 0.359447
| 0.117117
| 0.06006
| 0.045045
| 0.16016
| 0.052052
| 0.052052
| 0.052052
| 0
| 0
| 0
| 0.007414
| 0.326678
| 1,803
| 57
| 96
| 31.631579
| 0.815486
| 0.011647
| 0
| 0
| 0
| 0
| 0.094382
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.146341
| 0
| 0.146341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37d19d97641fbdfe4cfca519ffd963eb1a649c60
| 469
|
py
|
Python
|
common/enums.py
|
resourceidea/resourceideaapi
|
4cc7db98f981d8f2011c1995e23e8a8655e31f75
|
[
"MIT"
] | 1
|
2020-05-30T22:27:59.000Z
|
2020-05-30T22:27:59.000Z
|
common/enums.py
|
resourceidea/resourceideaapi
|
4cc7db98f981d8f2011c1995e23e8a8655e31f75
|
[
"MIT"
] | 15
|
2020-02-11T21:53:08.000Z
|
2021-11-02T21:20:03.000Z
|
common/enums.py
|
resourceidea/resourceideaapi
|
4cc7db98f981d8f2011c1995e23e8a8655e31f75
|
[
"MIT"
] | 1
|
2020-08-27T10:57:47.000Z
|
2020-08-27T10:57:47.000Z
|
import enum
class Status(enum.Enum):
"""Status enumeration."""
ACTIVE = 'ACTIVE'
DISABLED = 'DISABLED'
ARCHIVED = 'ARCHIVED'
DELETED = 'DELETED'
class ProgressStatus(enum.Enum):
"""Enumeration indicates the different
stages of the progress made on an engagement,
job or task."""
NOT_STARTED = 'NOT STARTED'
RUNNING = 'RUNNING'
IN_REVIEW = 'IN REVIEW'
REVIEWED = 'REVIEWED'
CLOSED = 'CLOSED'
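# Usage sketch: members are read by attribute and can be looked up by value.
if __name__ == '__main__':
    print(Status.ACTIVE.value)            # 'ACTIVE'
    print(ProgressStatus('IN REVIEW'))    # ProgressStatus.IN_REVIEW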
| 21.318182
| 50
| 0.616205
| 49
| 469
| 5.857143
| 0.612245
| 0.055749
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.277186
| 469
| 21
| 51
| 22.333333
| 0.846608
| 0.24307
| 0
| 0
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37d34e7f40c00147044227bceb687730996c355b
| 10,288
|
py
|
Python
|
biggan/paddorch/paddorch/vision/functional.py
|
zzz2010/Contrib
|
d351d83da718145cef9f6c98598f7fedc027efe5
|
[
"Apache-2.0"
] | 20
|
2020-03-13T13:40:32.000Z
|
2022-03-10T07:31:48.000Z
|
biggan/paddorch/paddorch/vision/functional.py
|
zzz2010/Contrib
|
d351d83da718145cef9f6c98598f7fedc027efe5
|
[
"Apache-2.0"
] | 34
|
2020-02-20T11:04:58.000Z
|
2022-03-12T00:54:26.000Z
|
biggan/paddorch/paddorch/vision/functional.py
|
zzz2010/Contrib
|
d351d83da718145cef9f6c98598f7fedc027efe5
|
[
"Apache-2.0"
] | 41
|
2020-02-14T09:34:39.000Z
|
2022-03-10T07:31:42.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import collections
import random
import math
import cv2
import numbers
import numpy as np
if sys.version_info < (3, 3):
Sequence = collections.Sequence
Iterable = collections.Iterable
else:
Sequence = collections.abc.Sequence
Iterable = collections.abc.Iterable
__all__ = ['flip', 'resize', 'pad', 'rotate', 'to_grayscale']
def flip(image, code):
"""
According to the code (the type of flip), flip the input image
Args:
image: Input image, with (H, W, C) shape
code: Code that indicates the type of flip.
-1 : Flip horizontally and vertically
0 : Flip vertically
1 : Flip horizontally
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms import functional as F
fake_img = np.random.rand(224, 224, 3)
# flip horizontally and vertically
F.flip(fake_img, -1)
# flip vertically
F.flip(fake_img, 0)
# flip horizontally
F.flip(fake_img, 1)
"""
return cv2.flip(image, flipCode=code)
def resize(img, size, interpolation=cv2.INTER_LINEAR):
"""
Resize the input data to the given size
Args:
input: Input data, could be image or masks, with (H, W, C) shape
size: Target size of input data, with (height, width) shape.
interpolation: Interpolation method.
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms import functional as F
fake_img = np.random.rand(256, 256, 3)
F.resize(fake_img, 224)
F.resize(fake_img, (200, 150))
"""
if isinstance(interpolation, Sequence):
interpolation = random.choice(interpolation)
if isinstance(size, int):
h, w = img.shape[:2]
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return cv2.resize(img, (ow, oh), interpolation=interpolation)
else:
oh = size
ow = int(size * w / h)
return cv2.resize(img, (ow, oh), interpolation=interpolation)
else:
return cv2.resize(img, tuple(size[::-1]), interpolation=interpolation)
def pad(img, padding, fill=(0, 0, 0), padding_mode='constant'):
"""Pads the given CV Image on all sides with speficified padding mode and fill value.
Args:
img (np.ndarray): Image to be padded.
padding (int|tuple): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill (int|tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
``constant`` means padding with a constant value, this value is specified with fill.
``edge`` means padding with the last value at the edge of the image.
``reflect`` means padding with reflection of image (without repeating the last value on the edge)
padding ``[1, 2, 3, 4]`` with 2 elements on both sides in reflect mode
will result in ``[3, 2, 1, 2, 3, 4, 3, 2]``.
``symmetric`` means padding with reflection of image (repeating the last value on the edge)
padding ``[1, 2, 3, 4]`` with 2 elements on both sides in symmetric mode
will result in ``[2, 1, 1, 2, 3, 4, 4, 3]``.
Returns:
numpy ndarray: Padded image.
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms.functional import pad
fake_img = np.random.rand(500, 500, 3).astype('float32')
fake_img = pad(fake_img, 2)
print(fake_img.shape)
"""
if not isinstance(padding, (numbers.Number, list, tuple)):
raise TypeError('Got inappropriate padding arg')
if not isinstance(fill, (numbers.Number, str, list, tuple)):
raise TypeError('Got inappropriate fill arg')
if not isinstance(padding_mode, str):
raise TypeError('Got inappropriate padding_mode arg')
if isinstance(padding, collections.Sequence) and len(padding) not in [2, 4]:
raise ValueError(
"Padding must be an int or a 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \
'Expected padding mode be either constant, edge, reflect or symmetric, but got {}'.format(padding_mode)
PAD_MOD = {
'constant': cv2.BORDER_CONSTANT,
'edge': cv2.BORDER_REPLICATE,
'reflect': cv2.BORDER_DEFAULT,
'symmetric': cv2.BORDER_REFLECT
}
if isinstance(padding, int):
pad_left = pad_right = pad_top = pad_bottom = padding
if isinstance(padding, collections.Sequence) and len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
if isinstance(padding, collections.Sequence) and len(padding) == 4:
pad_left, pad_top, pad_right, pad_bottom = padding
if isinstance(fill, numbers.Number):
fill = (fill,) * (2 * len(img.shape) - 3)
if padding_mode == 'constant':
assert (len(fill) == 3 and len(img.shape) == 3) or (len(fill) == 1 and len(img.shape) == 2), \
'channel of image is {} but length of fill is {}'.format(img.shape[-1], len(fill))
img = cv2.copyMakeBorder(
src=img,
top=pad_top,
bottom=pad_bottom,
left=pad_left,
right=pad_right,
borderType=PAD_MOD[padding_mode],
value=fill)
return img
def rotate(img,
angle,
interpolation=cv2.INTER_LINEAR,
expand=False,
center=None):
"""Rotates the image by angle.
Args:
img (numpy.ndarray): Image to be rotated.
angle (float|int): In degrees clockwise order.
interpolation (int, optional):
interpolation: Interpolation method.
expand (bool|optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple|optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
Returns:
numpy ndarray: Rotated image.
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms.functional import rotate
fake_img = np.random.rand(500, 500, 3).astype('float32')
fake_img = rotate(fake_img, 10)
print(fake_img.shape)
"""
dtype = img.dtype
h, w, _ = img.shape
point = center or (w / 2, h / 2)
M = cv2.getRotationMatrix2D(point, angle=-angle, scale=1)
if expand:
if center is None:
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
M[0, 2] += (nW / 2) - point[0]
M[1, 2] += (nH / 2) - point[1]
dst = cv2.warpAffine(img, M, (nW, nH))
else:
xx = []
yy = []
for point in (np.array([0, 0, 1]), np.array([w - 1, 0, 1]),
np.array([w - 1, h - 1, 1]), np.array([0, h - 1, 1])):
target = np.dot(M, point)
xx.append(target[0])
yy.append(target[1])
nh = int(math.ceil(max(yy)) - math.floor(min(yy)))
nw = int(math.ceil(max(xx)) - math.floor(min(xx)))
M[0, 2] += (nw - w) / 2
M[1, 2] += (nh - h) / 2
dst = cv2.warpAffine(img, M, (nw, nh), flags=interpolation)
else:
dst = cv2.warpAffine(img, M, (w, h), flags=interpolation)
return dst.astype(dtype)
def to_grayscale(img, num_output_channels=1):
"""Converts image to grayscale version of image.
Args:
img (numpy.ndarray): Image to be converted to grayscale.
Returns:
numpy.ndarray: Grayscale version of the image.
if num_output_channels == 1, returned image is single channel
if num_output_channels == 3, returned image is 3 channel with r == g == b
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms.functional import to_grayscale
fake_img = np.random.rand(500, 500, 3).astype('float32')
fake_img = to_grayscale(fake_img)
print(fake_img.shape)
"""
if num_output_channels == 1:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
elif num_output_channels == 3:
img = cv2.cvtColor(
cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)
else:
raise ValueError('num_output_channels should be either 1 or 3')
return img
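# Usage sketch (appended for illustration only; it relies solely on numpy and
# cv2, which are already imported above).
if __name__ == '__main__':
    fake_img = (np.random.rand(256, 256, 3) * 255).astype('float32')
    flipped = flip(fake_img, 1)                 # flip horizontally
    resized = resize(fake_img, (128, 96))       # target (height, width)
    padded = pad(fake_img, (4, 8))              # 4 px left/right, 8 px top/bottom
    rotated = rotate(fake_img, 30, expand=True)
    gray = to_grayscale(fake_img.astype('uint8'))
    print(flipped.shape, resized.shape, padded.shape, rotated.shape, gray.shape)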
| 38.38806
| 111
| 0.602061
| 1,389
| 10,288
| 4.403888
| 0.203024
| 0.021743
| 0.012751
| 0.014713
| 0.285271
| 0.222168
| 0.18751
| 0.170508
| 0.145496
| 0.128494
| 0
| 0.026076
| 0.29549
| 10,288
| 268
| 112
| 38.38806
| 0.817881
| 0.494751
| 0
| 0.094828
| 0
| 0
| 0.091272
| 0
| 0
| 0
| 0
| 0
| 0.017241
| 1
| 0.043103
| false
| 0
| 0.060345
| 0
| 0.172414
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37d5209ef3010122c779cf4e6e97b119c2f9a504
| 14,267
|
py
|
Python
|
ground_battle.py
|
ashhansen6/minigames
|
5b2e0db14b3567c9b6220206105ed448fb303551
|
[
"MIT"
] | null | null | null |
ground_battle.py
|
ashhansen6/minigames
|
5b2e0db14b3567c9b6220206105ed448fb303551
|
[
"MIT"
] | 3
|
2021-03-25T02:39:44.000Z
|
2021-06-16T17:53:36.000Z
|
ground_battle.py
|
ashhansen6/minigames
|
5b2e0db14b3567c9b6220206105ed448fb303551
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 29 13:38:35 2021
GROUND INVASION! The Game
@author: Ashton Hansen (ashhansen6@outlook.com)
"""
# Packages used:
import numpy as np
import pandas as pd
import random as rng
from termcolor import colored
# Defining starting forces
## Defenders:
def_force = 1250
def_reserves = 400
defenders = def_force + def_reserves
def_strength = def_force
def_guard = def_force
## Attackers:
att_force = 900
att_reserves = 1000
attackers = att_force + att_reserves
att_strength = att_force
att_guard = att_force
# Defining strategies:
## Defenders:
def_strat = ["draft", "turtle"]
### Draft
def draft(def_force, def_reserves):
global def_pair
global def_strength
global def_guard
# Defender Strategy Information
print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan"))
print("You hear news that a draft decree was issued...")
print("Intelligence suggests that there will be more enemy combatants.")
print("You expect the drafted soldiers to have decreased combat effectiveness.")
# Defender Strategy Effects
if def_reserves >= 100:
def_danger = def_force + 100
def_safe = def_reserves - 100
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
else:
def_danger = def_force + def_reserves
def_safe = 0
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
def_power = def_danger * 0.980
def_protection = def_danger * 0.95
def_deployment = [def_danger, def_safe, def_power, def_protection]
return(def_deployment)
### Turtle
def turtle(def_force, def_reserves):
global def_pair
global def_strength
global def_guard
# Defender Strategy Information
print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan"))
print("The defenders appear to bolster their defenses in preparation.")
print("Intelligence suggests that their defenses will be difficult to penetrate.")
print("It is likely that the defenders will try to keep soldiers out of harm's way.")
# Defender Strategy Effects
if def_force > 1100:
def_danger = def_force
def_safe = def_reserves + (def_danger - 1100)
def_danger = 1100
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
else:
def_danger = def_force
def_safe = def_reserves
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
def_power = def_danger * 0.975
def_protection = def_danger * 1.15
def_deployment = [def_danger, def_safe, def_power, def_protection]
return(def_deployment)
## Attackers:
att_strat = ["blitz", "guerilla"]
### Blitz
def blitz(att_force, att_reserves):
global att_pair
global att_strength
global att_guard
# Attacker Strategy Information
print(colored("########## OFFICERS' REPORTS #########", on_color = "on_cyan"))
print("Your officers grimly accept your orders...")
print("There is an air of apprehension as the troops prepare to deploy.")
print("While offensive effectiveness will improve, heavier losses are expected.")
# Attacker Strategy Effects
if att_reserves >= 200:
att_danger = att_force + 200
att_safe = att_reserves - 200
print("Attacker's fielded forces:", att_danger)
print("Attacker's forces still in reserve:", att_safe)
else:
att_danger = att_force + att_reserves
att_safe = 0
print("Attacker's fielded forces:", att_danger)
print("Attacker's forces still in reserve:", att_reserves)
att_power = att_danger * 1.10
att_protection = att_danger * 0.90
att_deployment = [att_danger, att_safe, att_power, att_protection]
return(att_deployment)
### Guerilla
def guerilla(att_force, att_reserves):
global att_pair
global att_strength
global att_guard
# Attacker Strategy Information
print(colored("########## OFFICERS' REPORTS #########", on_color = "on_cyan"))
print("Your officers immediately begin plans to target strategic weak points.")
print("Soldiers move out in small forces and keep the enemy guessing.")
print("While not as effective offensively, troop survival rates should be higher.")
# Attacker Strategy Effects
if att_force > 750:
att_danger = att_force
att_safe = att_reserves + (att_force - 750)
att_danger = 750
else:
att_danger = att_force
att_safe = att_reserves
print("Attacker's fielded forces:", att_danger)
print("Attacker's forces still in reserve:", att_safe)
att_power = att_danger * 0.95
att_protection = att_danger * 1.25
att_deployment = [att_danger, att_safe, att_power, att_protection]
return(att_deployment)
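# Each strategy above returns a 4-element deployment list:
# [fielded troops, troops left in reserve, attack power, guard value].
# For instance, blitz(900, 1000) fields 1100 troops, keeps 800 in reserve,
# and yields power 1100 * 1.10 = 1210.0 and guard 1100 * 0.90 = 990.0.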
# Ground Battle Event (Player == Attacker)
wave = 0
player = input("Attacker or Defender? [A/D]:")
while (attackers > 0) and (defenders > 0):
# Wave Information
wave = wave + 1
if wave == 1:
print("############################################################")
print("PREPARE FOR BATTLE! THE FIRST WAVE OF THE BATTLE BEGINS NOW.")
print("############################################################")
else:
print("########## WAVE:", wave, "##########")
print("#############################")
print("Defending force strength:", def_force)
print("Defending forces in reserve:", def_reserves)
print("Attacking force strength:", att_force)
print("Attacking forces in reserve:", att_reserves)
if player =="A":
# Active Player (Attacker)
att_strat_chosen = input(colored("How should we proceed, commander? [blitz/guerilla]:", "yellow"))
elif player == "D":
# CPU Attacker
att_strat_chosen = rng.choice(att_strat)
# Defender Setup
if player == "A":
# CPU Defender
if def_reserves > 0:
def_strat = ["none",
"draft", "draft", "draft", "draft", "draft", "draft",
"turtle", "turtle", "turtle"]
def_strat_chosen = rng.choice(def_strat)
else:
def_strat = ["none", "none",
"turtle", "turtle", "turtle" ,"turtle", "turtle", "turtle", "turtle", "turtle"]
def_strat_chosen = rng.choice(def_strat)
elif player == "D":
# Active Player (defender)
def_strat_chosen = input(colored("How should we proceed, commander? [draft/turtle]:", "yellow"))
if def_strat_chosen == "draft":
draft_results = draft(def_force, def_reserves)
def_force = draft_results[0]
def_reserves = draft_results[1]
def_strength = draft_results[2]
def_guard = draft_results[3]
elif def_strat_chosen == "turtle":
turtle_results = turtle(def_force, def_reserves)
def_force = turtle_results[0]
def_reserves = turtle_results[1]
def_strength = turtle_results[2]
def_guard = turtle_results[3]
elif def_strat_chosen == "none":
print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan"))
print("It appears that the enemy will employ standard tactics...")
def_force = def_force
def_reserves = def_reserves
def_strength = def_force
def_guard = def_force
print("Defending force strength:", def_force)
print("Forces kept in reserve:", def_reserves)
# Attacker Setup
if att_strat_chosen == "blitz":
blitz_results = blitz(att_force, att_reserves)
att_force = blitz_results[0]
att_reserves = blitz_results[1]
att_strength = blitz_results[2]
att_guard = blitz_results[3]
elif att_strat_chosen == "guerilla":
guerilla_results = guerilla(att_force, att_reserves)
att_force = guerilla_results[0]
att_reserves = guerilla_results[1]
att_strength = guerilla_results[2]
att_guard = guerilla_results[3]
# Combat
# Attacker damage
def_guard = np.random.normal(def_guard, def_guard/10) * 0.50
att_strength = att_strength - def_guard
if att_strength < 0:
att_strength = 0
def_force = def_force - np.random.normal(att_strength, att_strength/10)//2 - (0.1*att_strength)//1
if def_force < 0:
def_force = 0
# Defender damage
att_guard = np.random.normal(att_guard, att_guard/10) * 0.50 - 0.1
def_strength = def_strength - att_guard
if def_strength < 0:
def_strength = 0
att_force = att_force - np.random.normal(def_strength, def_strength/10)//2 - (0.1*def_strength)//1
if att_force < 0:
att_force = 0
# Post-wave results:
print(colored("########## POST-WAVE RESULTS ##########", on_color = "on_cyan"))
print(colored("Defenders:", on_color = "on_blue"))
print("Surviving defensive forces:", def_force)
print("Defenseive forces kept in reserve:", def_reserves)
print("Defender strength estimate:", def_strength)
print("Defender guard estimate:", def_guard)
print(colored("Attackers:", on_color = "on_red"))
print("Surviving attacker forces:", att_force)
print("Attacker forces kept in reserve:", att_reserves)
print("Attacker strength estimate:", att_strength)
print("Attacker guard estimate:", att_guard)
# Reset allocations
# Defender reallocations:
def_reserves = def_reserves + def_force
def_force = 0
if def_reserves >= 1250:
def_reserves = def_reserves - 1250
def_force = 1250
def_guard = def_force
else:
def_force = def_reserves
def_reserves = 0
def_guard = def_force
# Attacker reallocations:
att_reserves = att_reserves + att_force
att_force = 0
if att_reserves >= 900:
att_reserves = att_reserves - 900
att_force = 900
att_guard = att_force
else:
att_force = att_reserves
att_reserves = 0
att_guard = att_force
defenders = def_force + def_reserves
attackers = att_force + att_reserves
# End of wave conditionals
if (attackers > 0) and (defenders > 0) and (player == "A"):
fightflight = input(colored("Continue or retreat?: [continue/retreat]:", "yellow"))
if fightflight == "retreat":
print(colored("########## WITHDRAWAL ##########", on_color = "on_blue"))
print("You choose to withdraw your troops...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", attackers)
print("Total losses:", (1900 - attackers))
print("Survival rate:", (attackers)/1900)
print("Total assault waves:", wave)
break
else:
print("The battle will continue next turn...")
elif attackers <= 0 and player == "A":
print(colored("########## FAILURE! ##########", on_color = "on_red"))
print("Your assault has been repelled!")
print("You return home, wondering what punishment for your failure awaits...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", attackers)
print("Total losses:", (1900 - attackers))
print("Survival rate:", (attackers)/1900)
print("Total assault waves:", wave)
elif defenders <= 0 and player == "A":
print(colored("########## SUCCESS! ##########", on_color = "on_green"))
print("The defenders have been routed!")
print("You may now decide the fate of the defending population...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", attackers)
print("Total losses:", (1900 - attackers))
print("Survival rate:", (attackers)/1900)
print("Total assault waves:", wave)
elif (attackers > 0) and (defenders > 0) and (player == "D"):
fightflight = input(colored("Defend or retreat?: [defend/retreat]:", "yellow"))
if fightflight == "retreat":
print(colored("########## WITHDRAWAL ##########", on_color = "on_blue"))
print("You choose to withdraw your troops from the region...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", defenders)
print("Total losses:", (1900 - defenders))
print("Survival rate:", (defenders)/1900)
print("Total assault waves:", wave)
break
else:
print("The battle will continue next turn...")
elif defenders <= 0 and player == "D":
print(colored("########## FAILURE! ##########", on_color = "on_red"))
print("Your defense has been broken!")
print("Enemy troops now occupy your lands and have claimed dominion...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", defenders)
print("Total losses:", (1650 - defenders))
print("Survival rate:", (defenders)/1650)
print("Total assault waves:", wave)
elif attackers <= 0 and player == "D":
print(colored("########## SUCCESS! ##########", on_color = "on_green"))
print("The attackers have been repelled!")
print("The storm has passed, and your people live another day...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", defenders)
print("Total losses:", (1650 - defenders))
print("Survival rate:", (defenders)/1650)
print("Total assault waves:", wave)
print("#############################")
| 41.961765
| 107
| 0.604892
| 1,671
| 14,267
| 4.968282
| 0.159186
| 0.0318
| 0.021682
| 0.018791
| 0.532402
| 0.460371
| 0.419778
| 0.400385
| 0.385931
| 0.344977
| 0
| 0.021663
| 0.255835
| 14,267
| 339
| 108
| 42.085546
| 0.76029
| 0.056284
| 0
| 0.44964
| 0
| 0
| 0.310276
| 0.01362
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014388
| false
| 0.003597
| 0.014388
| 0
| 0.028777
| 0.370504
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37d57b222d4daa1969049535271df3dff47b0edb
| 1,925
|
py
|
Python
|
ws2122-lspm/Lib/site-packages/pm4py/statistics/overlap/utils/compute.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2022-01-19T04:02:46.000Z
|
2022-01-19T04:02:46.000Z
|
ws2122-lspm/Lib/site-packages/pm4py/statistics/overlap/utils/compute.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2021-11-19T07:21:48.000Z
|
2021-11-19T07:21:48.000Z
|
ws2122-lspm/Lib/site-packages/pm4py/statistics/overlap/utils/compute.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2022-01-14T17:15:38.000Z
|
2022-01-14T17:15:38.000Z
|
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from enum import Enum
from typing import Optional, Dict, Any, Tuple, List, Union
from intervaltree import Interval, IntervalTree
from pm4py.util import exec_utils
class Parameters(Enum):
EPSILON = "epsilon"
def apply(points: List[Tuple[float, float]], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> List[int]:
"""
Computes the overlap statistic given a list of points, expressed as (min_timestamp, max_timestamp)
Parameters
-----------------
points
List of points with the aforementioned features
parameters
Parameters of the method, including:
- Parameters.EPSILON
Returns
-----------------
overlap
List associating to each point the number of intersecting points
"""
if parameters is None:
parameters = {}
epsilon = exec_utils.get_param_value(Parameters.EPSILON, parameters, 10 ** (-5))
points = [(x[0] - epsilon, x[1] + epsilon) for x in points]
sorted_points = sorted(points)
tree = IntervalTree()
for p in sorted_points:
tree.add(Interval(p[0], p[1]))
overlap = []
for p in points:
overlap.append(len(tree[p[0]:p[1]]))
return overlap
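# Minimal usage sketch (the sample intervals below are hypothetical; epsilon is
# left at its 10 ** (-5) default):
#
#     apply([(0.0, 5.0), (3.0, 8.0), (10.0, 12.0)])  # -> [2, 2, 1]
#
# The first two intervals intersect each other (and themselves); the third only itself.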
| 31.048387
| 122
| 0.676883
| 264
| 1,925
| 4.905303
| 0.488636
| 0.015444
| 0.030116
| 0.044015
| 0.06332
| 0.043243
| 0
| 0
| 0
| 0
| 0
| 0.010753
| 0.227013
| 1,925
| 61
| 123
| 31.557377
| 0.859543
| 0.537143
| 0
| 0
| 0
| 0
| 0.008974
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.210526
| 0
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37d597714762fd1b5295ccfa14750529f2501042
| 1,775
|
py
|
Python
|
webapp/apps/Base Quiz/baseui_gen.py
|
sk-Prime/webapp
|
c21d7d49de4e4442f9af29ba9f08f37b5abbd20d
|
[
"MIT"
] | 4
|
2021-12-11T16:01:10.000Z
|
2021-12-22T19:47:51.000Z
|
webapp/apps/Base Quiz/baseui_gen.py
|
sk-Prime/webapp
|
c21d7d49de4e4442f9af29ba9f08f37b5abbd20d
|
[
"MIT"
] | null | null | null |
webapp/apps/Base Quiz/baseui_gen.py
|
sk-Prime/webapp
|
c21d7d49de4e4442f9af29ba9f08f37b5abbd20d
|
[
"MIT"
] | null | null | null |
from htmlman import HTMLMan
from styleman import Template
page=HTMLMan()
page.make_responsive()
page.add_title("Base Quiz")
style=Template('antartica')
page.add_body_class(style['page'])
page.add_js("baseui.js")
page.create_section('main',append=True)
page['main'].add_style_class(style['main'])
title=page.create_section('title')
title.add_style_class(style['title'])
title.add_content("Base Quiz")
widget=page.create_section("widget")
widget.add_style_class(style['widget'])
label = page.create_section('label',ID='label')
#label.add_style_class(style['center'])
label.add_style(name='label',mode="class")
label.style_to_cssman(style)
label.style(
"font-size","20pt",
"font-family","monospace",
"height","50px",
"border-bottom","1px solid #ccd",
)
label.add_content("0x0")
answer_l=page.create_section("answer_l1",ID="label_t")
answer_l.add_style_class(style["label"])
answer_l2=page.create_section("answer_l2",ID="label_b")
answer_l2.add_style_class(style["label"])
controls = page.create_section("control")
controls.add_style(name="control",mode="class",cssman_obj=style)
controls.style(
"display","grid",
"grid-template-columns","1fr 1fr",
"gap","10px",
"padding","10px"
)
rand_b=page.create_section('random',tag="button",inner_html="Random")
rand_b.config_attr("type","button","onclick","randomize()")
answer_b=page.create_section('answer_b',tag="button",inner_html="Answer")
answer_b.config_attr("type","button","onclick","answer()")
controls.add_content(rand_b)
controls.add_content(answer_b)
widget.add_content(label)
widget.add_content(answer_l)
widget.add_content(answer_l2)
widget.add_content(controls)
page['main'].add_content(title)
page['main'].add_content(widget)
page.render(style,html_path="baseui.html")
| 26.102941
| 73
| 0.750423
| 262
| 1,775
| 4.843511
| 0.282443
| 0.078802
| 0.120567
| 0.085106
| 0.080378
| 0.044129
| 0
| 0
| 0
| 0
| 0
| 0.010863
| 0.066479
| 1,775
| 68
| 74
| 26.102941
| 0.754979
| 0.021408
| 0
| 0
| 0
| 0
| 0.227404
| 0.01209
| 0
| 0
| 0.001727
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.04
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37d5b6f804f5b3c1c18198672cc73bf3cc33a2a6
| 514
|
py
|
Python
|
cluster_config/cluster.py
|
srcc-msu/job_statistics
|
74680a4e4c105ebcff94f089e07fcb44dbcc12d9
|
[
"MIT"
] | null | null | null |
cluster_config/cluster.py
|
srcc-msu/job_statistics
|
74680a4e4c105ebcff94f089e07fcb44dbcc12d9
|
[
"MIT"
] | null | null | null |
cluster_config/cluster.py
|
srcc-msu/job_statistics
|
74680a4e4c105ebcff94f089e07fcb44dbcc12d9
|
[
"MIT"
] | null | null | null |
name = "cluster"
num_cores = 1000
GENERAL_PARTITIONS = ["regular"]
GPU_PARTITIONS = ["gpu"]
PARTITIONS = GENERAL_PARTITIONS + GPU_PARTITIONS
ACTIVE_JOB_STATES = ["RUNNING", "COMPLETING"]
FINISHED_JOB_STATES = ["COMPLETED", "NODE_FAIL", "TIMEOUT", "FAILED", "CANCELLED"]
JOB_STATES = ACTIVE_JOB_STATES + FINISHED_JOB_STATES
def node2int(node):
"""custom function to convert nodename to int
this one removes all chars from names like node1-001-01"""
return int(''.join(filter(lambda x: x.isdigit(), node)))
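# Quick sanity check under the naming scheme mentioned in the docstring
# (hypothetical node name): node2int("node1-001-01") -> 100101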
| 27.052632
| 82
| 0.741245
| 68
| 514
| 5.382353
| 0.676471
| 0.122951
| 0.125683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024499
| 0.126459
| 514
| 18
| 83
| 28.555556
| 0.790646
| 0.190661
| 0
| 0
| 0
| 0
| 0.180929
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37d62e06868fc1146c429cff23d726ebbfa8afd8
| 7,146
|
py
|
Python
|
room_assistance/indico_room_assistance/plugin.py
|
OmeGak/indico-plugins-cern
|
6e32bc158877080085ceffd021ab1d2247192f75
|
[
"MIT"
] | 4
|
2019-02-12T05:08:56.000Z
|
2022-03-09T23:43:18.000Z
|
room_assistance/indico_room_assistance/plugin.py
|
OmeGak/indico-plugins-cern
|
6e32bc158877080085ceffd021ab1d2247192f75
|
[
"MIT"
] | 40
|
2017-11-08T15:08:50.000Z
|
2022-03-28T15:09:51.000Z
|
room_assistance/indico_room_assistance/plugin.py
|
OmeGak/indico-plugins-cern
|
6e32bc158877080085ceffd021ab1d2247192f75
|
[
"MIT"
] | 15
|
2017-11-08T12:35:59.000Z
|
2022-01-13T15:16:42.000Z
|
# This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2021 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
import dateutil.parser
import pytz
from flask import flash, request, session
from flask_pluginengine import render_plugin_template, url_for_plugin
from indico.core import signals
from indico.core.config import config
from indico.core.plugins import IndicoPlugin
from indico.core.settings.converters import ModelListConverter
from indico.modules.events.requests.models.requests import Request, RequestState
from indico.modules.events.requests.views import WPRequestsEventManagement
from indico.modules.rb.models.rooms import Room
from indico.modules.users import User
from indico.util.string import natural_sort_key
from indico.web.forms.base import IndicoForm
from indico.web.forms.fields import EmailListField, IndicoQuerySelectMultipleField, PrincipalListField
from indico.web.menu import TopMenuItem
from indico_room_assistance import _
from indico_room_assistance.blueprint import blueprint
from indico_room_assistance.definition import RoomAssistanceRequest
from indico_room_assistance.util import (can_request_assistance_for_event, event_has_room_with_support_attached,
is_room_assistance_support)
def _order_func(object_list):
return sorted(object_list, key=lambda r: natural_sort_key(r[1].full_name))
class RoomAssistanceForm(IndicoForm):
_fieldsets = [
('Startup assistance emails', ['room_assistance_recipients', 'rooms_with_assistance',
'room_assistance_support']),
]
room_assistance_recipients = EmailListField(_('Recipients'),
description=_('Notifications about room assistance requests are sent '
'to these email addresses (one per line)'))
rooms_with_assistance = IndicoQuerySelectMultipleField('Rooms',
query_factory=lambda: Room.query,
description=_('Rooms for which users can request startup '
'assistance'),
get_label='full_name', collection_class=set,
render_kw={'size': 20}, modify_object_list=_order_func)
room_assistance_support = PrincipalListField(_('Room assistance support'), allow_groups=True,
description=_('List of users who can view the list of events with '
'room startup assistance.'))
class RoomAssistancePlugin(IndicoPlugin):
"""Room assistance request
This plugin lets users request assistance for meeting rooms.
"""
configurable = True
settings_form = RoomAssistanceForm
settings_converters = {
'rooms_with_assistance': ModelListConverter(Room)
}
acl_settings = {'room_assistance_support'}
default_settings = {
'room_assistance_recipients': [],
'rooms_with_assistance': [],
}
def init(self):
super().init()
self.inject_bundle('main.css', WPRequestsEventManagement, subclasses=False,
condition=lambda: request.view_args.get('type') == RoomAssistanceRequest.name)
self.template_hook('event-actions', self._room_assistance_action)
self.connect(signals.menu.items, self._extend_services_menu, sender='top-menu')
self.connect(signals.plugin.get_event_request_definitions, self._get_room_assistance_request)
self.connect(signals.event.updated, self._on_event_update)
def get_blueprints(self):
return blueprint
def _room_assistance_action(self, event, **kwargs):
return render_plugin_template('room_assistance_action.html', event=event,
can_request_assistance=can_request_assistance_for_event(event))
def _extend_services_menu(self, reservation, **kwargs):
if not session.user or not is_room_assistance_support(session.user):
return
return TopMenuItem('services-cern-room-assistance', _('Room assistance'),
url_for_plugin('room_assistance.request_list'), section='services')
def _get_room_assistance_request(self, sender, **kwargs):
return RoomAssistanceRequest
def _on_event_update(self, event, **kwargs):
changes = kwargs['changes']
if not changes.keys() & {'location_data', 'start_dt', 'end_dt'}:
return
request = Request.find_latest_for_event(event, RoomAssistanceRequest.name)
if not request or request.state != RequestState.accepted:
return
if 'location_data' in changes and not event_has_room_with_support_attached(event):
request.definition.reject(request, {'comment': render_plugin_template('auto_reject_no_supported_room.txt')},
User.get_system_user())
request.data = dict(request.data, occurrences=[])
flash(_("The new event location is not in the list of the rooms supported by the room assistance team. "
"Room assistance request has been rejected and support will not be provided."), 'warning')
if changes.keys() & {'start_dt', 'end_dt'}:
tz = pytz.timezone(config.DEFAULT_TIMEZONE)
occurrences = {dateutil.parser.parse(occ).astimezone(tz) for occ in request.data['occurrences']}
req_dates = {occ.date() for occ in occurrences}
event_dates = set(event.iter_days())
old_dates = req_dates - event_dates
has_overlapping_dates = req_dates & event_dates
if not has_overlapping_dates:
request.definition.reject(request,
{'comment': render_plugin_template('auto_reject_no_overlapping_dates.txt')},
User.get_system_user())
request.data = dict(request.data, occurrences=[])
flash(_("The new event dates don't overlap with the existing room assistance request for this event. "
"Room assistance request has been rejected and support will not be provided."), 'warning')
elif old_dates and has_overlapping_dates:
new_data = dict(request.data)
new_data['occurrences'] = [occ.astimezone(pytz.utc).isoformat() for occ in occurrences
if occ.date() in req_dates & event_dates]
request.data = new_data
flash(_("Room assistance had been requested for days that are not between the updated start/end "
"dates. Support will not be provided on these days anymore."), 'warning')
| 52.160584
| 120
| 0.645116
| 773
| 7,146
| 5.730919
| 0.291074
| 0.088488
| 0.033183
| 0.02167
| 0.185327
| 0.142889
| 0.095711
| 0.095711
| 0.095711
| 0.095711
| 0
| 0.002135
| 0.279037
| 7,146
| 136
| 121
| 52.544118
| 0.857725
| 0.04562
| 0
| 0.085714
| 0
| 0
| 0.186029
| 0.046176
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.190476
| 0.038095
| 0.428571
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37d69c9affc9004808d91089e961fe9861840f56
| 6,808
|
py
|
Python
|
datamart/materializers/wikidata_spo_materializer.py
|
liangmuxin/datamart
|
495a21588db39c9ad239409208bec701dca07f30
|
[
"MIT"
] | 7
|
2018-10-02T01:32:23.000Z
|
2020-10-08T00:42:35.000Z
|
datamart/materializers/wikidata_spo_materializer.py
|
liangmuxin/datamart
|
495a21588db39c9ad239409208bec701dca07f30
|
[
"MIT"
] | 47
|
2018-10-02T05:41:13.000Z
|
2021-02-02T21:50:31.000Z
|
datamart/materializers/wikidata_spo_materializer.py
|
liangmuxin/datamart
|
495a21588db39c9ad239409208bec701dca07f30
|
[
"MIT"
] | 19
|
2018-10-01T22:27:20.000Z
|
2019-02-28T18:59:53.000Z
|
from datamart.materializers.materializer_base import MaterializerBase
import os
import urllib.request
import sys
import csv
import copy
import json
from typing import List
from pprint import pprint
import re
import typing
from pandas import DataFrame
import traceback
class WikidataSPOMaterializer(MaterializerBase):
property = ""
def __init__(self, **kwargs):
""" initialization and loading the city name to city id map
"""
MaterializerBase.__init__(self, **kwargs)
def get(self,
metadata: dict = None,
constrains: dict = None
) -> typing.Optional[DataFrame]:
materialization_arguments = metadata["materialization"].get("arguments", {})
self.property = materialization_arguments.get("property", "")
prefix = 'http://sitaware.isi.edu:8080/bigdata/namespace/wdq/sparql?query='
format = '&format=json'
result = dict()
property_label = ""
main_query_encoded = self._encode_url(self._formulate_main_query(self.property))
try:
# print(prefix + main_query_encoded + format)
main_query_req = urllib.request.Request(prefix + main_query_encoded + format)
result, property_label = self._process_main_query(self._get_query_result(main_query_req))
except Exception as err:
print(err)
traceback.print_tb(err.__traceback__)
count = 0
while(True):
try:
main_query_encoded = self._encode_url(self._next(self._formulate_main_query(self.property), offset=count))
main_query_req = urllib.request.Request(prefix + main_query_encoded + format)
temp, property_label = self._process_main_query(self._get_query_result(main_query_req))
# property_label = re.sub(r"\s+", '_', property_label)
count += 1
result.update(temp)
except:
# print("property ", property, "count ", count)
break
property_label = re.sub(r"\s+", '_', property_label)
sep = ";"
values = list(result.values())
columns = ["source", "subject_label", "category", "prop_value", "value_label"]
# for val in values:
# col_name = col_name.union(set(val.keys()))
# columns = list(col_name)
rows = list()
for k, v in result.items():
v['value_label'] = list(filter(None, v['value_label']))
v['value_label'] = list() if not any(v['value_label']) else list(v['value_label'])
for k1, v1 in v.items():
if k1 != "source":
# print(k1, v1)
v[k1] = sep.join(v1)
rows.append(v)
df = DataFrame(rows, columns=columns)
# print(df)
return df
@staticmethod
def _formulate_main_query(property):
main_query = 'select distinct ?source ?source_l ?category ?prop_l ?prop_value ?know_as where{\
?source wdt:' + property + ' ?prop_value.\
?source rdfs:label ?source_l.\
?source wdt:P31/rdfs:label ?category.\
filter (lang(?category)="en")\
filter (lang(?source_l)="en")\
wd:' + property + ' rdfs:label ?prop_l.\
filter (lang(?prop_l)="en")\
optional {?prop_value rdfs:label ?know_as.\
filter (lang(?know_as)="en")}\
}'
return main_query
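# Illustration (hypothetical property id): for property "P36" the query above selects
# every ?source holding a wdt:P36 value, together with English labels for the source,
# its P31 category, the property itself and, when available, the value.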
@staticmethod
def _formulate_id_category_query(property):
id_category_query = \
'select distinct ?identifier ?l where{\
?source wdt:' + property + ' ?value.\
?source ?id ?idValue.\
?identifier ?ref ?id.\
optional {?value rdfs:label ?know_as.\
filter (lang(?know_as)="en")}\
?identifier wikibase:directClaim ?id.\
?identifier wikibase:propertyType wikibase:ExternalId.\
?identifier rdfs:label ?l.\
?identifier schema:description ?desc.\
filter (lang(?desc)="en")\
filter (lang(?l)="en")\
}\
ORDER BY ?identifier'
return id_category_query
@staticmethod
def _next(query_sent, offset):
query_sent = query_sent + " LIMIT 1000 " + "OFFSET " + str(1000 * offset)
return query_sent
@staticmethod
def _encode_url(url):
encoded_url = urllib.parse.quote(url)
return encoded_url
@staticmethod
def _get_query_result(query_req) -> List[dict]:
data = {}
with urllib.request.urlopen(query_req) as r:
data = json.loads(r.read().decode('utf-8'))
result = data['results']['bindings']
return result
@staticmethod
def _process_id_category_query(data):
ids = dict()
for item in data:
identifier = item['l']['value']
ids[identifier] = set()
return ids
@staticmethod
def _process_main_query(data):
result = {}
property_label = ""
for item in data:
category = item['category']['value'].strip()
property_label = item['prop_l']['value'].strip()
source = item['source']['value'].strip()
prop_value = item['prop_value']['value'].strip()
know_as = item['know_as']['value'].strip() if 'know_as' in item.keys() else None
subject_l = item['source_l']['value'].strip()
# id = item['id']['value'].strip()
# id_l = item['id_l']['value'].strip()
# id_value = item['id_value']['value'].strip()
if source not in result.keys():
result[source] = dict()
result[source]['source'] = source
result[source]['category'] = set()
result[source]['prop_value'] = set()
result[source]['subject_label'] = set()
result[source]['value_label'] = set()
# result[source].update(copy.deepcopy(ids))
result[source]['prop_value'].add(prop_value)
result[source]['category'].add(category)
result[source]['subject_label'].add(subject_l)
result[source]['value_label'].add(know_as)
# result[source][id_l].add(id_value)
# pprint("ss", result)
return result, property_label
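# Shape of the mapping returned by _process_main_query (illustrative keys only):
#   { "<source uri>": { "source": "<source uri>", "category": {...},
#                       "prop_value": {...}, "subject_label": {...},
#                       "value_label": {...} } }
# get() later joins each of these sets into ";"-separated strings, one row per source.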
| 38.03352
| 126
| 0.552732
| 717
| 6,808
| 5.034868
| 0.225941
| 0.042382
| 0.022161
| 0.018283
| 0.209972
| 0.202216
| 0.184488
| 0.166205
| 0.147922
| 0.147922
| 0
| 0.005215
| 0.324031
| 6,808
| 178
| 127
| 38.247191
| 0.779226
| 0.07829
| 0
| 0.153285
| 0
| 0
| 0.084253
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065693
| false
| 0
| 0.094891
| 0
| 0.233577
| 0.021898
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37d6ae677936f62a1cad64182feb228714d24c7d
| 1,402
|
py
|
Python
|
axelrod/load_data_.py
|
danilobellini/Axelrod
|
2c9212553e06095c24adcb82a5979279cbdf45fb
|
[
"MIT"
] | null | null | null |
axelrod/load_data_.py
|
danilobellini/Axelrod
|
2c9212553e06095c24adcb82a5979279cbdf45fb
|
[
"MIT"
] | 1
|
2019-01-22T09:59:52.000Z
|
2019-01-22T09:59:52.000Z
|
axelrod/load_data_.py
|
danilobellini/Axelrod
|
2c9212553e06095c24adcb82a5979279cbdf45fb
|
[
"MIT"
] | null | null | null |
from typing import Dict, List, Tuple
import pkg_resources
def load_file(filename: str, directory: str) -> List[List[str]]:
"""Loads a data file stored in the Axelrod library's data subdirectory,
likely for parameters for a strategy."""
path = "/".join((directory, filename))
data_bytes = pkg_resources.resource_string(__name__, path)
data = data_bytes.decode("UTF-8", "replace")
rows = []
for line in data.split("\n"):
if line.startswith("#") or len(line) == 0:
continue
s = line.split(", ")
rows.append(s)
return rows
def load_weights(
filename: str = "ann_weights.csv", directory: str = "data"
) -> Dict[str, Tuple[int, int, List[float]]]:
"""Load Neural Network Weights."""
rows = load_file(filename, directory)
d = dict()
for row in rows:
name = str(row[0])
num_features = int(row[1])
num_hidden = int(row[2])
weights = list(map(float, row[3:]))
d[name] = (num_features, num_hidden, weights)
return d
def load_pso_tables(filename="pso_gambler.csv", directory="data"):
"""Load lookup tables."""
rows = load_file(filename, directory)
d = dict()
for row in rows:
name, a, b, c = str(row[0]), int(row[1]), int(row[2]), int(row[3])
values = list(map(float, row[4:]))
d[(name, a, b, c)] = values
return d
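# Illustrative calls, assuming the packaged CSV files ship with the library
# (file and directory names below are the defaults declared above):
#
#     weights = load_weights()    # {name: (num_features, num_hidden, weights)}
#     tables = load_pso_tables()  # {(name, a, b, c): list of float values}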
| 31.155556
| 75
| 0.601284
| 198
| 1,402
| 4.151515
| 0.373737
| 0.036496
| 0.058394
| 0.048662
| 0.121655
| 0.121655
| 0.121655
| 0.121655
| 0.121655
| 0.121655
| 0
| 0.010329
| 0.240371
| 1,402
| 44
| 76
| 31.863636
| 0.761502
| 0.110556
| 0
| 0.242424
| 0
| 0
| 0.04564
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.060606
| 0
| 0.242424
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37d85e09c27d6497523862946e45ed0db97f77b6
| 5,248
|
py
|
Python
|
prescryptchain/api/views.py
|
genobank-io/CryptoVault
|
7c2f6c4c55df7d9e172058aad334a26786ea839f
|
[
"Apache-2.0"
] | 3
|
2018-05-03T18:40:48.000Z
|
2019-06-09T19:04:44.000Z
|
prescryptchain/api/views.py
|
genobank-io/CryptoVault
|
7c2f6c4c55df7d9e172058aad334a26786ea839f
|
[
"Apache-2.0"
] | 6
|
2018-06-27T00:14:46.000Z
|
2018-10-29T20:51:45.000Z
|
prescryptchain/api/views.py
|
genobank-io/CryptoVault
|
7c2f6c4c55df7d9e172058aad334a26786ea839f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# REST
from rest_framework.viewsets import ViewSetMixin
from rest_framework import routers, serializers, viewsets
from rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication
from rest_framework.permissions import IsAuthenticated, BasePermission
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.views import APIView
from rest_framework import mixins, generics
from rest_framework.response import Response
from rest_framework.authtoken.models import Token
# our models
from blockchain.models import Block, Prescription, Transaction, Address
from blockchain.utils import pubkey_string_to_rsa, savify_key, pubkey_base64_to_rsa, pubkey_base64_from_uri
from .exceptions import NonValidPubKey
# Define router
router = routers.DefaultRouter()
class PrescriptionSerializer(serializers.ModelSerializer):
""" Prescription serializer """
timestamp = serializers.DateTimeField(read_only=False)
data = serializers.JSONField(binary=False, read_only=False, required=False)
files = serializers.JSONField(binary=False, read_only=False, required=False)
previous_hash = serializers.CharField(read_only=False, required=False, default="0")
class Meta:
model = Prescription
fields = (
'id',
'public_key',
'data',
"files",
'timestamp',
'signature',
'previous_hash',
'raw_size',
'hash_id',
'is_valid',
'transaction',
'readable',
)
read_only_fields = ('id', 'hash_id', 'is_valid', 'transaction',)
def validate(self, data):
''' Method to control Extra Keys on Payload!'''
extra_keys = set(self.initial_data.keys()) - set(self.fields.keys())
if extra_keys:
print(extra_keys)
return data
def create(self, validated_data):
return Transaction.objects.create_tx(data=validated_data)
class PrescriptionViewSet(viewsets.ModelViewSet):
""" Prescription Viewset """
# Temporally without auth
# authentication_classes = (TokenAuthentication, BasicAuthentication, )
# permission_classes = (IsAuthenticated, )
serializer_class = PrescriptionSerializer
lookup_field = "hash_id"
http_method_names = ['get', 'post', 'options']
def get_queryset(self):
''' Custom Get queryset '''
raw_public_key = self.request.query_params.get('public_key', None)
if raw_public_key:
try:
pub_key = pubkey_string_to_rsa(raw_public_key)
except:
pub_key , raw_public_key = pubkey_base64_to_rsa(raw_public_key)
hex_raw_pub_key = savify_key(pub_key)
return Prescription.objects.filter(public_key=hex_raw_pub_key).order_by('-id')
else:
return Prescription.objects.all().order_by('-id')
# add patient filter by email, after could modify with other
router.register(r'rx-endpoint', PrescriptionViewSet, 'prescription-endpoint')
class BlockSerializer(serializers.ModelSerializer):
""" Prescription serializer """
class Meta:
model = Block
fields = (
'id',
'hash_block',
'previous_hash',
'raw_size',
'data',
'timestamp',
'merkleroot',
'hashcash',
'nonce',
)
read_only_fields = ('id', 'hash_block','timestamp','previous_hash', 'raw_size', 'data', 'merkleroot','hashcash','nonce',)
class BlockViewSet(viewsets.ModelViewSet):
""" Prescription Viewset """
serializer_class = BlockSerializer
def get_queryset(self):
return Block.objects.all().order_by('-timestamp')
# add patient filter by email, after could modify with other
router.register(r'block', BlockViewSet, 'block-endpoint')
class AddressSerializer(serializers.ModelSerializer):
""" Address serializer """
pub_key = serializers.CharField(read_only=True,allow_null=True, source="get_pub_key" )
class Meta:
model = Address
fields = (
'public_key_b64',
'address',
'is_valid',
'pub_key',
)
read_only_fields = ('address','pub_key', )
class AddressViewSet(viewsets.ModelViewSet):
""" Prescription Viewset """
serializer_class = AddressSerializer
lookup_field = "address"
http_method_names = ['get', 'options']
def get_queryset(self):
''' Custom Get queryset '''
raw_public_key = self.request.query_params.get('public_key', None)
if raw_public_key:
try:
pub_key_b64 = pubkey_base64_from_uri(raw_public_key)
except Exception as e:
raise NonValidPubKey
else:
_address = Address.objects.get_or_create_rsa_address(pub_key_b64)
return Address.objects.filter(address=_address)
else:
return Address.objects.all()
# add patient filter by email, after could modify with other
router.register(r'address', AddressViewSet, 'address_endpoint')
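# Example request paths produced by the registrations above (the "/api/" prefix is
# an assumption; it depends on where this router is included in the project's urls):
#   GET /api/rx-endpoint/?public_key=<key>  -> prescriptions filtered by that key
#   GET /api/block/                         -> blocks ordered by newest timestamp
#   GET /api/address/?public_key=<key>      -> the address derived from that key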
| 33.858065
| 129
| 0.664444
| 558
| 5,248
| 6.003584
| 0.284946
| 0.034925
| 0.045672
| 0.018806
| 0.268358
| 0.203881
| 0.159104
| 0.159104
| 0.159104
| 0.125075
| 0
| 0.004004
| 0.238567
| 5,248
| 154
| 130
| 34.077922
| 0.834334
| 0.11109
| 0
| 0.280374
| 0
| 0
| 0.102369
| 0.004564
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046729
| false
| 0
| 0.121495
| 0.018692
| 0.429907
| 0.009346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37d92a06667232ad4a4f6ca14ad0257dd6a2e56a
| 2,484
|
py
|
Python
|
client/commands/incremental.py
|
stvreumi/pyre-check
|
94d13c8df37b53843ae92544b81042347b64315d
|
[
"MIT"
] | null | null | null |
client/commands/incremental.py
|
stvreumi/pyre-check
|
94d13c8df37b53843ae92544b81042347b64315d
|
[
"MIT"
] | null | null | null |
client/commands/incremental.py
|
stvreumi/pyre-check
|
94d13c8df37b53843ae92544b81042347b64315d
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import atexit
import logging
import os
import subprocess
import sys
from typing import List
from .command import ClientException, ExitCode, State
from .reporting import Reporting
from .start import Start
LOG = logging.getLogger(__name__)
class Incremental(Reporting):
NAME = "incremental"
def __init__(self, arguments, configuration, analysis_directory) -> None:
super(Incremental, self).__init__(arguments, configuration, analysis_directory)
def _run(self) -> None:
if self._state() == State.DEAD:
LOG.warning("Starting server at `%s`.", self._analysis_directory.get_root())
arguments = self._arguments
arguments.terminal = False
arguments.no_watchman = False
Start(arguments, self._configuration, self._analysis_directory).run()
if self._state() != State.DEAD:
LOG.info("Waiting for server...")
result = self._call_client(command=self.NAME)
try:
result.check()
errors = self._get_errors(result)
self._print(errors)
except ClientException as exception:
LOG.error("Error while waiting for server.")
LOG.error("Run `%s restart` in order to restart the server.", sys.argv[0])
self._exit_code = ExitCode.FAILURE
def _flags(self) -> List[str]:
flags = super()._flags()
flags.extend(
[
"-typeshed",
self._configuration.typeshed,
"-expected-binary-version",
self._configuration.version_hash,
]
)
search_path = self._configuration.search_path
if search_path:
flags.extend(["-search-path", ",".join(search_path)])
return flags
# pyre-ignore: T31696900
def _read_stderr(self, _stream, analysis_directory) -> None:
stderr_file = os.path.join(analysis_directory, ".pyre/server/server.stdout")
with subprocess.Popen(
["tail", "-f", stderr_file],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
) as stderr_tail:
atexit.register(stderr_tail.terminate)
super(Incremental, self)._read_stderr(
stderr_tail.stdout, analysis_directory
)
| 31.846154
| 88
| 0.625201
| 269
| 2,484
| 5.568773
| 0.416357
| 0.079439
| 0.040053
| 0.052069
| 0.030708
| 0.030708
| 0
| 0
| 0
| 0
| 0
| 0.007226
| 0.275765
| 2,484
| 77
| 89
| 32.25974
| 0.825459
| 0.074477
| 0
| 0
| 0
| 0
| 0.092891
| 0.021805
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.157895
| 0
| 0.280702
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37da81bd71be1d388df7554cdc71e1b8d0bef4e9
| 26,540
|
py
|
Python
|
main_random_policy.py
|
rish-raghu/Object-Goal-Navigation
|
d2c882f3a97396c691fc75b46bd94bb7077f7d0f
|
[
"MIT"
] | null | null | null |
main_random_policy.py
|
rish-raghu/Object-Goal-Navigation
|
d2c882f3a97396c691fc75b46bd94bb7077f7d0f
|
[
"MIT"
] | null | null | null |
main_random_policy.py
|
rish-raghu/Object-Goal-Navigation
|
d2c882f3a97396c691fc75b46bd94bb7077f7d0f
|
[
"MIT"
] | null | null | null |
from collections import deque, defaultdict
import os
import sys
import logging
import time
import json
import gym
import torch.nn as nn
import torch
import numpy as np
import matplotlib.pyplot as plt
from model import RL_Policy, Semantic_Mapping
from utils.storage import GlobalRolloutStorage
from envs import make_vec_envs
from arguments import get_args
import algo
os.environ["OMP_NUM_THREADS"] = "1"
def main():
args = get_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# Setup Logging
log_dir = "{}/models/{}/".format(args.dump_location, args.exp_name)
dump_dir = "{}/dump/{}/".format(args.dump_location, args.exp_name)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
logging.basicConfig(
filename=log_dir + 'train.log',
level=logging.INFO)
print("Dumping at {}".format(log_dir))
print(args)
logging.info(args)
# Logging and loss variables
num_scenes = args.num_processes
num_episodes = int(args.num_eval_episodes)
device = args.device = torch.device("cuda:0" if args.cuda else "cpu")
g_masks = torch.ones(num_scenes).float().to(device)
best_g_reward = -np.inf
# one episode per process for both train and eval
# for eval, one scene per process
if args.eval:
episode_success = []
episode_spl = []
episode_dist = []
for _ in range(args.num_processes):
episode_success.append(deque(maxlen=num_episodes))
episode_spl.append(deque(maxlen=num_episodes))
episode_dist.append(deque(maxlen=num_episodes))
# for train, different episodes of same scene per process
else:
episode_success = deque(maxlen=1000)
episode_spl = deque(maxlen=1000)
episode_dist = deque(maxlen=1000)
finished = np.zeros((args.num_processes))
wait_env = np.zeros((args.num_processes))
g_episode_rewards = deque(maxlen=1000)
g_value_losses = deque(maxlen=1000)
g_action_losses = deque(maxlen=1000)
g_dist_entropies = deque(maxlen=1000)
per_step_g_rewards = deque(maxlen=1000)
g_process_rewards = np.zeros((num_scenes))
# Starting environments
torch.set_num_threads(1)
envs = make_vec_envs(args)
obs, infos = envs.reset()
full_episode_data = []
episode_data = [None] * num_scenes
for e, info in enumerate(infos):
cInfo = info.copy()
cInfo["episode_data"]["positions"] = []
cInfo["episode_data"]["gt_positions"] = []
cInfo["episode_data"]["goal_rewards"] = []
cInfo["episode_data"]["explore_rewards"] = []
cInfo["episode_data"]["policy_goals"] = []
cInfo["episode_data"]["used_policy"] = []
episode_data[e] = cInfo["episode_data"]
torch.set_grad_enabled(False)
# Initialize map variables:
# Full map consists of multiple channels containing the following:
# 1. Obstacle Map
# 2. Exploread Area (places that are known to be free or occupied)
# 3. Current Agent Location
# 4. Past Agent Locations
# 5,6,7,.. : Semantic Categories
nc = args.num_sem_categories + 4 # num channels
# Calculating full and local map sizes
map_size = args.map_size_cm // args.map_resolution
full_w, full_h = map_size, map_size
local_w = int(full_w / args.global_downscaling)
local_h = int(full_h / args.global_downscaling)
# Initializing full and local map
full_map = torch.zeros(num_scenes, nc, full_w, full_h).float().to(device)
local_map = torch.zeros(num_scenes, nc, local_w,
local_h).float().to(device)
# Initial full and local pose
full_pose = torch.zeros(num_scenes, 3).float().to(device)
local_pose = torch.zeros(num_scenes, 3).float().to(device)
# Origin of local map
origins = np.zeros((num_scenes, 3))
# Local Map Boundaries
lmb = np.zeros((num_scenes, 4)).astype(int)
# Planner pose inputs has 7 dimensions
# 1-3 store continuous global agent location
# 4-7 store local map boundaries
planner_pose_inputs = np.zeros((num_scenes, 7))
# get local boundary (x1, x2, y1, y2) given local agent position (x, y) and map size
def get_local_map_boundaries(agent_loc, local_sizes, full_sizes):
loc_r, loc_c = agent_loc
local_w, local_h = local_sizes
full_w, full_h = full_sizes
if args.global_downscaling > 1:
gx1, gy1 = loc_r - local_w // 2, loc_c - local_h // 2
gx2, gy2 = gx1 + local_w, gy1 + local_h
if gx1 < 0:
gx1, gx2 = 0, local_w
if gx2 > full_w:
gx1, gx2 = full_w - local_w, full_w
if gy1 < 0:
gy1, gy2 = 0, local_h
if gy2 > full_h:
gy1, gy2 = full_h - local_h, full_h
else:
gx1, gx2, gy1, gy2 = 0, full_w, 0, full_h
return [gx1, gx2, gy1, gy2]
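# Worked example (hypothetical sizes): with a 480x480 full map, a 240x240 local map
# and the agent at cell (100, 400), the window starts as (-20, 220, 280, 520) and is
# clamped to [0, 240, 240, 480] so that it stays inside the full map.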
# initialize global and local maps and poses given that initial position
# is at map center with 0 orientation
def init_map_and_pose():
full_map.fill_(0.)
full_pose.fill_(0.)
full_pose[:, :2] = args.map_size_cm / 100.0 / 2.0
locs = full_pose.cpu().numpy()
planner_pose_inputs[:, :3] = locs
for e in range(num_scenes):
r, c = locs[e, 1], locs[e, 0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
# 3x3 grid around agent location is considered explored
full_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.0
lmb[e] = get_local_map_boundaries((loc_r, loc_c),
(local_w, local_h),
(full_w, full_h))
planner_pose_inputs[e, 3:] = lmb[e]
origins[e] = [lmb[e][2] * args.map_resolution / 100.0,
lmb[e][0] * args.map_resolution / 100.0, 0.]
for e in range(num_scenes):
local_map[e] = full_map[e, :,
lmb[e, 0]:lmb[e, 1],
lmb[e, 2]:lmb[e, 3]]
local_pose[e] = full_pose[e] - \
torch.from_numpy(origins[e]).to(device).float()
# identical to above, except for specific environment
def init_map_and_pose_for_env(e):
full_map[e].fill_(0.)
full_pose[e].fill_(0.)
full_pose[e, :2] = args.map_size_cm / 100.0 / 2.0
locs = full_pose[e].cpu().numpy()
planner_pose_inputs[e, :3] = locs
r, c = locs[1], locs[0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
full_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.0
lmb[e] = get_local_map_boundaries((loc_r, loc_c),
(local_w, local_h),
(full_w, full_h))
planner_pose_inputs[e, 3:] = lmb[e]
origins[e] = [lmb[e][2] * args.map_resolution / 100.0,
lmb[e][0] * args.map_resolution / 100.0, 0.]
local_map[e] = full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]]
local_pose[e] = full_pose[e] - \
torch.from_numpy(origins[e]).to(device).float()
# reward is the newly explored area in a given step (in m^2)
def update_intrinsic_rew(e):
prev_explored_area = full_map[e, 1].sum(1).sum(0)
full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = \
local_map[e]
curr_explored_area = full_map[e, 1].sum(1).sum(0)
intrinsic_rews[e] = curr_explored_area - prev_explored_area
intrinsic_rews[e] *= (args.map_resolution / 100.)**2 # to m^2
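# (each map cell is args.map_resolution cm on a side, so the count of newly
# explored cells times (map_resolution / 100)^2 converts the reward to square metres)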
def get_random_goal(e):
for _ in range(20):
goal = np.random.rand(2)
goal = [int(goal[0] * local_w), int(goal[1] * local_w)]
goal = [min(goal[0], int(local_w-1)), min(goal[1], int(local_w-1))]
if not local_map[e, 1, goal[0], goal[1]]: break
return goal
init_map_and_pose()
# Global policy observation space
ngc = 8 + args.num_sem_categories
es = 2
g_observation_space = gym.spaces.Box(0, 1, # binary local map
(ngc,
local_w,
local_h), dtype='uint8')
# Semantic Mapping
sem_map_module = Semantic_Mapping(args).to(device)
sem_map_module.eval()
intrinsic_rews = torch.zeros(num_scenes).to(device)
# Predict semantic map from frame 1
poses = torch.from_numpy(np.asarray(
[infos[env_idx]['sensor_pose'] for env_idx in range(num_scenes)])
).float().to(device)
# args (obs, pose_obs, maps_last, poses_last)
_, local_map, _, local_pose = \
sem_map_module(obs, poses, local_map, local_pose)
locs = local_pose.cpu().numpy()
for e in range(num_scenes):
r, c = locs[e, 1], locs[e, 0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
local_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.
episode_data[e]["positions"].append([int(loc_r + lmb[e, 0]), int(loc_c + lmb[e, 2]), int(locs[e, 2])])
episode_data[e]["gt_positions"].append(list(infos[e]["gt_pos"]))
global_goals = [get_random_goal(e) for e in range(num_scenes)]
goal_maps = [np.zeros((local_w, local_h)) for _ in range(num_scenes)]
for e in range(num_scenes):
goal_maps[e][global_goals[e][0], global_goals[e][1]] = 1
episode_data[e]["policy_goals"].append(((global_goals[e] + lmb[e, [0,2]]).tolist(), 0))
episode_data[e]["used_policy"].append(True)
planner_inputs = [{} for e in range(num_scenes)]
for e, p_input in enumerate(planner_inputs):
p_input['map_pred'] = local_map[e, 0, :, :].cpu().numpy() # obstacles
p_input['exp_pred'] = local_map[e, 1, :, :].cpu().numpy() # explored
p_input['pose_pred'] = planner_pose_inputs[e] # global location+local map bounds
p_input['goal'] = goal_maps[e] # global_goals[e]
p_input['new_goal'] = 1
p_input['found_goal'] = 0
p_input['wait'] = wait_env[e] or finished[e]
if args.visualize or args.print_images:
local_map[e, -1, :, :] = 1e-5 # TODO: what is this?
# single channel where each grid loc is cat ID
p_input['sem_map_pred'] = local_map[e, 4:, :, :
].argmax(0).cpu().numpy()
obs, _, done, infos = envs.plan_act_and_preprocess(planner_inputs)
start = time.time()
g_reward = 0
torch.set_grad_enabled(False)
spl_per_category = defaultdict(list)
success_per_category = defaultdict(list)
for step in range(args.num_training_frames // args.num_processes + 1):
if finished.sum() == args.num_processes:
break
g_step = (step // args.num_local_steps) % args.num_global_steps # global step num in PPO
l_step = step % args.num_local_steps # local step num in global step
# ------------------------------------------------------------------
# Reinitialize variables when episode ends
l_masks = torch.FloatTensor([0 if x else 1
for x in done]).to(device)
g_masks *= l_masks
for e, x in enumerate(done):
if x:
spl = infos[e]['spl']
success = infos[e]['success']
dist = infos[e]['distance_to_goal']
spl_per_category[infos[e]['goal_name']].append(spl)
success_per_category[infos[e]['goal_name']].append(success)
if args.eval:
episode_success[e].append(success)
episode_spl[e].append(spl)
episode_dist[e].append(dist)
if len(episode_success[e]) == num_episodes:
finished[e] = 1
episode_data[e]["success"] = success
episode_data[e]["spl"] = spl
episode_data[e]["distance_to_goal"] = dist
full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = local_map[e]
episode_data[e]["explored_area"] = full_map[e, 1].sum(1).sum(0).item()
scene = episode_data[e]["scene_id"][16:-4]
if args.save_maps:
np.save('{}/maparr_{}_{}'.format(dump_dir, scene, episode_data[e]['episode_id']), full_map[e].cpu().numpy())
full_episode_data.append(episode_data[e])
cInfo = infos[e].copy()
cInfo["episode_data"]["positions"] = []
cInfo["episode_data"]["gt_positions"] = []
cInfo["episode_data"]["goal_rewards"] = []
cInfo["episode_data"]["explore_rewards"] = []
cInfo["episode_data"]["policy_goals"] = []
cInfo["episode_data"]["used_policy"] = []
episode_data[e] = cInfo["episode_data"]
else:
episode_success.append(success)
episode_spl.append(spl)
episode_dist.append(dist)
wait_env[e] = 1.
update_intrinsic_rew(e)
init_map_and_pose_for_env(e)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Semantic Mapping Module
poses = torch.from_numpy(np.asarray(
[infos[env_idx]['sensor_pose'] for env_idx
in range(num_scenes)])
).float().to(device)
_, local_map, _, local_pose = \
sem_map_module(obs, poses, local_map, local_pose)
locs = local_pose.cpu().numpy()
planner_pose_inputs[:, :3] = locs + origins
local_map[:, 2, :, :].fill_(0.) # Resetting current location channel
# update current location
for e in range(num_scenes):
r, c = locs[e, 1], locs[e, 0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
local_map[e, 2:4, loc_r - 2:loc_r + 3, loc_c - 2:loc_c + 3] = 1.
if args.eval and not wait_env[e]:
episode_data[e]["positions"].append([int(loc_r + lmb[e, 0]), int(loc_c + lmb[e, 2]), int(locs[e, 2])])
episode_data[e]["gt_positions"].append(list(infos[e]["gt_pos"]))
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Global Policy
if l_step == args.num_local_steps - 1:
# For every global step, update the full and local maps
for e in range(num_scenes):
if wait_env[e] == 1: # New episode
wait_env[e] = 0.
else:
update_intrinsic_rew(e)
# update global map and pose based on new position in old local frame
full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = \
local_map[e]
full_pose[e] = local_pose[e] + \
torch.from_numpy(origins[e]).to(device).float()
# center the local frame based on new position
locs = full_pose[e].cpu().numpy()
r, c = locs[1], locs[0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
lmb[e] = get_local_map_boundaries((loc_r, loc_c),
(local_w, local_h),
(full_w, full_h))
# compute new local map and pose based on new local frame
planner_pose_inputs[e, 3:] = lmb[e]
origins[e] = [lmb[e][2] * args.map_resolution / 100.0,
lmb[e][0] * args.map_resolution / 100.0, 0.]
local_map[e] = full_map[e, :,
lmb[e, 0]:lmb[e, 1],
lmb[e, 2]:lmb[e, 3]]
local_pose[e] = full_pose[e] - \
torch.from_numpy(origins[e]).to(device).float()
locs = local_pose.cpu().numpy()
# Get exploration reward and metrics
g_reward = torch.from_numpy(np.asarray(
[infos[env_idx]['g_reward'] for env_idx in range(num_scenes)])
).float().to(device)
g_reward += args.intrinsic_rew_coeff * intrinsic_rews.detach()
for e in range(num_scenes):
if args.eval and not wait_env[e]:
episode_data[e]["goal_rewards"].append(infos[e]["g_reward"])
episode_data[e]["explore_rewards"].append(intrinsic_rews[e].item())
g_process_rewards += g_reward.cpu().numpy()
g_total_rewards = g_process_rewards * \
(1 - g_masks.cpu().numpy())
g_process_rewards *= g_masks.cpu().numpy()
per_step_g_rewards.append(np.mean(g_reward.cpu().numpy()))
if np.sum(g_total_rewards) != 0:
for total_rew in g_total_rewards:
if total_rew != 0:
g_episode_rewards.append(total_rew)
global_goals = [get_random_goal(e) for e in range(num_scenes)]
for e in range(num_scenes):
if args.eval and not wait_env[e]:
episode_data[e]["policy_goals"].append(((global_goals[e] + lmb[e, [0,2]]).tolist(), 0))
g_reward = 0
g_masks = torch.ones(num_scenes).float().to(device)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Update long-term goal if target object is found
found_goal = [0 for _ in range(num_scenes)]
goal_maps = [np.zeros((local_w, local_h)) for _ in range(num_scenes)]
# If goal category not found in map, goal is the location sampled by
# policy
for e in range(num_scenes):
goal_maps[e][global_goals[e][0], global_goals[e][1]] = 1
if args.eval and not wait_env[e]:
episode_data[e]["used_policy"].append(True)
# Else if goal category found in map, use all locations where prob of goal
# obj existing is > 0 as the goal map for planner
for e in range(num_scenes):
cn = infos[e]['goal_cat_id'] + 4
if local_map[e, cn, :, :].sum() != 0.:
cat_semantic_map = local_map[e, cn, :, :].cpu().numpy()
cat_semantic_scores = cat_semantic_map
cat_semantic_scores[cat_semantic_scores > 0] = 1.
goal_maps[e] = cat_semantic_scores
found_goal[e] = 1
if args.eval and not wait_env[e]:
episode_data[e]["used_policy"][-1] = False
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Take action and get next observation
planner_inputs = [{} for e in range(num_scenes)]
for e, p_input in enumerate(planner_inputs):
p_input['map_pred'] = local_map[e, 0, :, :].cpu().numpy()
p_input['exp_pred'] = local_map[e, 1, :, :].cpu().numpy()
p_input['pose_pred'] = planner_pose_inputs[e]
p_input['goal'] = goal_maps[e] # global_goals[e]
p_input['new_goal'] = l_step == args.num_local_steps - 1
p_input['found_goal'] = found_goal[e]
p_input['wait'] = wait_env[e] or finished[e]
if args.visualize or args.print_images:
local_map[e, -1, :, :] = 1e-5
p_input['sem_map_pred'] = local_map[e, 4:, :,
:].argmax(0).cpu().numpy()
obs, _, done, infos = envs.plan_act_and_preprocess(planner_inputs)
# ------------------------------------------------------------------
# Logging
if len(full_episode_data) % args.episode_save_interval == 0:
with open('{}/{}_episode_data.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(full_episode_data, f)
if step % args.log_interval == 0:
end = time.time()
time_elapsed = time.gmtime(end - start)
log = " ".join([
"Time: {0:0=2d}d".format(time_elapsed.tm_mday - 1),
"{},".format(time.strftime("%Hh %Mm %Ss", time_elapsed)),
"num timesteps {},".format(step * num_scenes),
"FPS {},".format(int(step * num_scenes / (end - start)))
])
log += "\n\tRewards:"
if len(g_episode_rewards) > 0:
log += " ".join([
" Global step mean/med rew:",
"{:.4f}/{:.4f},".format(
np.mean(per_step_g_rewards),
np.median(per_step_g_rewards)),
" Global eps mean/med/min/max eps rew:",
"{:.3f}/{:.3f}/{:.3f}/{:.3f},".format(
np.mean(g_episode_rewards),
np.median(g_episode_rewards),
np.min(g_episode_rewards),
np.max(g_episode_rewards))
])
if args.eval:
total_success = []
total_spl = []
total_dist = []
for e in range(args.num_processes):
for acc in episode_success[e]:
total_success.append(acc)
for dist in episode_dist[e]:
total_dist.append(dist)
for spl in episode_spl[e]:
total_spl.append(spl)
if len(total_spl) > 0:
log += " ObjectNav succ/spl/dtg:"
log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format(
np.mean(total_success),
np.mean(total_spl),
np.mean(total_dist),
len(total_spl))
else:
if len(episode_success) > 100:
log += " ObjectNav succ/spl/dtg:"
log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format(
np.mean(episode_success),
np.mean(episode_spl),
np.mean(episode_dist),
len(episode_spl))
log += "\n\tLosses:"
if len(g_value_losses) > 0 and not args.eval:
log += " ".join([
" Policy Loss value/action/dist:",
"{:.3f}/{:.3f}/{:.3f},".format(
np.mean(g_value_losses),
np.mean(g_action_losses),
np.mean(g_dist_entropies))
])
print(log)
logging.info(log)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Save best models
if (step * num_scenes) % args.save_interval < \
num_scenes:
if len(g_episode_rewards) >= 1000 and \
(np.mean(g_episode_rewards) >= best_g_reward) \
and not args.eval:
torch.save(g_policy.state_dict(),
os.path.join(log_dir, "model_best.pth"))
best_g_reward = np.mean(g_episode_rewards)
# Save periodic models
if (step * num_scenes) % args.save_periodic < \
num_scenes:
total_steps = step * num_scenes
if not args.eval:
torch.save(g_policy.state_dict(),
os.path.join(dump_dir,
"periodic_{}.pth".format(total_steps)))
# ------------------------------------------------------------------
# Print and save model performance numbers during evaluation
if args.eval:
print("Dumping eval details...")
total_success = []
total_spl = []
total_dist = []
for e in range(args.num_processes):
for acc in episode_success[e]:
total_success.append(acc)
for dist in episode_dist[e]:
total_dist.append(dist)
for spl in episode_spl[e]:
total_spl.append(spl)
if len(total_spl) > 0:
log = "Final ObjectNav succ/spl/dtg:"
log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format(
np.mean(total_success),
np.mean(total_spl),
np.mean(total_dist),
len(total_spl))
print(log)
logging.info(log)
# Save the spl per category
log = "Success | SPL per category\n"
for key in success_per_category:
log += "{}: {} | {}\n".format(key,
sum(success_per_category[key]) /
len(success_per_category[key]),
sum(spl_per_category[key]) /
len(spl_per_category[key]))
print(log)
logging.info(log)
with open('{}/{}_spl_per_cat_pred_thr.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(spl_per_category, f)
with open('{}/{}_success_per_cat_pred_thr.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(success_per_category, f)
with open('{}/{}_episode_data.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(full_episode_data, f)
if __name__ == "__main__":
main()
| 41.020093
| 132
| 0.511492
| 3,339
| 26,540
| 3.830488
| 0.112908
| 0.036122
| 0.018765
| 0.02502
| 0.503753
| 0.452619
| 0.42455
| 0.395152
| 0.383425
| 0.371853
| 0
| 0.021995
| 0.335343
| 26,540
| 646
| 133
| 41.083591
| 0.703061
| 0.117257
| 0
| 0.433121
| 0
| 0
| 0.061459
| 0.010621
| 0
| 0
| 0
| 0.001548
| 0
| 1
| 0.012739
| false
| 0
| 0.03397
| 0
| 0.050955
| 0.016985
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37ddb9f83521ff471c035e9cd6a4902772e590bf
| 5,107
|
py
|
Python
|
mindarmour/utils/logger.py
|
hboshnak/mindarmour
|
0609a4eaea875a84667bed279add9305752880cc
|
[
"Apache-2.0"
] | 139
|
2020-03-28T02:37:07.000Z
|
2022-03-24T15:35:39.000Z
|
mindarmour/utils/logger.py
|
hboshnak/mindarmour
|
0609a4eaea875a84667bed279add9305752880cc
|
[
"Apache-2.0"
] | 2
|
2020-04-02T09:50:21.000Z
|
2020-05-09T06:52:57.000Z
|
mindarmour/utils/logger.py
|
hboshnak/mindarmour
|
0609a4eaea875a84667bed279add9305752880cc
|
[
"Apache-2.0"
] | 12
|
2020-03-28T02:52:42.000Z
|
2021-07-15T08:05:06.000Z
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Util for log module. """
import logging
_LOGGER = logging.getLogger('MA')
def _find_caller():
"""
Bind findCaller() method, which is used to find the stack frame of the
caller so that we can note the source file name, line number and
function name.
"""
return _LOGGER.findCaller()
class LogUtil:
"""
Logging module.
Raises:
SyntaxError: If this class is instantiated directly.
"""
_instance = None
_logger = None
_extra_fmt = ' [%s] [%s] '
def __init__(self):
raise SyntaxError('can not be instantiated, please use get_instance.')
@staticmethod
def get_instance():
"""
Get instance of class `LogUtil`.
Returns:
Object, instance of class `LogUtil`.
"""
if LogUtil._instance is None:
LogUtil._instance = object.__new__(LogUtil)
LogUtil._logger = _LOGGER
LogUtil._init_logger()
return LogUtil._instance
@staticmethod
def _init_logger():
"""
Initialize logger.
"""
LogUtil._logger.setLevel(logging.WARNING)
log_fmt = '[%(levelname)s] %(name)s(%(process)d:%(thread)d,' \
'%(processName)s):%(asctime)s%(message)s'
log_fmt = logging.Formatter(log_fmt)
# create console handler with a higher log level
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_fmt)
# add the handlers to the logger
LogUtil._logger.handlers = []
LogUtil._logger.addHandler(console_handler)
LogUtil._logger.propagate = False
def set_level(self, level):
"""
Set the logging level of this logger, level must be an integer or a
string. Supported levels are 'NOTSET'(integer: 0), 'ERROR'(integer: 1-40),
'WARNING'('WARN', integer: 1-30), 'INFO'(integer: 1-20) and 'DEBUG'(integer: 1-10).
For example, if logger.set_level('WARNING') or logger.set_level(21), then
logger.warn() and logger.error() in scripts would be printed while running,
while logger.info() or logger.debug() would not be printed.
Args:
level (Union[int, str]): Level of logger.
"""
self._logger.setLevel(level)
def add_handler(self, handler):
"""
Add other handler supported by logging module.
Args:
handler (logging.Handler): Other handler supported by logging module.
Raises:
ValueError: If handler is not an instance of logging.Handler.
"""
if isinstance(handler, logging.Handler):
self._logger.addHandler(handler)
else:
raise ValueError('handler must be an instance of logging.Handler,'
' but got {}'.format(type(handler)))
def debug(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'DEBUG'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.debug(self._extra_fmt + msg, file_info, tag, *args)
def info(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'INFO'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.info(self._extra_fmt + msg, file_info, tag, *args)
def warn(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'WARNING'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.warning(self._extra_fmt + msg, file_info, tag, *args)
def error(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'ERROR'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.error(self._extra_fmt + msg, file_info, tag, *args)
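# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). LogUtil is meant to
# be obtained through get_instance(); the 'demo' tag and the message below are
# arbitrary example values.
if __name__ == '__main__':
    demo_logger = LogUtil.get_instance()
    demo_logger.set_level(logging.INFO)
    demo_logger.info('demo', 'answer is %s', 42)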
| 32.119497
| 91
| 0.595457
| 621
| 5,107
| 4.761675
| 0.289855
| 0.024349
| 0.027054
| 0.018938
| 0.308759
| 0.291173
| 0.266824
| 0.266824
| 0.256679
| 0.223199
| 0
| 0.008542
| 0.289407
| 5,107
| 158
| 92
| 32.322785
| 0.806283
| 0.440572
| 0
| 0.192308
| 0
| 0
| 0.085931
| 0.029907
| 0
| 0
| 0
| 0
| 0
| 1
| 0.192308
| false
| 0
| 0.019231
| 0
| 0.326923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37e09e1c599fd41f037cb54000938dba1d33127b
| 7,483
|
py
|
Python
|
bert_rerannker_eval.py
|
satya77/transformer_rankers
|
0d2c20bd26041d887fb65102020a0b609ec967fc
|
[
"MIT"
] | null | null | null |
bert_rerannker_eval.py
|
satya77/transformer_rankers
|
0d2c20bd26041d887fb65102020a0b609ec967fc
|
[
"MIT"
] | null | null | null |
bert_rerannker_eval.py
|
satya77/transformer_rankers
|
0d2c20bd26041d887fb65102020a0b609ec967fc
|
[
"MIT"
] | null | null | null |
from transformer_rankers.trainers import transformer_trainer
from transformer_rankers.datasets import dataset, preprocess_scisumm_ranked
from transformer_rankers.eval import results_analyses_tools
from transformers import BertTokenizer, BertForSequenceClassification
from sacred.observers import FileStorageObserver
from sacred import Experiment
import numpy as np
import torch
import pandas as pd
import argparse
import logging
import sys
ex = Experiment('BERT-ranker experiment')
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.StreamHandler(sys.stdout)
]
)
@ex.main
def run_experiment(args):
args.run_id = str(ex.current_run._id)
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
train, valid, test = preprocess_scisumm_ranked.transform_to_dfs(
args.path_to_ranked_file,args.path_to_ranked_test,args.path_to_ranked_dev)
# Choose the negative candidate sampler
ns_train = None
ns_val = None
# Create the loaders for the datasets, with the respective negative samplers
dataloader = dataset.QueryDocumentDataLoader(train, valid, test,
tokenizer, ns_train, ns_val,
'classification', args.val_batch_size,
args.val_batch_size, 512,
0, args.data_folder + "/scisumm_ranked")
with_ranked_list=True
train_loader, val_loader, test_loader = dataloader.get_pytorch_dataloaders(with_ranked_list)
# Instantiate transformer model to be used
model = BertForSequenceClassification.from_pretrained('bert-base-cased')
model.resize_token_embeddings(len(dataloader.tokenizer))
e = torch.load(args.model_dir)
model.load_state_dict(e)
model.eval()
# Instantiate trainer that handles fitting.
trainer = transformer_trainer.TransformerTrainer(model, train_loader, val_loader, test_loader,
0, "classification", tokenizer,
False, 0,
0 ,0, 0)
# Predict for test
logging.info("Predicting")
preds, labels, doc_ids, all_queries, preds_without_acc = trainer.test()
# res = results_analyses_tools.evaluate_and_aggregate(preds, labels, ['R_10@1',
# 'R_10@2',
# 'R_10@5',
# 'R_2@1',
# 'accuracy_0.3',
# 'accuracy_0.3_upto_1',
# 'precision_0.3',
# 'recall_0.3',
# 'f_score_0.3',
# 'accuracy_0.4',
# 'accuracy_0.4_upto_1',
# 'precision_0.4',
# 'recall_0.4',
# 'f_score_0.4',
# 'accuracy_0.5',
# 'accuracy_0.5_upto_1',
# 'precision_0.5',
# 'recall_0.5',
# 'f_score_0.5'
# ])
# for metric, v in res.items():
# logging.info("Test {} : {:4f}".format(metric, v))
# # Saving predictions and labels to a file
# max_preds_column = max([len(l) for l in preds])
# preds_df = pd.DataFrame(preds, columns=["prediction_" + str(i) for i in range(max_preds_column)])
# preds_df.to_csv(args.output_dir + "/" + args.run_id + "/predictions.csv", index=False)
#
# labels_df = pd.DataFrame(labels, columns=["label_" + str(i) for i in range(max_preds_column)])
# labels_df.to_csv(args.output_dir + "/" + args.run_id + "/labels.csv", index=False)
# # predict on the test set
# preds, labels, doc_ids, all_queries, preds_without_acc = trainer.test()
new_preds = list((np.array(preds_without_acc) > 0.4).astype(int))
d = {'query': all_queries, 'doc_id': doc_ids, 'label': new_preds, 'similiarity': preds_without_acc}
df_doc_ids = pd.DataFrame(d)
df_doc_ids = df_doc_ids.groupby('query').agg(list).reset_index()
# df_doc_ids_ones = df_doc_ids[df_doc_ids['label']==1]
# df_doc_ids_ones = df_doc_ids_ones.groupby('query').agg(list).reset_index()
# df_doc_ids_non_ones = df_doc_ids.groupby('query').agg(list).reset_index()
# new_df=[]
# for i,row in df_doc_ids_non_ones.iterrows():
# if all([v == 0 for v in row['label']]):
# highest_value=[x for _, x in sorted(zip(row['similiarity'], row['doc_id']), key=lambda pair: pair[0])]
# highest_value_sim=[x for x in sorted(row['similiarity'])]
#
# row['label'] = [1]
# row[ 'doc_id'] = [highest_value[0]]
# row[ 'similiarity'] = [highest_value_sim[0]]
#
# new_df.append(row)
# result = pd.concat([df_doc_ids,pd.DataFrame(new_df)])
df_doc_ids.to_csv(args.output_dir + "/" + args.run_id + "/doc_ids_test_all_results.csv", index=False, sep='\t')
return trainer.best_ndcg
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data_folder", default=None, type=str, required=True,
help="the folder containing data")
parser.add_argument("--model_dir", default=None, type=str, required=True,
help="the folder that the model is saved in.")
parser.add_argument("--val_batch_size", default=32, type=int, required=False,
help="Validation and test batch size.")
parser.add_argument("--path_to_ranked_file", default=None, type=str, required=False,
help="if there is a ranked file this will be the path to it. ")
parser.add_argument("--path_to_ranked_test", default=None, type=str, required=False,
help="if there is a ranked test file this will be the path to it. ")
parser.add_argument("--path_to_ranked_dev", default=None, type=str, required=False,
help="if there is a ranked test file this will be the path to it. ")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="the folder to output predictions")
args = parser.parse_args()
args.sacred_ex = ex
ex.observers.append(FileStorageObserver(args.output_dir))
ex.add_config({'args': args})
return ex.run()
if __name__ == "__main__":
main()
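# Usage sketch (assumed file names, not part of the original script): the
# evaluator is launched from the command line with the argparse flags defined
# in main() above, e.g.
#
#   python bert_rerannker_eval.py \
#       --data_folder data/ \
#       --model_dir runs/bert_ranker/model.pt \
#       --path_to_ranked_file data/train_ranked.tsv \
#       --path_to_ranked_test data/test_ranked.tsv \
#       --path_to_ranked_dev data/dev_ranked.tsv \
#       --output_dir runs/eval/
#
# All paths above are placeholders; only --data_folder, --model_dir and
# --output_dir are required.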
| 47.967949
| 116
| 0.526928
| 821
| 7,483
| 4.544458
| 0.263094
| 0.027339
| 0.027875
| 0.028947
| 0.292415
| 0.254891
| 0.226749
| 0.218172
| 0.210935
| 0.136425
| 0
| 0.013634
| 0.372711
| 7,483
| 156
| 117
| 47.967949
| 0.78121
| 0.425498
| 0
| 0.025974
| 0
| 0
| 0.149788
| 0.016722
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025974
| false
| 0
| 0.168831
| 0
| 0.220779
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37e13b4fd890037fc4d7192b2e7467ef9a1cb201
| 4,033
|
py
|
Python
|
check.py
|
Dysoncat/student-services-slas-chat-bot
|
5d9c7105cef640c34018d260249b6a05b959e73f
|
[
"MIT"
] | null | null | null |
check.py
|
Dysoncat/student-services-slas-chat-bot
|
5d9c7105cef640c34018d260249b6a05b959e73f
|
[
"MIT"
] | null | null | null |
check.py
|
Dysoncat/student-services-slas-chat-bot
|
5d9c7105cef640c34018d260249b6a05b959e73f
|
[
"MIT"
] | null | null | null |
import long_responses as long
# Returns the probability of a message matching the responses that we have
def messageProb(userMessage, recognizedWords, isSingleResponse=False, requiredWords=[]):
messageCertainty = 0
hasRequiredWords = True
# Counts how many words are present in each predefined message
for word in userMessage:
if word in recognizedWords:
messageCertainty += 1
# Calculates the percent of recognized words in a user message
percentage = float(messageCertainty) / float(len(recognizedWords))
# Checks that the required words are in the string
for word in requiredWords:
if word not in userMessage:
hasRequiredWords = False
break
# Must either have the required words, or be a single response
if hasRequiredWords or isSingleResponse:
return int(percentage * 100)
else:
return 0
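# Worked example (illustrative values, not from the original script): exactly
# one of the three recognized words appears in the message, so the certainty
# is 1/3 and the single-response call returns int(1/3 * 100) == 33.
assert messageProb(['hello', 'there'], ['hello', 'hi', 'hey'],
                   isSingleResponse=True) == 33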
# Checks all the responses using the probability of the messages
def checkAllMesages(message):
highest_prob_list = {}
ignore_list = {}
def ignoreResponse(bot_response, list_of_words, single_response=False, required_words=[]):
nonlocal ignore_list
ignore_list[bot_response] = messageProb(
message, list_of_words, single_response, required_words)
# Simplifies response creation / adds it to the dict
def response(bot_response, list_of_words, single_response=False, required_words=[]):
nonlocal highest_prob_list
highest_prob_list[bot_response] = messageProb(
message, list_of_words, single_response, required_words)
# Responses -------------------------------------------------------------------------------------------------------
response('Hello!', ['hello', 'hi', 'hey',
'sup', 'heyo'], single_response=True)
response('See you!', ['bye', 'goodbye'], single_response=True)
response('I\'m doing fine, and you?', [
'how', 'are', 'you', 'doing'], required_words=['how', "you"])
response('You\'re welcome!', ['thank', 'thanks'], single_response=True)
response("You can borrow a computer from room 315", ["how", "do", "i", "borrow", "a", "computer"], required_words=["borrow", "computer"])
response("You can apply for a new locker key in room 310", ["how", "can", "i", "apply", "for", "a", "new", "locker", "key"], ["new", "locker", "key"])
response("The guidance office is on the third floor", [
"where", "is", "the", "guidance", "office"], required_words=["guidance", "office"])
response("You can apply for the ID in room 310", [
"how", "can", "i", "get", "new", "id"], ["new", "id"])
response("A student ID costs 25 RMB, and it has to be in cash", [
"how", "much", "does", "a", "new", "id", "cost"], ["id", "cost"])
response("The secondary computer classroom is on the fifth floor, and is number 521", [
"where", "is", "the", "secondary", "computer", "classroom"], ["secondary", "computer"])
response("Don't worry about it.", ["sorry", "sry"], ["sorry", "sry"])
# Ignored Responses
ignoreResponse("Good to hear", [
"i", "doing", "good", "fine", "ok"], required_words=["i", "good"])
best_ignore_match = max(ignore_list, key=ignore_list.get)
# Longer responses
response(long.R_ADVICE, ['give', 'advice'], required_words=['advice'])
response(long.R_EATING, ['what', 'you', 'eat'],
required_words=['you', 'eat'])
response(long.R_SWEARING, [
"fuck", "shit", "motherfucker", "fuck", "you"])
best_match = max(highest_prob_list, key=highest_prob_list.get)
# DEBUGGING TOOLS IF NEEDED
print(highest_prob_list)
print("")
print(
f'Best match = {best_match} | Score: {highest_prob_list[best_match]}')
if highest_prob_list[best_match] < ignore_list[best_ignore_match]:
return best_ignore_match
elif highest_prob_list[best_match] < 1:
return long.unknown()
else:
return best_match
| 40.33
| 154
| 0.623109
| 485
| 4,033
| 5.039175
| 0.334021
| 0.06383
| 0.055237
| 0.027823
| 0.183306
| 0.139116
| 0.108838
| 0.108838
| 0.108838
| 0.108838
| 0
| 0.00665
| 0.21696
| 4,033
| 99
| 155
| 40.737374
| 0.767258
| 0.147037
| 0
| 0.063492
| 0
| 0
| 0.223454
| 0.009043
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063492
| false
| 0
| 0.015873
| 0
| 0.15873
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37e16ab061f36f12398b74b8a1440f3cc6768529
| 1,446
|
py
|
Python
|
image_predictor/utils.py
|
jdalzatec/streamlit-manizales-tech-talks
|
619af5edc79a22ed4cc9f50dd2d0379399357549
|
[
"MIT"
] | 2
|
2022-02-05T15:48:55.000Z
|
2022-02-05T15:57:40.000Z
|
image_predictor/utils.py
|
jdalzatec/streamlit-manizales-tech-talks
|
619af5edc79a22ed4cc9f50dd2d0379399357549
|
[
"MIT"
] | null | null | null |
image_predictor/utils.py
|
jdalzatec/streamlit-manizales-tech-talks
|
619af5edc79a22ed4cc9f50dd2d0379399357549
|
[
"MIT"
] | 4
|
2022-02-05T15:49:02.000Z
|
2022-02-05T15:58:14.000Z
|
from io import StringIO
import numpy as np
from h5py import File
from keras.models import load_model as keras_load_model
from PIL import Image, ImageOps
def predict(image, model):
# Create the array of the right shape to feed into the keras model
# The 'length' or number of images you can put into the array is
# determined by the first position in the shape tuple, in this case 1.
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
# Replace this with the path to your image
image = Image.open(image)
# resize the image to a 224x224 with the same strategy as in TM2:
# resizing the image to be at least 224x224 and then cropping from the center
size = (224, 224)
image = ImageOps.fit(image, size, Image.ANTIALIAS)
# turn the image into a numpy array
image_array = np.asarray(image)
# Normalize the image
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
# run the inference
prediction = model.predict(data)
return prediction[0]
def read_labels(labels_file):
labels = []
lines = StringIO(labels_file.getvalue().decode()).readlines()
for line in lines:
_, *remaining = line.split()
label = " ".join(remaining).strip()
labels.append(label)
return labels
def load_model(model_file):
return keras_load_model(File(model_file))
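# ---------------------------------------------------------------------------
# Minimal usage sketch (assumed file names, not part of the original module):
# 'keras_model.h5' is a Teachable Machine export and 'cat.jpg' an input image;
# both paths are placeholders.
if __name__ == "__main__":
    model = load_model("keras_model.h5")
    scores = predict("cat.jpg", model)  # one probability per class
    print(scores)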
| 32.133333
| 81
| 0.697095
| 218
| 1,446
| 4.541284
| 0.444954
| 0.040404
| 0.028283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035587
| 0.222683
| 1,446
| 44
| 82
| 32.863636
| 0.845196
| 0.331259
| 0
| 0
| 0
| 0
| 0.001046
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.2
| 0.04
| 0.44
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37e304a5ab34e95d070c76d96d91559914adff14
| 561
|
py
|
Python
|
tests/facebook/models/test_photo.py
|
Socian-Ltd/python-facebook-1
|
e9a4f626b37541103c9534a29342ef6033c09c06
|
[
"Apache-2.0"
] | 2
|
2021-03-16T02:58:10.000Z
|
2021-03-16T16:53:23.000Z
|
tests/facebook/models/test_photo.py
|
nedsons/python-facebook
|
bf2b4a70ef0e0a67a142f5856586ea318f9807ea
|
[
"Apache-2.0"
] | null | null | null |
tests/facebook/models/test_photo.py
|
nedsons/python-facebook
|
bf2b4a70ef0e0a67a142f5856586ea318f9807ea
|
[
"Apache-2.0"
] | 1
|
2021-06-02T07:15:35.000Z
|
2021-06-02T07:15:35.000Z
|
import json
import unittest
import pyfacebook.models as models
class PhotoModelTest(unittest.TestCase):
BASE_PATH = "testdata/facebook/models/photos/"
with open(BASE_PATH + 'photo.json', 'rb') as f:
PHOTO_INFO = json.loads(f.read().decode('utf-8'))
def testPhoto(self):
m = models.Photo.new_from_json_dict(self.PHOTO_INFO)
self.assertEqual(m.id, "166370841591183")
self.assertEqual(m.album.id, "108824087345859")
self.assertEqual(len(m.images), 8)
self.assertEqual(m.webp_images[0].height, 800)
| 28.05
| 60
| 0.686275
| 75
| 561
| 5.026667
| 0.573333
| 0.159151
| 0.127321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 0.181818
| 561
| 19
| 61
| 29.526316
| 0.742919
| 0
| 0
| 0
| 0
| 0
| 0.14082
| 0.057041
| 0
| 0
| 0
| 0
| 0.307692
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37e4a1783cf1d5a9318a74c7d860d1f54e64ee4e
| 5,837
|
py
|
Python
|
airbyte-integrations/connectors/source-scaffold-source-python/source_scaffold_source_python/source.py
|
curanaj/airbyte-dbt-demo
|
f6b8ccd8f8e57b7ea84caf814b14d836338e8007
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-scaffold-source-python/source_scaffold_source_python/source.py
|
curanaj/airbyte-dbt-demo
|
f6b8ccd8f8e57b7ea84caf814b14d836338e8007
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-scaffold-source-python/source_scaffold_source_python/source.py
|
curanaj/airbyte-dbt-demo
|
f6b8ccd8f8e57b7ea84caf814b14d836338e8007
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
from datetime import datetime
from typing import Dict, Generator
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
Status,
Type,
)
from airbyte_cdk.sources import Source
class SourceScaffoldSourcePython(Source):
def check(self, logger: AirbyteLogger, config: json) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the integration
e.g: if a provided Stripe API token can be used to connect to the Stripe API.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
try:
# Not Implemented
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {str(e)}")
def discover(self, logger: AirbyteLogger, config: json) -> AirbyteCatalog:
"""
Returns an AirbyteCatalog representing the available streams and fields in this integration.
For example, given valid credentials to a Postgres database,
returns an Airbyte catalog where each postgres table is a stream, and each table column is a field.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteCatalog is an object describing a list of all available streams in this source.
A stream is an AirbyteStream object that includes:
- its stream name (or table name in the case of Postgres)
- json_schema providing the specifications of expected schema for this stream (a list of columns described
by their names and types)
"""
streams = []
stream_name = "TableName" # Example
json_schema = { # Example
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {"columnName": {"type": "string"}},
}
# Not Implemented
streams.append(AirbyteStream(name=stream_name, json_schema=json_schema))
return AirbyteCatalog(streams=streams)
def read(
self, logger: AirbyteLogger, config: json, catalog: ConfiguredAirbyteCatalog, state: Dict[str, any]
) -> Generator[AirbyteMessage, None, None]:
"""
Returns a generator of the AirbyteMessages generated by reading the source with the given configuration,
catalog, and state.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:param catalog: The input catalog is a ConfiguredAirbyteCatalog which is almost the same as AirbyteCatalog
returned by discover(), but
in addition, it's been configured in the UI! Each particular stream and field may have been provided
with extra modifications such as: filtering streams and/or columns out, renaming some entities, etc.
:param state: When Airbyte reads data from a source, it might need to keep a checkpoint cursor to resume
replication in the future from that saved checkpoint.
This is the object that is provided with state from previous runs and avoids replicating the entire set of
data every time.
:return: A generator that produces a stream of AirbyteRecordMessage contained in AirbyteMessage object.
"""
stream_name = "TableName" # Example
data = {"columnName": "Hello World"} # Example
# Not Implemented
yield AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(stream=stream_name, data=data, emitted_at=int(datetime.now().timestamp()) * 1000),
)
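# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the scaffold): exercises the stubbed
# check() and discover() methods directly with an empty config dict.
if __name__ == "__main__":
    source = SourceScaffoldSourcePython()
    logger = AirbyteLogger()
    print(source.check(logger, {}))
    print(source.discover(logger, {}))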
| 47.072581
| 122
| 0.704471
| 758
| 5,837
| 5.408971
| 0.358839
| 0.021463
| 0.010244
| 0.02122
| 0.204146
| 0.18
| 0.18
| 0.18
| 0.18
| 0.18
| 0
| 0.002256
| 0.240535
| 5,837
| 123
| 123
| 47.455285
| 0.922626
| 0.648621
| 0
| 0.05
| 0
| 0
| 0.091442
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.15
| 0
| 0.325
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37e57878ec351c326eab8dff88096e5a9b705681
| 8,983
|
py
|
Python
|
experiments/vgg16/VGG16_utils.py
|
petrapoklukar/DCA
|
e5b3f3481433306a4b33e712272f8bbf5e9d05ce
|
[
"MIT"
] | 2
|
2022-02-14T15:54:22.000Z
|
2022-02-15T18:43:36.000Z
|
experiments/vgg16/VGG16_utils.py
|
petrapoklukar/DCA
|
e5b3f3481433306a4b33e712272f8bbf5e9d05ce
|
[
"MIT"
] | null | null | null |
experiments/vgg16/VGG16_utils.py
|
petrapoklukar/DCA
|
e5b3f3481433306a4b33e712272f8bbf5e9d05ce
|
[
"MIT"
] | null | null | null |
import pickle
import numpy as np
import os
def _analyze_query_point_assignment(
query_data_dict: dict,
init_Rdata_dict: dict,
init_Edata_dict: dict,
num_R: int,
query_point_assignment_array: np.ndarray,
root: str,
n_points_to_copy=50,
):
"""
Analyzes and visualizes qDCA results.
:param query_data_dict: raw query data.
:param init_Rdata_dict: raw R data.
:param init_Edata_dict: raw E data.
:param num_R: total number of R points.
:param query_point_assignment_array: query point assignments results.
:param root: root directory of the experiment.
:param n_points_to_copy: number of images to save.
:return: accuracy of qDCA assignments; list of (R, query) points with same label;
list of (R, query) points with different label
"""
true_query_data_labels = query_data_dict["labels"]
assigned_R = query_point_assignment_array[
query_point_assignment_array[:, 1] < num_R, 1
]
assigned_E = query_point_assignment_array[
query_point_assignment_array[:, 1] >= num_R, 1
]
assigned_R_labels = init_Rdata_dict["labels"][assigned_R]
assigned_E_labels = init_Edata_dict["labels"][assigned_E - num_R]
assigned_query_data_labels = np.empty(
shape=query_point_assignment_array.shape[0]
).astype(np.int32)
assigned_query_data_labels[
query_point_assignment_array[:, 1] < num_R
] = assigned_R_labels
assigned_query_data_labels[
query_point_assignment_array[:, 1] >= num_R
] = assigned_E_labels
accuracy = (
true_query_data_labels == assigned_query_data_labels
).sum() / assigned_query_data_labels.shape[0]
same_label_idx = np.where(true_query_data_labels == assigned_query_data_labels)[0]
wrong_label_idx = np.where(true_query_data_labels != assigned_query_data_labels)[0]
correct_pairs = []
for i in query_point_assignment_array[same_label_idx]:
query_idx, init_idx = i
if init_idx < num_R:
correct_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Rdata_dict["paths"].astype(object)[init_idx],
query_data_dict["labels"][query_idx],
init_Rdata_dict["labels"][init_idx],
]
)
else:
correct_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Edata_dict["paths"].astype(object)[init_idx - num_R],
query_data_dict["labels"][query_idx],
init_Edata_dict["labels"][init_idx - num_R],
]
)
wrong_pairs = []
for i in query_point_assignment_array[wrong_label_idx]:
query_idx, init_idx = i
if init_idx < num_R:
wrong_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Rdata_dict["paths"].astype(object)[init_idx],
query_data_dict["labels"][query_idx],
init_Rdata_dict["labels"][init_idx],
]
)
else:
wrong_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Edata_dict["paths"].astype(object)[init_idx - num_R],
query_data_dict["labels"][query_idx],
init_Edata_dict["labels"][init_idx - num_R],
]
)
with open(
os.path.join(root, "logs", "analyzed_query_point_assignments.pkl"), "wb"
) as f:
pickle.dump(
{
"accuracy": accuracy,
"same_label_idx": same_label_idx,
"wrong_label_idx": wrong_label_idx,
"correct_pairs": correct_pairs,
"wrong_pairs": wrong_pairs,
"query_point_assignment_array": query_point_assignment_array,
},
f,
)
same_label_image_path = os.path.join(root, "visualization", "same_label_images")
wrong_label_image_path = os.path.join(root, "visualization", "wrong_label_images")
if not os.path.exists(wrong_label_image_path):
os.mkdir(wrong_label_image_path)
if not os.path.exists(same_label_image_path):
os.mkdir(same_label_image_path)
for i in range(n_points_to_copy):
query_image_path, init_image_path, query_label, init_label = correct_pairs[i]
path_to_copy = os.path.join(
same_label_image_path,
"i{0}_init_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(query_label), str(init_label)
),
)
os.system("cp {0} {1}".format(init_image_path, path_to_copy))
path_to_copy2 = os.path.join(
same_label_image_path,
"i{0}_query_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(query_label), str(init_label)
),
)
os.system("cp {0} {1}".format(query_image_path, path_to_copy2))
(
w_query_image_path,
w_init_image_path,
w_query_label,
w_init_label,
) = wrong_pairs[i]
path_to_copy_w = os.path.join(
wrong_label_image_path,
"i{0}_init_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(w_query_label), str(w_init_label)
),
)
os.system("cp {0} {1}".format(w_init_image_path, path_to_copy_w))
path_to_copy_w2 = os.path.join(
wrong_label_image_path,
"i{0}_query_image_querylabel{1}_initlabel{2}.png".format(
i, w_query_label, w_init_label
),
)
os.system("cp {0} {1}".format(w_query_image_path, path_to_copy_w2))
return accuracy, correct_pairs, wrong_pairs
def _generate_query_sets(version: str, N: int = 5000):
"""
Generates query sets for qDCA experiment in Section 4.3.
:param version: either version1 (dogs vs kitchen utils) or version2 (random).
:param N: number of points to sample for R used in DCA.
"""
with open(f"representations/vgg16/{version}/Rfeatures.pkl", "rb") as f:
Rdata_v1 = pickle.load(f)
with open(f"representations/vgg16/{version}/Efeatures.pkl", "rb") as f:
Edata_v1 = pickle.load(f)
init_Ridxs = np.random.choice(
np.arange(len(Rdata_v1["feat_lin1"])), size=N, replace=False
)
query_Ridxs = np.setdiff1d(np.arange(len(Rdata_v1["feat_lin1"])), init_Ridxs)
init_Eidxs = np.random.choice(
np.arange(len(Edata_v1["feat_lin1"])), size=N, replace=False
)
query_Eidxs = np.setdiff1d(np.arange(len(Edata_v1["feat_lin1"])), init_Eidxs)
with open(f"representations/vgg16/{version}/sampled_Rfeatures.pkl", "wb") as f:
pickle.dump(
{
"feat_lin1": Rdata_v1["feat_lin1"][init_Ridxs],
"feat_lin2": Rdata_v1["feat_lin2"][init_Ridxs],
"labels": Rdata_v1["labels"][init_Ridxs],
"paths": np.array(Rdata_v1["paths"])[init_Ridxs],
"init_Ridx": init_Ridxs,
"query_Ridx": query_Ridxs,
},
f,
)
with open(f"representations/vgg16/{version}/sampled_Efeatures.pkl", "wb") as f:
pickle.dump(
{
"feat_lin1": Edata_v1["feat_lin1"][init_Eidxs],
"feat_lin2": Edata_v1["feat_lin2"][init_Eidxs],
"labels": Edata_v1["labels"][init_Eidxs],
"paths": np.array(Edata_v1["paths"])[init_Eidxs],
"init_Eidx": init_Eidxs,
"query_Eidx": query_Eidxs,
},
f,
)
with open(f"representations/vgg16/{version}/query_features.pkl", "wb") as f:
pickle.dump(
{
"feat_lin1": np.concatenate(
[
Rdata_v1["feat_lin1"][query_Ridxs],
Edata_v1["feat_lin1"][query_Eidxs],
]
),
"feat_lin2": np.concatenate(
[
Rdata_v1["feat_lin2"][query_Ridxs],
Edata_v1["feat_lin2"][query_Eidxs],
]
),
"labels": np.concatenate(
[Rdata_v1["labels"][query_Ridxs], Edata_v1["labels"][query_Eidxs]]
),
"paths": np.concatenate(
[
np.array(Rdata_v1["paths"])[query_Ridxs],
np.array(Edata_v1["paths"])[query_Eidxs],
]
),
"init_Eidxs": init_Eidxs,
"query_Eidxs": query_Eidxs,
"init_Ridxs": init_Ridxs,
"query_Ridxs": query_Ridxs,
},
f,
)
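# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): regenerates the qDCA
# query sets for the first split described in the docstring above; assumes the
# representations/vgg16/version1/ pickles already exist.
if __name__ == "__main__":
    _generate_query_sets("version1", N=5000)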
| 36.815574
| 87
| 0.571969
| 1,088
| 8,983
| 4.348346
| 0.132353
| 0.043754
| 0.059184
| 0.068696
| 0.596914
| 0.512788
| 0.447263
| 0.395477
| 0.313464
| 0.301205
| 0
| 0.015974
| 0.317043
| 8,983
| 243
| 88
| 36.967078
| 0.755175
| 0.075365
| 0
| 0.267327
| 0
| 0
| 0.13352
| 0.06026
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009901
| false
| 0
| 0.014851
| 0
| 0.029703
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37e640e884ea7efdcb34d9809f129977c3b8f796
| 2,905
|
py
|
Python
|
back-end/RawFishSheep/app_cart/views.py
|
Coldarra/RawFishSheep
|
266bd9d8d9832d5c692b63e7515d45fdc4f6acc4
|
[
"Apache-2.0"
] | null | null | null |
back-end/RawFishSheep/app_cart/views.py
|
Coldarra/RawFishSheep
|
266bd9d8d9832d5c692b63e7515d45fdc4f6acc4
|
[
"Apache-2.0"
] | 4
|
2021-10-06T22:49:52.000Z
|
2022-02-27T12:28:18.000Z
|
back-end/RawFishSheep/app_cart/views.py
|
Coldarra/RawFishSheep
|
266bd9d8d9832d5c692b63e7515d45fdc4f6acc4
|
[
"Apache-2.0"
] | null | null | null |
from .models import *
from decorator import *
from app_goods.views import getGoodsByID
# Query all cart entries belonging to the current user
def getCartByUser(user_id=None):
if user_id == None:
raise ParamException()
return Cart.objects.filter(user_id=user_id)
def getSelectedCart(user_id=None):
if user_id == None:
raise ParamException()
return Cart.objects.filter(user_id=user_id, selection="1")
def getCartByGoods(user_id=None, goods_id=None):
if None in [user_id, goods_id]:
raise ParamException()
if Cart.objects.filter(user_id=user_id, goods_id=goods_id).count() <= 0:
raise RFSException("40012", "无效购物车商品")
return Cart.objects.get(user_id=user_id, goods_id=goods_id)
def checkCartByGoods(user_id, goods_id):
return Cart.objects.filter(user_id=user_id, goods_id=goods_id).count() > 0
def createCart(user_id=None, goods_id=None, amount=None):
if None in [user_id, goods_id, amount]:
raise ParamException()
if checkCartByGoods(user_id, goods_id):
return appendToCart(user_id, goods_id, amount)
return Cart.objects.create(
user_id=user_id, goods_id=goods_id, amount=amount)
def appendToCart(user_id=None, goods_id=None, amount=None):
if None in [user_id, goods_id, amount]:
raise ParamException()
amount = int(amount)
if getGoodsByID(goods_id).remain < amount:
raise RFSException("40013", "商品余辆不足")
if checkCartByGoods(user_id, goods_id):
cart_obj = getCartByGoods(user_id, goods_id)
cart_obj.amount += amount
cart_obj.save()
return cart_obj
else:
return createCart(user_id, goods_id, amount)
def deleteCartByGoods(user_id=None, goods_id=None):
if None in [user_id, goods_id]:
raise ParamException()
Cart.objects.filter(user_id=user_id,
goods_id=goods_id).delete()
def deleteCartByUser(user_id=None):
if user_id == None:
raise ParamException()
Cart.objects.filter(user_id=user_id).delete()
def deleteSelectedCart(user_id=None):
if user_id == None:
raise ParamException()
Cart.objects.filter(user_id=user_id, selection="1").delete()
def setCartAmount(user_id=None, goods_id=None, amount=None):
if None in [user_id, goods_id, amount]:
raise ParamException()
amount = int(amount)
cart = getCartByGoods(user_id, goods_id)
if amount <= 0:
raise RFSException("40033", "购物车商品数量非法")
cart.amount = amount
cart.save()
return cart
def setCartSelection(user_id=None, goods_id=None, selection=None):
# Check that the parameters are valid
if None in [user_id, goods_id, selection]:
raise ParamException()
cart = getCartByGoods(user_id, goods_id)
# Check that the item's selection state is valid
if cart.selection != "0" and cart.selection != "1":
raise RFSException("40033", "状态非法")
# Update the item's selection state
cart.selection = selection
cart.save()
return cart
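# Usage sketch (illustrative IDs, not part of the original module): a typical
# add-to-cart flow chains these helpers, e.g.
#
#   cart = createCart(user_id=1, goods_id=42, amount=2)   # create or append
#   cart = setCartAmount(user_id=1, goods_id=42, amount=3)
#   setCartSelection(user_id=1, goods_id=42, selection="1")
#   deleteCartByGoods(user_id=1, goods_id=42)
#
# Each helper raises ParamException when a required argument is missing and
# RFSException for an invalid cart state.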
| 29.05
| 78
| 0.685714
| 393
| 2,905
| 4.857506
| 0.145038
| 0.144578
| 0.117863
| 0.136197
| 0.614458
| 0.572027
| 0.481928
| 0.470927
| 0.440545
| 0.3934
| 0
| 0.011704
| 0.205852
| 2,905
| 99
| 79
| 29.343434
| 0.815778
| 0.014114
| 0
| 0.408451
| 0
| 0
| 0.017489
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15493
| false
| 0
| 0.042254
| 0.014085
| 0.323944
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37e6a1c12c2e7ca4fa6cc0bc35bd20189bfd7063
| 7,704
|
py
|
Python
|
extensions/catsum.py
|
johannesgiorgis/my-timewarrior-extensions
|
1a8b83359298d3cbf002148f02b5ef6f1693a797
|
[
"MIT"
] | null | null | null |
extensions/catsum.py
|
johannesgiorgis/my-timewarrior-extensions
|
1a8b83359298d3cbf002148f02b5ef6f1693a797
|
[
"MIT"
] | 1
|
2022-02-14T16:53:54.000Z
|
2022-02-14T16:53:54.000Z
|
extensions/catsum.py
|
xoiopure/my-timewarrior-extensions
|
1a8b83359298d3cbf002148f02b5ef6f1693a797
|
[
"MIT"
] | 1
|
2021-08-29T00:32:18.000Z
|
2021-08-29T00:32:18.000Z
|
#!/usr/bin/env python3
###############################################################################
#
# Category Summaries
#
#
###############################################################################
import datetime
import io
import json
import logging
import pprint
import sys
from typing import Dict, Any
from dateutil import tz
# set logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create handler
c_handler = logging.StreamHandler()
c_handler.setLevel(logging.INFO)
# Create formatters and add it to handlers
LOG_FORMAT = "[%(asctime)s - %(levelname)-8s - %(module)s:%(name)s ] %(message)s"
c_format = logging.Formatter(LOG_FORMAT)
c_handler.setFormatter(c_format)
# Add handlers to the logger
logger.addHandler(c_handler)
DATE_FORMAT = "%Y%m%dT%H%M%SZ"
# TODO: Convert to defaultdict
# https://www.accelebrate.com/blog/using-defaultdict-python
# https://stackoverflow.com/questions/9358983/dictionaries-and-default-values
# https://docs.python.org/2/library/collections.html#collections.defaultdict
CATEGORIES: dict = {
"PT": "Personal Time",
"PW": "Planned Work",
"UW": "Unplanned Work",
"OW": "Other Work",
}
def main():
print("~" * 100)
totals = calculate_totals(sys.stdin)
# print(totals)
if not totals:
sys.exit(0)
categories_total = extract_categories(totals)
# All Categories Statistics
category_percent_breakdown = get_category_percent_breakdown(categories_total)
formatted_category_breakdown = format_category_breakdown(category_percent_breakdown)
display_category_breakdown(formatted_category_breakdown)
# remove personal category
categories_total.pop("Personal Time", None)
work_category_percent_breakdown = get_category_percent_breakdown(categories_total)
formatted_work_category_breakdown = format_category_breakdown(work_category_percent_breakdown)
display_category_breakdown(formatted_work_category_breakdown)
# formatted_category_breakdown.pop("Personal Time", None)
# formatted
# print(type(formatted_category_breakdown))
# print(formatted_category_breakdown.keys())
def format_seconds(seconds: int) -> str:
"""
Convert seconds to a formatted string
Convert seconds: 3661
To formatted: " 1:01:01"
"""
# print(seconds, type(seconds))
hours = seconds // 3600
minutes = seconds % 3600 // 60
seconds = seconds % 60
return f"{hours:4d}:{minutes:02d}:{seconds:02d}"
def calculate_totals(input_stream: io.TextIOWrapper) -> Dict[str, datetime.timedelta]:
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
# Extract the configuration settings.
header = 1
configuration = dict()
body = ""
for line in input_stream:
if header:
if line == "\n":
header = 0
else:
fields = line.strip().split(": ", 2)
if len(fields) == 2:
configuration[fields[0]] = fields[1]
else:
configuration[fields[0]] = ""
else:
body += line
# Sum the seconds tracked by tag
totals = dict()
untagged = None
j = json.loads(body)
for object in j:
start = datetime.datetime.strptime(object["start"], DATE_FORMAT)
if "end" in object:
end = datetime.datetime.strptime(object["end"], DATE_FORMAT)
else:
end = datetime.datetime.utcnow()
tracked = end - start
if "tags" not in object or object["tags"] == []:
if untagged is None:
untagged = tracked
else:
untagged += tracked
else:
for tag in object["tags"]:
if tag in totals:
totals[tag] += tracked
else:
totals[tag] = tracked
if "temp.report.start" not in configuration:
print("There is no data in the database")
return totals
start_utc = datetime.datetime.strptime(configuration["temp.report.start"], DATE_FORMAT)
start_utc = start_utc.replace(tzinfo=from_zone)
start = start_utc.astimezone(to_zone)
if "temp.report.end" in configuration:
end_utc = datetime.datetime.strptime(configuration["temp.report.end"], DATE_FORMAT)
end_utc = end_utc.replace(tzinfo=from_zone)
end = end_utc.astimezone(to_zone)
else:
end = datetime.datetime.now()
if len(totals) == 0 and untagged is None:
print(f"No data in the range {start:%Y-%m-%d %H:%M:%S} - {end:%Y-%m-%d %H:%M:%S}")
return totals
print(f"\nCategory Summary Data for {start:%Y-%m-%d %H:%M:%S} - {end:%Y-%m-%d %H:%M:%S}")
return totals
def extract_categories(totals: Dict[str, datetime.timedelta]) -> Dict[str, datetime.timedelta]:
categories_total = {}
for category, category_full_name in CATEGORIES.items():
categories_total[category_full_name] = totals.get(category, datetime.timedelta(0))
return categories_total
def get_category_percent_breakdown(
category_run_times: Dict[str, datetime.timedelta]
) -> Dict[str, Any]:
logger.debug("Getting category percentage breakdown...")
total_time = sum([run_time.total_seconds() for run_time in category_run_times.values()])
logger.debug(f"Total Time:{total_time}")
category_percentage_breakdown: dict = {}
for category, run_time in category_run_times.items():
category_percent = run_time.total_seconds() / total_time
category_percentage_breakdown[category] = {
"percent": category_percent,
"duration": run_time.total_seconds() / 60,
"run_time": format_seconds(int(run_time.total_seconds())),
}
# add total time statistics
category_percentage_breakdown["Total"] = {
"percent": total_time / total_time,
"duration": total_time / 60,
"run_time": format_seconds(int(total_time)),
}
logger.debug(pprint.pformat(category_percentage_breakdown))
return category_percentage_breakdown
def format_category_breakdown(category_breakdown: dict) -> Dict[str, Any]:
# print(type(category_breakdown))
# pprint.pprint(category_breakdown)
formatted_category_breakdown = {}
for category, category_statistics in category_breakdown.items():
formatted_category_breakdown[category] = {
# convert duration to mins
"duration": round(category_statistics["duration"], 2),
"percent": round(category_statistics["percent"] * 100, 2),
"run_time": category_statistics["run_time"],
}
return formatted_category_breakdown
def display_category_breakdown(category_breakdown: dict, title: str = "Category Breakdown"):
# Determine largest width
max_width = len("Category")
for category_statistics in category_breakdown.values():
if len(category_statistics) > max_width:
max_width = len(category_statistics)
print_dotted_line()
print(f"\t\t{title.capitalize():>{max_width}}")
print(
f"{'Category':{max_width}}\t"
f"{'Duration':{max_width}}\t"
f"{'Run_Time':>{max_width + 2}}\t"
f"{'Percent':{max_width + 1}}"
)
for category, category_statistics in category_breakdown.items():
print(
f"{category:{max_width}}\t"
f"{category_statistics['duration']:{max_width}}\t"
f"{category_statistics['run_time']:}\t"
f"{category_statistics['percent']}%"
)
print_dotted_line()
def print_dotted_line(width: int = 72):
"""Print a dotted (rather 'dashed') line"""
print("-" * width)
if __name__ == "__main__":
main()
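# Usage sketch (illustrative data, not produced by the original script): the
# extension reads a timewarrior export from stdin, i.e. "key: value" header
# lines, a blank line, then a JSON array of intervals, e.g.
#
#   import io
#   sample_export = (
#       "temp.report.start: 20210101T080000Z\n"
#       "temp.report.end: 20210101T120000Z\n"
#       "\n"
#       '[{"start": "20210101T080000Z", "end": "20210101T100000Z", "tags": ["PW"]},\n'
#       ' {"start": "20210101T100000Z", "end": "20210101T110000Z", "tags": ["UW"]}]\n'
#   )
#   totals = calculate_totals(io.StringIO(sample_export))
#   # totals == {"PW": timedelta(hours=2), "UW": timedelta(hours=1)}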
| 31.57377
| 98
| 0.641874
| 899
| 7,704
| 5.292547
| 0.221357
| 0.08575
| 0.043716
| 0.020177
| 0.269021
| 0.166036
| 0.12232
| 0.067255
| 0.044977
| 0.044977
| 0
| 0.010339
| 0.221573
| 7,704
| 243
| 99
| 31.703704
| 0.783058
| 0.123572
| 0
| 0.10828
| 0
| 0.019108
| 0.144562
| 0.047575
| 0
| 0
| 0
| 0.004115
| 0
| 1
| 0.050955
| false
| 0
| 0.050955
| 0
| 0.146497
| 0.082803
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37e90c8995ed6a6f4dbc2bb7d6d0c967a69b04ab
| 3,881
|
py
|
Python
|
resources/hotel.py
|
jnascimentocode/REST-API-COM-PYTHON-E-FLASK
|
c55dca53f3a864c6c1aba8bbde63dcadc3c19347
|
[
"MIT"
] | null | null | null |
resources/hotel.py
|
jnascimentocode/REST-API-COM-PYTHON-E-FLASK
|
c55dca53f3a864c6c1aba8bbde63dcadc3c19347
|
[
"MIT"
] | null | null | null |
resources/hotel.py
|
jnascimentocode/REST-API-COM-PYTHON-E-FLASK
|
c55dca53f3a864c6c1aba8bbde63dcadc3c19347
|
[
"MIT"
] | null | null | null |
from flask_restful import Resource, reqparse
from models.hotel import HotelModel
from flask_jwt_extended import jwt_required
from models.site import SiteModel
from resources.filtros import *
import sqlite3
path_params = reqparse.RequestParser()
path_params.add_argument('cidade', type=str)
path_params.add_argument('estrelas_min', type=float)
path_params.add_argument('estrelas_max', type=float)
path_params.add_argument('diaria_min', type=float)
path_params.add_argument('diaria_max', type=float)
path_params.add_argument('limit', type=float)
path_params.add_argument('offset', type=float)
class Hoteis(Resource):
def get(self):
connection = sqlite3.connect('banco.db')
cursor = connection.cursor()
dados = path_params.parse_args()
dados_validos = {chave:dados[chave] for chave in dados if dados[chave] is not None}
parametros = normalize_path_params(**dados_validos)
if not parametros.get('cidade'):
tupla = tuple([parametros[chave] for chave in parametros])
resultado = cursor.execute(consulta_sem_cidade, tupla)
else:
tupla = tuple([parametros[chave] for chave in parametros])
resultado = cursor.execute(consulta_com_cidade, tupla)
hoteis = []
for linha in resultado:
hoteis.append({
'hotel_id': linha[0],
'nome': linha[1],
'estrelas': linha[2],
'diaria': linha[3],
'cidade': linha[4],
'site_id': linha[5]
})
return {'hoteis': hoteis}
class Hotel(Resource):
argumentos = reqparse.RequestParser()
argumentos.add_argument('nome', type=str, required=True, help="The field 'nome' cannot be left blank")
argumentos.add_argument('estrelas', type=float, required=True, help="The field 'estrelas' cannot be left blank")
argumentos.add_argument('diaria')
argumentos.add_argument('cidade')
argumentos.add_argument('site_id', type=int, required=True, help="Every hotel needs to be linked with site")
def get(self, hotel_id):
hotel = HotelModel.find_hotel(hotel_id)
if hotel:
return hotel.json()
return {'message': 'Hotel not found.'}, 404
@jwt_required()
def post(self, hotel_id):
if HotelModel.find_hotel(hotel_id):
return {"message": "Hotel id '{}' already exists.".format(hotel_id)}, 400
dados = Hotel.argumentos.parse_args()
hotel = HotelModel(hotel_id, **dados)
if not SiteModel.find_by_id(dados.get('site_id')):
return {'message': 'The hotel must be associated to a valid site id'}, 400
try:
hotel.save_hotel()
except:
return {'message': 'An internal error occurred trying to save hotel.'}, 500
return hotel.json()
@jwt_required()
def put(self, hotel_id):
dados = Hotel.argumentos.parse_args()
hotel_encontrado = HotelModel.find_hotel(hotel_id)
if hotel_encontrado:
hotel_encontrado.update_hotel(**dados)
hotel_encontrado.save_hotel()
return hotel_encontrado.json(), 200
hotel = HotelModel(hotel_id, **dados)
try:
hotel.save_hotel()
except:
return {'message': 'An internal error occurred trying to save hotel.'}, 500
return hotel.json(), 201 #created
@jwt_required()
def delete(self, hotel_id):
hotel = HotelModel.find_hotel(hotel_id)
if hotel:
try:
hotel.delete_hotel()
except:
return {'message': 'An error occurred trying to delete hotel.'}, 500
return {'message': 'Hotel deleted.'}
return {'message': 'Hotel not found.'}, 404
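# Usage sketch (assumed routes, not part of the original module): the
# resources are meant to be registered on a flask_restful Api object, e.g.
#
#   from flask import Flask
#   from flask_restful import Api
#
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(Hoteis, '/hoteis')
#   api.add_resource(Hotel, '/hoteis/<string:hotel_id>')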
| 34.345133
| 116
| 0.631538
| 461
| 3,881
| 5.156182
| 0.26898
| 0.038284
| 0.038284
| 0.061843
| 0.424485
| 0.346235
| 0.280606
| 0.174169
| 0.142196
| 0.142196
| 0
| 0.012182
| 0.259727
| 3,881
| 112
| 117
| 34.651786
| 0.815176
| 0.001804
| 0
| 0.303371
| 0
| 0
| 0.152713
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05618
| false
| 0
| 0.078652
| 0
| 0.314607
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37eaf107409d84d5c2fde68eaa08ffa5c4d85c18
| 2,413
|
py
|
Python
|
testing/berge_equilibrium_cndp.py
|
Eliezer-Beczi/CNDP
|
73decdfaef1c9e546ad94dd7448c89078af27034
|
[
"MIT"
] | 1
|
2021-08-13T09:14:40.000Z
|
2021-08-13T09:14:40.000Z
|
testing/berge_equilibrium_cndp.py
|
Eliezer-Beczi/CNDP
|
73decdfaef1c9e546ad94dd7448c89078af27034
|
[
"MIT"
] | null | null | null |
testing/berge_equilibrium_cndp.py
|
Eliezer-Beczi/CNDP
|
73decdfaef1c9e546ad94dd7448c89078af27034
|
[
"MIT"
] | null | null | null |
import networkx as nx
import utils.connectivity_metrics as connectivity_metric
from platypus import NSGAII, EpsMOEA, NSGAIII, EpsNSGAII, Problem, Dominance, Subset, TournamentSelector, \
HypervolumeFitnessEvaluator, Archive
import statistics
import multiprocessing as mp
G = nx.read_adjlist("input/Ventresca/BarabasiAlbert_n500m1.txt")
k = 50
num_of_tests = 10
def get_pairwise_connectivity(exclude=None):
if exclude is None:
exclude = {}
S = set(exclude)
subgraph = nx.subgraph_view(G, filter_node=lambda n: n not in S)
return connectivity_metric.pairwise_connectivity(subgraph)
class CNDP(Problem):
def __init__(self):
super(CNDP, self).__init__(1, 1)
self.types[:] = Subset(list(G), k)
def evaluate(self, solution):
solution.objectives[0] = get_pairwise_connectivity(solution.variables[0])
class BergeDominance(Dominance):
def __init__(self):
super(BergeDominance, self).__init__()
def compare(self, x, y):
k1 = 0
k2 = 0
nodes_x = x.variables[0][:]
nodes_y = y.variables[0][:]
metric_x = x.objectives[0]
metric_y = y.objectives[0]
for i in range(k):
tmp = nodes_y[i]
nodes_y[i] = nodes_x[i]
if get_pairwise_connectivity(nodes_y) < metric_x:
k1 += 1
nodes_y[i] = tmp
for i in range(k):
tmp = nodes_x[i]
nodes_x[i] = nodes_y[i]
if get_pairwise_connectivity(nodes_x) < metric_y:
k2 += 1
nodes_x[i] = tmp
if k1 < k2:
return -1
elif k1 > k2:
return 1
else:
return 0
class BergeArchive(Archive):
def __init__(self):
super(BergeArchive, self).__init__(dominance=BergeDominance())
def get_critical_nodes():
algorithm = NSGAII(CNDP(), selector=TournamentSelector(dominance=BergeDominance()), archive=BergeArchive())
algorithm.run(1000)
fitness = algorithm.result[0].objectives[0]
print(fitness)
return fitness
if __name__ == '__main__':
pool = mp.Pool(mp.cpu_count())
samples = pool.starmap_async(get_critical_nodes, [() for _ in range(num_of_tests)]).get()
pool.close()
avg = sum(samples) / len(samples)
stdev = statistics.stdev(samples)
print(f"Average: {avg}")
print(f"Standard Deviation: {stdev}")
| 25.135417
| 111
| 0.63075
| 300
| 2,413
| 4.823333
| 0.356667
| 0.024879
| 0.06358
| 0.033172
| 0.070491
| 0.070491
| 0.027643
| 0
| 0
| 0
| 0
| 0.020705
| 0.259428
| 2,413
| 95
| 112
| 25.4
| 0.789032
| 0
| 0
| 0.075758
| 0
| 0
| 0.037298
| 0.016991
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106061
| false
| 0
| 0.075758
| 0
| 0.30303
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37eb3791b2e71065562272b988c38a600939b27b
| 3,087
|
py
|
Python
|
Charm/models/risk_functions.py
|
TanyaAdams1/Charm
|
cc6dd64d01f8cb4cf0eb92dadefcb7575d75ec9d
|
[
"BSD-3-Clause"
] | 17
|
2018-04-23T20:17:58.000Z
|
2021-04-12T19:28:40.000Z
|
Charm/models/risk_functions.py
|
TanyaAdams1/Charm
|
cc6dd64d01f8cb4cf0eb92dadefcb7575d75ec9d
|
[
"BSD-3-Clause"
] | 1
|
2020-02-01T23:57:28.000Z
|
2020-02-04T18:03:17.000Z
|
Charm/models/risk_functions.py
|
TanyaAdams1/Charm
|
cc6dd64d01f8cb4cf0eb92dadefcb7575d75ec9d
|
[
"BSD-3-Clause"
] | 3
|
2018-04-19T19:24:38.000Z
|
2020-11-06T00:33:53.000Z
|
import numpy as np
from mcerp import *
from uncertainties.core import AffineScalarFunc
class RiskFunction(object):
def get_risk(self, bar, p):
""" Computes risk for perf array w.r.t. bar.
Args:
bar: reference performance bar.
perfs: performance array-like.
Returns:
single float (mean risk)
"""
if isinstance(p, UncertainFunction):
return self.func(bar, p._mcpts)
elif isinstance(p, AffineScalarFunc):
#TODO: what should we return? How to define risk analytically?
raise ValueError('Risk -- Undefined behavior.')
else:
return self.func(bar, [p])
def get_name(self):
name = type(self).__name__
return name[:name.find('Function')]
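# Dollar-value risk: each sample is binned by its performance relative to the bar (tiers worth 100-1000), and the returned risk is 1000 minus the mean tier value, so better performance gives lower risk.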
class DollarValueFunction(RiskFunction):
def dollar_function(self, bar, perf):
value = .0
for p in perf:
normed_p = float(p)/bar
if normed_p < .6:
value += 100
elif normed_p < .8:
value += 200
elif normed_p < .9:
value += 300
elif normed_p < 1.0:
value += 600
else:
value += 1000
return 1000 - value/len(perf)
def __init__(self):
self.func = self.dollar_function
class StepRiskFunction(RiskFunction):
def step_function(self, bar, perf):
return float(len([p for p in perf if p < bar]))/len(perf)
def __init__(self):
self.func = self.step_function
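# The linear, quadratic and exponential variants below penalize only shortfalls (samples below the bar) and average the penalty over the shortfall samples.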
class LinearRiskFunction(RiskFunction):
def linear_cutoff_function(self, bar, perf):
# risk = a * (perf-bar)
a = 1
risk = []
for p in perf:
base = bar - p
if base > 0:
risk.append(a * base)
return np.mean(risk) if risk else 0
def __init__(self):
self.func = self.linear_cutoff_function
class QuadraticRiskFunction(RiskFunction):
def quadratic_cutoff_function(self, bar, perf):
# risk = a * (perf-bar)**2 + b * (perf-bar) + c
risk = []
a = 4
b = 0
c = 0
for p in perf:
base = (bar - p)/bar
if base > 0:
risk.append(a*base**2 + b*base + c)
return np.mean(risk) if risk else 0
def __init__(self):
self.func = self.quadratic_cutoff_function
class ExponentialRiskFunction(RiskFunction):
def exponential_cutoff_function(self, bar, perf):
# risk = a ** (perf-bar)
risk = []
a = 2.718
for p in perf:
base = (bar - p)/bar
if base > 0:
risk.append(a ** base)
return np.mean(risk) if risk else 0
def __init__(self):
self.func = self.exponential_cutoff_function
class RiskFunctionCollection(object):
funcs = {'step': StepRiskFunction(),
'linear': LinearRiskFunction(),
'quad': QuadraticRiskFunction(),
'exp': ExponentialRiskFunction(),
'dollar': DollarValueFunction()}
| 29.4
| 74
| 0.551344
| 357
| 3,087
| 4.633053
| 0.268908
| 0.033857
| 0.045345
| 0.057437
| 0.299274
| 0.270254
| 0.270254
| 0.259371
| 0.223096
| 0.155985
| 0
| 0.020833
| 0.346939
| 3,087
| 104
| 75
| 29.682692
| 0.799603
| 0.100421
| 0
| 0.282051
| 0
| 0
| 0.021339
| 0
| 0
| 0
| 0
| 0.009615
| 0
| 1
| 0.153846
| false
| 0
| 0.038462
| 0.012821
| 0.397436
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37ebc35183b9314a344aaf25eb9e7de4a348916a
| 2,149
|
py
|
Python
|
spacy/tests/tagger/test_lemmatizer.py
|
TerminalWitchcraft/spaCy
|
29adbef095c04e21a691e912671e4ec21082b047
|
[
"MIT"
] | 1
|
2018-09-24T17:00:23.000Z
|
2018-09-24T17:00:23.000Z
|
spacy/tests/tagger/test_lemmatizer.py
|
TerminalWitchcraft/spaCy
|
29adbef095c04e21a691e912671e4ec21082b047
|
[
"MIT"
] | null | null | null |
spacy/tests/tagger/test_lemmatizer.py
|
TerminalWitchcraft/spaCy
|
29adbef095c04e21a691e912671e4ec21082b047
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
from ...lemmatizer import read_index, read_exc
import pytest
@pytest.mark.models
@pytest.mark.parametrize('text,lemmas', [("aardwolves", ["aardwolf"]),
("aardwolf", ["aardwolf"]),
("planets", ["planet"]),
("ring", ["ring"]),
("axes", ["axis", "axe", "ax"])])
def test_tagger_lemmatizer_noun_lemmas(lemmatizer, text, lemmas):
if lemmatizer is None:
return None
assert lemmatizer.noun(text) == set(lemmas)
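# The remaining model-dependent tests bail out early when the lemmatizer or model path fixture is unavailable, so the suite still passes without installed models.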
@pytest.mark.models
def test_tagger_lemmatizer_base_forms(lemmatizer):
if lemmatizer is None:
return None
assert lemmatizer.noun('dive', {'number': 'sing'}) == set(['dive'])
assert lemmatizer.noun('dive', {'number': 'plur'}) == set(['diva'])
@pytest.mark.models
def test_tagger_lemmatizer_base_form_verb(lemmatizer):
if lemmatizer is None:
return None
assert lemmatizer.verb('saw', {'verbform': 'past'}) == set(['see'])
@pytest.mark.models
def test_tagger_lemmatizer_punct(lemmatizer):
if lemmatizer is None:
return None
assert lemmatizer.punct('“') == set(['"'])
assert lemmatizer.punct('“') == set(['"'])
@pytest.mark.models
def test_tagger_lemmatizer_read_index(path):
if path is not None:
with (path / 'wordnet' / 'index.noun').open() as file_:
index = read_index(file_)
assert 'man' in index
assert 'plantes' not in index
assert 'plant' in index
@pytest.mark.models
@pytest.mark.parametrize('text,lemma', [("was", "be")])
def test_tagger_lemmatizer_read_exc(path, text, lemma):
if path is not None:
with (path / 'wordnet' / 'verb.exc').open() as file_:
exc = read_exc(file_)
assert exc[text] == (lemma,)
@pytest.mark.models
def test_tagger_lemmatizer_lemma_assignment(EN):
text = "Bananas in pyjamas are geese."
doc = EN.tokenizer(text)
assert all(t.lemma_ == '' for t in doc)
EN.tagger(doc)
assert all(t.lemma_ != '' for t in doc)
| 30.7
| 74
| 0.604467
| 257
| 2,149
| 4.88716
| 0.29572
| 0.071656
| 0.089172
| 0.128185
| 0.539809
| 0.48328
| 0.48328
| 0.324841
| 0.170382
| 0
| 0
| 0.00062
| 0.249884
| 2,149
| 69
| 75
| 31.144928
| 0.778536
| 0.006049
| 0
| 0.365385
| 0
| 0
| 0.106842
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.134615
| false
| 0
| 0.057692
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37ee0cfd689d053055f9512b80721598ce49ab1a
| 1,845
|
py
|
Python
|
AlgoNet2/Helper.py
|
Bhaney44/AlgorandDevelopment
|
309e68337227af879f5c4e92c72156928a39fe32
|
[
"MIT"
] | null | null | null |
AlgoNet2/Helper.py
|
Bhaney44/AlgorandDevelopment
|
309e68337227af879f5c4e92c72156928a39fe32
|
[
"MIT"
] | 1
|
2021-04-24T19:24:05.000Z
|
2021-04-28T05:32:40.000Z
|
AlgoNet2/Helper.py
|
Bhaney44/AlgorandDevelopment
|
309e68337227af879f5c4e92c72156928a39fe32
|
[
"MIT"
] | 1
|
2022-01-17T18:00:56.000Z
|
2022-01-17T18:00:56.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout
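# Plots training/validation loss and accuracy curves from a Keras History object (typically the value returned by model.fit()).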
def visualize_training_results(results):
"""
Plots the loss and accuracy for the training and testing data
"""
history = results.history
plt.figure(figsize=(12,4))
plt.plot(history['val_loss'])
plt.plot(history['loss'])
plt.legend(['val_loss', 'loss'])
plt.title('Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
plt.figure(figsize=(12,4))
plt.plot(history['val_accuracy'])
plt.plot(history['accuracy'])
plt.legend(['val_accuracy', 'accuracy'])
plt.title('Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show()
def split_sequence(seq, n_steps_in, n_steps_out):
"""
Splits the univariate time sequence
"""
X, y = [], []
for i in range(len(seq)):
end = i + n_steps_in
out_end = end + n_steps_out
if out_end > len(seq):
break
seq_x, seq_y = seq[i:end], seq[end:out_end]
X.append(seq_x)
y.append(seq_y)
return np.array(X), np.array(y)
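# layer_maker stacks n_layers LSTM layers on a fresh Sequential model, adding a Dropout layer after every 'drop'-th layer; when drop is None the modulo raises and is silently swallowed, so no Dropout layers are added.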
def layer_maker(n_layers, n_nodes, activation, drop=None, d_rate=.5):
"""
Create a specified number of hidden layers for an RNN
Optionally adds a Dropout layer after every Nth hidden layer to help prevent overfitting
"""
model = Sequential()
# Creating the specified number of hidden layers with the specified number of nodes
for x in range(1,n_layers+1):
model.add(LSTM(n_nodes, activation=activation, return_sequences=True))
# Adds a Dropout layer after every Nth hidden layer (the 'drop' variable)
try:
if x % drop == 0:
model.add(Dropout(d_rate))
except:
pass
| 27.954545
| 101
| 0.615176
| 253
| 1,845
| 4.367589
| 0.395257
| 0.031674
| 0.050679
| 0.032579
| 0.161086
| 0.065158
| 0.065158
| 0.065158
| 0.065158
| 0
| 0
| 0.007391
| 0.266667
| 1,845
| 65
| 102
| 28.384615
| 0.809313
| 0.21897
| 0
| 0.146341
| 0
| 0
| 0.071942
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0.02439
| 0.073171
| 0
| 0.170732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37ef3e58f557478ce40c98955a961b550b4256ca
| 14,120
|
py
|
Python
|
apex/contrib/multihead_attn/self_multihead_attn_func.py
|
Muflhi01/apex
|
79c018776129aad13abeb4ce63d24e1fbb4cd29e
|
[
"BSD-3-Clause"
] | 6,523
|
2018-04-25T17:35:27.000Z
|
2022-03-31T22:49:45.000Z
|
apex/contrib/multihead_attn/self_multihead_attn_func.py
|
Muflhi01/apex
|
79c018776129aad13abeb4ce63d24e1fbb4cd29e
|
[
"BSD-3-Clause"
] | 1,100
|
2018-05-18T00:03:34.000Z
|
2022-03-30T22:00:33.000Z
|
apex/contrib/multihead_attn/self_multihead_attn_func.py
|
Muflhi01/apex
|
79c018776129aad13abeb4ce63d24e1fbb4cd29e
|
[
"BSD-3-Clause"
] | 1,057
|
2018-05-07T13:53:04.000Z
|
2022-03-31T09:18:47.000Z
|
import torch
import torch.nn.functional as F
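# Fused self-attention as a custom autograd Function: forward runs the QKV input projection, scaled dot-product attention with optional masking and dropout, and the output projection; backward recomputes all gradients explicitly with batched GEMMs.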
class SelfAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
scale,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
mask,
is_additive_mask,
dropout_prob,
):
use_biases_t = torch.tensor([input_biases is not None])
heads_t = torch.tensor([heads])
scale_t = torch.tensor([scale])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
head_dim = inputs.size(2) // heads
# Input Linear GEMM
# input1: (activations) [seql_q, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)] (transpose [0,1])
# output: [seql_q, seqs, embed_dim*3]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim*3 ) = (seql_q*seqs x embed_dim*3)
if use_biases_t[0]:
input_lin_results = torch.addmm(
input_biases,
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
input_weights.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
input_lin_results = torch.mm(
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)), input_weights.transpose(0, 1)
)
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1), input_weights.size(0))
# Slice out q,k,v from one big Input Linear output (should only impact metadata, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads, 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
# Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul1_results = torch.empty(
(queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype, device=torch.device("cuda")
)
matmul1_results = torch.baddbmm(
matmul1_results,
queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
out=matmul1_results,
beta=0.0,
alpha=scale_t[0],
)
if mask is not None:
# Self Attention Time Mask
if use_time_mask:
assert len(mask.size()) == 2, "Timing mask is not 2D!"
assert mask.size(0) == mask.size(1), "Sequence length should match!"
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask, float("-inf"))
# Key Padding Mask
else:
batches, seql_q, seql_k = matmul1_results.size()
seqs = int(batches / heads)
matmul1_results = matmul1_results.view(seqs, heads, seql_q, seql_k)
if is_additive_mask:
matmul1_results = matmul1_results + mask.unsqueeze(1).unsqueeze(2)
else:
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask.unsqueeze(1).unsqueeze(2), float("-inf"))
matmul1_results = matmul1_results.view(seqs * heads, seql_q, seql_k)
softmax_results = F.softmax(matmul1_results, dim=-1)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1.0 - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
# Matmul2 Batched GEMMs
# The output tensor specification is needed here to specify the non-standard output.
# Given that pytorch cannot currently perform autograd with an output tensor specified,
# this requires a backward pass specified.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
matmul2_results = torch.empty(
(dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype,
device=torch.device("cuda"),
).transpose(1, 0)
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
matmul2_results = (
matmul2_results.transpose(0, 1).contiguous().view(inputs.size(0), inputs.size(1), inputs.size(2))
)
# Output Linear GEMM
# Input1: (activations) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
if use_biases_t[0]:
outputs = torch.addmm(
output_biases,
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
output_weights.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
outputs = torch.mm(
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)), output_weights.transpose(0, 1)
)
outputs = outputs.view(inputs.size(0), inputs.size(1), output_weights.size(0))
ctx.save_for_backward(
use_biases_t,
heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads):
(
use_biases_t,
heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
) = ctx.saved_tensors
head_dim = inputs.size(2) // heads_t[0]
# Slice out q,k,v from one big Input Linear output (should only impact metadata, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads_t[0], 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
# Slice out q,k,v from one big set of gradients entering the input linear's bprop (should only impact meta data, no copies!)
# The gradients are identical in size to the Input Linear outputs.
# The tensor is declared before hand to properly slice out query, key, and value grads.
input_lin_results_grads = torch.empty_like(input_lin_results)
queries_grads = input_lin_results_grads[:, :, 0, :]
keys_grads = input_lin_results_grads[:, :, 1, :]
values_grads = input_lin_results_grads[:, :, 2, :]
# Output Linear GEMM - DGRAD
# Input1: (data grads) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights
)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
# Output Linear GEMM - WGRAD
# Input1: (data grads) [seql_q*seqs, embed_dim=heads*head_dim] transpose(0,1)
# Input2: (activations) [seql_q*seqs, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = ( embed_dim x embed_dim )
output_weight_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)),
)
output_lin_grads = output_lin_grads.view(inputs.size(0), inputs.size(1) * heads_t[0], head_dim).transpose(0, 1)
if use_biases_t[0]:
output_bias_grads = torch.sum(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0
)
else:
output_bias_grads = None
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
# Matmul2 - DGRAD2
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
values_grads = torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
# Mask and Scaling for Dropout (not a publicly documented op)
dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
# Softmax Grad (not a publicly documented op)
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
queries_grads = torch.baddbmm(
queries_grads.transpose(0, 1),
softmax_grads,
keys.transpose(0, 1),
out=queries_grads.transpose(0, 1),
beta=0.0,
alpha=scale_t[0],
)
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
keys_grads = torch.baddbmm(
keys_grads.transpose(0, 1),
softmax_grads.transpose(1, 2),
queries.transpose(0, 1),
out=keys_grads.transpose(0, 1),
beta=0.0,
alpha=scale_t[0],
)
# Input Linear GEMM - DGRAD
# input1: (data grads) [seql_q, seqs, 3*embed_dim(3072)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)]
# output: [seql_q, seqs, embed_dim]
# GEMM: ( (seql_q*seqs) x 3*embed_dim ) x ( 3*embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
input_lin_results_grads = input_lin_results_grads.view(
inputs.size(0) * inputs.size(1), heads_t[0] * 3 * head_dim
)
input_grads = torch.mm(input_lin_results_grads, input_weights)
input_grads = input_grads.view(inputs.size(0), inputs.size(1), inputs.size(2))
# Input Linear GEMM - WGRAD
# input1: (data grads) [seql_q*seqs, 3*embed_dim(3072)]
# input2: (activations) [seql_q*seqs, embed_dim(1024)]
# output: [3*embed_dim, embed_dim]
# GEMM: ( 3*embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = (3*embed_dim x embed_dim)
input_weight_grads = torch.mm(
input_lin_results_grads.transpose(0, 1), inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2))
)
if use_biases_t[0]:
input_bias_grads = torch.sum(input_lin_results_grads, 0)
else:
input_bias_grads = None
return (
None,
None,
None,
None,
input_grads,
input_weight_grads,
output_weight_grads,
input_bias_grads,
output_bias_grads,
None,
None,
)
self_attn_func = SelfAttnFunc.apply
| 45.844156
| 133
| 0.590652
| 1,870
| 14,120
| 4.22139
| 0.104813
| 0.03547
| 0.045984
| 0.024829
| 0.634406
| 0.563593
| 0.531796
| 0.49734
| 0.489866
| 0.434887
| 0
| 0.032624
| 0.300992
| 14,120
| 307
| 134
| 45.993485
| 0.767173
| 0.351346
| 0
| 0.353535
| 0
| 0
| 0.007382
| 0
| 0
| 0
| 0
| 0
| 0.010101
| 1
| 0.010101
| false
| 0
| 0.010101
| 0
| 0.035354
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37ef3fd76dba247104e4038149d9913b2621526c
| 6,481
|
py
|
Python
|
api/app/reviews/models.py
|
NikolaSiplakova/Baobab
|
180cd3cb492ed47d38ca0b473572fad0ac6f604b
|
[
"Apache-2.0"
] | null | null | null |
api/app/reviews/models.py
|
NikolaSiplakova/Baobab
|
180cd3cb492ed47d38ca0b473572fad0ac6f604b
|
[
"Apache-2.0"
] | null | null | null |
api/app/reviews/models.py
|
NikolaSiplakova/Baobab
|
180cd3cb492ed47d38ca0b473572fad0ac6f604b
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
from app import db
from app.utils import misc
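# SQLAlchemy models for the review workflow: review forms attached to an application form, their questions and per-language translations, reviewer responses with per-question scores, and per-form review configuration.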
class ReviewForm(db.Model):
id = db.Column(db.Integer(), primary_key=True)
application_form_id = db.Column(db.Integer(), db.ForeignKey('application_form.id'), nullable=False)
is_open = db.Column(db.Boolean(), nullable=False)
deadline = db.Column(db.DateTime(), nullable=False)
application_form = db.relationship('ApplicationForm', foreign_keys=[application_form_id])
review_questions = db.relationship('ReviewQuestion')
def __init__(self, application_form_id, deadline):
self.application_form_id = application_form_id
self.is_open = True
self.deadline = deadline
def close(self):
self.is_open = False
class ReviewQuestion(db.Model):
id = db.Column(db.Integer, primary_key=True)
review_form_id = db.Column(db.Integer(), db.ForeignKey('review_form.id'), nullable=False)
question_id = db.Column(db.Integer(), db.ForeignKey('question.id'), nullable=True)
type = db.Column(db.String(), nullable=False)
is_required = db.Column(db.Boolean(), nullable=False)
order = db.Column(db.Integer(), nullable=False)
weight = db.Column(db.Float(), nullable=False)
review_form = db.relationship('ReviewForm', foreign_keys=[review_form_id])
question = db.relationship('Question', foreign_keys=[question_id])
translations = db.relationship('ReviewQuestionTranslation', lazy='dynamic')
def __init__(self,
review_form_id,
question_id,
type,
is_required,
order,
weight):
self.review_form_id = review_form_id
self.question_id = question_id
self.type = type
self.is_required = is_required
self.order = order
self.weight = weight
def get_translation(self, language):
translation = self.translations.filter_by(language=language).first()
return translation
class ReviewQuestionTranslation(db.Model):
__tablename__ = 'review_question_translation'
__table_args__ = tuple([db.UniqueConstraint('review_question_id', 'language', name='uq_review_question_id_language')])
id = db.Column(db.Integer(), primary_key=True)
review_question_id = db.Column(db.Integer(), db.ForeignKey('review_question.id'), nullable=False)
language = db.Column(db.String(2), nullable=False)
description = db.Column(db.String(), nullable=True)
headline = db.Column(db.String(), nullable=True)
placeholder = db.Column(db.String(), nullable=True)
options = db.Column(db.JSON(), nullable=True)
validation_regex = db.Column(db.String(), nullable=True)
validation_text = db.Column(db.String(), nullable=True)
def __init__(self,
review_question_id,
language,
description=None,
headline=None,
placeholder=None,
options=None,
validation_regex=None,
validation_text=None):
self.review_question_id = review_question_id
self.language = language
self.description = description
self.headline = headline
self.placeholder = placeholder
self.options = options
self.validation_regex = validation_regex
self.validation_text = validation_text
class ReviewResponse(db.Model):
id = db.Column(db.Integer(), primary_key=True)
review_form_id = db.Column(db.Integer(), db.ForeignKey('review_form.id'), nullable=False)
reviewer_user_id = db.Column(db.Integer(), db.ForeignKey('app_user.id'), nullable=False)
response_id = db.Column(db.Integer(), db.ForeignKey('response.id'), nullable=False)
language = db.Column(db.String(2), nullable=False)
is_submitted = db.Column(db.Boolean(), nullable=False)
submitted_timestamp = db.Column(db.DateTime(), nullable=True)
review_form = db.relationship('ReviewForm', foreign_keys=[review_form_id])
reviewer_user = db.relationship('AppUser', foreign_keys=[reviewer_user_id])
response = db.relationship('Response', foreign_keys=[response_id])
review_scores = db.relationship('ReviewScore')
def __init__(self,
review_form_id,
reviewer_user_id,
response_id,
language):
self.review_form_id = review_form_id
self.reviewer_user_id = reviewer_user_id
self.response_id = response_id
self.language = language
self.is_submitted = False
def submit(self):
self.is_submitted = True
self.submitted_timestamp = datetime.now()
def calculate_score(self):
return sum([
misc.try_parse_float(score.value) * score.review_question.weight for score in self.review_scores
if score.review_question.weight > 0
])
class ReviewScore(db.Model):
id = db.Column(db.Integer(), primary_key=True)
review_response_id = db.Column(db.Integer(), db.ForeignKey('review_response.id'), nullable=False)
review_question_id = db.Column(db.Integer(), db.ForeignKey('review_question.id'), nullable=False)
value = db.Column(db.String(), nullable=False)
review_response = db.relationship('ReviewResponse', foreign_keys=[review_response_id])
review_question = db.relationship('ReviewQuestion', foreign_keys=[review_question_id])
def __init__(self,
review_question_id,
value):
self.review_question_id = review_question_id
self.value = value
class ReviewConfiguration(db.Model):
id = db.Column(db.Integer(), primary_key=True)
review_form_id = db.Column(db.Integer(), db.ForeignKey('review_form.id'), nullable=False)
num_reviews_required = db.Column(db.Integer(), nullable=False)
num_optional_reviews = db.Column(db.Integer(), nullable=False)
drop_optional_question_id = db.Column(db.Integer(), db.ForeignKey('review_question.id'), nullable=True)
drop_optional_agreement_values = db.Column(db.String(), nullable=True)
review_form = db.relationship('ReviewForm', foreign_keys=[review_form_id])
review_question = db.relationship('ReviewQuestion', foreign_keys=[drop_optional_question_id])
| 42.084416
| 123
| 0.666101
| 761
| 6,481
| 5.423127
| 0.130092
| 0.073661
| 0.092077
| 0.082384
| 0.497456
| 0.480494
| 0.356433
| 0.348922
| 0.249576
| 0.217834
| 0
| 0.000595
| 0.222651
| 6,481
| 153
| 124
| 42.359477
| 0.818579
| 0
| 0
| 0.233871
| 0
| 0
| 0.06574
| 0.012958
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072581
| false
| 0
| 0.024194
| 0.008065
| 0.58871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37ef5b190722144951d2dc7179cd76d69b1cdbc2
| 3,307
|
py
|
Python
|
speednet/vae/ConvVae.py
|
Abhranta/speednet
|
d15971e946cddc62a644d6a6f3be10a4df5b2ce2
|
[
"MIT"
] | 1
|
2021-01-20T14:29:14.000Z
|
2021-01-20T14:29:14.000Z
|
speednet/vae/ConvVae.py
|
Abhranta/speednet
|
d15971e946cddc62a644d6a6f3be10a4df5b2ce2
|
[
"MIT"
] | null | null | null |
speednet/vae/ConvVae.py
|
Abhranta/speednet
|
d15971e946cddc62a644d6a6f3be10a4df5b2ce2
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch
from utils import Flatten , Unflatten , weights_init , down_conv , up_conv
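# Convolutional VAE: the encoder applies num_layers down-convolutions plus two linear layers to produce mu/logvar of size latent_vector_size; the decoder mirrors this with linear layers, up-convolutions and a final ConvTranspose2d back to in_chan channels.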
class Net(nn.Module):
def __init__(self , num_layers , img_dim , in_chan , act_func , latent_vector_size):
super(Net , self).__init__()
assert act_func in ("ReLU" , "LeakyReLU") , "Activation function that can be used now are ReLU and LeakyReLU"
assert img_dim // (2**(num_layers)) > 0 , "Latent vector driven to 0, please increase image size or decrease number of layers"
self.act_func = act_func
self.in_chan = in_chan
self.num_layers = num_layers
self.latent_vector_size = latent_vector_size
self.in_chan2 = self.in_chan
self.encoder_net_layers = []
self.decoder_net_layers = []
self.out_chan = 2**5
for _ in range(num_layers):
self.encoder_net_layers.append(down_conv(self.in_chan , self.act_func , self.out_chan))
self.in_chan = self.out_chan*2
self.out_chan = self.out_chan*4
self.encoder = nn.Sequential(*self.encoder_net_layers ,
Flatten() ,
nn.Linear(((self.out_chan//2)*((img_dim//(2 ** num_layers))**2)) , self.latent_vector_size*4) ,
nn.ReLU(),
nn.Linear(self.latent_vector_size*4 , self.latent_vector_size*2) ,
nn.ReLU()
)
self.mu = nn.Linear(self.latent_vector_size*2 , self.latent_vector_size)
self.logvar = nn.Linear(self.latent_vector_size*2 , self.latent_vector_size)
self.out_chan2 = self.out_chan
for _ in range(num_layers):
self.decoder_net_layers.append(up_conv(self.out_chan2//2 , self.act_func , self.out_chan2//4))
self.out_chan2 = self.out_chan2//4
self.decoder = nn.Sequential(nn.Linear(self.latent_vector_size , self.latent_vector_size*4) ,
nn.ReLU() ,
nn.Linear(self.latent_vector_size*4 , ((self.out_chan//2)*((img_dim//(2 ** num_layers))**2))) ,
nn.ReLU() ,
Unflatten(self.out_chan//2 , (img_dim//(2 ** num_layers)) , (img_dim//(2 ** num_layers)) ) ,
*self.decoder_net_layers ,
nn.ConvTranspose2d(self.out_chan2//2 , self.in_chan2 , 3 , 1 , 1))
def encode(self , input_tensor):
encoded_vector = self.encoder(input_tensor)
mu , logvar = self.mu(encoded_vector) , self.logvar(encoded_vector)
return mu , logvar
def reparameterize(self , mu , logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
latent = mu + std*eps
return latent
def decode(self , latent):
decoded_vector = self.decoder(latent)
return decoded_vector
def forward(self , input_tensor):
mu , logvar = self.encode(input_tensor)
latent_space = self.reparameterize(mu , logvar)
return self.decode(latent_space) , mu , logvar
| 47.927536
| 133
| 0.556093
| 406
| 3,307
| 4.258621
| 0.204434
| 0.060729
| 0.120301
| 0.127241
| 0.378832
| 0.246964
| 0.178138
| 0.178138
| 0.178138
| 0.161943
| 0
| 0.018833
| 0.341699
| 3,307
| 69
| 134
| 47.927536
| 0.775379
| 0
| 0
| 0.089286
| 0
| 0
| 0.047461
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 1
| 0.089286
| false
| 0
| 0.053571
| 0
| 0.232143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37f19f659d9ef143b2408f934266bdcc951f5ade
| 73,603
|
py
|
Python
|
nelpy/utils.py
|
IsaacBusaleh/nelpy
|
f2663cf6f028c9bd0e630fbf8a527c236f4e0f41
|
[
"MIT"
] | 1
|
2021-01-01T17:59:31.000Z
|
2021-01-01T17:59:31.000Z
|
nelpy/utils.py
|
IsaacBusaleh/nelpy
|
f2663cf6f028c9bd0e630fbf8a527c236f4e0f41
|
[
"MIT"
] | null | null | null |
nelpy/utils.py
|
IsaacBusaleh/nelpy
|
f2663cf6f028c9bd0e630fbf8a527c236f4e0f41
|
[
"MIT"
] | null | null | null |
"""This module contains helper functions and utilities for nelpy."""
__all__ = ['spatial_information',
'frange',
'swap_cols',
'swap_rows',
'pairwise',
'is_sorted',
'linear_merge',
'PrettyDuration',
'ddt_asa',
'get_contiguous_segments',
'get_events_boundaries',
'get_threshold_crossing_epochs',
'_bst_get_bins']
import numpy as np
import logging
from itertools import tee, repeat
from collections import namedtuple
from math import floor
from scipy.signal import hilbert
import scipy.ndimage.filters #import gaussian_filter1d, gaussian_filter
from numpy import log, ceil
import copy
import sys
import ctypes
from multiprocessing import Array, cpu_count
from multiprocessing.pool import Pool
import pdb
from . import core # so that core.RegularlySampledAnalogSignalArray is exposed
from . import auxiliary # so that auxiliary.TuningCurve1D is exposed
from . import filtering
from .utils_.decorators import keyword_deprecation
# def sub2ind(array_shape, rows, cols):
# ind = rows*array_shape[1] + cols
# ind[ind < 0] = -1
# ind[ind >= array_shape[0]*array_shape[1]] = -1
# return ind
# def ind2sub(array_shape, ind):
# # see also np.unravel_index(ind, array.shape)
# ind[ind < 0] = -1
# ind[ind >= array_shape[0]*array_shape[1]] = -1
# rows = (ind.astype('int') / array_shape[1])
# cols = ind % array_shape[1]
# return (rows, cols)
def ragged_array(arr):
"""Takes a list of arrays, and returns a ragged array.
See https://github.com/numpy/numpy/issues/12468
"""
n_elem = len(arr)
out = np.array(n_elem*[None])
for ii in range(out.shape[0]):
out[ii] = arr[ii]
return out
def asa_indices_within_epochs(asa, intervalarray):
"""Return indices of ASA within epochs.
[[start, stop]
...
[start, stop]]
so that data can be associated with asa._data[:,start:stop] for each epoch.
"""
indices = []
intervalarray = intervalarray[asa.support]
for interval in intervalarray.merge().data:
a_start = interval[0]
a_stop = interval[1]
frm, to = np.searchsorted(asa._abscissa_vals, (a_start, a_stop))
indices.append((frm, to))
indices = np.array(indices, ndmin=2)
return indices
def frange(start, stop, step):
"""arange with floating point step"""
# TODO: this function is not very general; we can extend it to work
# for reverse (stop < start), empty, and default args, etc.
# there are also many edge cases where this is weird.
# see https://stackoverflow.com/questions/7267226/range-for-floats
# for better alternatives.
num_steps = int(np.floor((stop-start)/step))
return np.linspace(start, stop, num=num_steps, endpoint=False)
def spatial_information(ratemap):
"""Compute the spatial information and firing sparsity...
The specificity index examines the amount of information
(in bits) that a single spike conveys about the animal's
location (i.e., how well cell firing predicts the animal's
location). The spatial information content of cell discharge was
calculated using the formula:
information content = \Sum P_i(R_i/R)log_2(R_i/R)
where i is the bin number, P_i, is the probability for occupancy
of bin i, R_i, is the mean firing rate for bin i, and R is the
overall mean firing rate.
In order to account for the effects of low firing rates (with
fewer spikes there is a tendency toward higher information
content) or random bursts of firing, the spike firing
time-series was randomly offset in time from the rat location
time-series, and the information content was calculated. A
distribution of the information content based on 100 such random
shifts was obtained and was used to compute a standardized score
(Zscore) of information content for that cell. While the
distribution is not composed of independent samples, it was
nominally normally distributed, and a Z value of 2.29 was chosen
as a cut-off for significance (the equivalent of a one-tailed
t-test with P = 0.01 under a normal distribution).
Reference(s)
------------
Markus, E. J., Barnes, C. A., McNaughton, B. L., Gladden, V. L.,
and Skaggs, W. E. (1994). "Spatial information content and
reliability of hippocampal CA1 neurons: effects of visual
input", Hippocampus, 4(4), 410-421.
Parameters
----------
ratemap : array of shape (n_units, n_bins)
Rate map in Hz.
Returns
-------
si : array of shape (n_units,)
spatial information (in bits) per unit
"""
ratemap = copy.deepcopy(ratemap)
# ensure that the ratemap always has nonzero firing rates,
# otherwise the spatial information might return NaNs:
bkg_rate = ratemap[ratemap>0].min()
ratemap[ratemap < bkg_rate] = bkg_rate
number_of_spatial_bins = np.prod(ratemap.shape[1:])
weight_per_bin = 1/number_of_spatial_bins
Pi = 1
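# Occupancy is treated as uniform (Pi = 1 for every spatial bin); the 1/N_bins weighting is applied once at the end via the division by number_of_spatial_bins.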
if len(ratemap.shape) == 3:
# we have 2D tuning curve, (n_units, n_x, n_y)
R = ratemap.mean(axis=1).mean(axis=1) # mean firing rate
Ri = np.transpose(ratemap, (2,1,0))
si = np.sum(np.sum((Pi*((Ri / R)*np.log2(Ri / R)).T), axis=1), axis=1)
elif len(ratemap.shape) == 2:
# we have 1D tuning curve, (n_units, n_x)
R = ratemap.mean(axis=1) # mean firing rate
Ri = ratemap.T
si = np.sum((Pi*((Ri / R)*np.log2(Ri / R)).T), axis=1)
else:
raise TypeError("rate map shape not supported / understood!")
return si/number_of_spatial_bins
def spatial_sparsity(ratemap):
"""Compute the firing sparsity...
The specificity index examines the amount of information
(in bits) that a single spike conveys about the animal's
location (i.e., how well cell firing predicts the animal's
location). The spatial information content of cell discharge was
calculated using the formula:
information content = \Sum P_i(R_i/R)log_2(R_i/R)
where i is the bin number, P_i, is the probability for occupancy
of bin i, R_i, is the mean firing rate for bin i, and R is the
overall mean firing rate.
In order to account for the effects of low firing rates (with
fewer spikes there is a tendency toward higher information
content) or random bursts of firing, the spike firing
time-series was randomly offset in time from the rat location
time-series, and the information content was calculated. A
distribution of the information content based on 100 such random
shifts was obtained and was used to compute a standardized score
(Zscore) of information content for that cell. While the
distribution is not composed of independent samples, it was
nominally normally distributed, and a Z value of 2.29 was chosen
as a cut-off for significance (the equivalent of a one-tailed
t-test with P = 0.01 under a normal distribution).
Reference(s)
------------
Markus, E. J., Barnes, C. A., McNaughton, B. L., Gladden, V. L.,
and Skaggs, W. E. (1994). "Spatial information content and
reliability of hippocampal CA1 neurons: effects of visual
input", Hippocampus, 4(4), 410-421.
Parameters
----------
occupancy : array of shape (n_bins,)
Occupancy of the animal.
ratemap : array of shape (n_units, n_bins)
Rate map in Hz.
Returns
-------
si : array of shape (n_units,)
spatial information (in bits) per unit
sparsity: array of shape (n_units,)
sparsity (in percent) for each unit
"""
number_of_spatial_bins = np.prod(ratemap.shape[1:])
weight_per_bin = 1/number_of_spatial_bins
Pi = 1
if len(ratemap.shape) == 3:
# we have 2D tuning curve, (n_units, n_x, n_y)
R = ratemap.mean(axis=1).mean(axis=1) # mean firing rate
Ri = ratemap
sparsity = np.sum(np.sum((Ri*Pi), axis=1), axis=1)/(R**2)
elif len(ratemap.shape) == 2:
# we have 1D tuning curve, (n_units, n_x)
R = ratemap.mean(axis=1) # mean firing rate
Ri = ratemap.T
sparsity = np.sum((Pi*Ri.T), axis=1)/(R**2)
else:
raise TypeError("rate map shape not supported / understood!")
return sparsity/number_of_spatial_bins
def _bst_get_bins_inside_interval(interval, ds, w=1):
"""(np.array) Return bin edges entirely contained inside an interval.
Bin edges always start at interval.start, and continue for as many
bins as would fit entirely inside the interval.
NOTE 1: there are (n+1) bin edges associated with n bins.
WARNING: if an interval is smaller than ds, then no bin will be
associated with the particular interval.
NOTE 2: nelpy uses half-open intervals [a,b), but if the bin
width divides b-a, then the bins will cover the entire
range. For example, if interval = [0,2) and ds = 1, then
bins = [0,1,2], even though [0,2] is not contained in
[0,2). There might be numerical precision deviations from this?
Parameters
----------
interval : EpochArray
EpochArray containing a single interval with a start, and stop
ds : float
Time bin width, in seconds.
w : number of bins to use in a sliding window mode. Default is 1 (no sliding window).
For example, 40 ms bins, with a stride of 5 ms, can be achieved by using (ds=0.005, w=8)
For now, w has to be an integer, and therefore 5 second bins, with a stride of 2 seconds
are not supported within this framework.
Returns
-------
bins : array
Bin edges in an array of shape (n+1,) where n is the number
of bins
centers : array
Bin centers in an array of shape (n,) where n is the number
of bins
"""
if interval.length < ds:
return None, None
n_bins = int(np.floor(interval.length / ds)) # number of bins
# linspace is better than arange for non-integral steps
bins = np.linspace(interval.start, interval.start + n_bins*ds, n_bins+1)
if w > 1:
wn_bins = np.max((1, n_bins - w + 1))
wn_bins = bins[:wn_bins+1] + w/2*ds - ds/2
bins = wn_bins
centers = bins[:-1] + (ds / 2)
return bins, centers
def _bst_get_bins(intervalArray, ds, w=1):
"""
Return bins, bin centers, binned support indices, and the true bin support for an
intervalArray, using only bins that are contained wholly inside each interval.
"""
b = [] # bin list
c = [] # centers list
left_edges = []
right_edges = []
counter = 0
for interval in intervalArray:
bins, centers = _bst_get_bins_inside_interval(interval=interval, ds=ds, w=w)
if bins is not None:
left_edges.append(counter)
counter += len(centers) - 1
right_edges.append(counter)
counter += 1
b.extend(bins.tolist())
c.extend(centers.tolist())
bins = np.array(b)
bin_centers = np.array(c)
le = np.array(left_edges)
le = le[:, np.newaxis]
re = np.array(right_edges)
re = re[:, np.newaxis]
binned_support = np.hstack((le, re))
lengths = np.atleast_1d((binned_support[:,1] - binned_support[:,0] + 1).squeeze())
support_starts = bins[np.insert(np.cumsum(lengths+1),0,0)[:-1]]
support_stops = bins[np.insert(np.cumsum(lengths+1)-1,0,0)[1:]]
supportdata = np.vstack([support_starts, support_stops]).T
support = type(intervalArray)(supportdata) # set support to TRUE bin support
return bins, bin_centers, binned_support, support
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def get_mua(st, ds=None, sigma=None, truncate=None, _fast=True):
"""Compute the multiunit activity (MUA) from a spike train.
Parameters
----------
st : SpikeTrainArray
SpikeTrainArray containing one or more units.
-- OR --
st : BinnedSpikeTrainArray
BinnedSpikeTrainArray containing multiunit activity.
ds : float, optional
Time step in which to bin spikes. Default is 1 ms.
sigma : float, optional
Standard deviation (in seconds) of Gaussian smoothing kernel.
Default is 10 ms. If sigma==0 then no smoothing is applied.
truncate : float, optional
Bandwidth of the Gaussian filter. Default is 6.
Returns
-------
mua : AnalogSignalArray
AnalogSignalArray with MUA.
"""
if ds is None:
ds = 0.001 # 1 ms bin size
if sigma is None:
sigma = 0.01 # 10 ms standard deviation
if truncate is None:
truncate = 6
if isinstance(st, core.EventArray):
# bin spikes, so that we can count the spikes
mua_binned = st.bin(ds=ds).flatten()
elif isinstance(st, core.BinnedEventArray):
mua_binned = st.flatten()
ds = mua_binned.ds
else:
raise TypeError('st has to be one of (SpikeTrainArray, BinnedSpikeTrainArray)')
# make sure data type is float, so that smoothing works, and convert to rate
mua_binned._data = mua_binned._data.astype(float) / ds
# TODO: now that we can simply cast from BST to ASA and back, the following logic could be simplified:
# put mua rate inside an AnalogSignalArray
if _fast:
mua = core.AnalogSignalArray([], empty=True)
mua._data = mua_binned.data
mua._abscissa_vals = mua_binned.bin_centers
mua._abscissa.support = mua_binned.support
else:
mua = core.AnalogSignalArray(mua_binned.data, timestamps=mua_binned.bin_centers, fs=1/ds)
mua._fs = 1/ds
if (sigma != 0) and (truncate > 0):
mua = gaussian_filter(mua, sigma=sigma, truncate=truncate)
return mua
def is_odd(n):
"""Returns True if n is odd, and False if n is even.
Assumes integer.
"""
return bool(n & 1)
def swap_cols(arr, frm, to):
"""swap columns of a 2D np.array"""
if arr.ndim > 1:
arr[:,[frm, to]] = arr[:,[to, frm]]
else:
arr[frm], arr[to] = arr[to], arr[frm]
def swap_rows(arr, frm, to):
"""swap rows of a 2D np.array"""
if arr.ndim > 1:
arr[[frm, to],:] = arr[[to, frm],:]
else:
arr[frm], arr[to] = arr[to], arr[frm]
def pairwise(iterable):
"""returns a zip of all neighboring pairs.
This is used as a helper function for is_sorted.
Example
-------
>>> mylist = [2, 3, 6, 8, 7]
>>> list(pairwise(mylist))
[(2, 3), (3, 6), (6, 8), (8, 7)]
"""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def argsort(seq):
# http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python
return sorted(range(len(seq)), key=seq.__getitem__)
def is_sorted_general(iterable, key=lambda a, b: a <= b):
"""Check to see if iterable is monotonic increasing (sorted)."""
return all(key(a, b) for a, b in pairwise(iterable))
def is_sorted(x, chunk_size=None):
"""Returns True if iterable is monotonic increasing (sorted).
NOTE: intended for 1D array, list or tuple. Will not work on
more than 1D
This function works in-core with memory footprint XXX.
chunk_size = 100000 is probably a good choice.
"""
if not isinstance(x, (tuple, list, np.ndarray)):
raise TypeError("Unsupported type {}".format(type(x)))
x = np.atleast_1d(np.array(x).squeeze())
if x.ndim > 1:
raise ValueError("Input x must be 1-dimensional")
if chunk_size is None:
chunk_size = 500000
stop = x.size
for chunk_start in range(0, stop, chunk_size):
chunk_stop = int(min(stop, chunk_start + chunk_size + 1))
chunk = x[chunk_start:chunk_stop]
if not np.all(chunk[:-1] <= chunk[1:]):
return False
return True
def linear_merge(list1, list2):
"""Merge two SORTED lists in linear time.
UPDATED TO WORK WITH PYTHON 3.7+ (see https://stackoverflow.com/questions/51700960/runtimeerror-generator-raised-stopiteration-every-time-i-try-to-run-app)
Returns a generator of the merged result.
Examples
--------
>>> a = [1, 3, 5, 7]
>>> b = [2, 4, 6, 8]
>>> [i for i in linear_merge(a, b)]
[1, 2, 3, 4, 5, 6, 7, 8]
>>> [i for i in linear_merge(b, a)]
[1, 2, 3, 4, 5, 6, 7, 8]
>>> a = [1, 2, 2, 3]
>>> b = [2, 2, 4, 4]
>>> [i for i in linear_merge(a, b)]
[1, 2, 2, 2, 2, 3, 4, 4]
"""
# if any of the lists are empty, return the other (possibly also
# empty) list: (this is necessary because having either list1 or
# list2 be empty makes this quite a bit more complicated...)
if isinstance(list1, (list, np.ndarray)):
if len(list1) == 0:
list2 = iter(list2)
while True:
try:
yield next(list2)
except StopIteration:
return
if isinstance(list2, (list, np.ndarray)):
if len(list2) == 0:
list1 = iter(list1)
while True:
try:
yield next(list1)
except StopIteration:
return
list1 = iter(list1)
list2 = iter(list2)
value1 = next(list1)
value2 = next(list2)
# We'll normally exit this loop from a next() call raising
# StopIteration, which is how a generator function exits anyway.
while True:
if value1 <= value2:
# Yield the lower value.
try:
yield value1
except StopIteration:
return
try:
# Grab the next value from list1.
value1 = next(list1)
except StopIteration:
# list1 is empty. Yield the last value we received from list2, then
# yield the rest of list2.
try:
yield value2
except StopIteration:
return
while True:
try:
yield next(list2)
except StopIteration:
return
else:
try:
yield value2
except StopIteration:
return
try:
value2 = next(list2)
except StopIteration:
# list2 is empty.
try:
yield value1
except StopIteration:
return
while True:
try:
yield next(list1)
except StopIteration:
return
def get_mua_events(mua, fs=None, minLength=None, maxLength=None, PrimaryThreshold=None, minThresholdLength=None, SecondaryThreshold=None):
"""Determine MUA/PBEs from multiunit activity.
MUA : multiunit activity
PBE : population burst event
Parameters
----------
mua : AnalogSignalArray
AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz].
fs : float, optional
Sampling frequency of mua, in Hz. If not specified, it will be inferred from
mua.fs
minLength : float, optional
maxLength : float, optional
PrimaryThreshold : float, optional
SecondaryThreshold : float, optional
minThresholdLength : float, optional
Returns
-------
mua_epochs : EpochArray
EpochArray containing all the MUA events / PBEs.
Example
-------
mua = get_mua(spiketrain)
mua_epochs = get_mua_events(mua)
PBEs = get_PBEs(spiketrain, min_active=5)
= get_PBEs(get_mua_events(get_mua(*)), spiketrain, min_active=5)
"""
if fs is None:
fs = mua.fs
if fs is None:
raise ValueError("fs must either be specified, or must be contained in mua!")
if PrimaryThreshold is None:
PrimaryThreshold = mua.mean() + 3*mua.std()
if SecondaryThreshold is None:
SecondaryThreshold = mua.mean()
if minLength is None:
minLength = 0.050 # 50 ms minimum event duration
if maxLength is None:
maxLength = 0.750 # 750 ms maximum event duration
if minThresholdLength is None:
minThresholdLength = 0.0
# determine MUA event bounds:
mua_bounds_idx, maxes, _ = get_events_boundaries(
x = mua.data,
PrimaryThreshold = PrimaryThreshold,
SecondaryThreshold = SecondaryThreshold,
minThresholdLength = minThresholdLength,
minLength = minLength,
maxLength = maxLength,
ds = 1/fs
)
if len(mua_bounds_idx) == 0:
logging.warning("no mua events detected")
return core.EpochArray(empty=True)
# store MUA bounds in an EpochArray
mua_epochs = core.EpochArray(mua.time[mua_bounds_idx])
return mua_epochs
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def get_PBEs(data, fs=None, ds=None, sigma=None, truncate=None, unsorted_id=0,
min_active=None, minLength=None, maxLength=None,
PrimaryThreshold=None, minThresholdLength=None,
SecondaryThreshold=None):
"""Determine PBEs from multiunit activity or spike trains.
Definitions
-----------
MUA : multiunit activity
PBE : population burst event
Summary
-------
This function can be used to identify PBE epochs from spike trains, binned
spike trains, or multiunit activity (in the form of an AnalogSignalArray).
It is recommended to either pass in a SpikeTrainArray or a
BinnedSpikeTrainArray, so that a `min_active` number of sorted units can be
set.
It is also recommended that the unsorted units (but not noise artifacts!)
should be included in the spike train that is used to estimate the PBEs. By
default, unit_id=0 is assumed to be unsorted, but this can be changed, or if
no unsorted units are present, you can set unsorted_id=None. Equivalently,
if min_active=0, then no restriction will apply, and the unsorted_id will
have no effect on the final PBE epochs.
Examples
--------
PBE_epochs = get_PBEs(mua_asa)
PBE_epochs = get_PBEs(spiketrain, min_active=5)
PBE_epochs = get_PBEs(binnedspiketrain, min_active=5)
Parameters
----------
data : AnalogSignalArray
AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz].
-- OR --
data : SpikeTrainArray
SpikeTrainArray with multiple units, including unsorted unit(s), but
excluding any noise artifacts.
-- OR --
data : BinnedSpikeTrainArray
BinnedSpikeTrainArray containing multiunit activity.
fs : float, optional
Sampling frequency of mua, in Hz. If not specified, it will be inferred
from data.
ds : float, optional
Time step in which to bin spikes. Default is 1 ms.
sigma : float, optional
Standard deviation (in seconds) of Gaussian smoothing kernel.
Default is 10 ms. If sigma==0 then no smoothing is applied.
truncate : float, optional
Bandwidth of the Gaussian filter. Default is 6.
unsorted_id : int, optional
unit_id of the unsorted unit. Default is 0. If no unsorted unit is
present, then set unsorted_id = None
min_active : int, optional
Minimum number of active units per event, excluding unsorted unit.
Default is 5.
minLength : float, optional
Minimum event duration in seconds. Default is 50 ms.
maxLength : float, optional
Maximum event duration in seconds. Default is 750 ms.
PrimaryThreshold : float, optional
Primary threshold to exceed. Default is mean() + 3*std()
SecondaryThreshold : float, optional
Secondary threshold to fall back to. Default is mean().
minThresholdLength : float, optional
Minimum duration to stay above PrimaryThreshold. Default is 0 ms.
Returns
-------
PBE_epochs : EpochArray
EpochArray containing all the PBEs.
Future improvements
-------------------
As of now, it is possible, but not easy to specify the Primary and Secondary
thresholds for event detection. A slight change in API might be needed to
make this specification more flexible.
"""
if sigma is None:
sigma = 0.01 # 10 ms standard deviation
if truncate is None:
truncate = 6
if isinstance(data, core.AnalogSignalArray):
# if we have only mua, then we cannot set (ds, unsorted_id, min_active)
if ds is not None:
raise ValueError('if data is an AnalogSignalArray then ds cannot be specified!')
if unsorted_id:
raise ValueError('if data is an AnalogSignalArray then unsorted_id cannot be specified!')
if min_active is not None:
raise ValueError('if data is an AnalogSignalArray then min_active cannot be specified!')
mua = data
mua._data = mua._data.astype(float)
if (sigma != 0) and (truncate > 0):
mua = gaussian_filter(mua, sigma=sigma, truncate=truncate)
elif isinstance(data, (core.EventArray, core.BinnedEventArray)):
# set default parameter values:
if ds is None:
ds = 0.001 # default 1 ms
if min_active is None:
min_active = 5
mua = get_mua(data, ds=ds, sigma=sigma, truncate=truncate, _fast=True)
else:
raise TypeError('data has to be one of (AnalogSignalArray, SpikeTrainArray, BinnedSpikeTrainArray)')
# set default parameter values:
if fs is None:
fs = mua.fs
if minLength is None:
minLength = 0.050 # 50 ms minimum event duration
if maxLength is None:
maxLength = 0.750 # 750 ms maximum event duration
if minThresholdLength is None:
minThresholdLength = 0.0
# if PrimaryThreshold is None:
# PrimaryThreshold =
# if SecondaryThreshold is None:
# SecondaryThreshold =
PBE_epochs = get_mua_events(mua=mua,
fs=fs,
minLength=minLength,
maxLength=maxLength,
PrimaryThreshold=PrimaryThreshold,
minThresholdLength=minThresholdLength,
SecondaryThreshold=SecondaryThreshold)
# now require min_active number of sorted cells
if isinstance(data, (core.EventArray, core.BinnedEventArray)):
if min_active > 0:
if unsorted_id is not None:
# remove unsorted unit, if present:
unit_ids = copy.deepcopy(data.unit_ids)
try:
unit_ids.remove(unsorted_id)
except ValueError:
pass
# data_ = data._unit_subset(unit_ids)
data_ = data.loc[:,unit_ids]
else:
data_ = data
# determine number of active units per epoch:
n_active = np.array([snippet.n_active for snippet in data_[PBE_epochs]])
active_epochs_idx = np.argwhere(n_active > min_active).squeeze()
# only keep those epochs where sufficiently many units are active:
PBE_epochs = PBE_epochs[active_epochs_idx]
return PBE_epochs
def get_contiguous_segments(data, *, step=None, assume_sorted=None,
in_core=True, index=False, inclusive=False,
fs=None, sort=None, in_memory=None):
"""Compute contiguous segments (seperated by step) in a list.
Note! This function requires that a sorted list is passed.
It first checks if the list is sorted O(n), and only sorts O(n log(n))
if necessary. But if you know that the list is already sorted,
you can pass assume_sorted=True, in which case it will skip
the O(n) check.
Returns an array of size (n_segments, 2), with each row
being of the form ([start, stop]) [inclusive, exclusive].
NOTE: when possible, use assume_sorted=True, and step=1 as explicit
arguments to function call.
WARNING! Step is robustly computed in-core (i.e., when in_core is
True), but is assumed to be 1 when out-of-core.
Example
-------
>>> data = [1,2,3,4,10,11,12]
>>> get_contiguous_segments(data)
([1,5], [10,13])
>>> get_contiguous_segments(data, index=True)
([0,4], [4,7])
Parameters
----------
data : array-like
1D array of sequential data, typically assumed to be integral (sample
numbers).
step : float, optional
Expected step size for neighboring samples. Default uses numpy to find
the median, but it is much faster and memory efficient to explicitly
pass in step=1.
assume_sorted : bool, optional
If assume_sorted == True, then data is not inspected or re-ordered. This
can be significantly faster, especially for out-of-core computation, but
it should only be used when you are confident that the data is indeed
sorted, otherwise the results from get_contiguous_segments will not be
reliable.
in_core : bool, optional
If True, then we use np.diff which requires all the data to fit
into memory simultaneously, otherwise we use groupby, which uses
a generator to process potentially much larger chunks of data,
but also much slower.
index : bool, optional
If True, the indices of segment boundaries will be returned. Otherwise,
the segment boundaries will be returned in terms of the data itself.
Default is False.
inclusive : bool, optional
If True, the boundaries are returned as [(inclusive idx, inclusive idx)]
Default is False, and can only be used when index==True.
Deprecated
----------
in_memory : bool, optional
This is equivalent to the new 'in-core'.
sort : bool, optional
This is equivalent to the new 'assume_sorted'
fs : sampling rate (Hz) used to extend half-open interval support by 1/fs
"""
# handle deprecated API calls:
if in_memory:
in_core = in_memory
logging.warning("'in_memory' has been deprecated; use 'in_core' instead")
if sort:
assume_sorted = sort
logging.warning("'sort' has been deprecated; use 'assume_sorted' instead")
if fs:
step = 1/fs
logging.warning("'fs' has been deprecated; use 'step' instead")
if inclusive:
assert index, "option 'inclusive' can only be used with 'index=True'"
if in_core:
data = np.asarray(data)
if not assume_sorted:
if not is_sorted(data):
data = np.sort(data) # algorithm assumes sorted list
if step is None:
step = np.median(np.diff(data))
# assuming that data(t1) is sampled somewhere on [t, t+1/fs) we have a 'continuous' signal as long as
# data(t2 = t1+1/fs) is sampled somewhere on [t+1/fs, t+2/fs). In the most extreme case, it could happen
# that t1 = t and t2 = t + 2/fs, i.e. a difference of 2 steps.
if np.any(np.diff(data) < step):
logging.warning("some steps in the data are smaller than the requested step size.")
breaks = np.argwhere(np.diff(data)>=2*step)
starts = np.insert(breaks+1, 0, 0)
stops = np.append(breaks, len(data)-1)
bdries = np.vstack((data[starts], data[stops] + step)).T
if index:
if inclusive:
indices = np.vstack((starts, stops)).T
else:
indices = np.vstack((starts, stops + 1)).T
return indices
else:
from itertools import groupby
from operator import itemgetter
if not assume_sorted:
if not is_sorted(data):
# data = np.sort(data) # algorithm assumes sorted list
raise NotImplementedError("out-of-core sorting has not been implemented yet...")
if step is None:
step = 1
bdries = []
if not index:
for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])):
f = itemgetter(1)
gen = (f(x) for x in g)
start = next(gen)
stop = start
for stop in gen:
pass
bdries.append([start, stop + step])
else:
counter = 0
for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])):
f = itemgetter(1)
gen = (f(x) for x in g)
_ = next(gen)
start = counter
stop = start
for _ in gen:
stop +=1
if inclusive:
bdries.append([start, stop])
else:
bdries.append([start, stop + 1])
counter = stop + 1
return np.asarray(bdries)
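# Illustrative sketch (added for exposition; not part of the original module).
# The hypothetical helper below shows get_contiguous_segments on a short
# integer sequence, assuming numpy is available as np as in the rest of this file.
def _demo_contiguous_segments():
    samples = np.array([1, 2, 3, 4, 10, 11, 12])
    # value-based boundaries, [start, stop) in the data's own units:
    bounds = get_contiguous_segments(samples, step=1, assume_sorted=True)
    # index-based boundaries, [start, stop) as positions into `samples`:
    idx = get_contiguous_segments(samples, step=1, assume_sorted=True, index=True)
    return bounds, idx  # -> [[1, 5], [10, 13]] and [[0, 4], [4, 7]]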
def get_direction(asa, *, sigma=None):
"""Return epochs during which an animal was running left to right, or right
to left.
Parameters
----------
asa : AnalogSignalArray 1D
AnalogSignalArray containing the 1D position data.
sigma : float, optional
Smoothing to apply to position (x) before computing gradient estimate.
Default is 0.
Returns
-------
l2r, r2l : EpochArrays
EpochArrays corresponding to left-to-right and right-to-left movement.
"""
if sigma is None:
sigma = 0
if not isinstance(asa, core.AnalogSignalArray):
raise TypeError('AnalogSignalArray expected!')
assert asa.n_signals == 1, "1D AnalogSignalArray expected!"
direction = dxdt_AnalogSignalArray(asa.smooth(sigma=sigma),
rectify=False).data
direction[direction>=0] = 1
direction[direction<0] = -1
direction = direction.squeeze()
l2r = get_contiguous_segments(np.argwhere(direction>0).squeeze(), step=1)
l2r[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
l2r = core.EpochArray(asa.abscissa_vals[l2r])
r2l = get_contiguous_segments(np.argwhere(direction<0).squeeze(), step=1)
r2l[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
r2l = core.EpochArray(asa.abscissa_vals[r2l])
return l2r, r2l
class PrettyBytes(int):
"""Prints number of bytes in a more readable format"""
def __init__(self, val):
self.val = val
def __str__(self):
if self.val < 1024:
return '{} bytes'.format(self.val)
elif self.val < 1024**2:
return '{:.3f} kilobytes'.format(self.val/1024)
elif self.val < 1024**3:
return '{:.3f} megabytes'.format(self.val/1024**2)
elif self.val < 1024**4:
return '{:.3f} gigabytes'.format(self.val/1024**3)
else:
return '{:.3f} terabytes'.format(self.val/1024**4)
def __repr__(self):
return self.__str__()
class PrettyInt(int):
"""Prints integers in a more readable format"""
def __init__(self, val):
self.val = val
def __str__(self):
return '{:,}'.format(self.val)
def __repr__(self):
return '{:,}'.format(self.val)
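# Illustrative sketch (added for exposition; not part of the original module).
# The hypothetical helper below shows how the two pretty-printing wrappers
# above format raw values.
def _demo_pretty_printers():
    nbytes = PrettyBytes(2048)      # str(nbytes) -> '2.000 kilobytes'
    count = PrettyInt(1234567)      # str(count)  -> '1,234,567'
    return str(nbytes), str(count)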
class PrettyDuration(float):
"""Time duration with pretty print.
Behaves like a float, and can always be cast to a float.
"""
def __init__(self, seconds):
self.duration = seconds
def __str__(self):
return self.time_string(self.duration)
def __repr__(self):
return self.time_string(self.duration)
@staticmethod
def to_dhms(seconds):
"""convert seconds into hh:mm:ss:ms"""
pos = seconds >= 0
if not pos:
seconds = -seconds
ms = seconds % 1; ms = round(ms*10000)/10
seconds = floor(seconds)
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
Time = namedtuple('Time', 'pos dd hh mm ss ms')
time = Time(pos=pos, dd=d, hh=h, mm=m, ss=s, ms=ms)
return time
@staticmethod
def time_string(seconds):
"""returns a formatted time string."""
if np.isinf(seconds):
return 'inf'
pos, dd, hh, mm, ss, s = PrettyDuration.to_dhms(seconds)
if s > 0:
if mm == 0:
# in this case, represent milliseconds in terms of
# seconds (i.e. a decimal)
sstr = str(s/1000).lstrip('0')
if s >= 999.5:
ss += 1
s = 0
sstr = ""
# now propagate the carry:
if ss == 60:
mm += 1
ss = 0
if mm == 60:
hh +=1
mm = 0
if hh == 24:
dd += 1
hh = 0
else:
# for all other cases, milliseconds will be represented
# as an integer
if s >= 999.5:
ss += 1
s = 0
sstr = ""
# now propagate the carry:
if ss == 60:
mm += 1
ss = 0
if mm == 60:
hh +=1
mm = 0
if hh == 24:
dd += 1
hh = 0
else:
sstr = ":{:03d}".format(int(s))
else:
sstr = ""
if dd > 0:
daystr = "{:01d} days ".format(dd)
else:
daystr = ""
if hh > 0:
timestr = daystr + "{:01d}:{:02d}:{:02d}{} hours".format(hh, mm, ss, sstr)
elif mm > 0:
timestr = daystr + "{:01d}:{:02d}{} minutes".format(mm, ss, sstr)
elif ss > 0:
timestr = daystr + "{:01d}{} seconds".format(ss, sstr)
else:
timestr = daystr +"{} milliseconds".format(s)
if not pos:
timestr = "-" + timestr
return timestr
def __add__(self, other):
"""a + b"""
return PrettyDuration(self.duration + other)
def __radd__(self, other):
"""b + a"""
return self.__add__(other)
def __sub__(self, other):
"""a - b"""
return PrettyDuration(self.duration - other)
def __rsub__(self, other):
"""b - a"""
return other - self.duration
def __mul__(self, other):
"""a * b"""
return PrettyDuration(self.duration * other)
def __rmul__(self, other):
"""b * a"""
return self.__mul__(other)
def __truediv__(self, other):
"""a / b"""
return PrettyDuration(self.duration / other)
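# Illustrative sketch (added for exposition; not part of the original module).
# PrettyDuration behaves like a float but prints a readable time string; the
# hypothetical helper below shows both aspects.
def _demo_pretty_duration():
    d = PrettyDuration(93.5)     # str(d) -> '1:33:500 minutes' (m:ss:ms)
    total = d + 30               # arithmetic stays a PrettyDuration
    return str(d), float(total)  # float(total) -> 123.5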
def shrinkMatColsTo(mat, numCols):
""" Docstring goes here
Shrinks a NxM1 matrix down to an NxM2 matrix, where M2 <= M1"""
import scipy.ndimage
numCells = mat.shape[0]
numColsMat = mat.shape[1]
a = np.zeros((numCells, numCols))
for row in np.arange(numCells):
shrunk_row = scipy.ndimage.zoom(input=mat[row,:], zoom=(numCols/numColsMat), order=1)
a[row,:] = shrunk_row
return a
def find_threshold_crossing_events(x, threshold, *, mode='above'):
"""Find threshold crossing events. INCLUSIVE
Parameters
----------
x : numpy array
Input data
threshold : float
The value whose crossing triggers an event
mode : string, optional in ['above', 'below']; default 'above'
event triggering above, or below threshold
Returns
-------
eventlist : list
List containing the indices corresponding to threshold crossings
eventmax : list
List containing the maximum value of each event
"""
from itertools import groupby
from operator import itemgetter
if mode == 'below':
cross_threshold = np.where(x <= threshold, 1, 0)
elif mode == 'above':
cross_threshold = np.where(x >= threshold, 1, 0)
else:
raise NotImplementedError(
"mode {} not understood for find_threshold_crossing_events".format(str(mode)))
eventlist = []
eventmax = []
for k,v in groupby(enumerate(cross_threshold),key=itemgetter(1)):
if k:
v = list(v)
eventlist.append([v[0][0],v[-1][0]])
try :
eventmax.append(x[v[0][0]:(v[-1][0]+1)].max())
except :
print(v, x[v[0][0]:v[-1][0]])
eventmax = np.asarray(eventmax)
eventlist = np.asarray(eventlist)
return eventlist, eventmax
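# Illustrative sketch (added for exposition; not part of the original module).
# The hypothetical helper below runs find_threshold_crossing_events on a tiny
# synthetic trace to show the inclusive [start, stop] index pairs and the
# per-event maxima.
def _demo_threshold_crossings():
    x = np.array([0.0, 1.0, 3.0, 2.0, 0.5, 0.0, 4.0, 0.0])
    eventlist, eventmax = find_threshold_crossing_events(x, threshold=2.0)
    # eventlist -> [[2, 3], [6, 6]]  (indices where x >= 2.0, inclusive)
    # eventmax  -> [3.0, 4.0]
    return eventlist, eventmax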
def get_events_boundaries(x, *, PrimaryThreshold=None,
SecondaryThreshold=None,
minThresholdLength=None, minLength=None,
maxLength=None, ds=None, mode='above'):
"""get event boundaries such that event.max >= PrimaryThreshold
and the event extent is defined by SecondaryThreshold.
Note that when PrimaryThreshold==SecondaryThreshold, then this is a
simple threshold crossing algorithm.
NB. minLength and maxLength are applied to the SecondaryThreshold
events, whereas minThresholdLength is applied to the
PrimaryThreshold events.
Parameters
----------
x : numpy array
Input data
mode : string, optional in ['above', 'below']; default 'above'
event triggering above, or below threshold
PrimaryThreshold : float, optional
If mode=='above', requires that event.max >= PrimaryThreshold
If mode=='below', requires that event.min <= PrimaryThreshold
SecondaryThreshold : float, optional
The value that defines the event extent
minThresholdLength : float, optional
Minimum duration for which the PrimaryThreshold is crossed
minLength : float, optional
Minimum duration for which the SecondaryThreshold is crossed
maxLength : float, optional
Maximum duration for which the SecondaryThreshold is crossed
ds : float, optional
Time step of the input data x
Returns
-------
returns bounds, maxes, events
where bounds <==> SecondaryThreshold to SecondaryThreshold, inclusive
maxes <==> maximum value during each event
events <==> PrimaryThreshold to PrimaryThreshold, inclusive
"""
# TODO: x must be a numpy array
# TODO: ds is often used, but we have no default, and no check for when
# it is left as None.
# TODO: the Docstring should equally be improved.
x = x.squeeze()
if x.ndim > 1:
raise TypeError("multidimensional arrays not supported!")
if PrimaryThreshold is None: # by default, threshold is 3 SDs above mean of x
PrimaryThreshold = np.mean(x) + 3*np.std(x)
if SecondaryThreshold is None: # by default, revert back to mean of x
SecondaryThreshold = np.mean(x) # + 0*np.std(x)
events, _ = \
find_threshold_crossing_events(x=x,
threshold=PrimaryThreshold,
mode=mode)
# apply minThresholdLength criterion:
if minThresholdLength is not None and len(events) > 0:
durations = (events[:,1] - events[:,0] + 1) * ds
events = events[durations >= minThresholdLength]
if len(events) == 0:
bounds, maxes, events = [], [], []
logging.warning("no events satisfied criteria")
return bounds, maxes, events
# Find periods where value is > SecondaryThreshold; note that the previous periods should be within these!
if mode == 'above':
assert SecondaryThreshold <= PrimaryThreshold, \
"Secondary Threshold by definition should include more data than Primary Threshold"
elif mode == 'below':
assert SecondaryThreshold >= PrimaryThreshold, \
"Secondary Threshold by definition should include more data than Primary Threshold"
else:
raise NotImplementedError(
"mode {} not understood for find_threshold_crossing_events".format(str(mode)))
bounds, broader_maxes = \
find_threshold_crossing_events(x=x,
threshold=SecondaryThreshold,
mode=mode)
# Find corresponding big windows for potential events
# Specifically, look for closest left edge that is just smaller
outer_boundary_indices = np.searchsorted(bounds[:,0], events[:,0], side='right')
# searchsorted finds the index after, so subtract one to get index before
outer_boundary_indices = outer_boundary_indices - 1
# Find extended boundaries for events by pairing to larger windows
# (Note that there may be repeats if the larger window contains multiple > 3SD sections)
bounds = bounds[outer_boundary_indices,:]
maxes = broader_maxes[outer_boundary_indices]
if minLength is not None and len(events) > 0:
durations = (bounds[:,1] - bounds[:,0] + 1) * ds
# TODO: refactor the [durations >= minLength] filtering, but be careful about edge cases
bounds = bounds[durations >= minLength]
maxes = maxes[durations >= minLength]
events = events[durations >= minLength]
if maxLength is not None and len(events) > 0:
durations = (bounds[:,1] - bounds[:,0] + 1) * ds
# TODO: refactor the [durations <= maxLength] filtering, but be careful about edge cases
bounds = bounds[durations <= maxLength]
maxes = maxes[durations <= maxLength]
events = events[durations <= maxLength]
if len(events) == 0:
bounds, maxes, events = [], [], []
logging.warning("no events satisfied criteria")
return bounds, maxes, events
# Now, since all that we care about are the larger windows, so we should get rid of repeats
_, unique_idx = np.unique(bounds[:,0], return_index=True)
bounds = bounds[unique_idx,:] # SecondaryThreshold to SecondaryThreshold
maxes = maxes[unique_idx] # maximum value during event
events = events[unique_idx,:] # PrimaryThreshold to PrimaryThreshold
return bounds, maxes, events
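# Illustrative sketch (added for exposition; not part of the original module).
# The hypothetical helper below shows the compound-threshold behaviour of
# get_events_boundaries: an event must reach the primary threshold, while its
# extent is grown out to the secondary threshold.
def _demo_event_boundaries():
    x = np.array([0., 1., 2., 4., 2., 1., 0., 2., 1., 0.])
    bounds, maxes, events = get_events_boundaries(
        x, PrimaryThreshold=3.0, SecondaryThreshold=1.0)
    # bounds -> [[1, 5]]  (extent where x >= 1.0 around the qualifying peak)
    # maxes  -> [4.0]
    # events -> [[3, 3]]  (samples where x >= 3.0)
    return bounds, maxes, events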
def signal_envelope1D(data, *, sigma=None, fs=None):
logging.warning("'signal_envelope1D' is deprecated; use 'signal_envelope_1d' instead!")
return signal_envelope_1d(data, sigma=sigma, fs=fs)
def signal_envelope_1d(data, *, sigma=None, fs=None):
"""Finds the signal envelope by taking the absolute value
of the Hilbert transform
Parameters
----------
data : numpy array, list, or RegularlySampledAnalogSignalArray
Input data
If data is a numpy array, it is expected to have shape
(n_signals, n_samples)
If data is a list, it is expected to have length n_signals,
where each sublist has length n_samples, i.e. data is not
jagged
sigma : float, optional
Standard deviation of the Gaussian kernel used to
smooth the envelope after applying the Hilbert transform.
Units of seconds. Default is 4 ms
fs : float, optional
Sampling rate of the signal
Returns
-------
out : same type as the input object
An object containing the signal envelope
Note: this function is epoch-aware (each epoch of an input ASA is processed separately).
"""
if sigma is None:
sigma = 0.004 # 4 ms standard deviation
if fs is None:
if isinstance(data, (np.ndarray, list)):
raise ValueError("sampling frequency must be specified!")
elif isinstance(data, core.RegularlySampledAnalogSignalArray):
fs = data.fs
if isinstance(data, (np.ndarray, list)):
data_array = np.array(data)
n_dims = np.array(data).ndim
assert n_dims <= 2, "Only 1D signals supported!"
if n_dims == 1:
input_data = data_array.reshape((1, data_array.size))
else:
input_data = data_array
n_signals, n_samples = input_data.shape
# Compute number of samples to compute fast FFTs
padlen = nextfastpower(n_samples) - n_samples
# Pad data
paddeddata = np.hstack( (input_data, np.zeros((n_signals, padlen))) )
# Use hilbert transform to get an envelope
envelope = np.absolute(hilbert(paddeddata, axis=-1))
# free up memory
del paddeddata
# Truncate results back to original length
envelope = envelope[..., :n_samples]
if sigma:
# Smooth envelope with a gaussian (sigma = 4 ms default)
EnvelopeSmoothingSD = sigma*fs
smoothed_envelope = scipy.ndimage.filters.gaussian_filter1d(envelope, EnvelopeSmoothingSD,
mode='constant', axis=-1)
envelope = smoothed_envelope
if isinstance(data, list):
envelope = envelope.tolist()
return envelope
elif isinstance(data, core.RegularlySampledAnalogSignalArray):
# Only ASA data of shape (n_signals, n_timepoints) -> 2D currently supported
assert data.data.ndim == 2
cum_lengths = np.insert(np.cumsum(data.lengths), 0, 0)
newasa = data.copy()
# for segment in data:
for idx in range(data.n_epochs):
# print('hilberting epoch {}/{}'.format(idx+1, data.n_epochs))
segment_data = data._data[:,cum_lengths[idx]:cum_lengths[idx+1]]
n_signals, n_samples = segment_data.shape
# Compute number of samples to compute fast FFTs:
padlen = nextfastpower(n_samples) - n_samples
# Pad data
paddeddata = np.hstack( (segment_data, np.zeros((n_signals, padlen))) )
# Use hilbert transform to get an envelope
envelope = np.absolute(hilbert(paddeddata, axis=-1))
# free up memory
del paddeddata
# Truncate results back to original length
envelope = envelope[..., :n_samples]
if sigma:
# Smooth envelope with a gaussian (sigma = 4 ms default)
EnvelopeSmoothingSD = sigma*fs
smoothed_envelope = scipy.ndimage.filters.gaussian_filter1d(envelope, EnvelopeSmoothingSD,
mode='constant', axis=-1)
envelope = smoothed_envelope
newasa._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = np.atleast_2d(envelope)
return newasa
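# Illustrative sketch (added for exposition; not part of the original module).
# The hypothetical helper below computes the envelope of an amplitude-modulated
# test signal, assuming np and hilbert are imported at the top of this module
# as the surrounding code already requires. sigma=0 disables the post-hoc
# Gaussian smoothing.
def _demo_signal_envelope():
    fs = 1000.0
    t = np.arange(0, 1, 1/fs)
    x = np.sin(2*np.pi*8*t) * np.sin(2*np.pi*80*t)  # 8 Hz AM on an 80 Hz carrier
    env = signal_envelope_1d(x, fs=fs, sigma=0)
    return env  # approximately |sin(2*pi*8*t)|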
def nextpower(n, base=2.0):
"""Return the next integral power of two greater than the given number.
Specifically, return m such that
m >= n
m == 2**x
where x is an integer. Use base argument to specify a base other than 2.
This is useful for ensuring fast FFT sizes.
From https://gist.github.com/bhawkins/4479607 (Brian Hawkins)
"""
x = base**ceil (log (n) / log (base))
if type(n) == np.ndarray:
return np.asarray (x, dtype=int)
else:
return int (x)
def nextfastpower(n):
"""Return the next integral power of small factors greater than the given
number. Specifically, return m such that
m >= n
m == 2**x * 3**y * 5**z
where x, y, and z are integers.
This is useful for ensuring fast FFT sizes.
From https://gist.github.com/bhawkins/4479607 (Brian Hawkins)
See also http://scipy.github.io/devdocs/generated/scipy.fftpack.next_fast_len.html
"""
if n < 7:
return max (n, 1)
# x, y, and z are all bounded from above by the formula of nextpower.
# Compute all possible combinations for powers of 3 and 5.
# (Not too many for reasonable FFT sizes.)
def power_series (x, base):
nmax = ceil (log (x) / log (base))
return np.logspace (0.0, nmax, num=nmax+1, base=base)
n35 = np.outer (power_series (n, 3.0), power_series (n, 5.0))
n35 = n35[n35<=n]
# Lump the powers of 3 and 5 together and solve for the powers of 2.
n2 = nextpower (n / n35)
return int (min (n2 * n35))
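# Illustrative sketch (added for exposition; not part of the original module).
# The hypothetical helper below contrasts the two FFT-size helpers: nextpower
# rounds up to a power of the base (2 by default), while nextfastpower rounds
# up to the nearest 2**x * 3**y * 5**z composite.
def _demo_fft_sizes():
    a = nextpower(97)       # -> 128
    b = nextfastpower(97)   # -> 100 (= 2**2 * 5**2)
    return a, b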
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def gaussian_filter(obj, *, fs=None, sigma=None, truncate=None, inplace=False, mode=None, cval=None, within_intervals=False):
"""Smooths with a Gaussian kernel.
Smoothing is applied along the abscissa, and the same smoothing is applied to each
signal in the RegularlySampledAnalogSignalArray, or to each unit in a BinnedSpikeTrainArray.
Smoothing is applied ACROSS intervals, but smoothing WITHIN intervals is also supported.
Parameters
----------
obj : RegularlySampledAnalogSignalArray or BinnedSpikeTrainArray.
fs : float, optional
Sampling rate (in obj.base_unit^-1) of obj. If not provided, it will
be inferred.
sigma : float, optional
Standard deviation of Gaussian kernel, in obj.base_units. Default is 0.05
(50 ms if base_unit=seconds).
truncate : float, optional
Bandwidth outside of which the filter value will be zero. Default is 4.0.
inplace : bool
If True the data will be replaced with the smoothed data.
Default is False.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The mode parameter determines how the array borders are handled,
where cval is the value when mode is equal to 'constant'. Default is
'reflect'.
cval : scalar, optional
Value to fill past edges of input if mode is 'constant'. Default is 0.0.
within_intervals : boolean, optional
If True, then smooth within each epoch. Otherwise smooth across epochs.
Default is False.
Note that when mode = 'wrap', smoothing within epochs is not affected
by wrapping.
Returns
-------
out : same type as obj
An object with smoothed data is returned.
"""
if sigma is None:
sigma = 0.05
if truncate is None:
truncate = 4
if mode is None:
mode = 'reflect'
if cval is None:
cval = 0.0
if not inplace:
out = copy.deepcopy(obj)
else:
out = obj
if isinstance(out, core.RegularlySampledAnalogSignalArray):
if fs is None:
fs = out.fs
if fs is None:
raise ValueError("fs must either be specified, or must be contained in the {}!".format(out.type_name))
elif isinstance(out, core.BinnedEventArray):
bst = out
if fs is None:
fs = 1/bst.ds
if fs is None:
raise ValueError("fs must either be specified, or must be contained in the {}!".format(out.type_name))
else:
raise NotImplementedError("gaussian_filter for {} is not yet supported!".format(str(type(out))))
sigma = sigma * fs
if not within_intervals:
# see https://stackoverflow.com/questions/18697532/gaussian-filtering-a-image-with-nan-in-python
# (1) if smoothing across intervals, we work on a merged support
# (2) build abscissa_vals, including existing ones, and out-of-support ones
# (3) to smooth U, build auxiliary arrays V and W, with (V=U).nan=0, and (W=1).nan=0
# (4) Z = smooth(V)/smooth(W)
# (5) only keep original support, and original abscissa_vals
if isinstance(out, (core.RegularlySampledAnalogSignalArray, core.BinnedEventArray)):
support = out._abscissa.support.merge()
if not support.domain.is_finite:
support.domain = (support.start, support.stop) #TODO: #FIXME might come from abscissa definition, and not from support
missing_abscissa_vals = []
for interval in (~support):
missing_vals = frange(interval.start, interval.stop, 1/fs)
missing_abscissa_vals.extend(missing_vals)
if isinstance(out, core.RegularlySampledAnalogSignalArray):
n_signals = out.n_signals
n_samples = out.n_samples
elif isinstance(out, core.BinnedEventArray):
n_signals = out.n_series
n_samples = out.n_bins
V = np.zeros((n_signals, n_samples + len(missing_abscissa_vals)))
W = np.ones(V.shape)
all_abscissa_vals = np.sort(np.append(out._abscissa_vals, missing_abscissa_vals))
data_idx = np.searchsorted(all_abscissa_vals, out._abscissa_vals)
missing_idx = np.searchsorted(all_abscissa_vals, missing_abscissa_vals)
V[:, data_idx] = out.data
W[:, missing_idx] = 0
VV = scipy.ndimage.filters.gaussian_filter(V, sigma=(0,sigma), truncate=truncate, mode=mode, cval=cval)
WW = scipy.ndimage.filters.gaussian_filter(W, sigma=(0,sigma), truncate=truncate, mode=mode, cval=cval)
Z = VV[:,data_idx]/WW[:,data_idx]
out._data = Z
else:
raise NotImplementedError("gaussian_filter across intervals for {} is not yet supported!".format(str(type(out))))
else: # within intervals:
cum_lengths = np.insert(np.cumsum(out.lengths), 0, 0)
out._data = out._data.astype(float)
if isinstance(out, core.RegularlySampledAnalogSignalArray):
# now smooth each interval separately
for idx in range(out.n_intervals):
out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = scipy.ndimage.filters.gaussian_filter(out._data[:,cum_lengths[idx]:cum_lengths[idx+1]], sigma=(0,sigma), truncate=truncate)
elif isinstance(out, core.BinnedSpikeTrainArray):
# now smooth each interval separately
for idx in range(out.n_epochs):
out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = scipy.ndimage.filters.gaussian_filter(out._data[:,cum_lengths[idx]:cum_lengths[idx+1]], sigma=(0,sigma), truncate=truncate)
# out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = self._smooth_array(out._data[:,cum_lengths[idx]:cum_lengths[idx+1]], w=w)
return out
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def ddt_asa(asa, *, fs=None, smooth=False, rectify=True, sigma=None, truncate=None, norm=False):
"""Numerical differentiation of a regularly sampled AnalogSignalArray.
Optionally also smooths result with a Gaussian kernel.
Smoothing is applied in time, and the same smoothing is applied to each
signal in the AnalogSignalArray.
Differentiation, (and if requested, smoothing) is applied within each epoch.
Parameters
----------
asa : nelpy.RegularlySampledAnalogSignalArray
Input object.
fs : float, optional
Sampling rate (in Hz) of input RSASA. If not provided, it will be obtained
from asa.fs.
smooth : bool, optional
If true, result will be smoothed. Default is False
rectify : bool, optional
If True, absolute value of derivative is computed. Default is True.
sigma : float, optional
Standard deviation of Gaussian kernel, in seconds. Default is 0.05
(50 ms).
truncate : float, optional
Bandwidth outside of which the filter value will be zero. Default is 4.0
norm: boolean, optional
If True, then apply the L2 norm to the result.
Returns
-------
out : nelpy.RegularlySampledAnalogSignalArray
A RegularlySampledAnalogSignalArray with derivative data (in units
per second) is returned.
Notes
-----
Central differences are used here.
"""
if not isinstance(asa, core.RegularlySampledAnalogSignalArray):
raise TypeError("Input object must be a RegularlySampledAnalogSignalArray!")
if fs is None:
fs = asa.fs
if sigma is None:
sigma = 0.05 # 50 ms default
out = asa.copy()
cum_lengths = np.insert(np.cumsum(asa.lengths), 0, 0)
# ensure that datatype is float
# TODO: this will break complex data
out._data = out.data.astype(float)
# now obtain the derivative for each epoch separately
for idx in range(asa.n_epochs):
# if 1D:
if asa.n_signals == 1:
if (cum_lengths[idx+1]-cum_lengths[idx]) < 2:
# only single sample
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = 0
else:
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = np.gradient(asa._data[[0],cum_lengths[idx]:cum_lengths[idx+1]], axis=1)
else:
if (cum_lengths[idx+1]-cum_lengths[idx]) < 2:
# only single sample
out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = 0
else:
out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = np.gradient(asa._data[:,cum_lengths[idx]:cum_lengths[idx+1]], axis=1)
out._data = out._data * fs
if norm:
out._data = np.atleast_2d(np.linalg.norm(out._data, axis=0))
if rectify:
out._data = np.abs(out._data)
if smooth:
out = gaussian_filter(out, fs=fs, sigma=sigma, truncate=truncate)
return out
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def dxdt_AnalogSignalArray(asa, *, fs=None, smooth=False, rectify=True, sigma=None, truncate=None):
"""Numerical differentiation of a regularly sampled AnalogSignalArray.
Optionally also smooths result with a Gaussian kernel.
Smoothing is applied in time, and the same smoothing is applied to each
signal in the AnalogSignalArray.
Differentiation, (and if requested, smoothing) is applied within each epoch.
Parameters
----------
asa : AnalogSignalArray
fs : float, optional
Sampling rate (in Hz) of AnalogSignalArray. If not provided, it will
be obtained from asa.fs
smooth : bool, optional
If true, result will be smoothed. Default is False
rectify : bool, optional
If True, absolute value of derivative is computed. Default is True.
sigma : float, optional
Standard deviation of Gaussian kernel, in seconds. Default is 0.05
(50 ms).
truncate : float, optional
Bandwidth outside of which the filter value will be zero. Default is 4.0
Returns
-------
out : AnalogSignalArray
An AnalogSignalArray with derivative data (in units per second) is returned.
"""
raise DeprecationWarning('use ddt_asa instead!')
if fs is None:
fs = asa.fs
if fs is None:
raise ValueError("fs must either be specified, or must be contained in the AnalogSignalArray!")
if sigma is None:
sigma = 0.05 # 50 ms default
out = copy.deepcopy(asa)
cum_lengths = np.insert(np.cumsum(asa.lengths), 0, 0)
# ensure that datatype is float
out._data = out.data.astype(float)
if asa.n_signals == 2:
out._data = out._data[[0],:]
# now obtain the derivative for each epoch separately
for idx in range(asa.n_epochs):
# if 1D:
if asa.n_signals == 1:
if (cum_lengths[idx+1]-cum_lengths[idx]) < 2:
# only single sample
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = 0
else:
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = np.gradient(asa._data[[0],cum_lengths[idx]:cum_lengths[idx+1]], axis=1)
elif asa.n_signals == 2:
if (cum_lengths[idx+1]-cum_lengths[idx]) < 2:
# only single sample
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = 0
else:
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = np.linalg.norm(np.gradient(asa._data[:,cum_lengths[idx]:cum_lengths[idx+1]], axis=1), axis=0)
else:
raise TypeError("more than 2D not currently supported!")
out._data = out._data * fs
if rectify:
out._data = np.abs(out._data)
if smooth:
out = gaussian_filter(out, fs=fs, sigma=sigma, truncate=truncate)
return out
def get_threshold_crossing_epochs(asa, t1=None, t2=None, mode='above'):
"""Return epochs where a signal crosses a compound threshold specified by t1
and t2.
Parameters
----------
asa : AnalogSignalArray
AnalogSignalArray containing a single channel
t1 : float, optional
Primary threshold. Minimum signal value that has to be reached /
exceeded during an event. Default is 3 standard deviations above signal
mean.
t2 : float, optional
Secondary threshold. Signal value that defines the event boundaries.
Default is signal mean.
mode : string, optional
Mode of operation. One of ['above', 'below']. If 'above', then return
epochs where the signal exceeds the compound threshold, and if 'below',
then return epochs where the signal falls below the compound threshold.
Default is 'above'.
Returns
-------
epochs : EpochArray
EpochArray with all the epochs where the signal satisfied the criteria.
"""
if asa.n_signals > 1:
raise TypeError("multidimensional AnalogSignalArrays not supported!")
x = asa.data.squeeze()
if t1 is None: # by default, threshold is 3 SDs above mean of x
t1 = np.mean(x) + 3*np.std(x)
if t2 is None: # by default, revert back to mean of x
t2 = np.mean(x)
# compute periods where signal exceeds compound threshold
epoch_bounds, _, _ = get_events_boundaries(
x=x,
PrimaryThreshold=t1,
SecondaryThreshold=t2,
mode=mode
)
# convert bounds to time in seconds
epoch_bounds = asa.time[epoch_bounds]
if len(epoch_bounds) == 0:
return type(asa._abscissa.support)(empty=True)
# add 1/fs to stops for open interval
epoch_bounds[:,1] += 1/asa.fs
# create EpochArray with threshold-exceeding bounds
epochs = type(asa._abscissa.support)(epoch_bounds)
return epochs
def get_run_epochs(speed, v1=10, v2=8):
"""Return epochs where animal is running at least as fast as
specified by v1 and v2.
Parameters
----------
speed : AnalogSignalArray
AnalogSignalArray containing single channel speed, in units/sec
v1 : float, optional
Minimum speed (in same units as speed) that has to be reached /
exceeded during an event. Default is 10 [units/sec]
v2 : float, optional
Speed that defines the event boundaries. Default is 8 [units/sec]
Returns
-------
run_epochs : EpochArray
EpochArray with all the epochs where speed satisfied the criteria.
"""
run_epochs = get_threshold_crossing_epochs(asa=speed, t1=v1, t2=v2, mode='above')
return run_epochs
def get_inactive_epochs(speed, v1=5, v2=7):
"""Return epochs where animal is running no faster than specified by
v1 and v2.
Parameters
----------
speed : AnalogSignalArray
AnalogSignalArray containing single channel speed, in units/sec
v1 : float, optional
Maximum speed (in same units as speed) below which the animal must drop
during an event. Default is 5 [units/sec]
v2 : float, optional
Speed that defines the event boundaries. Default is 7 [units/sec]
Returns
-------
inactive_epochs : EpochArray
EpochArray with all the epochs where speed satisfied the criteria.
"""
inactive_epochs = get_threshold_crossing_epochs(asa=speed, t1=v1, t2=v2, mode='below')
return inactive_epochs
def spiketrain_union(st1, st2):
"""Join two spiketrains together.
WARNING! This function should be improved a lot!
"""
assert st1.n_units == st2.n_units
support = st1.support.join(st2.support)
newdata = []
for unit in range(st1.n_units):
newdata.append(np.append(st1.time[unit], st2.time[unit]))
fs = None
if st1.fs == st2.fs:
fs = st1.fs
return core.SpikeTrainArray(newdata, support=support, fs=fs)
########################################################################
# uncurated below this line!
########################################################################
def find_nearest_idx(array, val):
"""Finds nearest index in array to value.
Parameters
----------
array : np.array
val : float
Returns
-------
Index into array that is closest to val
TODO: this is a better version that should be incorporated:
# Based on answer here: http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
def find_nearest(array,values):
right_idxs = np.searchsorted(array, values, side="left")
left_idxs = np.where(right_idxs > 0, right_idxs-1, right_idxs)
right_idxs = np.where(right_idxs == len(array), len(array)-1, right_idxs)
closest_idx = np.where(np.abs(values - array[right_idxs]) < np.abs(values - array[left_idxs]),
right_idxs, left_idxs)
return closest_idx
"""
return (np.abs(array-val)).argmin()
def find_nearest_indices(array, vals):
"""Finds nearest index in array to value.
Parameters
----------
array : np.array
This is the array you wish to index into.
vals : np.array
This is the array that you are getting your indices from.
Returns
-------
Indices into array that are closest to vals.
Notes
-----
Wrapper around find_nearest_idx().
"""
return np.array([find_nearest_idx(array, val) for val in vals], dtype=int)
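# Illustrative sketch (added for exposition; not part of the original module).
# The hypothetical helper below shows the nearest-index lookups on a small
# array of sample positions.
def _demo_nearest_lookups():
    arr = np.array([0.0, 0.5, 1.0, 1.5])
    single = find_nearest_idx(arr, 0.7)                        # -> 1
    several = find_nearest_indices(arr, np.array([0.1, 1.4]))  # -> [0, 3]
    return single, several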
def get_sort_idx(tuning_curves):
"""Finds indices to sort neurons by max firing in tuning curve.
Parameters
----------
tuning_curves : list of lists
Where each inner list is the tuning curves for an individual
neuron.
Returns
-------
sorted_idx : list
List of integers that correspond to the neuron in sorted order.
"""
tc_max_loc = []
for i, neuron_tc in enumerate(tuning_curves):
tc_max_loc.append((i, np.where(neuron_tc == np.max(neuron_tc))[0][0]))
sorted_by_tc = sorted(tc_max_loc, key=lambda x: x[1])
sorted_idx = []
for idx in sorted_by_tc:
sorted_idx.append(idx[0])
return sorted_idx
def collapse_time(obj, gap=0):
"""Collapse all epochs in a SpikeTrainArray and collapse them into a single, contiguous SpikeTrainArray"""
# TODO: redo SpikeTrainArray so as to keep the epochs separate!, and to support gaps!
# We'll have to adjust all the spikes per epoch... and we'll have to compute a new support. Also set a flag!
# If it's a SpikeTrainArray, then we left-shift the spike times. If it's an AnalogSignalArray, then we
# left-shift the time and tdata.
# Also set a new attribute, with the boundaries in seconds.
if isinstance(obj, core.RegularlySampledAnalogSignalArray):
new_obj = type(obj)(empty=True)
new_obj._data = obj._data
durations = obj.support.durations
starts = np.insert(np.cumsum(durations + gap),0,0)[:-1]
stops = starts + durations
newsupport = type(obj._abscissa.support)(np.vstack((starts, stops)).T)
new_obj._support = newsupport
new_time = obj.time.astype(float) # fast copy
time_idx = np.insert(np.cumsum(obj.lengths),0,0)
new_offset = 0
for epidx in range(obj.n_epochs):
if epidx > 0:
new_time[time_idx[epidx]:time_idx[epidx+1]] = new_time[time_idx[epidx]:time_idx[epidx+1]] - obj.time[time_idx[epidx]] + new_offset + gap
new_offset += durations[epidx] + gap
else:
new_time[time_idx[epidx]:time_idx[epidx+1]] = new_time[time_idx[epidx]:time_idx[epidx+1]] - obj.time[time_idx[epidx]] + new_offset
new_offset += durations[epidx]
new_obj._time = new_time
new_obj._fs = obj._fs
elif isinstance(obj, core.EventArray):
if gap > 0:
raise ValueError("gaps not supported for SpikeTrainArrays yet!")
new_obj = type(obj)(empty=True)
new_time = [[] for _ in range(obj.n_series)]
duration = 0
for st_ in obj:
le = st_.support.start
for unit_ in range(obj.n_series):
new_time[unit_].extend(st_._data[unit_] - le + duration)
duration += st_.support.duration
new_time = np.asanyarray([np.asanyarray(unittime) for unittime in new_time])
new_obj._data = new_time
new_obj.support = type(obj._abscissa.support)([0, duration])
new_obj._series_ids = obj._series_ids
new_obj._series_labels = obj._series_labels
new_obj._series_tags = obj._series_tags
elif isinstance(obj, core.BinnedEventArray):
raise NotImplementedError("BinnedEventArrays are not yet supported, but bst.data is essentially already collapsed!")
else:
raise TypeError("unsupported type for collapse_time")
return new_obj
def cartesian(xcenters, ycenters):
"""Finds every combination of elements in two arrays.
Parameters
----------
xcenters : np.array
ycenters : np.array
Returns
-------
cartesian : np.array
With shape(n_sample, 2).
"""
return np.transpose([np.tile(xcenters, len(ycenters)), np.repeat(ycenters, len(xcenters))])
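# Illustrative sketch (added for exposition; not part of the original module).
# The hypothetical helper below shows that cartesian() enumerates every
# (x, y) pairing of the two center arrays, row by row.
def _demo_cartesian():
    xc = np.array([1, 2])
    yc = np.array([10, 20, 30])
    return cartesian(xc, yc)  # shape (6, 2): [[1, 10], [2, 10], [1, 20], ...]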
| 37.135721
| 190
| 0.615709
| 9,571
| 73,603
| 4.647163
| 0.113154
| 0.011691
| 0.014029
| 0.007554
| 0.433473
| 0.377018
| 0.340573
| 0.322721
| 0.310895
| 0.293853
| 0
| 0.017809
| 0.287475
| 73,603
| 1,981
| 191
| 37.154467
| 0.830295
| 0.429276
| 0
| 0.371397
| 0
| 0
| 0.065926
| 0.006021
| 0
| 0
| 0
| 0.004038
| 0.007761
| 1
| 0.064302
| false
| 0.002217
| 0.025499
| 0.006652
| 0.175166
| 0.001109
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37f1a13e31b524b47983953b4e76242354934ac4
| 23,625
|
py
|
Python
|
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
|
logan-siyao-peng/Paddle
|
10a8f3e5c3151c1abb810fba2994cc30e1232bec
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
|
logan-siyao-peng/Paddle
|
10a8f3e5c3151c1abb810fba2994cc30e1232bec
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
|
logan-siyao-peng/Paddle
|
10a8f3e5c3151c1abb810fba2994cc30e1232bec
|
[
"Apache-2.0"
] | 1
|
2021-01-17T01:11:45.000Z
|
2021-01-17T01:11:45.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import numpy as np
import sys
import os
import paddle
from paddle.fluid import dygraph, core, framework
from paddle.fluid.executor import Executor
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from paddle.nn import Linear, Conv2D, Conv2DTranspose, MaxPool2D, MaxPool1D, BatchNorm1D, BatchNorm2D, BatchNorm3D
from paddle.fluid.dygraph.nn import BatchNorm, Pool2D
from paddle.fluid.io import load_inference_model, save_inference_model
from paddle.nn.layer.activation import ReLU, LeakyReLU, Sigmoid, ReLU6, Tanh, Softmax, PReLU, Swish
from paddle.fluid.log_helper import get_logger
from . import quant_nn
from .. import quantization_pass
__all__ = ['ImperativeQuantAware', 'ImperativeCalcOutScale']
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
_op_real_in_out_name = {
"conv2d": [["Input", "Filter"], ["Output"]],
"conv2d_transpose": [["Input", "Filter"], ["Output"]],
"pool2d": [["X"], ["Out"]],
"elementwise_add": [["X", "Y"], ["Out"]],
"softmax": [["X"], ["Out"]],
"relu": [["X"], ["Out"]],
"relu6": [["X"], ["Out"]],
"leaky_relu": [["X"], ["Out"]],
"prelu": [["X"], ["Out"]],
"tanh": [["X"], ["Out"]],
"batch_norm": [["X"], ["Y"]],
"sigmoid": [["X"], ["Out"]],
"swish": [["X"], ["Out"]],
}
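# Illustrative sketch (added for exposition; not part of the original file).
# Each entry above maps an op type to its [input slot names, output slot names];
# the hypothetical helper below only shows how the table is read.
def _demo_op_slots(op_type="conv2d"):
    in_names, out_names = _op_real_in_out_name[op_type]
    return in_names, out_names  # (["Input", "Filter"], ["Output"])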
class ImperativeQuantAware(object):
"""
Add the fake quant logic for given quantizable layers, namely add the quant_dequant
computational logic both for activation inputs and weight inputs.
"""
def __init__(self,
weight_bits=8,
activation_bits=8,
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max',
moving_rate=0.9,
quantizable_layer_type=['Conv2D', 'Linear'],
weight_preprocess_layer=None,
act_preprocess_layer=None,
weight_quantize_layer=None,
act_quantize_layer=None):
r"""
The constructor for ImperativeQuantAware.
Args:
weight_bits(int): quantization bit number for weights,
whereas the bias is not quantized.
activation_bits(int): quantization bit number for activations.
weight_quantize_type(str): quantization type for weights,
which supports 'abs_max' now. The 'moving_average_abs_max'
usually is not used for weights, since weights are fixed once the
model is well trained.
activation_quantize_type(str): quantization type for activations,
which supports 'abs_max' and 'moving_average_abs_max' now.
If using 'abs_max' mode, the quantization scale will be calculated
dynamically each step in both training and testing period. If using
'moving_average_abs_max', the static quantization scale will be calculated
during training and used in inference.
moving_rate(float): the parameter for 'moving_average_abs_max' quantization.
quantizable_layer_type(list[str]): List the type of layers that will be quantized.
Default is ['Conv2D', 'Linear']. The quantizable_op_type in
QuantizationFreezePass and ConvertToInt8Pass must be the same as this.
weight_preprocess_layer(paddle.nn.Layer, optional): A paddle Layer that defines how to preprocess
weight before quantization. Using this can quickly test if user's
preprocess method works or not. The input is non-quantized
weight and function returns processed weight to be quantized.
If None, the weight will be quantized directly. Default is None.
act_preprocess_layer(paddle.nn.Layer, optional): A paddle Layer that defines how to preprocess
activation before quantization. Using this can quickly test if user's
preprocess method works or not. The input is non-quantized
activation and function returns processed activation to be quantized.
If None, the activation will be quantized directly. Default is None.
weight_quantize_layer(paddle.nn.Layer, optional): A paddle Layer that defines how to quantize weight.
Using this can quickly test if user's quantization method works or not.
In this layer, user should both define quantization method and
dequantization method, that is, the function's input is non-quantized
weight and returns dequantized weight. If None, will use
quantization op defined by 'weight_quantize_type'. Default is None.
act_quantize_layer(paddle.nn.Layer, optional): A paddle Layer that defines how to quantize activation.
Using this can quickly test if user's quantization method works or not.
In this layer, user should both define quantization method and
dequantization method, that is, the function's input is non-quantized
activation and returns dequantized activation. If None, will use
quantization op defined by 'activation_quantize_type'. Default is None.
Note:
If the user sets the attribute 'skip_quant' on a Layer that supports dynamic
quantization and sets it to True, the layer will not be quantized during training.
If this attribute is not set, or is set to False, the Layer will be quantized during training.
Examples 1:
.. code-block:: python
import paddle
from paddle.fluid.contrib.slim.quantization \
import ImperativeQuantAware
from paddle.vision.models \
import resnet
model = resnet.resnet50(pretrained=True)
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
# Add the fake quant logic.
# The original model will be rewritten in place.
# The outscale of outputs in supported layers will be calculated.
imperative_qat.quantize(model)
# Fine-tune the quantized model
# ...
# Save quant model for the inference.
imperative_qat.save_quantized_model(
layer=model,
model_path="./resnet50_qat",
input_spec=[
paddle.static.InputSpec(
shape=[None, 3, 224, 224], dtype='float32')])
Examples 2:
.. code-block:: python
import paddle
from paddle.fluid.contrib.slim.quantization \
import ImperativeQuantAware
class ImperativeModel(paddle.nn.Layer):
def __init__(self):
super(ImperativeModel, self).__init__()
# self.linear_0 would skip the quantization.
self.linear_0 = paddle.nn.Linear(784, 400)
self.linear_0.skip_quant = True
# self.linear_1 would not skip the quantization.
self.linear_1 = paddle.nn.Linear(400, 10)
self.linear_1.skip_quant = False
def forward(self, inputs):
x = self.linear_0(inputs)
x = self.linear_1(x)
return x
model = ImperativeModel()
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
# Add the fake quant logic.
# The original model will be rewritten in place.
#
# Only one Layer (self.linear_1) will have the fake quant
# logic added.
imperative_qat.quantize(model)
# Fine-tune the quantized model
# ...
# Save quant model for the inference.
imperative_qat.save_quantized_model(
layer=model,
model_path="./imperative_model_qat")
"""
super(ImperativeQuantAware, self).__init__()
self._weight_bits = weight_bits
self._activation_bits = activation_bits
self._moving_rate = moving_rate
self._activation_quantize_type = activation_quantize_type
self._weight_quantize_type = weight_quantize_type
self._weight_pre_layer = weight_preprocess_layer
self._act_pre_layer = act_preprocess_layer
self._weight_quant_layer = weight_quantize_layer
self._act_quant_layer = act_quantize_layer
self._out_scale = ImperativeCalcOutScale()
t_check = lambda method: method is None or issubclass(method, dygraph.layers.Layer)
assert t_check(
self._weight_pre_layer), "weight_preprocess should be nn.Layer"
assert t_check(self._act_pre_layer), "act_preprocess should be nn.Layer"
assert t_check(
self._weight_quant_layer), "weight_quantize should be nn.Layer"
assert t_check(self._act_quant_layer), "act_quantize should be nn.Layer"
quant_type = {
'abs_max', 'moving_average_abs_max', 'channel_wise_abs_max'
}
assert activation_quantize_type != 'channel_wise_abs_max', \
"The activation quantization type does not support 'channel_wise_abs_max'."
if activation_quantize_type not in quant_type:
raise ValueError(
"Unknown activation_quantize_type : '%s'. It can only be "
"'abs_max' or 'moving_average_abs_max' now." %
(str(activation_quantize_type)))
if weight_quantize_type not in quant_type:
raise ValueError(
"Unknown weight_quantize_type: '%s'. It can only be "
"'abs_max' or 'moving_average_abs_max' or 'channel_wise_abs_max' now."
% (str(weight_quantize_type)))
self._quant_layers_map = {
'Conv2D': Conv2D,
'Linear': Linear,
'Pool2D': Pool2D,
'ReLU': ReLU,
'LeakyReLU': LeakyReLU,
'ReLU6': ReLU6,
'Softmax': Softmax,
'Tanh': Tanh,
'Swish': Swish
}
self._quantizable_layer_type = tuple(
self._quant_layers_map[layer]
if layer in self._quant_layers_map else layer
for layer in quantizable_layer_type)
for layer in self._quantizable_layer_type:
assert not isinstance(
layer, str), "{} is unsupported to be quantized.".format(layer)
def quantize(self, model):
"""
According to the weight and activation quantization types, fake quant ops such as
fake_quantize_dequantize_moving_average_abs_max and fake_quantize_dequantize_abs_max
are added to the model. At the same time, the out_scale values of the outputs are calculated.
Args:
model(fluid.dygraph.Layer): the model to be quantized.
Returns:
None
"""
for name, layer in model.named_sublayers():
if not isinstance(layer, self._quantizable_layer_type):
continue
if hasattr(layer, "skip_quant") and layer.skip_quant == True:
continue
scopes = name.split('.')
target = scopes[-1]
obj = model
parent = model
for i in range(len(scopes) - 1):
obj = getattr(parent, scopes[i])
parent = obj
quant_layer = self._get_quantized_counterpart(layer)
setattr(quant_layer, "layer_name", layer.full_name())
setattr(obj, target, quant_layer)
self._out_scale.calc_out_scale(model)
def _get_quantized_counterpart(self, layer):
quant_layers = tuple(self._quant_layers_map.values())
quantized_counterpart = tuple('Quantized' + k
for k in self._quant_layers_map.keys())
predicate = lambda value: isinstance(layer, value)
index_generator = (i for i, v in enumerate(quant_layers)
if predicate(v))
try:
index = next(index_generator)
except StopIteration:
_logger.fatal("The layer {} is unsupported to be quantized.".format(
layer.full_name()))
sys.exit(-1)
layer_with_weight = ['QuantizedConv2D', 'QuantizedLinear']
if quantized_counterpart[index] not in layer_with_weight:
quant_layer_class_name = 'QuantizedNoweightLayer'
else:
quant_layer_class_name = quantized_counterpart[index]
quantized_layer = quant_nn.__dict__[quant_layer_class_name](
layer, self._weight_bits, self._activation_bits, self._moving_rate,
self._weight_quantize_type, self._activation_quantize_type,
self._weight_pre_layer, self._act_pre_layer,
self._weight_quant_layer, self._act_quant_layer)
return quantized_layer
def save_quantized_model(self, layer, path, input_spec=None, **config):
self._out_scale.save_quantized_model(layer, path, input_spec, **config)
class ImperativeCalcOutScale(object):
def __init__(self, moving_rate=0.9):
"""
Add the logic of calculating and setting output quantization scales of some layers.
These output quantization scales may be used by tensorRT or some other inference engines.
Args:
moving_rate(float): The decay coefficient of moving average. The default value is 0.9.
"""
super(ImperativeCalcOutScale, self).__init__()
self._moving_rate = moving_rate
self._out_scale_layer_type_list = (
BatchNorm, BatchNorm1D, BatchNorm2D, BatchNorm3D, Conv2D,
Conv2DTranspose, LeakyReLU, Linear, PReLU, Pool2D, MaxPool1D,
MaxPool2D, ReLU, ReLU6, Sigmoid, Softmax, Tanh, Swish)
self._register_hook_handle_list = []
self._out_scale_dict = collections.OrderedDict()
def calc_out_scale(self, model):
"""
Insert the `moving_average_abs_max_scale` op to calculate the output scales of specific layers in the model.
Args:
model(fluid.dygraph.Layer): The target model for which the output quantization scales will be calculated.
Returns:
None
"""
assert isinstance(
model, dygraph.Layer), "model must be the instance of dygraph.Layer"
for _, layer in model.named_sublayers():
if not isinstance(layer, self._out_scale_layer_type_list):
if 'quantized_' not in layer.full_name():
continue
forward_post_hook_handle = layer.register_forward_post_hook(
self._forward_post_hook)
self._register_hook_handle_list.append(forward_post_hook_handle)
def save_quantized_model(self, layer, path, input_spec=None, **config):
"""
Save the quantized model for the inference.
Args:
layer (Layer): The Layer to be saved.
path (str): The path prefix to save model. The format is ``dirname/file_prefix`` or ``file_prefix``.
input_spec (list[InputSpec|Tensor], optional): Describes the input of the saved model's forward
method, which can be described by InputSpec or example Tensor. If None, all input variables of
the original Layer's forward method would be the inputs of the saved model. Default None.
**config (dict, optional): Other save configuration options for compatibility. We do not
recommend using these configurations, they may be removed in the future. If not necessary,
DO NOT use them. Default None.
The following options are currently supported:
(1) output_spec (list[Tensor]): Selects the output targets of the saved model.
By default, all return variables of original Layer's forward method are kept as the
output of the saved model. If the provided ``output_spec`` list is not all output variables,
the saved model will be pruned according to the given ``output_spec`` list.
Returns:
None
"""
assert isinstance(
layer, dygraph.Layer), "model must be the instance of dygraph.Layer"
is_dynamic_mode = False
with dygraph.guard():
layer.eval()
for handle in self._register_hook_handle_list:
handle.remove()
for key in self._out_scale_dict:
self._out_scale_dict[key] = float(self._out_scale_dict[key]
.numpy())
if paddle.in_dynamic_mode():
is_dynamic_mode = True
paddle.enable_static()
paddle.jit.save(layer=layer, path=path, input_spec=input_spec, **config)
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = Executor(place)
file_prefix = os.path.basename(path)
dirname = os.path.dirname(path)
model_filename = file_prefix + INFER_MODEL_SUFFIX
params_filename = file_prefix + INFER_PARAMS_SUFFIX
[inference_program, feed_target_names, fetch_targets] = (
load_inference_model(
dirname=dirname,
executor=exe,
model_filename=model_filename,
params_filename=params_filename))
# Traverse all ops in the program and find out the op matching
# the Layer in the dynamic graph.
layer_var_dict = {}
ops_list = [key for key, _ in self._out_scale_dict.items()]
op_count = 0
for block in inference_program.blocks:
for op in block.ops:
if op.type in _op_real_in_out_name:
if op.type in ["batch_norm", "pool2d"]:
if op.type == "pool2d" and op.attr(
"pooling_type") != "max":
continue
op_count = self.op_match(op, ops_list, op_count)
if op_count >= len(ops_list):
continue
op._set_attr('out_threshold',
self._out_scale_dict[ops_list[op_count]])
op_count += 1
else:
output_var_names = quantization_pass._get_op_output_var_names(
op)
for output_var_name in output_var_names:
output_var_tensor = block.var(output_var_name)
if output_var_tensor.dtype not in [
core.VarDesc.VarType.FP64,
core.VarDesc.VarType.FP32
]:
continue
# Because the Layer in dygraph may correspond to multiple ops
# in static program after being saved. To ensure correctness,
# the outscale collected for output of dygraph Layer can only
# be set to the last op in the corresponding ops in static program.
#
# We can judge the execution order of the ops which corresponding
# to dygraph Layer by the name of output. And use dict to save
# the corresponding relationship between the dygraph Layer and the
# static graph op that needs to set the outscale attribute.
if '.' not in output_var_name:
continue
dynamic_layer_name, var_name_suffix = output_var_name.split(
".")
if dynamic_layer_name in layer_var_dict:
if layer_var_dict[dynamic_layer_name][
0] < var_name_suffix:
layer_var_dict[dynamic_layer_name] = [
var_name_suffix, op
]
else:
layer_var_dict[dynamic_layer_name] = [
var_name_suffix, op
]
# Because the naming styles of static and dynamic graph are different,
# in order to avoid mistakes, we unify the name here.
for (layer_name, var_name_op_list) in layer_var_dict.items():
if 'prelu' in layer_name:
layer_name = layer_name.replace('prelu', 'p_re_lu')
if 'relu' in layer_name:
layer_name = layer_name.replace('relu', 're_lu')
if layer_name not in self._out_scale_dict:
continue
var_name_op_list[1]._set_attr('out_threshold',
self._out_scale_dict[layer_name])
# Save the processed program.
save_inference_model(
dirname=dirname,
feeded_var_names=feed_target_names,
target_vars=fetch_targets,
executor=exe,
main_program=inference_program.clone(),
model_filename=model_filename,
params_filename=params_filename)
if is_dynamic_mode:
paddle.disable_static()
def op_match(self, op, ops_list, op_count):
while op_count < len(ops_list) and op.type not in ops_list[op_count]:
op_count += 1
while op_count < len(ops_list) and op.type == "pool2d" and op.attr(
"pooling_type") != "max":
op_count += 1
return op_count
def _forward_post_hook(self, layer, input, output):
assert isinstance(
output, (core.VarBase, framework.Variable)
), "Multiple outputs are not currently supported in ImperativeOutScale."
if output.dtype not in [
core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP64
]:
return
if not hasattr(layer, "_out_scale"):
layer._out_scale = quant_nn.MovingAverageAbsMaxScale(
output.name, self._moving_rate, output.dtype)
scale_out = layer._out_scale(output)
if hasattr(layer, 'layer_name'):
layer_name = layer.layer_name
else:
layer_name = layer.full_name()
self._out_scale_dict[layer_name] = scale_out
| 46.232877
| 114
| 0.600974
| 2,721
| 23,625
| 4.983094
| 0.173098
| 0.011505
| 0.01239
| 0.016815
| 0.344126
| 0.274357
| 0.216093
| 0.19972
| 0.175972
| 0.155542
| 0
| 0.006606
| 0.327238
| 23,625
| 510
| 115
| 46.323529
| 0.846483
| 0.376466
| 0
| 0.149635
| 0
| 0
| 0.096385
| 0.018669
| 0
| 0
| 0
| 0
| 0.032847
| 1
| 0.032847
| false
| 0.007299
| 0.058394
| 0
| 0.109489
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37f415fea8c9ac7d7647ab03f1f9ceb7a0593bde
| 1,815
|
py
|
Python
|
sc/northwind.py
|
elliotgunn/DS-Unit-3-Sprint-2-SQL-and-Databases
|
c730e2b3e66199226fa7549511cbb7801eb7a694
|
[
"MIT"
] | null | null | null |
sc/northwind.py
|
elliotgunn/DS-Unit-3-Sprint-2-SQL-and-Databases
|
c730e2b3e66199226fa7549511cbb7801eb7a694
|
[
"MIT"
] | null | null | null |
sc/northwind.py
|
elliotgunn/DS-Unit-3-Sprint-2-SQL-and-Databases
|
c730e2b3e66199226fa7549511cbb7801eb7a694
|
[
"MIT"
] | null | null | null |
import pandas as pd
import sqlite3
from pandas import DataFrame
n_conn = sqlite3.connect('northwind_small.sqlite3')
n_curs = n_conn.cursor()
# What are the ten most expensive items (per unit price) in the database?
query = """
SELECT ProductName, UnitPrice
FROM Product
ORDER BY UnitPrice DESC
LIMIT 10
"""
n_curs.execute(query)
print(n_curs.fetchall())
# What is the average age of an employee at the time of their hiring? (Hint: a
# lot of arithmetic works with dates.)
query = """
SELECT AVG(HireDate-BirthDate)
FROM Employee
"""
n_curs.execute(query)
print(n_curs.fetchall())
# answer: 37.22
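# Illustrative sketch (added for exposition; not part of the original script):
# the bare HireDate-BirthDate subtraction above lets SQLite coerce the date
# strings to their leading year, so it effectively compares years only. A
# day-accurate variant can use julianday(), assuming the dates are stored as
# ISO-8601 strings.
query = """
SELECT AVG((julianday(HireDate) - julianday(BirthDate)) / 365.25)
FROM Employee
"""
n_curs.execute(query)
print(n_curs.fetchall())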
# (*Stretch*) How does the average age of employee at hire vary by city?
query = """SELECT City, AVG(HireDate-BirthDate)
FROM Employee
GROUP BY City
"""
n_curs.execute(query)
print(n_curs.fetchall())
# What are the ten most expensive items (per unit price)
# in the database *and* their suppliers?
query = """
SELECT ProductName, UnitPrice, CompanyName
FROM Product as p
JOIN Supplier as s
ON p.SupplierID = s.ID
ORDER BY UnitPrice DESC
LIMIT 10
"""
n_curs.execute(query)
print(n_curs.fetchall())
# What is the largest category (by number of unique products in it)?
query = """
SELECT CategoryName, COUNT(CategoryName)
FROM Category as c
JOIN Product as p
ON c.ID=p.CategoryID
GROUP BY CategoryName
ORDER by COUNT(CategoryName) DESC
"""
n_curs.execute(query)
print(n_curs.fetchall())
# largest category is Confections 13
# (*Stretch*) Who's the employee with the most territories? Use `TerritoryId`
# (not name, region, or other fields) as the unique identifier for territories.
# EMPLOYEE ID 7
query = """
SELECT EmployeeId, TerritoryId, COUNT(DISTINCT TerritoryId)
FROM EmployeeTerritory
GROUP BY EmployeeId
ORDER BY COUNT(DISTINCT TerritoryId) DESC
"""
n_curs.execute(query)
print(n_curs.fetchall())
| 22.6875
| 79
| 0.755923
| 278
| 1,815
| 4.877698
| 0.384892
| 0.047935
| 0.053097
| 0.075221
| 0.343658
| 0.29646
| 0.29646
| 0.29646
| 0.270649
| 0.187316
| 0
| 0.009044
| 0.147107
| 1,815
| 79
| 80
| 22.974684
| 0.866925
| 0.349862
| 0
| 0.54717
| 0
| 0
| 0.557461
| 0.059177
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.056604
| 0
| 0.056604
| 0.113208
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37f5b2546c850f56d3d094ea379b377bba04af7c
| 2,051
|
py
|
Python
|
lib/MergeMetabolicAnnotations/utils/CompareAnnotationsUtil.py
|
jeffkimbrel/MergeMetabolicAnnotations
|
ec971d114d57942cef73dc2980c8faf48cea7afe
|
[
"MIT"
] | 1
|
2021-08-04T15:42:46.000Z
|
2021-08-04T15:42:46.000Z
|
lib/MergeMetabolicAnnotations/utils/CompareAnnotationsUtil.py
|
jeffkimbrel/MergeMetabolicAnnotations
|
ec971d114d57942cef73dc2980c8faf48cea7afe
|
[
"MIT"
] | 3
|
2019-02-01T22:14:02.000Z
|
2021-02-03T03:16:52.000Z
|
lib/MergeMetabolicAnnotations/utils/CompareAnnotationsUtil.py
|
jeffkimbrel/MergeMetabolicAnnotations
|
ec971d114d57942cef73dc2980c8faf48cea7afe
|
[
"MIT"
] | 3
|
2018-11-30T21:31:00.000Z
|
2021-01-12T16:13:01.000Z
|
import os
import datetime
import logging
import json
import uuid
from installed_clients.WorkspaceClient import Workspace as Workspace
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.annotation_ontology_apiServiceClient import annotation_ontology_api
import MergeMetabolicAnnotations.utils.functions as f
class CompareAnnotationsUtil:
def __init__(self, config):
self.config = config
self.timestamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
self.callback_url = config['SDK_CALLBACK_URL']
self.scratch = config['scratch']
self.kbr = KBaseReport(self.callback_url)
self.anno_api = annotation_ontology_api()
self.ws_client = Workspace(config["workspace-url"])
def run(self, ctx, params):
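        # Pull the genome's annotation-ontology events, filter them to the user's
        # selection, dump the selection to scratch, then build and return an HTML
        # comparison report via KBaseReport.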
get_ontology_results = self.anno_api.get_annotation_ontology_events({
"input_ref": params['genome'],
"workspace-url": self.config["workspace-url"]
})
ontology_selected = f.filter_selected_ontologies(
get_ontology_results, params, workflow="compare")
with open(os.path.join(self.scratch, "get_ontology_dump.json"), 'w') as outfile:
json.dump(ontology_selected, outfile, indent=2)
# make reports
html_reports = []
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
os.mkdir(output_directory)
event_summary = f.get_event_lists(ontology_selected)
html_reports = f.compare_report_stack(html_reports, event_summary, output_directory)
# finalize html reports
report_params = {
'message': '',
'html_links': html_reports,
'direct_html_link_index': 0,
'workspace_name': params['workspace_name'],
'report_object_name': f'compare_annotations_{uuid.uuid4()}'}
report_output = self.kbr.create_extended_report(report_params)
return {'report_name': report_output['name'],
'report_ref': report_output['ref']}
| 36.625
| 92
| 0.686494
| 235
| 2,051
| 5.685106
| 0.395745
| 0.041168
| 0.04491
| 0.020958
| 0.031437
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002478
| 0.213067
| 2,051
| 55
| 93
| 37.290909
| 0.825279
| 0.016577
| 0
| 0
| 0
| 0
| 0.134558
| 0.038729
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.219512
| 0
| 0.317073
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37f85d17e2772b9092e4ca6adf7715edc27bc547
| 1,168
|
py
|
Python
|
src/backend/tests/test_game/test_models.py
|
ToJestKrzysio/TheJungleGame
|
904dd4adc937145df2c8c353eb83bec3b5dd1f7e
|
[
"MIT"
] | null | null | null |
src/backend/tests/test_game/test_models.py
|
ToJestKrzysio/TheJungleGame
|
904dd4adc937145df2c8c353eb83bec3b5dd1f7e
|
[
"MIT"
] | null | null | null |
src/backend/tests/test_game/test_models.py
|
ToJestKrzysio/TheJungleGame
|
904dd4adc937145df2c8c353eb83bec3b5dd1f7e
|
[
"MIT"
] | null | null | null |
from unittest.mock import Mock, patch
import numpy as np
from game.models import ValuePolicyModel
def test_predict():
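    # Only two board positions are legal (masked); predict() should zero the rest,
    # renormalise the policy so each legal move gets 0.5, and return the value as a float.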
mask = np.zeros((9, 7, 8), dtype=bool)
mask[1, 2, 3] = 1
mask[6, 6, 6] = 1
tensor_mock = Mock()
policy_tensor = np.zeros((9, 7, 8), dtype=float)
policy_tensor[0, 0, 0] = 10
policy_tensor[1, 2, 3] = 100
policy_tensor[6, 6, 6] = 100
policy_tensor = policy_tensor.reshape(-1)
value = np.array([[0.7]], dtype=float)
get_prediction_mock = Mock(return_value=(value, policy_tensor))
network_mock = Mock(spec=ValuePolicyModel, output_shape=(9, 7, 8),
input_shape=(9, 7, 178), _get_prediction=get_prediction_mock)
result_value, result_policy = ValuePolicyModel.predict(
self=network_mock, tensor=tensor_mock, mask=mask)
get_prediction_mock.assert_called_once_with(tensor_mock)
expected_value = 0.7
expected_policy = np.zeros((9, 7, 8))
expected_policy[1, 2, 3] = 0.5
expected_policy[6, 6, 6] = 0.5
assert isinstance(result_value, float)
assert result_value == expected_value
assert np.array_equal(result_policy, expected_policy)
| 27.809524
| 85
| 0.673801
| 174
| 1,168
| 4.298851
| 0.304598
| 0.112299
| 0.016043
| 0.036096
| 0.053476
| 0.040107
| 0
| 0
| 0
| 0
| 0
| 0.061555
| 0.207192
| 1,168
| 41
| 86
| 28.487805
| 0.74622
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 1
| 0.037037
| false
| 0
| 0.111111
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37fbd663edc97f78d91a3917050b5ae91d7a6023
| 2,191
|
py
|
Python
|
examples/labs/demo_dmtx.py
|
yarikoptic/nipy
|
749302c7ffa8ea714cc32d405f0df521102bbc6f
|
[
"BSD-3-Clause"
] | null | null | null |
examples/labs/demo_dmtx.py
|
yarikoptic/nipy
|
749302c7ffa8ea714cc32d405f0df521102bbc6f
|
[
"BSD-3-Clause"
] | null | null | null |
examples/labs/demo_dmtx.py
|
yarikoptic/nipy
|
749302c7ffa8ea714cc32d405f0df521102bbc6f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
Examples of design matrix specification and computation (event-related
design, FIR design, etc)
Requires matplotlib
Author : Bertrand Thirion: 2009-2010
"""
print(__doc__)
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
from nipy.modalities.fmri.design_matrix import make_dmtx
from nipy.modalities.fmri.experimental_paradigm import (EventRelatedParadigm,
BlockParadigm)
# frame times
tr = 1.0
nscans = 128
frametimes = np.linspace(0, (nscans - 1) * tr, nscans)
# experimental paradigm
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c3', 'c3', 'c3']
onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
hrf_model = 'canonical'
motion = np.cumsum(np.random.randn(128, 6), 0)
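# six simulated motion parameters (a cumulative random walk) used as extra nuisance regressors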
add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
# event-related design matrix
paradigm = EventRelatedParadigm(conditions, onsets)
X1 = make_dmtx(
frametimes, paradigm, drift_model='polynomial', drift_order=3,
add_regs=motion, add_reg_names=add_reg_names)
# block design matrix
duration = 7 * np.ones(9)
paradigm = BlockParadigm(con_id=conditions, onset=onsets,
duration=duration)
X2 = make_dmtx(frametimes, paradigm, drift_model='polynomial',
drift_order=3)
# FIR model
paradigm = EventRelatedParadigm(conditions, onsets)
hrf_model = 'FIR'
X3 = make_dmtx(frametimes, paradigm, hrf_model='fir',
drift_model='polynomial', drift_order=3,
fir_delays=np.arange(1, 6))
# plot the results
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot(1, 3, 1)
X1.show(ax=ax)
ax.set_title('Event-related design matrix', fontsize=12)
ax = plt.subplot(1, 3, 2)
X2.show(ax=ax)
ax.set_title('Block design matrix', fontsize=12)
ax = plt.subplot(1, 3, 3)
X3.show(ax=ax)
ax.set_title('FIR design matrix', fontsize=12)
plt.subplots_adjust(top=0.9, bottom=0.25)
plt.show()
| 30.013699
| 77
| 0.685075
| 315
| 2,191
| 4.634921
| 0.450794
| 0.049315
| 0.036986
| 0.053425
| 0.199315
| 0.189726
| 0.15274
| 0.127397
| 0.127397
| 0.078082
| 0
| 0.050083
| 0.179827
| 2,191
| 72
| 78
| 30.430556
| 0.762382
| 0.120037
| 0
| 0.04
| 0
| 0
| 0.177268
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.12
| 0
| 0.12
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
37fdb024ea14a002f56310787cf60b4ca3d52485
| 36,821
|
py
|
Python
|
fbpcs/private_computation/test/service/test_private_computation.py
|
yelixu2/fbpcs
|
31b1154bf1a207471fa207a0b0e4c74693f09608
|
[
"MIT"
] | null | null | null |
fbpcs/private_computation/test/service/test_private_computation.py
|
yelixu2/fbpcs
|
31b1154bf1a207471fa207a0b0e4c74693f09608
|
[
"MIT"
] | null | null | null |
fbpcs/private_computation/test/service/test_private_computation.py
|
yelixu2/fbpcs
|
31b1154bf1a207471fa207a0b0e4c74693f09608
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from collections import defaultdict
from typing import List, Optional, Tuple
from unittest.mock import MagicMock, call, patch
from fbpcp.entity.container_instance import ContainerInstance, ContainerInstanceStatus
from fbpcp.service.mpc import MPCInstanceStatus, MPCParty, MPCService
from fbpcp.service.onedocker import OneDockerService
from fbpcs.common.entity.pcs_mpc_instance import PCSMPCInstance
from fbpcs.data_processing.lift_id_combiner.lift_id_spine_combiner_cpp import (
CppLiftIdSpineCombinerService,
)
from fbpcs.data_processing.sharding.sharding_cpp import CppShardingService
from fbpcs.onedocker_binary_config import OneDockerBinaryConfig
from fbpcs.onedocker_binary_names import OneDockerBinaryNames
from fbpcs.onedocker_service_config import OneDockerServiceConfig
from fbpcs.pcf.tests.async_utils import to_sync
from fbpcs.pid.entity.pid_instance import (
PIDInstance,
PIDInstanceStatus,
PIDProtocol,
PIDRole,
)
from fbpcs.pid.service.pid_service.pid import PIDService
from fbpcs.private_computation.entity.private_computation_instance import (
PrivateComputationGameType,
PrivateComputationInstance,
PrivateComputationInstanceStatus,
PrivateComputationRole,
UnionedPCInstance,
)
from fbpcs.private_computation.entity.private_computation_stage_type import (
PrivateComputationStageType,
)
from fbpcs.private_computation.repository.private_computation_game import GameNames
from fbpcs.private_computation.service.errors import (
PrivateComputationServiceValidationError,
)
from fbpcs.private_computation.service.private_computation import (
PrivateComputationService,
NUM_NEW_SHARDS_PER_FILE,
DEFAULT_K_ANONYMITY_THRESHOLD,
)
from fbpcs.private_computation.service.private_computation_stage_service import (
PrivateComputationStageService,
)
# TODO T94666166: libfb won't work in OSS
from libfb.py.asyncio.mock import AsyncMock
from libfb.py.testutil import data_provider
from fbpcs.private_computation.service.utils import (
create_and_start_mpc_instance,
gen_mpc_game_args_to_retry,
map_private_computation_role_to_mpc_party,
DEFAULT_CONTAINER_TIMEOUT_IN_SEC,
)
def _get_valid_stages_data() -> List[Tuple[PrivateComputationStageType]]:
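    # One tuple per stage type; used as the data_provider parametrisation for the run_stage tests.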
return [
(PrivateComputationStageType.ID_MATCH,),
(PrivateComputationStageType.COMPUTE,),
(PrivateComputationStageType.AGGREGATE,),
(PrivateComputationStageType.POST_PROCESSING_HANDLERS,),
]
class TestPrivateComputationService(unittest.TestCase):
def setUp(self):
container_svc_patcher = patch("fbpcp.service.container_aws.AWSContainerService")
storage_svc_patcher = patch("fbpcp.service.storage_s3.S3StorageService")
mpc_instance_repo_patcher = patch(
"fbpcs.common.repository.mpc_instance_local.LocalMPCInstanceRepository"
)
pid_instance_repo_patcher = patch(
"fbpcs.pid.repository.pid_instance_local.LocalPIDInstanceRepository"
)
private_computation_instance_repo_patcher = patch(
"fbpcs.private_computation.repository.private_computation_instance_local.LocalPrivateComputationInstanceRepository"
)
mpc_game_svc_patcher = patch("fbpcp.service.mpc_game.MPCGameService")
container_svc = container_svc_patcher.start()
storage_svc = storage_svc_patcher.start()
mpc_instance_repository = mpc_instance_repo_patcher.start()
pid_instance_repository = pid_instance_repo_patcher.start()
private_computation_instance_repository = (
private_computation_instance_repo_patcher.start()
)
mpc_game_svc = mpc_game_svc_patcher.start()
for patcher in (
container_svc_patcher,
storage_svc_patcher,
mpc_instance_repo_patcher,
pid_instance_repo_patcher,
private_computation_instance_repo_patcher,
mpc_game_svc_patcher,
):
self.addCleanup(patcher.stop)
self.onedocker_service_config = OneDockerServiceConfig(
task_definition="test_task_definition",
)
self.onedocker_binary_config_map = defaultdict(
lambda: OneDockerBinaryConfig(
tmp_directory="/test_tmp_directory/", binary_version="latest"
)
)
self.onedocker_service = OneDockerService(
container_svc, self.onedocker_service_config.task_definition
)
self.mpc_service = MPCService(
container_svc=container_svc,
instance_repository=mpc_instance_repository,
task_definition="test_task_definition",
mpc_game_svc=mpc_game_svc,
)
self.pid_service = PIDService(
instance_repository=pid_instance_repository,
storage_svc=storage_svc,
onedocker_svc=self.onedocker_service,
onedocker_binary_config_map=self.onedocker_binary_config_map,
)
self.private_computation_service = PrivateComputationService(
instance_repository=private_computation_instance_repository,
mpc_svc=self.mpc_service,
pid_svc=self.pid_service,
onedocker_svc=self.onedocker_service,
onedocker_binary_config_map=self.onedocker_binary_config_map,
)
self.test_private_computation_id = "test_private_computation_id"
self.test_num_containers = 2
self.test_input_path = "in_path"
self.test_output_dir = "out_dir"
self.test_game_type = PrivateComputationGameType.LIFT
self.test_concurrency = 1
def test_create_instance(self):
test_role = PrivateComputationRole.PUBLISHER
self.private_computation_service.create_instance(
instance_id=self.test_private_computation_id,
role=test_role,
game_type=self.test_game_type,
input_path=self.test_input_path,
output_dir=self.test_output_dir,
num_pid_containers=self.test_num_containers,
num_mpc_containers=self.test_num_containers,
concurrency=self.test_concurrency,
num_files_per_mpc_container=NUM_NEW_SHARDS_PER_FILE,
)
# check instance_repository.create is called with the correct arguments
self.private_computation_service.instance_repository.create.assert_called()
args = self.private_computation_service.instance_repository.create.call_args[0][
0
]
self.assertEqual(self.test_private_computation_id, args.instance_id)
self.assertEqual(test_role, args.role)
self.assertEqual(PrivateComputationInstanceStatus.CREATED, args.status)
def test_update_instance(self):
test_pid_id = self.test_private_computation_id + "_id_match"
test_pid_protocol = PIDProtocol.UNION_PID
test_pid_role = PIDRole.PUBLISHER
test_input_path = "pid_in"
test_output_path = "pid_out"
# create one PID instance to be put into PrivateComputationInstance
pid_instance = PIDInstance(
instance_id=test_pid_id,
protocol=test_pid_protocol,
pid_role=test_pid_role,
num_shards=self.test_num_containers,
input_path=test_input_path,
output_path=test_output_path,
status=PIDInstanceStatus.STARTED,
)
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.ID_MATCHING_STARTED,
instances=[pid_instance],
)
updated_pid_instance = pid_instance
updated_pid_instance.status = PIDInstanceStatus.COMPLETED
self.private_computation_service.pid_svc.update_instance = MagicMock(
return_value=updated_pid_instance
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
# call update on the PrivateComputationInstance
updated_instance = self.private_computation_service.update_instance(
instance_id=self.test_private_computation_id
)
# check update instance called on the right pid instance
self.private_computation_service.pid_svc.update_instance.assert_called()
self.assertEqual(
test_pid_id,
self.private_computation_service.pid_svc.update_instance.call_args[0][0],
)
# check update instance called on the right private lift instance
self.private_computation_service.instance_repository.update.assert_called()
self.assertEqual(
private_computation_instance,
self.private_computation_service.instance_repository.update.call_args[0][0],
)
# check updated_instance has new status
self.assertEqual(
PrivateComputationInstanceStatus.ID_MATCHING_COMPLETED,
updated_instance.status,
)
# create one MPC instance to be put into PrivateComputationInstance
test_mpc_id = "test_mpc_id"
mpc_instance = PCSMPCInstance.create_instance(
instance_id=test_mpc_id,
game_name=GameNames.LIFT.value,
mpc_party=MPCParty.SERVER,
num_workers=2,
)
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.COMPUTATION_STARTED,
instances=[mpc_instance],
)
updated_mpc_instance = mpc_instance
updated_mpc_instance.status = MPCInstanceStatus.COMPLETED
self.private_computation_service.mpc_svc.update_instance = MagicMock(
return_value=updated_mpc_instance
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
# call update on the PrivateComputationInstance
updated_instance = self.private_computation_service.update_instance(
instance_id=self.test_private_computation_id
)
# check update instance called on the right mpc instance
self.private_computation_service.mpc_svc.update_instance.assert_called()
self.assertEqual(
test_mpc_id,
self.private_computation_service.mpc_svc.update_instance.call_args[0][0],
)
# check update instance called on the right private lift instance
self.private_computation_service.instance_repository.update.assert_called()
self.assertEqual(
private_computation_instance,
self.private_computation_service.instance_repository.update.call_args[0][0],
)
# check updated_instance has new status
self.assertEqual(
PrivateComputationInstanceStatus.COMPUTATION_COMPLETED,
updated_instance.status,
)
@staticmethod
def _get_dummy_stage_svc(
stage_type: PrivateComputationStageType,
) -> PrivateComputationStageService:
"""create a DummyTestStageService class and instantiate an instance of it"""
return type(
"DummyTestStageService",
(PrivateComputationStageService,),
{
"run_async": AsyncMock(
# run_async will return whatever pc_instance privatelift.run_stage passes it
side_effect=lambda pc_instance, *args, **kwargs: pc_instance
),
"stage_type": stage_type,
},
)()
@data_provider(_get_valid_stages_data)
def test_run_stage_correct_stage_order(
self, stage_type: PrivateComputationStageType
) -> None:
"""
tests that run_stage runs stage_svc when the stage_svc is the next stage in the sequence
"""
################# PREVIOUS STAGE COMPLETED OR RETRY #######################
stage_svc = self._get_dummy_stage_svc(stage_type)
for status in (
stage_type.previous_stage.completed_status,
stage_type.failed_status,
):
pl_instance = self.create_sample_instance(status=status)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=pl_instance
)
pl_instance = self.private_computation_service.run_stage(
pl_instance.instance_id, stage_svc
)
self.assertEqual(pl_instance.status, stage_type.start_status)
@data_provider(_get_valid_stages_data)
def test_run_stage_status_already_started(
self, stage_type: PrivateComputationStageType
) -> None:
"""
tests that run_stage does not run stage_svc when the instance status is already started
"""
################# CURRENT STAGE STATUS NOT VALID #######################
stage_svc = self._get_dummy_stage_svc(stage_type)
pl_instance = self.create_sample_instance(status=stage_type.start_status)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=pl_instance
)
with self.assertRaises(ValueError):
pl_instance = self.private_computation_service.run_stage(
pl_instance.instance_id, stage_svc
)
@data_provider(_get_valid_stages_data)
def test_run_stage_out_of_order_with_dry_run(
self, stage_type: PrivateComputationStageType
) -> None:
"""
tests that run_stage runs stage_svc out of order when dry run is passed
"""
################ STAGE OUT OF ORDER WITH DRY RUN #####################
stage_svc = self._get_dummy_stage_svc(stage_type)
pl_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.UNKNOWN
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=pl_instance
)
pl_instance = self.private_computation_service.run_stage(
pl_instance.instance_id, stage_svc, dry_run=True
)
self.assertEqual(pl_instance.status, stage_type.start_status)
@data_provider(_get_valid_stages_data)
def test_run_stage_out_of_order_without_dry_run(
self, stage_type: PrivateComputationStageType
) -> None:
"""
tests that run_stage does not run stage_svc out of order when dry run is not passed
"""
####################### STAGE OUT OF ORDER NO DRY RUN ############################
stage_svc = self._get_dummy_stage_svc(stage_type)
pl_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.UNKNOWN
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=pl_instance
)
with self.assertRaises(ValueError):
pl_instance = self.private_computation_service.run_stage(
pl_instance.instance_id, stage_svc, dry_run=False
)
@data_provider(_get_valid_stages_data)
def test_run_stage_partner_no_server_ips(
self, stage_type: PrivateComputationStageType
) -> None:
"""
        tests that run_stage does not run stage_svc if role is partner and no server ips are specified
"""
####################### PARTNER NO SERVER IPS ############################
stage_svc = self._get_dummy_stage_svc(stage_type)
pl_instance = self.create_sample_instance(
status=stage_type.previous_stage.completed_status,
role=PrivateComputationRole.PARTNER,
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=pl_instance
)
with self.assertRaises(ValueError):
pl_instance = self.private_computation_service.run_stage(
pl_instance.instance_id, stage_svc
)
@data_provider(_get_valid_stages_data)
def test_run_stage_fails(self, stage_type: PrivateComputationStageType) -> None:
"""
tests that statuses are set properly when a run fails
"""
######################### STAGE FAILS ####################################
stage_svc = self._get_dummy_stage_svc(stage_type)
pl_instance = self.create_sample_instance(
status=stage_type.previous_stage.completed_status
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=pl_instance
)
# create a custom exception class to make sure we have a unique exception for the test
stage_failure_exception = type("TestStageFailureException", (Exception,), {})
stage_svc.run_async = AsyncMock(side_effect=stage_failure_exception())
with self.assertRaises(stage_failure_exception):
pl_instance = self.private_computation_service.run_stage(
pl_instance.instance_id, stage_svc
)
self.assertEqual(pl_instance.status, stage_type.failed_status)
def test_partner_missing_server_ips(self):
test_private_computation_id = "test_private_computation_id"
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.ID_MATCHING_COMPLETED,
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
# exception because role is partner but server ips are not given
with self.assertRaises(ValueError):
self.private_computation_service.aggregate_shards(
instance_id=test_private_computation_id,
)
@patch("fbpcp.service.mpc.MPCService")
@patch(
"fbpcs.private_computation.service.private_computation.create_and_start_mpc_instance"
)
def test_aggregate_shards(self, mock_create_and_start_mpc_instance, mock_mpc_svc):
# construct a private_computation_instance with an mpc_instance handling metrics computation
test_mpc_id = self.test_private_computation_id + "_compute_metrics"
mpc_instance = PCSMPCInstance.create_instance(
instance_id=test_mpc_id,
game_name=GameNames.LIFT.value,
mpc_party=MPCParty.SERVER,
num_workers=self.test_num_containers,
status=MPCInstanceStatus.COMPLETED,
)
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.COMPUTATION_COMPLETED,
instances=[mpc_instance],
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
mock_mpc_svc.update_instance = MagicMock(return_value=mpc_instance)
# call aggregate_shards
self.private_computation_service.aggregate_shards(
instance_id=self.test_private_computation_id,
server_ips=["192.0.2.0", "192.0.2.1"],
)
test_game_args = [
{
"input_base_path": private_computation_instance.compute_stage_output_base_path,
"metrics_format_type": "lift",
"num_shards": self.test_num_containers * NUM_NEW_SHARDS_PER_FILE,
"output_path": private_computation_instance.shard_aggregate_stage_output_path,
"threshold": private_computation_instance.k_anonymity_threshold,
"run_name": "",
}
]
# check a new MPC instance handling metrics aggregation was to be created
self.assertEqual(
GameNames.SHARD_AGGREGATOR.value,
mock_create_and_start_mpc_instance.call_args[1]["game_name"],
)
self.assertEqual(
test_game_args,
mock_create_and_start_mpc_instance.call_args[1]["game_args"],
)
self.private_computation_service.instance_repository.update.assert_called()
self.assertEqual(
PrivateComputationInstanceStatus.AGGREGATION_STARTED,
private_computation_instance.status,
)
@patch("fbpcp.service.mpc.MPCService")
@patch(
"fbpcs.private_computation.service.private_computation.create_and_start_mpc_instance"
)
def test_aggregate_shards_rerun(
self, mock_create_and_start_mpc_instance, mock_mpc_svc
):
# construct a private_computation_instance
test_private_computation_id = "test_private_computation_id"
mpc_instance = PCSMPCInstance.create_instance(
instance_id=test_private_computation_id + "_aggregate_shards",
game_name=GameNames.SHARD_AGGREGATOR.value,
mpc_party=MPCParty.SERVER,
num_workers=2,
status=MPCInstanceStatus.FAILED,
)
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.AGGREGATION_FAILED,
instances=[mpc_instance],
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
mock_mpc_svc.update_instance = MagicMock(return_value=mpc_instance)
# call aggregate_shards
self.private_computation_service.aggregate_shards(
instance_id=test_private_computation_id,
server_ips=["192.0.2.0", "192.0.2.1"],
)
# check that the retry counter has been incremented
self.assertEqual(private_computation_instance.retry_counter, 1)
# check a new MPC instance handling metrics aggregation was to be created
self.assertEqual(2, len(private_computation_instance.instances))
self.assertEqual(
test_private_computation_id + "_aggregate_shards1",
mock_create_and_start_mpc_instance.call_args[1]["instance_id"],
)
self.assertEqual(
PrivateComputationInstanceStatus.AGGREGATION_STARTED,
private_computation_instance.status,
)
@patch("fbpcp.service.mpc.MPCService")
@patch(
"fbpcs.private_computation.service.private_computation.create_and_start_mpc_instance"
)
def test_aggregate_shards_dry_run(
self, mock_create_and_start_mpc_instance, mock_mpc_svc
):
# construct a private_computation_instance
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.COMPUTATION_FAILED,
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
# call aggregate_shards with ad-hoc input_path and num_shards
test_format_type = "lift"
test_game_args = [
{
"input_base_path": private_computation_instance.compute_stage_output_base_path,
"num_shards": self.test_num_containers * NUM_NEW_SHARDS_PER_FILE,
"metrics_format_type": test_format_type,
"output_path": private_computation_instance.shard_aggregate_stage_output_path,
"threshold": private_computation_instance.k_anonymity_threshold,
"run_name": "",
}
]
self.private_computation_service.aggregate_shards(
instance_id=self.test_private_computation_id,
server_ips=["192.0.2.0", "192.0.2.1"],
dry_run=True,
)
# check a new MPC instance handling metrics aggregation was to be created
# with the overwritten input_path and num_shards
self.assertEqual(
GameNames.SHARD_AGGREGATOR.value,
mock_create_and_start_mpc_instance.call_args[1]["game_name"],
)
self.assertEqual(
test_game_args,
mock_create_and_start_mpc_instance.call_args[1]["game_args"],
)
self.private_computation_service.instance_repository.update.assert_called()
self.assertEqual(
PrivateComputationInstanceStatus.AGGREGATION_STARTED,
private_computation_instance.status,
)
@to_sync
@patch("fbpcp.service.mpc.MPCService")
async def test_create_and_start_mpc_instance(self, mock_mpc_svc):
mock_mpc_svc.create_instance = MagicMock()
mock_mpc_svc.start_instance_async = AsyncMock()
instance_id = "test_instance_id"
game_name = GameNames.LIFT.value
mpc_party = MPCParty.CLIENT
num_containers = 4
input_file = "input_file"
output_file = "output_file"
input_directory = "input_directory"
output_directory = "output_directory"
server_ips = ["192.0.2.0", "192.0.2.1"]
game_args = {
"input_filenames": input_file,
"input_directory": input_directory,
"output_filenames": output_file,
"output_directory": output_directory,
"concurrency": 1,
}
binary_version = self.onedocker_binary_config_map[
OneDockerBinaryNames.LIFT_COMPUTE.value
].binary_version
await create_and_start_mpc_instance(
mpc_svc=mock_mpc_svc,
instance_id=instance_id,
game_name=game_name,
mpc_party=mpc_party,
num_containers=num_containers,
binary_version=binary_version,
container_timeout=DEFAULT_CONTAINER_TIMEOUT_IN_SEC,
server_ips=server_ips,
game_args=game_args,
)
# check create_instance and start_instance were called with the right parameters
self.assertEqual(
call(
instance_id=instance_id,
game_name=game_name,
mpc_party=mpc_party,
num_workers=num_containers,
game_args=game_args,
),
mock_mpc_svc.create_instance.call_args,
)
self.assertEqual(
call(
instance_id=instance_id,
server_ips=server_ips,
timeout=DEFAULT_CONTAINER_TIMEOUT_IN_SEC,
version=binary_version,
),
mock_mpc_svc.start_instance_async.call_args,
)
def test_map_private_computation_role_to_mpc_party(self):
self.assertEqual(
MPCParty.SERVER,
map_private_computation_role_to_mpc_party(PrivateComputationRole.PUBLISHER),
)
self.assertEqual(
MPCParty.CLIENT,
map_private_computation_role_to_mpc_party(PrivateComputationRole.PARTNER),
)
def test_get_status_from_stage(self):
# Test get status from an MPC stage
mpc_instance = PCSMPCInstance.create_instance(
instance_id="test_mpc_id",
game_name=GameNames.SHARD_AGGREGATOR.value,
mpc_party=MPCParty.SERVER,
num_workers=2,
status=MPCInstanceStatus.FAILED,
)
self.assertEqual(
PrivateComputationInstanceStatus.AGGREGATION_FAILED,
self.private_computation_service._get_status_from_stage(mpc_instance),
)
# Test get status from the PID stage
pid_instance = PIDInstance(
instance_id="test_pid_id",
protocol=PIDProtocol.UNION_PID,
pid_role=PIDRole.PUBLISHER,
num_shards=4,
input_path="input",
output_path="output",
stages_containers={},
stages_status={},
status=PIDInstanceStatus.COMPLETED,
)
self.assertEqual(
PrivateComputationInstanceStatus.ID_MATCHING_COMPLETED,
self.private_computation_service._get_status_from_stage(pid_instance),
)
def test_prepare_data(self):
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.CREATED,
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
with patch.object(
CppLiftIdSpineCombinerService,
"combine_on_container_async",
) as mock_combine, patch.object(
CppShardingService,
"shard_on_container_async",
) as mock_shard:
# call prepare_data
self.private_computation_service.prepare_data(
instance_id=self.test_private_computation_id,
dry_run=True,
)
binary_config = self.onedocker_binary_config_map[
OneDockerBinaryNames.LIFT_ID_SPINE_COMBINER.value
]
mock_combine.assert_called_once_with(
spine_path=private_computation_instance.pid_stage_output_spine_path,
data_path=private_computation_instance.pid_stage_output_data_path,
output_path=private_computation_instance.data_processing_output_path
+ "_combine",
num_shards=self.test_num_containers,
onedocker_svc=self.onedocker_service,
binary_version=binary_config.binary_version,
tmp_directory=binary_config.tmp_directory,
)
mock_shard.assert_called()
def test_prepare_data_tasks_skipped(self):
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.COMPUTATION_FAILED,
)
private_computation_instance.partial_container_retry_enabled = True
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
with patch.object(
CppLiftIdSpineCombinerService,
"combine_on_container_async",
) as mock_combine, patch.object(
CppShardingService,
"shard_on_container_async",
) as mock_shard:
# call prepare_data
self.private_computation_service.prepare_data(
instance_id=self.test_private_computation_id,
)
# expect combining and sharding skipped because this private_computation_instance has
# status PrivateComputationInstanceStatus.COMPUTATION_FAILED, so this run
# is to recover from a previous compute metrics failure, meaning data
# preparation should have been done
mock_combine.assert_not_called()
mock_shard.assert_not_called()
def test_validate_metrics_results_doesnt_match(self):
self.private_computation_service.pid_svc.storage_svc.read = MagicMock()
self.private_computation_service.pid_svc.storage_svc.read.side_effect = [
'{"subGroupMetrics":[],"metrics":{"controlClicks":1,"testSpend":0,"controlImpressions":0,"testImpressions":0,"controlMatchCount":0,"testMatchCount":0,"controlNumConvSquared":0,"testNumConvSquared":0,"testValueSquared":0,"controlValue":0,"testValue":0,"testConverters":0,"testConversions":0,"testPopulation":0,"controlClickers":0,"testClickers":0,"controlReach":0,"testReach":0,"controlSpend":0,"testClicks":0,"controlValueSquared":0,"controlConverters":0,"controlConversions":0,"controlPopulation":0}}',
'{"subGroupMetrics":[],"metrics":{"testSpend":0,"controlClicks":0,"controlImpressions":0,"testImpressions":0,"controlMatchCount":0,"testMatchCount":0,"controlNumConvSquared":0,"testNumConvSquared":0,"testValueSquared":0,"controlValue":0,"testValue":0,"testConverters":0,"testConversions":0,"testPopulation":0,"controlClickers":0,"testClickers":0,"controlReach":0,"testReach":0,"controlSpend":0,"testClicks":0,"controlValueSquared":0,"controlConverters":0,"controlConversions":0,"controlPopulation":0}}',
]
with self.assertRaises(PrivateComputationServiceValidationError):
self.private_computation_service.validate_metrics(
instance_id="test_id",
aggregated_result_path="aggregated_result_path",
expected_result_path="expected_result_path",
)
def test_cancel_current_stage(self):
test_mpc_id = self.test_private_computation_id + "_compute_metrics"
test_game_name = GameNames.LIFT.value
test_mpc_party = MPCParty.CLIENT
# prepare the pl instance that will be read in to memory from the repository
# at the beginning of the cancel_current_stage function
mpc_instance_started = PCSMPCInstance.create_instance(
instance_id=test_mpc_id,
game_name=test_game_name,
mpc_party=test_mpc_party,
num_workers=self.test_num_containers,
status=MPCInstanceStatus.STARTED,
)
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.COMPUTATION_STARTED,
role=PrivateComputationRole.PARTNER,
instances=[mpc_instance_started],
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
# prepare the mpc instance that's returned from mpc_service.stop_instance()
mpc_instance_canceled = PCSMPCInstance.create_instance(
instance_id=test_mpc_id,
game_name=test_game_name,
mpc_party=test_mpc_party,
num_workers=self.test_num_containers,
status=MPCInstanceStatus.CANCELED,
)
self.private_computation_service.mpc_svc.stop_instance = MagicMock(
return_value=mpc_instance_canceled
)
self.private_computation_service.mpc_svc.instance_repository.read = MagicMock(
return_value=mpc_instance_canceled
)
# call cancel, expect no exception
private_computation_instance = (
self.private_computation_service.cancel_current_stage(
instance_id=self.test_private_computation_id,
)
)
# assert the pl instance returned has the correct status
self.assertEqual(
PrivateComputationInstanceStatus.COMPUTATION_FAILED,
private_computation_instance.status,
)
def test_gen_game_args_to_retry(self):
test_input = "test_input_retry"
mpc_instance = PCSMPCInstance.create_instance(
instance_id="mpc_instance",
game_name=GameNames.LIFT.value,
mpc_party=MPCParty.SERVER,
num_workers=2,
status=MPCInstanceStatus.FAILED,
containers=[
ContainerInstance(
instance_id="container_instance_0",
status=ContainerInstanceStatus.FAILED,
),
ContainerInstance(
instance_id="container_instance_1",
status=ContainerInstanceStatus.COMPLETED,
),
],
game_args=[
{
"input_filenames": test_input,
},
{
"input_filenames": "input_filenames",
},
],
)
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.COMPUTATION_FAILED,
instances=[mpc_instance],
)
game_args = gen_mpc_game_args_to_retry(
private_computation_instance
)
self.assertEqual(1, len(game_args)) # only 1 failed container
self.assertEqual(test_input, game_args[0]["input_filenames"])
def create_sample_instance(
self,
status: PrivateComputationInstanceStatus,
role: PrivateComputationRole = PrivateComputationRole.PUBLISHER,
instances: Optional[List[UnionedPCInstance]] = None,
) -> PrivateComputationInstance:
return PrivateComputationInstance(
instance_id=self.test_private_computation_id,
role=role,
instances=instances or [],
status=status,
status_update_ts=1600000000,
num_pid_containers=self.test_num_containers,
num_mpc_containers=self.test_num_containers,
concurrency=self.test_concurrency,
num_files_per_mpc_container=NUM_NEW_SHARDS_PER_FILE,
game_type=PrivateComputationGameType.LIFT,
input_path=self.test_input_path,
output_dir=self.test_output_dir,
fail_fast=True,
k_anonymity_threshold=DEFAULT_K_ANONYMITY_THRESHOLD,
)
| 41.558691
| 515
| 0.677222
| 3,786
| 36,821
| 6.189118
| 0.095087
| 0.113691
| 0.064015
| 0.065594
| 0.656111
| 0.599479
| 0.560516
| 0.516388
| 0.483441
| 0.457665
| 0
| 0.005401
| 0.250781
| 36,821
| 885
| 516
| 41.60565
| 0.843984
| 0.084734
| 0
| 0.408276
| 0
| 0.002759
| 0.083955
| 0.059671
| 0
| 0
| 0
| 0.00113
| 0.066207
| 1
| 0.031724
| false
| 0
| 0.034483
| 0.002759
| 0.071724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53090ea45ea4a45cdc2f0069622a80742e35321e
| 5,215
|
py
|
Python
|
indico/modules/oauth/models/applications.py
|
yamiacat/indico
|
754c02cd7cd25bf1eab0ca5f497eb24b135dd51c
|
[
"MIT"
] | null | null | null |
indico/modules/oauth/models/applications.py
|
yamiacat/indico
|
754c02cd7cd25bf1eab0ca5f497eb24b135dd51c
|
[
"MIT"
] | null | null | null |
indico/modules/oauth/models/applications.py
|
yamiacat/indico
|
754c02cd7cd25bf1eab0ca5f497eb24b135dd51c
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from uuid import uuid4
from sqlalchemy.dialects.postgresql import ARRAY, UUID
from sqlalchemy.ext.declarative import declared_attr
from werkzeug.urls import url_parse
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.modules.oauth import logger
from indico.util.i18n import _
from indico.util.struct.enum import IndicoEnum
SCOPES = {'read:user': _("User information (read only)"),
'read:legacy_api': _('Legacy API (read only)'),
'write:legacy_api': _('Legacy API (write only)'),
'registrants': _('Event registrants')}
class SystemAppType(int, IndicoEnum):
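    # Built-in application types; for these, the data below is enforced or used as defaults.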
none = 0
checkin = 1
flower = 2
__enforced_data__ = {
checkin: {'default_scopes': {'registrants'},
'redirect_uris': ['http://localhost'],
'is_enabled': True},
flower: {'default_scopes': {'read:user'},
'is_enabled': True}
}
__default_data__ = {
checkin: {'is_trusted': True,
'name': 'Checkin App',
'description': 'The checkin app for mobile devices allows scanning ticket QR codes and '
'checking-in event participants.'},
flower: {'is_trusted': True,
'name': 'Flower',
'description': 'Flower allows monitoring Celery tasks. If flower is installed, this app is used to '
'restrict access to Indico administrators.'}
}
@property
def enforced_data(self):
return self.__enforced_data__.get(self, {})
@property
def default_data(self):
return dict(self.__default_data__.get(self, {}), **self.enforced_data)
class OAuthApplication(db.Model):
"""OAuth applications registered in Indico."""
__tablename__ = 'applications'
@declared_attr
def __table_args__(cls):
return (db.Index('ix_uq_applications_name_lower', db.func.lower(cls.name), unique=True),
db.Index(None, cls.system_app_type, unique=True,
postgresql_where=db.text(f'system_app_type != {SystemAppType.none.value}')),
{'schema': 'oauth'})
#: the unique id of the application
id = db.Column(
db.Integer,
primary_key=True
)
#: human readable name
name = db.Column(
db.String,
nullable=False
)
#: human readable description
description = db.Column(
db.Text,
nullable=False,
default=''
)
#: the OAuth client_id
client_id = db.Column(
UUID,
unique=True,
nullable=False,
default=lambda: str(uuid4())
)
#: the OAuth client_secret
client_secret = db.Column(
UUID,
nullable=False,
default=lambda: str(uuid4())
)
#: the OAuth default scopes the application may request access to
default_scopes = db.Column(
ARRAY(db.String),
nullable=False
)
    #: the absolute OAuth URIs that an application may use to redirect to after authorization
redirect_uris = db.Column(
ARRAY(db.String),
nullable=False,
default=[]
)
#: whether the application is enabled or disabled
is_enabled = db.Column(
db.Boolean,
nullable=False,
default=True
)
#: whether the application can access user data without asking for permission
is_trusted = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: the type of system app (if any). system apps cannot be deleted
system_app_type = db.Column(
PyIntEnum(SystemAppType),
nullable=False,
default=SystemAppType.none
)
# relationship backrefs:
# - tokens (OAuthToken.application)
@property
def client_type(self):
return 'public'
@property
def default_redirect_uri(self):
return self.redirect_uris[0] if self.redirect_uris else None
@property
def locator(self):
return {'id': self.id}
def __repr__(self): # pragma: no cover
return f'<OAuthApplication({self.id}, {self.name}, {self.client_id})>'
def reset_client_secret(self):
self.client_secret = str(uuid4())
logger.info("Client secret for %s has been reset.", self)
def validate_redirect_uri(self, redirect_uri):
"""Called by flask-oauthlib to validate the redirect_uri.
        Uses logic similar to GitHub's, i.e. protocol and
host/port must match exactly and if there is a path in the
whitelisted URL, the path of the redirect_uri must start with
that path.
"""
uri_data = url_parse(redirect_uri)
for valid_uri_data in map(url_parse, self.redirect_uris):
if (uri_data.scheme == valid_uri_data.scheme and uri_data.netloc == valid_uri_data.netloc and
uri_data.path.startswith(valid_uri_data.path)):
return True
return False
| 31.227545
| 117
| 0.623011
| 625
| 5,215
| 5.0336
| 0.344
| 0.025429
| 0.044501
| 0.020025
| 0.071837
| 0.071837
| 0.071837
| 0.026701
| 0
| 0
| 0
| 0.004796
| 0.280345
| 5,215
| 166
| 118
| 31.415663
| 0.833467
| 0.198082
| 0
| 0.188034
| 0
| 0
| 0.178016
| 0.020185
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0.059829
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
530c6ba5f7b617f99321342102c64a175ed1a651
| 6,257
|
py
|
Python
|
PaddleNLP/unarchived/deep_attention_matching_net/utils/layers.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 3
|
2019-09-05T14:03:42.000Z
|
2019-09-09T10:34:35.000Z
|
PaddleNLP/unarchived/deep_attention_matching_net/utils/layers.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 2
|
2019-06-26T03:21:49.000Z
|
2019-09-19T09:43:42.000Z
|
PaddleNLP/unarchived/deep_attention_matching_net/utils/layers.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 2
|
2018-06-14T13:59:36.000Z
|
2018-11-14T12:34:47.000Z
|
import paddle.fluid as fluid
def loss(x, y, clip_value=10.0):
"""Calculate the sigmoid cross entropy with logits for input(x).
Args:
        x: Variable with shape [batch, dim]
y: Input label
Returns:
loss: cross entropy
logits: prediction
"""
logits = fluid.layers.fc(
input=x,
size=1,
bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(0.)))
loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=logits, label=y)
loss = fluid.layers.reduce_mean(
fluid.layers.clip(
loss, min=-clip_value, max=clip_value))
return loss, logits
def ffn(input, d_inner_hid, d_hid, name=None):
"""Position-wise Feed-Forward Network
"""
hidden = fluid.layers.fc(input=input,
size=d_inner_hid,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(name=name + '_fc.w_0'),
bias_attr=fluid.ParamAttr(
name=name + '_fc.b_0',
initializer=fluid.initializer.Constant(0.)),
act="relu")
out = fluid.layers.fc(input=hidden,
size=d_hid,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(name=name + '_fc.w_1'),
bias_attr=fluid.ParamAttr(
name=name + '_fc.b_1',
initializer=fluid.initializer.Constant(0.)))
return out
def dot_product_attention(query,
key,
value,
d_key,
q_mask=None,
k_mask=None,
dropout_rate=None,
mask_cache=None):
"""Dot product layer.
Args:
query: a tensor with shape [batch, Q_time, Q_dimension]
key: a tensor with shape [batch, time, K_dimension]
value: a tensor with shape [batch, time, V_dimension]
        q_mask: a tensor with shape [batch, Q_time, 1] marking valid query positions
        k_mask: a tensor with shape [batch, time, 1] marking valid key positions
Returns:
a tensor with shape [batch, query_time, value_dimension]
Raises:
AssertionError: if Q_dimension not equal to K_dimension when attention
type is dot.
"""
logits = fluid.layers.matmul(
x=query, y=key, transpose_y=True, alpha=d_key**(-0.5))
if (q_mask is not None) and (k_mask is not None):
if mask_cache is not None and q_mask.name in mask_cache and k_mask.name in mask_cache[
q_mask.name]:
mask, another_mask = mask_cache[q_mask.name][k_mask.name]
else:
mask = fluid.layers.matmul(x=q_mask, y=k_mask, transpose_y=True)
another_mask = fluid.layers.scale(
mask,
scale=float(2**32 - 1),
bias=float(-1),
bias_after_scale=False)
if mask_cache is not None:
if q_mask.name not in mask_cache:
mask_cache[q_mask.name] = dict()
mask_cache[q_mask.name][k_mask.name] = [mask, another_mask]
logits = mask * logits + another_mask
attention = fluid.layers.softmax(logits)
if dropout_rate:
attention = fluid.layers.dropout(
input=attention, dropout_prob=dropout_rate, is_test=False, seed=2)
atten_out = fluid.layers.matmul(x=attention, y=value)
return atten_out
def block(name,
query,
key,
value,
d_key,
q_mask=None,
k_mask=None,
is_layer_norm=True,
dropout_rate=None,
          mask_cache=None):
    """Attention block: dot-product attention with a residual connection,
    optional layer normalization, and a position-wise feed-forward sub-layer.
    """
att_out = dot_product_attention(
query,
key,
value,
d_key,
q_mask,
k_mask,
dropout_rate,
mask_cache=mask_cache)
y = query + att_out
if is_layer_norm:
y = fluid.layers.layer_norm(
input=y,
begin_norm_axis=len(y.shape) - 1,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(1.),
name=name + '_layer_norm.w_0'),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.),
name=name + '_layer_norm.b_0'))
z = ffn(y, d_key, d_key, name)
w = y + z
if is_layer_norm:
w = fluid.layers.layer_norm(
input=w,
begin_norm_axis=len(w.shape) - 1,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(1.),
name=name + '_layer_norm.w_1'),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.),
name=name + '_layer_norm.b_1'))
return w
def cnn_3d(input, out_channels_0, out_channels_1, add_relu=True):
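    # Two conv3d + max-pool3d stages; note that the activation applied when
    # add_relu is set is actually ELU.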
# same padding
conv_0 = fluid.layers.conv3d(
name="conv3d_0",
input=input,
num_filters=out_channels_0,
filter_size=[3, 3, 3],
padding=[1, 1, 1],
act="elu" if add_relu else None,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
low=-0.01, high=0.01)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.0)))
# same padding
pooling_0 = fluid.layers.pool3d(
input=conv_0,
pool_type="max",
pool_size=3,
pool_padding=1,
pool_stride=3)
conv_1 = fluid.layers.conv3d(
name="conv3d_1",
input=pooling_0,
num_filters=out_channels_1,
filter_size=[3, 3, 3],
padding=[1, 1, 1],
act="elu" if add_relu else None,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
low=-0.01, high=0.01)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.0)))
# same padding
pooling_1 = fluid.layers.pool3d(
input=conv_1,
pool_type="max",
pool_size=3,
pool_padding=1,
pool_stride=3)
return pooling_1
| 31.129353
| 94
| 0.546588
| 769
| 6,257
| 4.230169
| 0.180754
| 0.060867
| 0.071934
| 0.080234
| 0.566247
| 0.436213
| 0.373194
| 0.373194
| 0.336305
| 0.336305
| 0
| 0.021292
| 0.354483
| 6,257
| 200
| 95
| 31.285
| 0.784105
| 0.120026
| 0
| 0.395683
| 0
| 0
| 0.022276
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035971
| false
| 0
| 0.007194
| 0
| 0.079137
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
530c6de530c859b58a3a007a91c54314cf276d8d
| 6,896
|
py
|
Python
|
plugin.video.saltsrd.lite/js2py/translators/jsregexps.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2019-03-05T09:38:10.000Z
|
2019-03-05T09:38:10.000Z
|
plugin.video.saltsrd.lite/js2py/translators/jsregexps.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | null | null | null |
plugin.video.saltsrd.lite/js2py/translators/jsregexps.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2021-11-05T20:48:09.000Z
|
2021-11-05T20:48:09.000Z
|
from salts_lib.pyjsparser.pyjsparserdata import *
REGEXP_SPECIAL_SINGLE = {'\\', '^', '$', '*', '+', '?', '.'}
NOT_PATTERN_CHARS = {'^', '$', '\\', '.', '*', '+', '?', '(', ')', '[', ']', '|'} # what about '{', '}', ???
CHAR_CLASS_ESCAPE = {'d', 'D', 's', 'S', 'w', 'W'}
CONTROL_ESCAPE_CHARS = {'f', 'n', 'r', 't', 'v'}
CONTROL_LETTERS = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'}
def SpecialChar(char):
return {'type': 'SpecialChar',
'content': char}
def isPatternCharacter(char):
return char not in NOT_PATTERN_CHARS
class JsRegExpParser:
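    # Small recursive-descent parser over the JS regexp grammar:
    # Pattern -> Disjunction -> Alternative* -> Term (Assertion | Atom [Quantifier]).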
def __init__(self, source, flags):
self.source = source
self.flags = flags
self.index = 0
self.length = len(source)
self.lineNumber = 0
self.lineStart = 0
def parsePattern(self):
        '''Perform string escape - for regexp literals'''
return {'type': 'Pattern',
'contents': self.parseDisjunction()}
def parseDisjunction(self):
alternatives = []
while True:
alternatives.append(self.parseAlternative())
if not self.isEOF():
self.expect_character('|')
else:
break
return {'type': 'Disjunction',
'contents': alternatives}
def isEOF(self):
if self.index>=self.length:
return True
return False
def expect_character(self, character):
if self.source[self.index]!=character:
self.throwUnexpected(character)
self.index += 1
def parseAlternative(self):
contents = []
while not self.isEOF() and self.source[self.index]!='|':
contents.append(self.parseTerm())
return {'type': 'Alternative',
'contents': contents}
def follows(self, chars):
for i, c in enumerate(chars):
if self.index+i>=self.length or self.source[self.index+i] != c:
return False
return True
def parseTerm(self):
assertion = self.parseAssertion()
if assertion:
return assertion
else:
return {'type': 'Term',
'contents': self.parseAtom()} # quantifier will go inside atom!
def parseAssertion(self):
if self.follows('$'):
content = SpecialChar('$')
self.index += 1
elif self.follows('^'):
content = SpecialChar('^')
self.index += 1
elif self.follows('\\b'):
content = SpecialChar('\\b')
self.index += 2
elif self.follows('\\B'):
content = SpecialChar('\\B')
self.index += 2
elif self.follows('(?='):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
            content = {'type': 'Lookahead',
'contents': dis,
'negated': False}
elif self.follows('(?!'):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
            content = {'type': 'Lookahead',
'contents': dis,
'negated': True}
else:
return None
return {'type': 'Assertion',
'content': content}
def parseAtom(self):
if self.follows('.'):
content = SpecialChar('.')
self.index += 1
elif self.follows('\\'):
self.index += 1
content = self.parseAtomEscape()
elif self.follows('['):
content = self.parseCharacterClass()
elif self.follows('(?:'):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
content = 'idk'
elif self.follows('('):
self.index += 1
dis = self.parseDisjunction()
self.expect_character(')')
content = 'idk'
elif isPatternCharacter(self.source[self.index]):
content = self.source[self.index]
self.index += 1
else:
return None
quantifier = self.parseQuantifier()
return {'type': 'Atom',
'content': content,
'quantifier': quantifier}
def parseQuantifier(self):
prefix = self.parseQuantifierPrefix()
if not prefix:
return None
greedy = True
if self.follows('?'):
self.index += 1
greedy = False
return {'type': 'Quantifier',
'contents': prefix,
'greedy': greedy}
def parseQuantifierPrefix(self):
if self.isEOF():
return None
if self.follows('+'):
content = '+'
self.index += 1
elif self.follows('?'):
content = '?'
self.index += 1
elif self.follows('*'):
content = '*'
self.index += 1
elif self.follows('{'): # try matching otherwise return None and restore the state
i = self.index
self.index += 1
digs1 = self.scanDecimalDigs()
# if no minimal number of digs provided then return no quantifier
if not digs1:
self.index = i
return None
# scan char limit if provided
if self.follows(','):
self.index += 1
digs2 = self.scanDecimalDigs()
else:
digs2 = ''
# must be valid!
if not self.follows('}'):
self.index = i
return None
else:
self.expect_character('}')
content = int(digs1), int(digs2) if digs2 else None
else:
return None
return content
def parseAtomEscape(self):
ch = self.source[self.index]
        if isDecimalDigit(ch) and ch != '0':
            # decimal escape (back-reference); the digits are consumed but no node is built
            digs = self.scanDecimalDigs()
elif ch in CHAR_CLASS_ESCAPE:
self.index += 1
return SpecialChar('\\' + ch)
else:
return self.parseCharacterEscape()
def parseCharacterEscape(self):
ch = self.source[self.index]
if ch in CONTROL_ESCAPE_CHARS:
return SpecialChar('\\' + ch)
        if ch == 'c':
            # control-letter escape (\cX) is not handled; fall through and return None
            pass
def scanDecimalDigs(self):
s = self.index
while not self.isEOF() and isDecimalDigit(self.source[self.index]):
self.index += 1
return self.source[s:self.index]
a = JsRegExpParser('a(?=x)', '')
print(a.parsePattern())
| 31.488584
| 118
| 0.484049
| 666
| 6,896
| 4.971471
| 0.216216
| 0.097856
| 0.045304
| 0.045908
| 0.319541
| 0.281788
| 0.258532
| 0.224706
| 0.224706
| 0.205678
| 0
| 0.0071
| 0.366879
| 6,896
| 219
| 119
| 31.488584
| 0.75126
| 0.038718
| 0
| 0.318919
| 0
| 0
| 0.060432
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 1
| 0.091892
| false
| 0
| 0.005405
| 0.010811
| 0.243243
| 0.005405
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
530dbebdee877862aa38a08b696073a86196141c
| 4,282
|
py
|
Python
|
duckdown/handlers/site_handler.py
|
blueshed/duckdown
|
e6d0e62d378bd2d9ed0cd5ce4bc7ab3476b86020
|
[
"MIT"
] | null | null | null |
duckdown/handlers/site_handler.py
|
blueshed/duckdown
|
e6d0e62d378bd2d9ed0cd5ce4bc7ab3476b86020
|
[
"MIT"
] | null | null | null |
duckdown/handlers/site_handler.py
|
blueshed/duckdown
|
e6d0e62d378bd2d9ed0cd5ce4bc7ab3476b86020
|
[
"MIT"
] | null | null | null |
# pylint: disable=W0201, E1101
""" handle request for markdown pages """
import logging
import os
import importlib
from tornado.web import RequestHandler, HTTPError
from tornado.escape import url_escape
from ..utils.converter_mixin import ConverterMixin
from .access_control import UserMixin
from ..utils.nav import nav
LOGGER = logging.getLogger(__name__)
EMPTY_TOC = '<div class="toc">\n<ul></ul>\n</div>\n'
class SiteHandler(
UserMixin, ConverterMixin, RequestHandler
): # pylint: disable=W0223
""" inline transform request for markdown pages """
def initialize(self, pages):
""" setup init properties """
self.pages = pages
self.meta = None
self.nav = None
self.site_nav = None
self.site = None
def create_template_loader(self, template_path):
""" if we have one, us it """
if self.site.template_loader:
return self.site.template_loader
return super().create_template_loader(template_path)
@property
def has_toc(self):
""" determin if toc is empty """
return self.meta.toc != EMPTY_TOC
def meta_value(self, name, default=None):
""" return markdown meta value """
return self.meta.Meta.get(name, [default])
def one_meta_value(self, name, default=None):
""" return markdown meta value """
result = self.meta_value(name, default)
return result[0] if result else None
def load_site_nav(self, site, path):
""" set the handler site_nav attribute """
menu = nav(site, root=self.pages, path=path)
if menu:
self.site_nav = "\n".join(menu)
def load_dir_nav(self, site, path):
""" load nav section if it exist """
folder = os.path.dirname(path)
if folder:
LOGGER.info(" -- folder: %s", folder)
nav_path = os.path.join(folder, "-nav.md")
_, content = site.get_file(nav_path)
if content:
content = content.decode("utf-8")
LOGGER.info(" -- nav: %s", nav_path)
content = self.meta.convert(content)
self.nav = self.convert_images(content)
def run_script(
self, site, script_name, path
): # pylint: disable=unused-argument
""" load a module and call module.main """
name = f"{self.settings['script_path']}.{script_name}"
script_module = importlib.import_module(name)
return script_module.main(path)
async def get(self, path):
""" handle get """
path = path if path else "index.html"
file, ext = os.path.splitext(path)
doc = os.path.join(self.pages, f"{file}.md")
self.site = self.get_site(path)
_, content = self.site.get_file(doc)
if content is None:
raise HTTPError(404)
if content:
content = content.decode("utf-8")
self.meta = self.markdown
self.load_dir_nav(self.site, doc)
self.load_site_nav(self.site, path)
file_path = os.path.split(file)[0]
# load theme
theme_file = os.path.join(self.pages, file_path, "-theme.css")
_, theme_css = self.site.get_file(theme_file)
if theme_css:
LOGGER.info(" -- theme.css")
theme_css = theme_css.decode("utf-8")
edit_path = "/edit"
if file:
edit_path = f"/edit?path={ url_escape(file) }.md"
LOGGER.info(" -- ext: %s", ext)
if ext == ".html":
content = self.meta.convert(content)
LOGGER.info(" -- meta: %s", self.meta.Meta)
template = self.one_meta_value("template", "site")
LOGGER.info(" -- tmpl: %s", template)
for key in self.meta.Meta:
if key.startswith("x-script-"):
outcome = self.run_script(
self.site, self.meta.Meta[key][0], path
)
self.meta.Meta[key] = [outcome]
self.render(
f"{template}_tmpl.html",
content=self.convert_images(content),
edit_path=edit_path,
theme_css=theme_css,
)
else:
self.write(self.convert_images(content))
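# Illustrative request flow (a sketch inferred from get() above, not from duckdown docs):
# GET /guide/intro.html  ->  loads "<pages>/guide/intro.md", converts the markdown, and
# renders "<template>_tmpl.html" (template taken from the page's "template" meta entry,
# defaulting to "site"); any other extension writes the converted content directly.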
| 33.193798
| 70
| 0.578001
| 529
| 4,282
| 4.542533
| 0.238185
| 0.046608
| 0.024969
| 0.026633
| 0.183937
| 0.089055
| 0.069913
| 0.042447
| 0.042447
| 0.042447
| 0
| 0.007014
| 0.300794
| 4,282
| 128
| 71
| 33.453125
| 0.795591
| 0.094348
| 0
| 0.085106
| 0
| 0
| 0.077187
| 0.020285
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.095745
| 0
| 0.255319
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
530e6c72942083800f06be0b1704afe86e8b9dd0
| 627
|
py
|
Python
|
Problemset/binary-search-tree-to-greater-sum-tree/binary-search-tree-to-greater-sum-tree.py
|
KivenCkl/LeetCode
|
fcc97c66f8154a5d20c2aca86120cb37b9d2d83d
|
[
"MIT"
] | 7
|
2019-05-08T03:41:05.000Z
|
2020-12-22T12:39:43.000Z
|
Problemset/binary-search-tree-to-greater-sum-tree/binary-search-tree-to-greater-sum-tree.py
|
Yuziquan/LeetCode
|
303fc1c8af847f783c4020bd731b28b72ed92a35
|
[
"MIT"
] | 1
|
2021-07-19T03:48:35.000Z
|
2021-07-19T03:48:35.000Z
|
Problemset/binary-search-tree-to-greater-sum-tree/binary-search-tree-to-greater-sum-tree.py
|
Yuziquan/LeetCode
|
303fc1c8af847f783c4020bd731b28b72ed92a35
|
[
"MIT"
] | 7
|
2019-05-10T20:43:20.000Z
|
2021-02-22T03:47:35.000Z
|
# @Title: Binary Search Tree to Greater Sum Tree
# @Author: KivenC
# @Date: 2019-05-15 19:52:08
# @Runtime: 48 ms
# @Memory: 13 MB
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def __init__(self):
self.sum_value = 0
def bstToGst(self, root: TreeNode) -> TreeNode:
if not root:
return
self.bstToGst(root.right)
root.val = self.sum_value = self.sum_value + root.val
self.bstToGst(root.left)
return root
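# A minimal usage sketch (illustrative, not part of the original submission); it
# assumes a TreeNode class matching the commented definition above:
#   root = TreeNode(4); root.left = TreeNode(1); root.right = TreeNode(6)
#   Solution().bstToGst(root)
#   print(root.val, root.left.val, root.right.val)  # expected: 10 11 6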
| 24.115385
| 62
| 0.593301
| 84
| 627
| 4.297619
| 0.535714
| 0.058172
| 0.099723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043182
| 0.298246
| 627
| 25
| 63
| 25.08
| 0.777273
| 0.452951
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
530ebd58aea0c6b33d05245813f2f54d1c4a046b
| 2,058
|
py
|
Python
|
vine/clone.py
|
robinson96/GRAPE
|
f6404ae6ee2933647e515a9480077ab01fb2c430
|
[
"BSD-3-Clause"
] | 4
|
2017-04-30T17:08:42.000Z
|
2019-11-15T04:44:09.000Z
|
vine/clone.py
|
robinson96/GRAPE
|
f6404ae6ee2933647e515a9480077ab01fb2c430
|
[
"BSD-3-Clause"
] | 1
|
2016-02-12T07:51:30.000Z
|
2016-02-12T07:51:30.000Z
|
vine/clone.py
|
robinson96/GRAPE
|
f6404ae6ee2933647e515a9480077ab01fb2c430
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import option
import utility
import grapeMenu
import grapeGit as git
import grapeConfig
class Clone(option.Option):
""" grape-clone
Clones a git repo and configures it for use with git.
Usage: grape-clone <url> <path> [--recursive] [--allNested]
Arguments:
<url> The URL of the remote repository
<path> The directory where you want to clone the repo to.
Options:
--recursive Recursively clone submodules.
--allNested Get all nested subprojects.
"""
def __init__(self):
super(Clone, self).__init__()
self._key = "clone"
self._section = "Getting Started"
    # Clones the default repo into a new local repo
def description(self):
return "Clone a repo and configure it for grape"
def execute(self, args):
remotepath = args["<url>"]
destpath = args["<path>"]
rstr = "--recursive" if args["--recursive"] else ""
utility.printMsg("Cloning %s into %s %s" % (remotepath, destpath, "recursively" if args["--recursive"] else ""))
git.clone(" %s %s %s" % (rstr, remotepath, destpath))
utility.printMsg("Clone succeeded!")
os.chdir(destpath)
grapeConfig.read()
# ensure you start on a reasonable publish branch
menu = grapeMenu.menu()
config = grapeConfig.grapeConfig()
publicBranches = config.getPublicBranchList()
if publicBranches:
if "develop" in publicBranches:
initialBranch = "develop"
elif "master" in publicBranches:
initialBranch = "master"
else:
initialBranch = publicBranches[0]
menu.applyMenuChoice("checkout", args=[initialBranch])
if args["--allNested"]:
configArgs = ["--uv","--uvArg=--allNestedSubprojects"]
else:
configArgs = []
return menu.applyMenuChoice("config", configArgs)
def setDefaultConfig(self, config):
pass
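# Illustrative invocation sketch (argument keys are taken from the docstring above;
# the URL and path values are made up, and the dispatch path that normally supplies
# the docopt-parsed arguments to execute() is assumed):
#   Clone().execute({"<url>": "ssh://git@example.com/repo.git",
#                    "<path>": "myrepo",
#                    "--recursive": True,
#                    "--allNested": False})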
| 30.716418
| 120
| 0.59378
| 208
| 2,058
| 5.826923
| 0.456731
| 0.014851
| 0.024752
| 0.031353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000695
| 0.301263
| 2,058
| 66
| 121
| 31.181818
| 0.842142
| 0.222546
| 0
| 0.05
| 0
| 0
| 0.157963
| 0.019342
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0.025
| 0.15
| 0.025
| 0.325
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
530f17168f0cbb129e06d1280fd5322946f49710
| 45,589
|
py
|
Python
|
neo/test/iotest/test_nixio.py
|
pearsonlab/python-neo
|
8915dfe9e55fd3a36be83d820bdd83ab085e9402
|
[
"BSD-3-Clause"
] | null | null | null |
neo/test/iotest/test_nixio.py
|
pearsonlab/python-neo
|
8915dfe9e55fd3a36be83d820bdd83ab085e9402
|
[
"BSD-3-Clause"
] | null | null | null |
neo/test/iotest/test_nixio.py
|
pearsonlab/python-neo
|
8915dfe9e55fd3a36be83d820bdd83ab085e9402
|
[
"BSD-3-Clause"
] | 1
|
2018-04-13T04:48:48.000Z
|
2018-04-13T04:48:48.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016, German Neuroinformatics Node (G-Node)
# Achilleas Koutsou <achilleas.k@gmail.com>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
"""
Tests for neo.io.nixio
"""
import os
from datetime import datetime
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest import mock
except ImportError:
import mock
import string
import itertools
from six import string_types
import numpy as np
import quantities as pq
from neo.core import (Block, Segment, ChannelIndex, AnalogSignal,
IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch)
from neo.test.iotest.common_io_test import BaseTestIO
try:
import nixio
HAVE_NIX = True
except ImportError:
HAVE_NIX = False
from neo.io.nixio import NixIO
from neo.io.nixio import nixtypes
@unittest.skipUnless(HAVE_NIX, "Requires NIX")
class NixIOTest(unittest.TestCase):
filename = None
io = None
def compare_blocks(self, neoblocks, nixblocks):
for neoblock, nixblock in zip(neoblocks, nixblocks):
self.compare_attr(neoblock, nixblock)
self.assertEqual(len(neoblock.segments), len(nixblock.groups))
for idx, neoseg in enumerate(neoblock.segments):
nixgrp = nixblock.groups[neoseg.name]
self.compare_segment_group(neoseg, nixgrp)
for idx, neochx in enumerate(neoblock.channel_indexes):
if neochx.name:
nixsrc = nixblock.sources[neochx.name]
else:
nixsrc = nixblock.sources[idx]
self.compare_chx_source(neochx, nixsrc)
self.check_refs(neoblock, nixblock)
def compare_chx_source(self, neochx, nixsrc):
self.compare_attr(neochx, nixsrc)
nix_channels = list(src for src in nixsrc.sources
if src.type == "neo.channelindex")
self.assertEqual(len(neochx.index), len(nix_channels))
for nixchan in nix_channels:
nixchanidx = nixchan.metadata["index"]
try:
neochanpos = list(neochx.index).index(nixchanidx)
except ValueError:
self.fail("Channel indexes do not match.")
if len(neochx.channel_names):
neochanname = neochx.channel_names[neochanpos]
if ((not isinstance(neochanname, str)) and
isinstance(neochanname, bytes)):
neochanname = neochanname.decode()
nixchanname = nixchan.name
self.assertEqual(neochanname, nixchanname)
nix_units = list(src for src in nixsrc.sources
if src.type == "neo.unit")
self.assertEqual(len(neochx.units), len(nix_units))
for neounit in neochx.units:
nixunit = nixsrc.sources[neounit.name]
self.compare_attr(neounit, nixunit)
def check_refs(self, neoblock, nixblock):
"""
Checks whether the references between objects that are not nested are
mapped correctly (e.g., SpikeTrains referenced by a Unit).
:param neoblock: A Neo block
:param nixblock: The corresponding NIX block
"""
for idx, neochx in enumerate(neoblock.channel_indexes):
if neochx.name:
nixchx = nixblock.sources[neochx.name]
else:
nixchx = nixblock.sources[idx]
# AnalogSignals referencing CHX
neoasigs = list(sig.name for sig in neochx.analogsignals)
nixasigs = list(set(da.metadata.name for da in nixblock.data_arrays
if da.type == "neo.analogsignal" and
nixchx in da.sources))
self.assertEqual(len(neoasigs), len(nixasigs))
# IrregularlySampledSignals referencing CHX
neoisigs = list(sig.name for sig in neochx.irregularlysampledsignals)
nixisigs = list(set(da.metadata.name for da in nixblock.data_arrays
if da.type == "neo.irregularlysampledsignal" and
nixchx in da.sources))
self.assertEqual(len(neoisigs), len(nixisigs))
# SpikeTrains referencing CHX and Units
for sidx, neounit in enumerate(neochx.units):
if neounit.name:
nixunit = nixchx.sources[neounit.name]
else:
nixunit = nixchx.sources[sidx]
neosts = list(st.name for st in neounit.spiketrains)
nixsts = list(mt for mt in nixblock.multi_tags
if mt.type == "neo.spiketrain" and
nixunit.name in mt.sources)
# SpikeTrains must also reference CHX
for nixst in nixsts:
self.assertIn(nixchx.name, nixst.sources)
nixsts = list(st.name for st in nixsts)
self.assertEqual(len(neosts), len(nixsts))
for neoname in neosts:
if neoname:
self.assertIn(neoname, nixsts)
# Events and Epochs must reference all Signals in the Group (NIX only)
for nixgroup in nixblock.groups:
nixevep = list(mt for mt in nixgroup.multi_tags
if mt.type in ["neo.event", "neo.epoch"])
nixsigs = list(da.name for da in nixgroup.data_arrays
if da.type in ["neo.analogsignal",
"neo.irregularlysampledsignal"])
for nee in nixevep:
for ns in nixsigs:
self.assertIn(ns, nee.references)
def compare_segment_group(self, neoseg, nixgroup):
self.compare_attr(neoseg, nixgroup)
neo_signals = neoseg.analogsignals + neoseg.irregularlysampledsignals
self.compare_signals_das(neo_signals, nixgroup.data_arrays)
neo_eests = neoseg.epochs + neoseg.events + neoseg.spiketrains
self.compare_eests_mtags(neo_eests, nixgroup.multi_tags)
def compare_signals_das(self, neosignals, data_arrays):
for sig in neosignals:
if self.io._find_lazy_loaded(sig) is not None:
sig = self.io.load_lazy_object(sig)
dalist = list()
for idx in itertools.count():
nixname = "{}.{}".format(sig.name, idx)
if nixname in data_arrays:
dalist.append(data_arrays[nixname])
else:
break
_, nsig = np.shape(sig)
self.assertEqual(nsig, len(dalist))
self.compare_signal_dalist(sig, dalist)
def compare_signal_dalist(self, neosig, nixdalist):
"""
Check if a Neo Analog or IrregularlySampledSignal matches a list of
NIX DataArrays.
:param neosig: Neo Analog or IrregularlySampledSignal
:param nixdalist: List of DataArrays
"""
nixmd = nixdalist[0].metadata
self.assertTrue(all(nixmd == da.metadata for da in nixdalist))
neounit = str(neosig.dimensionality)
for sig, da in zip(np.transpose(neosig),
sorted(nixdalist, key=lambda d: d.name)):
self.compare_attr(neosig, da)
np.testing.assert_almost_equal(sig.magnitude, da)
self.assertEqual(neounit, da.unit)
timedim = da.dimensions[0]
if isinstance(neosig, AnalogSignal):
self.assertIsInstance(timedim, nixtypes["SampledDimension"])
self.assertEqual(
pq.Quantity(timedim.sampling_interval, timedim.unit),
neosig.sampling_period
)
self.assertEqual(timedim.offset, neosig.t_start.magnitude)
if "t_start.units" in da.metadata.props:
self.assertEqual(da.metadata["t_start.units"],
str(neosig.t_start.dimensionality))
elif isinstance(neosig, IrregularlySampledSignal):
self.assertIsInstance(timedim, nixtypes["RangeDimension"])
np.testing.assert_almost_equal(neosig.times.magnitude,
timedim.ticks)
self.assertEqual(timedim.unit,
str(neosig.times.dimensionality))
def compare_eests_mtags(self, eestlist, mtaglist):
self.assertEqual(len(eestlist), len(mtaglist))
for eest in eestlist:
if self.io._find_lazy_loaded(eest) is not None:
eest = self.io.load_lazy_object(eest)
mtag = mtaglist[eest.name]
if isinstance(eest, Epoch):
self.compare_epoch_mtag(eest, mtag)
elif isinstance(eest, Event):
self.compare_event_mtag(eest, mtag)
elif isinstance(eest, SpikeTrain):
self.compare_spiketrain_mtag(eest, mtag)
def compare_epoch_mtag(self, epoch, mtag):
self.assertEqual(mtag.type, "neo.epoch")
self.compare_attr(epoch, mtag)
np.testing.assert_almost_equal(epoch.times.magnitude, mtag.positions)
np.testing.assert_almost_equal(epoch.durations.magnitude, mtag.extents)
self.assertEqual(mtag.positions.unit,
str(epoch.times.units.dimensionality))
self.assertEqual(mtag.extents.unit,
str(epoch.durations.units.dimensionality))
for neol, nixl in zip(epoch.labels,
mtag.positions.dimensions[0].labels):
# Dirty. Should find the root cause instead
if isinstance(neol, bytes):
neol = neol.decode()
if isinstance(nixl, bytes):
nixl = nixl.decode()
self.assertEqual(neol, nixl)
def compare_event_mtag(self, event, mtag):
self.assertEqual(mtag.type, "neo.event")
self.compare_attr(event, mtag)
np.testing.assert_almost_equal(event.times.magnitude, mtag.positions)
self.assertEqual(mtag.positions.unit, str(event.units.dimensionality))
for neol, nixl in zip(event.labels,
mtag.positions.dimensions[0].labels):
# Dirty. Should find the root cause instead
# Only happens in 3.2
if isinstance(neol, bytes):
neol = neol.decode()
if isinstance(nixl, bytes):
nixl = nixl.decode()
self.assertEqual(neol, nixl)
def compare_spiketrain_mtag(self, spiketrain, mtag):
self.assertEqual(mtag.type, "neo.spiketrain")
self.compare_attr(spiketrain, mtag)
np.testing.assert_almost_equal(spiketrain.times.magnitude,
mtag.positions)
if len(mtag.features):
neowf = spiketrain.waveforms
nixwf = mtag.features[0].data
self.assertEqual(np.shape(neowf), np.shape(nixwf))
self.assertEqual(nixwf.unit, str(neowf.units.dimensionality))
np.testing.assert_almost_equal(neowf.magnitude, nixwf)
self.assertIsInstance(nixwf.dimensions[0], nixtypes["SetDimension"])
self.assertIsInstance(nixwf.dimensions[1], nixtypes["SetDimension"])
self.assertIsInstance(nixwf.dimensions[2],
nixtypes["SampledDimension"])
def compare_attr(self, neoobj, nixobj):
if neoobj.name:
if isinstance(neoobj, (AnalogSignal, IrregularlySampledSignal)):
nix_name = ".".join(nixobj.name.split(".")[:-1])
else:
nix_name = nixobj.name
self.assertEqual(neoobj.name, nix_name)
self.assertEqual(neoobj.description, nixobj.definition)
if hasattr(neoobj, "rec_datetime") and neoobj.rec_datetime:
self.assertEqual(neoobj.rec_datetime,
datetime.fromtimestamp(nixobj.created_at))
if hasattr(neoobj, "file_datetime") and neoobj.file_datetime:
self.assertEqual(neoobj.file_datetime,
datetime.fromtimestamp(
nixobj.metadata["file_datetime"]))
if neoobj.annotations:
nixmd = nixobj.metadata
for k, v, in neoobj.annotations.items():
if isinstance(v, pq.Quantity):
self.assertEqual(nixmd.props[str(k)].unit,
str(v.dimensionality))
np.testing.assert_almost_equal(nixmd[str(k)],
v.magnitude)
else:
self.assertEqual(nixmd[str(k)], v)
@classmethod
def create_full_nix_file(cls, filename):
nixfile = nixio.File.open(filename, nixio.FileMode.Overwrite)
nix_block_a = nixfile.create_block(cls.rword(10), "neo.block")
nix_block_a.definition = cls.rsentence(5, 10)
nix_block_b = nixfile.create_block(cls.rword(10), "neo.block")
nix_block_b.definition = cls.rsentence(3, 3)
nix_block_a.metadata = nixfile.create_section(
nix_block_a.name, nix_block_a.name+".metadata"
)
nix_block_b.metadata = nixfile.create_section(
nix_block_b.name, nix_block_b.name+".metadata"
)
nix_blocks = [nix_block_a, nix_block_b]
for blk in nix_blocks:
for ind in range(3):
group = blk.create_group(cls.rword(), "neo.segment")
group.definition = cls.rsentence(10, 15)
group_md = blk.metadata.create_section(group.name,
group.name+".metadata")
group.metadata = group_md
blk = nix_blocks[0]
group = blk.groups[0]
allspiketrains = list()
allsignalgroups = list()
# analogsignals
for n in range(3):
siggroup = list()
asig_name = "{}_asig{}".format(cls.rword(10), n)
asig_definition = cls.rsentence(5, 5)
asig_md = group.metadata.create_section(asig_name,
asig_name+".metadata")
for idx in range(3):
da_asig = blk.create_data_array(
"{}.{}".format(asig_name, idx),
"neo.analogsignal",
data=cls.rquant(100, 1)
)
da_asig.definition = asig_definition
da_asig.unit = "mV"
da_asig.metadata = asig_md
timedim = da_asig.append_sampled_dimension(0.01)
timedim.unit = "ms"
timedim.label = "time"
timedim.offset = 10
da_asig.append_set_dimension()
group.data_arrays.append(da_asig)
siggroup.append(da_asig)
allsignalgroups.append(siggroup)
# irregularlysampledsignals
for n in range(2):
siggroup = list()
isig_name = "{}_isig{}".format(cls.rword(10), n)
isig_definition = cls.rsentence(12, 12)
isig_md = group.metadata.create_section(isig_name,
isig_name+".metadata")
isig_times = cls.rquant(200, 1, True)
for idx in range(10):
da_isig = blk.create_data_array(
"{}.{}".format(isig_name, idx),
"neo.irregularlysampledsignal",
data=cls.rquant(200, 1)
)
da_isig.definition = isig_definition
da_isig.unit = "mV"
da_isig.metadata = isig_md
timedim = da_isig.append_range_dimension(isig_times)
timedim.unit = "s"
timedim.label = "time"
da_isig.append_set_dimension()
group.data_arrays.append(da_isig)
siggroup.append(da_isig)
allsignalgroups.append(siggroup)
# SpikeTrains with Waveforms
for n in range(4):
stname = "{}-st{}".format(cls.rword(20), n)
times = cls.rquant(400, 1, True)
times_da = blk.create_data_array(
"{}.times".format(stname),
"neo.spiketrain.times",
data=times
)
times_da.unit = "ms"
mtag_st = blk.create_multi_tag(stname,
"neo.spiketrain",
times_da)
group.multi_tags.append(mtag_st)
mtag_st.definition = cls.rsentence(20, 30)
mtag_st_md = group.metadata.create_section(
mtag_st.name, mtag_st.name+".metadata"
)
mtag_st.metadata = mtag_st_md
mtag_st_md.create_property(
"t_stop", nixio.Value(max(times_da).item()+1)
)
waveforms = cls.rquant((10, 8, 5), 1)
wfname = "{}.waveforms".format(mtag_st.name)
wfda = blk.create_data_array(wfname, "neo.waveforms",
data=waveforms)
wfda.unit = "mV"
mtag_st.create_feature(wfda, nixio.LinkType.Indexed)
wfda.append_set_dimension() # spike dimension
wfda.append_set_dimension() # channel dimension
wftimedim = wfda.append_sampled_dimension(0.1)
wftimedim.unit = "ms"
wftimedim.label = "time"
wfda.metadata = mtag_st_md.create_section(
wfname, "neo.waveforms.metadata"
)
wfda.metadata.create_property("left_sweep",
[nixio.Value(20)]*5)
allspiketrains.append(mtag_st)
# Epochs
for n in range(3):
epname = "{}-ep{}".format(cls.rword(5), n)
times = cls.rquant(5, 1, True)
times_da = blk.create_data_array(
"{}.times".format(epname),
"neo.epoch.times",
data=times
)
times_da.unit = "s"
extents = cls.rquant(5, 1)
extents_da = blk.create_data_array(
"{}.durations".format(epname),
"neo.epoch.durations",
data=extents
)
extents_da.unit = "s"
mtag_ep = blk.create_multi_tag(
epname, "neo.epoch", times_da
)
group.multi_tags.append(mtag_ep)
mtag_ep.definition = cls.rsentence(2)
mtag_ep.extents = extents_da
label_dim = mtag_ep.positions.append_set_dimension()
label_dim.labels = cls.rsentence(5).split(" ")
# reference all signals in the group
for siggroup in allsignalgroups:
mtag_ep.references.extend(siggroup)
# Events
for n in range(2):
evname = "{}-ev{}".format(cls.rword(5), n)
times = cls.rquant(5, 1, True)
times_da = blk.create_data_array(
"{}.times".format(evname),
"neo.event.times",
data=times
)
times_da.unit = "s"
mtag_ev = blk.create_multi_tag(
evname, "neo.event", times_da
)
group.multi_tags.append(mtag_ev)
mtag_ev.definition = cls.rsentence(2)
label_dim = mtag_ev.positions.append_set_dimension()
label_dim.labels = cls.rsentence(5).split(" ")
# reference all signals in the group
for siggroup in allsignalgroups:
mtag_ev.references.extend(siggroup)
# CHX
nixchx = blk.create_source(cls.rword(10),
"neo.channelindex")
nixchx.metadata = nix_blocks[0].metadata.create_section(
nixchx.name, "neo.channelindex.metadata"
)
chantype = "neo.channelindex"
# 3 channels
for idx in [2, 5, 9]:
channame = cls.rword(20)
nixrc = nixchx.create_source(channame, chantype)
nixrc.definition = cls.rsentence(13)
nixrc.metadata = nixchx.metadata.create_section(
nixrc.name, "neo.channelindex.metadata"
)
nixrc.metadata.create_property("index", nixio.Value(idx))
dims = tuple(map(nixio.Value, cls.rquant(3, 1)))
nixrc.metadata.create_property("coordinates", dims)
nixrc.metadata.create_property("coordinates.units",
nixio.Value("um"))
nunits = 1
stsperunit = np.array_split(allspiketrains, nunits)
for idx in range(nunits):
unitname = "{}-unit{}".format(cls.rword(5), idx)
nixunit = nixchx.create_source(unitname, "neo.unit")
nixunit.definition = cls.rsentence(4, 10)
for st in stsperunit[idx]:
st.sources.append(nixchx)
st.sources.append(nixunit)
# pick a few signal groups to reference this CHX
randsiggroups = np.random.choice(allsignalgroups, 5, False)
for siggroup in randsiggroups:
for sig in siggroup:
sig.sources.append(nixchx)
return nixfile
@staticmethod
def rdate():
return datetime(year=np.random.randint(1980, 2020),
month=np.random.randint(1, 13),
day=np.random.randint(1, 29))
@classmethod
def populate_dates(cls, obj):
obj.file_datetime = cls.rdate()
obj.rec_datetime = cls.rdate()
@staticmethod
def rword(n=10):
return "".join(np.random.choice(list(string.ascii_letters), n))
@classmethod
def rsentence(cls, n=3, maxwl=10):
return " ".join(cls.rword(np.random.randint(1, maxwl))
for _ in range(n))
@classmethod
def rdict(cls, nitems):
rd = dict()
for _ in range(nitems):
key = cls.rword()
value = cls.rword() if np.random.choice((0, 1)) \
else np.random.uniform()
rd[key] = value
return rd
@staticmethod
def rquant(shape, unit, incr=False):
try:
dim = len(shape)
except TypeError:
dim = 1
if incr and dim > 1:
raise TypeError("Shape of quantity array may only be "
"one-dimensional when incremental values are "
"requested.")
arr = np.random.random(shape)
if incr:
arr = np.array(np.cumsum(arr))
return arr*unit
@classmethod
def create_all_annotated(cls):
times = cls.rquant(1, pq.s)
signal = cls.rquant(1, pq.V)
blk = Block()
blk.annotate(**cls.rdict(3))
seg = Segment()
seg.annotate(**cls.rdict(4))
blk.segments.append(seg)
asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
asig.annotate(**cls.rdict(2))
seg.analogsignals.append(asig)
isig = IrregularlySampledSignal(times=times, signal=signal,
time_units=pq.s)
isig.annotate(**cls.rdict(2))
seg.irregularlysampledsignals.append(isig)
epoch = Epoch(times=times, durations=times)
epoch.annotate(**cls.rdict(4))
seg.epochs.append(epoch)
event = Event(times=times)
event.annotate(**cls.rdict(4))
seg.events.append(event)
spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
d = cls.rdict(6)
d["quantity"] = pq.Quantity(10, "mV")
d["qarray"] = pq.Quantity(range(10), "mA")
spiketrain.annotate(**d)
seg.spiketrains.append(spiketrain)
chx = ChannelIndex(name="achx", index=[1, 2])
chx.annotate(**cls.rdict(5))
blk.channel_indexes.append(chx)
unit = Unit()
unit.annotate(**cls.rdict(2))
chx.units.append(unit)
return blk
class NixIOWriteTest(NixIOTest):
def setUp(self):
self.filename = "nixio_testfile_write.h5"
self.writer = NixIO(self.filename, "ow")
self.io = self.writer
self.reader = nixio.File.open(self.filename,
nixio.FileMode.ReadOnly)
def tearDown(self):
del self.writer
self.reader.close()
os.remove(self.filename)
def write_and_compare(self, blocks):
self.writer.write_all_blocks(blocks)
self.compare_blocks(self.writer.read_all_blocks(), self.reader.blocks)
def test_block_write(self):
block = Block(name=self.rword(),
description=self.rsentence())
self.write_and_compare([block])
block.annotate(**self.rdict(5))
self.write_and_compare([block])
def test_segment_write(self):
block = Block(name=self.rword())
segment = Segment(name=self.rword(), description=self.rword())
block.segments.append(segment)
self.write_and_compare([block])
segment.annotate(**self.rdict(2))
self.write_and_compare([block])
def test_channel_index_write(self):
block = Block(name=self.rword())
chx = ChannelIndex(name=self.rword(),
description=self.rsentence(),
index=[1, 2, 3, 5, 8, 13])
block.channel_indexes.append(chx)
self.write_and_compare([block])
chx.annotate(**self.rdict(3))
self.write_and_compare([block])
def test_signals_write(self):
block = Block()
seg = Segment()
block.segments.append(seg)
asig = AnalogSignal(signal=self.rquant((10, 3), pq.mV),
sampling_rate=pq.Quantity(10, "Hz"))
seg.analogsignals.append(asig)
self.write_and_compare([block])
anotherblock = Block("ir signal block")
seg = Segment("ir signal seg")
anotherblock.segments.append(seg)
irsig = IrregularlySampledSignal(
signal=np.random.random((20, 3)),
times=self.rquant(20, pq.ms, True),
units=pq.A
)
seg.irregularlysampledsignals.append(irsig)
self.write_and_compare([anotherblock])
block.segments[0].analogsignals.append(
AnalogSignal(signal=[10.0, 1.0, 3.0], units=pq.S,
sampling_period=pq.Quantity(3, "s"),
dtype=np.double, name="signal42",
description="this is an analogsignal",
t_start=45 * pq.ms),
)
self.write_and_compare([block, anotherblock])
block.segments[0].irregularlysampledsignals.append(
IrregularlySampledSignal(times=np.random.random(10),
signal=np.random.random((10, 3)),
units="mV", time_units="s",
dtype=np.float,
name="some sort of signal",
description="the signal is described")
)
self.write_and_compare([block, anotherblock])
def test_epoch_write(self):
block = Block()
seg = Segment()
block.segments.append(seg)
epoch = Epoch(times=[1, 1, 10, 3]*pq.ms, durations=[3, 3, 3, 1]*pq.ms,
labels=np.array(["one", "two", "three", "four"]),
name="test epoch", description="an epoch for testing")
seg.epochs.append(epoch)
self.write_and_compare([block])
def test_event_write(self):
block = Block()
seg = Segment()
block.segments.append(seg)
event = Event(times=np.arange(0, 30, 10)*pq.s,
labels=np.array(["0", "1", "2"]),
name="event name",
description="event description")
seg.events.append(event)
self.write_and_compare([block])
def test_spiketrain_write(self):
block = Block()
seg = Segment()
block.segments.append(seg)
spiketrain = SpikeTrain(times=[3, 4, 5]*pq.s, t_stop=10.0,
name="spikes!", description="sssssspikes")
seg.spiketrains.append(spiketrain)
self.write_and_compare([block])
waveforms = self.rquant((20, 5, 10), pq.mV)
spiketrain = SpikeTrain(times=[1, 1.1, 1.2]*pq.ms, t_stop=1.5*pq.s,
name="spikes with wf",
description="spikes for waveform test",
waveforms=waveforms)
seg.spiketrains.append(spiketrain)
self.write_and_compare([block])
spiketrain.left_sweep = np.random.random(10)*pq.ms
self.write_and_compare([block])
def test_metadata_structure_write(self):
neoblk = self.create_all_annotated()
self.io.write_block(neoblk)
blk = self.io.nix_file.blocks[0]
blkmd = blk.metadata
self.assertEqual(blk.name, blkmd.name)
grp = blk.groups[0] # segment
self.assertIn(grp.name, blkmd.sections)
grpmd = blkmd.sections[grp.name]
for da in grp.data_arrays: # signals
name = ".".join(da.name.split(".")[:-1])
self.assertIn(name, grpmd.sections)
for mtag in grp.multi_tags: # spiketrains, events, and epochs
self.assertIn(mtag.name, grpmd.sections)
srcchx = blk.sources[0] # chx
self.assertIn(srcchx.name, blkmd.sections)
for srcunit in blk.sources: # units
self.assertIn(srcunit.name, blkmd.sections)
self.write_and_compare([neoblk])
def test_anonymous_objects_write(self):
nblocks = 2
nsegs = 2
nanasig = 4
nirrseg = 2
nepochs = 3
nevents = 4
nspiketrains = 3
nchx = 5
nunits = 10
times = self.rquant(1, pq.s)
signal = self.rquant(1, pq.V)
blocks = []
for blkidx in range(nblocks):
blk = Block()
blocks.append(blk)
for segidx in range(nsegs):
seg = Segment()
blk.segments.append(seg)
for anaidx in range(nanasig):
seg.analogsignals.append(AnalogSignal(signal=signal,
sampling_rate=pq.Hz))
for irridx in range(nirrseg):
seg.irregularlysampledsignals.append(
IrregularlySampledSignal(times=times,
signal=signal,
time_units=pq.s)
)
for epidx in range(nepochs):
seg.epochs.append(Epoch(times=times, durations=times))
for evidx in range(nevents):
seg.events.append(Event(times=times))
for stidx in range(nspiketrains):
seg.spiketrains.append(SpikeTrain(times=times, t_stop=pq.s,
units=pq.s))
for chidx in range(nchx):
chx = ChannelIndex(name="chx{}".format(chidx),
index=[1, 2])
blk.channel_indexes.append(chx)
for unidx in range(nunits):
unit = Unit()
chx.units.append(unit)
self.writer.write_all_blocks(blocks)
self.compare_blocks(blocks, self.reader.blocks)
def test_to_value(self):
section = self.io.nix_file.create_section("Metadata value test", "Test")
writeprop = self.io._write_property
# quantity
qvalue = pq.Quantity(10, "mV")
writeprop(section, "qvalue", qvalue)
self.assertEqual(section["qvalue"], 10)
self.assertEqual(section.props["qvalue"].unit, "mV")
# datetime
dt = self.rdate()
writeprop(section, "dt", dt)
self.assertEqual(datetime.fromtimestamp(section["dt"]), dt)
# string
randstr = self.rsentence()
writeprop(section, "randstr", randstr)
self.assertEqual(section["randstr"], randstr)
# bytes
bytestring = b"bytestring"
writeprop(section, "randbytes", bytestring)
self.assertEqual(section["randbytes"], bytestring.decode())
# iterables
randlist = np.random.random(10).tolist()
writeprop(section, "randlist", randlist)
self.assertEqual(randlist, section["randlist"])
randarray = np.random.random(10)
writeprop(section, "randarray", randarray)
np.testing.assert_almost_equal(randarray, section["randarray"])
# numpy item
npval = np.float64(2398)
writeprop(section, "npval", npval)
self.assertEqual(npval, section["npval"])
# number
val = 42
writeprop(section, "val", val)
self.assertEqual(val, section["val"])
        # multi-dimensional data -- UNSUPPORTED
# mdlist = [[1, 2, 3], [4, 5, 6]]
# writeprop(section, "mdlist", mdlist)
# mdarray = np.random.random((10, 3))
# writeprop(section, "mdarray", mdarray)
class NixIOReadTest(NixIOTest):
filename = "testfile_readtest.h5"
nixfile = None
nix_blocks = None
original_methods = dict()
@classmethod
def setUpClass(cls):
if HAVE_NIX:
cls.nixfile = cls.create_full_nix_file(cls.filename)
def setUp(self):
self.io = NixIO(self.filename, "ro")
self.original_methods["_read_cascade"] = self.io._read_cascade
self.original_methods["_update_maps"] = self.io._update_maps
@classmethod
def tearDownClass(cls):
if HAVE_NIX:
cls.nixfile.close()
def tearDown(self):
del self.io
def test_all_read(self):
neo_blocks = self.io.read_all_blocks(cascade=True, lazy=False)
nix_blocks = self.io.nix_file.blocks
self.compare_blocks(neo_blocks, nix_blocks)
def test_lazyload_fullcascade_read(self):
neo_blocks = self.io.read_all_blocks(cascade=True, lazy=True)
nix_blocks = self.io.nix_file.blocks
# data objects should be empty
for block in neo_blocks:
for seg in block.segments:
for asig in seg.analogsignals:
self.assertEqual(len(asig), 0)
for isig in seg.irregularlysampledsignals:
self.assertEqual(len(isig), 0)
for epoch in seg.epochs:
self.assertEqual(len(epoch), 0)
for event in seg.events:
self.assertEqual(len(event), 0)
for st in seg.spiketrains:
self.assertEqual(len(st), 0)
self.compare_blocks(neo_blocks, nix_blocks)
def test_lazyload_lazycascade_read(self):
neo_blocks = self.io.read_all_blocks(cascade="lazy", lazy=True)
nix_blocks = self.io.nix_file.blocks
self.compare_blocks(neo_blocks, nix_blocks)
def test_lazycascade_read(self):
def getitem(self, index):
return self._data.__getitem__(index)
from neo.io.nixio import LazyList
getitem_original = LazyList.__getitem__
LazyList.__getitem__ = getitem
neo_blocks = self.io.read_all_blocks(cascade="lazy", lazy=False)
for block in neo_blocks:
self.assertIsInstance(block.segments, LazyList)
self.assertIsInstance(block.channel_indexes, LazyList)
for seg in block.segments:
self.assertIsInstance(seg, string_types)
for chx in block.channel_indexes:
self.assertIsInstance(chx, string_types)
LazyList.__getitem__ = getitem_original
def test_load_lazy_cascade(self):
from neo.io.nixio import LazyList
neo_blocks = self.io.read_all_blocks(cascade="lazy", lazy=False)
for block in neo_blocks:
self.assertIsInstance(block.segments, LazyList)
self.assertIsInstance(block.channel_indexes, LazyList)
name = block.name
block = self.io.load_lazy_cascade("/" + name, lazy=False)
self.assertIsInstance(block.segments, list)
self.assertIsInstance(block.channel_indexes, list)
for seg in block.segments:
self.assertIsInstance(seg.analogsignals, list)
self.assertIsInstance(seg.irregularlysampledsignals, list)
self.assertIsInstance(seg.epochs, list)
self.assertIsInstance(seg.events, list)
self.assertIsInstance(seg.spiketrains, list)
def test_nocascade_read(self):
self.io._read_cascade = mock.Mock()
neo_blocks = self.io.read_all_blocks(cascade=False)
self.io._read_cascade.assert_not_called()
for block in neo_blocks:
self.assertEqual(len(block.segments), 0)
nix_block = self.io.nix_file.blocks[block.name]
self.compare_attr(block, nix_block)
def test_lazy_load_subschema(self):
blk = self.io.nix_file.blocks[0]
segpath = "/" + blk.name + "/segments/" + blk.groups[0].name
segment = self.io.load_lazy_cascade(segpath, lazy=True)
self.assertIsInstance(segment, Segment)
self.assertEqual(segment.name, blk.groups[0].name)
self.assertIs(segment.block, None)
self.assertEqual(len(segment.analogsignals[0]), 0)
segment = self.io.load_lazy_cascade(segpath, lazy=False)
self.assertEqual(np.shape(segment.analogsignals[0]), (100, 3))
class NixIOHashTest(NixIOTest):
def setUp(self):
self.hash = NixIO._hash_object
def _hash_test(self, objtype, argfuncs):
attr = {}
for arg, func in argfuncs.items():
attr[arg] = func()
obj_one = objtype(**attr)
obj_two = objtype(**attr)
hash_one = self.hash(obj_one)
hash_two = self.hash(obj_two)
self.assertEqual(hash_one, hash_two)
for arg, func in argfuncs.items():
chattr = attr.copy()
chattr[arg] = func()
obj_two = objtype(**chattr)
hash_two = self.hash(obj_two)
self.assertNotEqual(
hash_one, hash_two,
"Hash test failed with different '{}'".format(arg)
)
def test_block_seg_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"rec_datetime": self.rdate,
"file_datetime": self.rdate,
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(Block, argfuncs)
self._hash_test(Segment, argfuncs)
self._hash_test(Unit, argfuncs)
def test_chx_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"index": lambda: np.random.random(10).tolist(),
"channel_names": lambda: self.rsentence(10).split(" "),
"coordinates": lambda: [(np.random.random() * pq.cm,
np.random.random() * pq.cm,
np.random.random() * pq.cm)]*10,
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(ChannelIndex, argfuncs)
def test_analogsignal_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"signal": lambda: self.rquant((10, 10), pq.mV),
"sampling_rate": lambda: np.random.random() * pq.Hz,
"t_start": lambda: np.random.random() * pq.sec,
"t_stop": lambda: np.random.random() * pq.sec,
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(AnalogSignal, argfuncs)
def test_irregularsignal_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"signal": lambda: self.rquant((10, 10), pq.mV),
"times": lambda: self.rquant(10, pq.ms, True),
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(IrregularlySampledSignal, argfuncs)
def test_event_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"times": lambda: self.rquant(10, pq.ms),
"durations": lambda: self.rquant(10, pq.ms),
"labels": lambda: self.rsentence(10).split(" "),
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(Event, argfuncs)
self._hash_test(Epoch, argfuncs)
def test_spiketrain_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"times": lambda: self.rquant(10, pq.ms, True),
"t_start": lambda: -np.random.random() * pq.sec,
"t_stop": lambda: np.random.random() * 100 * pq.sec,
"waveforms": lambda: self.rquant((10, 10, 20), pq.mV),
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(SpikeTrain, argfuncs)
class NixIOPartialWriteTest(NixIOTest):
filename = "testfile_partialwrite.h5"
nixfile = None
neo_blocks = None
original_methods = dict()
@classmethod
def setUpClass(cls):
if HAVE_NIX:
cls.nixfile = cls.create_full_nix_file(cls.filename)
def setUp(self):
self.io = NixIO(self.filename, "rw")
self.neo_blocks = self.io.read_all_blocks()
self.original_methods["_write_attr_annotations"] =\
self.io._write_attr_annotations
@classmethod
def tearDownClass(cls):
if HAVE_NIX:
cls.nixfile.close()
def tearDown(self):
self.restore_methods()
del self.io
def restore_methods(self):
for name, method in self.original_methods.items():
setattr(self.io, name, self.original_methods[name])
def _mock_write_attr(self, objclass):
typestr = str(objclass.__name__).lower()
self.io._write_attr_annotations = mock.Mock(
wraps=self.io._write_attr_annotations,
side_effect=self.check_obj_type("neo.{}".format(typestr))
)
neo_blocks = self.neo_blocks
self.modify_objects(neo_blocks, excludes=[objclass])
self.io.write_all_blocks(neo_blocks)
self.restore_methods()
def check_obj_type(self, typestring):
neq = self.assertNotEqual
def side_effect_func(*args, **kwargs):
obj = kwargs.get("nixobj", args[0])
if isinstance(obj, list):
for sig in obj:
neq(sig.type, typestring)
else:
neq(obj.type, typestring)
return side_effect_func
@classmethod
def modify_objects(cls, objs, excludes=()):
excludes = tuple(excludes)
for obj in objs:
if not (excludes and isinstance(obj, excludes)):
obj.description = cls.rsentence()
for container in getattr(obj, "_child_containers", []):
children = getattr(obj, container)
cls.modify_objects(children, excludes)
def test_partial(self):
for objclass in NixIO.supported_objects:
self._mock_write_attr(objclass)
self.compare_blocks(self.neo_blocks, self.io.nix_file.blocks)
def test_no_modifications(self):
self.io._write_attr_annotations = mock.Mock()
self.io.write_all_blocks(self.neo_blocks)
self.io._write_attr_annotations.assert_not_called()
self.compare_blocks(self.neo_blocks, self.io.nix_file.blocks)
# clearing hashes and checking again
for k in self.io._object_hashes.keys():
self.io._object_hashes[k] = None
self.io.write_all_blocks(self.neo_blocks)
self.io._write_attr_annotations.assert_not_called()
# changing hashes to force rewrite
for k in self.io._object_hashes.keys():
self.io._object_hashes[k] = "_"
self.io.write_all_blocks(self.neo_blocks)
callcount = self.io._write_attr_annotations.call_count
self.assertEqual(callcount, len(self.io._object_hashes))
self.compare_blocks(self.neo_blocks, self.io.nix_file.blocks)
@unittest.skipUnless(HAVE_NIX, "Requires NIX")
class CommonTests(BaseTestIO, unittest.TestCase):
ioclass = NixIO
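# A note on running this module directly (assuming neo and nixio are importable):
#   python -m unittest neo.test.iotest.test_nixio
# The NIX-dependent cases are skipped automatically when nixio is missing
# (see the skipUnless decorators above).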
| 39.132189
| 81
| 0.564873
| 4,963
| 45,589
| 5.05138
| 0.114648
| 0.012445
| 0.009334
| 0.012126
| 0.330515
| 0.273753
| 0.230475
| 0.190187
| 0.171879
| 0.15353
| 0
| 0.01171
| 0.331286
| 45,589
| 1,164
| 82
| 39.165808
| 0.810634
| 0.038935
| 0
| 0.233475
| 0
| 0
| 0.042628
| 0.005177
| 0
| 0
| 0
| 0
| 0.099147
| 1
| 0.067164
| false
| 0
| 0.022388
| 0.004264
| 0.117271
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
530f7b706908016c5f54e7ff0367363c422ad2e4
| 5,686
|
py
|
Python
|
lib/taudataNlpTm.py
|
taudata-indonesia/elearning
|
6f9db8b829357cde1ae678255cc251629dfc25d2
|
[
"Apache-2.0"
] | 3
|
2020-08-29T04:54:25.000Z
|
2021-12-12T08:25:48.000Z
|
lib/taudataNlpTm.py
|
taudataid/eLearning
|
6f9db8b829357cde1ae678255cc251629dfc25d2
|
[
"Apache-2.0"
] | null | null | null |
lib/taudataNlpTm.py
|
taudataid/eLearning
|
6f9db8b829357cde1ae678255cc251629dfc25d2
|
[
"Apache-2.0"
] | 6
|
2020-07-28T23:46:57.000Z
|
2021-09-27T02:22:01.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 11:25:43 2019
@author: Taufik Sutanto
taufik@tau-data.id
https://tau-data.id
~~Materials & Code Usage Agreement (PPMC) - License:~~
* The Python modules and images used are the property of various sources, as credited in each module's license, caption, or watermark.
* Materials & codes outside point (1) (i.e. this code & all ".ipynb" slides) used at tau-data may be used for academic purposes and other non-commercial activities.
* For purposes outside point (2), written permission is required from Taufik Edy Sutanto (hereafter referred to as the author).
* Materials & codes may not be published without the author's permission.
* Materials & codes are provided "as-is", without warranty. The author is not responsible for their use outside the official activities run by the author.
* By using these materials and codes, the user agrees to this PPMC.
"""
import re, numpy as np
import itertools, nltk
from collections import Counter
from nltk.corpus import wordnet as wn
from nltk.stem import PorterStemmer;ps = PorterStemmer()
from itertools import chain
import warnings; warnings.simplefilter('ignore')
def lesk_wsd(sentence, ambiguous_word, pos=None, stem=True, hyperhypo=True):
# https://en.wikipedia.org/wiki/Lesk_algorithm
# https://stackoverflow.com/questions/20896278/word-sense-disambiguation-algorithm-in-python
max_overlaps = 0; lesk_sense = None
context_sentence = sentence.split()
for ss in wn.synsets(ambiguous_word):
#break
if pos and ss.pos is not pos: # If POS is specified.
continue
lesk_dictionary = []
lesk_dictionary+= ss.definition().replace('(','').replace(')','').split() # Includes definition.
lesk_dictionary+= ss.lemma_names() # Includes lemma_names.
# Optional: includes lemma_names of hypernyms and hyponyms.
if hyperhypo == True:
lesk_dictionary+= list(chain(*[i.lemma_names() for i in ss.hypernyms()+ss.hyponyms()]))
if stem == True: # Matching exact words causes sparsity, so lets match stems.
lesk_dictionary = [ps.stem(i) for i in lesk_dictionary]
context_sentence = [ps.stem(i) for i in context_sentence]
overlaps = set(lesk_dictionary).intersection(context_sentence)
if len(overlaps) > max_overlaps:
lesk_sense = ss
max_overlaps = len(overlaps)
return lesk_sense.name()
def words(text): return re.findall(r'\w+', text.lower())
corpus = 'data/corpus_sederhana.txt'
WORDS = Counter(words(open(corpus).read()))
def P(word):
"Probability of `word`."
N=sum(WORDS.values())
return WORDS[word] / N
def correction(word):
"Most probable spelling correction for word."
return max(candidates(word), key=P)
def candidates(word):
"Generate possible spelling corrections for word."
return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])
def known(words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in WORDS)
def edits1(word):
"All edits that are one edit away from `word`."
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edits2(word):
"All edits that are two edits away from `word`."
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
def get_nMax(arr, n):
indices = arr.ravel().argsort()[-n:]
indices = (np.unravel_index(i, arr.shape) for i in indices)
return [(arr[i], i) for i in indices]
def filter_for_tags(tagged, tags=['NN', 'JJ', 'NNP']):
return [item for item in tagged if item[1] in tags]
def normalize(tagged):
return [(item[0].replace('.', ''), item[1]) for item in tagged]
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
        for element in itertools.filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def lDistance(firstString, secondString):
"Function to find the Levenshtein distance between two words/sentences - gotten from http://rosettacode.org/wiki/Levenshtein_distance#Python"
if len(firstString) > len(secondString):
firstString, secondString = secondString, firstString
distances = range(len(firstString) + 1)
for index2, char2 in enumerate(secondString):
newDistances = [index2 + 1]
for index1, char1 in enumerate(firstString):
if char1 == char2:
newDistances.append(distances[index1])
else:
newDistances.append(1 + min((distances[index1], distances[index1+1], newDistances[-1])))
distances = newDistances
return distances[-1]
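# Illustrative usage sketch (assumes the module imported cleanly, i.e.
# 'data/corpus_sederhana.txt' exists and the NLTK wordnet data is available):
#   print(lDistance('kitten', 'sitting'))   # -> 3
#   print(correction('speling'))            # most probable correction given WORDS
#   print(lesk_wsd('I deposited cash at the bank', 'bank'))  # a WordNet synset name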
| 44.077519
| 185
| 0.657052
| 754
| 5,686
| 4.900531
| 0.376658
| 0.026522
| 0.009743
| 0.007578
| 0.034641
| 0.020839
| 0.009743
| 0.009743
| 0.009743
| 0
| 0
| 0.013815
| 0.236194
| 5,686
| 129
| 186
| 44.077519
| 0.836979
| 0.332219
| 0
| 0.044444
| 0
| 0.011111
| 0.132012
| 0.012331
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144444
| false
| 0
| 0.077778
| 0.033333
| 0.344444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53120b489f06aa51d07ee8b517ab0cf190f8e1f9
| 807
|
py
|
Python
|
counting_capitals.py
|
m10singh94/Python-programs
|
a83083044b4a85afcf70c4b7024287a808b01fee
|
[
"Apache-2.0"
] | null | null | null |
counting_capitals.py
|
m10singh94/Python-programs
|
a83083044b4a85afcf70c4b7024287a808b01fee
|
[
"Apache-2.0"
] | null | null | null |
counting_capitals.py
|
m10singh94/Python-programs
|
a83083044b4a85afcf70c4b7024287a808b01fee
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 21 08:09:31 2020
@author: Shivadhar SIngh
"""
def count_capitals(string):
count = 0
for ch in string:
if ord(ch) >= 65 and ord(ch) <= 90:
count += 1
return count
def remove_substring_everywhere(string, substring):
    '''
    Remove all occurrences of substring from string, and return
    the resulting string. Both arguments must be strings.
    '''
    if not substring:
        return string
    pieces = []
    i = 0
    lsub = len(substring)  # length of the substring
    while True:
        p = string.find(substring, i)  # next occurrence at or after position i
        if p == -1:
            pieces.append(string[i:])  # keep the tail after the last match
            return ''.join(pieces)
        pieces.append(string[i:p])     # keep the text before the match
        i = p + lsub                   # skip over the match itself
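# A small illustrative demo (the sample strings are chosen here and are not from
# the original file):
if __name__ == '__main__':
    print(count_capitals("Hello World"))                # -> 2
    print(remove_substring_everywhere("banana", "an"))  # -> ba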
| 25.21875
| 63
| 0.591078
| 110
| 807
| 4.309091
| 0.472727
| 0.044304
| 0.120253
| 0.084388
| 0.126582
| 0.126582
| 0.126582
| 0.126582
| 0
| 0
| 0
| 0.04028
| 0.292441
| 807
| 32
| 64
| 25.21875
| 0.789842
| 0.275093
| 0
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53122c71e26ce2a712524601aa3f1353a6ea1b32
| 11,576
|
py
|
Python
|
lib/flows/general/discovery_test.py
|
nahidupa/grr
|
100a9d85ef2abb234e12e3ac2623caffb4116be7
|
[
"Apache-2.0"
] | 1
|
2015-02-22T16:05:06.000Z
|
2015-02-22T16:05:06.000Z
|
lib/flows/general/discovery_test.py
|
nahidupa/grr
|
100a9d85ef2abb234e12e3ac2623caffb4116be7
|
[
"Apache-2.0"
] | 3
|
2020-09-11T12:54:50.000Z
|
2020-09-11T12:55:01.000Z
|
lib/flows/general/discovery_test.py
|
nahidupa/grr
|
100a9d85ef2abb234e12e3ac2623caffb4116be7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests for Interrogate."""
import socket
from grr.client import vfs
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import artifact_test
from grr.lib import client_index
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import search
from grr.lib import test_lib
class DiscoveryTestEventListener(flow.EventListener):
"""A test listener to receive new client discoveries."""
well_known_session_id = rdfvalue.SessionID(flow_name="discovery_test")
EVENTS = ["Discovery"]
# For this test we just write the event as a class attribute.
event = None
@flow.EventHandler(auth_required=True)
def ProcessMessage(self, message=None, event=None):
_ = message
DiscoveryTestEventListener.event = event
class TestClientInterrogate(artifact_test.ArtifactTest):
"""Test the interrogate flow."""
def _CheckUsers(self, all_users):
"""Check all user stores."""
summary = self.fd.GetSummary()
self.assertItemsEqual([x.username for x in summary.users], all_users)
users = [x.username for x in self.fd.Get(self.fd.Schema.USER)]
self.assertItemsEqual(users, all_users)
self.assertItemsEqual(self.fd.Get(self.fd.Schema.USERNAMES), all_users)
# Check kb users
kbusers = [x.username for x in
self.fd.Get(self.fd.Schema.KNOWLEDGE_BASE).users]
self.assertItemsEqual(kbusers, all_users)
def _CheckAFF4Object(self, hostname, system, install_date):
self.assertEqual(self.fd.Get(self.fd.Schema.HOSTNAME), hostname)
self.assertEqual(self.fd.Get(self.fd.Schema.SYSTEM), system)
self.assertEqual(self.fd.Get(self.fd.Schema.INSTALL_DATE), install_date)
def _CheckClientInfo(self):
info = self.fd.Get(self.fd.Schema.CLIENT_INFO)
self.assertEqual(info.client_name, config_lib.CONFIG["Client.name"])
self.assertEqual(info.client_version,
int(config_lib.CONFIG["Client.version_numeric"]))
self.assertEqual(info.build_time, config_lib.CONFIG["Client.build_time"])
def _CheckGRRConfig(self):
"""Check old and new client config."""
config_info = self.fd.Get(self.fd.Schema.GRR_CONFIGURATION)
self.assertEqual(config_info["Client.control_urls"],
["http://localhost:8001/control"])
self.assertEqual(config_info["Client.poll_min"], 1.0)
def _CheckClientIndex(self, host_pattern):
"""Check that the index has been updated."""
index_fd = aff4.FACTORY.Create(self.fd.Schema.client_index, "AFF4Index",
mode="r", token=self.token)
self.assertEqual(
[self.fd.urn],
[x for x in index_fd.Query([self.fd.Schema.HOSTNAME], host_pattern)])
def _CheckClientKwIndex(self, keywords, expected_count):
# Tests that the client index has expected_count results when
# searched for keywords.
index = aff4.FACTORY.Create(client_index.MAIN_INDEX,
aff4_type="ClientIndex",
mode="rw",
token=self.token)
self.assertEqual(len(index.LookupClients(keywords)),
expected_count)
def _CheckNotificationsCreated(self):
user_fd = aff4.FACTORY.Open("aff4:/users/test", token=self.token)
notifications = user_fd.Get(user_fd.Schema.PENDING_NOTIFICATIONS)
self.assertEqual(len(notifications), 1)
notification = notifications[0]
self.assertEqual(notification.subject, rdfvalue.RDFURN(self.client_id))
def _CheckClientSummary(self, osname, version, kernel="3.13.0-39-generic",
release="5"):
summary = self.fd.GetSummary()
self.assertEqual(summary.client_info.client_name,
config_lib.CONFIG["Client.name"])
self.assertEqual(summary.client_info.client_version,
int(config_lib.CONFIG["Client.version_numeric"]))
self.assertEqual(summary.client_info.build_time,
config_lib.CONFIG["Client.build_time"])
self.assertEqual(summary.system_info.system, osname)
self.assertEqual(summary.system_info.node, "test_node")
self.assertEqual(summary.system_info.release, release)
self.assertEqual(summary.system_info.version, version)
self.assertEqual(summary.system_info.machine, "i386")
self.assertEqual(summary.system_info.kernel, kernel)
self.assertEqual(len(summary.interfaces), 1)
self.assertEqual(summary.interfaces[0].mac_address, "123456")
# Check that the client summary was published to the event listener.
self.assertEqual(DiscoveryTestEventListener.event.client_id, self.client_id)
self.assertEqual(
DiscoveryTestEventListener.event.interfaces[0].mac_address,
"123456")
def _CheckNetworkInfo(self):
net_fd = self.fd.OpenMember("network")
interfaces = list(net_fd.Get(net_fd.Schema.INTERFACES))
self.assertEqual(interfaces[0].mac_address, "123456")
self.assertEqual(interfaces[0].addresses[0].human_readable, "100.100.100.1")
self.assertEqual(socket.inet_ntoa(interfaces[0].addresses[0].packed_bytes),
"100.100.100.1")
# Mac addresses should be available as hex for searching
mac_addresses = self.fd.Get(self.fd.Schema.MAC_ADDRESS)
self.assertTrue("123456".encode("hex") in str(mac_addresses))
# Same for IP addresses.
ip_addresses = self.fd.Get(self.fd.Schema.HOST_IPS)
self.assertTrue("100.100.100.1" in str(ip_addresses))
def _CheckVFS(self):
# Check that virtual directories exist for the mount points
fd = aff4.FACTORY.Open(self.client_id.Add("fs/os/mnt/data"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
fd = aff4.FACTORY.Open(self.client_id.Add("fs/tsk/dev/sda"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
fd = aff4.FACTORY.Open(self.client_id.Add("devices/dev/sda"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
def _CheckLabelIndex(self):
"""Check that label indexes are updated."""
self.assertEqual(
list(search.SearchClients("label:Label2", token=self.token)),
[self.client_id])
def _CheckWindowsDiskInfo(self):
client = aff4.FACTORY.Open(self.client_id, token=self.token)
volumes = client.Get(client.Schema.VOLUMES)
self.assertEqual(len(volumes), 2)
for result in volumes:
self.assertTrue(isinstance(result, rdfvalue.Volume))
self.assertTrue(result.windows.drive_letter in ["Z:", "C:"])
def _CheckRegistryPathspec(self):
# This tests that we can click refresh on a key in the registry vfs subtree
# even if we haven't downloaded any other key above it in the tree.
fd = aff4.FACTORY.Open(self.client_id.Add("registry").Add(
"HKEY_LOCAL_MACHINE").Add("random/path/bla"), token=self.token)
pathspec = fd.real_pathspec
self.assertEqual(pathspec.pathtype, rdfvalue.PathSpec.PathType.REGISTRY)
self.assertEqual(pathspec.CollapsePath(),
u"/HKEY_LOCAL_MACHINE/random/path/bla")
def _CheckRelease(self, desired_release, desired_version):
# Test for correct Linux release override behaviour.
client = aff4.FACTORY.Open(self.client_id, token=self.token)
release = str(client.Get(client.Schema.OS_RELEASE))
version = str(client.Get(client.Schema.OS_VERSION))
self.assertEqual(release, desired_release)
self.assertEqual(version, desired_version)
def testInterrogateLinuxWithWtmp(self):
"""Test the Interrogate flow."""
test_lib.ClientFixture(self.client_id, token=self.token)
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeTestDataVFSHandler
config_lib.CONFIG.Set("Artifacts.knowledge_base", ["LinuxWtmp",
"NetgroupConfiguration",
"LinuxRelease"])
config_lib.CONFIG.Set("Artifacts.netgroup_filter_regexes", [r"^login$"])
self.SetLinuxClient()
client_mock = action_mocks.InterrogatedClient("TransferBuffer", "StatFile",
"Find", "HashBuffer",
"ListDirectory",
"FingerprintFile")
client_mock.InitializeClient()
for _ in test_lib.TestFlowHelper("Interrogate", client_mock,
token=self.token,
client_id=self.client_id):
pass
self.fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self._CheckAFF4Object("test_node", "Linux", 100 * 1000000)
self._CheckClientInfo()
self._CheckClientIndex(".*test.*")
self._CheckGRRConfig()
self._CheckNotificationsCreated()
self._CheckClientSummary("Linux", "14.4", release="Ubuntu",
kernel="3.13.0-39-generic")
self._CheckRelease("Ubuntu", "14.4")
# users 1,2,3 from wtmp
# users yagharek, isaac from netgroup
self._CheckUsers(["yagharek", "isaac", "user1", "user2", "user3"])
self._CheckNetworkInfo()
self._CheckVFS()
self._CheckLabelIndex()
self._CheckClientKwIndex(["Linux"], 1)
self._CheckClientKwIndex(["Label2"], 1)
def testInterrogateWindows(self):
"""Test the Interrogate flow."""
test_lib.ClientFixture(self.client_id, token=self.token)
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler
client_mock = action_mocks.InterrogatedClient("TransferBuffer", "StatFile",
"Find", "HashBuffer",
"ListDirectory",
"FingerprintFile")
self.SetWindowsClient()
client_mock.InitializeClient(system="Windows", version="6.1.7600",
kernel="6.1.7601")
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper("Interrogate", client_mock,
token=self.token,
client_id=self.client_id):
pass
self.fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self._CheckAFF4Object("test_node", "Windows", 100 * 1000000)
self._CheckClientInfo()
self._CheckClientIndex(".*Host.*")
self._CheckGRRConfig()
self._CheckNotificationsCreated()
self._CheckClientSummary("Windows", "6.1.7600", kernel="6.1.7601")
# Users Bert and Ernie added by the fixture should not be present (USERS is
# overridden by the knowledge base).
# jim is parsed from the registry profile keys.
self._CheckUsers(["jim", "kovacs"])
self._CheckNetworkInfo()
self._CheckVFS()
self._CheckLabelIndex()
self._CheckWindowsDiskInfo()
self._CheckRegistryPathspec()
self._CheckClientKwIndex(["Linux"], 0)
self._CheckClientKwIndex(["Windows"], 1)
self._CheckClientKwIndex(["Label2"], 1)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| 40.194444
| 80
| 0.666379
| 1,366
| 11,576
| 5.505857
| 0.239385
| 0.073793
| 0.029783
| 0.021274
| 0.415104
| 0.342641
| 0.282276
| 0.248238
| 0.229624
| 0.205957
| 0
| 0.01912
| 0.218383
| 11,576
| 287
| 81
| 40.334495
| 0.812113
| 0.116621
| 0
| 0.265
| 0
| 0
| 0.091357
| 0.015456
| 0
| 0
| 0
| 0
| 0.225
| 1
| 0.09
| false
| 0.01
| 0.06
| 0
| 0.175
| 0.01
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5312934109f52156adbe6f8ebda77f7b1fb3121e
| 3,595
|
py
|
Python
|
practices/20210112/GraphicsView.py
|
liff-engineer/articles
|
ad3386ef9cda5083793f485e309a9f85ab36f664
|
[
"MIT"
] | 2
|
2020-12-01T06:44:41.000Z
|
2021-11-22T06:07:52.000Z
|
practices/20210112/GraphicsView.py
|
liff-engineer/articles
|
ad3386ef9cda5083793f485e309a9f85ab36f664
|
[
"MIT"
] | null | null | null |
practices/20210112/GraphicsView.py
|
liff-engineer/articles
|
ad3386ef9cda5083793f485e309a9f85ab36f664
|
[
"MIT"
] | null | null | null |
import sys
from PySide2.QtWidgets import QGraphicsView, QGraphicsScene, QApplication
from PySide2.QtCore import *
from PySide2.QtGui import *
class GraphicsView(QGraphicsView):
def __init__(self, parent=None):
super().__init__(parent)
# Canvas view dimensions
self.w = 64000.0
self.h = 32000.0
# Zoom-related settings
self.zoomInFactor = 1.25
self.zoomClamp = True
self.zoom = 10
self.zoomStep = 1
self.zoomRange = [0, 20]
self.setRenderHints(QPainter.Antialiasing | QPainter.HighQualityAntialiasing |
QPainter.TextAntialiasing | QPainter.SmoothPixmapTransform)
self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
self.setDragMode(QGraphicsView.RubberBandDrag)
self.setScene(QGraphicsScene())
self.setSceneRect(-self.w/2, -self.h/2, self.w, self.h)
def zoomImpl(self, bigOrSmall: bool):
zoomOutFactor = 1 / self.zoomInFactor
zoomFactor = zoomOutFactor
if bigOrSmall:
zoomFactor = self.zoomInFactor
self.zoom += self.zoomStep
else:
zoomFactor = zoomOutFactor
self.zoom -= self.zoomStep
clamped = False
if self.zoom < self.zoomRange[0]:
self.zoom, clamped = self.zoomRange[0], True
if self.zoom > self.zoomRange[1]:
self.zoom, clamped = self.zoomRange[1], True
if not clamped or self.zoomClamp is False:
self.scale(zoomFactor, zoomFactor)
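# Added note (explanatory, not in the original): each call multiplies the
# view transform geometrically, so N zoom-in steps scale by
# zoomInFactor ** N (e.g. 3 wheel ticks give 1.25 ** 3 ~= 1.95x).  The
# zoomRange of [0, 20] together with zoomClamp stops further scaling once
# the internal zoom counter would leave that range.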
def panBeginImpl(self, event):
releaseEvent = QMouseEvent(QEvent.MouseButtonRelease, event.localPos(), event.screenPos(),
Qt.LeftButton, Qt.NoButton, event.modifiers())
super().mouseReleaseEvent(releaseEvent)
self.setDragMode(QGraphicsView.ScrollHandDrag)
fakeEvent = QMouseEvent(event.type(), event.localPos(), event.screenPos(),
Qt.LeftButton, event.buttons() | Qt.LeftButton, event.modifiers())
super().mousePressEvent(fakeEvent)
def panEndImpl(self, event):
fakeEvent = QMouseEvent(event.type(), event.localPos(), event.screenPos(),
Qt.LeftButton, event.buttons() & ~Qt.LeftButton, event.modifiers())
super().mouseReleaseEvent(fakeEvent)
self.setDragMode(QGraphicsView.RubberBandDrag)
def keyPressEvent(self, event):
if event.matches(QKeySequence.ZoomIn):
self.zoomImpl(True)
elif event.matches(QKeySequence.ZoomOut):
self.zoomImpl(False)
else:
super().keyPressEvent(event)
def wheelEvent(self, event):
if self.dragMode() == QGraphicsView.ScrollHandDrag:
return
return self.zoomImpl(event.angleDelta().y() > 0)
def mousePressEvent(self, event):
if event.button() == Qt.MiddleButton:
return self.panBeginImpl(event)
super().mousePressEvent(event)
def mouseReleaseEvent(self, event):
if event.button() == Qt.MiddleButton:
return self.panEndImpl(event)
super().mouseReleaseEvent(event)
if __name__ == "__main__":
app = QApplication(sys.argv)
appView = GraphicsView()
appView.scene().addSimpleText('liff.engineer@gmail.com')
appView.scene().addRect(-200, -150, 400, 300)
appView.show()
sys.exit(app.exec_())
| 36.313131
| 99
| 0.638387
| 340
| 3,595
| 6.7
| 0.344118
| 0.024583
| 0.021071
| 0.035558
| 0.199737
| 0.15496
| 0.13784
| 0.13784
| 0.13784
| 0.097454
| 0
| 0.016461
| 0.256467
| 3,595
| 98
| 100
| 36.683673
| 0.835765
| 0.00306
| 0
| 0.128205
| 0
| 0
| 0.008657
| 0.006423
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.051282
| 0
| 0.205128
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5313e0d9c7ffb25cacea29febc7679af1ef4f1a0
| 7,997
|
py
|
Python
|
tests/propositional/test_natural_deduction.py
|
ariroffe/logics
|
fb918ae8cf243a452e5b030f0df17add83f47f8b
|
[
"MIT"
] | 12
|
2021-03-31T08:12:09.000Z
|
2022-03-15T21:36:59.000Z
|
tests/propositional/test_natural_deduction.py
|
ariroffe/logics
|
fb918ae8cf243a452e5b030f0df17add83f47f8b
|
[
"MIT"
] | null | null | null |
tests/propositional/test_natural_deduction.py
|
ariroffe/logics
|
fb918ae8cf243a452e5b030f0df17add83f47f8b
|
[
"MIT"
] | 1
|
2021-03-31T15:14:26.000Z
|
2021-03-31T15:14:26.000Z
|
import unittest
from logics.classes.propositional import Inference, Formula
from logics.classes.propositional.proof_theories import NaturalDeductionStep, NaturalDeductionRule
from logics.utils.parsers import classical_parser
from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system
class TestClassicalNaturalDeductionSystem(unittest.TestCase):
def test_natural_deduction_rule(self):
"""Test overriding of index and len methods in NaturalDeductionRule"""
rule = NaturalDeductionRule([
'(...)',
NaturalDeductionStep(Formula(['→', ['A'], ['B']])),
'(...)',
NaturalDeductionStep(Formula(['B']), 'E→', [0, 1])
])
self.assertEqual(rule.index(NaturalDeductionStep(Formula(['B']), 'E→', [0, 1])), 1)
self.assertEqual(len(rule), 2)
def test_nd_system(self):
"""Test the method that tells if a step is a correct application of a rule"""
# A correct derivation
deriv = classical_parser.parse_derivation(
"""p; premise
(p → q); premise
q; E→; [1, 0]; []
p ∧ q; I∧; [0, 2]; []""",
natural_deduction=True)
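# Note on the derivation string format (inferred from the examples in this
# file, added for clarity): each line reads
#   formula; rule; on_steps; open_suppositions
# where on_steps are the 0-based indices of the steps the rule is applied to
# and open_suppositions lists the suppositions still open at that step;
# premise lines may omit the last two fields.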
# Check that it is an application of the correct rule, and not of a different rule
self.assertTrue(nd_system.is_correct_application(deriv, 2, nd_system.rules['E→']))
self.assertFalse(nd_system.is_correct_application(deriv, 2, nd_system.rules['E∧2']))
self.assertTrue(nd_system.is_correct_application(deriv, 3, nd_system.rules['I∧']))
self.assertFalse(nd_system.is_correct_application(deriv, 3, nd_system.rules['E→']))
# Check that it is a correct derivation of the correct inference, and not of an incorrect one
i = Inference([Formula(['p']), Formula(['→', ['p'], ['q']])],
[Formula(['∧', ['p'], ['q']])])
self.assertTrue(nd_system.is_correct_derivation(deriv, i))
i2 = Inference([Formula(['p']), Formula(['→', ['p'], ['q']])],
[Formula(['∧', ['q'], ['p']])])
self.assertFalse(nd_system.is_correct_derivation(deriv, i2))
# Repeating steps should not alter the outcome (should print a warning)
# deriv2_0 = classical_parser.parse_derivation(
# """p; supposition; []; [0]
# p; repetition; [0, 0]; [0]""",
# natural_deduction=True)
# self.assertTrue(nd_system.is_correct_application(deriv2_0, 1, nd_system.rules['repetition']))
# Test step in the future
deriv2_1 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [1]; [0]""",
natural_deduction=True)
deriv2_2 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [2]; [0]""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv2_1, 1, nd_system.rules['repetition']))
self.assertFalse(nd_system.is_correct_application(deriv2_2, 1, nd_system.rules['repetition']))
# -------------------------------------------------
# Test incorrect use of suppositions
# Using a step in a closed supposition
deriv3_1 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [0]; [0]
(p → p); I→; [0, 1]; []
p; E→; [2, 0]; []""",
natural_deduction=True)
# Check correct application of rep and I→
self.assertTrue(nd_system.is_correct_application(deriv3_1, 1, nd_system.rules['repetition']))
self.assertTrue(nd_system.is_correct_application(deriv3_1, 2, nd_system.rules['I→']))
self.assertFalse(nd_system.is_correct_application(deriv3_1, 3, nd_system.rules['E→']))
# Closing a supposition with a rule that does not close
deriv3_2 = classical_parser.parse_derivation('''
p; premise
p; supposition; []; [1]
p; repetition; [0]; [1]
(p ∨ q); I∨1; [0]; []''',
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv3_2, 3, nd_system.rules['I∨1']))
# Closing two suppositions at once
deriv3_3 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; supposition; [0]; [0, 1]
(p → p); I→; [0, 1]; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv3_3, 2, nd_system.rules['I→']))
# Not closing a supposition with a rule that does close
deriv3_4 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [0]; [0]
(p → p); I→; [0, 1]; [0]""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv3_4, 2, nd_system.rules['I→']))
# Incorrect opening of suppositions
deriv3_5 = classical_parser.parse_derivation(
"""p; supposition; []; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv3_5, None))
deriv3_6 = classical_parser.parse_derivation(
"""p; premise; []; []
q; supposition; []; [0]""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv3_6, None))
# -------------------------------------------------
# A correct derivation using all the rules
deriv4 = classical_parser.parse_derivation(
"""q; premise; []; []
~q; supposition; []; [1]
~q; repetition; [1]; [1]
(q ∧ ~q); I∧; [0, 2]; [1]
q; E∧1; [3]; [1]
⊥; E~; [1, 4]; [1]
p; EFSQ; [5]; [1]
⊥; repetition; [5]; [1]
~~q; I~; [1, 7]; []
q; ~~; [8]; []
q; supposition; []; [10]
q; repetition; [10]; [10]
(q → q); I→; [10, 11]; []
q; E→; [12, 9]; []
(q ∨ p); I∨1; [13]; []
(p → q); premise; []; []
q; E∨; [14, 12, 15]; []
""", natural_deduction=True)
i3 = Inference([Formula(['q']), Formula(['→', ['p'], ['q']])],
[Formula(['q'])])
self.assertTrue(nd_system.is_correct_derivation(deriv4, i3))
def test_rule_order(self):
# i1 is conjunction introduction
i1 = Inference([Formula(['p']), Formula(['q'])],
[Formula(['∧', ['p'], ['q']])])
# First derivation: standard one
deriv1_1 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(p ∧ q); I∧; [0, 1]; []""",
natural_deduction=True)
self.assertTrue(nd_system.is_correct_derivation(deriv1_1, i1))
# Second derivation: reverse on_steps order
deriv1_2 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(p ∧ q); I∧; [1, 0]; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv1_2, i1))
i2 = Inference([Formula(['p']), Formula(['q'])],
[Formula(['∧', ['q'], ['p']])])
# Third derivation: reverse the conjuncts
deriv2_1 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(q ∧ p); I∧; [1, 0]; []""",
natural_deduction=True)
self.assertTrue(nd_system.is_correct_derivation(deriv2_1, i2))
# Fourth derivation: reverse the conjuncts and the on_steps
deriv2_2 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(q ∧ p); I∧; [0, 1]; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv2_2, i2))
if __name__ == '__main__':
unittest.main()
| 43.227027
| 108
| 0.549456
| 941
| 7,997
| 4.538789
| 0.140276
| 0.069305
| 0.05151
| 0.087567
| 0.620464
| 0.599625
| 0.543667
| 0.45446
| 0.368532
| 0.315617
| 0
| 0.030957
| 0.28098
| 7,997
| 184
| 109
| 43.461957
| 0.702609
| 0.156184
| 0
| 0.244681
| 0
| 0
| 0.046097
| 0
| 0
| 0
| 0
| 0
| 0.244681
| 1
| 0.031915
| false
| 0
| 0.053191
| 0
| 0.095745
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5314e40633c46116c596429cdd1af4edda4e5856
| 10,244
|
py
|
Python
|
src/trusted/validator_arm/dgen_decoder_output.py
|
cohortfsllc/cohort-cocl2-sandbox
|
0ac6669d1a459d65a52007b80d5cffa4ef330287
|
[
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
src/trusted/validator_arm/dgen_decoder_output.py
|
cohortfsllc/cohort-cocl2-sandbox
|
0ac6669d1a459d65a52007b80d5cffa4ef330287
|
[
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
src/trusted/validator_arm/dgen_decoder_output.py
|
cohortfsllc/cohort-cocl2-sandbox
|
0ac6669d1a459d65a52007b80d5cffa4ef330287
|
[
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
#!/usr/bin/python
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Responsible for generating the decoder based on parsed
table representations.
"""
import dgen_opt
import dgen_output
import dgen_actuals
# This file generates the class decoder Decoder as defined by the
# decoder tables. The code is specifically written to minimize the
# number of decoder classes needed to parse valid ARM
# instructions. Many rows in the table use the same decoder class. In
# addition, we optimize tables by merging, so long as the same decoder
# class is built.
#
# The following files are generated:
#
# decoder.h
# decoder.cc
#
# decoder.h declares the generated decoder parser class while
# decoder.cc contains the implementation of that decoder class.
#
# For testing purposes (see dgen_test_output.py) different rules are
# applied. Note: It may be worth reading dgen_test_output.py preamble
# to get a better understanding of decoder actions, and why we need
# the "action_filter" methods.
"""The current command line arguments to use"""
_cl_args = {}
NEWLINE_STR="""
"""
COMMENTED_NEWLINE_STR="""
//"""
# Defines the header for decoder.h
H_HEADER="""%(FILE_HEADER)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/decode.h"
#include "%(FILENAME_BASE)s_actuals.h"
namespace nacl_arm_dec {
"""
DECODER_DECLARE_HEADER="""
// Defines a decoder class selector for instructions.
class %(decoder_name)s : DecoderState {
public:
explicit %(decoder_name)s();
// Parses the given instruction, returning the decoder to use.
virtual const ClassDecoder& decode(const Instruction) const;
// Returns the class decoder to use to process the fictitious instruction
// that is inserted before the first instruction in the code block by
// the validator.
const ClassDecoder &fictitious_decoder() const {
return %(fictitious_decoder)s_instance_;
}
private:
"""
DECODER_DECLARE_METHOD_COMMENTS="""
// The following list of methods correspond to each decoder table,
// and implements the pattern matching of the corresponding bit
// patterns. After matching the corresponding bit patterns, they
// either call other methods in this list (corresponding to another
// decoder table), or they return the instance field that implements
// the class decoder that should be used to decode the particular
// instruction.
"""
DECODER_DECLARE_METHOD="""
inline const ClassDecoder& decode_%(table_name)s(
const Instruction inst) const;
"""
DECODER_DECLARE_FIELD_COMMENTS="""
// The following fields define the set of class decoders
// that can be returned by the API function "decode". They
// are created once as instance fields, and then returned
// by the table methods above. This speeds up the code since
// the class decoders need to only be built once (and reused
// for each call to "decode")."""
DECODER_DECLARE_FIELD="""
const %(decoder)s %(decoder)s_instance_;"""
DECODER_DECLARE_FOOTER="""
};
"""
H_FOOTER="""
} // namespace nacl_arm_dec
#endif // %(IFDEF_NAME)s
"""
def generate_h(decoder, decoder_name, filename, out, cl_args):
"""Entry point to the decoder for .h file.
Args:
decoder: The decoder defined by the list of Table objects to
process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
assert filename.endswith('.h')
_cl_args = cl_args
# Before starting, remove all testing information from the parsed tables.
decoder = decoder.action_filter(['actual'])
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'IFDEF_NAME': dgen_output.ifdef_name(filename),
'FILENAME_BASE': filename[:-len('.h')],
'decoder_name': decoder_name,
}
out.write(H_HEADER % values)
values['fictitious_decoder'] = (
decoder.get_value('FictitiousFirst').actual())
out.write(DECODER_DECLARE_HEADER % values)
out.write(DECODER_DECLARE_METHOD_COMMENTS)
for table in decoder.tables():
values['table_name'] = table.name
out.write(DECODER_DECLARE_METHOD % values)
out.write(DECODER_DECLARE_FIELD_COMMENTS)
for action in decoder.action_filter(['actual']).decoders():
values['decoder'] = action.actual()
out.write(DECODER_DECLARE_FIELD % values)
out.write(DECODER_DECLARE_FOOTER % values)
out.write(H_FOOTER % values)
# Defines the header for DECODER.h
CC_HEADER="""%(FILE_HEADER)s
#include "%(header_filename)s"
namespace nacl_arm_dec {
"""
CONSTRUCTOR_HEADER="""
%(decoder_name)s::%(decoder_name)s() : DecoderState()"""
CONSTRUCTOR_FIELD_INIT="""
, %(decoder)s_instance_()"""
CONSTRUCTOR_FOOTER="""
{}
"""
METHOD_HEADER="""
// Implementation of table: %(table_name)s.
// Specified by: %(citation)s
const ClassDecoder& %(decoder_name)s::decode_%(table_name)s(
const Instruction inst) const
{"""
METHOD_HEADER_TRACE="""
fprintf(stderr, "decode %(table_name)s\\n");
"""
METHOD_DISPATCH_BEGIN="""
if (%s"""
METHOD_DISPATCH_CONTINUE=""" &&
%s"""
METHOD_DISPATCH_END=") {"""
METHOD_DISPATCH_TRACE="""
fprintf(stderr, "count = %s\\n");"""
METHOD_DISPATCH_CLASS_DECODER="""
return %(decoder)s_instance_;"""
METHOD_DISPATCH_SUBMETHOD="""
return decode_%(subtable_name)s(inst);"""
METHOD_DISPATCH_CLOSE="""
}
"""
METHOD_FOOTER="""
// Catch any attempt to fall through ...
return %(not_implemented)s_instance_;
}
"""
DECODER_METHOD_HEADER="""
const ClassDecoder& %(decoder_name)s::decode(const Instruction inst) const {"""
DECODER_METHOD_TRACE="""
fprintf(stderr, "Parsing %%08x\\n", inst.Bits());"""
DECODER_METHOD_FOOTER="""
return decode_%(entry_table_name)s(inst);
}
"""
CC_FOOTER="""
} // namespace nacl_arm_dec
"""
def generate_cc(decoder, decoder_name, filename, out, cl_args):
"""Implementation of the decoder in .cc file
Args:
decoder: The decoder defined by the list of Table objects to
process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .cc file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
assert filename.endswith('.cc')
_cl_args = cl_args
# Before starting, remove all testing information from the parsed
# tables.
decoder = decoder.action_filter(['actual'])
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'header_filename': filename[:-2] + 'h',
'decoder_name': decoder_name,
'entry_table_name': decoder.primary.name,
}
out.write(CC_HEADER % values)
_generate_constructors(decoder, values, out)
_generate_methods(decoder, values, out)
out.write(DECODER_METHOD_HEADER % values)
if _cl_args.get('trace') == 'True':
out.write(DECODER_METHOD_TRACE % values)
out.write(DECODER_METHOD_FOOTER % values)
out.write(CC_FOOTER % values)
def _generate_constructors(decoder, values, out):
out.write(CONSTRUCTOR_HEADER % values)
for decoder in decoder.action_filter(['actual']).decoders():
values['decoder'] = decoder.actual()
out.write(CONSTRUCTOR_FIELD_INIT % values)
out.write(CONSTRUCTOR_FOOTER % values)
def _generate_methods(decoder, values, out):
global _cl_args
for table in decoder.tables():
# Add the default row as the last in the optimized row, so that
# it is applied if all other rows do not.
opt_rows = sorted(dgen_opt.optimize_rows(table.rows(False)))
if table.default_row:
opt_rows.append(table.default_row)
opt_rows = table.add_column_to_rows(opt_rows)
print ("Table %s: %d rows minimized to %d"
% (table.name, len(table.rows()), len(opt_rows)))
values['table_name'] = table.name
values['citation'] = table.citation
out.write(METHOD_HEADER % values)
if _cl_args.get('trace') == 'True':
out.write(METHOD_HEADER_TRACE % values)
# Add message to stop compilation warnings if this table
# doesn't require subtables to select a class decoder.
if not table.methods():
out.write("\n UNREFERENCED_PARAMETER(inst);")
count = 0
for row in opt_rows:
count = count + 1
# Each row consists of a set of bit patterns defining if the row
# is applicable. Convert this into a sequence of anded C test
# expressions. For example, convert the following pair of bit
# patterns:
#
# xxxx1010xxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxx0101
#
# Each instruction is masked to get the bits, and then
# tested against the corresponding expected bits. Hence, the
# above example is converted to:
#
# ((inst & 0x0F000000) != 0x0C000000) &&
# ((inst & 0x0000000F) != 0x00000005)
out.write(METHOD_DISPATCH_BEGIN %
row.patterns[0].to_commented_bool())
for p in row.patterns[1:]:
out.write(METHOD_DISPATCH_CONTINUE % p.to_commented_bool())
out.write(METHOD_DISPATCH_END)
if _cl_args.get('trace') == 'True':
out.write(METHOD_DISPATCH_TRACE % count)
if row.action.__class__.__name__ == 'DecoderAction':
values['decoder'] = row.action.actual()
out.write(METHOD_DISPATCH_CLASS_DECODER % values)
elif row.action.__class__.__name__ == 'DecoderMethod':
values['subtable_name'] = row.action.name
out.write(METHOD_DISPATCH_SUBMETHOD % values)
else:
raise Exception('Bad table action: %s' % repr(row.action))
out.write(METHOD_DISPATCH_CLOSE % values)
values['not_implemented'] = decoder.get_value('NotImplemented').actual()
out.write(METHOD_FOOTER % values)
| 31.327217
| 79
| 0.695334
| 1,341
| 10,244
| 5.121551
| 0.234899
| 0.03145
| 0.020384
| 0.022423
| 0.29339
| 0.205009
| 0.187828
| 0.177635
| 0.151718
| 0.145894
| 0
| 0.006318
| 0.196505
| 10,244
| 326
| 80
| 31.423313
| 0.828089
| 0.271964
| 0
| 0.193878
| 0
| 0
| 0.401845
| 0.065116
| 0
| 0
| 0
| 0
| 0.010204
| 1
| 0.020408
| false
| 0
| 0.015306
| 0
| 0.061224
| 0.020408
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5314f89fdb3bc7bd293961169b273d8fa69fd14c
| 2,985
|
py
|
Python
|
compose/progress_stream.py
|
ilinum/compose
|
d1633d8e9df3c2dd4fa6f562c6b037cfe1af8ddb
|
[
"Apache-2.0"
] | 1
|
2019-03-06T08:03:18.000Z
|
2019-03-06T08:03:18.000Z
|
compose/progress_stream.py
|
SeppPenner/compose
|
87b25363a385b108066f87570aa5396567585324
|
[
"Apache-2.0"
] | 2
|
2021-03-25T21:27:44.000Z
|
2021-06-01T21:41:30.000Z
|
compose/progress_stream.py
|
SeppPenner/compose
|
87b25363a385b108066f87570aa5396567585324
|
[
"Apache-2.0"
] | 2
|
2018-07-20T15:52:21.000Z
|
2018-12-14T11:54:03.000Z
|
from __future__ import absolute_import
from __future__ import unicode_literals
from compose import utils
class StreamOutputError(Exception):
pass
def stream_output(output, stream):
is_terminal = hasattr(stream, 'isatty') and stream.isatty()
stream = utils.get_output_stream(stream)
all_events = []
lines = {}
diff = 0
for event in utils.json_stream(output):
all_events.append(event)
is_progress_event = 'progress' in event or 'progressDetail' in event
if not is_progress_event:
print_output_event(event, stream, is_terminal)
stream.flush()
continue
if not is_terminal:
continue
# if it's a progress event and we have a terminal, then display the progress bars
image_id = event.get('id')
if not image_id:
continue
if image_id not in lines:
lines[image_id] = len(lines)
stream.write("\n")
diff = len(lines) - lines[image_id]
# move cursor up `diff` rows
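# (Added note: 27 is the ASCII ESC character, and ESC[<n>A is the ANSI
# escape sequence for "cursor up n rows"; the ESC[<n>B write further down
# moves the cursor back.)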
stream.write("%c[%dA" % (27, diff))
print_output_event(event, stream, is_terminal)
if 'id' in event:
# move cursor back down
stream.write("%c[%dB" % (27, diff))
stream.flush()
return all_events
def print_output_event(event, stream, is_terminal):
if 'errorDetail' in event:
raise StreamOutputError(event['errorDetail']['message'])
terminator = ''
if is_terminal and 'stream' not in event:
# erase current line
stream.write("%c[2K\r" % 27)
terminator = "\r"
elif 'progressDetail' in event:
return
if 'time' in event:
stream.write("[%s] " % event['time'])
if 'id' in event:
stream.write("%s: " % event['id'])
if 'from' in event:
stream.write("(from %s) " % event['from'])
status = event.get('status', '')
if 'progress' in event:
stream.write("%s %s%s" % (status, event['progress'], terminator))
elif 'progressDetail' in event:
detail = event['progressDetail']
total = detail.get('total')
if 'current' in detail and total:
percentage = float(detail['current']) / float(total) * 100
stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
else:
stream.write('%s%s' % (status, terminator))
elif 'stream' in event:
stream.write("%s%s" % (event['stream'], terminator))
else:
stream.write("%s%s\n" % (status, terminator))
def get_digest_from_pull(events):
for event in events:
status = event.get('status')
if not status or 'Digest' not in status:
continue
_, digest = status.split(':', 1)
return digest.strip()
return None
def get_digest_from_push(events):
for event in events:
digest = event.get('aux', {}).get('Digest')
if digest:
return digest
return None
| 26.651786
| 89
| 0.58794
| 365
| 2,985
| 4.684932
| 0.249315
| 0.049123
| 0.049123
| 0.052632
| 0.201754
| 0.150292
| 0.067251
| 0.045614
| 0
| 0
| 0
| 0.006135
| 0.290117
| 2,985
| 111
| 90
| 26.891892
| 0.800849
| 0.049246
| 0
| 0.233766
| 0
| 0
| 0.098835
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051948
| false
| 0.012987
| 0.038961
| 0
| 0.181818
| 0.038961
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53180c720a6c468e5a33fc70c5bb3a7936430339
| 808
|
py
|
Python
|
libs/BIDS.py
|
GuillermoPerez32/EE2BIDS_backend
|
2cab240840e11e227ad60e4c8e17ac9ac87defd4
|
[
"MIT"
] | null | null | null |
libs/BIDS.py
|
GuillermoPerez32/EE2BIDS_backend
|
2cab240840e11e227ad60e4c8e17ac9ac87defd4
|
[
"MIT"
] | null | null | null |
libs/BIDS.py
|
GuillermoPerez32/EE2BIDS_backend
|
2cab240840e11e227ad60e4c8e17ac9ac87defd4
|
[
"MIT"
] | null | null | null |
import os
from bids_validator import BIDSValidator
def validate(bids_directory):
print('- Validate: init started.')
file_paths = []
result = []
validator = BIDSValidator()
for path, dirs, files in os.walk(bids_directory):
for filename in files:
if filename == '.bidsignore':
continue
if filename.endswith('_annotations.tsv'):
continue
if filename.endswith('_annotations.json'):
continue
temp = os.path.join(path, filename)
file_paths.append(temp[len(bids_directory):len(temp)])
result.append(validator.is_bids(temp[len(bids_directory):len(temp)]))
# print(validator.is_bids(temp[len(bids_directory):len(temp)]))
return file_paths, result
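# Example usage (added sketch; the directory path below is hypothetical):
if __name__ == '__main__':
    paths, results = validate('/data/bids_dataset')
    for rel_path, ok in zip(paths, results):
        print(('OK   ' if ok else 'FAIL ') + rel_path)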
| 29.925926
| 81
| 0.613861
| 89
| 808
| 5.426966
| 0.393258
| 0.134576
| 0.068323
| 0.124224
| 0.383023
| 0.229814
| 0.173913
| 0.173913
| 0.173913
| 0
| 0
| 0
| 0.27599
| 808
| 26
| 82
| 31.076923
| 0.825641
| 0.075495
| 0
| 0.157895
| 0
| 0
| 0.092617
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.105263
| 0
| 0.210526
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
531bf28ae62e80e65e0bf53d63aae0164d547e7c
| 696
|
py
|
Python
|
harmony_tools/core/colors.py
|
a1fred/guitar_gammas
|
9933fe899af7a8e7f490f61d58004bb59f03271c
|
[
"MIT"
] | 1
|
2021-02-26T03:52:26.000Z
|
2021-02-26T03:52:26.000Z
|
harmony_tools/core/colors.py
|
a1fred/harmony_tools
|
9933fe899af7a8e7f490f61d58004bb59f03271c
|
[
"MIT"
] | null | null | null |
harmony_tools/core/colors.py
|
a1fred/harmony_tools
|
9933fe899af7a8e7f490f61d58004bb59f03271c
|
[
"MIT"
] | null | null | null |
COLOR_BLUE = '\033[0;34m'
COLOR_GREEN = '\033[0;32m'
COLOR_CYAN = '\033[0;36m'
COLOR_RED = '\033[0;31m'
COLOR_PURPLE = '\033[0;35m'
COLOR_BROWN = '\033[0;33m'
COLOR_YELLOW = '\033[1;33m'
COLOR_GRAY = '\033[1;30m'
COLOR_RESET = '\033[0m'
FG_COLORS = [
# COLOR_BLUE,
COLOR_GREEN,
# COLOR_CYAN,
# COLOR_RED,
# COLOR_PURPLE,
# COLOR_BROWN,
# COLOR_YELLOW,
]
def next_color(color):
assert color in FG_COLORS
index = FG_COLORS.index(color)
index += 1
try:
return FG_COLORS[index]
except IndexError:
index = 0
return FG_COLORS[index]
def c(string, color):
global COLOR_RESET
return f"{color}{string}{COLOR_RESET}"
| 17.4
| 42
| 0.627874
| 102
| 696
| 4.04902
| 0.352941
| 0.058111
| 0.125908
| 0.09201
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101124
| 0.232759
| 696
| 39
| 43
| 17.846154
| 0.672285
| 0.107759
| 0
| 0.083333
| 0
| 0
| 0.187296
| 0.045603
| 0
| 0
| 0
| 0
| 0.041667
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
531ccd14367ef1d863f40816ee6edf521bc6c3f6
| 712
|
py
|
Python
|
Common_Questions/TextBookQuestions/PythonCrashCourse/Chapter_8/8_5.py
|
tegamax/ProjectCode
|
0ed86e227fba50b453c5c4a2596afbadc39a167e
|
[
"MIT"
] | null | null | null |
Common_Questions/TextBookQuestions/PythonCrashCourse/Chapter_8/8_5.py
|
tegamax/ProjectCode
|
0ed86e227fba50b453c5c4a2596afbadc39a167e
|
[
"MIT"
] | null | null | null |
Common_Questions/TextBookQuestions/PythonCrashCourse/Chapter_8/8_5.py
|
tegamax/ProjectCode
|
0ed86e227fba50b453c5c4a2596afbadc39a167e
|
[
"MIT"
] | null | null | null |
'''
8-5. Cities: Write a function called describe_city() that accepts the name of a city and its country.
The function should print a simple sentence, such as Reykjavik is in Iceland. Give the parameter for the country a default value.
Call your function for three different cities, at least one of which is not in the default country.
'''
def describe_city(city,country='Iceland'):
cities = ['Reykjavik','Kópavogur','Reykjanesbær','Garðabær','Mosfellsbær','Hafnarfjörður']
for i in cities:
if i==city:
print(f'{i} is in Iceland')
break
else:
print(f'So you think {city} is a city Iceland?')
describe_city('Garðabær','country')
'''
Árborg
Akureyri
'''
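# A more direct solution to the exercise in the docstring above (added
# illustration, not part of the original answer):
def describe_city_simple(city, country='Iceland'):
    print(f'{city} is in {country}')

describe_city_simple('Reykjavik')
describe_city_simple('Akureyri')
describe_city_simple('Tokyo', 'Japan')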
| 28.48
| 130
| 0.685393
| 103
| 712
| 4.708738
| 0.553398
| 0.074227
| 0.045361
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003552
| 0.20927
| 712
| 25
| 131
| 28.48
| 0.857904
| 0.467697
| 0
| 0
| 0
| 0
| 0.398281
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.111111
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
531ce1da9e2ab397f8a8222a28bef7e919e9c968
| 12,383
|
py
|
Python
|
tests/test_geometry_loader.py
|
trnielsen/nexus-constructor
|
65efb6eedca30250b75f142dd29a46bc909958df
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_geometry_loader.py
|
trnielsen/nexus-constructor
|
65efb6eedca30250b75f142dd29a46bc909958df
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_geometry_loader.py
|
trnielsen/nexus-constructor
|
65efb6eedca30250b75f142dd29a46bc909958df
|
[
"BSD-2-Clause"
] | null | null | null |
from nexus_constructor.geometry import OFFGeometryNoNexus
from nexus_constructor.geometry.geometry_loader import load_geometry_from_file_object
from nexus_constructor.off_renderer import repeat_shape_over_positions
from PySide2.QtGui import QVector3D
from io import StringIO
def test_GIVEN_off_file_containing_geometry_WHEN_loading_geometry_to_file_THEN_vertices_and_faces_loaded_are_the_same_as_the_file():
model = OFFGeometryNoNexus()
model.units = "m"
off_file = (
"OFF\n"
"# cube.off\n"
"# A cube\n"
"8 6 0\n"
"-0.500000 -0.500000 0.500000\n"
"0.500000 -0.500000 0.500000\n"
"-0.500000 0.500000 0.500000\n"
"0.500000 0.500000 0.500000\n"
"-0.500000 0.500000 -0.500000\n"
"0.500000 0.500000 -0.500000\n"
"-0.500000 -0.500000 -0.500000\n"
"0.500000 -0.500000 -0.500000\n"
"4 0 1 3 2\n"
"4 2 3 5 4\n"
"4 4 5 7 6\n"
"4 6 7 1 0\n"
"4 1 7 5 3\n"
"4 6 0 2 4\n"
)
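# Note on the OFF text above (added for clarity): the "8 6 0" header line
# declares 8 vertices, 6 faces and 0 edges; each face line then starts with
# the number of vertices in that face (4 here) followed by their 0-based
# vertex indices.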
load_geometry_from_file_object(StringIO(off_file), ".off", model.units, model)
assert model.vertices == [
QVector3D(-0.5, -0.5, 0.5),
QVector3D(0.5, -0.5, 0.5),
QVector3D(-0.5, 0.5, 0.5),
QVector3D(0.5, 0.5, 0.5),
QVector3D(-0.5, 0.5, -0.5),
QVector3D(0.5, 0.5, -0.5),
QVector3D(-0.5, -0.5, -0.5),
QVector3D(0.5, -0.5, -0.5),
]
assert model.faces == [
[0, 1, 3, 2],
[2, 3, 5, 4],
[4, 5, 7, 6],
[6, 7, 1, 0],
[1, 7, 5, 3],
[6, 0, 2, 4],
]
assert model.winding_order == [
0,
1,
3,
2,
2,
3,
5,
4,
4,
5,
7,
6,
6,
7,
1,
0,
1,
7,
5,
3,
6,
0,
2,
4,
]
assert model.winding_order_indices == [0, 4, 8, 12, 16, 20]
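# Added note: winding_order is just the face index lists flattened into one
# sequence, and winding_order_indices records the offset at which each face
# starts in that sequence; the offsets are multiples of 4 here because every
# face of the cube has 4 vertices.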
def test_GIVEN_stl_file_with_cube_geometry_WHEN_loading_geometry_THEN_all_faces_are_present():
length = 30
left_lower_rear = QVector3D(0, 0, 0)
right_lower_rear = QVector3D(length, 0, 0)
left_upper_rear = QVector3D(0, length, 0)
right_upper_rear = QVector3D(length, length, 0)
left_lower_front = QVector3D(0, 0, length)
right_lower_front = QVector3D(length, 0, length)
left_upper_front = QVector3D(0, length, length)
right_upper_front = QVector3D(length, length, length)
# faces on a cube with a right hand winding order
faces = [
[
left_lower_front,
left_lower_rear,
right_lower_rear,
right_lower_front,
], # bottom
[left_lower_front, left_upper_front, left_upper_rear, left_lower_rear], # left
[
left_upper_front,
left_lower_front,
right_lower_front,
right_upper_front,
], # front
[
right_upper_front,
right_lower_front,
right_lower_rear,
right_upper_rear,
], # right
[right_upper_rear, right_lower_rear, left_lower_rear, left_upper_rear], # rear
[left_upper_rear, left_upper_front, right_upper_front, right_upper_rear], # top
]
cube = """solid vcg
facet normal -1.000000e+00 0.000000e+00 0.000000e+00
outer loop
vertex 0.000000e+00 3.000000e+01 0.000000e+00
vertex 0.000000e+00 0.000000e+00 3.000000e+01
vertex 0.000000e+00 3.000000e+01 3.000000e+01
endloop
endfacet
facet normal -1.000000e+00 0.000000e+00 0.000000e+00
outer loop
vertex 0.000000e+00 0.000000e+00 0.000000e+00
vertex 0.000000e+00 0.000000e+00 3.000000e+01
vertex 0.000000e+00 3.000000e+01 0.000000e+00
endloop
endfacet
facet normal 1.000000e+00 -0.000000e+00 0.000000e+00
outer loop
vertex 3.000000e+01 0.000000e+00 3.000000e+01
vertex 3.000000e+01 3.000000e+01 0.000000e+00
vertex 3.000000e+01 3.000000e+01 3.000000e+01
endloop
endfacet
facet normal 1.000000e+00 0.000000e+00 0.000000e+00
outer loop
vertex 3.000000e+01 0.000000e+00 3.000000e+01
vertex 3.000000e+01 0.000000e+00 0.000000e+00
vertex 3.000000e+01 3.000000e+01 0.000000e+00
endloop
endfacet
facet normal 0.000000e+00 -1.000000e+00 0.000000e+00
outer loop
vertex 3.000000e+01 0.000000e+00 0.000000e+00
vertex 3.000000e+01 0.000000e+00 3.000000e+01
vertex 0.000000e+00 0.000000e+00 0.000000e+00
endloop
endfacet
facet normal 0.000000e+00 -1.000000e+00 0.000000e+00
outer loop
vertex 0.000000e+00 0.000000e+00 0.000000e+00
vertex 3.000000e+01 0.000000e+00 3.000000e+01
vertex 0.000000e+00 0.000000e+00 3.000000e+01
endloop
endfacet
facet normal 0.000000e+00 1.000000e+00 0.000000e+00
outer loop
vertex 3.000000e+01 3.000000e+01 3.000000e+01
vertex 3.000000e+01 3.000000e+01 0.000000e+00
vertex 0.000000e+00 3.000000e+01 0.000000e+00
endloop
endfacet
facet normal 0.000000e+00 1.000000e+00 0.000000e+00
outer loop
vertex 3.000000e+01 3.000000e+01 3.000000e+01
vertex 0.000000e+00 3.000000e+01 0.000000e+00
vertex 0.000000e+00 3.000000e+01 3.000000e+01
endloop
endfacet
facet normal 0.000000e+00 0.000000e+00 -1.000000e+00
outer loop
vertex 0.000000e+00 3.000000e+01 0.000000e+00
vertex 3.000000e+01 3.000000e+01 0.000000e+00
vertex 0.000000e+00 0.000000e+00 0.000000e+00
endloop
endfacet
facet normal 0.000000e+00 0.000000e+00 -1.000000e+00
outer loop
vertex 0.000000e+00 0.000000e+00 0.000000e+00
vertex 3.000000e+01 3.000000e+01 0.000000e+00
vertex 3.000000e+01 0.000000e+00 0.000000e+00
endloop
endfacet
facet normal 0.000000e+00 0.000000e+00 1.000000e+00
outer loop
vertex 3.000000e+01 3.000000e+01 3.000000e+01
vertex 0.000000e+00 3.000000e+01 3.000000e+01
vertex 0.000000e+00 0.000000e+00 3.000000e+01
endloop
endfacet
facet normal 0.000000e+00 0.000000e+00 1.000000e+00
outer loop
vertex 3.000000e+01 3.000000e+01 3.000000e+01
vertex 0.000000e+00 0.000000e+00 3.000000e+01
vertex 3.000000e+01 0.000000e+00 3.000000e+01
endloop
endfacet
endsolid vcg"""
geometry = load_geometry_from_file_object(StringIO(cube), ".stl", "m")
# 2 triangles per face, 6 faces in the cube
assert len(geometry.faces) == 6 * 2
assert geometry.winding_order_indices == [i * 3 for i in range(12)]
# each expected vertex is in the shape
for vertex in [
left_lower_rear,
right_lower_rear,
left_upper_rear,
right_upper_rear,
left_lower_front,
right_lower_front,
left_upper_front,
right_upper_front,
]:
assert vertex in geometry.vertices
# each face must be in the loaded geometry
for face in faces:
face_found = False
# each face could be split into triangles in one of two ways
for triangle_split in [
[[face[0], face[1], face[2]], [face[2], face[3], face[0]]],
[[face[1], face[2], face[3]], [face[3], face[0], face[1]]],
]:
triangle_matches = 0
# each triangle in the square's split must be in the loaded geometry for the square to count as found
for triangle in triangle_split:
# check the triangle against each rotation of each triangle in the geometry
for candidate_triangle_indices in geometry.faces:
a = geometry.vertices[candidate_triangle_indices[0]]
b = geometry.vertices[candidate_triangle_indices[1]]
c = geometry.vertices[candidate_triangle_indices[2]]
if (
triangle == [a, b, c]
or triangle == [b, c, a]
or triangle == [c, a, b]
):
triangle_matches += 1
if triangle_matches == 2:
face_found = True
assert face_found
def test_GIVEN_unrecognised_file_extension_WHEN_loading_geometry_THEN_returns_empty_geometry():
geometry = load_geometry_from_file_object(StringIO(), ".txt", "m")
assert len(geometry.vertices) == 0
assert len(geometry.faces) == 0
def get_dummy_OFF():
# A square with a triangle on the side
original_vertices = [
QVector3D(0, 0, 0),
QVector3D(0, 1, 0),
QVector3D(1, 1, 0),
QVector3D(1, 0, 0),
QVector3D(1.5, 0.5, 0),
]
original_faces = [[0, 1, 2, 3], [2, 3, 4]]
return OFFGeometryNoNexus(vertices=original_vertices, faces=original_faces)
def test_WHEN_generate_off_mesh_with_no_repeat_THEN_off_unchanged():
off_geometry = get_dummy_OFF()
positions = [QVector3D(0, 0, 0)]
faces, vertices = repeat_shape_over_positions(off_geometry, positions)
assert faces == off_geometry.faces
assert vertices == off_geometry.vertices
def test_WHEN_generate_off_mesh_with_three_copies_THEN_original_shape_remains():
off_geometry = get_dummy_OFF()
positions = [QVector3D(0, 0, 0), QVector3D(0, 0, 1), QVector3D(1, 0, 0)]
faces, vertices = repeat_shape_over_positions(off_geometry, positions)
assert faces[: len(off_geometry.faces)] == off_geometry.faces
assert vertices[: len(off_geometry.vertices)] == off_geometry.vertices
def _test_position_with_single_translation_helper(translation):
off_geometry = get_dummy_OFF()
positions = [QVector3D(0, 0, 0), translation]
faces, vertices = repeat_shape_over_positions(off_geometry, positions)
second_shape_faces = faces[len(off_geometry.faces) :]
second_shape_vertices = vertices[len(off_geometry.vertices) :]
# Faces will be the same, but with every vertex index offset by len(off_geometry.vertices)
shifted_faces = []
for face in second_shape_faces:
shifted_face = []
for vertex in face:
shifted_face.append(vertex - len(off_geometry.vertices))
shifted_faces.append(shifted_face)
assert shifted_faces == off_geometry.faces
return off_geometry.vertices, second_shape_vertices
def test_WHEN_generate_off_mesh_with_single_x_position_THEN_second_shape_just_translation_of_first():
(
original_vertices,
second_shape_vertices,
) = _test_position_with_single_translation_helper(QVector3D(1, 0, 0))
# Vertices will be the same, but shifted by 1
for vertex in second_shape_vertices:
vertex.setX(vertex.x() - 1)
assert second_shape_vertices == original_vertices
def test_WHEN_generate_off_mesh_with_single_y_position_THEN_second_shape_just_translation_of_first():
(
original_vertices,
second_shape_vertices,
) = _test_position_with_single_translation_helper(QVector3D(0, 1, 0))
# Vertices will be the same, but shifted by 1
for vertex in second_shape_vertices:
vertex.setY(vertex.y() - 1)
assert second_shape_vertices == original_vertices
def test_WHEN_generate_off_mesh_with_single_negative_z_position_THEN_second_shape_just_translation_of_first():
(
original_vertices,
second_shape_vertices,
) = _test_position_with_single_translation_helper(QVector3D(0, 0, -1))
# Vertices will be the same, but shifted by 1
for vertex in second_shape_vertices:
vertex.setZ(vertex.z() + 1)
assert second_shape_vertices == original_vertices
def test_WHEN_generate_off_mesh_with_single_diagonal_position_THEN_second_shape_just_translation_of_first():
(
original_vertices,
second_shape_vertices,
) = _test_position_with_single_translation_helper(QVector3D(0, 1, -1))
for vertex in second_shape_vertices:
vertex.setZ(vertex.z() + 1)
vertex.setY(vertex.y() - 1)
assert second_shape_vertices == original_vertices
| 34.112948
| 132
| 0.622305
| 1,752
| 12,383
| 4.187785
| 0.093037
| 0.110399
| 0.10631
| 0.078779
| 0.675889
| 0.623143
| 0.568488
| 0.540139
| 0.532915
| 0.51915
| 0
| 0.196021
| 0.285634
| 12,383
| 362
| 133
| 34.207182
| 0.633394
| 0.052895
| 0
| 0.518272
| 0
| 0
| 0.328892
| 0
| 0
| 0
| 0
| 0
| 0.063123
| 1
| 0.036545
| false
| 0
| 0.016611
| 0
| 0.059801
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
531e5848bd2aa1d173ccaded9dac7c7007b60544
| 1,599
|
py
|
Python
|
main.py
|
marcusviniciusteixeira/RPAPython
|
8055e7283e6a8dd8910139cbbaa914761e2924f2
|
[
"MIT"
] | 1
|
2022-01-23T00:17:05.000Z
|
2022-01-23T00:17:05.000Z
|
main.py
|
marcusviniciusteixeira/RPAPython
|
8055e7283e6a8dd8910139cbbaa914761e2924f2
|
[
"MIT"
] | null | null | null |
main.py
|
marcusviniciusteixeira/RPAPython
|
8055e7283e6a8dd8910139cbbaa914761e2924f2
|
[
"MIT"
] | null | null | null |
import PySimpleGUI as sg
import os
import time
import pyautogui
class TelaPython:
def __init__(self):
layout = [
[sg.Text('Usuário',size=(10,0)), sg.Input(size=(20,0),key='usuario')],
[sg.Text('Senha',size=(10,0)), sg.Input(size=(20,0),key='senha')],
[sg.Text('Número',size=(10,0)), sg.Input(size=(20,0),key='num')],
[sg.Text('Time1',size=(10,0)), sg.Slider(range=(0,30), default_value=0, orientation='h',size=(10,15),key='time1')],
[sg.Text('Time2',size=(10,0)), sg.Slider(range=(0,30), default_value=0, orientation='h',size=(10,15),key='time2')],
[sg.Button('Executar')]
]
janela = sg.Window("Macro Portal CLARO").layout(layout)
self.button, self.values = janela.read()
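# Added note: janela.read() blocks until a button is clicked and returns a
# (button, values) pair, where `values` is a dict keyed by the `key=` names
# declared in the layout above; Iniciar() reads those entries below.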
def Iniciar(self):
usuario = self.values['usuario']
senha = self.values['senha']
num = self.values['num']
time1 = self.values['time1']
time2 = self.values['time2']
os.startfile('PortalClaro.exe')
time.sleep(time1)
pyautogui.moveTo(571, 409)  # USERNAME field
pyautogui.click()
pyautogui.write(usuario)
pyautogui.press('tab')  # move to PASSWORD field
pyautogui.write(senha)#Pjfa#412
pyautogui.moveTo(672, 530)
pyautogui.click()
time.sleep(time2)
pyautogui.moveTo(556, 472)  # NUMBER field
pyautogui.click()
pyautogui.write(num)
pyautogui.moveTo(683, 505)
pyautogui.click()
time.sleep(1)
pyautogui.moveTo(576, 437)
pyautogui.click()
tela = TelaPython()
tela.Iniciar()
| 32.632653
| 127
| 0.579112
| 201
| 1,599
| 4.577114
| 0.338308
| 0.045652
| 0.038043
| 0.048913
| 0.206522
| 0.206522
| 0.206522
| 0.206522
| 0.206522
| 0.128261
| 0
| 0.069364
| 0.242652
| 1,599
| 49
| 128
| 32.632653
| 0.690339
| 0.013759
| 0
| 0.119048
| 0
| 0
| 0.07883
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
531fa48589e0156d08a9e55a80a6582cdc603310
| 810
|
py
|
Python
|
logistic-regression/plot_binary_losses.py
|
eliben/deep-learning-samples
|
d5ca86c5db664fabfb302cbbc231c50ec3d6a103
|
[
"Unlicense"
] | 183
|
2015-12-29T07:21:24.000Z
|
2022-01-18T01:19:23.000Z
|
logistic-regression/plot_binary_losses.py
|
eliben/deep-learning-samples
|
d5ca86c5db664fabfb302cbbc231c50ec3d6a103
|
[
"Unlicense"
] | null | null | null |
logistic-regression/plot_binary_losses.py
|
eliben/deep-learning-samples
|
d5ca86c5db664fabfb302cbbc231c50ec3d6a103
|
[
"Unlicense"
] | 68
|
2016-06-02T15:31:51.000Z
|
2021-09-08T19:58:10.000Z
|
# Helper code to plot binary losses.
#
# Eli Bendersky (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
fig, ax = plt.subplots()
fig.set_tight_layout(True)
xs = np.linspace(-2, 2, 500)
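# The three losses plotted below, as functions of the margin x (added
# summary of the code that follows):
#   L_01(x) = 1 if x < 0 else 0     # zero-one loss
#   L_2(x)  = (x - 1) ** 2          # square loss
#   L_h(x)  = max(0, 1 - x)         # hinge loss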
# plot L0/1 loss
ax.plot(xs, np.where(xs < 0, np.ones_like(xs), np.zeros_like(xs)),
color='r', linewidth=2.0, label='$L_{01}$')
# plot square loss
ax.plot(xs, (xs - 1) ** 2, linestyle='-.', label='$L_2$')
# plot hinge loss
ax.plot(xs, np.maximum(np.zeros_like(xs), 1 - xs),
color='g', linewidth=2.0, label='$L_h$')
ax.grid(True)
plt.ylim((-1, 4))
ax.legend()
fig.savefig('loss.png', dpi=80)
plt.show()
| 23.823529
| 70
| 0.608642
| 130
| 810
| 3.630769
| 0.546154
| 0.033898
| 0.063559
| 0.076271
| 0.131356
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034865
| 0.220988
| 810
| 33
| 71
| 24.545455
| 0.713154
| 0.198765
| 0
| 0
| 0
| 0
| 0.059282
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.176471
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
531fbba3b5ee2b6bc1a9dd51c2d5b53732a600be
| 1,243
|
py
|
Python
|
utils/watch-less.py
|
K-Fitzpatrick/crop_planner
|
2605c0886fd3b4681c2ea3ac5e88e1d8555178f5
|
[
"MIT"
] | 91
|
2016-03-15T16:41:41.000Z
|
2022-03-25T16:30:09.000Z
|
utils/watch-less.py
|
SoaringDragon42/crop_planner
|
2605c0886fd3b4681c2ea3ac5e88e1d8555178f5
|
[
"MIT"
] | 18
|
2016-03-30T15:01:25.000Z
|
2020-03-09T06:17:08.000Z
|
utils/watch-less.py
|
SoaringDragon42/crop_planner
|
2605c0886fd3b4681c2ea3ac5e88e1d8555178f5
|
[
"MIT"
] | 48
|
2016-03-15T16:41:44.000Z
|
2022-03-09T21:28:05.000Z
|
#!/usr/bin/env python3
################################
# Development tool
# Auto-compiles style.less to style.css
#
# Requires lessc and less clean css to be installed:
# npm install -g less
# npm install -g less-plugin-clean-css
################################
import os, time
from os import path
from math import floor
from _helper import *
# Main application
class Main:
style_less = "style.less"
style_css = "style.css"
def __init__(self):
clear()
os.chdir("../")
header("Watching style.less for changes\nctrl+c to exit")
print()
while True:
if not os.path.exists(self.style_less):
print(self.style_less + " does not exist. Exiting.")
return
if not os.path.exists(self.style_css):
self.compile()
elif path.getmtime(self.style_less) > path.getmtime(self.style_css):
self.compile()
time.sleep(.2)
def compile(self):
start = time.time()
os.system("lessc " + self.style_less + " " + self.style_css + " --clean-css")
touch(self.style_css, path.getmtime(self.style_less))
print("Recompiled [" + str(floor((time.time() - start) * 100)) + " ms]")
print()
# Run application
if __name__ == "__main__":
try:
app = Main()
except KeyboardInterrupt:
print("Exiting")
| 23.45283
| 79
| 0.636364
| 169
| 1,243
| 4.538462
| 0.431953
| 0.105606
| 0.084746
| 0.082138
| 0.181226
| 0.067797
| 0.067797
| 0
| 0
| 0
| 0
| 0.004897
| 0.1786
| 1,243
| 53
| 80
| 23.45283
| 0.746327
| 0.174578
| 0
| 0.125
| 0
| 0
| 0.150943
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.3125
| 0.15625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|