**Schema** (column and dtype; the name/type pairs were interleaved across lines in the flattened header)

| column | type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_{2,3,4}grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_{5..10}grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_* / qsc_codepython_* mirror columns (same names without the `_quality_signal` suffix) | int64, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) |
| effective | string |
| hits | int64 |

In the per-record listings below, quality-signal names drop the `qsc_code_` / `qsc_codepython_` prefix and the `_quality_signal` suffix, and byte counts are shown without thousands separators.
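The rows below follow this schema; the columns look like a code-corpus shard (The Stack-style repo metadata plus per-file quality signals). A minimal sketch for working with such a shard, assuming it is available as a Parquet file readable with pandas (the file name here is hypothetical):

```python
import pandas as pd

# Hypothetical shard name; any Parquet file with the schema above works.
df = pd.read_parquet("code_shard.parquet")

# Inspect one record: repo metadata plus the raw file contents.
row = df.iloc[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
print(row["content"][:200])

# Filter on a couple of the quality signals, e.g. duplicated n-gram mass
# and the fraction of lines that are bare print statements.
mask = (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2) \
     & (df["qsc_codepython_frac_lines_print_quality_signal"] < 0.3)
print(len(df[mask]), "of", len(df), "files pass")
```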
**Record 1: ppq/utils/round.py (xiguadong/ppq)**

- hexsha `22f9996e35b6cbbaeea6e8c3929b7498dd603017` · size 4471 · ext py · lang Python
- max_stars / max_issues / max_forks: repo_path `ppq/utils/round.py`, repo_name `xiguadong/ppq`, head_hexsha `6c71adb3c2a8ca95967f101724b5e4b3e6f761ff`, licenses `["Apache-2.0"]`; all counts and event datetimes null

content:
```python
from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal
from math import ceil, floor, log2
from typing import Union

import torch
from ppq.core import RoundingPolicy


def ppq_numerical_round(value: float,
                        policy: RoundingPolicy = RoundingPolicy.ROUND_HALF_EVEN) -> int:
    """
    reference: https://en.wikipedia.org/wiki/Rounding

    decimal definitions:
        - decimal.ROUND_CEILING (towards Infinity)
        - decimal.ROUND_DOWN (towards zero)
        - decimal.ROUND_FLOOR (towards -Infinity)
        - decimal.ROUND_HALF_DOWN (to nearest with ties going towards zero)
        - decimal.ROUND_HALF_EVEN (to nearest with ties going to nearest even integer)
        - decimal.ROUND_HALF_UP (to nearest with ties going away from zero)
        - decimal.ROUND_UP (away from zero)
        - decimal.ROUND_05UP (away from zero if last digit after rounding towards zero
          would have been 0 or 5; otherwise towards zero)

    Args:
        value (float): [description]
        policy (RoundingPolicy, optional): [description]. Defaults to RoundingPolicy.ROUND_HALF_EVEN.

    Raises:
        ValueError: [description]

    Returns:
        int: [description]
    """
    assert isinstance(value, float), 'numerical round only takes effect on float number.'
    if policy == RoundingPolicy.ROUND_HALF_EVEN:
        return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_EVEN))
    elif policy == RoundingPolicy.ROUND_HALF_UP:
        if value > 0: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_UP))
        else: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_DOWN))
    elif policy == RoundingPolicy.ROUND_HALF_DOWN:
        if value > 0: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_DOWN))
        else: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_UP))
    elif policy == RoundingPolicy.ROUND_HALF_TOWARDS_ZERO:
        return ppq_numerical_round(value, RoundingPolicy.ROUND_HALF_DOWN)
    elif policy == RoundingPolicy.ROUND_HALF_FAR_FORM_ZERO:
        return ppq_numerical_round(value, RoundingPolicy.ROUND_HALF_UP)
    elif policy == RoundingPolicy.ROUND_TO_NEAR_INT:
        if value > 0: return floor(value + 0.5)
        else: return ceil(value - 0.5)
    elif policy == RoundingPolicy.ROUND_UP:
        return ceil(value)
    else:
        raise ValueError('Unexpected rounding policy found.')


def ppq_tensor_round(value: torch.Tensor,
                     policy: RoundingPolicy = RoundingPolicy.ROUND_HALF_EVEN) -> torch.Tensor:
    """
    reference: https://en.wikipedia.org/wiki/Rounding

    Args:
        value (torch.Tensor): [description]
        policy (RoundingPolicy, optional): [description]. Defaults to RoundingPolicy.ROUND_HALF_EVEN.

    Raises:
        ValueError: [description]

    Returns:
        torch.Tensor: [description]
    """
    assert isinstance(value, torch.Tensor), 'tensor round only takes effect on torch tensor.'
    if policy == RoundingPolicy.ROUND_HALF_EVEN:
        # default rounding policy of torch is ROUND_TO_NEAR_EVEN
        # try this: print(torch.Tensor([1.5, 2.5, 3.5, 4.5]).round())
        # However it may generate unexpected results due to version difference.
        return value.round()
    elif policy == RoundingPolicy.ROUND_UP:
        return value.ceil()
    elif policy == RoundingPolicy.ROUND_HALF_TOWARDS_ZERO:
        return torch.sign(value) * torch.ceil(value.abs() - 0.5)
    elif policy == RoundingPolicy.ROUND_HALF_FAR_FORM_ZERO:
        return torch.sign(value) * torch.floor(value.abs() + 0.5)
    elif policy == RoundingPolicy.ROUND_HALF_DOWN:
        return torch.ceil(value - 0.5)
    elif policy == RoundingPolicy.ROUND_HALF_UP:
        return torch.floor(value + 0.5)
    elif policy == RoundingPolicy.ROUND_TO_NEAR_INT:
        raise NotImplementedError(f'Torch Tensor can not use this rounding policy({policy}) try ROUND_HALF_EVEN instead.')
    else:
        raise ValueError('Unexpected rounding policy found.')


def ppq_round_to_power_of_2(value: Union[float, int],
                            policy: RoundingPolicy = RoundingPolicy.ROUND_UP) -> float:
    if value == 0: return 0
    sign = 1 if value >= 0 else -1
    assert isinstance(value, float) or isinstance(value, int), \
        'power-of-2 round only takes effect on float or int.'
    return sign * float(pow(2, ppq_numerical_round(log2(sign * value), policy=policy)))
```
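A quick sanity check of the policies above; this sketch assumes the `ppq` package is importable:

```python
from ppq.core import RoundingPolicy
from ppq.utils.round import ppq_numerical_round, ppq_round_to_power_of_2

print(ppq_numerical_round(2.5))                                 # 2   (banker's rounding, ties to even)
print(ppq_numerical_round(2.5, RoundingPolicy.ROUND_HALF_UP))   # 3
print(ppq_numerical_round(-2.5, RoundingPolicy.ROUND_HALF_UP))  # -2  (ties go toward +infinity here)
print(ppq_round_to_power_of_2(6.0))                             # 8.0 (ROUND_UP applied to log2(6))
```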
quality signals:

- avg_line_length 45.622449 · max_line_length 138 · alphanum_fraction 0.698054
- num_words 577 · num_chars 4471 · mean_word_length 5.246101 · frac_words_unique 0.188908
- frac_chars_top_{2,3,4}grams 0.083251 / 0.121573 / 0.114965
- frac_chars_dupe_{5..10}grams 0.588702 / 0.543773 / 0.429468 / 0.373637 / 0.305253 / 0.185993
- frac_chars_replacement_symbols 0 · frac_chars_digital 0.011841 · frac_chars_whitespace 0.206665
- size_file_byte 4471 · num_lines 97 · num_chars_line_max 139 · num_chars_line_mean 46.092784
- frac_chars_alphabet 0.841556 · frac_chars_comments 0.294565 · cate_xml_start 0 · frac_lines_dupe_lines 0.339623
- cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.098643 · frac_chars_long_word_length 0
- frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0.056604
- codepython: cate_ast 1 · frac_lines_func_ratio 0.056604 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.09434 · frac_lines_simplefunc 0 · score_lines_no_logic 0.358491 · frac_lines_print 0
- unsuffixed mirror columns all 0 (frac_words_unique and frac_lines_string_concat null) · effective 1 · hits 0
**Record 2: python/repair/train.py (maropu/scavenger)**

- hexsha `22f9fe832c0a98e82946d0744a46553bfba443ca` · size 11944 · ext py · lang Python
- max_stars / max_issues / max_forks: repo_path `python/repair/train.py`, repo_name `maropu/scavenger`, head_hexsha `03a935968f4aa507d4d98c8ca528195b770757d9`, licenses `["Apache-2.0"]`
- max_stars_count null (event datetimes null) · max_issues_count 2 (2019-12-22T13:29:07.000Z to 2020-01-07T11:55:41.000Z) · max_forks_count 1 (2020-10-26T20:07:28.000Z, both)

content:
```python
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import copy
import time

import numpy as np  # type: ignore[import]
import pandas as pd  # type: ignore[import]

from collections import namedtuple
from typing import Any, Dict, List, Optional, Tuple

from repair.utils import elapsed_time, get_option_value, setup_logger

_logger = setup_logger()

# List of internal configurations
_option = namedtuple('_option', 'key default_value type_class validator err_msg')

_opt_boosting_type = \
    _option('model.lgb.boosting_type', 'gbdt', str,
            lambda v: v in ['gbdt', 'dart', 'goss', 'rf'],
            "`{}` should be in ['gbdt', 'dart', 'goss', 'rf']")
_opt_class_weight = \
    _option('model.lgb.class_weight', 'balanced', str, None, None)
_opt_learning_rate = \
    _option('model.lgb.learning_rate', 0.01, float,
            lambda v: v > 0.0, '`{}` should be positive')
_opt_max_depth = \
    _option('model.lgb.max_depth', 7, int, None, None)
_opt_max_bin = \
    _option('model.lgb.max_bin', 255, int, None, None)
_opt_reg_alpha = \
    _option('model.lgb.reg_alpha', 0.0, float,
            lambda v: v >= 0.0, '`{}` should be greater than or equal to 0.0')
_opt_min_split_gain = \
    _option('model.lgb.min_split_gain', 0.0, float,
            lambda v: v >= 0.0, '`{}` should be greater than or equal to 0.0')
_opt_n_estimators = \
    _option('model.lgb.n_estimators', 300, int,
            lambda v: v > 0, '`{}` should be positive')
_opt_importance_type = \
    _option('model.lgb.importance_type', 'gain', str,
            lambda v: v in ['split', 'gain'], "`{}` should be in ['split', 'gain']")
_opt_n_splits = \
    _option('model.cv.n_splits', 3, int,
            lambda v: v >= 3, '`{}` should be greater than 2')
_opt_timeout = \
    _option('model.hp.timeout', 0, int, None, None)
_opt_max_evals = \
    _option('model.hp.max_evals', 100000000, int,
            lambda v: v > 0, '`{}` should be positive')
_opt_no_progress_loss = \
    _option('model.hp.no_progress_loss', 50, int,
            lambda v: v > 0, '`{}` should be positive')

train_option_keys = [
    _opt_boosting_type.key,
    _opt_class_weight.key,
    _opt_learning_rate.key,
    _opt_max_depth.key,
    _opt_max_bin.key,
    _opt_reg_alpha.key,
    _opt_min_split_gain.key,
    _opt_n_estimators.key,
    _opt_importance_type.key,
    _opt_n_splits.key,
    _opt_timeout.key,
    _opt_max_evals.key,
    _opt_no_progress_loss.key
]


@elapsed_time  # type: ignore
def _build_lgb_model(X: pd.DataFrame, y: pd.Series, is_discrete: bool, num_class: int, n_jobs: int,
                     opts: Dict[str, str]) -> Tuple[Any, float]:
    import lightgbm as lgb  # type: ignore[import]

    def _get_option_value(*args) -> Any:  # type: ignore
        return get_option_value(opts, *args)

    if is_discrete:
        objective = "binary" if num_class <= 2 else "multiclass"
    else:
        objective = "regression"

    fixed_params = {
        "boosting_type": _get_option_value(*_opt_boosting_type),
        "objective": objective,
        "class_weight": _get_option_value(*_opt_class_weight),
        "learning_rate": _get_option_value(*_opt_learning_rate),
        "max_depth": _get_option_value(*_opt_max_depth),
        "max_bin": _get_option_value(*_opt_max_bin),
        "reg_alpha": _get_option_value(*_opt_reg_alpha),
        "min_split_gain": _get_option_value(*_opt_min_split_gain),
        "n_estimators": _get_option_value(*_opt_n_estimators),
        "importance_type": _get_option_value(*_opt_importance_type),
        "random_state": 42,
        "n_jobs": n_jobs
    }

    # Set `num_class` only in the `multiclass` mode
    if objective == "multiclass":
        fixed_params["num_class"] = num_class

    model_class = lgb.LGBMClassifier if is_discrete \
        else lgb.LGBMRegressor

    def _create_model(params: Dict[str, Any]) -> Any:
        # Some params must be int
        for k in ["num_leaves", "subsample_freq", "min_child_samples"]:
            if k in params:
                params[k] = int(params[k])
        p = copy.deepcopy(fixed_params)
        p.update(params)
        return model_class(**p)

    from hyperopt import hp, tpe, Trials  # type: ignore[import]
    from hyperopt.early_stop import no_progress_loss  # type: ignore[import]
    from hyperopt.fmin import fmin  # type: ignore[import]
    from sklearn.model_selection import (  # type: ignore[import]
        cross_val_score, KFold, StratifiedKFold
    )

    # TODO: Temporarily suppress `sklearn.model_selection` user warnings
    import warnings
    warnings.simplefilter("ignore", UserWarning)

    # Forcibly disable INFO-level logging in the `hyperopt` module
    from logging import getLogger, WARN
    getLogger("hyperopt").setLevel(WARN)

    param_space = {
        "num_leaves": hp.quniform("num_leaves", 2, 100, 1),
        "subsample": hp.uniform("subsample", 0.5, 1.0),
        "subsample_freq": hp.quniform("subsample_freq", 1, 20, 1),
        "colsample_bytree": hp.uniform("colsample_bytree", 0.01, 1.0),
        "min_child_samples": hp.quniform("min_child_samples", 1, 50, 1),
        "min_child_weight": hp.loguniform("min_child_weight", -3, 1),
        "reg_lambda": hp.loguniform("reg_lambda", -2, 3)
    }

    scorer = "f1_macro" if is_discrete else "neg_mean_squared_error"
    n_splits = int(_get_option_value(*_opt_n_splits))
    cv = StratifiedKFold(n_splits=n_splits, shuffle=True) if is_discrete \
        else KFold(n_splits=n_splits, shuffle=True)

    def _objective(params: Dict[str, Any]) -> float:
        model = _create_model(params)
        fit_params: Dict[str, str] = {
            # TODO: Raises an error if a single regressor is used
            # "categorical_feature": "auto",
        }
        try:
            # TODO: Replace with `lgb.cv` to remove the `sklearn` dependency
            scores = cross_val_score(
                model, X, y, scoring=scorer, cv=cv, fit_params=fit_params, n_jobs=n_jobs)
            return -scores.mean()
        # it might throw an exception because `y` contains
        # previously unseen labels.
        except Exception as e:
            _logger.warning(f"{e.__class__}: {e}")
            return 0.0

    def _early_stop_fn() -> Any:
        no_progress_loss_fn = no_progress_loss(int(_get_option_value(*_opt_no_progress_loss)))
        timeout = int(_get_option_value(*_opt_timeout))
        if timeout <= 0:
            return no_progress_loss_fn

        # Set base time for budget mechanism
        start_time = time.time()

        def timeout_fn(trials, best_loss=None, iteration_no_progress=0):  # type: ignore
            no_progress_loss, meta = no_progress_loss_fn(trials, best_loss, iteration_no_progress)
            to = time.time() - start_time > timeout
            return no_progress_loss or to, meta

        return timeout_fn

    try:
        trials = Trials()
        max_evals = int(_get_option_value(*_opt_max_evals))
        best_params = fmin(
            fn=_objective,
            space=param_space,
            algo=tpe.suggest,
            trials=trials,
            max_evals=max_evals,
            early_stop_fn=_early_stop_fn(),
            rstate=np.random.RandomState(42),
            show_progressbar=False,
            verbose=False)
        _logger.info("hyperopt: #eval={}/{}".format(len(trials.trials), max_evals))

        # Builds a model with `best_params`
        # TODO: Could we extract constraint rules (e.g., FD and CFD) from built statistical models?
        model = _create_model(best_params)
        model.fit(X, y)

        def _feature_importances() -> List[Any]:
            f = filter(lambda x: x[1] > 0.0, zip(model.feature_name_, model.feature_importances_))
            return list(sorted(f, key=lambda x: x[1], reverse=True))

        _logger.debug(f"lightgbm: feature_importances={_feature_importances()}")

        sorted_lst = sorted(trials.trials, key=lambda x: x['result']['loss'])
        min_loss = sorted_lst[0]['result']['loss']
        return model, -min_loss
    except Exception as e:
        _logger.warning(f"Failed to build a stat model because: {e}")
        return None, 0.0


def build_model(X: pd.DataFrame, y: pd.Series, is_discrete: bool, num_class: int, n_jobs: int,
                opts: Dict[str, str]) -> Tuple[Any, float]:
    return _build_lgb_model(X, y, is_discrete, num_class, n_jobs, opts)


def compute_class_nrow_stdv(y: pd.Series, is_discrete: bool) -> Optional[float]:
    from collections import Counter
    return float(np.std(list(map(lambda x: x[1], Counter(y).items())))) if is_discrete else None


def rebalance_training_data(X: pd.DataFrame, y: pd.Series, target: str) -> Tuple[pd.DataFrame, pd.Series]:
    # Uses median as the number of training rows for each class
    from collections import Counter
    prev_nrows = len(X)
    prev_stdv = compute_class_nrow_stdv(y, is_discrete=True)
    hist = dict(Counter(y).items())  # type: ignore
    median = int(np.median([count for key, count in hist.items()]))

    def _split_data(df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.Series]:
        X = df[df.columns[df.columns != target]]  # type: ignore
        y = df[target]
        return X, y

    # Filters out rows having NaN values for over-sampling
    X[target] = y
    X_notna, y_notna = _split_data(X.dropna())
    X_na, y_na = _split_data(X[X.isnull().any(axis=1)])

    # Over-sampling for training data whose row number is smaller than the median value
    hist_na = dict(Counter(y_na).items())  # type: ignore
    smote_targets = []
    kn = 5  # `k_neighbors` default value in `SMOTEN`
    for key, count in hist.items():
        if count < median:
            nna = hist_na[key] if key in hist_na else 0
            if count - nna > kn:
                smote_targets.append((key, median - nna))
            else:
                _logger.warning(f"Over-sampling of '{key}' in y='{target}' failed because the number of the clean rows "
                                f"is too small: {count - nna}")
    if len(smote_targets) > 0:
        from imblearn.over_sampling import SMOTEN
        sampler = SMOTEN(random_state=42, sampling_strategy=dict(smote_targets), k_neighbors=kn)
        X_notna, y_notna = sampler.fit_resample(X_notna, y_notna)
        X = pd.concat([X_notna, X_na])
        y = pd.concat([y_notna, y_na])

    # Under-sampling for training data whose row number is greater than the median value
    rus_targets = list(map(lambda x: (x[0], median), filter(lambda x: x[1] > median, hist.items())))
    if len(rus_targets) > 0:
        # NOTE: The other smarter implementations can skew samples if there are many rows having NaN values,
        # so we just use `RandomUnderSampler` here.
        from imblearn.under_sampling import RandomUnderSampler
        sampler = RandomUnderSampler(random_state=42, sampling_strategy=dict(rus_targets))
        X, y = sampler.fit_resample(X, y)

    _logger.info("Rebalanced training data (y={}, median={}): #rows={}(stdv={}) -> #rows={}(stdv={})".format(
        target, median, prev_nrows, prev_stdv, len(X), compute_class_nrow_stdv(y, is_discrete=True)))
    _logger.debug("class hist: {} => {}".format(hist.items(), Counter(y).items()))

    return X, y
```
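A standalone illustration of what `compute_class_nrow_stdv` measures (the spread of per-class row counts), using only numpy and pandas:

```python
import numpy as np
import pandas as pd
from collections import Counter

y = pd.Series(["a", "a", "a", "b", "c", "c"])
counts = [cnt for _, cnt in Counter(y).items()]  # [3, 1, 2]
print(np.std(counts))  # ~0.8165, same value compute_class_nrow_stdv(y, is_discrete=True) returns
```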
quality signals:

- avg_line_length 40.62585 · max_line_length 120 · alphanum_fraction 0.654638
- num_words 1668 · num_chars 11944 · mean_word_length 4.433453 · frac_words_unique 0.231415
- frac_chars_top_{2,3,4}grams 0.019473 / 0.030291 / 0.029885
- frac_chars_dupe_{5..10}grams 0.171738 / 0.108316 / 0.08073 / 0.072076 / 0.045166 / 0.036782
- frac_chars_replacement_symbols 0 · frac_chars_digital 0.011068 · frac_chars_whitespace 0.228399
- size_file_byte 11944 · num_lines 293 · num_chars_line_max 121 · num_chars_line_mean 40.764505
- frac_chars_alphabet 0.791341 · frac_chars_comments 0.17289 · cate_xml_start 0 · frac_lines_dupe_lines 0.080952
- cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.149049 · frac_chars_long_word_length 0.0234
- frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0.003413 · frac_lines_assert 0
- codepython: cate_ast 1 · frac_lines_func_ratio 0.052381 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.119048 · frac_lines_simplefunc 0.009524 · score_lines_no_logic 0.238095 · frac_lines_print 0
- unsuffixed mirror columns all 0 (frac_words_unique and frac_lines_string_concat null) · effective 1 · hits 0
**Record 3: howl/roomsensor/urls.py (volzotan/django-howl)**

- hexsha `22fa19437d01af6a56a8a1b30127d97248a1bdcd` · size 519 · ext py · lang Python
- max_stars / max_issues / max_forks: repo_path `howl/roomsensor/urls.py`, repo_name `volzotan/django-howl`, head_hexsha `3b11c530da95d152844934da09592619b3d4497f`, licenses `["MIT"]`; all counts and event datetimes null

content:
```python
from django.conf.urls import patterns, url

from roomsensor import views

urlpatterns = patterns('',
    url(r'^$', views.index, name='roomsensor'),

    # ex: /roomsensor/name/
    url(r'^(?P<roomsensor_name>\w+)/$', views.display, name='roomsensor_display'),
    url(r'^(?P<roomsensor_name>\w+)/read/$', views.read, name='roomsensor_read'),

    # JSON data for graph creation
    url(r'^(?P<roomsensor_name>\w+)/rawdata/(?P<datapoints>\d+)/(?P<compression_factor>\d+)/$', views.rawdata, name='roomsensor_rawdata'),
)
```
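`patterns()` was deprecated in Django 1.8 and removed in Django 1.10, so this module only runs on old Django versions. A sketch of the same routes on current Django using `re_path` (same views assumed):

```python
from django.urls import re_path

from roomsensor import views

urlpatterns = [
    re_path(r'^$', views.index, name='roomsensor'),
    re_path(r'^(?P<roomsensor_name>\w+)/$', views.display, name='roomsensor_display'),
    re_path(r'^(?P<roomsensor_name>\w+)/read/$', views.read, name='roomsensor_read'),
    re_path(r'^(?P<roomsensor_name>\w+)/rawdata/(?P<datapoints>\d+)/(?P<compression_factor>\d+)/$',
            views.rawdata, name='roomsensor_rawdata'),
]
```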
quality signals:

- avg_line_length 37.071429 · max_line_length 138 · alphanum_fraction 0.672447
- num_words 69 · num_chars 519 · mean_word_length 4.956522 · frac_words_unique 0.405797
- frac_chars_top_{2,3,4}grams 0.046784 / 0.04386 / 0.131579
- frac_chars_dupe_{5..10}grams 0.175439 / 0.175439 / 0 / 0 / 0 / 0
- frac_chars_replacement_symbols 0 · frac_chars_digital 0 · frac_chars_whitespace 0.117534
- size_file_byte 519 · num_lines 14 · num_chars_line_max 139 · num_chars_line_mean 37.071429
- frac_chars_alphabet 0.746725 · frac_chars_comments 0.096339 · cate_xml_start 0 · frac_lines_dupe_lines 0
- cate_autogen 0 · frac_lines_long_string 0.125 · frac_chars_string_length 0.438972 · frac_chars_long_word_length 0.304069
- frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- codepython: cate_ast 1 · frac_lines_func_ratio 0 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.25 · frac_lines_simplefunc 0 · score_lines_no_logic 0.25 · frac_lines_print 0
- unsuffixed mirror columns all 0 (frac_words_unique and frac_lines_string_concat null) · effective 1 · hits 0
**Record 4: app.py (kosovojs/wikibooster)**

- hexsha `22fc97fb3dafaa3d0c68a5549bbe8a39af3d15d4` · size 7031 · ext py · lang Python
- max_stars / max_issues / max_forks: repo_path `app.py`, repo_name `kosovojs/wikibooster`, head_hexsha `70a9d9d7bf41be9fa5e58d40fba216d9b6df008d`, licenses `["MIT"]`
- max_stars_count null (event datetimes null) · max_issues_count 17 (2019-07-08T15:32:18.000Z to 2021-01-03T10:30:55.000Z) · max_forks_count 1 (2019-08-28T21:23:48.000Z, both)

content:
```python
import flask
from flask import Flask
from flask import jsonify
from flask import request
from flask_cors import CORS, cross_origin
from flask import render_template

import mwoauth
import requests_oauthlib
import os
import yaml
import mwapi

from tasks.main import Tasks
from save import Save
from db import DB
from typo.fix import TypoFix

app = Flask(__name__, static_folder="./frontend/build/static", template_folder="./frontend/build")
#app = Flask(__name__)
CORS(app)

user_agent = 'WikiBooster'

__dir__ = os.path.dirname(__file__)
configFile = open(os.path.join(__dir__, 'config.yaml'))
app.config.update(yaml.safe_load(configFile))


def authenticated_session(domain='meta.wikimedia.org'):
    if 'oauth_access_token' in flask.session:
        access_token = mwoauth.AccessToken(**flask.session['oauth_access_token'])
        auth = requests_oauthlib.OAuth1(client_key=app.config['CONSUMER_KEY'], client_secret=app.config['CONSUMER_SECRET'],
                                        resource_owner_key=access_token.key, resource_owner_secret=access_token.secret)
        return mwapi.Session(host='https://' + domain, auth=auth, user_agent=user_agent)
    else:
        return None


def getUserInfo(domain='meta.wikimedia.org'):
    session = authenticated_session(domain)
    if not session:
        return None, None, {'status': 'error', 'message': 'not logged in'}
    try:
        userinfo = session.get(action='query',
                               meta='userinfo',
                               uiprop=['groups', 'centralids'])['query']['userinfo']
        return True, session, {'status': 'ok', 'username': userinfo['name']}
    except mwapi.errors.APIError as e:
        if e.code == 'mwoauth-invalid-authorization-invalid-user':
            # user is viewing a batch for a wiki where they do not have a local user account
            # treat as anonymous on the local wiki, but query Meta to find out if they’re a steward
            return None, None, {'status': 'error', 'message': 'server error'}
        else:
            raise e
    return None, None, {'status': 'error', 'message': 'server error'}


@app.route('/', methods=['GET'])
def index_page():
    return render_template('index.html')


#http://127.0.0.1:5000/task/lvwiki/1/Helēna Mārnija
@app.route('/task/<wiki>/<name>/<page>', methods=['GET'])
def getTaskResult(wiki, name, page):
    tasks = Tasks(wiki)
    articleInfo = tasks.getDataForTask(name, page)
    return jsonify(articleInfo)


@app.route('/testing', methods=['GET'])
def runTests():
    tasks = Tasks('lvwiki')
    articleInfo = tasks.runTests()
    return articleInfo


@app.route('/wikis', methods=['GET'])
def listWikis():
    db = DB()
    wikis = db.getAvailableWikis()
    return jsonify(wikis)


@app.route('/tasks/<wiki>', methods=['GET'])
def listJobs(wiki):
    db = DB()
    articles = db.getTasksForWiki(wiki)
    return jsonify(articles)


@app.route('/task/<wiki>/<task_id>/articles', methods=['GET'])
def listArticles(wiki, task_id):
    db = DB()
    articles = db.get_articles_for_task(wiki, task_id)
    return jsonify(articles)


#
@app.route('/typo/<wiki>', methods=['GET'])
def listTypos(wiki):
    db = DB()
    typos = db.getTyposForWiki(wiki)
    return jsonify(typos)


@app.route('/typo/articles', methods=['GET'])
def typo_list_for_wiki():
    db = DB()
    wiki = 'lvwiki'
    typos = db.get_typo_articles(wiki)
    return jsonify(typos)


@app.route('/typo/fix/<article>', methods=['GET'])
def fix_typos(article):
    db = DB()
    typoFixer = TypoFix()
    res = typoFixer.getData('lvwiki', article, db)
    return jsonify(res)


@app.route('/rules/<wiki>', methods=['GET'])
def listRules(wiki):
    db = DB()
    rules = db.getRulesForWiki(wiki)
    return jsonify(rules)


@app.route('/save', methods=['POST'])
def doSave():
    req = request.get_json()
    wiki = req['wiki']
    domain = "{}.wikipedia.org".format(wiki)
    userStatus, session, respFromGettingUserInfo = getUserInfo(domain)
    if not userStatus:
        return jsonify(respFromGettingUserInfo)
    #
    userName = respFromGettingUserInfo['username'] if 'username' in respFromGettingUserInfo else respFromGettingUserInfo['message']
    job = req['job']
    article = req['article']
    result = req['result']
    wikitext = req['wikitext']
    status = req['status']
    handlingSave = Save(session)
    respFromSave = handlingSave.saveArticle(job, article, result, wikitext, status, userName)
    return jsonify(respFromSave)


@app.route('/save_typo', methods=['POST'])
def doSaveTypo():
    req = request.get_json()
    wiki = req['wiki']
    domain = "{}.wikipedia.org".format(wiki.replace('wiki', ''))
    userStatus, session, respFromGettingUserInfo = getUserInfo(domain)
    if not userStatus:
        return jsonify(respFromGettingUserInfo)
    userName = respFromGettingUserInfo['username'] if 'username' in respFromGettingUserInfo else respFromGettingUserInfo['message']
    active = req['active']
    case = req['case']
    comment = req['comment']
    dumpsearch = req['dumpsearch']
    minor = req['minor']
    name = req['name']
    regex = req['regex']
    replace_with = req['replace_with']
    search_for = req['search_for']
    test_cases = req['test_cases']
    whole = req['whole']
    id = req['id']
    db = DB()
    typoData = db.saveTypo(active, case, comment, dumpsearch, minor, name, regex, replace_with, search_for, test_cases, whole, wiki, userName, id)
    return jsonify({'status': 'ok', 'info': typoData})


@app.route('/save_rule', methods=['POST'])
def saveRule():
    req = request.get_json()
    wiki = req['wiki']
    domain = "{}.wikipedia.org".format(wiki.replace('wiki', ''))
    userStatus, session, respFromGettingUserInfo = getUserInfo(domain)
    if not userStatus:
        return jsonify(respFromGettingUserInfo)
    userName = respFromGettingUserInfo['username'] if 'username' in respFromGettingUserInfo else respFromGettingUserInfo['message']
    wiki = req['wiki']
    rule_name = req['rule_name']
    rule_object = req['rule_object']
    rule = req['rule']
    result = req['result']
    id = req['id']
    db = DB()
    db.saveRule(id, wiki, rule_name, rule_object, rule, result)
    return jsonify({'status': 'ok'})


@app.route('/info', methods=['GET'])
def user_info():
    userStatus, _, respFromGettingUserInfo = getUserInfo()
    return jsonify(respFromGettingUserInfo)


@app.route('/login')
def login():
    consumer_token = mwoauth.ConsumerToken(app.config['CONSUMER_KEY'], app.config['CONSUMER_SECRET'])
    redirect, request_token = mwoauth.initiate('https://meta.wikimedia.org/w/index.php', consumer_token, user_agent=user_agent)
    flask.session['oauth_request_token'] = dict(zip(request_token._fields, request_token))
    return flask.redirect(redirect)


@app.route('/oauth-callback')
def oauth_callback():
    consumer_token = mwoauth.ConsumerToken(app.config['CONSUMER_KEY'], app.config['CONSUMER_SECRET'])
    request_token = mwoauth.RequestToken(**flask.session.pop('oauth_request_token'))
    access_token = mwoauth.complete('https://meta.wikimedia.org/w/index.php', consumer_token, request_token, flask.request.query_string, user_agent=user_agent)
    flask.session['oauth_access_token'] = dict(zip(access_token._fields, access_token))
    return flask.redirect(flask.url_for('index_page'))


@app.route('/logout')
def logout():
    """Log the user out by clearing their session."""
    flask.session.clear()
    return flask.redirect(flask.url_for('index_page'))


if __name__ == '__main__':
    app.run(debug=True)
```
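The read-only endpoints can be exercised without OAuth through Flask's built-in test client; a sketch, assuming `config.yaml` and the DB backend are in place next to `app.py`:

```python
from app import app

client = app.test_client()
print(client.get('/wikis').get_json())         # wikis known to the DB
print(client.get('/tasks/lvwiki').get_json())  # open tasks for one wiki
print(client.get('/info').get_json())          # no session -> {'status': 'error', 'message': 'not logged in'}
```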
quality signals:

- avg_line_length 29.542017 · max_line_length 156 · alphanum_fraction 0.729768
- num_words 911 · num_chars 7031 · mean_word_length 5.493963 · frac_words_unique 0.232711
- frac_chars_top_{2,3,4}grams 0.027173 / 0.028571 / 0.011988
- frac_chars_dupe_{5..10}grams 0.314685 / 0.281718 / 0.275325 / 0.247752 / 0.214985 / 0.197802
- frac_chars_replacement_symbols 0 · frac_chars_digital 0.001926 · frac_chars_whitespace 0.114066
- size_file_byte 7031 · num_lines 238 · num_chars_line_max 157 · num_chars_line_mean 29.542017
- frac_chars_alphabet 0.801573 · frac_chars_comments 0.039824 · cate_xml_start 0 · frac_lines_dupe_lines 0.267045
- cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.171191 · frac_chars_long_word_length 0.018098
- frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- codepython: cate_ast 1 · frac_lines_func_ratio 0.107955 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.085227 · frac_lines_simplefunc 0.005682 · score_lines_no_logic 0.340909 · frac_lines_print 0
- unsuffixed mirror columns all 0 (frac_words_unique and frac_lines_string_concat null) · effective 1 · hits 0
**Record 5: pre_embed.py (shelleyyyyu/few_shot)**

- hexsha `22fcb38b78558c9add6900dca954fd92ecf359b7` · size 1483 · ext py · lang Python
- max_stars / max_issues / max_forks: repo_path `pre_embed.py`, repo_name `shelleyyyyu/few_shot`, head_hexsha `0fe54444e820fe3201927e6363682913b6d61028`, licenses `["Apache-2.0"]`
- max_stars_count 253 (2018-08-29T18:59:00.000Z to 2022-03-15T04:53:47.000Z) · max_issues_count 18 (2018-10-24T09:49:44.000Z to 2022-03-31T14:39:37.000Z) · max_forks_count 38 (2018-10-17T07:43:25.000Z to 2022-03-05T12:20:33.000Z)

content:
```python
import numpy as np
from collections import defaultdict, Counter
import random
import json
from tqdm import tqdm


def transX(dataset):
    rel2id = json.load(open(dataset + '/relation2ids'))
    ent2id = json.load(open(dataset + '/ent2ids'))

    with open('../Fast-TransX/' + dataset + '_base/entity2id.txt', 'w') as g1:
        num_ents = len(ent2id.keys())
        g1.write(str(num_ents) + '\n')
        for k, v in ent2id.items():
            g1.write(k + '\t' + str(v) + '\n')

    with open('../Fast-TransX/' + dataset + '_base/relation2id.txt', 'w') as g1:
        num_rels = len(rel2id.keys())
        g1.write(str(num_rels) + '\n')
        for k, v in rel2id.items():
            g1.write(k + '\t' + str(v) + '\n')

    file_name = dataset + '/path_graph'
    train_triples = []
    with open(file_name) as f:
        lines = f.readlines()
        for line in tqdm(lines):
            e1 = line.split('\t')[0]
            e2 = line.rstrip().split('\t')[2]
            rel = line.split('\t')[1]
            train_triples.append([e1, rel, e2])
            train_triples.append([e2, rel + '_inv', e1])

    with open('../Fast-TransX/' + dataset + '_base/train2id.txt', 'w') as g3:
        num_triples = len(train_triples)
        g3.write(str(num_triples) + '\n')
        for triple in train_triples:
            e1, rel, e2 = triple
            g3.write(str(ent2id[e1]) + '\t' + str(ent2id[e2]) + '\t' + str(rel2id[rel]) + '\n')


if __name__ == '__main__':
    transX('Wiki')
```
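A toy fixture showing the input shapes `transX` expects, assuming it runs next to the function above; the identifiers are made up, and note the relation map must already contain the `_inv` keys that `transX` looks up for inverse triples:

```python
import json
import os

# Made-up two-entity, one-relation fixture (plus the '_inv' id transX needs).
os.makedirs('Wiki', exist_ok=True)
os.makedirs('../Fast-TransX/Wiki_base', exist_ok=True)
with open('Wiki/relation2ids', 'w') as f:
    json.dump({'P131': 0, 'P131_inv': 1}, f)
with open('Wiki/ent2ids', 'w') as f:
    json.dump({'Q42': 0, 'Q64': 1}, f)
with open('Wiki/path_graph', 'w') as f:
    f.write('Q42\tP131\tQ64\n')

transX('Wiki')  # emits entity2id.txt, relation2id.txt, train2id.txt under ../Fast-TransX/Wiki_base/
```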
quality signals:

- avg_line_length 32.23913 · max_line_length 95 · alphanum_fraction 0.55091
- num_words 202 · num_chars 1483 · mean_word_length 3.915842 · frac_words_unique 0.336634
- frac_chars_top_{2,3,4}grams 0.075853 / 0.045512 / 0.068268
- frac_chars_dupe_{5..10}grams 0.249052 / 0.158028 / 0.04804 / 0.04804 / 0 / 0
- frac_chars_replacement_symbols 0 · frac_chars_digital 0.03318 · frac_chars_whitespace 0.268375
- size_file_byte 1483 · num_lines 46 · num_chars_line_max 96 · num_chars_line_mean 32.23913
- frac_chars_alphabet 0.695853 · frac_chars_comments 0 · cate_xml_start 0 · frac_lines_dupe_lines 0.055556
- cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.121294 · frac_chars_long_word_length 0.014151
- frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- codepython: cate_ast 1 · frac_lines_func_ratio 0.027778 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.138889 · frac_lines_simplefunc 0 · score_lines_no_logic 0.166667 · frac_lines_print 0
- unsuffixed mirror columns all 0 (frac_words_unique and frac_lines_string_concat null) · effective 1 · hits 0
**Record 6: botc/gamemodes/troublebrewing/FortuneTeller.py**

- hexsha `22fd80b994ca4f5c482661c444d74e7a50232ab0` · size 7673 · ext py · lang Python
- max_stars: repo_name `Xinverse/BOTC-Bot`, head_hexsha `1932c649c81a5a1eab735d7abdee0761c2853940`, licenses `["MIT"]`, count 1 (2020-06-21T17:20:17.000Z, both)
- max_issues: repo_name `BlueLenz/Blood-on-the-Clocktower-Storyteller-Discord-Bot`, same path/head/licenses, count 1 (2020-07-07T03:47:44.000Z, both)
- max_forks: repo_name `BlueLenz/Blood-on-the-Clocktower-Storyteller-Discord-Bot`, same path/head/licenses, count 1 (2022-02-18T00:42:19.000Z, both)

content:
"""Contains the Fortune Teller Character class"""
import json
import random
import discord
import datetime
from botc import Action, ActionTypes, Townsfolk, Character, Storyteller, RedHerring, \
RecurringAction, Category, StatusList
from botc.BOTCUtils import GameLogic
from ._utils import TroubleBrewing, TBRole
import globvars
with open('botc/gamemodes/troublebrewing/character_text.json') as json_file:
character_text = json.load(json_file)[TBRole.fortuneteller.value.lower()]
with open('botutils/bot_text.json') as json_file:
bot_text = json.load(json_file)
butterfly = bot_text["esthetics"]["butterfly"]
with open('botc/game_text.json') as json_file:
strings = json.load(json_file)
fortune_teller_nightly = strings["gameplay"]["fortune_teller_nightly"]
copyrights_str = strings["misc"]["copyrights"]
yes = strings["gameplay"]["yes"]
no = strings["gameplay"]["no"]
good_link = strings["images"]["good"]
evil_link = strings["images"]["evil"]
class FortuneTeller(Townsfolk, TroubleBrewing, Character, RecurringAction):
"""Fortune Teller: Each night, choose 2 players: you learn if either is a Demon.
There is 1 good player that registers falsely to you.
===== FORTUNE TELLER =====
true_self = fortune teller
ego_self = fortune teller
social_self = fortune teller
commands:
- read <player> and <player>
initialize setup? -> NO
initialize role? -> YES
----- First night
START:
override first night instruction? -> YES # default is to send instruction string only
=> Send query for "read" command
----- Regular night
START:
override regular night instruction? -> YES # default is to send nothing
=> Send query for "read" command
"""
def __init__(self):
Character.__init__(self)
TroubleBrewing.__init__(self)
Townsfolk.__init__(self)
self._desc_string = character_text["description"]
self._examp_string = character_text["examples"]
self._instr_string = character_text["instruction"]
self._lore_string = character_text["lore"]
self._brief_string = character_text["brief"]
self._action = character_text["action"]
self._art_link = "https://bloodontheclocktower.com/wiki/images/3/3a/Fortune_Teller_Token.png"
self._art_link_cropped = "https://imgur.com/23ZXb1y.png"
self._wiki_link = "https://bloodontheclocktower.com/wiki/Fortune_Teller"
self._role_enum = TBRole.fortuneteller
self._emoji = "<:tbfortuneteller:739317350733578280>"
def create_n1_instr_str(self):
"""Create the instruction field on the opening dm card"""
# First line is the character instruction string
msg = f"{self.emoji} {self.instruction}"
addendum = character_text["n1_addendum"]
# Some characters have a line of addendum
if addendum:
with open("botutils/bot_text.json") as json_file:
bot_text = json.load(json_file)
scroll_emoji = bot_text["esthetics"]["scroll"]
msg += f"\n{scroll_emoji} {addendum}"
return msg
def add_action_field_n1(self, embed_obj):
"""Send the stats list n1"""
msg = self.action
msg += globvars.master_state.game.create_sitting_order_stats_string()
embed_obj.add_field(name = butterfly + " **「 Your Action 」**", value = msg, inline = False)
return embed_obj
def exec_init_role(self, setup):
"""Assign one of the townsfolks or outsiders as a red herring"""
possibilities = setup.townsfolks + setup.outsiders
chosen = random.choice(possibilities)
chosen.add_status_effect(RedHerring(Storyteller(), chosen))
globvars.logging.info(f">>> Fortune Teller [exec_init_role] Set red herring to {str(chosen)}")
def has_finished_night_action(self, player):
"""Return True if fortune teller has submitted the read action"""
if player.is_alive():
current_phase_id = globvars.master_state.game._chrono.phase_id
received_action = player.action_grid.retrieve_an_action(current_phase_id)
return received_action is not None and received_action.action_type == ActionTypes.read
return True
@GameLogic.requires_two_targets
@GameLogic.requires_different_targets
@GameLogic.changes_not_allowed
async def register_read(self, player, targets):
"""Read command"""
# Must be 2 targets
assert len(targets) == 2, "Received a number of targets different than 2 for fortune teller 'read'"
action = Action(player, targets, ActionTypes.read, globvars.master_state.game._chrono.phase_id)
player.action_grid.register_an_action(action, globvars.master_state.game._chrono.phase_id)
msg = butterfly + " " + character_text["feedback"].format(targets[0].game_nametag, targets[1].game_nametag)
await player.user.send(msg)
async def exec_read(self, fortune_teller_player, read_player_1, read_player_2):
"""Execute the read action (night ability interaction)"""
if fortune_teller_player.is_alive():
# Correct info
if not fortune_teller_player.is_droisoned():
response = read_player_1.role.social_self.category == Category.demon or \
read_player_2.role.social_self.category == Category.demon or \
read_player_1.has_status_effect(StatusList.red_herring) or \
read_player_2.has_status_effect(StatusList.red_herring)
# Droisoned info
else:
response = random.choice((True, False))
reply = yes if response else no
link = evil_link if response else good_link
recipient = fortune_teller_player.user
msg = f"***{recipient.name}#{recipient.discriminator}***, the **{self.name}**:"
msg += "\n"
msg += self.emoji + " " + self.instruction
msg += "\n"
msg += fortune_teller_nightly.format(reply)
embed = discord.Embed(description = msg)
embed.set_thumbnail(url = link)
embed.set_footer(text = copyrights_str)
embed.timestamp = datetime.datetime.utcnow()
try:
await recipient.send(embed = embed)
except discord.Forbidden:
pass
# If the fortune teller player is dead, then nothing is sent to them
else:
pass
async def process_night_ability(self, player):
"""Process night actions for the fortune teller character.
@player : the Fortune Teller player (Player object)
"""
phase = globvars.master_state.game._chrono.phase_id
action = player.action_grid.retrieve_an_action(phase)
# The Fortune teller has submitted an action. We call the execution function immediately
if action:
assert action.action_type == ActionTypes.read, f"Wrong action type {action} in fortune teller"
targets = action.target_player
read_player_1 = targets[0]
read_player_2 = targets[1]
await self.exec_read(player, read_player_1, read_player_2)
# The fortune teller has not submitted an action. We will not randomize the action since
# the reading ability is a "priviledged" ability
else:
pass
quality signals:

- avg_line_length 41.032086 · max_line_length 115 · alphanum_fraction 0.644598
- num_words 898 · num_chars 7673 · mean_word_length 5.292873 · frac_words_unique 0.269488
- frac_chars_top_{2,3,4}grams 0.065643 / 0.020198 / 0.024195
- frac_chars_dupe_{5..10}grams 0.18178 / 0.132127 / 0.1174 / 0.045024 / 0.045024 / 0.025247
- frac_chars_replacement_symbols 0 · frac_chars_digital 0.008134 · frac_chars_whitespace 0.263
- size_file_byte 7673 · num_lines 186 · num_chars_line_max 116 · num_chars_line_mean 41.252688
- frac_chars_alphabet 0.832361 · frac_chars_comments 0.17516 · cate_xml_start 0 · frac_lines_dupe_lines 0.090909
- cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.136893 · frac_chars_long_word_length 0.033433
- frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0.018182
- codepython: cate_ast 1 · frac_lines_func_ratio 0.045455 · cate_var_zero false · frac_lines_pass 0.027273 · frac_lines_import 0.072727 · frac_lines_simplefunc 0 · score_lines_no_logic 0.163636 · frac_lines_print 0
- unsuffixed mirror columns all 0 (frac_words_unique and frac_lines_string_concat null) · effective 1 · hits 0
**Record 7: src/schmetterling/build/tests/test_maven.py (bjuvensjo/schmetterling)**

- hexsha `22fdcdf03da29d4d6e3f5e50e7e03925c3c15cdd` · size 10849 · ext py · lang Python
- max_stars / max_issues / max_forks: repo_path `src/schmetterling/build/tests/test_maven.py`, repo_name `bjuvensjo/schmetterling`, head_hexsha `0cdbfe4f379a081d9d4711dd21866b90983365cf`, licenses `["Apache-2.0"]`; all counts and event datetimes null

content:
```python
from unittest.mock import call, MagicMock, patch

from schmetterling.build.maven import build_multi_modules
from schmetterling.build.maven import create_build_result
from schmetterling.build.maven import create_command
from schmetterling.build.maven import create_multi_modules
from schmetterling.build.maven import create_state
from schmetterling.build.maven import get_maven_infos
from schmetterling.build.maven import get_maven_repos
from schmetterling.build.maven import get_multi_modules
from schmetterling.build.state import BuildState, Build
from schmetterling.setup.state import Repo


def test_build_multi_modules():
    mm = [
        {'updated': 'updated1', 'pom_dir': 'pom_dir1', 'coordinates': 'coordinates1'},
        {'updated': 'updated2', 'pom_dir': 'pom_dir2', 'coordinates': 'coordinates2'},
    ]
    with patch('schmetterling.build.maven.create_command',
               return_value='create_command') as m_create_command, \
         patch('schmetterling.build.maven.run_command') as m_run_command, \
         patch('schmetterling.build.maven.create_build_result',
               return_value=[['success_coordinates'],
                             ['failure_coordinates']]) as m_create_build_result:
        assert (
            ['success_coordinates', 'success_coordinates'],
            ['failure_coordinates', 'failure_coordinates'],
        ) == build_multi_modules(mm, 'repository_dir', 'settings_file', 'logback_file')
        assert [
            call('updated1', 'pom_dir1/mvn.log', 'repository_dir', 'settings_file', 'logback_file'),
            call('updated2', 'pom_dir2/mvn.log', 'repository_dir', 'settings_file', 'logback_file')
        ] == m_create_command.mock_calls
        assert [
            call('create_command', cwd='pom_dir1'),
            call('create_command', cwd='pom_dir2')
        ] == m_run_command.mock_calls
        assert [
            call('coordinates1', 'updated1', 'pom_dir1/mvn.log'),
            call('coordinates2', 'updated2', 'pom_dir2/mvn.log')
        ] == m_create_build_result.mock_calls


def test_create_command():
    assert str('mvn -Dmaven.repo.local=repository '
               '-s settings.xml '
               '-DcreateChecksum=true '
               '-Dfile.encoding=UTF-8 '
               '-Dsun.jnu.encoding=UTF-8 '
               '-Dlogback.configurationFile=logback.xml '
               '-B -amd -pl mygroup:app.admin,mygroup:app.sign '
               'clean install javadoc:jar source:jar '
               '--fail-at-end | tee mvn.log') == create_command(
        [{'artifact_id': 'app.admin', 'group_id': 'mygroup'},
         {'artifact_id': 'app.sign', 'group_id': 'mygroup'}],
        'mvn.log', 'repository', 'settings.xml', 'logback.xml')


@patch('schmetterling.build.maven.get_summary',
       return_value=(['mygroup:app.admin'], ['app.sign']))
def test_create_build_result(mock_get_summary):
    assert (
        [
            {'artifact_id': 'app.admin', 'group_id': 'mygroup'},
        ],
        [
            {'artifact_id': 'app.sign', 'group_id': 'mygroup'},
            {'artifact_id': 'pipeline.env', 'group_id': 'mygroup'},
        ],
    ) == create_build_result(
        [
            {'artifact_id': 'app.admin', 'group_id': 'mygroup'},
            {'artifact_id': 'app.sign', 'group_id': 'mygroup'},
            {'artifact_id': 'pipeline.env', 'group_id': 'mygroup'},
            {'artifact_id': 'xml.ws', 'group_id': 'mygroup'},
        ],
        [
            {'artifact_id': 'app.admin', 'group_id': 'mygroup'},
            {'artifact_id': 'app.sign', 'group_id': 'mygroup'},
            {'artifact_id': 'pipeline.env', 'group_id': 'mygroup'},
        ],
        'mvn.log',
    )


def test_create_multi_modules():
    with patch('schmetterling.build.maven.makedirs') as m, patch(
            'schmetterling.build.maven.open') as o:
        f = MagicMock()
        o.return_value = MagicMock(__enter__=MagicMock(return_value=f))
        create_multi_modules([
            {'pom_dir': 'pd1', 'pom_content': 'pc1'},
            {'pom_dir': 'pd2', 'pom_content': 'pc2'},
        ])
        assert [call('pd1', exist_ok=True),
                call('pd2', exist_ok=True)] == m.mock_calls
        assert [call.write('pc1'), call.write('pc2')] == f.mock_calls


def test_create_state():
    state = BuildState('schmetterling.build.maven',
                       [
                           Build('mygroup', 'app.admin', '0.0.1-SNAPSHOT', 'app.admin',
                                 Build.SUCCESS, 1),
                           Build('mygroup', 'pipeline-apache-proxy', '1.0.0-SNAPSHOT',
                                 'pipeline-apache-proxy', Build.FAILURE, 1),
                       ])
    assert state == create_state(
        [],
        [{
            'pom_path': 'app.admin/pom.xml',
            'artifact_id': 'app.admin',
            'group_id': 'mygroup',
            'version': '0.0.1-SNAPSHOT',
            'packaging': 'jar'
        }],
        [{
            'pom_path': 'pipeline-apache-proxy/pom.xml',
            'artifact_id': 'pipeline-apache-proxy',
            'group_id': 'mygroup',
            'version': '1.0.0-SNAPSHOT',
            'packaging': 'jar'
        }],
        1,
    )


def test_get_maven_info():
    with patch('schmetterling.build.maven.get_pom_info', side_effect=lambda x: x):
        repos = [
            MagicMock(status=Repo.STATUS_UPDATED, path='path1'),
            MagicMock(status=Repo.STATUS_UNCHANGED, path='path2'),
        ]
        assert [(True, 'path1/pom.xml'),
                (False, 'path2/pom.xml')] == get_maven_infos(repos)


def test_get_maven_repos():
    with patch('schmetterling.build.maven.isinstance', return_value=True):
        with patch('schmetterling.build.maven.exists', side_effect=[False, True]):
            m = MagicMock(path='pom_repo', return_value='pom_repo')
            state = [MagicMock(repos=[
                MagicMock(path='non_pom_repo'),
                m,
            ])]
            assert [m] == get_maven_repos(state)


def test_get_multi_modules():
    with patch('schmetterling.build.maven.get_pom', return_value='pom_content'):
        assert [] == get_multi_modules([(False, {})], 'build_dir')
        assert [{
            'coordinates': [{}],
            'pom_content': 'pom_content',
            'pom_dir': 'build_dir/jar-modules',
            'updated': [{}]
        }] == get_multi_modules([(True, {})], 'build_dir')
        assert [{
            'coordinates': [{'packaging': 'jar'}],
            'pom_content': 'pom_content',
            'pom_dir': 'build_dir/jar-modules',
            'updated': [{'packaging': 'jar'}]
        }] == get_multi_modules([(True, {'packaging': 'jar'})], 'build_dir')
        assert [{
            'coordinates': [{'artifact_id': 'super-pom', 'packaging': 'pom'}],
            'pom_content': 'pom_content',
            'pom_dir': 'build_dir/super-pom-modules',
            'updated': [{'artifact_id': 'super-pom', 'packaging': 'pom'}]
        }] == get_multi_modules([(True, {'artifact_id': 'super-pom', 'packaging': 'pom'})], 'build_dir')
        assert [{
            'coordinates': [{'artifact_id': 'pom', 'packaging': 'pom'}],
            'pom_content': 'pom_content',
            'pom_dir': 'build_dir/pom-pom-modules',
            'updated': [{'artifact_id': 'pom', 'packaging': 'pom'}]
        }] == get_multi_modules([(True, {'artifact_id': 'pom', 'packaging': 'pom'})], 'build_dir')
        assert [{
            'coordinates': [{'artifact_id': 'x', 'packaging': 'x'}],
            'pom_content': 'pom_content',
            'pom_dir': 'build_dir/other-modules',
            'updated': [{'artifact_id': 'x', 'packaging': 'x'}]
        }] == get_multi_modules([(True, {'artifact_id': 'x', 'packaging': 'x'})], 'build_dir')
        assert [{
            'coordinates': [{'artifact_id': 'war', 'packaging': 'war'}],
            'pom_content': 'pom_content',
            'pom_dir': 'build_dir/war-modules',
            'updated': [{'artifact_id': 'war', 'packaging': 'war'}]
        }] == get_multi_modules([(True, {'artifact_id': 'war', 'packaging': 'war'})], 'build_dir')
        assert [{
            'coordinates': [{'artifact_id': 'jar1', 'packaging': 'jar'},
                            {'artifact_id': 'jar2'},
                            {'artifact_id': 'jar3'}],
            'pom_content': 'pom_content',
            'pom_dir': 'build_dir/jar-modules',
            'updated': [{'artifact_id': 'jar1', 'packaging': 'jar'},
                        {'artifact_id': 'jar2'}]
        }, {
            'coordinates': [{'artifact_id': 'war', 'packaging': 'war'}],
            'pom_content': 'pom_content',
            'pom_dir': 'build_dir/war-modules',
            'updated': [{'artifact_id': 'war', 'packaging': 'war'}]
        }] == get_multi_modules([(True, {'artifact_id': 'jar1', 'packaging': 'jar'}),
                                 (True, {'artifact_id': 'jar2'}),
                                 (False, {'artifact_id': 'jar3'}),
                                 (True, {'artifact_id': 'war', 'packaging': 'war'})], 'build_dir')
```
quality signals:

- avg_line_length 33.381538 · max_line_length 107 · alphanum_fraction 0.461702
- num_words 940 · num_chars 10849 · mean_word_length 5.07234 · frac_words_unique 0.138298
- frac_chars_top_{2,3,4}grams 0.077601 / 0.091653 / 0.058725
- frac_chars_dupe_{5..10}grams 0.540898 / 0.412752 / 0.353188 / 0.301594 / 0.226091 / 0.186661
- frac_chars_replacement_symbols 0 · frac_chars_digital 0.008349 · frac_chars_whitespace 0.392755
- size_file_byte 10849 · num_lines 324 · num_chars_line_max 108 · num_chars_line_mean 33.484568
- frac_chars_alphabet 0.715392 · frac_chars_comments 0 · cate_xml_start 0 · frac_lines_dupe_lines 0.51634
- cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.306849 · frac_chars_long_word_length 0.076228
- frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0.062092
- codepython: cate_ast 1 · frac_lines_func_ratio 0.026144 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.035948 · frac_lines_simplefunc 0 · score_lines_no_logic 0.062092 · frac_lines_print 0
- unsuffixed mirror columns all 0 (frac_words_unique and frac_lines_string_concat null) · effective 1 · hits 0
**Record 8: Copados y Clases/Mastermind_DEBUG.py (FdelMazo/7540rw-Algo1)**

- hexsha `22fe0847296c50b27120f9c55084e9eba84b2a5a` · size 1753 · ext py · lang Python
- max_stars / max_issues / max_forks: repo_path `Copados y Clases/Mastermind_DEBUG.py`, repo_name `FdelMazo/7540rw-Algo1`, head_hexsha `8900604873195df9e902ead6bcb67723a8b654c8`, licenses `["MIT"]`
- max_stars_count 1 (2021-11-20T18:41:34.000Z, both) · max_issues_count null · max_forks_count null

content (comments and docstrings translated from Spanish; identifiers and user-facing strings kept as-is):
```python
# Remove the DEBUG lines for the game to work properly
import random

DIGITOS = 4


def mastermind():
    """Main function of the Mastermind game"""
    print("Bienvenido al Mastermind!")
    print("Instrucciones: Tenes que adivinar un codigo de {} digitos distintos. Tu cantidad de aciertos son los numeros que estan correctamente posicionados, tu cantidad de coincidencias son los numeros bien elegidos pero mal posicionados. Suerte!".format(DIGITOS))
    codigo = elegir_codigo()
    intentos = 1
    propuesta = input("Que codigo propones? (o pone 'Me retiro') ")
    retirarse = "Me retiro"
    while propuesta != codigo and propuesta != retirarse:
        intentos += 1
        aciertos, coincidencias = analizar_propuesta(propuesta, codigo)
        print("Tu propuesta ({}) tiene {} aciertos y {} coincidencias.".format(propuesta, aciertos, coincidencias))
        propuesta = input("Propone otro codigo: ")
    if propuesta == retirarse:
        print("El codigo era: {}".format(codigo))
    else:
        print("Ganaste! Ganaste en {} intentos".format(intentos))


def elegir_codigo():
    """Picks a random code of DIGITOS distinct digits"""
    digitos = ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")
    codigo = ""
    for i in range(DIGITOS):
        candidato = random.choice(digitos)
        print("[DEBUG] candidato:", candidato)
        while candidato in codigo:
            candidato = random.choice(digitos)
        codigo = codigo + candidato
        print("[DEBUG] el codigo va siendo", codigo)
    return codigo


def analizar_propuesta(propuesta, codigo):
    """Counts exact matches (aciertos) and right-digit/wrong-place matches (coincidencias)"""
    aciertos = 0
    coincidencias = 0
    for i in range(DIGITOS):
        if propuesta[i] == codigo[i]:
            aciertos += 1
        elif propuesta[i] in codigo:
            coincidencias += 1
    return aciertos, coincidencias


mastermind()
```
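A worked example of the scoring rule, assuming `analizar_propuesta` from above is in scope (note the module starts a game on import, so paste rather than import): with code "1243", the guess "1234" has two exact matches (1 and 2 in place) and two displaced matches (3 and 4).

```python
print(analizar_propuesta("1234", "1243"))  # -> (2, 2)
print(analizar_propuesta("5678", "1243"))  # -> (0, 0): no digit of the guess occurs in the code
```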
quality signals:

- avg_line_length 37.297872 · max_line_length 263 · alphanum_fraction 0.697091
- num_words 217 · num_chars 1753 · mean_word_length 5.612903 · frac_words_unique 0.410138
- frac_chars_top_{2,3,4}grams 0.036946 / 0.01642 / 0.027915
- frac_chars_dupe_{5..10}grams 0.029557 / 0 / 0 / 0 / 0 / 0
- frac_chars_replacement_symbols 0 · frac_chars_digital 0.011855 · frac_chars_whitespace 0.181974
- size_file_byte 1753 · num_lines 47 · num_chars_line_max 264 · num_chars_line_mean 37.297872
- frac_chars_alphabet 0.837517 · frac_chars_comments 0.096977 · cate_xml_start 0 · frac_lines_dupe_lines 0.102564
- cate_autogen 0 · frac_lines_long_string 0.025641 · frac_chars_string_length 0.322602 · frac_chars_long_word_length 0
- frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- codepython: cate_ast 1 · frac_lines_func_ratio 0.076923 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.025641 · frac_lines_simplefunc 0 · score_lines_no_logic 0.153846 · frac_lines_print 0.179487
- unsuffixed mirror columns all 0 (frac_words_unique and frac_lines_string_concat null) · effective 1 · hits 0
**Record 9: PyPortal_User_Interface/code.py**

- hexsha `22ffabcfd90f7354812821f61ad46409c8d4a120` · size 15233 · ext py · lang Python
- max_stars: repo_name `RichardA1/Adafruit_Learning_System_Guides`, head_hexsha `7d06d8a126f357a431384c3af73339cb46f44c19`, licenses `["MIT"]`, count 1 (2022-01-31T21:55:48.000Z, both)
- max_issues / max_forks: repo_name `aadisalimani/Adafruit_Learning_System_Guides`, head_hexsha `1b18cfcd6d426bf018545fd7b4102a8196c11c16`, licenses `["MIT"]`; counts and event datetimes null

content (the record is cut off partway through this file):
import time
import board
import displayio
import busio
from analogio import AnalogIn
import neopixel
import adafruit_adt7410
from adafruit_bitmap_font import bitmap_font
from adafruit_display_text.label import Label
from adafruit_button import Button
import adafruit_touchscreen
from adafruit_pyportal import PyPortal
# ------------- Inputs and Outputs Setup ------------- #
# init. the temperature sensor
i2c_bus = busio.I2C(board.SCL, board.SDA)
adt = adafruit_adt7410.ADT7410(i2c_bus, address=0x48)
adt.high_resolution = True
# init. the light sensor
light_sensor = AnalogIn(board.LIGHT)
pixel = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=1)
WHITE = 0xffffff
RED = 0xff0000
YELLOW = 0xffff00
GREEN = 0x00ff00
BLUE = 0x0000ff
PURPLE = 0xff00ff
BLACK = 0x000000
# ---------- Sound Effects ------------- #
soundDemo = '/sounds/sound.wav'
soundBeep = '/sounds/beep.wav'
soundTab = '/sounds/tab.wav'
# ------------- Other Helper Functions------------- #
# Helper for cycling through a number set of 1 to x.
def numberUP(num, max_val):
num += 1
if num <= max_val:
return num
else:
return 1
# ------------- Screen Setup ------------- #
pyportal = PyPortal()
display = board.DISPLAY
display.rotation = 270
# Backlight function
# Value between 0 and 1 where 0 is OFF, 0.5 is 50% and 1 is 100% brightness.
def set_backlight(val):
val = max(0, min(1.0, val))
board.DISPLAY.auto_brightness = False
board.DISPLAY.brightness = val
# Set the Backlight
set_backlight(0.3)
# Touchscreen setup
# ------Rotate 270:
screen_width = 240
screen_height = 320
ts = adafruit_touchscreen.Touchscreen(board.TOUCH_YD, board.TOUCH_YU,
board.TOUCH_XR, board.TOUCH_XL,
calibration=((5200, 59000),
(5800, 57000)),
size=(screen_width, screen_height))
# ------------- Display Groups ------------- #
splash = displayio.Group(max_size=15) # The Main Display Group
view1 = displayio.Group(max_size=15) # Group for View 1 objects
view2 = displayio.Group(max_size=15) # Group for View 2 objects
view3 = displayio.Group(max_size=15) # Group for View 3 objects
def hideLayer(hide_target):
try:
splash.remove(hide_target)
except ValueError:
pass
def showLayer(show_target):
try:
time.sleep(0.1)
splash.append(show_target)
except ValueError:
pass
# ------------- Setup for Images ------------- #
# Display an image until the loop starts
pyportal.set_background('/images/loading.bmp')
bg_group = displayio.Group(max_size=1)
splash.append(bg_group)
icon_group = displayio.Group(max_size=1)
icon_group.x = 180
icon_group.y = 120
icon_group.scale = 1
view2.append(icon_group)
# This will handel switching Images and Icons
def set_image(group, filename):
"""Set the image file for a given goup for display.
This is most useful for Icons or image slideshows.
:param group: The chosen group
:param filename: The filename of the chosen image
"""
print("Set image to ", filename)
if group:
group.pop()
if not filename:
return # we're done, no icon desired
image_file = open(filename, "rb")
image = displayio.OnDiskBitmap(image_file)
try:
image_sprite = displayio.TileGrid(image, pixel_shader=displayio.ColorConverter())
except TypeError:
image_sprite = displayio.TileGrid(image, pixel_shader=displayio.ColorConverter(),
position=(0, 0))
group.append(image_sprite)
set_image(bg_group, "/images/BGimage.bmp")
# ---------- Text Boxes ------------- #
# Set the font and preload letters
font = bitmap_font.load_font("/fonts/Helvetica-Bold-16.bdf")
font.load_glyphs(b'abcdefghjiklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890- ()')
# Default Label styling:
TABS_X = 5
TABS_Y = 50
# Text Label Objects
feed1_label = Label(font, text="Text Wondow 1", color=0xE39300, max_glyphs=200)
feed1_label.x = TABS_X
feed1_label.y = TABS_Y
view1.append(feed1_label)
feed2_label = Label(font, text="Text Wondow 2", color=0xFFFFFF, max_glyphs=200)
feed2_label.x = TABS_X
feed2_label.y = TABS_Y
view2.append(feed2_label)
sensors_label = Label(font, text="Data View", color=0x03AD31, max_glyphs=200)
sensors_label.x = TABS_X
sensors_label.y = TABS_Y
view3.append(sensors_label)
sensor_data = Label(font, text="Data View", color=0x03AD31, max_glyphs=100)
sensor_data.x = TABS_X+15
sensor_data.y = 170
view3.append(sensor_data)
text_hight = Label(font, text="M", color=0x03AD31, max_glyphs=10)
# return a reformatted string with word wrapping using PyPortal.wrap_nicely
def text_box(target, top, string, max_chars):
text = pyportal.wrap_nicely(string, max_chars)
new_text = ""
test = ""
for w in text:
new_text += '\n'+w
test += 'M\n'
text_hight.text = test # Odd things happen without this
glyph_box = text_hight.bounding_box
target.text = "" # Odd things happen without this
target.y = int(glyph_box[3]/2)+top
target.text = new_text
# ---------- Display Buttons ------------- #
# Default button styling:
BUTTON_HEIGHT = 40
BUTTON_WIDTH = 80
# We want three buttons across the top of the screen
TAPS_HEIGHT = 40
TAPS_WIDTH = int(screen_width/3)
TAPS_Y = 0
# We want two big buttons at the bottom of the screen
BIG_BUTTON_HEIGHT = int(screen_height/3.2)
BIG_BUTTON_WIDTH = int(screen_width/2)
BIG_BUTTON_Y = int(screen_height-BIG_BUTTON_HEIGHT)
# This group will make it easy for us to read a button press later.
buttons = []
# Main User Interface Buttons
button_view1 = Button(x=0, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View1", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view1) # adding this button to the buttons group
button_view2 = Button(x=TAPS_WIDTH, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View2", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view2) # adding this button to the buttons group
button_view3 = Button(x=TAPS_WIDTH*2, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View3", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view3) # adding this button to the buttons group
button_switch = Button(x=0, y=BIG_BUTTON_Y,
width=BIG_BUTTON_WIDTH, height=BIG_BUTTON_HEIGHT,
label="Switch", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_switch) # adding this button to the buttons group
button_2 = Button(x=BIG_BUTTON_WIDTH, y=BIG_BUTTON_Y,
width=BIG_BUTTON_WIDTH, height=BIG_BUTTON_HEIGHT,
label="Button", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_2) # adding this button to the buttons group
# Add all of the main buttons to the splash Group
for b in buttons:
splash.append(b.group)
# Make a button to change the icon image on view2
button_icon = Button(x=150, y=60,
width=BUTTON_WIDTH, height=BUTTON_HEIGHT,
label="Icon", label_font=font, label_color=0xffffff,
fill_color=0x8900ff, outline_color=0xbc55fd,
selected_fill=0x5a5a5a, selected_outline=0xff6600,
selected_label=0x525252, style=Button.ROUNDRECT)
buttons.append(button_icon) # adding this button to the buttons group
# Add this button to view2 Group
view2.append(button_icon.group)
# Make a button to play a sound on view3
button_sound = Button(x=150, y=170,
width=BUTTON_WIDTH, height=BUTTON_HEIGHT,
label="Sound", label_font=font, label_color=0xffffff,
fill_color=0x8900ff, outline_color=0xbc55fd,
selected_fill=0x5a5a5a, selected_outline=0xff6600,
selected_label=0x525252, style=Button.ROUNDRECT)
buttons.append(button_sound) # adding this button to the buttons group
# Add this button to view3 Group
view3.append(button_sound.group)
#pylint: disable=global-statement
def switch_view(what_view):
global view_live
if what_view == 1:
hideLayer(view2)
hideLayer(view3)
button_view1.selected = False
button_view2.selected = True
button_view3.selected = True
showLayer(view1)
view_live = 1
print("View1 On")
elif what_view == 2:
hideLayer(view1)
hideLayer(view3)
button_view1.selected = True
button_view2.selected = False
button_view3.selected = True
showLayer(view2)
view_live = 2
print("View2 On")
else:
hideLayer(view1)
hideLayer(view2)
button_view1.selected = True
button_view2.selected = True
button_view3.selected = False
showLayer(view3)
view_live = 3
print("View3 On")
#pylint: enable=global-statement
# Set variables and startup states
button_view1.selected = False
button_view2.selected = True
button_view3.selected = True
showLayer(view1)
hideLayer(view2)
hideLayer(view3)
view_live = 1
icon = 1
icon_name = "Ruby"
button_mode = 1
switch_state = 0
button_switch.label = "OFF"
button_switch.selected = True
# Update our labels with display text.
# The first text_box call populates feed1_label so that bounding_box returns a
# real size; the second call then substitutes the measured size into the text.
text_box(feed1_label, TABS_Y,
"The text on this screen is wrapped so that all of it fits nicely into a \
text box that is ### x ###.", 30)
text_box(feed1_label, TABS_Y,
'The text on this screen is wrapped so that all of it fits nicely into a \
text box that is {} x {}.'
.format(feed1_label.bounding_box[2], feed1_label.bounding_box[3]*2), 30)
text_box(feed2_label, TABS_Y, 'Tap on the Icon button to meet a new friend.', 18)
text_box(sensors_label, TABS_Y,
"This screen can display sensor readings and tap Sound to play a WAV file.", 28)
board.DISPLAY.show(splash)
# ------------- Code Loop ------------- #
while True:
touch = ts.touch_point
light = light_sensor.value
tempC = round(adt.temperature)
tempF = tempC * 1.8 + 32
sensor_data.text = 'Touch: {}\nLight: {}\nTemp: {}°F'.format(touch, light, tempF)
# ------------- Handle Button Press Detection ------------- #
if touch: # Only do this if the screen is touched
# loop with buttons using enumerate() to number each button group as i
for i, b in enumerate(buttons):
if b.contains(touch): # Test each button to see if it was pressed
print('button%d pressed' % i)
if i == 0 and view_live != 1: # only if view1 is not already visible
pyportal.play_file(soundTab)
switch_view(1)
while ts.touch_point:
pass
if i == 1 and view_live != 2: # only if view2 is not already visible
pyportal.play_file(soundTab)
switch_view(2)
while ts.touch_point:
pass
if i == 2 and view_live != 3: # only if view3 is not already visible
pyportal.play_file(soundTab)
switch_view(3)
while ts.touch_point:
pass
if i == 3:
pyportal.play_file(soundBeep)
# Toggle switch button type
if switch_state == 0:
switch_state = 1
b.label = "ON"
b.selected = False
pixel.fill(WHITE)
print("Swich ON")
else:
switch_state = 0
b.label = "OFF"
b.selected = True
pixel.fill(BLACK)
print("Swich OFF")
# for debounce
while ts.touch_point:
pass
print("Swich Pressed")
if i == 4:
pyportal.play_file(soundBeep)
# Momentary button type
b.selected = True
print('Button Pressed')
button_mode = numberUP(button_mode, 5)
if button_mode == 1:
pixel.fill(RED)
elif button_mode == 2:
pixel.fill(YELLOW)
elif button_mode == 3:
pixel.fill(GREEN)
elif button_mode == 4:
pixel.fill(BLUE)
elif button_mode == 5:
pixel.fill(PURPLE)
switch_state = 1
button_switch.label = "ON"
button_switch.selected = False
# for debounce
while ts.touch_point:
pass
print("Button released")
b.selected = False
if i == 5 and view_live == 2: # only while view2 is visible
pyportal.play_file(soundBeep)
b.selected = True
while ts.touch_point:
pass
print("Icon Button Pressed")
icon = numberUP(icon, 3)
if icon == 1:
icon_name = "Ruby"
elif icon == 2:
icon_name = "Gus"
elif icon == 3:
icon_name = "Billie"
b.selected = False
text_box(feed2_label, TABS_Y,
"Every time you tap the Icon button the icon image will \
change. Say hi to {}!".format(icon_name), 18)
set_image(icon_group, "/images/"+icon_name+".bmp")
if i == 6 and view_live == 3: # only while view3 is visible
b.selected = True
while ts.touch_point:
pass
print("Sound Button Pressed")
pyportal.play_file(soundDemo)
b.selected = False
| 35.508159
| 89
| 0.594433
| 1,861
| 15,233
| 4.70446
| 0.192907
| 0.011879
| 0.012336
| 0.014392
| 0.380126
| 0.35671
| 0.339121
| 0.309766
| 0.256539
| 0.202513
| 0
| 0.051023
| 0.310379
| 15,233
| 428
| 90
| 35.591122
| 0.782294
| 0.164511
| 0
| 0.297214
| 0
| 0
| 0.048139
| 0.007205
| 0
| 0
| 0.034521
| 0
| 0
| 1
| 0.021672
| false
| 0.027864
| 0.037152
| 0
| 0.068111
| 0.037152
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe00cf45d1015948865b349bcd27a15e243e3e66
| 7,741
|
py
|
Python
|
btse_futures/order.py
|
yottatix/btse-python
|
1c5019d0a68dff797afc70c4cc32c1950c28af4e
|
[
"MIT"
] | null | null | null |
btse_futures/order.py
|
yottatix/btse-python
|
1c5019d0a68dff797afc70c4cc32c1950c28af4e
|
[
"MIT"
] | null | null | null |
btse_futures/order.py
|
yottatix/btse-python
|
1c5019d0a68dff797afc70c4cc32c1950c28af4e
|
[
"MIT"
] | null | null | null |
import json
from btse_futures.constants import OrderType, Side, TimeInForce
class Order:
"""
Class to represent a BTSE Order
...
Attributes
----------
size : int
order quantity or size. e.g. 1
price : float
price. e.g. 7000.0
side: str
order side. BUY or SELL
time_in_force: str
time the order is in force. Possible options defined in TimeInForce. e.g. GTC
symbol: str
instrument symbol. e.g. BTCPFC
type: str
order type. "LIMIT", "MARKET", or "OCO"
txType: str
transaction type
postOnly: bool
Is order post only?
reduceOnly: bool
Is order reduce only?
triggerPrice: float
Trigger price. Relevant only for LIMIT and OCO order types
stopPrice: float
Stop price.
trailValue: float
Trail value.
clOrderId: str
User defined order id
trigger: str
If an order is a stop loss or take profit order, then this parameter determines the trigger price.
Available values are: 1. markPrice = Mark Price (Default) and 2. lastPrice = Last transacted Price
Documentation: https://www.btse.com/apiexplorer/futures/?shell#tocs_orderformv2
"""
def __init__(self, size: int, price: float, side: str, time_in_force: str, symbol: str, type: str, txType: str, postOnly: bool, reduceOnly: bool, triggerPrice: float, stopPrice: float = None, trailValue: float = None, clOrderId: str = None, trigger: str = None) -> None:
assert isinstance(size, int)
assert isinstance(price, float)
assert isinstance(side, str)
assert isinstance(time_in_force, str)
assert isinstance(symbol, str)
assert isinstance(type, str)
assert isinstance(postOnly, bool)
assert isinstance(reduceOnly, bool)
assert isinstance(triggerPrice, float)
self.size = size
self.price = price
self.side = side
self.time_in_force = time_in_force
self.symbol = symbol
self.type = type
self.txType = txType
self.postOnly = postOnly
self.reduceOnly = reduceOnly
self.triggerPrice = triggerPrice
self.stopPrice = stopPrice
self.trailValue = trailValue
self.clOrderId = clOrderId
self.trigger = trigger
@property
def quantity(self):
return self.size
def to_json(self):
json_string = json.dumps(self.order_without_none_values())
print(f'json string: {json_string}')
return json_string
def order_without_none_values(self):
# Work on a copy so that serialization does not delete attributes from the Order itself
order_dict = dict(self.__dict__)
for key, value in list(order_dict.items()):
if value is None:
del order_dict[key]
return order_dict
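# A minimal usage sketch (not part of the original module; assumes Side.BUY,
# TimeInForce.GTC and OrderType.LIMIT are the string constants imported above):
#
#   order = Order(size=1, price=7000.0, side=Side.BUY,
#                 time_in_force=TimeInForce.GTC, symbol='BTCPFC',
#                 type=OrderType.LIMIT, txType='LIMIT', postOnly=False,
#                 reduceOnly=False, triggerPrice=7000.0)
#   order.to_json()  # stopPrice, trailValue, clOrderId and trigger are omitted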
class OpenOrder:
"""
open order endpoint response format
https://www.btse.com/apiexplorer/futures/#tocs_positionrespv2_1
Example:
--------
`{
"orderType": 0,
"price": 6875,
"size": 4,
"side": "BUY",
"filledSize": 3,
"orderValue": 20.625,
"pegPriceMin": 0,
"pegPriceMax": 0,
"pegPriceDeviation": 0,
"cancelDuration": 0,
"timestamp": 1576661434072,
"orderID": "string",
"stealth": 0.2,
"triggerOrder": true,
"triggered": true,
"triggerPrice": 0,
"triggerOriginalPrice": 0,
"triggerOrderType": 1001,
"triggerTrailingStopDeviation": 0,
"triggerStopPrice": 0,
"symbol": "string",
"trailValue": 0,
"clOrderID": "market001",
"reduceOnly": true,
"orderState": "string"
}`
"""
def __init__(self) -> None:
self.orderType = 0
self.price = 0
self.size = 0
self.side = ''
self.filledSize = 0
self.orderValue = 0.0
self.pegPriceMin = 0
self.pegPriceMax = 0
self.pegPriceDeviation = 0
self.cancelDuration = 0
self.timestamp = 0
self.orderID = ''
self.stealth = 0.0
self.triggerOrder = False
self.triggered = False
self.triggerPrice = 0
self.triggerOriginalPrice = 0
self.triggerOrderType = 0
self.triggerTrailingStopDeviation = 0
self.triggerStopPrice = 0
self.symbol = ''
self.trailValue = 0
self.clOrderID = ''
self.reduceOnly = False
self.orderState = ''
@staticmethod
def from_dict(data):
open_order = OpenOrder()
open_order.orderType = data.get('orderType')
open_order.price = data.get('price')
open_order.size = data.get('size')
open_order.side = data.get('side')
open_order.filledSize = data.get('filledSize')
open_order.orderValue = data.get('orderValue')
open_order.pegPriceMin = data.get('pegPriceMin')
open_order.pegPriceMax = data.get('pegPriceMax')
open_order.pegPriceDeviation = data.get('pegPriceDeviation')
open_order.cancelDuration = data.get('cancelDuration')
open_order.timestamp = data.get('timestamp')
open_order.orderID = data.get('orderID')
open_order.stealth = data.get('stealth')
open_order.triggerOrder = data.get('triggerOrder')
open_order.triggered = data.get('triggered')
open_order.triggerPrice = data.get('triggerPrice')
open_order.triggerOriginalPrice = data.get('triggerOriginalPrice')
open_order.triggerOrderType = data.get('triggerOrderType')
open_order.triggerTrailingStopDeviation = data.get(
'triggerTrailingStopDeviation')
open_order.triggerStopPrice = data.get('triggerStopPrice')
open_order.symbol = data.get('symbol')
open_order.trailValue = data.get('trailValue')
open_order.clOrderID = data.get('clOrderID')
open_order.reduceOnly = data.get('reduceOnly')
open_order.orderState = data.get('orderState')
return open_order
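# A minimal usage sketch (payload shape taken from the docstring example above):
#
#   payload = {'orderType': 0, 'price': 6875, 'size': 4, 'side': 'BUY'}
#   open_order = OpenOrder.from_dict(payload)
#   open_order.price   # -> 6875
#   open_order.symbol  # -> None (absent keys come back as None from dict.get)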
class OrderResponseV21:
"""
Order Response V2.1
Documentation -- https://www.btse.com/apiexplorer/futures/?shell#tocs_orderrespv2_1
"""
def __init__(self) -> None:
self.status = 0
self.symbol = ''
self.orderType = 0
self.price = 0.0
self.side = ''
self.size = 0
self.orderID = ''
self.timestamp = 0
self.triggerPrice = 0.0
self.trigger = ''
self.deviation = 0.0
self.stealth = 0.0
self.message = ''
self.avgFillPrice = 0.0
self.fillSize = 0.0
self.clOrderID = ''
@staticmethod
def from_dict(data):
order_response_v21 = OrderResponseV21()
order_response_v21.status = data.get('status')
order_response_v21.symbol = data.get('symbol')
order_response_v21.orderType = data.get('orderType')
order_response_v21.price = data.get('price')
order_response_v21.side = data.get('side')
order_response_v21.size = data.get('size')
order_response_v21.orderID = data.get('orderID')
order_response_v21.timestamp = data.get('timestamp')
order_response_v21.triggerPrice = data.get('triggerPrice')
order_response_v21.trigger = data.get('trigger')
order_response_v21.deviation = data.get('deviation')
order_response_v21.stealth = data.get('stealth')
order_response_v21.message = data.get('message')
order_response_v21.avgFillPrice = data.get('avgFillPrice')
order_response_v21.fillSize = data.get('fillSize')
order_response_v21.clOrderID = data.get('clOrderID')
return order_response_v21
| 33.510823
| 274
| 0.612324
| 838
| 7,741
| 5.523866
| 0.186158
| 0.062
| 0.062216
| 0.009073
| 0.067617
| 0.041262
| 0.023763
| 0.023763
| 0.023763
| 0
| 0
| 0.023912
| 0.281488
| 7,741
| 230
| 275
| 33.656522
| 0.808342
| 0.245317
| 0
| 0.162963
| 0
| 0
| 0.076395
| 0.005057
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.059259
| false
| 0
| 0.014815
| 0.007407
| 0.133333
| 0.007407
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe00feaeeab5dd9b94bc8b6fc0a0dcbedc801a5d
| 2,037
|
py
|
Python
|
tests/mock_responses.py
|
md-reddevil/blinkpy
|
3c7892385352079227c6251eb88257870bea0bb3
|
[
"MIT"
] | null | null | null |
tests/mock_responses.py
|
md-reddevil/blinkpy
|
3c7892385352079227c6251eb88257870bea0bb3
|
[
"MIT"
] | null | null | null |
tests/mock_responses.py
|
md-reddevil/blinkpy
|
3c7892385352079227c6251eb88257870bea0bb3
|
[
"MIT"
] | null | null | null |
"""Simple mock responses definitions."""
from blinkpy.helpers.util import BlinkURLHandler
import blinkpy.helpers.constants as const
LOGIN_RESPONSE = {
'region': {'mock': 'Test'},
'networks': {
'1234': {'name': 'test', 'onboarded': True}
},
'authtoken': {'authtoken': 'foobar123', 'message': 'auth'}
}
class MockResponse:
"""Class for mock request response."""
def __init__(self, json_data, status_code, raw_data=None):
"""Initialize mock get response."""
self.json_data = json_data
self.status_code = status_code
self.raw_data = raw_data
def json(self):
"""Return json data from get_request."""
return self.json_data
@property
def raw(self):
"""Return raw data from get request."""
return self.raw_data
def mocked_session_send(*args, **kwargs):
"""Mock session."""
prepped = args[0]
url = prepped.url
header = prepped.headers
method = prepped.method
if method == 'GET':
expected_token = LOGIN_RESPONSE['authtoken']['authtoken']
if header['TOKEN_AUTH'] != expected_token:
response = {'message': 'Not Authorized', 'code': 400}
status = 400
elif url == 'use_bad_response':
response = {'foo': 'bar'}
status = 200
elif url == 'reauth':
response = {'message': 'REAUTH', 'code': 777}
status = 777
else:
response = {'test': 'foo'}
status = 200
elif method == 'POST':
if url in (const.LOGIN_URL, const.LOGIN_BACKUP_URL):
response = LOGIN_RESPONSE
status = 200
elif url == 'http://wrong.url/' or url is None:
response = {'message': 'Error', 'code': 404}
status = 404
else:
response = {'message': 'foo', 'code': 200}
status = 200
return MockResponse(response, status)
class MockURLHandler(BlinkURLHandler):
"""Mocks URL Handler in blinkpy module."""
pass
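# A minimal usage sketch (an assumption about how tests wire this up, e.g. with
# unittest.mock; not part of the original module):
#
#   from unittest import mock
#   import requests
#
#   with mock.patch('requests.Session.send', side_effect=mocked_session_send):
#       prepped = requests.Request('GET', 'http://test.url/',
#                                  headers={'TOKEN_AUTH': 'foobar123'}).prepare()
#       resp = requests.Session().send(prepped)
#       assert resp.json() == {'test': 'foo'} and resp.status_code == 200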
| 28.291667
| 65
| 0.573883
| 220
| 2,037
| 5.181818
| 0.377273
| 0.035088
| 0.031579
| 0.031579
| 0.049123
| 0.049123
| 0
| 0
| 0
| 0
| 0
| 0.028413
| 0.291605
| 2,037
| 71
| 66
| 28.690141
| 0.761608
| 0.106529
| 0
| 0.117647
| 0
| 0
| 0.134529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078431
| false
| 0.019608
| 0.039216
| 0
| 0.215686
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe01b90ce53e119b08e13770e4500dbf262d962f
| 2,061
|
py
|
Python
|
fits_tools.py
|
steveschulze/Photometry
|
3bc4ce457a270962321176d0e3e288b5a96cd34b
|
[
"BSD-2-Clause"
] | 6
|
2020-03-05T20:58:35.000Z
|
2022-02-13T20:18:46.000Z
|
fits_tools.py
|
steveschulze/Photometry
|
3bc4ce457a270962321176d0e3e288b5a96cd34b
|
[
"BSD-2-Clause"
] | 1
|
2020-03-10T00:03:46.000Z
|
2020-03-10T00:03:46.000Z
|
fits_tools.py
|
steveschulze/Photometry
|
3bc4ce457a270962321176d0e3e288b5a96cd34b
|
[
"BSD-2-Clause"
] | 1
|
2020-11-26T10:38:47.000Z
|
2020-11-26T10:38:47.000Z
|
from astropy import coordinates as coord
from astropy import wcs
from astropy.io import fits
from astropy import units as u
from misc import bcolors
import numpy as np
import os
import sys
def convert_hms_dd(RA, DEC):
'''
Convert HMS to DD system
'''
if (':' in RA) and (':' in DEC):
Coord_dd = coord.SkyCoord(RA, DEC, unit=(u.hour,u.degree), frame='icrs')
RA_dd = Coord_dd.ra.deg
Dec_dd = Coord_dd.dec.deg
elif (not (':' in RA) and not (':' in DEC)) and (('.' in RA) and ('.' in DEC)):
RA_dd, Dec_dd = float(RA), float(DEC)
else:
print(bcolors.FAIL + 'Coordinates have wrong format.' + bcolors.ENDC)
sys.exit()
return RA_dd, Dec_dd
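# For illustration, both supported coordinate formats:
#   convert_hms_dd('12:30:45.6', '-45:12:13.4')  # -> (~187.69, ~-45.2037)
#   convert_hms_dd('187.69', '-45.2037')         # -> (187.69, -45.2037)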
def get_header(FILE, KEYWORD):
'''
Get keyword from fits file
'''
header = fits.getheader(FILE)
return header[KEYWORD]
def pix2arcsec(FITS):
'''
Get pixel scale
'''
hdu = fits.open(FITS)
if len(hdu) > 1:
header = fits.getheader(FITS, 0)
header += fits.getheader(FITS, 1)
else:
header = fits.getheader(FITS)
hdu_wcs = wcs.WCS(header)
return np.median(wcs.utils.proj_plane_pixel_scales(hdu_wcs)) * 3600
def sky2xy(FITS, RA=False, DEC=False, CAT=None):
'''
Coordinate transformation: sky -> xy
'''
if CAT is None:
if RA is not False and DEC is not False:
cmd=('sky2xy %s %s %s | grep -v off' %(FITS, RA, DEC))
program_call = os.popen(cmd)
xy = []
for line in program_call:
xy=np.array(line.strip().split()[-2:]).astype(float)
if len(xy) > 0:
return xy
else:
cmd =("more %s | awk '{print $1,$2}' > %s" %(CAT, CAT.replace(CAT.split('.')[-1], 'reg')))
os.system(cmd)
cmd = ("sky2xy %s @%s | grep -v off | awk '{print $5, $6}'" %(FITS, CAT.replace(CAT.split('.')[-1], 'reg')))
cat = os.popen(cmd)
xy = []
for line in cat:
xy.append(list(map(float, line.replace('\n', '').split())))
return np.array(xy)
def xy2sky(FITSFILE, X, Y):
'''
Coordinate transformation: xy -> sky
'''
program_call = os.popen('xy2sky %s %s %s' %(FITSFILE, X, Y))
sky = []
for line in program_call:
sky.append(line.strip().split()[:2])
return sky
| 21.247423
| 111
| 0.622028
| 327
| 2,061
| 3.856269
| 0.314985
| 0.00793
| 0.06027
| 0.054718
| 0.127676
| 0.0682
| 0.033307
| 0
| 0
| 0
| 0
| 0.013301
| 0.197477
| 2,061
| 96
| 112
| 21.46875
| 0.749093
| 0.068413
| 0
| 0.127273
| 0
| 0
| 0.094984
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.127273
| 0
| 0.327273
| 0.054545
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe028f3f35a9ad5d36908ec80630b139c6300e3c
| 2,155
|
py
|
Python
|
test_stbp_snn_eval.py
|
neurom-iot/n3ml
|
39c6b50661f293d58b4b37ef613643860724bb24
|
[
"MIT"
] | 11
|
2019-03-15T17:20:54.000Z
|
2022-03-01T08:25:36.000Z
|
test_stbp_snn_eval.py
|
neurom-iot/n3ml
|
39c6b50661f293d58b4b37ef613643860724bb24
|
[
"MIT"
] | 7
|
2019-03-15T16:02:51.000Z
|
2021-12-03T08:17:06.000Z
|
test_stbp_snn_eval.py
|
neurom-iot/n3ml
|
39c6b50661f293d58b4b37ef613643860724bb24
|
[
"MIT"
] | 9
|
2019-10-14T12:38:19.000Z
|
2021-12-02T04:49:28.000Z
|
import argparse
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from n3ml.model import DynamicModel_STBP_SNN
def validate(val_loader, model, encoder, criterion, opt):
model.eval()
total_images = 0
num_corrects = 0
total_loss = 0
with torch.no_grad():
for step, (images, labels) in enumerate(val_loader):
images = images.cuda()
labels = labels.cuda()
preds = model(encoder, images, opt.num_steps)
labels_ = torch.zeros(torch.numel(labels), 10, device=labels.device)
labels_ = labels_.scatter_(1, labels.view(-1, 1), 1)
loss = criterion(preds, labels_)
num_corrects += torch.argmax(preds, dim=1).eq(labels).sum(dim=0)
total_loss += loss.cpu().detach().numpy() * images.size(0)
total_images += images.size(0)
val_acc = num_corrects.float() / total_images
val_loss = total_loss / total_images
return val_acc, val_loss
def app(opt):
print(opt)
val_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST(
opt.data,
train=False,
download=True,
transform=torchvision.transforms.Compose([transforms.ToTensor()])),
batch_size=opt.batch_size)
state_dict = torch.load(opt.pretrained)
model = DynamicModel_STBP_SNN(batch_size=opt.batch_size)
for m in state_dict['arch']:
model.add_module(m[0], m[1])
if torch.cuda.is_available():
model.cuda()
encoder = lambda x: (x > torch.rand(x.size(), device=x.device)).float()
criterion = nn.MSELoss()
acc, loss = validate(val_loader, model, encoder, criterion, opt)
print("In test, loss: {} - acc: {}".format(loss, acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data', default='data')
parser.add_argument('--batch_size', default=100, type=int)
parser.add_argument('--num_steps', default=15, type=int)
parser.add_argument('--pretrained', default='pretrained/stbp_dynamic_acc_9897.pt')
app(parser.parse_args())
| 28.355263
| 86
| 0.647332
| 277
| 2,155
| 4.837545
| 0.371841
| 0.033582
| 0.050746
| 0.032836
| 0.128358
| 0.061194
| 0.061194
| 0
| 0
| 0
| 0
| 0.014934
| 0.223202
| 2,155
| 75
| 87
| 28.733333
| 0.785544
| 0
| 0
| 0
| 0
| 0
| 0.05522
| 0.016241
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0
| 0.117647
| 0
| 0.176471
| 0.039216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe03d9810588ad4d8d061ca21558f5e026141e64
| 2,334
|
py
|
Python
|
kaggle_melanoma/schedulers.py
|
tinve/kaggle_melanoma
|
6d2d16d62a394fd9cc2498bdf1a19ce60fe047eb
|
[
"MIT"
] | 8
|
2020-06-01T10:42:40.000Z
|
2022-02-17T08:42:49.000Z
|
kaggle_melanoma/schedulers.py
|
tinve/kaggle_melanoma
|
6d2d16d62a394fd9cc2498bdf1a19ce60fe047eb
|
[
"MIT"
] | null | null | null |
kaggle_melanoma/schedulers.py
|
tinve/kaggle_melanoma
|
6d2d16d62a394fd9cc2498bdf1a19ce60fe047eb
|
[
"MIT"
] | 2
|
2020-06-08T22:34:38.000Z
|
2022-02-24T03:15:59.000Z
|
import math
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
class PolyLR(_LRScheduler):
"""
Sets the learning rate of each parameter group according to the poly learning rate policy.
"""
def __init__(self, optimizer, max_iter=90000, power=0.9, last_epoch=-1):
self.max_iter = max_iter
self.power = power
super().__init__(optimizer, last_epoch)
def get_lr(self):
return [base_lr * (1 - float(self.last_epoch) / self.max_iter) ** self.power for base_lr in self.base_lrs]
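# A minimal usage sketch (hypothetical model and optimizer; as with any
# _LRScheduler, step() is called once per training iteration):
#
#   import torch
#   model = torch.nn.Linear(4, 2)
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
#   scheduler = PolyLR(optimizer, max_iter=1000, power=0.9)
#   for _ in range(1000):
#       optimizer.step()   # after the usual forward/backward pass
#       scheduler.step()   # lr decays as base_lr * (1 - iter/max_iter) ** 0.9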
func_zoo = {
"cosine_decay": lambda epoch, step, len_epoch, total_epoch: 0.5
* (math.cos(step * math.pi / (total_epoch * len_epoch)) + 1)
}
class CosineWarmRestart:
def __init__(
self,
optimizer: Optimizer,
func: str = "cosine_decay",
warmup: bool = True,
warmup_epoch: int = 1,
period: int = 10,
min_lr: float = 1e-5,
low_epoch: int = 1,
):
# self.base_lrs = list(map(lambda group: group["lr"], optimizer.param_groups))[0]
self.base_lrs = [x["lr"] for x in optimizer.param_groups][0]
self.optimizer = optimizer
self.warmup = warmup
self.warmup_epoch = warmup_epoch
self.period = period
self.cos_period = period - low_epoch
self.low_epoch = low_epoch
self.lr_func = func_zoo[func]
self.min_lr = min_lr
def cosine_step(self, current_epoch: int, global_step: int, len_epoch: int) -> float:
if self.warmup and current_epoch < self.warmup_epoch:
lr = self.base_lrs * float(1 + global_step) / (self.warmup_epoch * len_epoch)
else:
lr = self.base_lrs * self.lr_func(current_epoch, global_step, len_epoch, self.cos_period)
lr = max(self.min_lr, lr)
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
return lr
def step(self, current_epoch: int, global_step: int, len_epoch: int) -> float:
current_epoch = current_epoch % self.period
if current_epoch >= self.period - self.low_epoch:
global_step = len_epoch * self.cos_period
else:
global_step = global_step % (self.period * len_epoch)
return self.cosine_step(current_epoch, global_step, len_epoch)
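# A minimal usage sketch (hypothetical loop variables): unlike PolyLR above,
# this scheduler is driven explicitly with the epoch, global step and epoch length.
#
#   scheduler = CosineWarmRestart(optimizer, warmup_epoch=1, period=10)
#   for epoch in range(num_epochs):
#       for step, batch in enumerate(loader):
#           global_step = epoch * len(loader) + step
#           lr = scheduler.step(epoch, global_step, len(loader))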
| 35.363636
| 114
| 0.641388
| 322
| 2,334
| 4.378882
| 0.232919
| 0.057447
| 0.039007
| 0.038298
| 0.186525
| 0.151064
| 0.124823
| 0.124823
| 0.073759
| 0.073759
| 0
| 0.012118
| 0.257498
| 2,334
| 65
| 115
| 35.907692
| 0.8015
| 0.071123
| 0
| 0.04
| 0
| 0
| 0.013011
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.06
| 0.02
| 0.26
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe056ef418d151035d2b9bd419b580cf756d0fd1
| 1,099
|
py
|
Python
|
utils.py
|
federicosapienza/InboxNotionTelegramBot
|
031d5e78cd352dfb692b93f3e0b421695f1dc18e
|
[
"MIT"
] | null | null | null |
utils.py
|
federicosapienza/InboxNotionTelegramBot
|
031d5e78cd352dfb692b93f3e0b421695f1dc18e
|
[
"MIT"
] | null | null | null |
utils.py
|
federicosapienza/InboxNotionTelegramBot
|
031d5e78cd352dfb692b93f3e0b421695f1dc18e
|
[
"MIT"
] | null | null | null |
import json
import logging
logger = logging.getLogger(__name__)
with open('configuration.json') as f:
config = json.load(f)
TELEGRAM_TOKEN = config["telegram-bot-token"]
NOTION_TOKEN = config["notion-token"]
NOTION_TABLE_URL = config["inbox_table"]["table_url"]
def check_allowed_user(user_id):
"""
Check whether a user is allowed to use the bot.
:param user_id: telegram user id
:return: True if the user is valid, False otherwise
"""
valid_user = config["allowed_user_id"]
user_id = str(user_id)
return user_id == valid_user
def restrict_action(handled_action):
"""
Wrapper for creating a private bot
:param handled_action: the action to perform
"""
def check_private(update, context):
if not (check_allowed_user(update.message.from_user.id)):
logging.warning("An unauthorized user attempted to use the bot. username: {}, id: {} .".format(
update.message.from_user.username, update.message.from_user.id
))
return
else:
return handled_action(update, context)
return check_private
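# A minimal usage sketch (assumes python-telegram-bot style handlers; the
# handler below and the CommandHandler wiring are hypothetical):
#
#   @restrict_action
#   def start(update, context):
#       update.message.reply_text('Hi!')
#
#   dispatcher.add_handler(CommandHandler('start', start))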
| 27.475
| 107
| 0.674249
| 143
| 1,099
| 4.958042
| 0.405594
| 0.076164
| 0.050776
| 0.088858
| 0.06488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22566
| 1,099
| 39
| 108
| 28.179487
| 0.833137
| 0.165605
| 0
| 0
| 0
| 0
| 0.173913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.090909
| 0
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe068879b9f1513a9f5e49e88200ed64c8fa16f1
| 12,623
|
py
|
Python
|
cassiopeia/datastores/riotapi/match.py
|
artemigkh/cassiopeia
|
fa78cb8f86ea21857916a707d04de6a05498033e
|
[
"MIT"
] | 1
|
2021-09-07T05:26:21.000Z
|
2021-09-07T05:26:21.000Z
|
cassiopeia/datastores/riotapi/match.py
|
artemigkh/cassiopeia
|
fa78cb8f86ea21857916a707d04de6a05498033e
|
[
"MIT"
] | null | null | null |
cassiopeia/datastores/riotapi/match.py
|
artemigkh/cassiopeia
|
fa78cb8f86ea21857916a707d04de6a05498033e
|
[
"MIT"
] | 1
|
2016-10-20T11:54:20.000Z
|
2016-10-20T11:54:20.000Z
|
from time import time
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator, Union
import arrow
import datetime
import math
from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query
from .common import RiotAPIService, APINotFoundError
from ...data import Platform, Season, Queue, SEASON_IDS, QUEUE_IDS
from ...dto.match import MatchDto, MatchListDto, TimelineDto
from ..uniquekeys import convert_region_to_platform
T = TypeVar("T")
def _get_current_time(query: MutableMapping[str, Any], context: PipelineContext = None) -> int:
return int(time()) * 1000
class MatchAPI(RiotAPIService):
@DataSource.dispatch
def get(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> T:
pass
@DataSource.dispatch
def get_many(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
pass
_validate_get_match_query = Query. \
has("id").as_(int).also. \
has("platform").as_(Platform)
@get.register(MatchDto)
@validate_query(_validate_get_match_query, convert_region_to_platform)
def get_match(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> MatchDto:
url = "https://{platform}.api.riotgames.com/lol/match/v4/matches/{id}".format(platform=query["platform"].value.lower(), id=query["id"])
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matches/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["gameId"] = query["id"]
data["region"] = query["platform"].region.value
for p in data["participantIdentities"]:
aid = p.get("player", {}).get("currentAccountId", None)
if aid == 0:
p["player"]["bot"] = True
return MatchDto(data)
_validate_get_many_match_query = Query. \
has("ids").as_(Iterable).also. \
has("platform").as_(Platform)
@get_many.register(MatchDto)
@validate_query(_validate_get_many_match_query, convert_region_to_platform)
def get_many_match(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[MatchDto, None, None]:
def generator():
for id in query["ids"]:
url = "https://{platform}.api.riotgames.com/lol/match/v4/matches/{id}".format(platform=query["platform"].value.lower(), id=id)
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matches/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
for participant in data["participants"]:
participant.setdefault("runes", [])
for p in data["participantIdentities"]:
aid = p.get("player", {}).get("currentAccountId", None)
if aid == 0:
p["player"]["bot"] = True
data["gameId"] = id
data["region"] = query["platform"].region.value
yield MatchDto(data)
return generator()
_validate_get_match_list_query = Query. \
has("accountId").as_(str).also. \
has("platform").as_(Platform).also. \
has("beginTime").as_(int).also. \
can_have("endTime").as_(int).also. \
has("beginIndex").as_(int).also. \
has("maxNumberOfMatches").as_(float).also. \
can_have("seasons").as_(Iterable).also. \
can_have("champion.ids").as_(Iterable).also. \
can_have("queues").as_(Iterable)
@get.register(MatchListDto)
@validate_query(_validate_get_match_list_query, convert_region_to_platform)
def get_match_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> MatchListDto:
params = {}
riot_index_interval = 100
riot_date_interval = datetime.timedelta(days=7)
begin_time = query["beginTime"] # type: arrow.Arrow
end_time = query.get("endTime", arrow.now()) # type: arrow.Arrow
if isinstance(begin_time, int):
begin_time = arrow.get(begin_time / 1000)
if isinstance(end_time, int):
end_time = arrow.get(end_time / 1000)
def determine_calling_method(begin_time, end_time) -> str:
"""Returns either "by_date" or "by_index"."""
matches_per_date_interval = 10 # This is an assumption
seconds_per_day = (60 * 60 * 24)
riot_date_interval_in_days = riot_date_interval.total_seconds() / seconds_per_day # in units of days
npulls_by_date = (end_time - begin_time).total_seconds() / seconds_per_day / riot_date_interval_in_days
npulls_by_index = (arrow.now() - begin_time).total_seconds() / seconds_per_day / riot_date_interval_in_days * matches_per_date_interval / riot_index_interval
if math.ceil(npulls_by_date) < math.ceil(npulls_by_index):
by = "by_date"
else:
by = "by_index"
return by
calling_method = determine_calling_method(begin_time, end_time)
if calling_method == "by_date":
params["beginTime"] = begin_time.timestamp * 1000
if "endTime" in query:
params["endTime"] = min((begin_time + riot_date_interval).timestamp * 1000, query["endTime"])
else:
params["endTime"] = (begin_time + riot_date_interval).timestamp * 1000
else:
params["beginIndex"] = query["beginIndex"]
params["endIndex"] = query["beginIndex"] + min(riot_index_interval, query["maxNumberOfMatches"])
params["endIndex"] = int(params["endIndex"])
if "seasons" in query:
seasons = {Season(season) for season in query["seasons"]}
params["season"] = {SEASON_IDS[season] for season in seasons}
else:
seasons = set()
if "champion.ids" in query:
champions = query["champion.ids"]
params["champion"] = champions
else:
champions = set()
if "queues" in query:
queues = {Queue(queue) for queue in query["queues"]}
params["queue"] = {QUEUE_IDS[queue] for queue in queues}
else:
queues = set()
url = "https://{platform}.api.riotgames.com/lol/match/v4/matchlists/by-account/{accountId}".format(platform=query["platform"].value.lower(), accountId=query["accountId"])
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matchlists/by-account/accountId")
data = self._get(url, params, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError:
data = {"matches": []}
data["accountId"] = query["accountId"]
data["region"] = query["platform"].region.value
data["season"] = seasons
data["champion"] = champions
data["queue"] = queues
if calling_method == "by_index":
data["beginIndex"] = params["beginIndex"]
data["endIndex"] = params["endIndex"]
data["maxNumberOfMatches"] = query["maxNumberOfMatches"]
else:
data["beginTime"] = params["beginTime"]
data["endTime"] = params["endTime"]
for match in data["matches"]:
match["accountId"] = query["accountId"]
match["region"] = Platform(match["platformId"]).region.value
return MatchListDto(data)
_validate_get_many_match_list_query = Query. \
has("accountIds").as_(Iterable).also. \
has("platform").as_(Platform).also. \
can_have("beginTime").as_(int).also. \
can_have("endTime").as_(int).also. \
can_have("beginIndex").as_(int).also. \
can_have("endIndex").as_(int).also. \
can_have("seasons").as_(Iterable).also. \
can_have("champion.ids").as_(Iterable).also. \
can_have("queues").as_(Iterable)
@get_many.register(MatchListDto)
@validate_query(_validate_get_many_match_list_query, convert_region_to_platform)
def get_many_match_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[MatchListDto, None, None]:
params = {}
if "beginIndex" in query:
params["beginIndex"] = query["beginIndex"]
if "endIndex" in query:
params["endIndex"] = query["endIndex"]
if "seasons" in query:
seasons = {Season(season) for season in query["seasons"]}
params["season"] = {SEASON_IDS[season] for season in seasons}
else:
seasons = set()
if "champion.ids" in query:
params["champion"] = {query["champion.ids"]}
if "queues" in query:
queues = {Queue(queue) for queue in query["queues"]}
params["queue"] = {QUEUE_IDS[queue] for queue in queues}
else:
queues = set()
def generator():
for id in query["accountIds"]:
url = "https://{platform}.api.riotgames.com/lol/match/v4/matchlists/by-account/{accountId}".format(platform=query["platform"].value.lower(), accountId=id)
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matchlists/by-account/accountId")
data = self._get(url, params, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["accountId"] = id
data["region"] = query["platform"].region.value
if "beginIndex" in query:
data["beginIndex"] = query["beginIndex"]
if "endIndex" in query:
data["endIndex"] = query["endIndex"]
if "seasons" in query:
data["seasons"] = seasons
if "champion.ids" in query:
data["champion"] = params["champion"]
if "queues" in query:
params["queue"] = queues
yield MatchListDto(data)
return generator()
_validate_get_timeline_query = Query. \
has("id").as_(int).also. \
has("platform").as_(Platform)
@get.register(TimelineDto)
@validate_query(_validate_get_timeline_query, convert_region_to_platform)
def get_match_timeline(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> TimelineDto:
url = "https://{platform}.api.riotgames.com/lol/match/v4/timelines/by-match/{id}".format(platform=query["platform"].value.lower(), id=query["id"])
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "timelines/by-match/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["matchId"] = query["id"]
data["region"] = query["platform"].region.value
return TimelineDto(data)
_validate_get_many_timeline_query = Query. \
has("ids").as_(Iterable).also. \
has("platform").as_(Platform)
@get_many.register(TimelineDto)
@validate_query(_validate_get_many_timeline_query, convert_region_to_platform)
def get_many_match_timeline(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[TimelineDto, None, None]:
def generator():
for id in query["ids"]:
url = "https://{platform}.api.riotgames.com/lol/match/v4/timelines/by-match/{id}".format(platform=query["platform"].value.lower(), id=id)
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "timelines/by-match/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["matchId"] = id
data["region"] = query["platform"].region.value
yield TimelineDto(data)
return generator()
| 45.735507
| 178
| 0.614751
| 1,405
| 12,623
| 5.316014
| 0.108897
| 0.019681
| 0.048199
| 0.036953
| 0.688579
| 0.656447
| 0.609586
| 0.554961
| 0.520418
| 0.478511
| 0
| 0.004687
| 0.256278
| 12,623
| 275
| 179
| 45.901818
| 0.790903
| 0.00911
| 0
| 0.480349
| 0
| 0.017467
| 0.142251
| 0.011681
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056769
| false
| 0.008734
| 0.043668
| 0.004367
| 0.165939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe073352dbed399802293822986fcaea27535a33
| 10,374
|
py
|
Python
|
Lib/site-packages/hackedit/vendor/jedi/cache.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 1
|
2017-08-19T08:13:28.000Z
|
2017-08-19T08:13:28.000Z
|
node_modules/nuclide/pkg/nuclide-python-rpc/VendorLib/jedi/cache.py
|
kevingatera/kgatewebapp
|
f0dbc50b7af2736e1f6c6f96f0a26fc7ff69db20
|
[
"Unlicense"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
Lib/site-packages/hackedit/vendor/jedi/cache.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | null | null | null |
"""
This caching is very important for speed and memory optimizations. There's
nothing really spectacular, just some decorators. The following cache types are
available:
- module caching (`load_parser` and `save_parser`), which uses pickle and is
really important to assure low load times of modules like ``numpy``.
- ``time_cache`` can be used to cache something for just a limited time span,
which can be useful if there's user interaction and the user cannot react
faster than a certain time.
This module is one of the reasons why |jedi| is not thread-safe. As you can see
there are global variables, which are holding the cache information. Some of
these variables are being cleaned after every API usage.
"""
import time
import os
import sys
import json
import hashlib
import gc
import inspect
import shutil
import re
try:
import cPickle as pickle
except ImportError:
import pickle
from jedi import settings
from jedi import common
from jedi import debug
_time_caches = {}
# for fast_parser, should not be deleted
parser_cache = {}
class ParserCacheItem(object):
def __init__(self, parser, change_time=None):
self.parser = parser
if change_time is None:
change_time = time.time()
self.change_time = change_time
def clear_time_caches(delete_all=False):
""" Jedi caches many things, that should be completed after each completion
finishes.
:param delete_all: Deletes also the cache that is normally not deleted,
like parser cache, which is important for faster parsing.
"""
global _time_caches
if delete_all:
for cache in _time_caches.values():
cache.clear()
parser_cache.clear()
else:
# normally just kill the expired entries, not all
for tc in _time_caches.values():
# check time_cache for expired entries
for key, (t, value) in list(tc.items()):
if t < time.time():
# delete expired entries
del tc[key]
def time_cache(time_add_setting):
"""
This decorator works as follows: Call it with a setting and after that
use the function with a callable that returns the key.
But: This function is only called if the key is not available. After a
certain amount of time (`time_add_setting`) the cache is invalid.
"""
def _temp(key_func):
dct = {}
_time_caches[time_add_setting] = dct
def wrapper(*args, **kwargs):
generator = key_func(*args, **kwargs)
key = next(generator)
try:
expiry, value = dct[key]
if expiry > time.time():
return value
except KeyError:
pass
value = next(generator)
time_add = getattr(settings, time_add_setting)
if key is not None:
dct[key] = time.time() + time_add, value
return value
return wrapper
return _temp
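# A minimal usage sketch (compute() is hypothetical): the decorated function is
# a generator that first yields the cache key and then the value to cache.
#
#   @time_cache("call_signatures_validity")
#   def cached_compute(x):
#       yield x            # the cache key (yielding None skips storing)
#       yield compute(x)   # cached for settings.call_signatures_validity seconds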
@time_cache("call_signatures_validity")
def cache_call_signatures(evaluator, call, source, user_pos):
"""This function calculates the cache key."""
index = user_pos[0] - 1
lines = common.splitlines(source)
before_cursor = lines[index][:user_pos[1]]
other_lines = lines[call.start_pos[0]:index]
whole = '\n'.join(other_lines + [before_cursor])
before_bracket = re.match(r'.*\(', whole, re.DOTALL)
module_path = call.get_parent_until().path
yield None if module_path is None else (module_path, before_bracket, call.start_pos)
yield evaluator.eval_element(call)
def underscore_memoization(func):
"""
Decorator for methods::
class A(object):
def x(self):
if self._x:
self._x = 10
return self._x
Becomes::
class A(object):
@underscore_memoization
def x(self):
return 10
A now has an attribute ``_x`` written by this decorator.
"""
name = '_' + func.__name__
def wrapper(self):
try:
return getattr(self, name)
except AttributeError:
result = func(self)
if inspect.isgenerator(result):
result = list(result)
setattr(self, name, result)
return result
return wrapper
def memoize_method(method):
"""A normal memoize function."""
def wrapper(self, *args, **kwargs):
dct = self.__dict__.setdefault('_memoize_method_dct', {})
key = (args, frozenset(kwargs.items()))
try:
return dct[key]
except KeyError:
result = method(self, *args, **kwargs)
dct[key] = result
return result
return wrapper
def memoize_function(obj):
""" A normal memoize function for memoizing free functions. """
cache = obj.cache = {}
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
def cache_star_import(func):
@time_cache("star_import_cache_validity")
def wrapper(self):
yield self.base # The cache key
yield func(self)
return wrapper
def _invalidate_star_import_cache_module(module, only_main=False):
""" Important if some new modules are being reparsed """
try:
t, modules = _time_caches['star_import_cache_validity'][module]
except KeyError:
pass
else:
del _time_caches['star_import_cache_validity'][module]
def invalidate_star_import_cache(path):
"""On success returns True."""
try:
parser_cache_item = parser_cache[path]
except KeyError:
pass
else:
_invalidate_star_import_cache_module(parser_cache_item.parser.module)
def load_parser(path):
"""
Returns the module or None, if it fails.
"""
p_time = os.path.getmtime(path) if path else None
try:
parser_cache_item = parser_cache[path]
if not path or p_time <= parser_cache_item.change_time:
return parser_cache_item.parser
else:
# In case there is already a module cached and this module
# has to be reparsed, we also need to invalidate the import
# caches.
_invalidate_star_import_cache_module(parser_cache_item.parser.module)
except KeyError:
if settings.use_filesystem_cache:
return ParserPickling.load_parser(path, p_time)
def save_parser(path, parser, pickling=True):
try:
p_time = None if path is None else os.path.getmtime(path)
except OSError:
p_time = None
pickling = False
item = ParserCacheItem(parser, p_time)
parser_cache[path] = item
if settings.use_filesystem_cache and pickling:
ParserPickling.save_parser(path, item)
class ParserPickling(object):
version = 24
"""
Version number (integer) for file system cache.
Increment this number when there are any incompatible changes in
parser representation classes. For example, the following changes
are regarded as incompatible.
- Class name is changed.
- Class is moved to another module.
- Defined slot of the class is changed.
"""
def __init__(self):
self.__index = None
self.py_tag = 'cpython-%s%s' % sys.version_info[:2]
"""
Short name for distinguish Python implementations and versions.
It's like `sys.implementation.cache_tag` but for Python < 3.3
we generate something similar. See:
http://docs.python.org/3/library/sys.html#sys.implementation
.. todo:: Detect interpreter (e.g., PyPy).
"""
def load_parser(self, path, original_changed_time):
try:
pickle_changed_time = self._index[path]
except KeyError:
return None
if original_changed_time is not None \
and pickle_changed_time < original_changed_time:
# the pickle file is outdated
return None
with open(self._get_hashed_path(path), 'rb') as f:
try:
gc.disable()
parser_cache_item = pickle.load(f)
finally:
gc.enable()
debug.dbg('pickle loaded: %s', path)
parser_cache[path] = parser_cache_item
return parser_cache_item.parser
def save_parser(self, path, parser_cache_item):
self.__index = None
try:
files = self._index
except KeyError:
files = {}
self._index = files
with open(self._get_hashed_path(path), 'wb') as f:
pickle.dump(parser_cache_item, f, pickle.HIGHEST_PROTOCOL)
files[path] = parser_cache_item.change_time
self._flush_index()
@property
def _index(self):
if self.__index is None:
try:
with open(self._get_path('index.json')) as f:
data = json.load(f)
except (IOError, ValueError):
self.__index = {}
else:
# 0 means version is not defined (= always delete cache):
if data.get('version', 0) != self.version:
self.clear_cache()
self.__index = {}
else:
self.__index = data['index']
return self.__index
def _remove_old_modules(self):
# TODO use
change = False
if change:
self._flush_index(self)
self._index # reload index
def _flush_index(self):
data = {'version': self.version, 'index': self._index}
with open(self._get_path('index.json'), 'w') as f:
json.dump(data, f)
self.__index = None
def clear_cache(self):
shutil.rmtree(self._cache_directory())
def _get_hashed_path(self, path):
return self._get_path('%s.pkl' % hashlib.md5(path.encode("utf-8")).hexdigest())
def _get_path(self, file):
dir = self._cache_directory()
if not os.path.exists(dir):
os.makedirs(dir)
return os.path.join(dir, file)
def _cache_directory(self):
return os.path.join(settings.cache_directory, self.py_tag)
# is a singleton
ParserPickling = ParserPickling()
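# A minimal usage sketch of the module-level API (Parser is a hypothetical
# stand-in for jedi's parser class):
#
#   module = load_parser(path)        # from memory or disk cache, else None
#   if module is None:
#       module = Parser(...)          # reparse the source
#       save_parser(path, module)     # cache in memory and, if enabled, on disk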
| 29.724928
| 88
| 0.618662
| 1,303
| 10,374
| 4.737529
| 0.254029
| 0.033857
| 0.029159
| 0.020411
| 0.109833
| 0.07387
| 0.07387
| 0.018792
| 0.018792
| 0.018792
| 0
| 0.002463
| 0.295547
| 10,374
| 348
| 89
| 29.810345
| 0.842228
| 0.207634
| 0
| 0.245192
| 0
| 0
| 0.029448
| 0.013842
| 0
| 0
| 0
| 0.005747
| 0
| 1
| 0.134615
| false
| 0.014423
| 0.110577
| 0.009615
| 0.365385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe0a261cca22dd0888b296d89b5ce6c47723b470
| 4,569
|
py
|
Python
|
python-modules/robcoewmrobotconfigurator/robcoewmrobotconfigurator/run.py
|
yschiebelhut/ewm-cloud-robotics
|
bdf3a6c13850d266b70168912494300c32d4d803
|
[
"Apache-2.0"
] | null | null | null |
python-modules/robcoewmrobotconfigurator/robcoewmrobotconfigurator/run.py
|
yschiebelhut/ewm-cloud-robotics
|
bdf3a6c13850d266b70168912494300c32d4d803
|
[
"Apache-2.0"
] | null | null | null |
python-modules/robcoewmrobotconfigurator/robcoewmrobotconfigurator/run.py
|
yschiebelhut/ewm-cloud-robotics
|
bdf3a6c13850d266b70168912494300c32d4d803
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
#
# This file is part of ewm-cloud-robotics
# (see https://github.com/SAP/ewm-cloud-robotics).
#
# This file is licensed under the Apache Software License, v. 2 except as noted
# otherwise in the LICENSE file (https://github.com/SAP/ewm-cloud-robotics/blob/master/LICENSE)
#
"""Run the SAP EWM robot configurator."""
import sys
import signal
import traceback
import logging
import time
from robcoewmrobotconfigurator.ewm_robot_sync import EWMRobotSync
from robcoewmrobotconfigurator.robotconfigcontroller import RobotConfigurationController
from robcoewmrobotconfigurator.robco_robot_api import RobCoRobotAPI
_LOGGER = logging.getLogger(__name__)
class MainLoopController:
"""Control the main loop."""
def __init__(self):
"""Construct."""
# Shutdown Handler
self.shutdown = False
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
# Sleep handler
self.last_time = time.time()
def exit_gracefully(self, signum, frame):
"""Set shutdown flag on SIGTERM and SIGINT."""
self.shutdown = True
_LOGGER.info('Closing application because signal %s received', signum)
def sleep(self, seconds: float):
"""Sleep maximum n seconds after the last call."""
timediff = time.time() - self.last_time
if timediff < seconds:
time.sleep(seconds-timediff)
self.last_time = time.time()
def run_robotconfigurator():
"""Run one instance of the robot configurator."""
# Register handler to control main loop
loop_control = MainLoopController()
# Create CR watcher instances
k8s_rb = RobCoRobotAPI()
k8s_rc = RobotConfigurationController()
# Create EWM robot syncer instance
robotsync = EWMRobotSync(k8s_rc)
# Register callback functions
k8s_rb.register_callback('ConfigurationController', ['ADDED'], k8s_rc.robco_robot_cb)
k8s_rc.register_callback(
'EWMRobotSync', ['ADDED', 'MODIFIED', 'REPROCESS'], robotsync.robotconfiguration_cb)
# Start
k8s_rb.run()
k8s_rc.run(reprocess=True)
_LOGGER.info('SAP EWM Robot Configurator started')
try:
# Looping while K8S watchers are running
while loop_control.shutdown is False:
# Refresh bearer token when using OAuth
if robotsync.odataconfig.authorization == robotsync.odataconfig.AUTH_OAUTH:
robotsync.odatahandler.refresh_access_token()
# Check if a K8S CR handler exception occurred
for k, exc in k8s_rb.thread_exceptions.items():
_LOGGER.error(
'Uncovered exception in "%s" thread of RobCoRobotAPI. Raising it in main '
'thread', k)
raise exc
for k, exc in k8s_rc.thread_exceptions.items():
_LOGGER.error(
'Uncovered exception in "%s" thread of RobotConfigurationController. Raising '
'it in main thread', k)
raise exc
# Sleep maximum 1.0 second
loop_control.sleep(1.0)
except KeyboardInterrupt:
_LOGGER.info('Keyboard interrupt - terminating')
except SystemExit:
_LOGGER.info('System exit - terminating')
finally:
# Stop K8S CR watchers
_LOGGER.info('Stopping K8S CR watchers')
k8s_rb.stop_watcher()
k8s_rc.stop_watcher()
# Shutdown threadpool executor
robotsync.executor.shutdown()
if __name__ == '__main__':
# Create root logger if running as main program
ROOT_LOGGER = logging.getLogger()
ROOT_LOGGER.setLevel(logging.INFO)
# Create console handler and set level to info
CH = logging.StreamHandler()
CH.setLevel(logging.INFO)
# Create formatter
FORMATTER = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Add formatter to ch
CH.setFormatter(FORMATTER)
# Add ch to logger
ROOT_LOGGER.addHandler(CH)
# Run robot master
try:
run_robotconfigurator()
except Exception: # pylint: disable=broad-except
EXC_INFO = sys.exc_info()
_LOGGER.critical(
'Unexpected error "%s" - "%s" - TRACEBACK: %s', EXC_INFO[0], EXC_INFO[1],
traceback.format_exception(*EXC_INFO))
sys.exit('Application terminated with exception: "{}" - "{}"'.format(
EXC_INFO[0], EXC_INFO[1]))
| 33.595588
| 98
| 0.661633
| 527
| 4,569
| 5.593928
| 0.373814
| 0.011872
| 0.016282
| 0.011533
| 0.118725
| 0.110583
| 0.084125
| 0.061737
| 0.041384
| 0.041384
| 0
| 0.008986
| 0.244911
| 4,569
| 135
| 99
| 33.844444
| 0.845507
| 0.245787
| 0
| 0.106667
| 0
| 0
| 0.161891
| 0.015362
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053333
| false
| 0
| 0.106667
| 0
| 0.173333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe0d4c9278280b1296bb8358bef8f6502e5d0540
| 82,820
|
py
|
Python
|
ninjabackend.py
|
tp-m/meson
|
2d1aa395e86848ca948d30d83cc5357777e5b490
|
[
"Apache-2.0"
] | null | null | null |
ninjabackend.py
|
tp-m/meson
|
2d1aa395e86848ca948d30d83cc5357777e5b490
|
[
"Apache-2.0"
] | null | null | null |
ninjabackend.py
|
tp-m/meson
|
2d1aa395e86848ca948d30d83cc5357777e5b490
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import backends
import environment, mesonlib
import build
import mlog
import dependencies
from mesonlib import File
from meson_install import InstallData
from build import InvalidArguments
from coredata import MesonException
import os, sys, pickle, re
import subprocess, shutil
if mesonlib.is_windows():
quote_char = '"'
execute_wrapper = 'cmd /c'
else:
quote_char = "'"
execute_wrapper = ''
def ninja_quote(text):
return text.replace(' ', '$ ').replace(':', '$:')
class RawFilename():
    def __init__(self, fname):
        self.fname = fname
    def split(self, c):
        return self.fname.split(c)
    def startswith(self, s):
        return self.fname.startswith(s)
class NinjaBuildElement():
    def __init__(self, outfilenames, rule, infilenames):
        if isinstance(outfilenames, str):
            self.outfilenames = [outfilenames]
        else:
            self.outfilenames = outfilenames
        assert(isinstance(rule, str))
        self.rule = rule
        if isinstance(infilenames, str):
            self.infilenames = [infilenames]
        else:
            self.infilenames = infilenames
        self.deps = []
        self.orderdeps = []
        self.elems = []
    def add_dep(self, dep):
        if isinstance(dep, list):
            self.deps += dep
        else:
            self.deps.append(dep)
    def add_orderdep(self, dep):
        if isinstance(dep, list):
            self.orderdeps += dep
        else:
            self.orderdeps.append(dep)
    def add_item(self, name, elems):
        if isinstance(elems, str):
            elems = [elems]
        self.elems.append((name, elems))
    def write(self, outfile):
        line = 'build %s: %s %s' % (' '.join([ninja_quote(i) for i in self.outfilenames]),
                                    self.rule,
                                    ' '.join([ninja_quote(i) for i in self.infilenames]))
        if len(self.deps) > 0:
            line += ' | ' + ' '.join([ninja_quote(x) for x in self.deps])
        if len(self.orderdeps) > 0:
            line += ' || ' + ' '.join([ninja_quote(x) for x in self.orderdeps])
        line += '\n'
        # This is the only way I could find to make this work on all
        # platforms including Windows command shell. Slash is a dir separator
        # on Windows, too, so all characters are unambiguous and, more importantly,
        # do not require quoting.
        line = line.replace('\\', '/')
        outfile.write(line)
        for e in self.elems:
            (name, elems) = e
            should_quote = True
            if name == 'DEPFILE' or name == 'DESC' or name == 'pool':
                should_quote = False
            line = ' %s = ' % name
            q_templ = quote_char + "%s" + quote_char
            noq_templ = "%s"
            newelems = []
            for i in elems:
                if not should_quote or i == '&&': # Hackety hack hack
                    templ = noq_templ
                else:
                    templ = q_templ
                i = i.replace('\\', '\\\\')
                if quote_char == '"':
                    i = i.replace('"', '\\"')
                newelems.append(templ % ninja_quote(i))
            line += ' '.join(newelems)
            line += '\n'
            outfile.write(line)
        outfile.write('\n')
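# Illustrative note (not in the original): for a hypothetical element
#   e = NinjaBuildElement('foo.o', 'c_COMPILER', 'foo.c'); e.add_dep('foo.h')
# write() emits Ninja syntax like
#   build foo.o: c_COMPILER foo.c | foo.h
# followed by one indented ' NAME = value' line per add_item() entry, with
# order-only deps (if any) listed after '||'.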
class NinjaBackend(backends.Backend):
    def __init__(self, build):
        super().__init__(build)
        self.source_suffix_in_objs = True
        self.ninja_filename = 'build.ninja'
        self.fortran_deps = {}
        self.all_outputs = {}
    def check_outputs(self, elem):
        for n in elem.outfilenames:
            if n in self.all_outputs:
                raise MesonException('Multiple producers for Ninja target "%s". Please rename your targets.' % n)
            self.all_outputs[n] = True
    def detect_vs_dep_prefix(self, outfile, tempfilename):
        '''VS writes its dependency in a locale dependent format.
        Detect the search prefix to use.'''
        if shutil.which('cl') is None:
            return outfile
        outfile.close()
        open(os.path.join(self.environment.get_scratch_dir(), 'incdetect.c'),
             'w').write('''#include<stdio.h>
int dummy;
''')
        pc = subprocess.Popen(['cl', '/showIncludes', '/c', 'incdetect.c'],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              cwd=self.environment.get_scratch_dir())
        (stdo, _) = pc.communicate()
        for line in stdo.split(b'\r\n'):
            if line.endswith(b'stdio.h'):
                matchstr = b':'.join(line.split(b':')[0:2]) + b':'
                binfile = open(tempfilename, 'ab')
                binfile.write(b'msvc_deps_prefix = ' + matchstr + b'\r\n')
                binfile.close()
                return open(tempfilename, 'a')
        raise MesonException('Could not determine vs dep dependency prefix string.')
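    # Added background note (hedged): Ninja's 'deps = msvc' mode scans cl.exe's
    # /showIncludes output for a fixed prefix ("Note: including file:" in the
    # English locale). detect_vs_dep_prefix() above compiles a tiny probe file
    # and records whatever prefix the local cl.exe actually prints as
    # msvc_deps_prefix, so header dependency scanning works in any locale.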
    def generate(self, interp):
        self.interpreter = interp
        outfilename = os.path.join(self.environment.get_build_dir(), self.ninja_filename)
        tempfilename = outfilename + '~'
        outfile = open(tempfilename, 'w')
        outfile.write('# This is the build file for project "%s"\n' % self.build.get_project())
        outfile.write('# It is autogenerated by the Meson build system.\n')
        outfile.write('# Do not edit by hand.\n\n')
        outfile.write('ninja_required_version = 1.5.1\n\n')
        outfile = self.detect_vs_dep_prefix(outfile, tempfilename)
        self.generate_rules(outfile)
        self.generate_phony(outfile)
        outfile.write('# Build rules for targets\n\n')
        [self.generate_target(t, outfile) for t in self.build.get_targets().values()]
        if len(self.build.pot) > 0:
            outfile.write('# Build rules for localisation.\n\n')
            self.generate_po(outfile)
        outfile.write('# Test rules\n\n')
        self.generate_tests(outfile)
        outfile.write('# Install rules\n\n')
        self.generate_install(outfile)
        if self.environment.coredata.get_builtin_option('coverage'):
            outfile.write('# Coverage rules\n\n')
            self.generate_coverage_rules(outfile)
        outfile.write('# Suffix\n\n')
        self.generate_ending(outfile)
        # Only overwrite the old build file after the new one has been
        # fully created.
        outfile.close()
        os.replace(tempfilename, outfilename)
        self.generate_compdb()
    # http://clang.llvm.org/docs/JSONCompilationDatabase.html
    def generate_compdb(self):
        ninja_exe = environment.detect_ninja()
        builddir = self.environment.get_build_dir()
        jsondb = subprocess.check_output([ninja_exe, '-t', 'compdb', 'c_COMPILER', 'cpp_COMPILER'], cwd=builddir)
        open(os.path.join(builddir, 'compile_commands.json'), 'wb').write(jsondb)
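    # Illustrative note (not in the original): 'ninja -t compdb <rule names>'
    # prints a Clang JSON compilation database, one entry per matching build
    # statement, roughly:
    #   [{"directory": "<builddir>", "command": "cc ... -c foo.c", "file": "foo.c"}]
    # Saving it as compile_commands.json makes the build consumable by clang tooling.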
    # Get all generated headers. Any source file might need them so
    # we need to add an order dependency to them.
    def get_generated_headers(self, target):
        header_deps = []
        for gensource in target.get_generated_sources():
            if isinstance(gensource, build.CustomTarget):
                continue
            for src in gensource.get_outfilelist():
                if self.environment.is_header(src):
                    header_deps.append(os.path.join(self.get_target_private_dir(target), src))
        for dep in target.link_targets:
            if isinstance(dep, (build.StaticLibrary, build.SharedLibrary)):
                header_deps += self.get_generated_headers(dep)
        return header_deps
    def generate_target(self, target, outfile):
        if isinstance(target, build.CustomTarget):
            self.generate_custom_target(target, outfile)
        if isinstance(target, build.RunTarget):
            self.generate_run_target(target, outfile)
        name = target.get_id()
        gen_src_deps = []
        if name in self.processed_targets:
            return
        if isinstance(target, build.Jar):
            self.generate_jar_target(target, outfile)
            return
        if 'rust' in self.environment.coredata.compilers.keys() and self.has_rust(target):
            self.generate_rust_target(target, outfile)
            return
        if 'cs' in self.environment.coredata.compilers.keys() and self.has_cs(target):
            self.generate_cs_target(target, outfile)
            return
        if 'vala' in self.environment.coredata.compilers.keys() and self.has_vala(target):
            gen_src_deps += self.generate_vala_compile(target, outfile)
        if 'swift' in self.environment.coredata.compilers.keys() and self.has_swift(target):
            self.generate_swift_target(target, outfile)
            return
        self.scan_fortran_module_outputs(target)
        # The following deals with C/C++ compilation.
        (gen_src, gen_other_deps) = self.process_dep_gens(outfile, target)
        gen_src_deps += gen_src
        self.process_target_dependencies(target, outfile)
        self.generate_custom_generator_rules(target, outfile)
        outname = self.get_target_filename(target)
        obj_list = []
        use_pch = self.environment.coredata.get_builtin_option('use_pch')
        is_unity = self.environment.coredata.get_builtin_option('unity')
        if use_pch and target.has_pch():
            pch_objects = self.generate_pch(target, outfile)
        else:
            pch_objects = []
        header_deps = gen_other_deps
        unity_src = []
        unity_deps = [] # Generated sources that must be built before compiling a Unity target.
        header_deps += self.get_generated_headers(target)
        for gensource in target.get_generated_sources():
            if isinstance(gensource, build.CustomTarget):
                for src in gensource.output:
                    src = os.path.join(self.get_target_dir(gensource), src)
                    if self.environment.is_source(src) and not self.environment.is_header(src):
                        if is_unity:
                            unity_deps.append(os.path.join(self.environment.get_build_dir(), RawFilename(src)))
                        else:
                            obj_list.append(self.generate_single_compile(target, outfile, RawFilename(src), True,
                                                                         header_deps))
                    elif self.environment.is_object(src):
                        obj_list.append(src)
                    elif self.environment.is_library(src):
                        pass
                    else:
                        # Assume anything not specifically a source file is a header. This is because
                        # people generate files with weird suffixes (.inc, .fh) that they then include
                        # in their source files.
                        header_deps.append(RawFilename(src))
            else:
                for src in gensource.get_outfilelist():
                    if self.environment.is_object(src):
                        obj_list.append(os.path.join(self.get_target_private_dir(target), src))
                    elif not self.environment.is_header(src):
                        if is_unity:
                            if self.has_dir_part(src):
                                rel_src = src
                            else:
                                rel_src = os.path.join(self.get_target_private_dir(target), src)
                            unity_deps.append(rel_src)
                            abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
                            unity_src.append(abs_src)
                        else:
                            obj_list.append(self.generate_single_compile(target, outfile, src, True,
                                                                         header_deps=header_deps))
        src_list = []
        for src in gen_src_deps:
            src_list.append(src)
            if is_unity:
                unity_src.append(os.path.join(self.environment.get_build_dir(), src))
                header_deps.append(src)
            else:
                # Generated targets are ordered deps because they must exist
                # before the sources compiling them are used. After the first
                # compile we get precise dependency info from dep files.
                # This should work in all cases. If it does not, then just
                # move them from orderdeps to proper deps.
                if self.environment.is_header(src):
                    header_deps.append(src)
                else:
                    obj_list.append(self.generate_single_compile(target, outfile, src, True, [], header_deps))
        for src in target.get_sources():
            if src.endswith('.vala'):
                continue
            if not self.environment.is_header(src):
                src_list.append(src)
                if is_unity:
                    abs_src = os.path.join(self.environment.get_build_dir(),
                                           src.rel_to_builddir(self.build_to_src))
                    unity_src.append(abs_src)
                else:
                    obj_list.append(self.generate_single_compile(target, outfile, src, False, [], header_deps))
        obj_list += self.flatten_object_list(target)
        if is_unity:
            for src in self.generate_unity_files(target, unity_src):
                obj_list.append(self.generate_single_compile(target, outfile, src, True, unity_deps + header_deps))
        linker = self.determine_linker(target, src_list)
        elem = self.generate_link(target, outfile, outname, obj_list, linker, pch_objects)
        self.generate_shlib_aliases(target, self.get_target_dir(target))
        elem.write(outfile)
        self.processed_targets[name] = True
    def process_target_dependencies(self, target, outfile):
        for t in target.get_dependencies():
            tname = t.get_basename() + t.type_suffix()
            if not tname in self.processed_targets:
                self.generate_target(t, outfile)
    def generate_custom_target(self, target, outfile):
        (srcs, ofilenames, cmd) = self.eval_custom_target_command(target)
        deps = []
        for i in target.get_dependencies():
            # FIXME, should not grab element at zero but rather expand all.
            if isinstance(i, list):
                i = i[0]
            fname = i.get_filename()
            if isinstance(fname, list):
                fname = fname[0]
            deps.append(os.path.join(self.get_target_dir(i), fname))
        if target.build_always:
            deps.append('PHONY')
        elem = NinjaBuildElement(ofilenames, 'CUSTOM_COMMAND', srcs)
        for i in target.depend_files:
            if isinstance(i, mesonlib.File):
                deps.append(i.rel_to_builddir(self.build_to_src))
            else:
                deps.append(os.path.join(self.build_to_src, i))
        elem.add_dep(deps)
        for d in target.extra_depends:
            tmp = d.get_filename()
            if not isinstance(tmp, list):
                tmp = [tmp]
            for fname in tmp:
                elem.add_dep(os.path.join(self.get_target_dir(d), fname))
        elem.add_item('COMMAND', cmd)
        elem.add_item('description', 'Generating %s with a custom command.' % target.name)
        elem.write(outfile)
        self.check_outputs(elem)
        self.processed_targets[target.name + target.type_suffix()] = True
    def generate_run_target(self, target, outfile):
        runnerscript = os.path.join(self.environment.get_script_dir(), 'commandrunner.py')
        deps = []
        arg_strings = []
        for i in target.args:
            if isinstance(i, str):
                arg_strings.append(i)
            elif isinstance(i, (build.BuildTarget, build.CustomTarget)):
                relfname = self.get_target_filename(i)
                deps.append(relfname)
                arg_strings.append(os.path.join(self.environment.get_build_dir(), relfname))
            else:
                mlog.debug(str(i))
                raise MesonException('Unreachable code in generate_run_target.')
        elem = NinjaBuildElement(target.name, 'CUSTOM_COMMAND', deps)
        cmd = [sys.executable, runnerscript, self.environment.get_source_dir(), self.environment.get_build_dir(), target.subdir]
        texe = target.command
        try:
            texe = texe.held_object
        except AttributeError:
            pass
        if isinstance(texe, build.Executable):
            abs_exe = os.path.join(self.environment.get_build_dir(), self.get_target_filename(texe))
            deps.append(self.get_target_filename(texe))
            if self.environment.is_cross_build() \
               and self.environment.cross_info.config['binaries'].get('exe_wrapper', None) is not None:
                cmd += [self.environment.cross_info.config['binaries']['exe_wrapper']]
            cmd.append(abs_exe)
        else:
            cmd.append(target.command)
        cmd += arg_strings
        elem.add_item('COMMAND', cmd)
        elem.add_item('description', 'Running external command %s.' % target.name)
        elem.add_item('pool', 'console')
        elem.write(outfile)
        self.check_outputs(elem)
        self.processed_targets[target.name + target.type_suffix()] = True
    def generate_po(self, outfile):
        for p in self.build.pot:
            (packagename, languages, subdir) = p
            input_file = os.path.join(subdir, 'POTFILES')
            elem = NinjaBuildElement('pot', 'GEN_POT', [])
            elem.add_item('PACKAGENAME', packagename)
            elem.add_item('OUTFILE', packagename + '.pot')
            elem.add_item('FILELIST', os.path.join(self.environment.get_source_dir(), input_file))
            elem.add_item('OUTDIR', os.path.join(self.environment.get_source_dir(), subdir))
            elem.write(outfile)
            self.check_outputs(elem)
            for l in languages:
                infile = os.path.join(self.environment.get_source_dir(), subdir, l + '.po')
                outfilename = os.path.join(subdir, l + '.gmo')
                lelem = NinjaBuildElement(outfilename, 'GEN_GMO', infile)
                lelem.add_item('INFILE', infile)
                lelem.add_item('OUTFILE', outfilename)
                lelem.write(outfile)
                self.check_outputs(lelem)
    def generate_coverage_rules(self, outfile):
        (gcovr_exe, lcov_exe, genhtml_exe) = environment.find_coverage_tools()
        added_rule = False
        if gcovr_exe:
            added_rule = True
            elem = NinjaBuildElement('coverage-xml', 'CUSTOM_COMMAND', '')
            elem.add_item('COMMAND', [gcovr_exe, '-x', '-r', self.environment.get_build_dir(),
                                      '-o', os.path.join(self.environment.get_log_dir(), 'coverage.xml')])
            elem.add_item('DESC', 'Generating XML coverage report.')
            elem.write(outfile)
            elem = NinjaBuildElement('coverage-text', 'CUSTOM_COMMAND', '')
            elem.add_item('COMMAND', [gcovr_exe, '-r', self.environment.get_build_dir(),
                                      '-o', os.path.join(self.environment.get_log_dir(), 'coverage.txt')])
            elem.add_item('DESC', 'Generating text coverage report.')
            elem.write(outfile)
            self.check_outputs(elem)
        if lcov_exe and genhtml_exe:
            added_rule = True
            phony_elem = NinjaBuildElement('coverage-html', 'phony', 'coveragereport/index.html')
            phony_elem.write(outfile)
            elem = NinjaBuildElement('coveragereport/index.html', 'CUSTOM_COMMAND', '')
            command = [lcov_exe, '--directory', self.environment.get_build_dir(),
                       '--capture', '--output-file', 'coverage.info', '--no-checksum',
                       '&&', genhtml_exe, '--prefix', self.environment.get_build_dir(),
                       '--output-directory', self.environment.get_log_dir(), '--title', 'Code coverage',
                       '--legend', '--show-details', 'coverage.info']
            elem.add_item('COMMAND', command)
            elem.add_item('DESC', 'Generating HTML coverage report.')
            self.check_outputs(elem)
            elem.write(outfile)
        if not added_rule:
            mlog.log(mlog.red('Warning:'), 'coverage requested but neither gcovr nor lcov/genhtml found.')
    def generate_install(self, outfile):
        script_root = self.environment.get_script_dir()
        install_script = os.path.join(script_root, 'meson_install.py')
        install_data_file = os.path.join(self.environment.get_scratch_dir(), 'install.dat')
        depfixer = os.path.join(script_root, 'depfixer.py')
        d = InstallData(self.environment.get_source_dir(),
                        self.environment.get_build_dir(),
                        self.environment.get_prefix(), depfixer)
        elem = NinjaBuildElement('install', 'CUSTOM_COMMAND', 'PHONY')
        elem.add_dep('all')
        elem.add_item('DESC', 'Installing files.')
        elem.add_item('COMMAND', [sys.executable, install_script, install_data_file])
        elem.add_item('pool', 'console')
        self.generate_depmf_install(d)
        self.generate_target_install(d)
        self.generate_header_install(d)
        self.generate_man_install(d)
        self.generate_data_install(d)
        self.generate_po_install(d, elem)
        self.generate_custom_install_script(d)
        self.generate_subdir_install(d)
        elem.write(outfile)
        self.check_outputs(elem)
        ofile = open(install_data_file, 'wb')
        pickle.dump(d, ofile)
    def generate_po_install(self, d, elem):
        for p in self.build.pot:
            (package_name, languages, subdir) = p
            # FIXME: assumes only one po package per source
            d.po_package_name = package_name
            for lang in languages:
                rel_src = os.path.join(subdir, lang + '.gmo')
                src_file = os.path.join(self.environment.get_build_dir(), rel_src)
                d.po.append((src_file, self.environment.coredata.get_builtin_option('localedir'), lang))
                elem.add_dep(rel_src)
    def generate_target_install(self, d):
        libdir = self.environment.get_libdir()
        bindir = self.environment.get_bindir()
        should_strip = self.environment.coredata.get_builtin_option('strip')
        for t in self.build.get_targets().values():
            if t.should_install():
                outdir = t.get_custom_install_dir()
                if outdir is None:
                    if isinstance(t, build.Executable):
                        outdir = bindir
                    else:
                        outdir = libdir
                i = [self.get_target_filename(t), outdir, t.get_aliaslist(),
                     should_strip, t.install_rpath]
                d.targets.append(i)
    def generate_custom_install_script(self, d):
        d.install_scripts = self.build.install_scripts
    def generate_header_install(self, d):
        incroot = self.environment.get_includedir()
        headers = self.build.get_headers()
        for h in headers:
            outdir = h.get_custom_install_dir()
            if outdir is None:
                outdir = os.path.join(incroot, h.get_install_subdir())
            for f in h.get_sources():
                abspath = os.path.join(self.environment.get_source_dir(), h.get_source_subdir(), f)
                i = [abspath, outdir]
                d.headers.append(i)
    def generate_man_install(self, d):
        manroot = self.environment.get_mandir()
        man = self.build.get_man()
        for m in man:
            for f in m.get_sources():
                num = f.split('.')[-1]
                subdir = m.get_custom_install_dir()
                if subdir is None:
                    subdir = os.path.join(manroot, 'man' + num)
                srcabs = os.path.join(self.environment.get_source_dir(), m.get_source_subdir(), f)
                dstabs = os.path.join(subdir, f + '.gz')
                i = [srcabs, dstabs]
                d.man.append(i)
    def generate_data_install(self, d):
        data = self.build.get_data()
        for de in data:
            assert(isinstance(de, build.Data))
            subdir = de.install_dir
            for f in de.sources:
                if de.in_sourcetree:
                    srcprefix = self.environment.get_source_dir()
                else:
                    srcprefix = self.environment.get_build_dir()
                srcabs = os.path.join(srcprefix, de.source_subdir, f)
                dstabs = os.path.join(subdir, f)
                i = [srcabs, dstabs]
                d.data.append(i)
    def generate_subdir_install(self, d):
        for sd in self.build.get_install_subdirs():
            src_dir = os.path.join(self.environment.get_source_dir(), sd.source_subdir, sd.installable_subdir)
            dst_dir = os.path.join(self.environment.get_prefix(), sd.install_dir)
            d.install_subdirs.append([src_dir, dst_dir])
    def write_test_suite_targets(self, cmd, outfile):
        suites = {}
        for t in self.build.get_tests():
            for s in t.suite:
                suites[s] = True
        suites = list(suites.keys())
        suites.sort()
        for s in suites:
            if s == '':
                visible_name = 'for top level tests'
            else:
                visible_name = s
            elem = NinjaBuildElement('test-' + s, 'CUSTOM_COMMAND', ['all', 'PHONY'])
            elem.add_item('COMMAND', cmd + ['--suite=' + s])
            elem.add_item('DESC', 'Running test suite %s.' % visible_name)
            elem.add_item('pool', 'console')
            elem.write(outfile)
            self.check_outputs(elem)
    def generate_tests(self, outfile):
        self.serialise_tests()
        valgrind = environment.find_valgrind()
        script_root = self.environment.get_script_dir()
        test_script = os.path.join(script_root, 'meson_test.py')
        test_data = os.path.join(self.environment.get_scratch_dir(), 'meson_test_setup.dat')
        cmd = [sys.executable, test_script, test_data]
        elem = NinjaBuildElement('test', 'CUSTOM_COMMAND', ['all', 'PHONY'])
        elem.add_item('COMMAND', cmd)
        elem.add_item('DESC', 'Running all tests.')
        elem.add_item('pool', 'console')
        elem.write(outfile)
        self.check_outputs(elem)
        self.write_test_suite_targets(cmd, outfile)
        if valgrind:
            velem = NinjaBuildElement('test-valgrind', 'CUSTOM_COMMAND', ['all', 'PHONY'])
            velem.add_item('COMMAND', cmd + ['--wrapper=' + valgrind])
            velem.add_item('DESC', 'Running test suite under Valgrind.')
            velem.add_item('pool', 'console')
            velem.write(outfile)
            self.check_outputs(velem)
        # And then benchmarks.
        benchmark_script = os.path.join(script_root, 'meson_benchmark.py')
        benchmark_data = os.path.join(self.environment.get_scratch_dir(), 'meson_benchmark_setup.dat')
        cmd = [sys.executable, benchmark_script, benchmark_data]
        elem = NinjaBuildElement('benchmark', 'CUSTOM_COMMAND', ['all', 'PHONY'])
        elem.add_item('COMMAND', cmd)
        elem.add_item('DESC', 'Running benchmark suite.')
        elem.add_item('pool', 'console')
        elem.write(outfile)
        self.check_outputs(elem)
    def generate_rules(self, outfile):
        outfile.write('# Rules for compiling.\n\n')
        self.generate_compile_rules(outfile)
        outfile.write('# Rules for linking.\n\n')
        if self.environment.is_cross_build():
            self.generate_static_link_rules(True, outfile)
        self.generate_static_link_rules(False, outfile)
        self.generate_dynamic_link_rules(outfile)
        outfile.write('# Other rules\n\n')
        outfile.write('rule CUSTOM_COMMAND\n')
        outfile.write(' command = $COMMAND\n')
        outfile.write(' description = $DESC\n')
        outfile.write(' restat = 1\n\n')
        outfile.write('rule REGENERATE_BUILD\n')
        c = (quote_char + ninja_quote(sys.executable) + quote_char,
             quote_char + ninja_quote(self.environment.get_build_command()) + quote_char,
             quote_char + ninja_quote(self.environment.get_source_dir()) + quote_char,
             quote_char + ninja_quote(self.environment.get_build_dir()) + quote_char)
        outfile.write(" command = %s %s %s %s --backend ninja secret-handshake\n" % c)
        outfile.write(' description = Regenerating build files\n')
        outfile.write(' generator = 1\n\n')
        if len(self.build.pot) > 0:
            self.generate_gettext_rules(outfile)
        outfile.write('\n')
    def generate_gettext_rules(self, outfile):
        rule = 'rule GEN_POT\n'
        command = " command = xgettext --package-name=$PACKAGENAME -p $OUTDIR -f $FILELIST -D '%s' -k_ -o $OUTFILE\n" % \
                  self.environment.get_source_dir()
        desc = " description = Creating pot file for package $PACKAGENAME.\n"
        outfile.write(rule)
        outfile.write(command)
        outfile.write(desc)
        outfile.write('\n')
        rule = 'rule GEN_GMO\n'
        command = ' command = msgfmt $INFILE -o $OUTFILE\n'
        desc = ' description = Generating gmo file $OUTFILE\n'
        outfile.write(rule)
        outfile.write(command)
        outfile.write(desc)
        outfile.write('\n')
    def generate_phony(self, outfile):
        outfile.write('# Phony build target, always out of date\n')
        outfile.write('build PHONY: phony\n')
        outfile.write('\n')
    def generate_jar_target(self, target, outfile):
        fname = target.get_filename()
        subdir = target.get_subdir()
        outname_rel = os.path.join(self.get_target_dir(target), fname)
        src_list = target.get_sources()
        class_list = []
        compiler = self.get_compiler_for_source(src_list[0])
        assert(compiler.get_language() == 'java')
        c = 'c'
        m = ''
        e = ''
        f = 'f'
        main_class = target.get_main_class()
        if main_class != '':
            e = 'e'
        for src in src_list:
            plain_class_path = self.generate_single_java_compile(src, target, compiler, outfile)
            class_list.append(plain_class_path)
        class_dep_list = [os.path.join(self.get_target_private_dir(target), i) for i in class_list]
        jar_rule = 'java_LINKER'
        commands = [c + m + e + f]
        if e != '':
            commands.append(main_class)
        commands.append(self.get_target_filename(target))
        for cls in class_list:
            commands += ['-C', self.get_target_private_dir(target), cls]
        elem = NinjaBuildElement(outname_rel, jar_rule, [])
        elem.add_dep(class_dep_list)
        elem.add_item('ARGS', commands)
        elem.write(outfile)
        self.check_outputs(elem)
    def generate_cs_resource_tasks(self, target, outfile):
        args = []
        deps = []
        for r in target.resources:
            rel_sourcefile = os.path.join(self.build_to_src, target.subdir, r)
            if r.endswith('.resources'):
                a = '-resource:' + rel_sourcefile
            elif r.endswith('.txt') or r.endswith('.resx'):
                ofilebase = os.path.splitext(os.path.basename(r))[0] + '.resources'
                ofilename = os.path.join(self.get_target_private_dir(target), ofilebase)
                elem = NinjaBuildElement(ofilename, "CUSTOM_COMMAND", rel_sourcefile)
                elem.add_item('COMMAND', ['resgen', rel_sourcefile, ofilename])
                elem.add_item('DESC', 'Compiling resource %s.' % rel_sourcefile)
                elem.write(outfile)
                self.check_outputs(elem)
                deps.append(ofilename)
                a = '-resource:' + ofilename
            else:
                raise InvalidArguments('Unknown resource file %s.' % r)
            args.append(a)
        return (args, deps)
    def generate_cs_target(self, target, outfile):
        buildtype = self.environment.coredata.get_builtin_option('buildtype')
        fname = target.get_filename()
        outname_rel = os.path.join(self.get_target_dir(target), fname)
        src_list = target.get_sources()
        compiler = self.get_compiler_for_source(src_list[0])
        assert(compiler.get_language() == 'cs')
        rel_srcs = [s.rel_to_builddir(self.build_to_src) for s in src_list]
        deps = []
        commands = target.extra_args.get('cs', [])
        commands += compiler.get_buildtype_args(buildtype)
        if isinstance(target, build.Executable):
            commands.append('-target:exe')
        elif isinstance(target, build.SharedLibrary):
            commands.append('-target:library')
        else:
            raise MesonException('Unknown C# target type.')
        (resource_args, resource_deps) = self.generate_cs_resource_tasks(target, outfile)
        commands += resource_args
        deps += resource_deps
        commands += compiler.get_output_args(outname_rel)
        for l in target.link_targets:
            lname = os.path.join(self.get_target_dir(l), l.get_filename())
            commands += compiler.get_link_args(lname)
            deps.append(lname)
        if '-g' in commands:
            outputs = [outname_rel, outname_rel + '.mdb']
        else:
            outputs = [outname_rel]
        elem = NinjaBuildElement(outputs, 'cs_COMPILER', rel_srcs)
        elem.add_dep(deps)
        elem.add_item('ARGS', commands)
        self.check_outputs(elem)
        elem.write(outfile)
    def generate_single_java_compile(self, src, target, compiler, outfile):
        args = []
        args += compiler.get_buildtype_args(self.environment.coredata.get_builtin_option('buildtype'))
        args += compiler.get_output_args(self.get_target_private_dir(target))
        for i in target.include_dirs:
            for idir in i.get_incdirs():
                args += ['-sourcepath', os.path.join(self.build_to_src, i.curdir, idir)]
        rel_src = src.rel_to_builddir(self.build_to_src)
        plain_class_path = src.fname[:-4] + 'class'
        rel_obj = os.path.join(self.get_target_private_dir(target), plain_class_path)
        element = NinjaBuildElement(rel_obj, compiler.get_language() + '_COMPILER', rel_src)
        element.add_item('ARGS', args)
        element.write(outfile)
        self.check_outputs(element)
        return plain_class_path
    def generate_java_link(self, outfile):
        rule = 'rule java_LINKER\n'
        command = ' command = jar $ARGS\n'
        description = ' description = Creating jar $out.\n'
        outfile.write(rule)
        outfile.write(command)
        outfile.write(description)
        outfile.write('\n')
    def split_vala_sources(self, sources):
        src = []
        vapi_src = []
        for s in sources:
            if s.endswith('.vapi'):
                vapi_src.append(s)
            else:
                src.append(s)
        return (src, vapi_src)
    def determine_dep_vapis(self, target):
        result = []
        for dep in target.link_targets:
            for i in dep.sources:
                if hasattr(i, 'fname'):
                    i = i.fname
                if i.endswith('vala'):
                    vapiname = os.path.splitext(os.path.split(i)[1])[0] + '.vapi'
                    fullname = os.path.join(self.get_target_private_dir(dep), vapiname)
                    result.append(fullname)
                    break
        return result
    def generate_vala_compile(self, target, outfile):
        """Vala is compiled into C. Set up all necessary build steps here."""
        valac = self.environment.coredata.compilers['vala']
        (src, vapi_src) = self.split_vala_sources(target.get_sources())
        vapi_src = [x.rel_to_builddir(self.build_to_src) for x in vapi_src]
        extra_dep_files = []
        vala_input_files = []
        for s in src:
            if s.endswith('.vala'):
                vala_input_files.append(s.rel_to_builddir(self.build_to_src))
        namebase = os.path.splitext(os.path.split(vala_input_files[0])[1])[0]
        hname = namebase + '.h'
        vapiname = namebase + '.vapi'
        outputs = [vapiname]
        args = ['-d', self.get_target_private_dir(target)]
        args += ['-C'] #, '-o', cname]
        if not isinstance(target, build.Executable):
            outputs.append(hname)
            args += ['-H', hname]
            args += ['--vapi=' + vapiname]
        for src in vala_input_files:
            namebase = os.path.splitext(os.path.split(src)[1])[0] + '.c'
            outputs.append(namebase)
        if self.environment.coredata.get_builtin_option('werror'):
            args += valac.get_werror_args()
        for d in target.external_deps:
            if isinstance(d, dependencies.PkgConfigDependency):
                if d.name == 'glib-2.0' and d.version_requirement is not None \
                   and d.version_requirement.startswith(('>=', '==')):
                    args += ['--target-glib', d.version_requirement[2:]]
                args += ['--pkg', d.name]
        extra_args = []
        for a in target.extra_args.get('vala', []):
            if isinstance(a, File):
                relname = a.rel_to_builddir(self.build_to_src)
                extra_dep_files.append(relname)
                extra_args.append(relname)
            else:
                extra_args.append(a)
        dependency_vapis = self.determine_dep_vapis(target)
        extra_dep_files += dependency_vapis
        args += extra_args
        args += dependency_vapis
        outputs = [os.path.join(self.get_target_private_dir(target), x) for x in outputs]
        element = NinjaBuildElement(outputs,
                                    valac.get_language() + '_COMPILER',
                                    vala_input_files + vapi_src)
        element.add_item('ARGS', args)
        element.add_dep(extra_dep_files)
        element.write(outfile)
        self.check_outputs(element)
        return outputs
    def generate_rust_target(self, target, outfile):
        rustc = self.environment.coredata.compilers['rust']
        relsrc = []
        for i in target.get_sources():
            if not rustc.can_compile(i):
                raise InvalidArguments('Rust target %s contains a non-rust source file.' % target.get_basename())
            relsrc.append(i.rel_to_builddir(self.build_to_src))
        target_name = os.path.join(target.subdir, target.get_filename())
        args = ['--crate-type']
        if isinstance(target, build.Executable):
            cratetype = 'bin'
        elif isinstance(target, build.SharedLibrary):
            cratetype = 'rlib'
        elif isinstance(target, build.StaticLibrary):
            cratetype = 'rlib'
        else:
            raise InvalidArguments('Unknown target type for rustc.')
        args.append(cratetype)
        args += rustc.get_buildtype_args(self.environment.coredata.get_builtin_option('buildtype'))
        depfile = target.name + '.d'
        args += ['--out-dir', target.subdir]
        args += ['--emit', 'dep-info', '--emit', 'link']
        orderdeps = [os.path.join(t.subdir, t.get_filename()) for t in target.link_targets]
        linkdirs = {}
        for d in target.link_targets:
            linkdirs[d.subdir] = True
        for d in linkdirs.keys():
            if d == '':
                d = '.'
            args += ['-L', d]
        element = NinjaBuildElement(target_name, 'rust_COMPILER', relsrc)
        if len(orderdeps) > 0:
            element.add_orderdep(orderdeps)
        element.add_item('ARGS', args)
        element.add_item('targetdep', depfile)
        element.add_item('cratetype', cratetype)
        element.write(outfile)
        self.check_outputs(element)
    def swift_module_file_name(self, target):
        return os.path.join(self.get_target_private_dir(target),
                            self.target_swift_modulename(target) + '.swiftmodule')
    def target_swift_modulename(self, target):
        return target.name
    def is_swift_target(self, target):
        for s in target.sources:
            if s.endswith('swift'):
                return True
        return False
    def determine_swift_dep_modules(self, target):
        result = []
        for l in target.link_targets:
            if self.is_swift_target(l):
                result.append(self.swift_module_file_name(l))
        return result
    def determine_swift_dep_dirs(self, target):
        result = []
        for l in target.link_targets:
            result.append(self.get_target_private_dir_abs(l))
        return result
    def get_swift_link_deps(self, target):
        result = []
        for l in target.link_targets:
            result.append(self.get_target_filename(l))
        return result
    def split_swift_generated_sources(self, target):
        all_srcs = []
        for genlist in target.get_generated_sources():
            if isinstance(genlist, build.CustomTarget):
                for ifile in genlist.get_filename():
                    rel = os.path.join(self.get_target_dir(genlist), ifile)
                    all_srcs.append(rel)
            else:
                for ifile in genlist.get_outfilelist():
                    rel = os.path.join(self.get_target_private_dir(target), ifile)
                    all_srcs.append(rel)
        srcs = []
        others = []
        for i in all_srcs:
            if i.endswith('.swift'):
                srcs.append(i)
            else:
                others.append(i)
        return (srcs, others)
    def generate_swift_target(self, target, outfile):
        module_name = self.target_swift_modulename(target)
        swiftc = self.environment.coredata.compilers['swift']
        abssrc = []
        abs_headers = []
        header_imports = []
        for i in target.get_sources():
            if swiftc.can_compile(i):
                relsrc = i.rel_to_builddir(self.build_to_src)
                abss = os.path.normpath(os.path.join(self.environment.get_build_dir(), relsrc))
                abssrc.append(abss)
            elif self.environment.is_header(i):
                relh = i.rel_to_builddir(self.build_to_src)
                absh = os.path.normpath(os.path.join(self.environment.get_build_dir(), relh))
                abs_headers.append(absh)
                header_imports += swiftc.get_header_import_args(absh)
            else:
                raise InvalidArguments('Swift target %s contains a non-swift source file.' % target.get_basename())
        os.makedirs(self.get_target_private_dir_abs(target), exist_ok=True)
        compile_args = swiftc.get_compile_only_args()
        compile_args += swiftc.get_module_args(module_name)
        link_args = swiftc.get_output_args(os.path.join(self.environment.get_build_dir(), self.get_target_filename(target)))
        rundir = self.get_target_private_dir(target)
        out_module_name = self.swift_module_file_name(target)
        in_module_files = self.determine_swift_dep_modules(target)
        abs_module_dirs = self.determine_swift_dep_dirs(target)
        module_includes = []
        for x in abs_module_dirs:
            module_includes += swiftc.get_include_args(x)
        link_deps = self.get_swift_link_deps(target)
        abs_link_deps = [os.path.join(self.environment.get_build_dir(), x) for x in link_deps]
        (rel_generated, _) = self.split_swift_generated_sources(target)
        abs_generated = [os.path.join(self.environment.get_build_dir(), x) for x in rel_generated]
        # We need absolute paths because swiftc needs to be invoked in a subdir
        # and this is the easiest way about it.
        objects = [] # Relative to swift invocation dir
        rel_objects = [] # Relative to build.ninja
        for i in abssrc + abs_generated:
            base = os.path.split(i)[1]
            oname = os.path.splitext(base)[0] + '.o'
            objects.append(oname)
            rel_objects.append(os.path.join(self.get_target_private_dir(target), oname))
        # Swiftc does not seem to be able to emit objects and module files in one go.
        elem = NinjaBuildElement(rel_objects,
                                 'swift_COMPILER',
                                 abssrc)
        elem.add_dep(in_module_files + rel_generated)
        elem.add_dep(abs_headers)
        elem.add_item('ARGS', compile_args + header_imports + abs_generated + module_includes)
        elem.add_item('RUNDIR', rundir)
        elem.write(outfile)
        self.check_outputs(elem)
        elem = NinjaBuildElement(out_module_name,
                                 'swift_COMPILER',
                                 abssrc)
        elem.add_dep(in_module_files + rel_generated)
        elem.add_item('ARGS', compile_args + abs_generated + module_includes + swiftc.get_mod_gen_args())
        elem.add_item('RUNDIR', rundir)
        elem.write(outfile)
        self.check_outputs(elem)
        if isinstance(target, build.StaticLibrary):
            elem = self.generate_link(target, outfile, self.get_target_filename(target),
                                      rel_objects, self.build.static_linker)
            elem.write(outfile)
        elif isinstance(target, build.Executable):
            elem = NinjaBuildElement(self.get_target_filename(target), 'swift_COMPILER', [])
            elem.add_dep(rel_objects)
            elem.add_dep(link_deps)
            elem.add_item('ARGS', link_args + swiftc.get_std_exe_link_args() + objects + abs_link_deps)
            elem.add_item('RUNDIR', rundir)
            elem.write(outfile)
            self.check_outputs(elem)
        else:
            raise MesonException('Swift supports only executable and static library targets.')
    def generate_static_link_rules(self, is_cross, outfile):
        if self.build.has_language('java'):
            if not is_cross:
                self.generate_java_link(outfile)
        if is_cross:
            if self.environment.cross_info.need_cross_compiler():
                static_linker = self.build.static_cross_linker
            else:
                static_linker = self.build.static_linker
            crstr = '_CROSS'
        else:
            static_linker = self.build.static_linker
            crstr = ''
        if static_linker is None:
            return
        rule = 'rule STATIC%s_LINKER\n' % crstr
        if mesonlib.is_windows():
            command_templ = ''' command = %s @$out.rsp
 rspfile = $out.rsp
 rspfile_content = $LINK_ARGS %s $in
'''
        else:
            command_templ = ' command = %s $LINK_ARGS %s $in\n'
        command = command_templ % (' '.join(static_linker.get_exelist()),
                                   ' '.join(static_linker.get_output_args('$out')))
        description = ' description = Static linking library $out\n\n'
        outfile.write(rule)
        outfile.write(command)
        outfile.write(description)
    def generate_dynamic_link_rules(self, outfile):
        ctypes = [(self.build.compilers, False)]
        if self.environment.is_cross_build():
            if self.environment.cross_info.need_cross_compiler():
                ctypes.append((self.build.cross_compilers, True))
            else:
                # Native compiler masquerades as the cross compiler.
                ctypes.append((self.build.compilers, True))
        else:
            ctypes.append((self.build.cross_compilers, True))
        for (complist, is_cross) in ctypes:
            for compiler in complist:
                langname = compiler.get_language()
                if langname == 'java' or langname == 'vala' or\
                   langname == 'rust' or langname == 'cs':
                    continue
                crstr = ''
                cross_args = []
                if is_cross:
                    crstr = '_CROSS'
                    try:
                        cross_args = self.environment.cross_info.config['properties'][langname + '_link_args']
                    except KeyError:
                        pass
                rule = 'rule %s%s_LINKER\n' % (langname, crstr)
                if mesonlib.is_windows():
                    command_template = ''' command = %s @$out.rsp
 rspfile = $out.rsp
 rspfile_content = %s $ARGS %s $in $LINK_ARGS $aliasing
'''
                else:
                    command_template = ' command = %s %s $ARGS %s $in $LINK_ARGS $aliasing\n'
                command = command_template % (' '.join(compiler.get_linker_exelist()),
                                              ' '.join(cross_args),
                                              ' '.join(compiler.get_linker_output_args('$out')))
                description = ' description = Linking target $out'
                outfile.write(rule)
                outfile.write(command)
                outfile.write(description)
                outfile.write('\n')
        scriptdir = self.environment.get_script_dir()
        outfile.write('\n')
        symrule = 'rule SHSYM\n'
        symcmd = ' command = "%s" "%s" %s %s $CROSS\n' % (ninja_quote(sys.executable),
                                                          ninja_quote(os.path.join(scriptdir, 'symbolextractor.py')),
                                                          '$in', '$out')
        synstat = ' restat = 1\n'
        syndesc = ' description = Generating symbol file $out.\n'
        outfile.write(symrule)
        outfile.write(symcmd)
        outfile.write(synstat)
        outfile.write(syndesc)
        outfile.write('\n')
    def generate_java_compile_rule(self, compiler, outfile):
        rule = 'rule %s_COMPILER\n' % compiler.get_language()
        invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
        command = ' command = %s $ARGS $in\n' % invoc
        description = ' description = Compiling Java object $in.\n'
        outfile.write(rule)
        outfile.write(command)
        outfile.write(description)
        outfile.write('\n')
    def generate_cs_compile_rule(self, compiler, outfile):
        rule = 'rule %s_COMPILER\n' % compiler.get_language()
        invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
        command = ' command = %s $ARGS $in\n' % invoc
        description = ' description = Compiling cs target $out.\n'
        outfile.write(rule)
        outfile.write(command)
        outfile.write(description)
        outfile.write('\n')
    def generate_vala_compile_rules(self, compiler, outfile):
        rule = 'rule %s_COMPILER\n' % compiler.get_language()
        invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
        command = ' command = %s $ARGS $in\n' % invoc
        description = ' description = Compiling Vala source $in.\n'
        restat = ' restat = 1\n' # ValaC does this always to take advantage of it.
        outfile.write(rule)
        outfile.write(command)
        outfile.write(description)
        outfile.write(restat)
        outfile.write('\n')
    def generate_rust_compile_rules(self, compiler, outfile):
        rule = 'rule %s_COMPILER\n' % compiler.get_language()
        invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
        command = ' command = %s $ARGS $in\n' % invoc
        description = ' description = Compiling Rust source $in.\n'
        depfile = ' depfile = $targetdep\n'
        depstyle = ' deps = gcc\n'
        outfile.write(rule)
        outfile.write(command)
        outfile.write(description)
        outfile.write(depfile)
        outfile.write(depstyle)
        outfile.write('\n')
    def generate_swift_compile_rules(self, compiler, outfile):
        rule = 'rule %s_COMPILER\n' % compiler.get_language()
        full_exe = [sys.executable,
                    os.path.join(self.environment.get_script_dir(), 'dirchanger.py'),
                    '$RUNDIR'] + compiler.get_exelist()
        invoc = ' '.join([ninja_quote(i) for i in full_exe])
        command = ' command = %s $ARGS $in\n' % invoc
        description = ' description = Compiling Swift source $in.\n'
        outfile.write(rule)
        outfile.write(command)
        outfile.write(description)
        outfile.write('\n')
    def generate_fortran_dep_hack(self, outfile):
        if mesonlib.is_windows():
            cmd = 'cmd /C ""'
        else:
            cmd = 'true'
        template = '''# Workaround for these issues:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485
rule FORTRAN_DEP_HACK
 command = %s
 description = Dep hack
 restat = 1

'''
        outfile.write(template % cmd)
    def generate_compile_rule_for(self, langname, compiler, qstr, is_cross, outfile):
        if langname == 'java':
            if not is_cross:
                self.generate_java_compile_rule(compiler, outfile)
            return
        if langname == 'cs':
            if not is_cross:
                self.generate_cs_compile_rule(compiler, outfile)
            return
        if langname == 'vala':
            if not is_cross:
                self.generate_vala_compile_rules(compiler, outfile)
            return
        if langname == 'rust':
            if not is_cross:
                self.generate_rust_compile_rules(compiler, outfile)
            return
        if langname == 'swift':
            if not is_cross:
                self.generate_swift_compile_rules(compiler, outfile)
            return
        if langname == 'fortran':
            self.generate_fortran_dep_hack(outfile)
        if is_cross:
            crstr = '_CROSS'
        else:
            crstr = ''
        rule = 'rule %s%s_COMPILER\n' % (langname, crstr)
        depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
        quoted_depargs = []
        for d in depargs:
            if d != '$out' and d != '$in':
                d = qstr % d
            quoted_depargs.append(d)
        cross_args = []
        if is_cross:
            try:
                cross_args = self.environment.cross_info.config['properties'][langname + '_args']
            except KeyError:
                pass
        if mesonlib.is_windows():
            command_template = ''' command = %s @$out.rsp
 rspfile = $out.rsp
 rspfile_content = %s $ARGS %s %s %s $in
'''
        else:
            command_template = ' command = %s %s $ARGS %s %s %s $in\n'
        command = command_template % (' '.join(compiler.get_exelist()),
                                      ' '.join(cross_args),
                                      ' '.join(quoted_depargs),
                                      ' '.join(compiler.get_output_args('$out')),
                                      ' '.join(compiler.get_compile_only_args()))
        description = ' description = Compiling %s object $out\n' % langname
        if compiler.get_id() == 'msvc':
            deps = ' deps = msvc\n'
        else:
            deps = ' deps = gcc\n'
            deps += ' depfile = $DEPFILE\n'
        outfile.write(rule)
        outfile.write(command)
        outfile.write(deps)
        outfile.write(description)
        outfile.write('\n')
    def generate_pch_rule_for(self, langname, compiler, qstr, is_cross, outfile):
        if langname != 'c' and langname != 'cpp':
            return
        if is_cross:
            crstr = '_CROSS'
        else:
            crstr = ''
        rule = 'rule %s%s_PCH\n' % (langname, crstr)
        depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
        cross_args = []
        if is_cross:
            try:
                cross_args = self.environment.cross_info.config['properties'][langname + '_args']
            except KeyError:
                pass
        quoted_depargs = []
        for d in depargs:
            if d != '$out' and d != '$in':
                d = qstr % d
            quoted_depargs.append(d)
        if compiler.get_id() == 'msvc':
            output = ''
        else:
            output = ' '.join(compiler.get_output_args('$out'))
        command = " command = %s %s $ARGS %s %s %s $in\n" % (' '.join(compiler.get_exelist()),
                                                             ' '.join(cross_args),
                                                             ' '.join(quoted_depargs),
                                                             output,
                                                             ' '.join(compiler.get_compile_only_args()))
        description = ' description = Precompiling header %s\n' % '$in'
        if compiler.get_id() == 'msvc':
            deps = ' deps = msvc\n'
        else:
            deps = ' deps = gcc\n'
            deps += ' depfile = $DEPFILE\n'
        outfile.write(rule)
        outfile.write(command)
        outfile.write(deps)
        outfile.write(description)
        outfile.write('\n')
    def generate_compile_rules(self, outfile):
        qstr = quote_char + "%s" + quote_char
        for compiler in self.build.compilers:
            langname = compiler.get_language()
            self.generate_compile_rule_for(langname, compiler, qstr, False, outfile)
            self.generate_pch_rule_for(langname, compiler, qstr, False, outfile)
        if self.environment.is_cross_build():
            # In case we are doing a target-only build, make the native compilers
            # masquerade as cross compilers.
            if self.environment.cross_info.need_cross_compiler():
                cclist = self.build.cross_compilers
            else:
                cclist = self.build.compilers
            for compiler in cclist:
                langname = compiler.get_language()
                self.generate_compile_rule_for(langname, compiler, qstr, True, outfile)
                self.generate_pch_rule_for(langname, compiler, qstr, True, outfile)
        outfile.write('\n')
    def replace_outputs(self, args, private_dir, output_list):
        newargs = []
        regex = re.compile(r'@OUTPUT(\d+)@')
        for arg in args:
            m = regex.search(arg)
            while m is not None:
                index = int(m.group(1))
                src = '@OUTPUT%d@' % index
                arg = arg.replace(src, os.path.join(private_dir, output_list[index]))
                m = regex.search(arg)
            newargs.append(arg)
        return newargs
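    # Illustrative note (not in the original): with private_dir 'priv' and
    # output_list ['a.c', 'a.h'], an argument '--out=@OUTPUT1@' becomes
    # '--out=priv/a.h'; every @OUTPUTn@ placeholder is substituted in a loop
    # (POSIX-style os.path.join assumed for this example).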
    def generate_custom_generator_rules(self, target, outfile):
        for genlist in target.get_generated_sources():
            if isinstance(genlist, build.CustomTarget):
                continue # Customtarget has already written its output rules
            generator = genlist.get_generator()
            exe = generator.get_exe()
            exe_arr = self.exe_object_to_cmd_array(exe)
            infilelist = genlist.get_infilelist()
            outfilelist = genlist.get_outfilelist()
            base_args = generator.get_arglist()
            extra_dependencies = [os.path.join(self.build_to_src, i) for i in genlist.extra_depends]
            for i in range(len(infilelist)):
                if len(generator.outputs) == 1:
                    sole_output = os.path.join(self.get_target_private_dir(target), outfilelist[i])
                else:
                    sole_output = ''
                curfile = infilelist[i]
                infilename = os.path.join(self.build_to_src, curfile)
                outfiles = genlist.get_outputs_for(curfile)
                outfiles = [os.path.join(self.get_target_private_dir(target), of) for of in outfiles]
                args = [x.replace("@INPUT@", infilename).replace('@OUTPUT@', sole_output)
                        for x in base_args]
                args = self.replace_outputs(args, self.get_target_private_dir(target), outfilelist)
                # We have consumed output files, so drop them from the list of remaining outputs.
                if sole_output == '':
                    outfilelist = outfilelist[len(generator.outputs):]
                relout = self.get_target_private_dir(target)
                args = [x.replace("@SOURCE_DIR@", self.build_to_src).replace("@BUILD_DIR@", relout)
                        for x in args]
                final_args = []
                for a in args:
                    if a == '@EXTRA_ARGS@':
                        final_args += genlist.get_extra_args()
                    else:
                        final_args.append(a)
                cmdlist = exe_arr + final_args
                elem = NinjaBuildElement(outfiles, 'CUSTOM_COMMAND', infilename)
                if len(extra_dependencies) > 0:
                    elem.add_dep(extra_dependencies)
                elem.add_item('DESC', 'Generating $out')
                if isinstance(exe, build.BuildTarget):
                    elem.add_dep(self.get_target_filename(exe))
                elem.add_item('COMMAND', cmdlist)
                elem.write(outfile)
                self.check_outputs(elem)
    def scan_fortran_module_outputs(self, target):
        compiler = None
        for c in self.build.compilers:
            if c.get_language() == 'fortran':
                compiler = c
                break
        if compiler is None:
            self.fortran_deps[target.get_basename()] = {}
            return
        modre = re.compile(r"\s*module\s+(\w+)", re.IGNORECASE)
        module_files = {}
        for s in target.get_sources():
            # FIXME, does not work for generated Fortran sources,
            # but those are really rare. I hope.
            if not compiler.can_compile(s):
                continue
            for line in open(os.path.join(self.environment.get_source_dir(), s.subdir, s.fname)):
                modmatch = modre.match(line)
                if modmatch is not None:
                    modname = modmatch.group(1)
                    if modname.lower() == 'procedure': # MODULE PROCEDURE construct
                        continue
                    if modname in module_files:
                        raise InvalidArguments('Namespace collision: module %s defined in two files %s and %s.' %
                                               (modname, module_files[modname], s))
                    module_files[modname] = s
        self.fortran_deps[target.get_basename()] = module_files
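    # Illustrative note (not in the original): a source line such as
    #   module foo_mod
    # records {'foo_mod': <that source file>} above, while 'MODULE PROCEDURE'
    # statements are skipped because they reuse the keyword without declaring
    # a new module.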
    def get_fortran_deps(self, compiler, src, target):
        mod_files = []
        usere = re.compile(r"\s*use\s+(\w+)", re.IGNORECASE)
        dirname = self.get_target_private_dir(target)
        tdeps = self.fortran_deps[target.get_basename()]
        for line in open(src):
            usematch = usere.match(line)
            if usematch is not None:
                usename = usematch.group(1)
                if usename not in tdeps:
                    # The module is not provided by any source file. This is due to
                    # a) missing file/typo/etc
                    # b) using a module provided by the compiler, such as OpenMP
                    # There's no easy way to tell which is which (that I know of)
                    # so just ignore this and go on. Ideally we would print a
                    # warning message to the user but this is a common occurrence,
                    # which would lead to lots of distracting noise.
                    continue
                mod_source_file = tdeps[usename]
                # Check if a source uses a module it exports itself.
                # Potential bug if multiple targets have a file with
                # the same name.
                if mod_source_file.fname == os.path.split(src)[1]:
                    continue
                mod_name = compiler.module_name_to_filename(usematch.group(1))
                mod_files.append(os.path.join(dirname, mod_name))
        return mod_files
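    # Illustrative note (not in the original): if bar.f90 contains 'use foo_mod'
    # and foo_mod is declared by a sibling file of the same target, bar.f90 is
    # given a dependency on <target-private-dir>/<mod file>, with the mod file
    # name chosen by compiler.module_name_to_filename().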
    def generate_single_compile(self, target, outfile, src, is_generated=False, header_deps=[], order_deps=[]):
        if isinstance(src, str) and src.endswith('.h'):
            raise RuntimeError('Fug')
        if isinstance(src, RawFilename) and src.fname.endswith('.h'):
            raise RuntimeError('Fug')
        extra_orderdeps = []
        compiler = self.get_compiler_for_source(src)
        commands = self.generate_basic_compiler_args(target, compiler)
        commands += compiler.get_include_args(self.get_target_private_dir(target), False)
        curdir = target.get_subdir()
        tmppath = os.path.normpath(os.path.join(self.build_to_src, curdir))
        commands += compiler.get_include_args(tmppath, False)
        if curdir == '':
            curdir = '.'
        commands += compiler.get_include_args(curdir, False)
        for d in target.external_deps:
            if d.need_threads():
                commands += compiler.thread_flags()
                break
        if isinstance(src, RawFilename):
            rel_src = src.fname
        elif is_generated:
            if self.has_dir_part(src):
                rel_src = src
            else:
                rel_src = os.path.join(self.get_target_private_dir(target), src)
            abs_src = os.path.join(self.environment.get_source_dir(), rel_src)
        else:
            if isinstance(src, File):
                rel_src = src.rel_to_builddir(self.build_to_src)
            else:
                raise build.InvalidArguments('Invalid source type.')
            abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
        if isinstance(src, RawFilename):
            src_filename = src.fname
        elif isinstance(src, File):
            src_filename = src.fname
        elif os.path.isabs(src):
            src_filename = os.path.basename(src)
        else:
            src_filename = src
        obj_basename = src_filename.replace('/', '_').replace('\\', '_')
        rel_obj = os.path.join(self.get_target_private_dir(target), obj_basename)
        rel_obj += '.' + self.environment.get_object_suffix()
        dep_file = compiler.depfile_for_object(rel_obj)
        if self.environment.coredata.get_builtin_option('use_pch'):
            pchlist = target.get_pch(compiler.language)
        else:
            pchlist = []
        if len(pchlist) == 0:
            pch_dep = []
        else:
            arr = []
            i = os.path.join(self.get_target_private_dir(target), compiler.get_pch_name(pchlist[0]))
            arr.append(i)
            pch_dep = arr
        for i in target.get_include_dirs():
            basedir = i.get_curdir()
            for d in i.get_incdirs():
                expdir = os.path.join(basedir, d)
                srctreedir = os.path.join(self.build_to_src, expdir)
                bargs = compiler.get_include_args(expdir, i.is_system)
                sargs = compiler.get_include_args(srctreedir, i.is_system)
                commands += bargs
                commands += sargs
            for d in i.get_extra_build_dirs():
                commands += compiler.get_include_args(d, i.is_system)
        custom_target_include_dirs = []
        for i in target.generated:
            if isinstance(i, build.CustomTarget):
                idir = self.get_target_dir(i)
                if idir not in custom_target_include_dirs:
                    custom_target_include_dirs.append(idir)
        for i in custom_target_include_dirs:
            commands += compiler.get_include_args(i, False)
        if self.environment.coredata.get_builtin_option('use_pch'):
            commands += self.get_pch_include_args(compiler, target)
        crstr = ''
        if target.is_cross:
            crstr = '_CROSS'
        compiler_name = '%s%s_COMPILER' % (compiler.get_language(), crstr)
        extra_deps = []
        if compiler.get_language() == 'fortran':
            extra_deps += self.get_fortran_deps(compiler, abs_src, target)
            # Dependency hack. Remove once multiple outputs in Ninja is fixed:
            # https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
            for modname, srcfile in self.fortran_deps[target.get_basename()].items():
                modfile = os.path.join(self.get_target_private_dir(target),
                                       compiler.module_name_to_filename(modname))
                if srcfile == src:
                    depelem = NinjaBuildElement(modfile, 'FORTRAN_DEP_HACK', rel_obj)
                    depelem.write(outfile)
                    self.check_outputs(depelem)
            commands += compiler.get_module_outdir_args(self.get_target_private_dir(target))
        element = NinjaBuildElement(rel_obj, compiler_name, rel_src)
        for d in header_deps:
            if isinstance(d, RawFilename):
                d = d.fname
            elif not self.has_dir_part(d):
                d = os.path.join(self.get_target_private_dir(target), d)
            element.add_dep(d)
        for d in extra_deps:
            element.add_dep(d)
        for d in order_deps:
            if isinstance(d, RawFilename):
                d = d.fname
            elif not self.has_dir_part(d):
                d = os.path.join(self.get_target_private_dir(target), d)
            element.add_orderdep(d)
        element.add_orderdep(pch_dep)
        element.add_orderdep(extra_orderdeps)
        for i in self.get_fortran_orderdeps(target, compiler):
            element.add_orderdep(i)
        element.add_item('DEPFILE', dep_file)
        element.add_item('ARGS', commands)
        element.write(outfile)
        self.check_outputs(element)
        return rel_obj
    def has_dir_part(self, fname):
        return '/' in fname or '\\' in fname
    # Fortran is a bit weird (again). When you link against a library, just compiling a source file
    # requires the mod files that are output when single files are built. To do this right we would need to
    # scan all inputs and write out explicit deps for each file. That is too slow and too much effort so
    # instead just have an ordered dependency on the library. This ensures all required mod files are created.
    # The real deps are then detected via dep file generation from the compiler. This breaks on compilers that
    # produce incorrect dep files but such is life.
    def get_fortran_orderdeps(self, target, compiler):
        if compiler.language != 'fortran':
            return []
        return [os.path.join(self.get_target_dir(lt), lt.get_filename()) for lt in target.link_targets]
    def generate_msvc_pch_command(self, target, compiler, pch):
        if len(pch) != 2:
            raise RuntimeError('MSVC requires one header and one source to produce precompiled headers.')
        header = pch[0]
        source = pch[1]
        pchname = compiler.get_pch_name(header)
        dst = os.path.join(self.get_target_private_dir(target), pchname)
        commands = []
        commands += self.generate_basic_compiler_args(target, compiler)
        just_name = os.path.split(header)[1]
        (objname, pch_args) = compiler.gen_pch_args(just_name, source, dst)
        commands += pch_args
        dep = dst + '.' + compiler.get_depfile_suffix()
        return (commands, dep, dst, [objname])
    def generate_gcc_pch_command(self, target, compiler, pch):
        commands = []
        commands += self.generate_basic_compiler_args(target, compiler)
        dst = os.path.join(self.get_target_private_dir(target),
                           os.path.split(pch)[-1] + '.' + compiler.get_pch_suffix())
        dep = dst + '.' + compiler.get_depfile_suffix()
        return (commands, dep, dst, []) # Gcc does not create an object file during pch generation.
    def generate_pch(self, target, outfile):
        cstr = ''
        pch_objects = []
        if target.is_cross:
            cstr = '_CROSS'
        for lang in ['c', 'cpp']:
            pch = target.get_pch(lang)
            if len(pch) == 0:
                continue
            if '/' not in pch[0] or '/' not in pch[-1]:
                raise build.InvalidArguments('Precompiled header of "%s" must not be in the same directory as source, please put it in a subdirectory.' % target.get_basename())
            compiler = self.get_compiler_for_lang(lang)
            if compiler.id == 'msvc':
                src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[-1])
                (commands, dep, dst, objs) = self.generate_msvc_pch_command(target, compiler, pch)
                extradep = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
            else:
                src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
                (commands, dep, dst, objs) = self.generate_gcc_pch_command(target, compiler, pch[0])
                extradep = None
            pch_objects += objs
            rulename = compiler.get_language() + cstr + '_PCH'
            elem = NinjaBuildElement(dst, rulename, src)
            if extradep is not None:
                elem.add_dep(extradep)
            elem.add_item('ARGS', commands)
            elem.add_item('DEPFILE', dep)
            elem.write(outfile)
            self.check_outputs(elem)
        return pch_objects
    def generate_shsym(self, outfile, target):
        target_name = self.get_target_filename(target)
        targetdir = self.get_target_private_dir(target)
        symname = os.path.join(targetdir, target_name + '.symbols')
        elem = NinjaBuildElement(symname, 'SHSYM', target_name)
        if self.environment.is_cross_build() and self.environment.cross_info.need_cross_compiler():
            elem.add_item('CROSS', '--cross-host=' + self.environment.cross_info.config['host_machine']['system'])
        elem.write(outfile)
        self.check_outputs(elem)
def generate_link(self, target, outfile, outname, obj_list, linker, extra_args=[]):
if isinstance(target, build.StaticLibrary):
linker_base = 'STATIC'
else:
linker_base = linker.get_language() # Fixme.
if isinstance(target, build.SharedLibrary):
self.generate_shsym(outfile, target)
crstr = ''
if target.is_cross:
crstr = '_CROSS'
linker_rule = linker_base + crstr + '_LINKER'
abspath = os.path.join(self.environment.get_build_dir(), target.subdir)
commands = []
commands += linker.get_linker_always_args()
commands += linker.get_buildtype_linker_args(self.environment.coredata.get_builtin_option('buildtype'))
commands += linker.get_option_link_args(self.environment.coredata.compiler_options)
if not(isinstance(target, build.StaticLibrary)):
commands += self.environment.coredata.external_link_args[linker.get_language()]
if isinstance(target, build.Executable):
commands += linker.get_std_exe_link_args()
elif isinstance(target, build.SharedLibrary):
commands += linker.get_std_shared_lib_link_args()
commands += linker.get_pic_args()
if hasattr(target, 'soversion'):
soversion = target.soversion
else:
soversion = None
commands += linker.get_soname_args(target.name, abspath, soversion)
elif isinstance(target, build.StaticLibrary):
commands += linker.get_std_link_args()
else:
raise RuntimeError('Unknown build target type.')
# Link arguments of static libraries are not put in the command line of
# the library. They are instead appended to the command line where
# the static library is used.
if linker_base == 'STATIC':
dependencies = []
else:
dependencies = target.get_dependencies()
commands += self.build_target_link_arguments(linker, dependencies)
for d in target.external_deps:
if d.need_threads():
commands += linker.thread_link_flags()
if not isinstance(target, build.StaticLibrary):
commands += target.link_args
# External deps must be last because target link libraries may depend on them.
if not(isinstance(target, build.StaticLibrary)):
for dep in target.get_external_deps():
commands += dep.get_link_args()
for d in target.get_dependencies():
if isinstance(d, build.StaticLibrary):
for dep in d.get_external_deps():
commands += dep.get_link_args()
commands += linker.build_rpath_args(self.environment.get_build_dir(),
self.determine_rpath_dirs(target), target.install_rpath)
if self.environment.coredata.get_builtin_option('coverage'):
commands += linker.get_coverage_link_args()
custom_target_libraries = self.get_custom_target_provided_libraries(target)
commands += extra_args
commands += custom_target_libraries
commands = linker.unixtype_flags_to_native(commands)
dep_targets = [self.get_dependency_filename(t) for t in dependencies]
dep_targets += [os.path.join(self.environment.source_dir,
target.subdir, t) for t in target.link_depends]
elem = NinjaBuildElement(outname, linker_rule, obj_list)
elem.add_dep(dep_targets + custom_target_libraries)
elem.add_item('LINK_ARGS', commands)
self.check_outputs(elem)
return elem
def get_custom_target_provided_libraries(self, target):
libs = []
for t in target.get_generated_sources():
if not isinstance(t, build.CustomTarget):
continue
for f in t.output:
if self.environment.is_library(f):
libs.append(os.path.join(self.get_target_dir(t), f))
return libs
def determine_rpath_dirs(self, target):
link_deps = target.get_all_link_deps()
result = []
for ld in link_deps:
prospective = self.get_target_dir(ld)
if not prospective in result:
result.append(prospective)
return result
def get_dependency_filename(self, t):
if isinstance(t, build.SharedLibrary):
return os.path.join(self.get_target_private_dir(t), self.get_target_filename(t) + '.symbols')
return self.get_target_filename(t)
def generate_shlib_aliases(self, target, outdir):
basename = target.get_filename()
aliases = target.get_aliaslist()
if not mesonlib.is_windows():
for alias in aliases:
aliasfile = os.path.join(self.environment.get_build_dir(), outdir, alias)
try:
os.remove(aliasfile)
except Exception:
pass
os.symlink(basename, aliasfile)
else:
mlog.debug("Library versioning disabled because host does not support symlinks.")
def generate_gcov_clean(self, outfile):
gcno_elem = NinjaBuildElement('clean-gcno', 'CUSTOM_COMMAND', 'PHONY')
script_root = self.environment.get_script_dir()
clean_script = os.path.join(script_root, 'delwithsuffix.py')
gcno_elem.add_item('COMMAND', [sys.executable, clean_script, '.', 'gcno'])
gcno_elem.add_item('description', 'Deleting gcno files')
gcno_elem.write(outfile)
self.check_outputs(gcno_elem)
gcda_elem = NinjaBuildElement('clean-gcda', 'CUSTOM_COMMAND', 'PHONY')
script_root = self.environment.get_script_dir()
clean_script = os.path.join(script_root, 'delwithsuffix.py')
gcda_elem.add_item('COMMAND', [sys.executable, clean_script, '.', 'gcda'])
gcda_elem.add_item('description', 'Deleting gcda files')
gcda_elem.write(outfile)
self.check_outputs(gcda_elem)
def is_compilable_file(self, filename):
return filename.endswith(('.cpp', '.c', '.cxx', '.cc', '.C'))
def process_dep_gens(self, outfile, target):
src_deps = []
other_deps = []
for rule in self.dep_rules.values():
srcs = target.get_original_kwargs().get(rule.src_keyword, [])
if isinstance(srcs, str):
srcs = [srcs]
for src in srcs:
plainname = os.path.split(src)[1]
basename = plainname.split('.')[0]
outname = rule.name_templ.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
outfilename = os.path.join(self.get_target_private_dir(target), outname)
infilename = os.path.join(self.build_to_src, target.get_source_subdir(), src)
elem = NinjaBuildElement(outfilename, rule.name, infilename)
elem.write(outfile)
self.check_outputs(elem)
if self.is_compilable_file(outfilename):
src_deps.append(outfilename)
else:
other_deps.append(outfilename)
return (src_deps, other_deps)
def generate_ending(self, outfile):
targetlist = [self.get_target_filename(t) for t in self.build.get_targets().values()
if not isinstance(t, build.RunTarget)]
elem = NinjaBuildElement('all', 'phony', targetlist)
elem.write(outfile)
self.check_outputs(elem)
default = 'default all\n\n'
outfile.write(default)
ninja_command = environment.detect_ninja()
if ninja_command is None:
raise MesonException('Could not detect ninja command')
elem = NinjaBuildElement('clean', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', [ninja_command, '-t', 'clean'])
elem.add_item('description', 'Cleaning')
if self.environment.coredata.get_builtin_option('coverage'):
self.generate_gcov_clean(outfile)
elem.add_dep('clean-gcda')
elem.add_dep('clean-gcno')
elem.write(outfile)
self.check_outputs(elem)
deps = self.get_regen_filelist()
elem = NinjaBuildElement('build.ninja', 'REGENERATE_BUILD', deps)
elem.add_item('pool', 'console')
elem.write(outfile)
elem = NinjaBuildElement(deps, 'phony', '')
elem.write(outfile)
self.check_outputs(elem)
| 45.630854 | 176 | 0.589942 | 9,661 | 82,820 | 4.858089 | 0.086533 | 0.015085 | 0.021093 | 0.022968 | 0.420335 | 0.346636 | 0.302574 | 0.259109 | 0.206183 | 0.165317 | 0 | 0.001351 | 0.303043 | 82,820 | 1,814 | 177 | 45.656009 | 0.811753 | 0.049022 | 0 | 0.301345 | 0 | 0.001222 | 0.083425 | 0.001843 | 0.001222 | 0 | 0 | 0.000551 | 0.002445 | 1 | 0.050122 | false | 0.003667 | 0.008557 | 0.003667 | 0.090465 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fe12421e5a8c03bfd1fbb0c021c5255e880a14d5 | 7,737 | py | Python | tools/third_party/iniconfig/testing/test_iniconfig.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | ["BSD-3-Clause"] | 2,479 | 2018-05-28T14:51:29.000Z | 2022-03-30T14:41:18.000Z | tools/third_party/iniconfig/testing/test_iniconfig.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | ["BSD-3-Clause"] | 7,642 | 2018-05-28T09:38:03.000Z | 2022-03-31T20:55:48.000Z | tools/third_party/iniconfig/testing/test_iniconfig.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | ["BSD-3-Clause"] | 1,303 | 2018-05-29T14:50:02.000Z | 2022-03-30T17:30:42.000Z |
import py
import pytest
from iniconfig import IniConfig, ParseError, __all__ as ALL
from iniconfig import iscommentline
from textwrap import dedent
check_tokens = {
'section': (
'[section]',
[(0, 'section', None, None)]
),
'value': (
'value = 1',
[(0, None, 'value', '1')]
),
'value in section': (
'[section]\nvalue=1',
[(0, 'section', None, None), (1, 'section', 'value', '1')]
),
'value with continuation': (
'names =\n Alice\n Bob',
[(0, None, 'names', 'Alice\nBob')]
),
'value with aligned continuation': (
'names = Alice\n'
' Bob',
[(0, None, 'names', 'Alice\nBob')]
),
'blank line': (
'[section]\n\nvalue=1',
[(0, 'section', None, None), (2, 'section', 'value', '1')]
),
'comment': (
'# comment',
[]
),
'comment on value': (
'value = 1',
[(0, None, 'value', '1')]
),
'comment on section': (
'[section] #comment',
[(0, 'section', None, None)]
),
'comment2': (
'; comment',
[]
),
'comment2 on section': (
'[section] ;comment',
[(0, 'section', None, None)]
),
'pseudo section syntax in value': (
'name = value []',
[(0, None, 'name', 'value []')]
),
'assignment in value': (
'value = x = 3',
[(0, None, 'value', 'x = 3')]
),
'use of colon for name-values': (
'name: y',
[(0, None, 'name', 'y')]
),
'use of colon without space': (
'value:y=5',
[(0, None, 'value', 'y=5')]
),
'equality gets precedence': (
'value=xyz:5',
[(0, None, 'value', 'xyz:5')]
),
}
@pytest.fixture(params=sorted(check_tokens))
def input_expected(request):
return check_tokens[request.param]
@pytest.fixture
def input(input_expected):
return input_expected[0]
@pytest.fixture
def expected(input_expected):
return input_expected[1]
def parse(input):
# only for testing purposes - _parse() does not use state except path
ini = object.__new__(IniConfig)
ini.path = "sample"
return ini._parse(input.splitlines(True))
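# For example, reusing the 'value in section' case from check_tokens above:
#   parse('[section]\nvalue=1')
#   -> [(0, 'section', None, None), (1, 'section', 'value', '1')]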
def parse_a_error(input):
return py.test.raises(ParseError, parse, input)
def test_tokenize(input, expected):
parsed = parse(input)
assert parsed == expected
def test_parse_empty():
parsed = parse("")
assert not parsed
ini = IniConfig("sample", "")
assert not ini.sections
def test_ParseError():
e = ParseError("filename", 0, "hello")
assert str(e) == "filename:1: hello"
def test_continuation_needs_preceding_token():
excinfo = parse_a_error(' Foo')
assert excinfo.value.lineno == 0
def test_continuation_cant_be_after_section():
excinfo = parse_a_error('[section]\n Foo')
assert excinfo.value.lineno == 1
def test_section_cant_be_empty():
excinfo = parse_a_error('[]')
assert excinfo.value.lineno == 0
@py.test.mark.parametrize('line', [
'!!',
])
def test_error_on_weird_lines(line):
parse_a_error(line)
def test_iniconfig_from_file(tmpdir):
path = tmpdir/'test.txt'
path.write('[metadata]\nname=1')
config = IniConfig(path=path)
assert list(config.sections) == ['metadata']
config = IniConfig(path, "[diff]")
assert list(config.sections) == ['diff']
with pytest.raises(TypeError):
IniConfig(data=path.read())
def test_iniconfig_section_first(tmpdir):
with pytest.raises(ParseError) as excinfo:
IniConfig("x", data='name=1')
assert excinfo.value.msg == "no section header defined"
def test_iniconfig_section_duplicate_fails():
with pytest.raises(ParseError) as excinfo:
IniConfig("x", data='[section]\n[section]')
assert 'duplicate section' in str(excinfo.value)
def test_iniconfig_duplicate_key_fails():
with pytest.raises(ParseError) as excinfo:
IniConfig("x", data='[section]\nname = Alice\nname = bob')
assert 'duplicate name' in str(excinfo.value)
def test_iniconfig_lineof():
config = IniConfig("x.ini", data=(
'[section]\n'
'value = 1\n'
'[section2]\n'
'# comment\n'
'value =2'
))
assert config.lineof('missing') is None
assert config.lineof('section') == 1
assert config.lineof('section2') == 3
assert config.lineof('section', 'value') == 2
assert config.lineof('section2', 'value') == 5
assert config['section'].lineof('value') == 2
assert config['section2'].lineof('value') == 5
def test_iniconfig_get_convert():
config = IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
assert config.get('section', 'int') == '1'
assert config.get('section', 'int', convert=int) == 1
def test_iniconfig_get_missing():
config = IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
assert config.get('section', 'missing', default=1) == 1
assert config.get('section', 'missing') is None
def test_section_get():
config = IniConfig("x", data='[section]\nvalue=1')
section = config['section']
assert section.get('value', convert=int) == 1
assert section.get('value', 1) == "1"
assert section.get('missing', 2) == 2
def test_missing_section():
config = IniConfig("x", data='[section]\nvalue=1')
with pytest.raises(KeyError):
config["other"]
def test_section_getitem():
config = IniConfig("x", data='[section]\nvalue=1')
assert config['section']['value'] == '1'
assert config['section']['value'] == '1'
def test_section_iter():
config = IniConfig("x", data='[section]\nvalue=1')
names = list(config['section'])
assert names == ['value']
items = list(config['section'].items())
assert items == [('value', '1')]
def test_config_iter():
config = IniConfig("x.ini", data=dedent('''
[section1]
value=1
[section2]
value=2
'''))
l = list(config)
assert len(l) == 2
assert l[0].name == 'section1'
assert l[0]['value'] == '1'
assert l[1].name == 'section2'
assert l[1]['value'] == '2'
def test_config_contains():
config = IniConfig("x.ini", data=dedent('''
[section1]
value=1
[section2]
value=2
'''))
assert 'xyz' not in config
assert 'section1' in config
assert 'section2' in config
def test_iter_file_order():
config = IniConfig("x.ini", data="""
[section2] #cpython dict ordered before section
value = 1
value2 = 2 # dict ordered before value
[section]
a = 1
b = 2
""")
l = list(config)
secnames = [x.name for x in l]
assert secnames == ['section2', 'section']
assert list(config['section2']) == ['value', 'value2']
assert list(config['section']) == ['a', 'b']
def test_example_pypirc():
config = IniConfig("pypirc", data=dedent('''
[distutils]
index-servers =
pypi
other
[pypi]
repository: <repository-url>
username: <username>
password: <password>
[other]
repository: http://example.com/pypi
username: <username>
password: <password>
'''))
distutils, pypi, other = list(config)
assert distutils["index-servers"] == "pypi\nother"
assert pypi['repository'] == '<repository-url>'
assert pypi['username'] == '<username>'
assert pypi['password'] == '<password>'
assert ['repository', 'username', 'password'] == list(other)
def test_api_import():
assert ALL == ['IniConfig', 'ParseError']
@pytest.mark.parametrize("line", [
"#qwe",
" #qwe",
";qwe",
" ;qwe",
])
def test_iscommentline_true(line):
assert iscommentline(line)
| 24.561905 | 73 | 0.586145 | 907 | 7,737 | 4.898567 | 0.183021 | 0.037812 | 0.036012 | 0.037812 | 0.267837 | 0.21427 | 0.186135 | 0.122215 | 0.092055 | 0.081026 | 0 | 0.017586 | 0.242988 | 7,737 | 314 | 74 | 24.640127 | 0.740994 | 0.00866 | 0 | 0.278689 | 0 | 0 | 0.280908 | 0 | 0 | 0 | 0 | 0 | 0.196721 | 1 | 0.118852 | false | 0.016393 | 0.02459 | 0.016393 | 0.163934 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fe13276650bb177fc42299abc71b473c1a0414dc | 3,586 | py | Python | jskparser/jskparser/util.py | natebragg/java-sketch | f5ac26f2cc46ae4556f9a61c55afd37f55c961ff | ["MIT"] | 15 | 2015-12-15T18:33:50.000Z | 2021-09-29T11:48:54.000Z | jskparser/jskparser/util.py | natebragg/java-sketch | f5ac26f2cc46ae4556f9a61c55afd37f55c961ff | ["MIT"] | 11 | 2015-11-16T22:14:58.000Z | 2021-09-23T05:28:40.000Z | jskparser/jskparser/util.py | natebragg/java-sketch | f5ac26f2cc46ae4556f9a61c55afd37f55c961ff | ["MIT"] | 8 | 2015-11-16T21:50:08.000Z | 2021-03-23T15:15:34.000Z |
import os
from subprocess import call
from . import glob2
pwd = os.path.dirname(__file__)
def get_files_from_path(path, ext):
# use set to remove duplicate files. weird...but it happens
if os.path.isfile(path): return set([os.path.abspath(path)])
else: # i.e., folder
files = glob2.glob(os.path.abspath(os.path.join(path, "**/*.{}".format(ext))))
return set(sorted(files)) # to guarantee the order of files read
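# Illustrative call (the 'src' directory is an assumption):
#   get_files_from_path('src', 'java')
#   -> the de-duplicated set of files matching src/**/*.java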
"""
handling javajskparser AST
"""
def toAST(files, ext, add_libs):
prg_files = []
for f in files:
prg_files.extend(get_files_from_path(f, "java"))
if not prg_files: exit('jskparser.util: File(s) not found!')
java_in = os.path.abspath(os.path.join(pwd, '../tests/ir_asts/API.java'))
json_out = os.path.abspath(os.path.join(pwd, '../tests/ir_asts/java.json'))
if add_libs:
obj_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Object.java'))
str_path = os.path.abspath(os.path.join(pwd, '../../model/lang/String.java'))
num_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Number.java'))
int_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Integer.java'))
char_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Character.java'))
itbl_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Iterable.java'))
iter_path = os.path.abspath(os.path.join(pwd, '../../model/util/Iterator.java'))
arr_path = os.path.abspath(os.path.join(pwd, '../../model/util/Arrays.java'))
list_path = os.path.abspath(os.path.join(pwd, '../../model/util/List.java'))
alist_path = os.path.abspath(os.path.join(pwd, '../../model/util/ArrayList.java'))
llist_path = os.path.abspath(os.path.join(pwd, '../../model/util/LinkedList.java'))
hmap_path = os.path.abspath(os.path.join(pwd, '../../model/util/HashMap.java'))
hset_path = os.path.abspath(os.path.join(pwd, '../../model/util/HashSet.java'))
if obj_path not in prg_files: prg_files.append(obj_path)
if str_path not in prg_files: prg_files.append(str_path)
if num_path not in prg_files: prg_files.append(num_path)
if int_path not in prg_files: prg_files.append(int_path)
if char_path not in prg_files: prg_files.append(char_path)
if itbl_path not in prg_files: prg_files.append(itbl_path)
if iter_path not in prg_files: prg_files.append(iter_path)
if arr_path not in prg_files: prg_files.append(arr_path)
if list_path not in prg_files: prg_files.append(list_path)
if alist_path not in prg_files: prg_files.append(alist_path)
if llist_path not in prg_files: prg_files.append(llist_path)
if hmap_path not in prg_files: prg_files.append(hmap_path)
if hset_path not in prg_files: prg_files.append(hset_path)
api = ""
for fname in prg_files:
with open(fname, 'r') as fd:
api += fd.read()
with open(java_in, 'w') as fd:
fd.write(api)
# this classpath stuff seems awful. Jsonify is hardcoded, passing a
# single string to subprocess.call is platform-dependent, and shell=True
# can be a security vulnerability (if allowed to take user input).
# This just got a whole lot nastier
# (a safer list-argv form is sketched after this function)
cmd = 'cd ' + pwd + '/..; /usr/bin/java -cp .:javaparser/javaparser-core/target/classes:$HOME/.m2/repository/com/cedarsoftware/json-io/4.3.0/json-io-4.3.0.jar jskparser.Jsonify ' + java_in + ' ' + json_out
ret = call(cmd, shell=True)
if ret != 0: exit('Problem parsing.')
return json_out
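# A minimal sketch of the safer list-argv form alluded to above (the jar
# location and classpath entries are assumptions copied from the hardcoded
# command string, not independently verified):
#   jar = os.path.expanduser(
#       '~/.m2/repository/com/cedarsoftware/json-io/4.3.0/json-io-4.3.0.jar')
#   classpath = os.pathsep.join(
#       ['.', 'javaparser/javaparser-core/target/classes', jar])
#   ret = call(['/usr/bin/java', '-cp', classpath, 'jskparser.Jsonify',
#               java_in, json_out], cwd=os.path.join(pwd, '..'))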
| 54.333333 | 209 | 0.663971 | 572 | 3,586 | 4.006993 | 0.263986 | 0.091623 | 0.096422 | 0.104712 | 0.447208 | 0.439354 | 0.429319 | 0.429319 | 0.25349 | 0.25349 | 0 | 0.003409 | 0.182097 | 3,586 | 65 | 210 | 55.169231 | 0.778043 | 0.09565 | 0 | 0 | 0 | 0.018868 | 0.203999 | 0.169947 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037736 | false | 0 | 0.056604 | 0 | 0.132075 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fe133101724c39453da53bbd1a90715fd62fd7e1 | 24,301 | py | Python | fiftyone/core/patches.py | SNeugber/fiftyone | a50be47bbbf189e4bbdcd631b93c4c9cbf41c6b7 | ["Apache-2.0"] | null | null | null | fiftyone/core/patches.py | SNeugber/fiftyone | a50be47bbbf189e4bbdcd631b93c4c9cbf41c6b7 | ["Apache-2.0"] | null | null | null | fiftyone/core/patches.py | SNeugber/fiftyone | a50be47bbbf189e4bbdcd631b93c4c9cbf41c6b7 | ["Apache-2.0"] | null | null | null |
"""
Patches views.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from copy import deepcopy
import eta.core.utils as etau
import fiftyone.core.aggregations as foa
import fiftyone.core.dataset as fod
import fiftyone.core.fields as fof
import fiftyone.core.labels as fol
import fiftyone.core.media as fom
import fiftyone.core.sample as fos
import fiftyone.core.view as fov
_SINGLE_TYPES_MAP = {
fol.Detections: fol.Detection,
fol.Polylines: fol.Polyline,
}
_PATCHES_TYPES = (fol.Detections, fol.Polylines)
_NO_MATCH_ID = ""
class _PatchView(fos.SampleView):
@property
def _sample_id(self):
return self._doc.sample_id
def save(self):
super().save()
self._view._sync_source_sample(self)
class PatchView(_PatchView):
"""A patch in a :class:`PatchesView`.
:class:`PatchView` instances should not be created manually; they are
generated by iterating over :class:`PatchesView` instances.
Args:
doc: a :class:`fiftyone.core.odm.DatasetSampleDocument`
view: the :class:`PatchesView` that the patch belongs to
selected_fields (None): a set of field names that this view is
restricted to
excluded_fields (None): a set of field names that are excluded from
this view
filtered_fields (None): a set of field names of list fields that are
filtered in this view
"""
pass
class EvaluationPatchView(_PatchView):
"""A patch in an :class:`EvaluationPatchesView`.
:class:`EvaluationPatchView` instances should not be created manually; they
are generated by iterating over :class:`EvaluationPatchesView` instances.
Args:
doc: a :class:`fiftyone.core.odm.DatasetSampleDocument`
view: the :class:`EvaluationPatchesView` that the patch belongs to
selected_fields (None): a set of field names that this view is
restricted to
excluded_fields (None): a set of field names that are excluded from
this view
filtered_fields (None): a set of field names of list fields that are
filtered in this view
"""
pass
class _PatchesView(fov.DatasetView):
def __init__(
self, source_collection, patches_stage, patches_dataset, _stages=None
):
if _stages is None:
_stages = []
self._source_collection = source_collection
self._patches_stage = patches_stage
self._patches_dataset = patches_dataset
self.__stages = _stages
def __copy__(self):
return self.__class__(
self._source_collection,
deepcopy(self._patches_stage),
self._patches_dataset,
_stages=deepcopy(self.__stages),
)
@property
def _base_view(self):
return self.__class__(
self._source_collection,
self._patches_stage,
self._patches_dataset,
)
@property
def _dataset(self):
return self._patches_dataset
@property
def _root_dataset(self):
return self._source_collection._root_dataset
@property
def _stages(self):
return self.__stages
@property
def _all_stages(self):
return (
self._source_collection.view()._all_stages
+ [self._patches_stage]
+ self.__stages
)
@property
def _label_fields(self):
raise NotImplementedError("subclass must implement _label_fields")
@property
def _element_str(self):
return "patch"
@property
def _elements_str(self):
return "patches"
@property
def name(self):
return self.dataset_name + "-patches"
@classmethod
def _get_default_sample_fields(
cls, include_private=False, use_db_fields=False
):
fields = super()._get_default_sample_fields(
include_private=include_private, use_db_fields=use_db_fields
)
if use_db_fields:
return fields + ("_sample_id",)
return fields + ("sample_id",)
def set_values(self, field_name, *args, **kwargs):
field = field_name.split(".", 1)[0]
must_sync = field in self._label_fields
# The `set_values()` operation could change the contents of this view,
# so we first record the sample IDs that need to be synced
if must_sync and self._stages:
ids = self.values("_id")
else:
ids = None
super().set_values(field_name, *args, **kwargs)
if must_sync:
self._sync_source_field(field, ids=ids)
def save(self, fields=None):
"""Overwrites the object patches in the source dataset with the
contents of the view.
If this view contains any additional fields that were not extracted
from the source dataset, these fields are not saved.
.. warning::
This will permanently delete any omitted, filtered, or otherwise
modified patches from the source dataset.
Args:
fields (None): an optional field or list of fields to save. If
specified, only these fields are overwritten
"""
if etau.is_str(fields):
fields = [fields]
super().save(fields=fields)
if fields is None:
fields = self._label_fields
else:
fields = [l for l in fields if l in self._label_fields]
#
# IMPORTANT: we sync the contents of `_patches_dataset`, not `self`
# here because the `save()` call above updated the dataset, which means
# this view may no longer have the same contents (e.g., if `skip()` is
# involved)
#
self._sync_source_root(fields)
def reload(self):
self._root_dataset.reload()
#
# Regenerate the patches dataset
#
# This assumes that calling `load_view()` when the current patches
# dataset has been deleted will cause a new one to be generated
#
self._patches_dataset.delete()
_view = self._patches_stage.load_view(self._source_collection)
self._patches_dataset = _view._patches_dataset
def _sync_source_sample(self, sample):
for field in self._label_fields:
self._sync_source_sample_field(sample, field)
def _sync_source_sample_field(self, sample, field):
label_type = self._patches_dataset._get_label_field_type(field)
is_list_field = issubclass(label_type, fol._LABEL_LIST_FIELDS)
doc = sample._doc.field_to_mongo(field)
if is_list_field:
doc = doc[label_type._LABEL_LIST_FIELD]
self._source_collection._set_labels_by_id(
field, [sample.sample_id], [doc]
)
def _sync_source_field(self, field, ids=None):
_, label_path = self._patches_dataset._get_label_field_path(field)
if ids is not None:
view = self._patches_dataset.mongo(
[{"$match": {"_id": {"$in": ids}}}]
)
else:
view = self._patches_dataset
sample_ids, docs = view.aggregate(
[foa.Values("sample_id"), foa.Values(label_path, _raw=True)]
)
self._source_collection._set_labels_by_id(field, sample_ids, docs)
def _sync_source_root(self, fields):
for field in fields:
self._sync_source_root_field(field)
def _sync_source_root_field(self, field):
_, id_path = self._get_label_field_path(field, "id")
label_path = id_path.rsplit(".", 1)[0]
#
# Sync label updates
#
sample_ids, docs, label_ids = self._patches_dataset.aggregate(
[
foa.Values("sample_id"),
foa.Values(label_path, _raw=True),
foa.Values(id_path, unwind=True),
]
)
self._source_collection._set_labels_by_id(field, sample_ids, docs)
#
# Sync label deletions
#
_, src_id_path = self._source_collection._get_label_field_path(
field, "id"
)
src_ids = self._source_collection.values(src_id_path, unwind=True)
delete_ids = set(src_ids) - set(label_ids)
if delete_ids:
self._source_collection._dataset.delete_labels(
ids=delete_ids, fields=field
)
def _get_ids_map(self, field):
label_type = self._patches_dataset._get_label_field_type(field)
is_list_field = issubclass(label_type, fol._LABEL_LIST_FIELDS)
_, id_path = self._get_label_field_path(field, "id")
sample_ids, label_ids = self.values(["id", id_path])
ids_map = {}
if is_list_field:
for sample_id, _label_ids in zip(sample_ids, label_ids):
if not _label_ids:
continue
for label_id in _label_ids:
ids_map[label_id] = sample_id
else:
for sample_id, label_id in zip(sample_ids, label_ids):
if not label_id:
continue
ids_map[label_id] = sample_id
return ids_map
class PatchesView(_PatchesView):
"""A :class:`fiftyone.core.view.DatasetView` of patches from a
:class:`fiftyone.core.dataset.Dataset`.
Patches views contain an ordered collection of patch samples, each of which
contains a subset of a sample of the parent dataset corresponding to a
single object or logical grouping of objects.
Patches retrieved from patches views are returned as :class:`PatchView`
objects.
Args:
source_collection: the
:class:`fiftyone.core.collections.SampleCollection` from which this
view was created
patches_stage: the :class:`fiftyone.core.stages.ToPatches` stage that
defines how the patches were extracted
patches_dataset: the :class:`fiftyone.core.dataset.Dataset` that serves
the patches in this view
"""
_SAMPLE_CLS = PatchView
def __init__(
self, source_collection, patches_stage, patches_dataset, _stages=None
):
super().__init__(
source_collection, patches_stage, patches_dataset, _stages=_stages
)
self._patches_field = patches_stage.field
@property
def _label_fields(self):
return [self._patches_field]
@property
def patches_field(self):
"""The field from which the patches in this view were extracted."""
return self._patches_field
class EvaluationPatchesView(_PatchesView):
"""A :class:`fiftyone.core.view.DatasetView` containing evaluation patches
from a :class:`fiftyone.core.dataset.Dataset`.
Evaluation patches views contain an ordered collection of evaluation
examples, each of which contains the ground truth and/or predicted labels
for a true positive, false positive, or false negative example from an
evaluation run on the underlying dataset.
Patches retrieved from patches views are returned as
:class:`EvaluationPatchView` objects.
Args:
source_collection: the
:class:`fiftyone.core.collections.SampleCollection` from which this
view was created
patches_stage: the :class:`fiftyone.core.stages.ToEvaluationPatches`
stage that defines how the patches were extracted
patches_dataset: the :class:`fiftyone.core.dataset.Dataset` that serves
the patches in this view
"""
_SAMPLE_CLS = EvaluationPatchView
def __init__(
self, source_collection, patches_stage, patches_dataset, _stages=None
):
super().__init__(
source_collection, patches_stage, patches_dataset, _stages=_stages
)
eval_key = patches_stage.eval_key
eval_info = source_collection.get_evaluation_info(eval_key)
self._gt_field = eval_info.config.gt_field
self._pred_field = eval_info.config.pred_field
@property
def _label_fields(self):
return [self._gt_field, self._pred_field]
@property
def gt_field(self):
"""The ground truth field for the evaluation patches in this view."""
return self._gt_field
@property
def pred_field(self):
"""The predictions field for the evaluation patches in this view."""
return self._pred_field
def make_patches_dataset(
sample_collection, field, keep_label_lists=False, name=None
):
"""Creates a dataset that contains one sample per object patch in the
specified field of the collection.
Fields other than ``field`` and the default sample fields will not be
included in the returned dataset. A ``sample_id`` field will be added that
records the sample ID from which each patch was taken.
Args:
sample_collection: a
:class:`fiftyone.core.collections.SampleCollection`
field: the patches field, which must be of type
:class:`fiftyone.core.labels.Detections` or
:class:`fiftyone.core.labels.Polylines`
keep_label_lists (False): whether to store the patches in label list
fields of the same type as the input collection rather than using
their single label variants
name (None): a name for the returned dataset
Returns:
a :class:`fiftyone.core.dataset.Dataset`
"""
if keep_label_lists:
field_type = sample_collection._get_label_field_type(field)
else:
field_type = _get_single_label_field_type(sample_collection, field)
dataset = fod.Dataset(name, _patches=True)
dataset.media_type = fom.IMAGE
dataset.add_sample_field(
"sample_id", fof.ObjectIdField, db_field="_sample_id"
)
dataset.add_sample_field(
field, fof.EmbeddedDocumentField, embedded_doc_type=field_type
)
patches_view = _make_patches_view(
sample_collection, field, keep_label_lists=keep_label_lists
)
_write_samples(dataset, patches_view)
return dataset
def _get_single_label_field_type(sample_collection, field):
label_type = sample_collection._get_label_field_type(field)
if label_type not in _SINGLE_TYPES_MAP:
raise ValueError("Unsupported label field type %s" % label_type)
return _SINGLE_TYPES_MAP[label_type]
def make_evaluation_dataset(sample_collection, eval_key, name=None):
"""Creates a dataset based on the results of the evaluation with the given
key that contains one sample for each true positive, false positive, and
false negative example in the input collection, respectively.
True positive examples will result in samples with both their ground truth
and predicted fields populated, while false positive/negative examples will
only have one of their corresponding predicted/ground truth fields
populated, respectively.
If multiple predictions are matched to a ground truth object (e.g., if the
evaluation protocol includes a crowd attribute), then all matched
predictions will be stored in the single sample along with the ground truth
object.
The returned dataset will also have top-level ``type`` and ``iou`` fields
populated based on the evaluation results for that example, as well as a
``sample_id`` field recording the sample ID of the example, and a ``crowd``
field if the evaluation protocol defines a crowd attribute.
.. note::
The returned dataset will contain patches for the contents of the input
collection, which may differ from the view on which the ``eval_key``
evaluation was performed. This may exclude some labels that were
evaluated and/or include labels that were not evaluated.
If you would like to see patches for the exact view on which an
evaluation was performed, first call
:meth:`load_evaluation_view() <fiftyone.core.collections.SampleCollection.load_evaluation_view>`
to load the view and then convert to patches.
Args:
sample_collection: a
:class:`fiftyone.core.collections.SampleCollection`
eval_key: an evaluation key that corresponds to the evaluation of
ground truth/predicted fields that are of type
:class:`fiftyone.core.labels.Detections` or
:class:`fiftyone.core.labels.Polylines`
name (None): a name for the returned dataset
Returns:
a :class:`fiftyone.core.dataset.Dataset`
"""
# Parse evaluation info
eval_info = sample_collection.get_evaluation_info(eval_key)
pred_field = eval_info.config.pred_field
gt_field = eval_info.config.gt_field
if hasattr(eval_info.config, "iscrowd"):
crowd_attr = eval_info.config.iscrowd
else:
crowd_attr = None
pred_type = sample_collection._get_label_field_type(pred_field)
gt_type = sample_collection._get_label_field_type(gt_field)
# Setup dataset with correct schema
dataset = fod.Dataset(name, _patches=True)
dataset.media_type = fom.IMAGE
dataset.add_sample_field(
pred_field, fof.EmbeddedDocumentField, embedded_doc_type=pred_type
)
dataset.add_sample_field(
gt_field, fof.EmbeddedDocumentField, embedded_doc_type=gt_type
)
dataset.add_sample_field(
"sample_id", fof.ObjectIdField, db_field="_sample_id"
)
dataset.add_sample_field("type", fof.StringField)
dataset.add_sample_field("iou", fof.FloatField)
if crowd_attr is not None:
dataset.add_sample_field("crowd", fof.BooleanField)
# Add ground truth patches
gt_view = _make_eval_view(
sample_collection, eval_key, gt_field, crowd_attr=crowd_attr
)
_write_samples(dataset, gt_view)
# Merge matched predictions
_merge_matched_labels(dataset, sample_collection, eval_key, pred_field)
# Add unmatched predictions
unmatched_pred_view = _make_eval_view(
sample_collection, eval_key, pred_field, skip_matched=True
)
_add_samples(dataset, unmatched_pred_view)
return dataset
def _make_patches_view(sample_collection, field, keep_label_lists=False):
if sample_collection._is_frames:
raise ValueError(
"Creating patches views into frame views is not yet supported"
)
if sample_collection._is_frame_field(field):
raise ValueError(
"Frame label patches cannot be directly extracted; you must first "
"convert your video dataset to frames via `to_frames()`"
)
label_type = sample_collection._get_label_field_type(field)
if issubclass(label_type, _PATCHES_TYPES):
list_field = field + "." + label_type._LABEL_LIST_FIELD
else:
raise ValueError(
"Invalid label field type %s. Extracting patches is only "
"supported for the following types: %s"
% (label_type, _PATCHES_TYPES)
)
pipeline = [
{
"$project": {
"_id": True,
"_sample_id": "$_id",
"_media_type": True,
"filepath": True,
"metadata": True,
"tags": True,
field + "._cls": True,
list_field: True,
}
},
{"$unwind": "$" + list_field},
{"$set": {"_rand": {"$rand": {}}}},
{"$set": {"_id": "$" + list_field + "._id"}},
]
if keep_label_lists:
pipeline.append({"$set": {list_field: ["$" + list_field]}})
else:
pipeline.append({"$set": {field: "$" + list_field}})
return sample_collection.mongo(pipeline)
def _make_eval_view(
sample_collection, eval_key, field, skip_matched=False, crowd_attr=None
):
eval_type = field + "." + eval_key
eval_id = field + "." + eval_key + "_id"
eval_iou = field + "." + eval_key + "_iou"
view = _make_patches_view(sample_collection, field)
if skip_matched:
view = view.mongo(
[
{
"$match": {
"$expr": {
"$or": [
{"$eq": ["$" + eval_id, _NO_MATCH_ID]},
{"$not": {"$gt": ["$" + eval_id, None]}},
]
}
}
}
]
)
view = view.mongo(
[{"$set": {"type": "$" + eval_type, "iou": "$" + eval_iou}}]
)
if crowd_attr is not None:
crowd_path1 = "$" + field + "." + crowd_attr
# @todo remove Attributes usage
crowd_path2 = "$" + field + ".attributes." + crowd_attr + ".value"
view = view.mongo(
[
{
"$set": {
"crowd": {
"$cond": {
"if": {"$gt": [crowd_path1, None]},
"then": {"$toBool": crowd_path1},
"else": {
"$cond": {
"if": {"$gt": [crowd_path2, None]},
"then": {"$toBool": crowd_path2},
"else": None,
}
},
}
}
}
}
]
)
return _upgrade_labels(view, field)
def _upgrade_labels(view, field):
tmp_field = "_" + field
label_type = view._get_label_field_type(field)
return view.mongo(
[
{"$set": {tmp_field: "$" + field}},
{"$unset": field},
{
"$set": {
field: {
"_cls": label_type.__name__,
label_type._LABEL_LIST_FIELD: ["$" + tmp_field],
}
}
},
{"$unset": tmp_field},
]
)
def _merge_matched_labels(dataset, src_collection, eval_key, field):
field_type = src_collection._get_label_field_type(field)
list_field = field + "." + field_type._LABEL_LIST_FIELD
eval_id = eval_key + "_id"
eval_field = list_field + "." + eval_id
pipeline = src_collection._pipeline(detach_frames=True)
pipeline.extend(
[
{"$project": {list_field: True}},
{"$unwind": "$" + list_field},
{
"$match": {
"$expr": {
"$and": [
{"$gt": ["$" + eval_field, None]},
{"$ne": ["$" + eval_field, _NO_MATCH_ID]},
]
}
}
},
{
"$group": {
"_id": {"$toObjectId": "$" + eval_field},
"_labels": {"$push": "$" + list_field},
}
},
{
"$project": {
field: {
"_cls": field_type.__name__,
field_type._LABEL_LIST_FIELD: "$_labels",
}
},
},
{
"$merge": {
"into": dataset._sample_collection_name,
"on": "_id",
"whenMatched": "merge",
"whenNotMatched": "discard",
}
},
]
)
src_collection._dataset._aggregate(pipeline=pipeline, attach_frames=False)
def _write_samples(dataset, src_collection):
pipeline = src_collection._pipeline(detach_frames=True)
pipeline.append({"$out": dataset._sample_collection_name})
src_collection._dataset._aggregate(pipeline=pipeline, attach_frames=False)
def _add_samples(dataset, src_collection):
pipeline = src_collection._pipeline(detach_frames=True)
pipeline.append(
{
"$merge": {
"into": dataset._sample_collection_name,
"on": "_id",
"whenMatched": "keepExisting",
"whenNotMatched": "insert",
}
}
)
src_collection._dataset._aggregate(pipeline=pipeline, attach_frames=False)
| 32.186755 | 104 | 0.607876 | 2,771 | 24,301 | 5.044388 | 0.134608 | 0.024038 | 0.024324 | 0.012877 | 0.417012 | 0.376592 | 0.338389 | 0.298397 | 0.267706 | 0.219774 | 0 | 0.001421 | 0.305008 | 24,301 | 754 | 105 | 32.229443 | 0.826218 | 0.287848 | 0 | 0.259009 | 0 | 0 | 0.057025 | 0 | 0 | 0 | 0 | 0.001326 | 0 | 1 | 0.087838 | false | 0.004505 | 0.02027 | 0.027027 | 0.18018 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fe14a23d28223212d47c4b4e15846d9b001de45c | 6,153 | py | Python | src/zope/app/debug/debug.py | zopefoundation/zope.app.debug | 4f31e98f6a633f089bf132dd55cb3ead0270887b | ["ZPL-2.1"] | null | null | null | src/zope/app/debug/debug.py | zopefoundation/zope.app.debug | 4f31e98f6a633f089bf132dd55cb3ead0270887b | ["ZPL-2.1"] | 2 | 2017-05-08T10:46:20.000Z | 2021-02-02T07:16:49.000Z | src/zope/app/debug/debug.py | zopefoundation/zope.app.debug | 4f31e98f6a633f089bf132dd55cb3ead0270887b | ["ZPL-2.1"] | 1 | 2015-04-03T07:36:10.000Z | 2015-04-03T07:36:10.000Z |
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Code to initialize the application server
"""
from __future__ import print_function
__docformat__ = 'restructuredtext'
import base64
import time
import sys
from pdb import Pdb
from io import BytesIO
from zope.publisher.publish import publish as _publish, debug_call
from zope.publisher.browser import TestRequest, setDefaultSkin
from zope.app.publication.browser import BrowserPublication
from zope.app.appsetup import config, database
try:
from time import process_time as time_process_time # pragma: PY3
except ImportError:
from time import clock as time_process_time # pragma: PY2
try:
import urllib.parse as urllib # pragma: PY3
except ImportError:
import urllib # pragma: PY2
try:
text_type = unicode # pragma: PY2
except NameError:
text_type = str # pragma: PY3
class Debugger(object):
pdb = Pdb
def __init__(self, db=None, config_file=None, stdout=None):
if db is None and config_file is None:
db = 'Data.fs'
config_file = 'site.zcml'
if config_file is not None:
config(config_file)
self.db = database(db)
self.stdout = stdout
@classmethod
def fromDatabase(cls, db):
inst = cls.__new__(cls)
inst.db = db
return inst
def root(self):
"""Get the top-level application object
The object returned is connected to an open database connection.
"""
from zope.app.publication.zopepublication import ZopePublication
return self.db.open().root()[ZopePublication.root_name]
def _request(self,
path='/', stdin='', basic=None,
environment=None, form=None,
request=None, publication=BrowserPublication):
"""Create a request
"""
env = {}
if isinstance(stdin, text_type):
stdin = stdin.encode("utf-8")
if isinstance(stdin, bytes):
stdin = BytesIO(stdin)
p = path.split('?')
if len(p) == 1:
env['PATH_INFO'] = p[0]
elif len(p) == 2:
env['PATH_INFO'], env['QUERY_STRING'] = p
else:
raise ValueError("Too many ?s in path", path)
env['PATH_INFO'] = urllib.unquote(env['PATH_INFO'])
if environment is not None:
env.update(environment)
if basic:
basic_bytes = basic.encode('ascii') if not isinstance(
basic, bytes) else basic
basic64_bytes = base64.b64encode(basic_bytes)
basic64 = basic64_bytes.decode('ascii').strip()
env['HTTP_AUTHORIZATION'] = "Basic %s" % basic64
pub = publication(self.db)
if request is not None:
request = request(stdin, env)
else:
request = TestRequest(stdin, env)
setDefaultSkin(request)
request.setPublication(pub)
if form:
request.form.update(form)
return request
def publish(self, path='/', stdin='', *args, **kw):
t, pt = time.time(), time_process_time()
request = self._request(path, stdin, *args, **kw)
# agroszer: 2008.feb.1.: if a retry occurs in the publisher,
# the response will be LOST, so we must accept the returned request
request = _publish(request)
getStatus = getattr(request.response, 'getStatus', lambda: None)
headers = sorted(request.response.getHeaders())
print(
'Status %s\r\n%s\r\n\r\n%s' % (
request.response.getStatusString(),
'\r\n'.join([("%s: %s" % h) for h in headers]),
request.response.consumeBody(),
), file=self.stdout or sys.stdout)
return time.time() - t, time_process_time() - pt, getStatus()
def run(self, *args, **kw):
t, pt = time.time(), time_process_time()
request = self._request(*args, **kw)
# agroszer: 2008.feb.1.: if a retry occurs in the publisher,
# the response will be LOST, so we must accept the returned request
request = _publish(request, handle_errors=False)
getStatus = getattr(request.response, 'getStatus', lambda: None)
return time.time() - t, time_process_time() - pt, getStatus()
def debug(self, *args, **kw):
out = self.stdout or sys.stdout
class ZopePdb(self.Pdb):
done_pub = False
done_ob = False
def do_pub(self, arg):
if self.done_pub:
print('pub already done.', file=out)
return
self.do_s('')
self.do_s('')
self.do_c('')
self.done_pub = True
def do_ob(self, arg):
if self.done_ob:
print('ob already done.', file=out)
return
self.do_pub('')
self.do_c('')
self.done_ob = True
dbg = ZopePdb()
request = self._request(*args, **kw)
fbreak(dbg, _publish)
fbreak(dbg, debug_call)
print('* Type c<cr> to jump to published object call.',
file=out)
dbg.runcall(_publish, request)
return dbg
def getlineno(code):
return code.co_firstlineno
def fbreak(db, meth):
try:
meth = meth.__func__
except AttributeError:
pass
code = meth.__code__
lineno = getlineno(code)
filename = code.co_filename
db.set_break(filename, lineno)
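# A minimal interactive sketch of the Debugger above (the database and ZCML
# file names are the defaults assumed by __init__, not verified here):
#   dbg = Debugger(db='Data.fs', config_file='site.zcml')
#   app = dbg.root()                       # root application object
#   wall, cpu, status = dbg.publish('/')   # publish '/' and print the response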
| 29.868932 | 78 | 0.57988 | 725 | 6,153 | 4.804138 | 0.310345 | 0.022107 | 0.02584 | 0.012633 | 0.223945 | 0.171117 | 0.171117 | 0.125179 | 0.125179 | 0.125179 | 0 | 0.009238 | 0.296278 | 6,153 | 205 | 79 | 30.014634 | 0.79515 | 0.154234 | 0 | 0.164179 | 0 | 0 | 0.055077 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08209 | false | 0.007463 | 0.126866 | 0.007463 | 0.298507 | 0.037313 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fe1507ff94aad4e4172a286172e136314812d8b6 | 1,855 | py | Python | transfer_learning.py | terryli710/SIIM-ACR-Pneumothorax-Classification | 8b278a9885b71c919d7064b2df42863b53f7adf3 | ["MIT"] | null | null | null | transfer_learning.py | terryli710/SIIM-ACR-Pneumothorax-Classification | 8b278a9885b71c919d7064b2df42863b53f7adf3 | ["MIT"] | null | null | null | transfer_learning.py | terryli710/SIIM-ACR-Pneumothorax-Classification | 8b278a9885b71c919d7064b2df42863b53f7adf3 | ["MIT"] | 1 | 2020-05-14T06:16:12.000Z | 2020-05-14T06:16:12.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 22:42:54 2020
@author: mike
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import VGG16
from tensorflow.keras import layers
from sklearn.preprocessing import OneHotEncoder
from skimage.transform import resize
import matplotlib.pyplot as plt
train_data = np.load("train_data.npy")
# Each row of train_data is [label, 1024*1024 pixel values]; resize every
# grayscale image to 204x204 and copy it into all three channels so the
# input matches VGG16's expected RGB shape.
x_data = np.zeros((210, 204, 204, 3))
y_data = np.zeros(210)
for i in range(210):
img = train_data[i, 1:].reshape(1024, 1024)
img_resized = resize(img, (204, 204))
y_data[i] = train_data[i, 0]
x_data[i, :, :, 0] = img_resized.astype(int)
x_data[i, :, :, 1] = img_resized.astype(int)
x_data[i, :, :, 2] = img_resized.astype(int)
x_train, x_test, y_train, y_test = train_test_split(
x_data, y_data, test_size=0.2, random_state=42)
y_train = OneHotEncoder().fit_transform(y_train.reshape(-1,1)).toarray()
y_test = OneHotEncoder().fit_transform(y_test.reshape(-1,1)).toarray()
base_model = VGG16(include_top=False, weights='vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
input_shape=(204, 204, 3))
base_model.trainable = False
inputs = tf.keras.Input(shape=(204, 204, 3))
x = base_model(inputs)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(256, activation='relu')(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
outputs = tf.keras.layers.Dense(2, activation='softmax')(x)
model = keras.Model(inputs, outputs)
model.summary()
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),loss="binary_crossentropy",metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=16, epochs=5)
pred = model.predict(x_train)
score = model.evaluate(x_test, y_test, verbose=0)
print(score[0], score[1])
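# A possible fine-tuning follow-up (a sketch; the tiny learning rate and epoch
# count are assumptions, not part of the original script):
#   base_model.trainable = True
#   model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=1e-5),
#                 loss="binary_crossentropy", metrics=["accuracy"])
#   model.fit(x_train, y_train, batch_size=16, epochs=2)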
| 26.884058 | 117 | 0.725067 | 301 | 1,855 | 4.292359 | 0.38206 | 0.02322 | 0.040248 | 0.044118 | 0.111455 | 0.069659 | 0.0387 | 0 | 0 | 0 | 0 | 0.057917 | 0.125067 | 1,855 | 69 | 118 | 26.884058 | 0.738139 | 0.050674 | 0 | 0 | 0 | 0 | 0.059897 | 0.027952 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fe15525a101c45bc65c1049e9b6ece9e4cd29f69 | 2,158 | py | Python | core/tests/test_polyflow/test_workflows/test_hyperband.py | erexer/polyaxon | be14dae1ed56d568983388736bcdaf27a7baa4a4 | ["Apache-2.0"] | null | null | null | core/tests/test_polyflow/test_workflows/test_hyperband.py | erexer/polyaxon | be14dae1ed56d568983388736bcdaf27a7baa4a4 | ["Apache-2.0"] | null | null | null | core/tests/test_polyflow/test_workflows/test_hyperband.py | erexer/polyaxon | be14dae1ed56d568983388736bcdaf27a7baa4a4 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from marshmallow.exceptions import ValidationError
from tests.utils import BaseTestCase, assert_equal_dict
from polyaxon.polyflow.matrix import V1Hyperband
from polyaxon.polyflow.optimization import V1Optimization, V1OptimizationMetric
@pytest.mark.workflow_mark
class TestWorkflowV1Hyperbands(BaseTestCase):
def test_hyperband_config(self):
config_dict = {
"kind": "hyperband",
"maxIterations": 10,
"eta": 3,
"resource": {"name": "steps", "type": "int"},
"resume": False,
"metric": V1OptimizationMetric(
name="loss", optimization=V1Optimization.MINIMIZE
).to_dict(),
"params": {"lr": {"kind": "choice", "value": [[0.1], [0.9]]}},
}
config = V1Hyperband.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict)
# Raises for negative values
config_dict["maxIterations"] = 0
with self.assertRaises(ValidationError):
V1Hyperband.from_dict(config_dict)
config_dict["maxIterations"] = -0.5
with self.assertRaises(ValidationError):
V1Hyperband.from_dict(config_dict)
config_dict["maxIterations"] = 3
# Add numRuns percent
config_dict["eta"] = -0.5
with self.assertRaises(ValidationError):
V1Hyperband.from_dict(config_dict)
config_dict["eta"] = 2.9
config = V1Hyperband.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict)
| 35.377049 | 79 | 0.672845 | 252 | 2,158 | 5.642857 | 0.484127 | 0.091421 | 0.098453 | 0.087904 | 0.279887 | 0.279887 | 0.279887 | 0.279887 | 0.279887 | 0.279887 | 0 | 0.022754 | 0.226135 | 2,158 | 60 | 80 | 35.966667 | 0.828743 | 0.287766 | 0 | 0.294118 | 0 | 0 | 0.090132 | 0 | 0 | 0 | 0 | 0 | 0.176471 | 1 | 0.029412 | false | 0 | 0.147059 | 0 | 0.205882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fe185aaa73619017a36f547b25642264993ebd15 | 1,820 | py | Python | clickhouse_sqlalchemy/drivers/reflection.py | Fozar/clickhouse-sqlalchemy | 88fd630856655cc470430b365dce7e85516abf62 | ["MIT"] | null | null | null | clickhouse_sqlalchemy/drivers/reflection.py | Fozar/clickhouse-sqlalchemy | 88fd630856655cc470430b365dce7e85516abf62 | ["MIT"] | null | null | null | clickhouse_sqlalchemy/drivers/reflection.py | Fozar/clickhouse-sqlalchemy | 88fd630856655cc470430b365dce7e85516abf62 | ["MIT"] | null | null | null |
from sqlalchemy.engine import reflection
from clickhouse_sqlalchemy import Table, engines
class ClickHouseInspector(reflection.Inspector):
def reflect_table(self, table, *args, **kwargs):
# This check is necessary to support direct instantiation of
# `clickhouse_sqlalchemy.Table` and then reflection of it.
if not isinstance(table, Table):
table.metadata.remove(table)
ch_table = Table._make_from_standard(
table, _extend_on=kwargs.get('_extend_on')
)
else:
ch_table = table
super(ClickHouseInspector, self).reflect_table(
ch_table, *args, **kwargs
)
with self._operation_context() as conn:
schema = conn.schema_for_object(ch_table)
self._reflect_engine(ch_table.name, schema, ch_table)
def _reflect_engine(self, table_name, schema, table):
should_reflect = (
self.dialect.supports_engine_reflection and
self.dialect.engine_reflection
)
if not should_reflect:
return
engine_cls_by_name = {e.__name__: e for e in engines.__all__}
e = self.get_engine(table_name, schema=table.schema)
if not e:
raise ValueError("Cannot find engine for table '%s'" % table_name)
engine_cls = engine_cls_by_name.get(e['engine'])
if engine_cls is not None:
engine = engine_cls.reflect(table, **e)
engine._set_parent(table)
else:
table.engine = None
def get_engine(self, table_name, schema=None, **kw):
with self._operation_context() as conn:
return self.dialect.get_engine(
conn, table_name, schema=schema, info_cache=self.info_cache,
**kw
)
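# Hypothetical usage sketch (the DSN and table name are assumptions; uses
# SQLAlchemy 1.4-style reflection via autoload_with):
#   from sqlalchemy import create_engine, MetaData
#   engine = create_engine('clickhouse://localhost')
#   table = Table('my_table', MetaData(), autoload_with=engine)
#   print(table.engine)  # engine reflected by _reflect_engine, or None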
| 33.703704 | 78 | 0.621978 | 215 | 1,820 | 4.981395 | 0.325581 | 0.039216 | 0.070028 | 0.044818 | 0.102708 | 0.056022 | 0 | 0 | 0 | 0 | 0 | 0 | 0.295055 | 1,820 | 53 | 79 | 34.339623 | 0.834762 | 0.063187 | 0 | 0.1 | 0 | 0 | 0.02879 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075 | false | 0 | 0.05 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fe190819e431106bd53c08a681b3911ad9502e88 | 6,289 | py | Python | src/runner.py | samirsahoo007/Naive-Bayes-and-Decision-Tree-Classifiers | 619c5c0b17438d1014f7ca7e4ce13cc44c45de3c | ["MIT"] | 1 | 2020-11-17T16:09:13.000Z | 2020-11-17T16:09:13.000Z | src/runner.py | samirsahoo007/Naive-Bayes-and-Decision-Tree-Classifiers | 619c5c0b17438d1014f7ca7e4ce13cc44c45de3c | ["MIT"] | null | null | null | src/runner.py | samirsahoo007/Naive-Bayes-and-Decision-Tree-Classifiers | 619c5c0b17438d1014f7ca7e4ce13cc44c45de3c | ["MIT"] | 4 | 2019-07-05T02:03:02.000Z | 2022-01-21T22:12:16.000Z |
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ runner.py ]
# Synopsis [ main program that runs the 'Naive Bayes' and 'Decision Tree' training / testing ]
# Author [ Ting-Wei Liu (Andi611) ]
# Copyright [ Copyleft(c), NTUEE, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import csv
import argparse
import numpy as np
from data_loader import data_loader
from classifiers import naive_bayes_runner
from classifiers import decision_tree_runner
##################
# CONFIGURATIONS #
##################
def get_config():
parser = argparse.ArgumentParser(description='descrip_msg')
classifier = parser.add_argument_group('classifier')
classifier.add_argument('--classifier', type=str, default='', help='classifier to be specified by user')
classifier.add_argument('--naive_bayes', action='store_true', help='enable Naive Bayes classification mode')
classifier.add_argument('--decision_tree', action='store_true', help='enable Decision Tree classification mode')
mode_args = parser.add_argument_group('mode')
mode_args.add_argument('--search_opt', action='store_true', help='search for optimal parameters for classifiers')
mode_args.add_argument('--run_all', action='store_true', help='run all distribution assumption for the Naive Bayes classifier')
mode_args.add_argument('--visualize_tree', action='store_true', help='plot and visualize the Decision Tree classifier')
data_args = parser.add_argument_group('data')
data_args.add_argument('--data_news', action='store_true', help='Training and testing on the News dataset')
data_args.add_argument('--data_mushroom', action='store_true', help='Training and testing on the Mushroom dataset')
data_args.add_argument('--data_income', action='store_true', help='Training and testing on the Income dataset')
path_args = parser.add_argument_group('train_path')
path_args.add_argument('--train_path', type=str, default='', help='training path to be specified by user')
path_args.add_argument('--train_path_news', type=str, default='../data/news/news_train.csv', help='path to the News training dataset')
path_args.add_argument('--train_path_mushroom', type=str, default='../data/mushroom/mushroom_train.csv', help='path to the Mushroom training dataset')
path_args.add_argument('--train_path_income', type=str, default='../data/income/income_train.csv', help='path to the Income training dataset')
path_args = parser.add_argument_group('test_path')
path_args.add_argument('--test_path', type=str, default='', help='testing path to be specified by user')
path_args.add_argument('--test_path_news', type=str, default='../data/news/news_test.csv', help='path to the News testing dataset')
path_args.add_argument('--test_path_mushroom', type=str, default='../data/mushroom/mushroom_test.csv', help='path to the Mushroom testing dataset')
path_args.add_argument('--test_path_income', type=str, default='../data/income/income_test.csv', help='path to the Income testing dataset')
path_args = parser.add_argument_group('output_path')
path_args.add_argument('--output_path', type=str, default='../result/output.csv', help='path to save model prediction')
args = parser.parse_args()
args = error_handling(args)
return args
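# Example invocations (sketches; they rely on the default dataset paths above):
#   python runner.py --classifier N --data_news --run_all
#   python runner.py --decision_tree --data_mushroom --visualize_tree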
##################
# ERROR HANDLING #
##################
def error_handling(args):
if args.classifier != '':
args.naive_bayes = True if args.classifier == 'N' else False
args.decision_tree = True if args.classifier == 'D' else False
if args.naive_bayes and args.decision_tree:
raise AssertionError('Please choose only one classifier at a time, or specify the correct classifier!')
if sum([args.search_opt, args.run_all, args.visualize_tree]) > 1:
raise AssertionError('Please choose one mode at a time!')
if sum([args.data_news, args.data_mushroom, args.data_income]) > 1:
raise AssertionError('Please choose exactly one dataset at a time!')
if args.train_path != '' and args.test_path != '':
if not os.path.isfile(args.train_path) or not os.path.isfile(args.test_path):
raise AssertionError('The given file path is invalid!')
if args.data_news:
args.train_path_news = args.train_path
args.test_path_news = args.test_path
elif args.data_mushroom:
args.train_path_mushroom = args.train_path
args.test_path_mushroom = args.test_path
elif args.data_income:
args.train_path_income = args.train_path
args.test_path_income = args.test_path
else:
raise AssertionError('Must choose a dataset!')
return args
#################
# OUTPUT WRITER #
#################
def output_writer(path, result):
with open(path, 'w') as f:
file = csv.writer(f, delimiter=',', quotechar='\r')
for item in result:
file.writerow([int(item)])
print('Results have been successfully saved to: %s' % (path))
return True
########
# MAIN #
########
"""
main function
"""
def main():
args = get_config()
loader = data_loader(args)
#---fetch data---#
if args.data_news:
train_x, train_y, test_x, test_y = loader.fetch_news()
MODEL = 'NEWS'
elif args.data_mushroom:
train_x, train_y, test_x, test_y = loader.fetch_mushroom()
MODEL = 'MUSHROOM'
elif args.data_income:
train_x, train_y, test_x, test_y = loader.fetch_income() # -> test_y == None
MODEL = 'INCOME'
###############
# NAIVE BAYES #
###############
if args.naive_bayes:
#---construct model---#
naive_bayes = naive_bayes_runner(MODEL, train_x, train_y, test_x, test_y)
#---modes---#
if args.search_opt:
naive_bayes.search_alpha()
elif args.run_all:
naive_bayes.run_best_all()
else:
pred_y = naive_bayes.run_best()
output_writer(args.output_path, pred_y)
#################
# DECISION TREE #
#################
if args.decision_tree:
#---construct model---#
decision_tree = decision_tree_runner(MODEL, train_x, train_y, test_x, test_y)
#---modes---#
if args.search_opt:
decision_tree.search_max_depth()
elif args.visualize_tree:
decision_tree.visualize()
else:
pred_y = decision_tree.run_best()
output_writer(args.output_path, pred_y)
if __name__ == '__main__':
main()
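For reference, a few illustrative invocations of the script above. The file name `main.py` and the `--classifier` option string are assumptions inferred from the parser (only `args.classifier` is visible here); the remaining flags and default paths come straight from the argument definitions.

# Hypothetical usage, assuming the file is saved as main.py:
#   python main.py --naive_bayes --data_news                    # best Naive Bayes on News
#   python main.py --naive_bayes --data_mushroom --run_all      # all distribution assumptions
#   python main.py --decision_tree --data_income --search_opt   # parameter search for the tree
#   python main.py --classifier D --data_news --visualize_tree  # 'D' selects the Decision Tree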
| 38.582822
| 151
| 0.690253
| 863
| 6,289
| 4.791425
| 0.181924
| 0.063845
| 0.054414
| 0.041354
| 0.441112
| 0.368077
| 0.260701
| 0.213543
| 0.118742
| 0.069891
| 0
| 0.000727
| 0.125139
| 6,289
| 162
| 152
| 38.820988
| 0.750818
| 0.084274
| 0
| 0.15
| 0
| 0
| 0.295801
| 0.037904
| 0
| 0
| 0
| 0
| 0.05
| 1
| 0.04
| false
| 0
| 0.07
| 0
| 0.14
| 0.01
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe195c652a959304ac79843bfd7f33439351fd89
| 7,393
|
py
|
Python
|
igibson/metrics/agent.py
|
Nick-AhSen/iGibson
|
c6854f11eec5d935fa3ef3d6d4852c6571beab4b
|
[
"MIT"
] | null | null | null |
igibson/metrics/agent.py
|
Nick-AhSen/iGibson
|
c6854f11eec5d935fa3ef3d6d4852c6571beab4b
|
[
"MIT"
] | null | null | null |
igibson/metrics/agent.py
|
Nick-AhSen/iGibson
|
c6854f11eec5d935fa3ef3d6d4852c6571beab4b
|
[
"MIT"
] | null | null | null |
import copy
import numpy as np
import pybullet as p
from igibson.metrics.metric_base import MetricBase
class BehaviorRobotMetric(MetricBase):
def __init__(self):
self.initialized = False
self.state_cache = {}
self.next_state_cache = {}
self.agent_pos = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.agent_grasping = {part: [] for part in ["left_hand", "right_hand"]}
self.agent_local_pos = {part: [] for part in ["left_hand", "right_hand"]}
self.agent_reset = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.delta_agent_work = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.delta_agent_distance = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.delta_agent_grasp_distance = {part: [] for part in ["left_hand", "right_hand"]}
self.clip = 0.2
def step_callback(self, igbhvr_act_inst, _):
robot = igbhvr_act_inst.simulator.robots[0]
agent_work = {part: 0 for part in ["left_hand", "right_hand", "body"]}
agent_distance = {part: 0 for part in ["left_hand", "right_hand", "body"]}
for part in ["left_hand", "right_hand", "body"]:
self.next_state_cache[part] = {
"position": np.array(p.getBasePositionAndOrientation(robot.parts[part].get_body_id())[0]),
}
if not self.initialized:
self.state_cache = copy.deepcopy(self.next_state_cache)
self.initialized = True
if robot.action[19] > 0 and robot.action[27] > 0:
self.agent_reset["left_hand"].append(True)
self.agent_reset["right_hand"].append(True)
self.agent_reset["body"].append(True)
        elif robot.action[19] > 0:
self.agent_reset["left_hand"].append(True)
self.agent_reset["right_hand"].append(False)
self.agent_reset["body"].append(True)
elif robot.action[27] > 0:
self.agent_reset["left_hand"].append(False)
self.agent_reset["right_hand"].append(True)
self.agent_reset["body"].append(True)
else:
self.agent_reset["left_hand"].append(False)
self.agent_reset["right_hand"].append(False)
self.agent_reset["body"].append(False)
for part in self.state_cache:
delta_pos = np.linalg.norm(self.next_state_cache[part]["position"] - self.state_cache[part]["position"])
self.agent_pos[part].append(list(self.state_cache[part]["position"]))
# Exclude agent teleports
delta_pos = np.clip(delta_pos, -self.clip, self.clip)
if robot.parts[part].movement_cid is None:
force = 0
work = 0
else:
force = p.getConstraintState(robot.parts[part].movement_cid)
work = np.abs((delta_pos * np.linalg.norm(force)))
distance = np.abs(delta_pos)
if part in ["left_hand", "right_hand"]:
self.agent_local_pos[part].append(list(robot.parts[part].get_local_position_orientation()[0]))
if part in ["left_hand", "right_hand"] and (
len(p.getContactPoints(robot.parts[part].get_body_id())) > 0
or robot.parts[part].object_in_hand is not None
):
self.delta_agent_grasp_distance[part].append(distance)
self.agent_grasping[part].append(True)
elif part in ["left_hand", "right_hand"]:
self.delta_agent_grasp_distance[part].append(0)
self.agent_grasping[part].append(False)
agent_work[part] = work
agent_distance[part] = distance
self.delta_agent_work[part].append(work)
self.delta_agent_distance[part].append(distance)
self.state_cache = copy.deepcopy(self.next_state_cache)
def gather_results(self):
return {
"agent_distance": {
"timestep": self.delta_agent_distance,
},
"grasp_distance": {
"timestep": self.delta_agent_grasp_distance,
},
"work": {
"timestep": self.delta_agent_work,
},
"pos": {
"timestep": self.agent_pos,
},
"local_pos": {
"timestep": self.agent_local_pos,
},
"grasping": {
"timestep": self.agent_grasping,
},
"reset": {
"timestep": self.agent_reset,
},
}
class FetchRobotMetric(MetricBase):
def __init__(self):
self.initialized = False
self.state_cache = {}
self.next_state_cache = {}
self.agent_pos = {part: [] for part in ["gripper", "body"]}
self.agent_grasping = {part: [] for part in ["gripper"]}
self.agent_local_pos = {part: [] for part in ["gripper"]}
self.delta_agent_distance = {part: [] for part in ["gripper", "body"]}
self.delta_agent_grasp_distance = {part: [] for part in ["gripper"]}
self.clip = 0.2
def step_callback(self, igbhvr_act_inst, _):
robot = igbhvr_act_inst.simulator.robots[0]
agent_distance = {part: 0 for part in self.agent_pos}
self.next_state_cache = {
"gripper": {"position": robot.get_end_effector_position()},
"body": {"position": robot.get_position()},
}
if not self.initialized:
self.state_cache = copy.deepcopy(self.next_state_cache)
self.initialized = True
self.agent_pos["body"].append(list(self.state_cache["body"]["position"]))
delta_pos = np.linalg.norm(
np.array(self.next_state_cache["body"]["position"]) - self.state_cache["body"]["position"]
)
distance = np.abs(delta_pos)
self.delta_agent_distance["body"].append(distance)
self.agent_pos["gripper"].append(list(self.state_cache["gripper"]["position"]))
delta_pos = np.linalg.norm(
self.next_state_cache["gripper"]["position"] - self.state_cache["gripper"]["position"]
)
gripper_distance = np.abs(delta_pos)
self.delta_agent_distance["gripper"].append(gripper_distance)
self.agent_local_pos["gripper"].append(list(robot.get_relative_eef_position()))
contacts = p.getContactPoints(bodyA=robot.robot_ids[0], linkIndexA=robot.eef_link_id)
if len(contacts) > 0:
self.delta_agent_grasp_distance["gripper"].append(gripper_distance)
self.agent_grasping["gripper"].append(True)
else:
self.delta_agent_grasp_distance["gripper"].append(0)
self.agent_grasping["gripper"].append(False)
self.state_cache = copy.deepcopy(self.next_state_cache)
def gather_results(self):
return {
"agent_distance": {
"timestep": self.delta_agent_distance,
},
"grasp_distance": {
"timestep": self.delta_agent_grasp_distance,
},
"pos": {
"timestep": self.agent_pos,
},
"local_pos": {
"timestep": self.agent_local_pos,
},
"grasping": {
"timestep": self.agent_grasping,
},
}
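Downstream consumers typically reduce the per-timestep deltas returned by `gather_results`; a minimal self-contained sketch (the numbers are invented) of turning them into totals:

import numpy as np

# Hypothetical dict in the shape FetchRobotMetric.gather_results() returns.
results = {
    "agent_distance": {
        "timestep": {
            "gripper": [0.01, 0.02, 0.015],
            "body": [0.0, 0.005, 0.0],
        },
    },
}
# Sum each part's per-timestep deltas into a total traveled distance.
total_distance = {
    part: float(np.sum(deltas))
    for part, deltas in results["agent_distance"]["timestep"].items()
}
print(total_distance)  # approximately {'gripper': 0.045, 'body': 0.005}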
| 38.305699
| 116
| 0.581631
| 857
| 7,393
| 4.75846
| 0.115519
| 0.079451
| 0.061795
| 0.04463
| 0.78102
| 0.670181
| 0.626533
| 0.537764
| 0.510299
| 0.456106
| 0
| 0.005694
| 0.287299
| 7,393
| 192
| 117
| 38.505208
| 0.768267
| 0.003111
| 0
| 0.412903
| 0
| 0
| 0.109663
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03871
| false
| 0
| 0.025806
| 0.012903
| 0.090323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe1c00d5c2481798d64766027364e0e668d8c7bc
| 59,866
|
py
|
Python
|
src/ttkbootstrap/dialogs/dialogs.py
|
MrJaatt/ttkbootstrap
|
4e837d64859e5a230ef0500faddbb2c384f5b9d4
|
[
"MIT"
] | 1
|
2022-01-28T09:37:32.000Z
|
2022-01-28T09:37:32.000Z
|
src/ttkbootstrap/dialogs/dialogs.py
|
MrJaatt/ttkbootstrap
|
4e837d64859e5a230ef0500faddbb2c384f5b9d4
|
[
"MIT"
] | null | null | null |
src/ttkbootstrap/dialogs/dialogs.py
|
MrJaatt/ttkbootstrap
|
4e837d64859e5a230ef0500faddbb2c384f5b9d4
|
[
"MIT"
] | null | null | null |
"""
This module contains various dialog base classes that can be
used to create custom dialogs for the end user.
These classes serve as the basis for the pre-defined static helper
methods in the `Messagebox`, and `Querybox` container classes.
"""
import calendar
import textwrap
from datetime import datetime
from tkinter import font
import ttkbootstrap as ttk
from ttkbootstrap import utility
from ttkbootstrap.icons import Icon
from ttkbootstrap.constants import *
from tkinter import BaseWidget
from ttkbootstrap.localization import MessageCatalog
class Dialog(BaseWidget):
"""A simple dialog base class."""
def __init__(self, parent=None, title="", alert=False):
"""
Parameters:
parent (Widget):
Makes the window the logical parent of the message box.
The messagebox is displayed on top of its parent window.
title (str):
The string displayed as the title of the message box.
This option is ignored on Mac OS X, where platform
guidelines forbid the use of a title on this kind of
dialog.
alert (bool):
Ring the display's bell when the dialog is shown.
"""
BaseWidget._setup(self, parent, {})
self._winsys = self.master.tk.call("tk", "windowingsystem")
self._toplevel = None
self._title = title or " "
self._result = None
self._alert = alert
self._initial_focus = None
def _locate(self):
toplevel = self._toplevel
master = toplevel.master
screen_height = toplevel.winfo_screenheight()
screen_width = toplevel.winfo_screenwidth()
toplevel.update_idletasks()
if master.winfo_viewable():
m_width = master.winfo_width()
m_height = master.winfo_height()
m_x = master.winfo_rootx()
m_y = master.winfo_rooty()
else:
m_width = screen_width
m_height = screen_height
m_x = m_y = 0
w_width = toplevel.winfo_reqwidth()
w_height = toplevel.winfo_reqheight()
x = int(m_x + (m_width - w_width) * 0.45)
y = int(m_y + (m_height - w_height) * 0.3)
if x + w_width > screen_width:
x = screen_width - w_width
elif x < 0:
x = 0
if y + w_height > screen_height:
y = screen_height - w_height
elif y < 0:
y = 0
toplevel.geometry(f"+{x}+{y}")
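        # A worked example with hypothetical numbers: a visible parent at
        # (100, 100) sized 800x600 and a dialog requesting 300x200 gives
        #   x = int(100 + (800 - 300) * 0.45) = 325
        #   y = int(100 + (600 - 200) * 0.3)  = 220
        # so the toplevel is placed with geometry "+325+220".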
def show(self):
"""Show the popup dialog"""
self._result = None
self.build()
self._locate()
self._toplevel.deiconify()
if self._alert:
self._toplevel.bell()
if self._initial_focus:
self._initial_focus.focus_force()
self._toplevel.grab_set()
self._toplevel.wait_window()
def create_body(self, master):
"""Create the dialog body.
This method should be overridden and is called by the `build`
method. Set the `self._initial_focus` for the widget that
should receive the initial focus.
Parameters:
master (Widget):
The parent widget.
"""
raise NotImplementedError
def create_buttonbox(self, master):
"""Create the dialog button box.
This method should be overridden and is called by the `build`
method. Set the `self._initial_focus` for the button that
        should receive the initial focus.
Parameters:
master (Widget):
The parent widget.
"""
raise NotImplementedError
def build(self):
"""Build the dialog from settings"""
        # setup toplevel based on windowing system
if self._winsys == "win32":
self._toplevel = ttk.Toplevel(
transient=self.master,
title=self._title,
resizable=(0, 0),
minsize=(250, 15),
iconify=True,
)
else:
self._toplevel = ttk.Toplevel(
transient=self.master,
title=self._title,
resizable=(0, 0),
windowtype="dialog",
iconify=True,
)
self._toplevel.withdraw() # reset the iconify state
# bind <Escape> event to window close
self._toplevel.bind("<Escape>", lambda _: self._toplevel.destroy())
# set position of popup from parent window
#self._locate()
# create widgets
self.create_body(self._toplevel)
self.create_buttonbox(self._toplevel)
# update the window before showing
self._toplevel.update_idletasks()
@property
def result(self):
"""Returns the result of the dialog."""
return self._result
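# A minimal sketch of subclassing Dialog (hypothetical example class; a
# display is required to actually show it):
class HelloDialog(Dialog):
    """Example dialog with a label body and a single Close button."""

    def create_body(self, master):
        label = ttk.Label(master, text="Hello!", padding=10)
        label.pack(fill=X, expand=True)
        self._initial_focus = label

    def create_buttonbox(self, master):
        btn = ttk.Button(
            master, text="Close", command=lambda: self._toplevel.destroy()
        )
        btn.pack(pady=10)
        self._initial_focus = btn

# Usage: app = ttk.Window(); HelloDialog(app, title="Demo").show()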
class MessageDialog(Dialog):
"""A simple modal dialog class that can be used to build simple
message dialogs.
Displays a message and a set of buttons. Each of the buttons in the
message window is identified by a unique symbolic name. After the
    message window is popped up, the message box waits for the user to
select one of the buttons. Then it returns the symbolic name of the
selected button. Use a `Toplevel` widget for more advanced modal
dialog designs.
"""
def __init__(
self,
message,
title=" ",
buttons=None,
command=None,
width=50,
parent=None,
alert=False,
default=None,
padding=(20, 20),
icon=None,
**kwargs
):
"""
Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the message box.
This option is ignored on Mac OS X, where platform
guidelines forbid the use of a title on this kind of
dialog.
buttons (List[str]):
A list of buttons to appear at the bottom of the popup
messagebox. The buttons can be a list of strings which
will define the symbolic name and the button text.
`['OK', 'Cancel']`. Alternatively, you can assign a
bootstyle to each button by using the colon to separate the
button text and the bootstyle. If no colon is found, then
the style is set to 'primary' by default.
`['OK:success','Cancel:danger']`.
command (Tuple[Callable, str]):
The function to invoke when the user closes the dialog.
The actual command is a tuple that consists of the
function to call and the symbolic name of the button that
closes the dialog.
width (int):
The maximum number of characters per line in the message.
If the text stretches beyond the limit, the line will break
at the word.
parent (Widget):
Makes the window the logical parent of the message box.
The messagebox is displayed on top of its parent window.
alert (bool):
Ring the display's bell when the dialog is shown.
default (str):
The symbolic name of the default button. The default
                button is invoked when the <Return> key is pressed.
                If no default is provided, the right-most button in the
                button list will be set as the default.
padding (Union[int, Tuple[int]]):
The amount of space between the border and the widget
contents.
icon (str):
An image path, path-like object or image data to be
displayed to the left of the text.
**kwargs (Dict):
Other optional keyword arguments.
Example:
```python
root = tk.Tk()
md = MessageDialog("Displays a message with buttons.")
md.show()
```
"""
super().__init__(parent, title, alert)
self._message = message
self._command = command
self._width = width
self._alert = alert
        self._default = default
self._padding = padding
self._icon = icon
self._localize = kwargs.get('localize')
if buttons is None:
self._buttons = [
f"{MessageCatalog.translate('Cancel')}:secondary",
f"{MessageCatalog.translate('OK')}:primary"
]
else:
self._buttons = buttons
def create_body(self, master):
"""Overrides the parent method; adds the message section."""
container = ttk.Frame(master, padding=self._padding)
if self._icon:
try:
# assume this is image data
self._img = ttk.PhotoImage(data=self._icon)
icon_lbl = ttk.Label(container, image=self._img)
icon_lbl.pack(side=LEFT, padx=5)
            except Exception:
try:
# assume this is a file path
self._img = ttk.PhotoImage(file=self._icon)
icon_lbl = ttk.Label(container, image=self._img)
icon_lbl.pack(side=LEFT, padx=5)
                except Exception:
# icon is neither data nor a valid file path
print('MessageDialog icon is invalid')
if self._message:
for msg in self._message.split("\n"):
message = "\n".join(textwrap.wrap(msg, width=self._width))
message_label = ttk.Label(container, text=message)
message_label.pack(pady=(0, 3), fill=X, anchor=N)
container.pack(fill=X, expand=True)
def create_buttonbox(self, master):
"""Overrides the parent method; adds the message buttonbox"""
frame = ttk.Frame(master, padding=(5, 5))
button_list = []
for i, button in enumerate(self._buttons[::-1]):
cnf = button.split(":")
if len(cnf) == 2:
text, bootstyle = cnf
else:
text = cnf[0]
bootstyle = "secondary"
            if self._localize:
text = MessageCatalog.translate(text)
btn = ttk.Button(frame, bootstyle=bootstyle, text=text)
btn.bind("<Return>", lambda _: btn.invoke())
btn.configure(command=lambda b=btn: self.on_button_press(b))
btn.pack(padx=2, side=RIGHT)
btn.lower() # set focus traversal left-to-right
button_list.append(btn)
if self._default is not None and text == self._default:
self._initial_focus = btn
elif self._default is None and i == 0:
self._initial_focus = btn
# bind default button to return key press and set focus
self._toplevel.bind("<Return>", lambda _, b=btn: b.invoke())
self._toplevel.bind("<KP_Enter>", lambda _, b=btn: b.invoke())
ttk.Separator(self._toplevel).pack(fill=X)
frame.pack(side=BOTTOM, fill=X, anchor=S)
if not self._initial_focus:
self._initial_focus = button_list[0]
def on_button_press(self, button):
"""Save result, destroy the toplevel, and execute command."""
self._result = button["text"]
command = self._command
if command is not None:
command()
self._toplevel.destroy()
def show(self):
"""Create and display the popup messagebox."""
super().show()
class QueryDialog(Dialog):
"""A simple modal dialog class that can be used to build simple
    data input dialogs. Displays a prompt, an input box, and a set of
buttons. Additional data manipulation can be performed on the
user input post-hoc by overriding the `apply` method.
Use a `Toplevel` widget for more advanced modal dialog designs.
"""
def __init__(
self,
prompt,
title=" ",
initialvalue="",
minvalue=None,
maxvalue=None,
width=65,
datatype=str,
padding=(20, 20),
parent=None,
):
"""
Parameters:
prompt (str):
A message to display in the message box above the entry
widget.
title (str):
The string displayed as the title of the message box.
This option is ignored on Mac OS X, where platform
guidelines forbid the use of a title on this kind of
dialog.
initialvalue (Any):
The initial value in the entry widget.
minvalue (Any):
The minimum allowed value. Only valid for int and float
data types.
maxvalue (Any):
The maximum allowed value. Only valid for int and float
data types.
width (int):
The maximum number of characters per line in the
message. If the text stretches beyond the limit, the
line will break at the word.
parent (Widget):
Makes the window the logical parent of the message box.
The messagebox is displayed on top of its parent
window.
padding (Union[int, Tuple[int]]):
The amount of space between the border and the widget
contents.
datatype (Union[int, str, float]):
The data type used to validate the entry value.
"""
super().__init__(parent, title)
self._prompt = prompt
self._initialvalue = initialvalue
self._minvalue = minvalue
self._maxvalue = maxvalue
self._width = width
self._datatype = datatype
self._padding = padding
self._result = None
def create_body(self, master):
"""Overrides the parent method; adds the message and input
section."""
frame = ttk.Frame(master, padding=self._padding)
if self._prompt:
for p in self._prompt.split("\n"):
prompt = "\n".join(textwrap.wrap(p, width=self._width))
prompt_label = ttk.Label(frame, text=prompt)
prompt_label.pack(pady=(0, 5), fill=X, anchor=N)
entry = ttk.Entry(master=frame)
entry.insert(END, self._initialvalue)
entry.pack(pady=(0, 5), fill=X)
entry.bind("<Return>", self.on_submit)
entry.bind("<KP_Enter>", self.on_submit)
entry.bind("<Escape>", self.on_cancel)
frame.pack(fill=X, expand=True)
self._initial_focus = entry
def create_buttonbox(self, master):
"""Overrides the parent method; adds the message buttonbox"""
frame = ttk.Frame(master, padding=(5, 10))
submit = ttk.Button(
master=frame,
bootstyle="primary",
text=MessageCatalog.translate("Submit"),
command=self.on_submit,
)
submit.pack(padx=5, side=RIGHT)
submit.lower() # set focus traversal left-to-right
cancel = ttk.Button(
master=frame,
bootstyle="secondary",
text=MessageCatalog.translate("Cancel"),
command=self.on_cancel,
)
cancel.pack(padx=5, side=RIGHT)
cancel.lower() # set focus traversal left-to-right
ttk.Separator(self._toplevel).pack(fill=X)
frame.pack(side=BOTTOM, fill=X, anchor=S)
def on_submit(self, *_):
"""Save result, destroy the toplevel, and apply any post-hoc
data manipulations."""
self._result = self._initial_focus.get()
valid_result = self.validate()
if not valid_result:
            return  # keep the toplevel open on an invalid response
self._toplevel.destroy()
self.apply()
def on_cancel(self, *_):
"""Close the toplevel and return empty."""
self._toplevel.destroy()
return
def validate(self):
"""Validate the data
This method is called automatically to validate the data before
the dialog is destroyed. Can be subclassed and overridden.
"""
# no default checks required for string data types
if self._datatype not in [float, int, complex]:
return True
# convert result to appropriate data type
try:
self._result = self._datatype(self._result)
except ValueError:
msg = MessageCatalog.translate('Should be of data type')
Messagebox.ok(
message=f"{msg} `{self._datatype}`",
title=MessageCatalog.translate("Invalid data type"),
)
return False
# max value range
if self._maxvalue is not None:
if self._result > self._maxvalue:
msg = MessageCatalog.translate('Number cannot be greater than')
Messagebox.ok(
message=f"{msg} {self._maxvalue}",
title=MessageCatalog.translate("Out of range"),
)
return False
# min value range
if self._minvalue is not None:
if self._result < self._minvalue:
msg = MessageCatalog.translate('Number cannot be less than')
Messagebox.ok(
message=f"{msg} {self._minvalue}",
title=MessageCatalog.translate("Out of range"),
)
return False
# valid result
return True
def apply(self):
"""Process the data.
This method is called automatically to process the data after
the dialog is destroyed. By default, it does nothing.
"""
pass # override
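# A minimal sketch of post-hoc input processing by overriding `apply`
# (hypothetical subclass; `show()` needs a display):
class UppercaseQuery(QueryDialog):
    """Example query that upper-cases whatever the user typed."""

    def apply(self):
        if self._result is not None:
            self._result = self._result.upper()

# Usage: d = UppercaseQuery("Enter a word:"); d.show(); print(d.result)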
class DatePickerDialog:
"""A dialog that displays a calendar popup and returns the
selected date as a datetime object.
The current date is displayed by default unless the `startdate`
parameter is provided.
The month can be changed by clicking the chevrons to the left
and right of the month-year title.
Left-click the arrow to move the calendar by one month.
Right-click the arrow to move the calendar by one year.
    Left-click the title to reset the calendar to the start date.
The starting weekday can be changed with the `firstweekday`
parameter for geographies that do not start the calendar on
Sunday, which is the default.
The widget grabs focus and all screen events until released.
If you want to cancel a date selection, click the 'X' button
at the top-right corner of the widget.
The bootstyle api may be used to change the style of the widget.
The available colors include -> primary, secondary, success,
info, warning, danger, light, dark.
"""
def __init__(
self,
parent=None,
title=" ",
firstweekday=6,
startdate=None,
bootstyle=PRIMARY,
):
"""
Parameters:
parent (Widget):
The parent widget; the popup will appear to the
bottom-right of the parent widget. If no parent is
provided, the widget is centered on the screen.
title (str):
The text that appears on the titlebar.
firstweekday (int):
Specifies the first day of the week. 0=Monday,
1=Tuesday, etc...
startdate (datetime):
The date to be in focus when the widget is
displayed.
bootstyle (str):
The following colors can be used to change the color of
the title and hover / pressed color -> primary,
secondary, info, warning, success, danger, light, dark.
"""
self.parent = parent
self.root = ttk.Toplevel(
title=title,
transient=self.parent,
resizable=(False, False),
topmost=True,
minsize=(226, 1),
iconify=True
)
self.firstweekday = firstweekday
self.startdate = startdate or datetime.today().date()
self.bootstyle = bootstyle or PRIMARY
self.date_selected = self.startdate
self.date = startdate or self.date_selected
self.calendar = calendar.Calendar(firstweekday=firstweekday)
self.titlevar = ttk.StringVar()
self.datevar = ttk.IntVar()
self._setup_calendar()
self.root.grab_set()
self.root.wait_window()
def _setup_calendar(self):
"""Setup the calendar widget"""
# create the widget containers
self.frm_calendar = ttk.Frame(
master=self.root, padding=0, borderwidth=0, relief=FLAT
)
self.frm_calendar.pack(fill=BOTH, expand=YES)
self.frm_title = ttk.Frame(self.frm_calendar, padding=(3, 3))
self.frm_title.pack(fill=X)
self.frm_header = ttk.Frame(self.frm_calendar, bootstyle=SECONDARY)
self.frm_header.pack(fill=X)
# setup the toplevel widget
self.root.withdraw() # reset the iconify state
self.frm_calendar.update_idletasks() # actualize geometry
# create visual components
self._draw_titlebar()
self._draw_calendar()
# make toplevel visible
self._set_window_position()
self.root.deiconify()
def _update_widget_bootstyle(self):
self.frm_title.configure(bootstyle=self.bootstyle)
self.title.configure(bootstyle=f"{self.bootstyle}-inverse")
self.prev_period.configure(style=f"Chevron.{self.bootstyle}.TButton")
self.next_period.configure(style=f"Chevron.{self.bootstyle}.TButton")
def _draw_calendar(self):
self._update_widget_bootstyle()
self._set_title()
self._current_month_days()
self.frm_dates = ttk.Frame(self.frm_calendar)
self.frm_dates.pack(fill=BOTH, expand=YES)
for row, weekday_list in enumerate(self.monthdays):
for col, day in enumerate(weekday_list):
self.frm_dates.columnconfigure(col, weight=1)
if day == 0:
ttk.Label(
master=self.frm_dates,
text=self.monthdates[row][col].day,
anchor=CENTER,
padding=5,
bootstyle=SECONDARY,
).grid(row=row, column=col, sticky=NSEW)
else:
if all(
[
day == self.date_selected.day,
self.date.month == self.date_selected.month,
self.date.year == self.date_selected.year,
]
):
day_style = "secondary-toolbutton"
else:
day_style = f"{self.bootstyle}-calendar"
def selected(x=row, y=col):
self._on_date_selected(x, y)
btn = ttk.Radiobutton(
master=self.frm_dates,
variable=self.datevar,
value=day,
text=day,
bootstyle=day_style,
padding=5,
command=selected,
)
btn.grid(row=row, column=col, sticky=NSEW)
def _draw_titlebar(self):
"""Draw the calendar title bar which includes the month title
and the buttons that increment and decrement the selected
month.
In addition to the previous and next MONTH commands that are
assigned to the button press, a "right-click" event is assigned
to each button that causes the calendar to move to the previous
and next YEAR.
"""
# create and pack the title and action buttons
self.prev_period = ttk.Button(
master=self.frm_title, text="«", command=self.on_prev_month
)
self.prev_period.pack(side=LEFT)
self.title = ttk.Label(
master=self.frm_title,
textvariable=self.titlevar,
anchor=CENTER,
font="-weight bold",
)
self.title.pack(side=LEFT, fill=X, expand=YES)
self.next_period = ttk.Button(
master=self.frm_title,
text="»",
command=self.on_next_month,
)
self.next_period.pack(side=LEFT)
# bind "year" callbacks to action buttons
self.prev_period.bind("<Button-3>", self.on_prev_year, "+")
self.next_period.bind("<Button-3>", self.on_next_year, "+")
self.title.bind("<Button-1>", self.on_reset_date)
# create and pack days of the week header
for col in self._header_columns():
ttk.Label(
master=self.frm_header,
text=col,
anchor=CENTER,
padding=5,
bootstyle=(SECONDARY, INVERSE),
).pack(side=LEFT, fill=X, expand=YES)
def _set_title(self):
        _titledate = self.date.strftime("%B %Y")
self.titlevar.set(value=_titledate)
def _current_month_days(self):
"""Fetch the day numbers and dates for all days in the current
month. `monthdays` is a list of days as integers, and
`monthdates` is a list of `datetime` objects.
"""
self.monthdays = self.calendar.monthdayscalendar(
year=self.date.year, month=self.date.month
)
self.monthdates = self.calendar.monthdatescalendar(
year=self.date.year, month=self.date.month
)
def _header_columns(self):
"""Create and return a list of weekdays to be used as a header
in the calendar. The order of the weekdays is based on the
`firstweekday` property.
Returns:
List[str]:
A list of weekday column names for the calendar header.
"""
weekdays = [MessageCatalog.translate("Mo"),
MessageCatalog.translate("Tu"),
MessageCatalog.translate("We"),
MessageCatalog.translate("Th"),
MessageCatalog.translate("Fr"),
MessageCatalog.translate("Sa"),
MessageCatalog.translate("Su")]
header = weekdays[self.firstweekday :] + weekdays[: self.firstweekday]
return header
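        # The slicing above simply rotates the list; with the default
        # firstweekday=6:
        #   weekdays[6:] + weekdays[:6]
        #   -> ['Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']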
def _on_date_selected(self, row, col):
"""Callback for selecting a date.
An index is assigned to each date button that corresponds to
the dates in the `monthdates` matrix. When the user clicks a
button to select a date, the index from this button is used
to lookup the date value of the button based on the row and
column index reference. This value is saved in the
`date_selected` property and the `Toplevel` is destroyed.
        Parameters:
            row (int):
                The row index of the selected date within the
                `monthdates` matrix.
            col (int):
                The column index of the selected date within the
                `monthdates` matrix.
        """
self.date_selected = self.monthdates[row][col]
self.root.destroy()
def _selection_callback(func):
"""Calls the decorated `func` and redraws the calendar."""
def inner(self, *args):
func(self, *args)
self.frm_dates.destroy()
self._draw_calendar()
return inner
@_selection_callback
def on_next_month(self):
"""Increment the calendar data to the next month"""
year, month = self._nextmonth(self.date.year, self.date.month)
self.date = datetime(year=year, month=month, day=1).date()
@_selection_callback
def on_next_year(self, *_):
"""Increment the calendar data to the next year"""
year = self.date.year + 1
month = self.date.month
self.date = datetime(year=year, month=month, day=1).date()
@_selection_callback
def on_prev_month(self):
"""Decrement the calendar to the previous year"""
year, month = self._prevmonth(self.date.year, self.date.month)
self.date = datetime(year=year, month=month, day=1).date()
@_selection_callback
def on_prev_year(self, *_):
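        """Decrement the calendar data to the previous year"""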
year = self.date.year - 1
month = self.date.month
self.date = datetime(year=year, month=month, day=1).date()
@_selection_callback
def on_reset_date(self, *_):
"""Set the calendar to the start date"""
self.date = self.startdate
def _set_window_position(self):
"""Move the window the to bottom-right of the parent widget, or
to the middle of the screen if no parent is provided.
"""
width = self.root.winfo_reqwidth()
height = self.root.winfo_reqheight()
if self.parent:
xpos = self.parent.winfo_rootx() + self.parent.winfo_width()
ypos = self.parent.winfo_rooty() + self.parent.winfo_height()
self.root.geometry(f"+{xpos}+{ypos}")
else:
xpos = self.root.winfo_screenwidth() // 2 - width
ypos = self.root.winfo_screenheight() // 2 - height
self.root.geometry(f"+{xpos}+{ypos}")
@staticmethod
def _nextmonth(year, month):
if month == 12:
return year+1, 1
else:
return year, month+1
@staticmethod
def _prevmonth(year, month):
if month == 1:
return year-1, 12
else:
return year, month-1
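# The month helpers wrap around year boundaries, e.g.:
#   DatePickerDialog._nextmonth(2021, 12) -> (2022, 1)
#   DatePickerDialog._prevmonth(2021, 1)  -> (2020, 12)
# Showing the picker itself needs a live Tk application (hypothetical usage):
#   app = ttk.Window()
#   picker = DatePickerDialog(firstweekday=0)  # start the week on Monday
#   print(picker.date_selected)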
class FontDialog(Dialog):
"""A dialog that displays a variety of options for choosing a font.
This dialog constructs and returns a `Font` object based on the
options selected by the user. The initial font is based on OS
settings and will vary.
The font object is returned when the **Ok** button is pressed and
can be passed to any widget that accepts a _font_ configuration
option.
"""
def __init__(self, title="Font Selector", parent=None):
title = MessageCatalog.translate(title)
super().__init__(parent=parent, title=title)
self._style = ttk.Style()
self._default = font.nametofont("TkDefaultFont")
self._actual = self._default.actual()
self._size = ttk.Variable(value=self._actual["size"])
self._family = ttk.Variable(value=self._actual["family"])
self._slant = ttk.Variable(value=self._actual["slant"])
self._weight = ttk.Variable(value=self._actual["weight"])
self._overstrike = ttk.Variable(value=self._actual["overstrike"])
self._underline = ttk.Variable(value=self._actual["underline"])
self._preview_font = font.Font()
self._slant.trace_add("write", self._update_font_preview)
self._weight.trace_add("write", self._update_font_preview)
self._overstrike.trace_add("write", self._update_font_preview)
self._underline.trace_add("write", self._update_font_preview)
_headingfont = font.nametofont("TkHeadingFont")
_headingfont.configure(weight="bold")
self._update_font_preview()
self._families = set([self._family.get()])
for f in font.families():
if all([f, not f.startswith("@"), "emoji" not in f.lower()]):
self._families.add(f)
def create_body(self, master):
width = utility.scale_size(master, 600)
height = utility.scale_size(master, 500)
self._toplevel.geometry(f"{width}x{height}")
family_size_frame = ttk.Frame(master, padding=10)
family_size_frame.pack(fill=X, anchor=N)
self._initial_focus = self._font_families_selector(family_size_frame)
self._font_size_selector(family_size_frame)
self._font_options_selectors(master, padding=10)
self._font_preview(master, padding=10)
def create_buttonbox(self, master):
container = ttk.Frame(master, padding=(5, 10))
container.pack(fill=X)
ok_btn = ttk.Button(
master=container,
bootstyle="primary",
text=MessageCatalog.translate("OK"),
command=self._on_submit,
)
ok_btn.pack(side=RIGHT, padx=5)
ok_btn.bind("<Return>", lambda _: ok_btn.invoke())
cancel_btn = ttk.Button(
master=container,
bootstyle="secondary",
text=MessageCatalog.translate("Cancel"),
command=self._on_cancel,
)
cancel_btn.pack(side=RIGHT, padx=5)
cancel_btn.bind("<Return>", lambda _: cancel_btn.invoke())
def _font_families_selector(self, master):
container = ttk.Frame(master)
container.pack(fill=BOTH, expand=YES, side=LEFT)
header = ttk.Label(container, text=MessageCatalog.translate("Family"), font="TkHeadingFont")
header.pack(fill=X, pady=(0, 2), anchor=N)
listbox = ttk.Treeview(
master=container,
height=5,
show="",
columns=[0],
)
listbox.column(0, width=utility.scale_size(listbox, 250))
listbox.pack(side=LEFT, fill=BOTH, expand=YES)
listbox_vbar = ttk.Scrollbar(
container,
command=listbox.yview,
orient=VERTICAL,
bootstyle="rounded",
)
listbox_vbar.pack(side=RIGHT, fill=Y)
listbox.configure(yscrollcommand=listbox_vbar.set)
for f in self._families:
listbox.insert("", iid=f, index=END, tags=[f], values=[f])
listbox.tag_configure(f, font=(f, self._size.get()))
iid = self._family.get()
listbox.selection_set(iid) # select default value
listbox.see(iid) # ensure default is visible
listbox.bind(
"<<TreeviewSelect>>", lambda e: self._on_select_font_family(e)
)
return listbox
def _font_size_selector(self, master):
container = ttk.Frame(master)
container.pack(side=LEFT, fill=Y, padx=(10, 0))
header = ttk.Label(container, text=MessageCatalog.translate("Size"), font="TkHeadingFont")
header.pack(fill=X, pady=(0, 2), anchor=N)
sizes_listbox = ttk.Treeview(container, height=7, columns=[0], show="")
sizes_listbox.column(0, width=utility.scale_size(sizes_listbox, 24))
sizes = [*range(8, 13), *range(13, 30, 2), 36, 48, 72]
for s in sizes:
sizes_listbox.insert("", iid=s, index=END, values=[s])
iid = self._size.get()
sizes_listbox.selection_set(iid)
sizes_listbox.see(iid)
sizes_listbox.bind(
"<<TreeviewSelect>>", lambda e: self._on_select_font_size(e)
)
sizes_listbox_vbar = ttk.Scrollbar(
master=container,
orient=VERTICAL,
command=sizes_listbox.yview,
bootstyle="round",
)
sizes_listbox.configure(yscrollcommand=sizes_listbox_vbar.set)
sizes_listbox.pack(side=LEFT, fill=Y, expand=YES, anchor=N)
sizes_listbox_vbar.pack(side=LEFT, fill=Y, expand=YES)
def _font_options_selectors(self, master, padding: int):
container = ttk.Frame(master, padding=padding)
container.pack(fill=X, padx=2, pady=2, anchor=N)
weight_lframe = ttk.Labelframe(container, text=MessageCatalog.translate("Weight"), padding=5)
weight_lframe.pack(side=LEFT, fill=X, expand=YES)
opt_normal = ttk.Radiobutton(
master=weight_lframe,
text=MessageCatalog.translate("normal"),
value="normal",
variable=self._weight,
)
opt_normal.invoke()
opt_normal.pack(side=LEFT, padx=5, pady=5)
opt_bold = ttk.Radiobutton(
master=weight_lframe,
text=MessageCatalog.translate("bold"),
value="bold",
variable=self._weight,
)
opt_bold.pack(side=LEFT, padx=5, pady=5)
slant_lframe = ttk.Labelframe(container, text=MessageCatalog.translate("Slant"), padding=5)
slant_lframe.pack(side=LEFT, fill=X, padx=10, expand=YES)
opt_roman = ttk.Radiobutton(
master=slant_lframe,
text=MessageCatalog.translate("roman"),
value="roman",
variable=self._slant,
)
opt_roman.invoke()
opt_roman.pack(side=LEFT, padx=5, pady=5)
opt_italic = ttk.Radiobutton(
master=slant_lframe,
text=MessageCatalog.translate("italic"),
value="italic",
variable=self._slant,
)
opt_italic.pack(side=LEFT, padx=5, pady=5)
effects_lframe = ttk.Labelframe(container, text=MessageCatalog.translate("Effects"), padding=5)
effects_lframe.pack(side=LEFT, padx=(2, 0), fill=X, expand=YES)
opt_underline = ttk.Checkbutton(
master=effects_lframe, text=MessageCatalog.translate("underline"), variable=self._underline
)
opt_underline.pack(side=LEFT, padx=5, pady=5)
opt_overstrike = ttk.Checkbutton(
master=effects_lframe, text=MessageCatalog.translate("overstrike"), variable=self._overstrike
)
opt_overstrike.pack(side=LEFT, padx=5, pady=5)
def _font_preview(self, master, padding: int):
container = ttk.Frame(master, padding=padding)
container.pack(fill=BOTH, expand=YES, anchor=N)
header = ttk.Label(container, text=MessageCatalog.translate("Preview"), font="TkHeadingFont")
header.pack(fill=X, pady=2, anchor=N)
content = MessageCatalog.translate("The quick brown fox jumps over the lazy dog.")
self._preview_text = ttk.Text(
master=container,
height=3,
font=self._preview_font,
highlightbackground=self._style.colors.primary,
)
self._preview_text.insert(END, content)
self._preview_text.pack(fill=BOTH, expand=YES)
container.pack_propagate(False)
def _on_select_font_family(self, e):
tree: ttk.Treeview = self._toplevel.nametowidget(e.widget)
fontfamily = tree.selection()[0]
self._family.set(value=fontfamily)
self._update_font_preview()
def _on_select_font_size(self, e):
tree: ttk.Treeview = self._toplevel.nametowidget(e.widget)
fontsize = tree.selection()[0]
self._size.set(value=fontsize)
self._update_font_preview()
def _on_submit(self) -> font.Font:
self._toplevel.destroy()
return self.result
def _on_cancel(self):
self._toplevel.destroy()
def _update_font_preview(self, *_):
family = self._family.get()
size = self._size.get()
slant = self._slant.get()
overstrike = self._overstrike.get()
underline = self._underline.get()
self._preview_font.config(
family=family,
size=size,
slant=slant,
overstrike=overstrike,
underline=underline,
)
try:
self._preview_text.configure(font=self._preview_font)
except:
pass
self._result = self._preview_font
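# A minimal FontDialog usage sketch (hypothetical; requires a display):
#   app = ttk.Window()
#   fd = FontDialog()
#   fd.show()
#   ttk.Label(app, text="Preview", font=fd.result).pack()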
class Messagebox:
"""This class contains various static methods that show popups with
    a message to the end user with various arrangements of buttons
and alert options."""
@staticmethod
def show_info(message, title=" ", parent=None, **kwargs):
"""Display a modal dialog box with an OK button and an INFO
icon.
Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
"""
sd = MessageDialog(
message=message,
title=title,
parent=parent,
buttons=["OK:primary"],
icon=Icon.info,
            localize=True,
            **kwargs,
        )
sd.show()
@staticmethod
def show_warning(message, title=" ", parent=None, **kwargs):
"""Display a modal dialog box with an OK button and a
warning icon. Also will ring the display bell.
Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
"""
sd = MessageDialog(
message=message,
title=title,
parent=parent,
buttons=["OK:primary"],
icon=Icon.warning,
alert=True,
localize=True,
**kwargs,
)
sd.show()
@staticmethod
def show_error(message, title=" ", parent=None, **kwargs):
"""Display a modal dialog box with an OK button and an
error icon. Also will ring the display bell.
Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
"""
sd = MessageDialog(
message=message,
title=title,
parent=parent,
buttons=["OK:primary"],
icon=Icon.error,
alert=True,
localize=True,
**kwargs,
)
sd.show()
@staticmethod
def show_question(
message,
title=" ",
parent=None,
buttons=["No:secondary", "Yes:primary"],
**kwargs,
):
"""Display a modal dialog box with yes, no buttons and a
question icon. Also will ring the display bell. You may also
change the button scheme using the `buttons` parameter.
Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
buttons (List[str]):
A list of buttons to appear at the bottom of the popup
messagebox. The buttons can be a list of strings which
will define the symbolic name and the button text.
`['OK', 'Cancel']`. Alternatively, you can assign a
bootstyle to each button by using the colon to separate the
button text and the bootstyle. If no colon is found, then
the style is set to 'primary' by default.
`['Yes:success','No:danger']`.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
"""
sd = MessageDialog(
message=message,
title=title,
parent=parent,
buttons=buttons,
icon=Icon.question,
alert=True,
localize=True,
**kwargs,
)
sd.show()
return sd.result
@staticmethod
def ok(message, title=" ", alert=False, parent=None, **kwargs):
"""Display a modal dialog box with an OK button and and optional
bell alert.
Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
                Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
"""
sd = MessageDialog(
title=title,
message=message,
parent=parent,
alert=alert,
buttons=["OK:primary"],
localize=True,
**kwargs,
)
sd.show()
@staticmethod
def okcancel(message, title=" ", alert=False, parent=None, **kwargs):
"""Displays a modal dialog box with OK and Cancel buttons and
return the symbolic name of the button pressed.
Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
                Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
"""
sd = MessageDialog(
title=title, message=message, parent=parent, alert=alert, localize=True, **kwargs
)
sd.show()
return sd.result
@staticmethod
def yesno(message, title=" ", alert=False, parent=None, **kwargs):
"""Display a modal dialog box with YES and NO buttons and return
the symbolic name of the button pressed.
Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
                Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
"""
sd = MessageDialog(
title=title,
message=message,
parent=parent,
buttons=["No", "Yes:primary"],
alert=alert,
localize=True,
**kwargs,
)
sd.show()
return sd.result
@staticmethod
def yesnocancel(message, title=" ", alert=False, parent=None, **kwargs):
"""Display a modal dialog box with YES, NO, and Cancel buttons,
and return the symbolic name of the button pressed.
Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
                Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
"""
sd = MessageDialog(
title=title,
message=message,
parent=parent,
alert=alert,
buttons=["Cancel", "No", "Yes:primary"],
localize=True,
**kwargs,
)
sd.show()
return sd.result
@staticmethod
def retrycancel(message, title=" ", alert=False, parent=None, **kwargs):
"""Display a modal dialog box with RETRY and Cancel buttons;
returns the symbolic name of the button pressed.
Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
                Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
"""
sd = MessageDialog(
title=title,
message=message,
parent=parent,
alert=alert,
buttons=["Cancel", "Retry:primary"],
localize=True,
**kwargs,
)
sd.show()
return sd.result
class Querybox:
"""This class contains various static methods that request data
from the end user."""
@staticmethod
def get_color(
parent=None,
title="Color Chooser",
initialcolor=None,
):
"""Show a color picker and return the select color when the
user pressed OK.
Parameters:
parent (Widget):
The parent widget.
title (str):
Optional text that appears on the titlebar.
initialcolor (str):
The initial color to display in the 'Current' color
frame.
Returns:
Tuple[rgb, hsl, hex]
                The selected color in various color models.
"""
from ttkbootstrap.dialogs.colorchooser import ColorChooserDialog
cd = ColorChooserDialog(parent, title, initialcolor)
cd.show()
return cd.result
@staticmethod
def get_date(
parent=None,
title=" ",
firstweekday=6,
startdate=None,
bootstyle="primary",
):
"""Shows a calendar popup and returns the selection.
Parameters:
parent (Widget):
The parent widget; the popup will appear to the
bottom-right of the parent widget. If no parent is
provided, the widget is centered on the screen.
title (str):
The text that appears on the popup titlebar.
firstweekday (int):
Specifies the first day of the week. `0` is Monday, `6` is
Sunday (the default).
startdate (datetime):
                The date to be in focus when the widget is displayed.
bootstyle (str):
The following colors can be used to change the color of the
title and hover / pressed color -> primary, secondary, info,
warning, success, danger, light, dark.
Returns:
datetime:
The date selected; the current date if no date is selected.
"""
chooser = DatePickerDialog(
parent=parent,
title=title,
firstweekday=firstweekday,
startdate=startdate,
bootstyle=bootstyle,
)
return chooser.date_selected
@staticmethod
def get_string(
prompt="", title=" ", initialvalue=None, parent=None, **kwargs
):
"""Request a string type input from the user.
Parameters:
prompt (str):
A message to display in the message box above the entry
widget.
title (str):
The string displayed as the title of the message box. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
initialvalue (Any):
The initial value in the entry widget.
parent (Widget):
Makes the window the logical parent of the message box. The
messagebox is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
str:
The string value of the entry widget.
"""
initialvalue = initialvalue or ""
dialog = QueryDialog(
prompt, title, initialvalue, parent=parent, **kwargs
)
dialog.show()
        return dialog.result
@staticmethod
def get_integer(
prompt="",
title=" ",
initialvalue=None,
minvalue=None,
maxvalue=None,
parent=None,
**kwargs,
):
"""Request an integer type input from the user.
Parameters:
prompt (str):
A message to display in the message box above the entry
widget.
title (str):
The string displayed as the title of the message box. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
initialvalue (int):
The initial value in the entry widget.
minvalue (int):
The minimum allowed value.
maxvalue (int):
The maximum allowed value.
parent (Widget):
Makes the window the logical parent of the message box. The
messagebox is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
int:
The integer value of the entry widget.
"""
initialvalue = initialvalue or ""
dialog = QueryDialog(
prompt,
title,
initialvalue,
minvalue,
maxvalue,
datatype=int,
parent=parent,
**kwargs,
)
dialog.show()
        return dialog.result
@staticmethod
def get_float(
prompt="",
title=" ",
initialvalue=None,
minvalue=None,
maxvalue=None,
parent=None,
**kwargs,
):
"""Request a float type input from the user.
Parameters:
prompt (str):
A message to display in the message box above the entry
widget.
title (str):
The string displayed as the title of the message box. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
initialvalue (float):
The initial value in the entry widget.
minvalue (float):
The minimum allowed value.
maxvalue (float):
The maximum allowed value.
parent (Widget):
Makes the window the logical parent of the message box. The
messagebox is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
float:
The float value of the entry widget.
"""
initialvalue = initialvalue or ""
dialog = QueryDialog(
prompt,
title,
initialvalue,
minvalue,
maxvalue,
datatype=float,
parent=parent,
**kwargs,
)
dialog.show()
        return dialog.result
@staticmethod
def get_font(parent=None, **kwargs):
"""Request a customized font
Parameters:
parent (Widget):
Makes the window the logical parent of the dialog box. The
dialog is displayed on top of its parent window.
**kwargs (Dict):
Other keyword arguments.
Returns:
Font:
A font object.
"""
dialog = FontDialog(parent=parent, **kwargs)
dialog.show()
return dialog.result
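Taken together, the static helpers above reduce common prompts to one-liners. A hedged usage sketch (requires a display; the prompts are illustrative):

# app = ttk.Window()
# answer = Messagebox.yesno("Overwrite the existing file?")  # 'Yes' or 'No'
# name = Querybox.get_string("Your name:")                   # str
# age = Querybox.get_integer("Your age:", minvalue=0)        # range-checked int
# when = Querybox.get_date(firstweekday=0)                   # datetime.date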
| 33.65149
| 105
| 0.568219
| 6,836
| 59,866
| 4.88897
| 0.09567
| 0.010173
| 0.017504
| 0.009425
| 0.548757
| 0.504982
| 0.472009
| 0.435415
| 0.398283
| 0.378714
| 0
| 0.004286
| 0.345321
| 59,866
| 1,778
| 106
| 33.670416
| 0.848391
| 0.382187
| 0
| 0.357798
| 0
| 0
| 0.038023
| 0.006764
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075688
| false
| 0.002294
| 0.012615
| 0
| 0.127294
| 0.001147
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3a6ae7f4fab920589a878c0b0e9e7fa6a88c26a
| 2,504
|
py
|
Python
|
Google-Play-Store-App-Rating/code.py
|
venky4121994/ga-learner-dsmp-repo
|
1bef03489931eece0d5ecb9ce0501dfeb558dc59
|
[
"MIT"
] | null | null | null |
Google-Play-Store-App-Rating/code.py
|
venky4121994/ga-learner-dsmp-repo
|
1bef03489931eece0d5ecb9ce0501dfeb558dc59
|
[
"MIT"
] | null | null | null |
Google-Play-Store-App-Rating/code.py
|
venky4121994/ga-learner-dsmp-repo
|
1bef03489931eece0d5ecb9ce0501dfeb558dc59
|
[
"MIT"
] | null | null | null |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = pd.read_csv(path)
data.hist(['Rating'])
data = data[data['Rating']<=5]
data.hist(['Rating'])
#Code ends here
# --------------
# code starts here
total_null = data.isnull().sum()
percent_null = (total_null/data.isnull().count())
missing_data = pd.concat([total_null,percent_null],keys=['Total','Percent'],axis=1)
print(missing_data)
data.dropna(inplace=True)
total_null_1 = data.isnull().sum()
percent_null_1 = (total_null_1/data.isnull().count())
missing_data_1 = pd.concat([total_null_1,percent_null_1],keys=['Total','Percent'],axis=1)
print(missing_data_1)
# code ends here
# --------------
#Code starts here
plt.figure(figsize=(10,20))
catplot = sns.catplot(x = "Category", y = "Rating", data=data, kind="box",height=10)
catplot.set_xticklabels(rotation=90)
plt.title('Rating vs Category [BoxPlot]',size = 20)
#Code ends here
# --------------
#Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
print(data['Installs'])
data['Installs'] = data['Installs'].str.replace('+', '', regex=False)
data['Installs'] = data['Installs'].str.replace(',', '', regex=False)
data['Installs'] = data['Installs'].astype('int32')
le = LabelEncoder()
data['Installs'] = le.fit_transform(data['Installs'])
graph = sns.regplot(x='Installs', y='Rating', data=data)
graph.set_title('Rating vs Installs [RegPlot]')
plt.show()
#Code ends here
# --------------
#Code starts here
print(data['Price'].value_counts())
data['Price'] = data['Price'].str.replace('$', '', regex=False)
data['Price'] = data['Price'].astype('float32')
graph2 = sns.regplot(x='Price', y='Rating', data=data)
graph2.set_title('Rating vs Price [RegPlot]')
#Code ends here
# --------------
#Code starts here
print(len(data['Genres'].unique()), "genres")
data['Genres'] = data['Genres'].str.split(';').str[0]
gr_mean = data[['Genres','Rating']].groupby(['Genres'],as_index=False).mean()
print(gr_mean.describe())
gr_mean=gr_mean.sort_values('Rating')
print(gr_mean.head(1))  # lowest-rated genre after the ascending sort
#Code ends here
# --------------
#Code starts here
data['Last Updated'] = pd.to_datetime(data['Last Updated'])
data['Last Updated Days'] = (data['Last Updated'].max()-data['Last Updated']).dt.days
plt.figure(figsize = (10,10))
sns.regplot(x="Last Updated Days", y="Rating",color='lightpink',data=data)
plt.title('Rating vs Last Updated [Regplot]',size =20)
#Code ends here
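One pandas pitfall worth calling out in the cleaning steps above: `Series.str.replace` historically treated the pattern as a regular expression, so metacharacters like `+` and `$` silently misbehave unless `regex=False` is passed (as done in the fixes above). A small self-contained illustration:

import pandas as pd

s = pd.Series(['1,000+', '$4.99'])
print(s.str.replace('+', '', regex=False).tolist())  # ['1,000', '$4.99']
print(s.str.replace('$', '', regex=False).tolist())  # ['1,000+', '4.99']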
| 25.55102
| 89
| 0.680112
| 356
| 2,504
| 4.682584
| 0.283708
| 0.071986
| 0.058788
| 0.04799
| 0.313137
| 0.184763
| 0.169166
| 0.09958
| 0.055189
| 0.055189
| 0
| 0.015391
| 0.091853
| 2,504
| 97
| 90
| 25.814433
| 0.717678
| 0.14377
| 0
| 0.085106
| 0
| 0
| 0.214825
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.085106
| 0
| 0.085106
| 0.170213
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3a6e52033cd00d1b8f29b49e45d1f519baff3e9
| 6,597
|
py
|
Python
|
converters/brat2iob.py
|
Banguiskode/nerds
|
366420b2ec57bf790562de62a79f4973cbd6b3ed
|
[
"BSD-3-Clause"
] | 15
|
2019-12-05T18:40:22.000Z
|
2021-02-20T05:34:50.000Z
|
converters/brat2iob.py
|
Banguiskode/nerds
|
366420b2ec57bf790562de62a79f4973cbd6b3ed
|
[
"BSD-3-Clause"
] | null | null | null |
converters/brat2iob.py
|
Banguiskode/nerds
|
366420b2ec57bf790562de62a79f4973cbd6b3ed
|
[
"BSD-3-Clause"
] | 4
|
2019-12-30T13:03:05.000Z
|
2021-02-16T13:08:09.000Z
|
import argparse
import operator
import os
import re
import shutil
import spacy
import tempfile
from nerds.utils import spans_to_tokens, get_logger
def segment_text_to_sentences(text_file, sentence_splitter):
""" Segment text into sentences. Text is provided by BRAT in .txt
file.
Args:
text_file (str): the full path to the BRAT .txt file.
sentence_splitter (spacy LM): SpaCy EN language model.
Returns:
sentences (list((int, int, str))): list of sentence spans.
Spans are triples of (start_offset, end_offset, text),
where offset is relative to the text.
"""
sentences = []
ftext = open(text_file, "r")
for line in ftext:
splits = sentence_splitter(line.strip())
for sent in splits.sents:
sentences.append((sent.start_char, sent.end_char, sent.text))
ftext.close()
return sentences
def parse_text_annotations(ann_file):
""" Parses BRAT annotations provided in the .ann file and converts them
to annotation spans of (start_position, end_position, entity_class).
Args:
ann_file (str): full path to the BRAT .ann file.
Returns:
annotations (list((int, int, str))): list of annotation spans.
Spans are triples of (start_offset, end_offset, entity_class)
where offset is relative to the text.
"""
annots = []
fann = open(ann_file, "r")
for line in fann:
cols = re.split(r"\s+", line.strip())
if not cols[0].startswith("T"):
continue
annots.append((int(cols[2]), int(cols[3]), cols[1]))
fann.close()
return annots
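# Example: given the BRAT stand-off line "T1 PER 0 13 Pierre Vinken" (the
# same format as the self-test data further below), parse_text_annotations
# yields the span (0, 13, 'PER').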
def apply_annotations(sentences, annotations, tokenizer):
""" Apply annotation spans to the sentence spans to create a list of tokens
and tags.
Args:
sentences (list((int, int, str))): list of sentence spans.
annotations (list((int, int, str))): list of annotation spans.
tokenizer (spacy LM): SpaCy EN language model.
Returns:
tokens_tags_list (list((list(str), list(str)))): list of list of token
tag pairs. Each list of token-tag pairs corresponds to a single
sentence.
"""
tokens_tags_list = []
for sent_start, sent_end, sent_text in sentences:
sent_annots = [a for a in annotations if a[0] >= sent_start and a[1] <= sent_end]
# convert document offsets to sentence offsets
sent_annots = [(s[0] - sent_start, s[1] - sent_start, s[2]) for s in sent_annots]
tokens, tags = spans_to_tokens(sent_text, sent_annots, tokenizer)
tokens_tags_list.append(zip(tokens, tags))
return tokens_tags_list
def convert_brat_to_iob(input_dir, output_file, nlp):
""" Convenience Convertor function.
Args:
input_dir (str): the directory where the BRAT .txt and .ann files
are located.
output_file (str): the full path name of file to write output in
IOB format to.
nlp (SpaCy LM): reference to the SpaCy EN model.
Returns:
None.
"""
fout = open(output_file, "w")
for text_file in os.listdir(input_dir):
# only process .txt and .ann pairs in specified directory
if not text_file.endswith(".txt"):
continue
annot_file = text_file[:-4] + ".ann"
if not os.path.exists(os.path.join(input_dir, annot_file)):
# do not process file if no corresponding .ann file
continue
# process file pair
logger.info("Processing file: {:s}".format(text_file))
sentences = segment_text_to_sentences(os.path.join(input_dir, text_file), nlp)
annotations = parse_text_annotations(os.path.join(input_dir, annot_file))
tokens_tags_list = apply_annotations(sentences, annotations, nlp)
for tokens_tags in tokens_tags_list:
for token, tag in tokens_tags:
fout.write("{:s}\t{:s}\n".format(token, tag))
fout.write("\n")
fout.close()
def do_self_test(nlp):
""" Simple self-test with small dataset to prove that this works okay. """
text = "Pierre Vinken, 61 years old, will join the board as a nonexecutive director, Nov. 29. Mr. Vinken is chairman of Elsevier N.V., the Dutch publishing group."
annotations = [
"T1 PER 0 13 Pierre Vinken",
"T2 PER 86 96 Mr. Vinken",
"T3 DATE 15 27 61 years old",
"T4 DATE 77 84 Nov. 29",
"T5 ORG 112 125 Elsevier N.V.",
"T6 NORP 131 136 Dutch"
]
input_dir = tempfile.mkdtemp(dir="/tmp")
ftext = open(os.path.join(input_dir, "test.txt"), "w")
ftext.write(text)
ftext.close()
fann = open(os.path.join(input_dir, "test.ann"), "w")
for line in annotations:
fann.write(line + "\n")
fann.close()
output_file = os.path.join(input_dir, "test.iob")
convert_brat_to_iob(input_dir, output_file, nlp)
fout = open(output_file, "r")
for line in fout:
logger.warning(line.strip())
shutil.rmtree(input_dir)
################################ main ################################
#
# usage: brat2iob.py [-h] [-i INPUT_DIR] [-o OUTPUT_FILE] [-t]
# Script to convert BRAT annotations to IOB (NERDS) format.
# optional arguments:
# -h, --help show this help message and exit
# -i INPUT_DIR, --input_dir INPUT_DIR
# Directory containing the BRAT .txt and .ann files.
# -o OUTPUT_FILE, --output_file OUTPUT_FILE
# Output file to write IOB output to.
# -t, --test Runs self test.
######################################################################
parser = argparse.ArgumentParser(
description="Script to convert BRAT annotations to IOB (NERDS) format.")
parser.add_argument("-i", "--input_dir", help="Directory to store BRAT .txt and .ann files.")
parser.add_argument("-o", "--output_file", help="Output file to write IOB output to.")
parser.add_argument("-t", "--test", help="Runs self test.", action="store_true")
args = parser.parse_args()
logger = get_logger()
input_dir = args.input_dir
output_file = args.output_file
self_test = args.test
nlp = spacy.load("en")
if self_test:
logger.info("Executing self test...")
do_self_test(nlp)
else:
logger.info("Reading BRAT .txt and .ann files from: {:s}".format(input_dir))
logger.info("Writing IOB tokens/tags to file: {:s}".format(output_file))
convert_brat_to_iob(input_dir, output_file, nlp)
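# Example invocations (paths are illustrative, matching the usage block above):
#   python brat2iob.py -i /path/to/brat_corpus -o corpus.iob
#   python brat2iob.py -t    # run the built-in self test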
| 36.854749
| 167
| 0.618463
| 906
| 6,597
| 4.363135
| 0.242826
| 0.042499
| 0.02125
| 0.022768
| 0.266886
| 0.217303
| 0.204149
| 0.130787
| 0.113585
| 0
| 0
| 0.010588
| 0.255571
| 6,597
| 178
| 168
| 37.061798
| 0.794339
| 0.33303
| 0
| 0.095745
| 0
| 0.010638
| 0.172877
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053191
| false
| 0
| 0.085106
| 0
| 0.170213
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3a738f0c10019d9229ed8e9b93898831920170d
| 2,503
|
py
|
Python
|
kraken/lib/util.py
|
zjsteyn/kraken
|
eaa9f4290db5425ddf80d0aebfa3944713558ab5
|
[
"Apache-2.0"
] | 1
|
2022-02-03T14:41:58.000Z
|
2022-02-03T14:41:58.000Z
|
kraken/lib/util.py
|
ephenum/kraken
|
47be8f7ddcb7c7ad63bfc5636df1976a4e84a5f0
|
[
"Apache-2.0"
] | null | null | null |
kraken/lib/util.py
|
ephenum/kraken
|
47be8f7ddcb7c7ad63bfc5636df1976a4e84a5f0
|
[
"Apache-2.0"
] | 1
|
2022-01-19T10:53:20.000Z
|
2022-01-19T10:53:20.000Z
|
"""
Ocropus's magic PIL-numpy array conversion routines. They exhibit slightly
different behavior from PIL.Image.toarray().
"""
import unicodedata
import numpy as np
from PIL import Image
__all__ = ['pil2array', 'array2pil']
def pil2array(im: Image.Image, alpha: int = 0) -> np.ndarray:
if im.mode == '1':
return np.array(im.convert('L'))
return np.array(im)
def array2pil(a: np.ndarray) -> Image.Image:
# tobytes() replaces the long-deprecated ndarray.tostring()
if a.dtype == np.dtype("B"):
if a.ndim == 2:
return Image.frombytes("L", (a.shape[1], a.shape[0]), a.tobytes())
elif a.ndim == 3:
return Image.frombytes("RGB", (a.shape[1], a.shape[0]), a.tobytes())
else:
raise Exception("bad image rank")
elif a.dtype == np.dtype('float32'):
return Image.frombytes("F", (a.shape[1], a.shape[0]), a.tobytes())
else:
raise Exception("unknown image type")
def is_bitonal(im: Image.Image) -> bool:
"""
Tests a PIL.Image for bitonality.
Args:
im (PIL.Image.Image): Image to test
Returns:
True if the image contains only two different color values. False
otherwise.
"""
return im.getcolors(2) is not None and len(im.getcolors(2)) == 2
def get_im_str(im: Image.Image) -> str:
return im.filename if hasattr(im, 'filename') else str(im)
def is_printable(char: str) -> bool:
"""
Determines if a code point is printable/visible when printed.
Args:
char (str): Input code point.
Returns:
True if printable, False otherwise.
"""
letters = ('LC', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu')
numbers = ('Nd', 'Nl', 'No')
punctuation = ('Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps')
symbol = ('Sc', 'Sk', 'Sm', 'So')
printable = letters + numbers + punctuation + symbol
return unicodedata.category(char) in printable
def make_printable(char: str) -> str:
"""
Takes a Unicode code point and returns a printable representation of it.
Args:
char (str): Input code point
Returns:
Either the original code point, the name of the code point if it is a
combining mark, whitespace etc., or the hex code if it is a control
symbol.
"""
if not char or is_printable(char):
return char
elif unicodedata.category(char) in ('Cc', 'Cs', 'Co'):
return '0x{:x}'.format(ord(char))
else:
return unicodedata.name(char)
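# Minimal usage sketch (illustrative; the results follow directly from the
# code above):
#   make_printable('a')      -> 'a'                       (already printable)
#   make_printable('\n')     -> '0xa'                     (control char -> hex code)
#   make_printable('\u0301') -> 'COMBINING ACUTE ACCENT'  (named code point)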
| 27.811111
| 77
| 0.582901
| 338
| 2,503
| 4.286982
| 0.41716
| 0.024845
| 0.024845
| 0.016563
| 0.116632
| 0.116632
| 0.116632
| 0.072464
| 0.056591
| 0.056591
| 0
| 0.011056
| 0.277267
| 2,503
| 89
| 78
| 28.123596
| 0.789939
| 0.291251
| 0
| 0.125
| 0
| 0
| 0.075256
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.075
| 0.025
| 0.5
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3a7f40bcb06653665d3b8d30577d4282cd0f05f
| 2,877
|
py
|
Python
|
analysis/calculate_holding_amount.py
|
hao44le/ico_top_holder_analysis
|
aeeab01c90e4446b424c52c33a68ccb814123121
|
[
"MIT"
] | 538
|
2018-07-04T21:14:52.000Z
|
2022-03-26T15:16:08.000Z
|
analysis/calculate_holding_amount.py
|
hao44le/ico_top_holder_analysis
|
aeeab01c90e4446b424c52c33a68ccb814123121
|
[
"MIT"
] | 4
|
2018-07-08T22:11:32.000Z
|
2021-12-13T19:48:38.000Z
|
analysis/calculate_holding_amount.py
|
hao44le/ico_top_holder_analysis
|
aeeab01c90e4446b424c52c33a68ccb814123121
|
[
"MIT"
] | 52
|
2018-07-05T12:07:37.000Z
|
2021-04-05T23:34:20.000Z
|
import sys
sys.path.insert(0,'..')
from data.whale_data import exchnage_accounts
from data.html_helper import check_if_address_name_exists
from data.whale_eth_tx_data import *
from data.whale_token_tx_data import identify_investor_type_token
holding_account = "holding_account"
deposit_account = 'deposit_account'
withdraw_account = "withdraw_account"
in_type = "IN"
out_type = "OUT"
all_acc_types = dict()
for acc in exchnage_accounts:
all_acc_types[acc] = exchange_type
def update_y_array(X,y,timestamp,amount):
target_index = 0
for i in range(len(X)):
x_time = X[i]
if timestamp < x_time:
target_index = i
break
for i in range(target_index,len(y)):
y[i] += amount
return y
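# Worked example (assumes X holds ascending timestamps, as the loop above
# requires): update_y_array(X=[10, 20, 30], y=[0, 0, 0], timestamp=15,
# amount=5) finds 20 as the first x_time after the timestamp and adds the
# amount from that index onward, returning [0, 5, 5].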
def perform_bfs_on_accounts(out_txs,top_holder_type,acc,m_type='OUT'):
print("\t"+m_type)
unique_out = set()
for out in out_txs:
unique_out.add(out[3])
unique_out = list(unique_out)[:5]
for out in unique_out:
print("\t"+out)
if out not in all_acc_types:
investor_type = identify_investor_type(out)
if investor_type == affliate_type:
investor_type = identify_investor_type_token(out)
print("\t\t{}".format(investor_type))
else:
investor_type = all_acc_types[out]
if investor_type == exchange_type:
top_holder_type[acc] = deposit_account if m_type == "OUT" else withdraw_account
all_acc_types[out] = investor_type
if acc not in top_holder_type:
top_holder_type[acc] = holding_account
return top_holder_type
def calculate_holding_amount(X,escape_accounts,txs):
top_holder_type = dict()
for acc in txs:
tx = txs[acc]
if acc in escape_accounts:
continue
# if the current account has never sent any tokens out, flag it as a holding account and skip it
out_txs = [item for item in tx if item[2] == 'OUT']
if len(out_txs) == 0:
print("\tholding account")
top_holder_type[acc] = holding_account
continue
# build the full trace Y: holding_amount, deposit_amount, withdraw_amount
amount_trace_y = [0] * len(X)
for holder in txs:
if holder in escape_accounts:
continue
if holder not in top_holder_type:
print("{} not identified! ".format(holder))
continue
holder_type = top_holder_type[holder]
holder_txs = txs[holder]
print("{} {}".format(holder,holder_type))
for tx in holder_txs:
[timestamp,from_a,tx_type,to_a,amount] = tx
if holder_type == holding_account:
if tx_type == in_type:
amount_trace_y = update_y_array(X,amount_trace_y,timestamp,amount)
else:
amount_trace_y = update_y_array(X,amount_trace_y,timestamp,-amount)
return amount_trace_y
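# Hypothetical usage sketch (names are illustrative; the real inputs come from
# the data modules imported above):
#   X = sorted(set(tx[0] for acc in txs for tx in txs[acc]))  # ascending time axis
#   holding_curve = calculate_holding_amount(X, escape_accounts, txs)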
| 29.96875
| 91
| 0.642336
| 400
| 2,877
| 4.2975
| 0.2075
| 0.069808
| 0.068063
| 0.037231
| 0.178592
| 0.095404
| 0.0605
| 0.0605
| 0.0605
| 0.0605
| 0
| 0.003338
| 0.271116
| 2,877
| 95
| 92
| 30.284211
| 0.816404
| 0.032325
| 0
| 0.108108
| 0
| 0
| 0.040618
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0
| 0.067568
| 0
| 0.148649
| 0.081081
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3a80291d5fdb7e2a418a7fbbb6542744e0db4d2
| 66,926
|
py
|
Python
|
textbox/trainer/trainer.py
|
JBoRu/TextBox-1
|
0dcbaa153acc507e3d55075312d7ca5d23146e03
|
[
"MIT"
] | 1
|
2021-08-12T01:08:09.000Z
|
2021-08-12T01:08:09.000Z
|
textbox/trainer/trainer.py
|
JBoRu/TextBox-1
|
0dcbaa153acc507e3d55075312d7ca5d23146e03
|
[
"MIT"
] | null | null | null |
textbox/trainer/trainer.py
|
JBoRu/TextBox-1
|
0dcbaa153acc507e3d55075312d7ca5d23146e03
|
[
"MIT"
] | null | null | null |
# @Time : 2020/11/14
# @Author : Junyi Li, Gaole He
# @Email : lijunyi@ruc.edu.cn
# UPDATE:
# @Time : 2020/12/2, 2020/11/27, 2020/12/3, 2020/12/26
# @Author : Jinhao Jiang, Xiaoxuan Hu, Tianyi Tang, Jinhao Jiang
# @Email : jiangjinhao@std.uestc.edu.cn, huxiaoxuan@ruc.edu.cn, steventang@ruc.edu.cn, jiangjinhao@std.uestc.edu.cn
r"""
textbox.trainer.trainer
################################
"""
import os
import torch
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import copy
import math
from torch.utils.data import DataLoader
from time import time
from logging import getLogger
from textbox.module.Optimizer.optim import ScheduledOptim
from textbox.evaluator import NgramEvaluator, TranslationEvaluator, SummarizationEvaluator
from textbox.utils import ensure_dir, early_stopping
class AbstractTrainer(object):
r"""Trainer Class is used to manage the training and evaluation processes of text generation system models.
AbstractTrainer is an abstract class in which the fit() and evaluate() methods should be implemented according
to different training and evaluation strategies.
"""
def __init__(self, config, model):
self.config = config
self.model = model
def fit(self, train_data):
r"""Train the model based on the train data.
"""
raise NotImplementedError('Method [fit] should be implemented.')
def evaluate(self, eval_data):
r"""Evaluate the model based on the eval data.
"""
raise NotImplementedError('Method [evaluate] should be implemented.')
class Trainer(AbstractTrainer):
r"""The basic Trainer for basic training and evaluation strategies in text generation systems.
This class defines common functions for training and evaluation processes of most text generation system models,
including fit(), evaluate(), resume_checkpoint() and some other features helpful for model training and evaluation.
Generally speaking, this class can serve most text generation system models, if the training process of the model
is to simply optimize a single loss without involving any complex training strategies, such as adversarial learning,
pre-training and so on.
Initializing the Trainer needs two parameters: `config` and `model`. `config` records the parameters information
for controlling training and evaluation, such as `learning_rate`, `epochs`, `eval_step` and so on.
More information can be found in [placeholder]. `model` is the instantiated object of a Model Class.
"""
def __init__(self, config, model):
super(Trainer, self).__init__(config, model)
self.logger = getLogger()
self.learner = config['learner']
self.learning_rate = config['learning_rate']
self.epochs = config['epochs']
self.eval_step = min(config['eval_step'], self.epochs)
self.stopping_step = config['stopping_step']
self.test_batch_size = config['eval_batch_size']
self.device = config['device']
self.embedding_size = config['embedding_size']
self.warmup_steps = config['warmup_steps']
self.checkpoint_dir = config['checkpoint_dir']
ensure_dir(self.checkpoint_dir)
saved_model_file = self.config['filename'] + '.pth'
self.saved_model_file = os.path.join(self.checkpoint_dir, saved_model_file)
self.generated_text_dir = config['generated_text_dir']
ensure_dir(self.generated_text_dir)
saved_text_file = self.config['filename'] + '.txt'
self.saved_text_file = os.path.join(self.generated_text_dir, saved_text_file)
self.start_epoch = 0
self.cur_step = 0
self.best_valid_score = 100000000
self.best_valid_result = None
self.train_loss_dict = dict()
self.optimizer = self._build_optimizer()
self.task_type = config['task_type'].lower()
if self.task_type == "translation":
self.evaluator = TranslationEvaluator(config)
elif self.task_type == "summarization":
self.evaluator = SummarizationEvaluator(config)
else:
self.evaluator = NgramEvaluator(config)
self.item_tensor = None
self.tot_item_num = None
self.iid_field = config['ITEM_ID_FIELD']
def _build_optimizer(self):
r"""Init the Optimizer
Returns:
torch.optim: the optimizer
"""
if self.learner.lower() == 'adam':
optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'sgd':
optimizer = optim.SGD(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'adagrad':
optimizer = optim.Adagrad(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'rmsprop':
optimizer = optim.RMSprop(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'schedule':
optimizer = ScheduledOptim(optim.Adam(self.model.parameters(), betas=(0.9, 0.98), eps=1e-09),
self.learning_rate, self.embedding_size, self.warmup_steps)
else:
self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
return optimizer
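# Example: with config['learner'] = 'adam' and config['learning_rate'] = 1e-3,
# this returns torch.optim.Adam over all model parameters; an unrecognized
# learner string falls back to Adam with a warning.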
def _train_epoch(self, train_data, epoch_idx):
r"""Train the model in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model returns these parts separately instead of their sum, a tuple holding the
sum of each loss part is returned.
"""
self.model.train()
total_loss = None
for batch_idx, data in enumerate(train_data):
self.optimizer.zero_grad()
losses = self.model.calculate_loss(data, epoch_idx=epoch_idx)
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
loss.backward()
self.optimizer.step()
if isinstance(total_loss, tuple):
train_loss = tuple(l / len(train_data) for l in total_loss)
else:
train_loss = total_loss / len(train_data)
return train_loss
def _valid_epoch(self, valid_data):
r"""Valid the model with valid data
Args:
valid_data (DataLoader): the valid data
Returns:
float: valid score
dict: valid result
"""
self.model.eval()
total_loss = None
for batch_idx, data in enumerate(valid_data):
losses = self.model.calculate_loss(data)
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
valid_loss = total_loss / len(valid_data)
ppl = np.exp(valid_loss)
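# perplexity is the exponential of the mean cross-entropy loss, so a lower
# validation loss directly implies a lower (better) perplexity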
return valid_loss, ppl
def _save_checkpoint(self, epoch):
r"""Store the model parameters information and training information.
Args:
epoch (int): the current epoch id
"""
state = {
'config': self.config,
'epoch': epoch,
'cur_step': self.cur_step,
'best_valid_score': self.best_valid_score,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
}
torch.save(state, self.saved_model_file)
def _save_generated_text(self, generated_corpus):
r"""Store the generated text by our model.
Args:
generated_corpus (list of string list): the generated text to be saved.
"""
with open(self.saved_text_file, 'w') as fout:
for tokens in generated_corpus:
fout.write(' '.join(tokens) + '\n')
def resume_checkpoint(self, resume_file):
r"""Load the model parameters information and training information.
Args:
resume_file (file): the checkpoint file
"""
resume_file = str(resume_file)
checkpoint = torch.load(resume_file)
self.start_epoch = checkpoint['epoch'] + 1
self.cur_step = checkpoint['cur_step']
self.best_valid_score = checkpoint['best_valid_score']
# load architecture params from checkpoint
if checkpoint['config']['model'].lower() != self.config['model'].lower():
self.logger.warning('Architecture configuration given in config file is different from that of checkpoint. '
'This may yield an exception while state_dict is being loaded.')
self.model.load_state_dict(checkpoint['state_dict'])
# load optimizer state from checkpoint only when optimizer type is not changed
self.optimizer.load_state_dict(checkpoint['optimizer'])
message_output = 'Checkpoint loaded. Resume training from epoch {}'.format(self.start_epoch)
self.logger.info(message_output)
def _check_nan(self, loss):
if torch.isnan(loss):
raise ValueError('Training loss is nan')
def _generate_train_loss_output(self, epoch_idx, s_time, e_time, losses, train_info=""):
train_loss_output = "epoch %d %straining [time: %.2fs, " % (epoch_idx, train_info, e_time - s_time)
if isinstance(losses, tuple):
for idx, loss in enumerate(losses):
train_loss_output += 'train_loss%d: %.4f, ' % (idx + 1, loss)
train_loss_output = train_loss_output[:-2]
else:
train_loss_output += "train loss: %.4f" % losses
return train_loss_output + ']'
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
r"""Train the model based on the train data and the valid data.
Args:
train_data (DataLoader): the train data
valid_data (DataLoader, optional): the valid data, default: None.
If it is None, early stopping is disabled.
verbose (bool, optional): whether to write training and evaluation information to logger, default: True
saved (bool, optional): whether to save the model parameters, default: True
Returns:
(float, dict): best valid score and best valid result. If valid_data is None, it returns (-1, None)
"""
for epoch_idx in range(self.start_epoch, self.epochs):
# train
training_start_time = time()
train_loss = self._train_epoch(train_data, epoch_idx)
self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
self._save_checkpoint(epoch_idx)
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss)
if verbose:
self.logger.info(train_loss_output)
# eval
if self.eval_step <= 0 or not valid_data:
if saved:
self._save_checkpoint(epoch_idx)
update_output = 'Saving current: %s' % self.saved_model_file
if verbose:
self.logger.info(update_output)
continue
if (epoch_idx + 1) % self.eval_step == 0:
valid_start_time = time()
with torch.no_grad():
valid_score, valid_result = self._valid_epoch(valid_data)
# valid_loss, ppl
self.best_valid_score, self.cur_step, stop_flag, update_flag = early_stopping(
valid_score, self.best_valid_score, self.cur_step,
max_step=self.stopping_step, bigger=False)
# better models are supposed to provide smaller perplexity and loss
valid_end_time = time()
valid_score_output = "epoch %d evaluating [time: %.2fs, valid_loss: %f]" % \
(epoch_idx, valid_end_time - valid_start_time, valid_score)
valid_result_output = 'valid ppl: {}'.format(valid_result)
if verbose:
self.logger.info(valid_score_output)
self.logger.info(valid_result_output)
if update_flag:
if saved:
self._save_checkpoint(epoch_idx)
update_output = 'Saving current best: %s' % self.saved_model_file
if verbose:
self.logger.info(update_output)
self.best_valid_result = valid_result
if stop_flag:
stop_output = 'Finished training, best eval result in epoch %d' % \
(epoch_idx - self.cur_step * self.eval_step)
if verbose:
self.logger.info(stop_output)
break
return self.best_valid_score, self.best_valid_result
def _evaluate_nll_test(self, eval_data):
r"""Calculate the negative log-likelihood of the eval_data.
Args:
eval_data (DataLoader): the eval data.
Returns:
Float: NLL_test of the eval data.
"""
total_loss = 0
for epoch_idx, eval_batch in enumerate(eval_data):
nll_test = self.model.calculate_nll_test(eval_batch, epoch_idx)
total_loss += float(nll_test)
return total_loss / len(eval_data)
@torch.no_grad()
def evaluate(self, eval_data, load_best_model=True, model_file=None):
r"""Evaluate the model based on the eval data.
Args:
eval_data (DataLoader): the eval data
load_best_model (bool, optional): whether to load the best model from the training process, default: True.
It should be set True, if users want to test the model after training.
model_file (str, optional): the saved model file, default: None. If users want to test the previously
trained model file, they can set this parameter.
Returns:
dict: eval result; key is the eval metric and value is the corresponding metric value
"""
if load_best_model:
if model_file:
checkpoint_file = model_file
else:
checkpoint_file = self.saved_model_file
checkpoint = torch.load(checkpoint_file)
self.model.load_state_dict(checkpoint['state_dict'])
message_output = 'Loading model structure and parameters from {}'.format(checkpoint_file)
self.logger.info(message_output)
self.model.eval()
with torch.no_grad():
generate_corpus = self.model.generate(eval_data)
self._save_generated_text(generate_corpus)
reference_corpus = eval_data.get_reference()
result = self.evaluator.evaluate(generate_corpus, reference_corpus)
result['nll_test'] = self._evaluate_nll_test(eval_data)
return result
def plot_train_loss(self, show=True, save_path=None):
r"""Plot the train loss in each epoch
Args:
show (bool, optional): whether to show this figure, default: True
save_path (str, optional): the data path to save the figure, default: None.
If it's None, it will not be saved.
"""
epochs = list(self.train_loss_dict.keys())
epochs.sort()
values = [float(self.train_loss_dict[epoch]) for epoch in epochs]
plt.plot(epochs, values)
plt.xticks(epochs)
plt.xlabel('Epoch')
plt.ylabel('Loss')
if save_path:
plt.savefig(save_path)  # save before show: some backends clear the figure on show
if show:
plt.show()
class UnconditionalTrainer(Trainer):
r"""UnconditionalTrainer is designed for RNN, which is a typical unconditional generator.
"""
def __init__(self, config, model):
super(UnconditionalTrainer, self).__init__(config, model)
class GANTrainer(Trainer):
r"""GANTrainer is designed for GAN, which is a generative adversarial net method.
"""
def __init__(self, config, model):
super(GANTrainer, self).__init__(config, model)
self.optimizer = None
self.g_optimizer = self._build_module_optimizer(self.model.generator)
self.d_optimizer = self._build_module_optimizer(self.model.discriminator)
self.grad_clip = config['grad_clip']
self.g_pretraining_epochs = config['g_pretraining_epochs']
self.d_pretraining_epochs = config['d_pretraining_epochs']
self.d_sample_num = config['d_sample_num']
self.d_sample_training_epochs = config['d_sample_training_epochs']
self.adversarail_training_epochs = config['adversarail_training_epochs']
self.adversarail_d_epochs = config['adversarail_d_epochs']
self.g_pretraining_loss_dict = dict()
self.d_pretraining_loss_dict = dict()
self.max_length = config['max_seq_length'] + 2
self.pad_idx = model.pad_idx
def _build_module_optimizer(self, module):
r"""Init the Module Optimizer
Args:
module (torch.nn.Module): the torch.nn module that needs an optimizer
Returns:
torch.optim: the optimizer
"""
if self.learner.lower() == 'adam':
optimizer = optim.Adam(module.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'sgd':
optimizer = optim.SGD(module.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'adagrad':
optimizer = optim.Adagrad(module.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'rmsprop':
optimizer = optim.RMSprop(module.parameters(), lr=self.learning_rate)
else:
self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
optimizer = optim.Adam(module.parameters(), lr=self.learning_rate)
return optimizer
def _optimize_step(self, losses, total_loss, model, opt):
r"""The opt uses the cliped losses to conduct an optimize step to optimize model
and sum up losses to the total_loss.
Args:
losses (torch.Tensor or tuple): The loss to be backward.
total_loss (Float): Total loss in an epoch.
model (torch.nn.Module): The model to be optimized.
opt (torch.optim): The optimizer of the model.
Returns:
torch.Tensor or tuple: Total loss in an epoch, shape: [].
"""
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
opt.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
opt.step()
return total_loss
def _save_checkpoint(self, epoch):
state = {
'config': self.config,
'epoch': epoch,
'cur_step': self.cur_step,
'best_valid_score': self.best_valid_score,
'state_dict': self.model.state_dict()
}
torch.save(state, self.saved_model_file)
def _add_pad(self, data):
r"""Pad the data to the max length of corpus.
Args:
data (torch.Tensor): The data to be padded, shape: [batch_size, max_batch_length].
Returns:
torch.Tensor: The padded data, shape: [batch_size, max_seq_length].
"""
batch_size = data.shape[0]
padded_data = torch.full((batch_size, self.max_length), self.pad_idx, dtype=torch.long, device=self.device)
padded_data[:, : data.shape[1]] = data
return padded_data
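# Shape sketch: an input of shape [batch_size, max_batch_length] is copied
# into a pad_idx-filled tensor of shape [batch_size, self.max_length], so
# every batch leaves this method with one fixed sequence length.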
def _get_real_data(self, train_data):
r"""Get the target text index of the corpus train_datas.
Args:
train_data (DataLoader): the train data.
Returns:
torch.Tensor: The target text index, shape: [batch_size, max_batch_length].
"""
real_datas = []
for corpus in train_data:
real_data = corpus['target_idx']
real_data = self._add_pad(real_data)
real_datas.append(real_data)
real_datas = torch.cat(real_datas, dim=0)
return real_datas
def _g_train_epoch(self, train_data, epoch_idx):
r"""Train the generator module in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model returns these parts separately instead of their sum, a tuple holding the
sum of each loss part is returned.
"""
self.model.generator.train()
total_loss = None
for batch_idx, data in enumerate(train_data):
losses = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
if isinstance(total_loss, tuple):
total_loss = tuple(l / len(train_data) for l in total_loss)
else:
total_loss = total_loss / len(train_data)
return total_loss
def _d_train_epoch(self, train_data, epoch_idx):
r"""Train the discriminator module in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model returns these parts separately instead of their sum, a tuple holding the
sum of each loss part is returned.
"""
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
fake_data = self.model.sample(self.d_sample_num)
fake_dataloader = DataLoader(fake_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for _ in range(self.d_sample_training_epochs): # d_epoch
for real_data, fake_data in zip(real_dataloader, fake_dataloader):
losses = self.model.calculate_d_train_loss(real_data, fake_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
return total_loss / min(len(real_dataloader), len(fake_dataloader)) / self.d_sample_training_epochs
def _adversarial_train_epoch(self, train_data, epoch_idx):
r"""Adversarial training in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model returns these parts separately instead of their sum, a tuple holding the
sum of each loss part is returned.
"""
self.model.generator.train()
total_loss = None
losses = self.model.calculate_g_adversarial_loss(epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
for epoch_idx in range(self.adversarail_d_epochs):
self._d_train_epoch(train_data, epoch_idx=epoch_idx)
return total_loss
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
# generator pretraining
if verbose:
self.logger.info("Start generator pretraining...")
for epoch_idx in range(self.g_pretraining_epochs):
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
self.g_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"generator pre")
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End generator pretraining...")
# discriminator pretraining
if verbose:
self.logger.info("Start discriminator pretraining...")
for epoch_idx in range(self.d_pretraining_epochs):
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
self.d_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"discriminator pre")
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End discriminator pretraining...")
# adversarial training
if verbose:
self.logger.info("Start adversarial training...")
for epoch_idx in range(self.adversarail_training_epochs):
training_start_time = time()
train_loss = self._adversarial_train_epoch(train_data, epoch_idx)
self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss)
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End adversarial pretraining...")
self._save_checkpoint(self.adversarail_training_epochs)
return -1, None
class TextGANTrainer(GANTrainer):
r"""TextGANTrainer is designed for TextGAN.
"""
def __init__(self, config, model):
super(TextGANTrainer, self).__init__(config, model)
self.adversarail_g_epochs = config['adversarail_g_epochs']
def _d_train_epoch(self, train_data, epoch_idx):
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for _ in range(self.d_sample_training_epochs):
for idx, real_data in enumerate(real_dataloader):
fake_data, z = self.model.sample()
losses = self.model.calculate_d_train_loss(real_data, fake_data, z, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
if (idx * self.model.batch_size >= self.d_sample_num):
break
return total_loss / min(len(real_dataloader), self.d_sample_num // self.model.batch_size) / self.d_sample_training_epochs
def _adversarial_train_epoch(self, train_data, epoch_idx):
self.model.generator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for idx, real_data in enumerate(real_dataloader):
if (idx == self.adversarail_g_epochs):
break
losses = self.model.calculate_g_adversarial_loss(real_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
for epoch_idx in range(self.adversarail_d_epochs):
self._d_train_epoch(train_data, epoch_idx=epoch_idx)
return total_loss / min(len(real_dataloader), self.adversarail_g_epochs)
class RankGANTrainer(GANTrainer):
r"""RankGANTrainer is designed for RankGAN.
"""
def __init__(self, config, model):
super(RankGANTrainer, self).__init__(config, model)
def _d_train_epoch(self, train_data, epoch_idx):
r"""Train the discriminator module in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model returns these parts separately instead of their sum, a tuple holding the
sum of each loss part is returned.
"""
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
fake_data = self.model.sample(self.d_sample_num)
fake_dataloader = DataLoader(fake_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
ref_index = np.random.randint(0, real_data.shape[0], size=self.model.ref_size)
ref_data = real_data[ref_index] # ref_size * l
for _ in range(self.d_sample_training_epochs):
for real_data, fake_data in zip(real_dataloader, fake_dataloader):
losses = self.model.calculate_d_train_loss(real_data, fake_data, ref_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
return total_loss / min(len(real_dataloader), len(fake_dataloader)) / self.d_sample_training_epochs
def _adversarial_train_epoch(self, train_data, epoch_idx):
r"""Adversarial training in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model returns these parts separately instead of their sum, a tuple holding the
sum of each loss part is returned.
"""
self.model.generator.train()
total_loss = None
real_data = self._get_real_data(train_data)
ref_index = np.random.randint(0, real_data.shape[0], size=self.model.ref_size)
ref_data = real_data[ref_index] # ref_size * l
losses = self.model.calculate_g_adversarial_loss(ref_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
d_loss = 0
for epoch_idx in range(self.adversarail_d_epochs):
d_loss += self._d_train_epoch(train_data, epoch_idx=epoch_idx)
d_loss = d_loss / self.adversarail_d_epochs
return total_loss
class ConditionalTrainer(Trainer):
r"""ConditionalTrainer is designed for seq2seq testing, which is a typically used setting.
"""
def __init__(self, config, model):
super(ConditionalTrainer, self).__init__(config, model)
@torch.no_grad()
def evaluate(self, eval_data, load_best_model=True, model_file=None):
r"""Evaluate the model based on the eval data.
Args:
eval_data (DataLoader): the eval data
load_best_model (bool, optional): whether to load the best model from the training process, default: True.
It should be set True, if users want to test the model after training.
model_file (str, optional): the saved model file, default: None. If users want to test the previously
trained model file, they can set this parameter.
Returns:
dict: eval result; key is the eval metric and value is the corresponding metric value
"""
if load_best_model:
if model_file:
checkpoint_file = model_file
else:
checkpoint_file = self.saved_model_file
checkpoint = torch.load(checkpoint_file)
self.model.load_state_dict(checkpoint['state_dict'])
message_output = 'Loading model structure and parameters from {}'.format(checkpoint_file)
self.logger.info(message_output)
self.model.eval()
generate_corpus = self.model.generate(eval_data)
self._save_generated_text(generate_corpus)
reference_corpus = eval_data.get_reference()
result = self.evaluator.evaluate(generate_corpus, reference_corpus)
return result
class MaskGANTrainer(GANTrainer):
r""" Trainer specifically designed for MaskGAN training process.
"""
def __init__(self, config, model):
super(MaskGANTrainer, self).__init__(config, model)
self.max_length = config["max_seq_length"]
self.eos_token_idx = model.eos_idx
self.adversarail_c_epochs = config['adversarail_c_epochs']
self.g_mask_pretraining_epochs = config['g_mask_pretraining_epochs']
self.g_lr = config['gen_learning_rate']
self.d_lr = config['dis_learning_rate']
self.c_lr = config['critic_learning_rate']
self.g_optimizer = self._build_module_optimizer_(self.model.generator, self.g_lr)
self.d_optimizer = self._build_module_optimizer_(self.model.discriminator, self.d_lr)
self.c_optimizer = self._build_module_optimizer_(self.model.discriminator.critic_fc_linear, self.c_lr)
self.pre_lm_weight = config["pre_lm_weight"]
self.pretrain_lm_epochs = config["pretrain_lm_epochs"]
self.checkp = config['checkp']
def _build_module_optimizer_(self, module, lr):
r""" Init the Module Optimizer with specified learning rate
Returns:
torch.optim: the optimizer
"""
if self.learner.lower() == 'adam':
optimizer = optim.Adam(module.parameters(), lr)
elif self.learner.lower() == 'sgd':
optimizer = optim.SGD(module.parameters(), lr)
elif self.learner.lower() == 'adagrad':
optimizer = optim.Adagrad(module.parameters(), lr)
elif self.learner.lower() == 'rmsprop':
optimizer = optim.RMSprop(module.parameters(), lr)
else:
self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
optimizer = optim.Adam(module.parameters(), lr)
return optimizer
def _optimize_step(self, losses, total_loss, model, opt, retain_graph=False):
r""" Add retain_graph option
"""
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
opt.zero_grad()
loss.backward(retain_graph=retain_graph)
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
opt.step()
return total_loss
def _generate_train_loss_output(self, epoch_idx, s_time, e_time, losses, train_info=""):
r""" Specified for maskgan output
"""
train_loss_output = "%straining [time: %.2fs, " % (train_info, e_time - s_time)
if isinstance(losses, dict):
for key, loss in losses.items():
train_loss_output += '%s: %.4f, ' % (key, loss)
train_loss_output = train_loss_output[:-2]
else:
train_loss_output += "train loss: %.4f" % losses
return train_loss_output + ']'
def pretrain_lm(self, train_data, valid_data, verbose):
r""" Pretrain rnn-based Language Model with teacher forcing mechanism
"""
def lm_forward(data):
r""" One iteration of LM forward
"""
inp = data[:, :-1]  # bs * (self.max_len - 1)
target = data[:, 1:]
bs, seq_len = target.size()
lengths = torch.tensor([seq_len] * bs)
target_present = torch.ones_like(inp).byte()
device = target.device
lengths = lengths.to(device)
# pretraining
encoder_outputs = pre_train_lm(inp, lengths, target, target_present, pretrain=True)
logit = pre_train_lm.vocab_linear(encoder_outputs)
logit = logit.permute([0, 2, 1])
lossf = torch.nn.CrossEntropyLoss()
loss = lossf(logit, target)
return loss
pre_train_lm = self.model.generator
lm_opt = self._build_module_optimizer_(pre_train_lm, lr=0.001)
for epoch in range(self.pretrain_lm_epochs):
total_loss = None
real_data = self._get_real_data(train_data) # bs * self.max_len
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
loss = lm_forward(data)
total_loss = self._optimize_step(loss, total_loss, pre_train_lm, lm_opt)
total_loss = total_loss / len(real_dataloader)
if verbose:
self.logger.info("Epoch {}/{} of LM pretraining loss: {} ".format(epoch+1, self.pretrain_lm_epochs, total_loss))
ppl = 0.0
if (epoch+1) % 1 == 0:
pre_train_lm.eval()
validate_data = self._get_real_data(valid_data) # bs * self.max_len
validate_dataloader = DataLoader(validate_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
ppl = 0.0
for batch_idx, data in enumerate(validate_dataloader):
cross_entropy_loss = lm_forward(data)
ppl += math.exp(cross_entropy_loss.item())
ppl = ppl / len(validate_dataloader)
pre_train_lm.train()
if verbose:
self.logger.info("Epoch {}/{} of LM pretraining PPL: {}...".format(epoch + 1, self.pretrain_lm_epochs, ppl))
if ppl < 110:
state_dict = {
'embedder': pre_train_lm.embedder,
'encoder': pre_train_lm.encoder.encoder,
'vocab_linear': pre_train_lm.vocab_linear
}
self.pre_lm_weight = "saved/pretrain_lm_weight" + str(epoch+1) + ".pkl"
torch.save(state_dict, self.pre_lm_weight)
if verbose:
self.logger.info("End LM pretraining. PPL: {}".format(ppl))
self.logger.info("Weigth saved in {}".format(self.pre_lm_weight))
return pre_train_lm, ppl
def _g_train_epoch(self, train_data, epoch_idx):
self.model.generator.train()
total_loss = None
real_data = self._get_real_data(train_data) # bs * self.max_len
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
loss = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(loss, total_loss, self.model.generator, self.g_optimizer)
total_loss = total_loss / len(real_dataloader)
return total_loss
def _get_validate_ppl(self, validate_data, epoch_idx):
self.model.generator.eval()
ppl = 0.0
validate_data = self._get_real_data(validate_data) # bs * self.max_len
validate_dataloader = DataLoader(validate_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(validate_dataloader):
loss = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx, validate=True)
ppl += math.exp(loss.item())
ppl = ppl / len(validate_dataloader)
self.model.generator.train()
return ppl
def _d_train_epoch(self, train_data, epoch_idx):
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
losses = self.model.calculate_d_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
return total_loss / len(real_dataloader)
def _adversarial_train_epoch(self, train_data, epoch_idx):
r""" Specified for MaskGAN adversarial training
"""
dis_total_loss = None
gen_total_loss = None
critic_total_loss = None
g_num = 0.0
d_num = 0.0
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
dis_train_data = copy.deepcopy(real_dataloader)
gen_train_data = copy.deepcopy(real_dataloader)
c_train_data = copy.deepcopy(real_dataloader)
dis_train_data = iter(dis_train_data)
gen_train_data = iter(gen_train_data)
_ = next(dis_train_data)  # skip one batch so the two iterators stay offset
for g_x in gen_train_data:
g_num += 1
for d_step in range(3):
d_num += 1
try:
d_x = next(dis_train_data)
except StopIteration:
del dis_train_data
dis_train_data = copy.deepcopy(real_dataloader)
dis_train_data = iter(dis_train_data)
d_x = next(dis_train_data)
losses = self.model.calculate_d_train_loss(d_x, epoch_idx=d_step)
dis_total_loss = self._optimize_step(losses, dis_total_loss, self.model.discriminator, self.d_optimizer)
gen_losses, critic_losses = self.model.calculate_g_adversarial_loss(g_x, epoch_idx=g_num)
gen_total_loss = self._optimize_step(gen_losses, gen_total_loss, self.model.generator, self.g_optimizer)
critic_total_loss = self._optimize_step(critic_losses, critic_total_loss, self.model.discriminator.critic_fc_linear, self.c_optimizer)
return {"dis_loss": dis_total_loss / d_num, "gen_loss": gen_total_loss / g_num, "critic_loss": critic_total_loss / g_num}
def _evaluate_nll_test(self, eval_data):
total_loss = 0
real_data = self._get_real_data(eval_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
nll_test = self.model.calculate_nll_test(data, batch_idx)
total_loss += float(nll_test)
return total_loss / len(eval_data)
def _add_eos(self, data, length):
batch_size, pad_seq_len = data.size()
padded_data = torch.full((batch_size, self.max_length), self.eos_token_idx, dtype=torch.long, device=self.device)
for i in range(batch_size):
l = int(length[i].cpu().data)
if l == self.max_length+2:
padded_data[i, :] = data[i, 1:l-1]
else:
padded_data[i, 0:l-1] = data[i, 1:l]
return padded_data
def _get_real_data(self, train_data):
real_datas = []
for corpus in train_data:
real_data = corpus['target_idx'] # bs*batch_max_seq_len
length = corpus['target_length']
real_data = self._add_eos(real_data, length)
real_datas.append(real_data)
real_datas = torch.cat(real_datas, dim=0)
return real_datas
def _save_checkpoint(self, epoch, postfix=None):
state = {
'config': self.config,
'epoch': epoch,
'cur_step': self.cur_step,
'best_valid_score': self.best_valid_score,
'state_dict': self.model.state_dict(),
'g_opt': self.g_optimizer.state_dict(),
'd_opt': self.d_optimizer.state_dict(),
'c_opt':self.c_optimizer.state_dict()
}
if postfix is not None:
path = self.saved_model_file + "_" + str(epoch) + "_" + postfix
torch.save(state, path)
return path
else:
torch.save(state, self.saved_model_file)
def _load_generated_text(self):
r""" Load the generated text by our model to log.
"""
with open(self.saved_text_file, 'r') as fin:
samples = []
for i in range(5):
text = fin.readline()
samples.append(text)
return samples
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
# generator pretraining
if self.checkp is not None:
checkpoint = torch.load(self.checkp)
self.model.load_state_dict(checkpoint['state_dict'])
self.d_optimizer.load_state_dict(checkpoint["d_opt"])
self.g_optimizer.load_state_dict(checkpoint["g_opt"])
epoch_check = checkpoint['epoch']
if verbose:
self.logger.info("Load checkpoint file from: {}".format(self.checkp))
else:
if self.pre_lm_weight is None:
if verbose:
self.logger.info("Start LM pretraining...")
pretrain_lm, ppl = self.pretrain_lm(train_data, valid_data, verbose)
pretrain_lm = torch.load(self.pre_lm_weight)
embedder = pretrain_lm['embedder'].state_dict()
lstm = pretrain_lm['encoder'].state_dict()
vocab_linear = pretrain_lm['vocab_linear'].state_dict()
self.model.generator.embedder.load_state_dict(embedder)
self.model.generator.encoder.encoder.load_state_dict(lstm)
self.model.generator.decoder.decoder.load_state_dict(lstm)
self.model.generator.vocab_linear.load_state_dict(vocab_linear)
self.model.discriminator.encoder.encoder.load_state_dict(lstm)
self.model.discriminator.decoder.decoder.load_state_dict(lstm)
if verbose:
self.logger.info("Load pretrained LM weight")
else:
pretrain_lm = torch.load(self.pre_lm_weight)
embedder = pretrain_lm['embedder'].state_dict()
lstm = pretrain_lm['encoder'].state_dict()
vocab_linear = pretrain_lm['vocab_linear'].state_dict()
self.model.generator.embedder.load_state_dict(embedder)
self.model.generator.encoder.encoder.load_state_dict(lstm)
self.model.generator.decoder.decoder.load_state_dict(lstm)
self.model.generator.vocab_linear.load_state_dict(vocab_linear)
self.model.discriminator.encoder.encoder.load_state_dict(lstm)
self.model.discriminator.decoder.decoder.load_state_dict(lstm)
if verbose:
self.logger.info("Load pretrained LM weight from: {}".format(self.pre_lm_weight))
if verbose:
self.logger.info("Start generator mask pretraining...")
for epoch_idx in range(self.g_mask_pretraining_epochs):
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
self.g_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"generator pre")
if verbose:
self.logger.info(train_loss_output)
ppl = self._get_validate_ppl(valid_data, epoch_idx)
if verbose:
self.logger.info(
"Epoch {}/{} of mask pretraining PPL: {}...".format(epoch_idx + 1, self.g_mask_pretraining_epochs, ppl))
if ppl <= 90:
if verbose:
path = self._save_checkpoint(epoch_idx + 1, postfix="pretrain_gen")
self.logger.info(">>>> [Pretrain Gen] PPL: {} save weight in {}".format(ppl, path))
self.logger.info("End generator mask pretraining...")
break
if (epoch_idx) % 10 == 0:
self.logger.info(">>>> [Pretrain Gen] Save pretrain gen check in epoch %d ..." % (epoch_idx + 1))
path = self._save_checkpoint(epoch_idx + 1, postfix="pretrain_gen")
self.model.eval()
test_result = self.evaluate(valid_data, model_file=path)
self.model.train()
sample = self._load_generated_text()
tmp = "\n"
for i, s in enumerate(sample):
tmp += str(i)
tmp += ": "
tmp += s.strip()
tmp += "\n"
self.logger.info('>>>> [Pretrain Gen] test result: {}'.format(test_result))
self.logger.info('>>>> [Pretrain Gen] test result samples: {}'.format(tmp))
# discriminator pretraining
if verbose:
self.logger.info("Start discriminator pretraining...")
for epoch_idx in range(self.d_pretraining_epochs):
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
self.d_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"discriminator pre")
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End discriminator pretraining...")
# adversarial training
if verbose:
self.logger.info("Start adversarial training...")
for epoch_idx in range(self.adversarail_training_epochs):
training_start_time = time()
train_loss = self._adversarial_train_epoch(train_data, epoch_idx)
self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss)
if verbose:
self.logger.info(train_loss_output)
if (epoch_idx+1) % 10 == 0:
path = self._save_checkpoint((epoch_idx + 1), postfix="adv_train")
self.model.eval()
test_result = self.evaluate(valid_data, model_file=path)
self.model.train()
sample = self._load_generated_text()
tmp = "\n"
for i, s in enumerate(sample):
tmp += str(i)
tmp += ": "
tmp += s.strip()
tmp += "\n"
self.logger.info('>>>>>> [Adv] test result: {}'.format(test_result))
self.logger.info('>>>>>> [Adv] test result samples: {}'.format(tmp))
if verbose:
self.logger.info("End adversarial pretraining...")
self._save_checkpoint(self.adversarail_training_epochs)
return -1, None
class LeakGANTrainer(GANTrainer):
r"""Specified for leakgan trainer
"""
def __init__(self, config, model):
super(LeakGANTrainer, self).__init__(config, model)
self.interleaved_pretrain_epoch = config['interleaved_pretrain_epoch']
self.adversarail_g_epochs = config['adversarail_g_epochs']
gen_lr = config['generator_lr'] # 0.001
dis_lr = config['discriminator_lr'] # 0.00005
self.g_optimizer = self._build_module_optimizer_(self.model.generator, gen_lr) # (manager_opt, worker_opt)
self.d_optimizer = self._build_module_optimizer_(self.model.discriminator, dis_lr)
self.iters_num = config['iter_num']
self.end_idx = model.end_idx
    def _build_module_optimizer_(self, module, learning_rate):
        r"""Specified for leakgan: builds a (manager_opt, worker_opt) pair for the
        generator and a single optimizer for any other module.
        """
        multi_flag = False
        if module._get_name() == 'LeakGANGenerator':
            manager_params, worker_params = module.split_params()
            multi_flag = True
        optimizers = {'adam': optim.Adam, 'sgd': optim.SGD,
                      'adagrad': optim.Adagrad, 'rmsprop': optim.RMSprop}
        opt_class = optimizers.get(self.learner.lower())
        if opt_class is None:
            self.logger.warning('Received unrecognized optimizer, setting default Adam optimizer')
            opt_class = optim.Adam
        if multi_flag:
            return (opt_class(manager_params, lr=learning_rate),
                    opt_class(worker_params, lr=learning_rate))
        return opt_class(module.parameters(), lr=learning_rate)
def _optimize_step(self, losses, total_loss, model, opt):
r"""Specified for leakgan optimize
"""
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
if isinstance(losses, tuple):
for i, (o, loss) in enumerate(zip(opt, losses)):
o.zero_grad()
                loss.backward(retain_graph=i < len(opt) - 1)
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
o.step()
else:
opt.zero_grad()
losses.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
opt.step()
return total_loss
def _generate_train_loss_output(self, epoch_idx, s_time, e_time, losses, train_info=""):
r"""Specified for leakgan output format
"""
train_loss_output = "%straining [time: %.2fs, " % (train_info, e_time - s_time)
if isinstance(losses, dict):
for key, loss in losses.items():
train_loss_output += '%s: %.4f, ' % (key, loss)
train_loss_output = train_loss_output[:-2]
else:
train_loss_output += "train loss: %.4f" % losses
return train_loss_output + ']'
def _add_eos(self, data, length):
batch_size = data.shape[0]
padded_data = torch.full((batch_size, self.max_length), self.end_idx, dtype=torch.long, device=self.device)
for i in range(batch_size):
            seq_len = length[i].cpu().data
            padded_data[i, :seq_len] = data[i, :seq_len]
return padded_data
def _get_real_data(self, train_data):
r"""Specified for leakgan which use eos_idx pad not pad_idx
"""
real_datas = []
for corpus in train_data:
real_data = corpus['target_idx']
length = corpus['target_length']
real_data = self._add_eos(real_data, length)
real_datas.append(real_data)
real_datas = torch.cat(real_datas, dim=0)
return real_datas
def _adversarial_train_epoch(self, train_data, epoch_idx):
r"""Specified for leakgan adversarial training
"""
self.model.generator.train()
total_g_loss = None
total_d_loss = 0
total_d_acc = 0
adv_mana_loss = 0
adv_work_loss = 0
adv_d_loss = 0
for e in range(self.adversarail_g_epochs):
losses = self.model.calculate_g_adversarial_loss(epoch_idx=e)
total_g_loss = self._optimize_step(losses, total_g_loss, self.model.generator, self.g_optimizer)
adv_mana_loss, adv_work_loss = total_g_loss
adv_mana_loss = adv_mana_loss / self.adversarail_g_epochs
adv_work_loss = adv_work_loss / self.adversarail_g_epochs
for e in range(self.adversarail_d_epochs):
loss_dict = self._d_train_epoch(train_data, epoch_idx=epoch_idx)
total_d_loss = total_d_loss + loss_dict['total_loss']
total_d_acc = total_d_acc + loss_dict['train_acc']
adv_d_loss = total_d_loss / self.adversarail_d_epochs
adv_c_loss = total_d_acc / self.adversarail_d_epochs
return {"mana_loss": adv_mana_loss, "work_loss": adv_work_loss, "dis_loss": adv_d_loss, "train_acc": adv_c_loss}
def _g_train_epoch(self, train_data, epoch_idx):
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
# interaction = interaction.to(self.device)
losses = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
        if isinstance(total_loss, tuple):
            total_loss = tuple(l / len(real_dataloader) for l in total_loss)
        else:
            total_loss = total_loss / len(real_dataloader)
mana_loss, work_loss = total_loss
return {"mana_loss": mana_loss, "work_loss": work_loss}
def _d_train_epoch(self, train_data, epoch_idx):
total_loss = None
total_acc = 0
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
        # no need to sample self.d_sample_num sequences: the discriminator is only trained on d_sample_training_epochs batches
d_sample_num = (self.d_sample_training_epochs + 1) * self.model.batch_size
fake_data = self.model.sample(d_sample_num)
fake_dataloader = DataLoader(fake_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
idx = 0
for real_data, fake_data in zip(real_dataloader, fake_dataloader):
            # self.model.discriminator.eval()  # pretraining does not use dropout
if idx == self.d_sample_training_epochs:
break
losses, acc = self.model.calculate_d_train_loss(real_data, fake_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
total_acc = total_acc + acc
idx += 1
total_loss = total_loss / self.d_sample_training_epochs
total_acc = total_acc / self.d_sample_training_epochs
return {"total_loss": total_loss, "train_acc": total_acc}
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
# pretraining
if verbose:
self.logger.info(">> Start pretraining")
# generator pretraining
for epoch_idx in range(self.g_pretraining_epochs): # 80
if verbose:
self.logger.info(">>>> [Pretrain Gen] Start %d / %d epochs generator pretraining" % (
epoch_idx + 1, self.g_pretraining_epochs))
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx + 1, training_start_time, training_end_time, train_loss,
"generator pre")
train_loss_output = ">>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
# discriminator pretraining
for epoch_idx in range(self.d_pretraining_epochs): # 5
if verbose:
self.logger.info(">>>> [Pretrain Dis]Start %d / %d epochs discriminator pretraining..." % (
epoch_idx + 1, self.d_pretraining_epochs))
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"discriminator pre")
train_loss_output = ">>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info(">> End pretraining")
# adversarial training
if verbose:
self.logger.info(">> Start adversarial training")
        total_rounds = self.iters_num // self.adversarail_training_epochs
        for epoch in range(total_rounds):
            if verbose:
                self.logger.info(">>>> [Adv] Start epoch %d / %d interleaved adversarial training" % (epoch + 1, total_rounds))
for epoch_idx in range(self.adversarail_training_epochs):
if verbose:
self.logger.info(">>>>>> [Adv] Start epoch %d / %d adversarial training" % (
epoch_idx + 1, self.adversarail_training_epochs))
training_start_time = time()
train_loss = self._adversarial_train_epoch(train_data, epoch_idx)
# self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output((epoch_idx + 1), training_start_time, training_end_time,
train_loss,
train_info="adv ")
train_loss_output = ">>>>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
# gen pretrain
for epoch_idx in range(5):
if verbose:
self.logger.info(">>>>>> [Adv] Start epoch %d / 5 pretrain generator" % (epoch_idx + 1))
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output((epoch_idx + 1), training_start_time, training_end_time,
train_loss,
"adv generator pre")
train_loss_output = ">>>>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
# dis pretrain
for epoch_idx in range(5): # d_steps
if verbose:
self.logger.info(">>>>>> [Adv] Start epoch %d / 5 pretrain discriminator" % (epoch_idx + 1))
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output((epoch_idx + 1), training_start_time, training_end_time,
train_loss,
"adv discriminator pre")
train_loss_output = ">>>>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
self._save_checkpoint(self.adversarail_training_epochs)
return -1, None
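# A minimal, self-contained sketch of the two-optimizer update pattern used by
# _optimize_step above. The `manager`/`worker` linear layers are hypothetical
# stand-ins for LeakGAN's manager/worker split: each loss gets its own
# optimizer, and every backward pass except the last keeps the graph alive via
# retain_graph. (torch and optim are already imported at the top of this module.)
def _demo_split_optimizer_step():
    manager = torch.nn.Linear(8, 8)
    worker = torch.nn.Linear(8, 2)
    opts = (optim.Adam(manager.parameters(), lr=1e-3),
            optim.Adam(worker.parameters(), lr=1e-3))
    hidden = manager(torch.randn(4, 8))
    out = worker(hidden.detach())  # the worker loss does not backprop into the manager
    losses = (hidden.pow(2).mean(), out.pow(2).mean())  # stand-ins for (mana_loss, work_loss)
    for i, (opt, loss) in enumerate(zip(opts, losses)):
        opt.zero_grad()
        loss.backward(retain_graph=i < len(opts) - 1)
        opt.step()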
| 45.997251
| 146
| 0.621836
| 8,279
| 66,926
| 4.749245
| 0.062206
| 0.032046
| 0.02556
| 0.021262
| 0.725909
| 0.678527
| 0.645439
| 0.614538
| 0.576668
| 0.559628
| 0
| 0.003928
| 0.288587
| 66,926
| 1,454
| 147
| 46.028886
| 0.821894
| 0.146311
| 0
| 0.568461
| 0
| 0
| 0.067128
| 0.00226
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057602
| false
| 0
| 0.012276
| 0
| 0.120869
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3a8234ec61d7794c6426793212657ac24a62f4a
| 649
|
py
|
Python
|
rsserpent/plugins/builtin/__init__.py
|
EurusEurus/RSSerpent
|
fd7aaf67b80b2b48c14b1a3efe733374b0012338
|
[
"MIT"
] | null | null | null |
rsserpent/plugins/builtin/__init__.py
|
EurusEurus/RSSerpent
|
fd7aaf67b80b2b48c14b1a3efe733374b0012338
|
[
"MIT"
] | null | null | null |
rsserpent/plugins/builtin/__init__.py
|
EurusEurus/RSSerpent
|
fd7aaf67b80b2b48c14b1a3efe733374b0012338
|
[
"MIT"
] | null | null | null |
from ...models import Persona, Plugin
from . import example, example_cache, example_ratelimit, example_with_args
plugin = Plugin(
name="rsserpent-plugin-builtin",
author=Persona(
name="queensferryme",
link="https://github.com/queensferryme",
email="queensferry.me@gmail.com",
),
repository="https://github.com/RSSerpent/RSSerpent",
prefix="/_",
routers={
example.path: example.provider,
example_cache.path: example_cache.provider,
example_ratelimit.path: example_ratelimit.provider,
example_with_args.path: example_with_args.provider,
},
)
__all__ = ("plugin",)
| 28.217391
| 74
| 0.682589
| 69
| 649
| 6.173913
| 0.42029
| 0.103286
| 0.105634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191063
| 649
| 22
| 75
| 29.5
| 0.811429
| 0
| 0
| 0
| 0
| 0
| 0.214176
| 0.07396
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.105263
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3a86ac522e7ca59c54af2df1492f75fd0ad7b3e
| 2,859
|
py
|
Python
|
data_processing/process_xls.py
|
luisroel91/libdib_assesment
|
c969cfecbce1243b457961ffafe5caaea7bb5149
|
[
"MIT"
] | null | null | null |
data_processing/process_xls.py
|
luisroel91/libdib_assesment
|
c969cfecbce1243b457961ffafe5caaea7bb5149
|
[
"MIT"
] | null | null | null |
data_processing/process_xls.py
|
luisroel91/libdib_assesment
|
c969cfecbce1243b457961ffafe5caaea7bb5149
|
[
"MIT"
] | null | null | null |
import pandas as pd
# Define our header
col_names = [
"year",
"num_males_with_income",
"male_median_income_curr_dollars",
"male_median_income_2019_dollars",
"num_females_with_income",
"female_median_income_curr_dollars",
"female_median_income_2019_dollars",
]
# Load Asian census data XLS, skipping all headers
dfa = pd.read_excel(
r'p08a.xlsx',
skiprows=8,
# Make sure PD doesn't use header row for our DF
header=None,
# Define col names
names=col_names,
)
# Load White census data XLS, skipping all headers
dfw = pd.read_excel(
r'p08w.xlsx',
skiprows=8,
# Make sure PD doesn't use header row for our DF
header=None,
    # Define col names
names=col_names
)
# Splinter off rows into age group DFs for both sets of data
dfa1524 = dfa.iloc[:20]
dfa2534 = dfa.iloc[25:45]
dfa3544 = dfa.iloc[50:70]
dfa4554 = dfa.iloc[75:95]
dfa5564 = dfa.iloc[100:120]
dfa6574 = dfa.iloc[125:145]
dfa75 = dfa.iloc[150:170]
dfw1524 = dfw.iloc[:20]
dfw2534 = dfw.iloc[25:45]
dfw3544 = dfw.iloc[50:70]
dfw4554 = dfw.iloc[75:95]
dfw5564 = dfw.iloc[100:120]
dfw6574 = dfw.iloc[125:145]
dfw75 = dfw.iloc[150:170]
# Add Age Range col to each DF
dfa1524.insert(0, 'age_range', '15-24')
dfa2534.insert(0, 'age_range', '25-34')
dfa3544.insert(0, 'age_range', '35-44')
dfa4554.insert(0, 'age_range', '45-54')
dfa5564.insert(0, 'age_range', '55-64')
dfa6574.insert(0, 'age_range', '65-74')
dfa75.insert(0, 'age_range', 'Over 75')
dfw1524.insert(0, 'age_range', '15-24')
dfw2534.insert(0, 'age_range', '25-34')
dfw3544.insert(0, 'age_range', '35-44')
dfw4554.insert(0, 'age_range', '45-54')
dfw5564.insert(0, 'age_range', '55-64')
dfw6574.insert(0, 'age_range', '65-74')
dfw75.insert(0, 'age_range', 'Over 75')
# Stack cleaned DF's vertically
dfa = pd.concat([
dfa1524,
dfa2534,
dfa3544,
dfa4554,
dfa5564,
dfa6574,
dfa75
], axis=0)
dfw = pd.concat([
dfw1524,
dfw2534,
dfw3544,
dfw4554,
dfw5564,
dfw6574,
dfw75
], axis=0)
# Add Race col
dfa.insert(0, 'race', 'asian')
dfw.insert(0, 'race', 'white')
# Clean garbage chars in Year col using regex
dfa['year'] = dfa['year'].replace(to_replace=r'(\s\(\d+\))', value='', regex=True)
dfw['year'] = dfw['year'].replace(to_replace=r'(\s\(\d+\))', value='', regex=True)
# Stack our cleaned + normalized data into a single DF
df = pd.concat([
dfa,
dfw
], axis=0)
# Convert the DF col types to conform to our CensusRecord model
df = df.astype({
"race": str,
"age_range": str,
"year": int,
"num_males_with_income": int,
"male_median_income_curr_dollars": float,
"male_median_income_2019_dollars": float,
"num_females_with_income": int,
"female_median_income_curr_dollars": float,
"female_median_income_2019_dollars": float,
})
# Pickle the DF
df.to_pickle("./res.pkl")
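# A quick round-trip sanity check (a sketch, not part of the original
# pipeline): each race block above contributes 7 age groups x 20 rows, so 140
# rows per race are expected back from the pickle.
check = pd.read_pickle("./res.pkl")
print(check.groupby("race").size())  # expect 140 rows each for asian and white
print(check.dtypes)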
| 24.646552
| 82
| 0.671913
| 450
| 2,859
| 4.12
| 0.306667
| 0.06904
| 0.075512
| 0.113269
| 0.420712
| 0.289105
| 0.110032
| 0.110032
| 0.110032
| 0.110032
| 0
| 0.132291
| 0.172438
| 2,859
| 115
| 83
| 24.86087
| 0.65131
| 0.191675
| 0
| 0.079545
| 0
| 0
| 0.282353
| 0.149891
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.011364
| 0
| 0.011364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3aa7d175c4008d278417caf82ba36b9fb655fda
| 520
|
py
|
Python
|
Section_1/Exercise_16.py
|
Szymon-Budziak/WDI_exercises_solutions
|
51ffc9ec8b3cd6809bd55e98ecb8aed759c2d460
|
[
"MIT"
] | null | null | null |
Section_1/Exercise_16.py
|
Szymon-Budziak/WDI_exercises_solutions
|
51ffc9ec8b3cd6809bd55e98ecb8aed759c2d460
|
[
"MIT"
] | null | null | null |
Section_1/Exercise_16.py
|
Szymon-Budziak/WDI_exercises_solutions
|
51ffc9ec8b3cd6809bd55e98ecb8aed759c2d460
|
[
"MIT"
] | 1
|
2021-11-21T09:38:33.000Z
|
2021-11-21T09:38:33.000Z
|
"""
Dany jest ciąg określony wzorem: A[n+1] = (A[n] % 2) ∗ (3 ∗ A[n] + 1) + (1 − A[n] % 2) ∗ A[n] / 2.
Startując z dowolnej liczby naturalnej > 1 ciąg ten osiąga wartość 1. Napisać program, który
znajdzie wyraz początkowy z przedziału 2-10000 dla którego wartość 1 jest osiągalna po największej
liczbie kroków.
"""
best_start = 2
max_steps = 0
for start in range(2, 10000):
    a = start
    n = 0
    while a != 1:
        a = (a % 2) * (3 * a + 1) + (1 - a % 2) * (a // 2)
        n += 1
    if n > max_steps:
        max_steps = n
        best_start = start
print(best_start, max_steps)
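# The arithmetic above is a branchless encoding of the classic Collatz step:
# for odd a it yields 3 * a + 1, for even a it yields a // 2. A quick check of
# the equivalence:
def collatz_step(a):
    return 3 * a + 1 if a % 2 else a // 2

assert all(
    collatz_step(a) == (a % 2) * (3 * a + 1) + (1 - a % 2) * (a // 2)
    for a in range(2, 1000)
)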
| 27.368421
| 98
| 0.542308
| 94
| 520
| 3.042553
| 0.468085
| 0.034965
| 0.031469
| 0.027972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117166
| 0.294231
| 520
| 18
| 99
| 28.888889
| 0.651226
| 0.588462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3ad80bfdfa53d706abcbf25b9e00b65302a112a
| 1,480
|
py
|
Python
|
AndroidSpider/spider_main.py
|
lidenghong1/SmallReptileTraining
|
a1bfb81c9969edfb7554acc50370c0cb036da690
|
[
"MIT"
] | 1
|
2018-05-10T01:52:37.000Z
|
2018-05-10T01:52:37.000Z
|
AndroidSpider/spider_main.py
|
lidenghong1/SmallReptileTraining
|
a1bfb81c9969edfb7554acc50370c0cb036da690
|
[
"MIT"
] | null | null | null |
AndroidSpider/spider_main.py
|
lidenghong1/SmallReptileTraining
|
a1bfb81c9969edfb7554acc50370c0cb036da690
|
[
"MIT"
] | null | null | null |
from AndroidSpider import url_manager, html_downloader, html_parser, html_output
'''
Crawl Baidu Baike for entries related to the keyword "Android" and their summaries, and output them as an HTML table page.
Extra module:
BeautifulSoup
'''
class SpiderMain(object):
def __init__(self):
self.urls = url_manager.UrlManager()
self.downloader = html_downloader.HtmlDownLoader()
self.parser = html_parser.HtmlParser()
self.out_put = html_output.HtmlOutput()
def craw(self, root_url):
count = 1
self.urls.add_new_url(root_url)
while self.urls.has_new_url():
try:
new_url = self.urls.get_new_url()
print("craw %d : %s" % (count, new_url))
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.100 Safari/537.36"
}
html_content = self.downloader.download(new_url, retry_count=2, headers=headers)
new_urls, new_data = self.parser.parse(new_url, html_content, "utf-8")
self.urls.add_new_urls(new_urls)
self.out_put.collect_data(new_data)
if count >= 30:
break
count = count + 1
except Exception as e:
print("craw failed!\n"+str(e))
self.out_put.output_html()
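# The html_downloader module is not included here; as a rough sketch, a
# downloader with the call signature used in craw() above (retry_count,
# headers) could be built on requests -- an assumption, not the project's
# actual implementation.
import requests

class SimpleHtmlDownLoader:
    def download(self, url, retry_count=2, headers=None):
        for attempt in range(retry_count + 1):
            try:
                resp = requests.get(url, headers=headers, timeout=10)
                resp.raise_for_status()
                return resp.content  # parse() above receives the encoding separately
            except requests.RequestException:
                if attempt == retry_count:
                    raise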
if __name__ == "__main__":
rootUrl = "http://baike.baidu.com/item/Android"
objSpider = SpiderMain()
objSpider.craw(rootUrl)
| 36.097561
| 141
| 0.597297
| 180
| 1,480
| 4.65
| 0.505556
| 0.050179
| 0.035842
| 0.033453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032567
| 0.294595
| 1,480
| 40
| 142
| 37
| 0.769157
| 0
| 0
| 0
| 0
| 0.032258
| 0.134711
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.032258
| 0
| 0.129032
| 0.064516
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3ae0fed36bd78447d3c9b110c995da7eb0ec44e
| 517
|
py
|
Python
|
trompace/mutations/__init__.py
|
trompamusic/ce-queries-template
|
cc5ae69d0e76623bfd72e9453f569f6624bf7c3b
|
[
"Apache-2.0"
] | 1
|
2020-06-18T15:43:18.000Z
|
2020-06-18T15:43:18.000Z
|
trompace/mutations/__init__.py
|
trompamusic/ce-queries-template
|
cc5ae69d0e76623bfd72e9453f569f6624bf7c3b
|
[
"Apache-2.0"
] | 60
|
2019-12-17T11:08:28.000Z
|
2021-03-02T16:19:41.000Z
|
trompace/mutations/__init__.py
|
trompamusic/trompace-client
|
cc5ae69d0e76623bfd72e9453f569f6624bf7c3b
|
[
"Apache-2.0"
] | null | null | null |
MUTATION = '''mutation {{
{mutation}
}}'''
def _verify_additional_type(additionaltype):
"""Check that the input to additionaltype is a list of strings.
    If it is an empty list, raise ValueError.
    If it is a string, convert it to a list of strings."""
if additionaltype is None:
return None
if isinstance(additionaltype, str):
additionaltype = [additionaltype]
if len(additionaltype) == 0:
raise ValueError("additionaltype must be a non-empty list")
return additionaltype
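# A short usage sketch: the doubled braces in MUTATION survive str.format, so
# only {mutation} is substituted, and the helper normalises a bare string into
# a single-element list as its docstring describes. The mutation body below is
# hypothetical.
if __name__ == "__main__":
    body = 'CreateThing(title: "x") { identifier }'
    print(MUTATION.format(mutation=body))
    assert _verify_additional_type("Composer") == ["Composer"]
    assert _verify_additional_type(None) is None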
| 28.722222
| 67
| 0.68472
| 64
| 517
| 5.484375
| 0.5
| 0.091168
| 0.039886
| 0.079772
| 0.091168
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002513
| 0.230174
| 517
| 17
| 68
| 30.411765
| 0.879397
| 0.280464
| 0
| 0
| 0
| 0
| 0.184358
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3ae4f1aada9f0b92aa00f9f17807bd4f8c072c1
| 951
|
py
|
Python
|
Web_App/infrastructure/infra.py
|
CapitalOneDevExchangeHackathon/Financial-Fitness
|
54a2203d6b3d96687d822247b040613b644874f2
|
[
"MIT"
] | null | null | null |
Web_App/infrastructure/infra.py
|
CapitalOneDevExchangeHackathon/Financial-Fitness
|
54a2203d6b3d96687d822247b040613b644874f2
|
[
"MIT"
] | null | null | null |
Web_App/infrastructure/infra.py
|
CapitalOneDevExchangeHackathon/Financial-Fitness
|
54a2203d6b3d96687d822247b040613b644874f2
|
[
"MIT"
] | null | null | null |
import boto3
from config import Config
dynamodb = boto3.resource('dynamodb',
aws_access_key_id=Config.AWS_KEY,
aws_secret_access_key=Config.AWS_SECRET_KEY,
region_name=Config.REGION)
table = dynamodb.Table('user_details')
tables = boto3.resource('dynamodb', aws_access_key_id=Config.AWS_KEY,
aws_secret_access_key=Config.AWS_SECRET_KEY, region_name=Config.REGION).Table('user_details')
print(tables.creation_date_time)
def main():
print("29.7604267")
def insert_into_db(user):
print(user.lastname)
try:
table.put_item(
Item={
'pin': user.pin,
'firstname': user.firstname,
'lastname': user.lastname,
}
)
except Exception as E:
print(E)
return False
return True
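# A sketch of the matching read path, assuming 'pin' is the table's partition
# key (which the put_item call above implies); get_item returns the stored
# attributes under "Item" when the record exists.
def get_from_db(pin):
    try:
        response = table.get_item(Key={'pin': pin})
    except Exception as E:
        print(E)
        return None
    return response.get('Item')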
if __name__ == "__main__":
main()
| 22.116279
| 119
| 0.589905
| 108
| 951
| 4.87037
| 0.398148
| 0.068441
| 0.079848
| 0.091255
| 0.418251
| 0.418251
| 0.418251
| 0.418251
| 0.418251
| 0.418251
| 0
| 0.018433
| 0.315457
| 951
| 42
| 120
| 22.642857
| 0.789555
| 0
| 0
| 0
| 0
| 0
| 0.082105
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.103448
| 0
| 0.241379
| 0.137931
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3b0debd51a02674a2485fcb5fa43dc82bc97eff
| 2,751
|
py
|
Python
|
SelfTests.py
|
TeaPackCZ/RobotZed
|
7ac8bfb14a6c2e5887f8fed299ad87b384701c54
|
[
"MIT"
] | null | null | null |
SelfTests.py
|
TeaPackCZ/RobotZed
|
7ac8bfb14a6c2e5887f8fed299ad87b384701c54
|
[
"MIT"
] | null | null | null |
SelfTests.py
|
TeaPackCZ/RobotZed
|
7ac8bfb14a6c2e5887f8fed299ad87b384701c54
|
[
"MIT"
] | null | null | null |
import os
import unittest
from Logger import Logger
class TestLogger(unittest.TestCase):
def test_file_handling(self):
testLog = Logger("testLog")
## Check if program can create and open file
self.assertTrue(testLog.opened)
returns = testLog.close()
        ## Check that the logger correctly sets the opened flag and returns
        ## 0 as success.
self.assertFalse(testLog.opened)
self.assertEqual(returns,0)
returns = testLog.close()
## Check if logger returns 1 when trying to close already
## closed file
self.assertEqual(returns,1)
## Do cleanup:
os.remove(testLog.name)
def test_logging(self):
testLog = Logger("testLog")
testPhrase = "TestLine\r\n"
testLog.save_line(testPhrase)
testLog.close()
logfile = open(testLog.name)
content = logfile.read()
logfile.close()
saved = content.split(" : ")
## Check if saved data corresponds
self.assertEqual(saved[1],testPhrase)
## cleanup
os.remove(testLog.name)
from gpsNavigation import gpsModule, gpsPoint
class TestGPSNavigation(unittest.TestCase):
def test_gps_angles(self):
gpsMod = gpsModule()
A = gpsPoint(10,10)
B = gpsPoint(10.1,10.1)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,15623.0)
self.assertEqual(azimut,45.0)
B = gpsPoint(10.0,10.1)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,10963.0)
self.assertEqual(azimut,90.0)
B = gpsPoint(9.9,10.1)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,15625.0)
self.assertEqual(azimut,135.0)
B = gpsPoint(9.9,10.0)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,11132.0)
self.assertEqual(azimut,180.0)
B = gpsPoint(9.9,9.9)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,15625.0)
self.assertEqual(azimut,225.0)
B = gpsPoint(10.0,9.9)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,10963.0)
self.assertEqual(azimut,270.0)
B = gpsPoint(10.1,9.9)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,15623.0)
self.assertEqual(azimut,315.0)
B = gpsPoint(10.1,10.0)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,11132.0)
self.assertEqual(azimut,0)
if __name__ == '__main__':
unittest.main()
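# Usage note (a sketch): unittest.main() above parses sys.argv, so a single
# case can be selected from the command line without code changes, e.g.:
#     python SelfTests.py TestLogger.test_file_handling
#     python SelfTests.py TestGPSNavigation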
| 31.988372
| 66
| 0.624137
| 332
| 2,751
| 5.129518
| 0.253012
| 0.167352
| 0.093952
| 0.126835
| 0.571345
| 0.496183
| 0.44451
| 0.44451
| 0.44451
| 0.44451
| 0
| 0.06213
| 0.262814
| 2,751
| 85
| 67
| 32.364706
| 0.777613
| 0.08615
| 0
| 0.349206
| 0
| 0
| 0.014818
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.047619
| false
| 0
| 0.063492
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3b19235edf240100e043436d336caa4a2f88321
| 1,986
|
py
|
Python
|
manga_py/parser.py
|
Abijithkrishna/manga-py
|
03b142ecb944ef37a36e5095ffa580209021e3b0
|
[
"MIT"
] | null | null | null |
manga_py/parser.py
|
Abijithkrishna/manga-py
|
03b142ecb944ef37a36e5095ffa580209021e3b0
|
[
"MIT"
] | null | null | null |
manga_py/parser.py
|
Abijithkrishna/manga-py
|
03b142ecb944ef37a36e5095ffa580209021e3b0
|
[
"MIT"
] | null | null | null |
from logging import warning
from requests import get
from .info import Info
from .provider import Provider
from .providers import get_provider
class Parser:
def __init__(self, args: dict):
self.params = args
def init_provider(
self,
chapter_progress: callable = None,
global_progress: callable = None,
log: callable = None,
quest: callable = None,
info: Info = None,
quest_password: callable = None,
):
original_url = self.params.get('url', '')
provider_url = self.params.get('force_provider', None)
provider = get_provider(provider_url or original_url)
if isinstance(provider, bool):
raise AttributeError('Provider not found')
# update url (if redirect)
self.provider = provider(info) # type: Provider
self.provider.original_url = original_url
real_url = self.check_url(original_url)
if self.provider.allow_auto_change_url():
if real_url != original_url:
warning('Manga url changed! New url: {}'.format(real_url))
self.params['url'] = real_url
self.provider.quiet = self.params.get('quiet', False)
self.provider.set_chapter_progress_callback(chapter_progress)
self.provider.set_global_progress_callback(global_progress)
self.provider.set_log_callback(log)
self.provider.set_quest_callback(quest)
self.provider.set_quest_password_callback(quest_password)
def start(self):
self.provider.process(self.params['url'], self.params)
def check_url(self, url):
proxy = self.params.get('proxy', None)
proxies = {
'http': proxy,
'https': proxy,
} if proxy else None
with get(url, stream=True, proxies=proxies) as response:
_url = response.url
if url != _url:
url = _url
return url
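# A minimal driving sketch; the dict keys mirror those read from self.params
# above ('url', 'quiet', and optionally 'force_provider' and 'proxy'), and the
# URL is a hypothetical placeholder.
if __name__ == '__main__':
    cli = Parser({'url': 'https://example.org/manga/1', 'quiet': True})
    cli.init_provider()
    cli.start()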
| 29.205882
| 74
| 0.618832
| 230
| 1,986
| 5.13913
| 0.282609
| 0.101523
| 0.063452
| 0.027073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.289023
| 1,986
| 67
| 75
| 29.641791
| 0.83711
| 0.019637
| 0
| 0
| 0
| 0
| 0.04632
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0.041667
| 0.104167
| 0
| 0.229167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3b29bffdf2e36c45f804f1c4fc3a56bbdcb9b59
| 1,127
|
py
|
Python
|
customers/views.py
|
sindhumadhadi09/CustomerMgmt
|
db8b27ad6ceb8050843dc33509dc2b6c2ed2c1e2
|
[
"MIT"
] | null | null | null |
customers/views.py
|
sindhumadhadi09/CustomerMgmt
|
db8b27ad6ceb8050843dc33509dc2b6c2ed2c1e2
|
[
"MIT"
] | null | null | null |
customers/views.py
|
sindhumadhadi09/CustomerMgmt
|
db8b27ad6ceb8050843dc33509dc2b6c2ed2c1e2
|
[
"MIT"
] | null | null | null |
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Customer
class IndexView(generic.ListView):
template_name = 'customers/index.html'
context_object_name = 'customers_list'
def get_queryset(self):
return Customer.objects.all()
class CustomerView(generic.TemplateView):
template_name = 'customers/detail.html'
def add_customer(request):
customer = Customer()
customer.customer_firstname = request.POST['fname']
customer.customer_lastname = request.POST['lname']
customer.customer_address = request.POST['address']
customer.customer_city = request.POST['city']
customer.customer_zipcode = request.POST['zip']
customer.customer_state = request.POST['state']
customer.save()
return HttpResponseRedirect(reverse('customers:index'))
def delete_customer(request, customer_id):
    customer = get_object_or_404(Customer, pk=customer_id)
    customer.delete()
return HttpResponseRedirect(reverse('customers:index'))
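# A sketch of the app's urls.py that the reverse() calls above assume (an
# app_name of 'customers' with an 'index' route); the add/delete route names
# are illustrative, not taken from the project's actual URLconf.
#
#     from django.urls import path
#     from . import views
#
#     app_name = 'customers'
#     urlpatterns = [
#         path('', views.IndexView.as_view(), name='index'),
#         path('add/', views.add_customer, name='add'),
#         path('<int:customer_id>/delete/', views.delete_customer, name='delete'),
#     ]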
| 34.151515
| 59
| 0.759539
| 133
| 1,127
| 6.293233
| 0.413534
| 0.152927
| 0.050179
| 0.100358
| 0.112306
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003096
| 0.140195
| 1,127
| 33
| 60
| 34.151515
| 0.860681
| 0
| 0
| 0.074074
| 0
| 0
| 0.101064
| 0.018617
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.222222
| 0.037037
| 0.62963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3b315d5551d6efa8a8b5d2f47e368467747b831
| 3,512
|
py
|
Python
|
butterfree/configs/db/metastore_config.py
|
fossabot/butterfree
|
8a7da8c540b51c6560b2825cb926c40a351f202b
|
[
"Apache-2.0"
] | null | null | null |
butterfree/configs/db/metastore_config.py
|
fossabot/butterfree
|
8a7da8c540b51c6560b2825cb926c40a351f202b
|
[
"Apache-2.0"
] | null | null | null |
butterfree/configs/db/metastore_config.py
|
fossabot/butterfree
|
8a7da8c540b51c6560b2825cb926c40a351f202b
|
[
"Apache-2.0"
] | null | null | null |
"""Holds configurations to read and write with Spark to AWS S3."""
import os
from typing import Any, Dict, List, Optional
from pyspark.sql import DataFrame
from butterfree.configs import environment
from butterfree.configs.db import AbstractWriteConfig
from butterfree.dataframe_service import extract_partition_values
class MetastoreConfig(AbstractWriteConfig):
"""Configuration for Spark metastore database stored.
By default the configuration is for AWS S3.
Attributes:
path: database root location.
        mode: writing mode used by writers.
format_: expected stored file format.
file_system: file schema uri, like: s3a, file.
"""
def __init__(
self,
path: str = None,
mode: str = None,
format_: str = None,
file_system: str = None,
):
self.path = path
self.mode = mode
self.format_ = format_
self.file_system = file_system
@property
def path(self) -> Optional[str]:
"""Bucket name."""
return self.__path
@path.setter
def path(self, value: str) -> None:
self.__path = value or environment.get_variable("FEATURE_STORE_S3_BUCKET")
@property
def format_(self) -> Optional[str]:
"""Expected stored file format."""
return self.__format
@format_.setter
def format_(self, value: str) -> None:
self.__format = value or "parquet"
@property
def mode(self) -> Optional[str]:
"""Writing mode used be writers."""
return self.__mode
@mode.setter
def mode(self, value: str) -> None:
self.__mode = value or "overwrite"
@property
def file_system(self) -> Optional[str]:
"""Writing mode used be writers."""
return self.__file_system
@file_system.setter
def file_system(self, value: str) -> None:
self.__file_system = value or "s3a"
def get_options(self, key: str) -> Dict[Optional[str], Optional[str]]:
"""Get options for Metastore.
Options will be a dictionary with the write and read configuration for
Spark Metastore.
Args:
key: path to save data into Metastore.
Returns:
Options configuration for Metastore.
"""
return {
"mode": self.mode,
"format_": self.format_,
"path": os.path.join(f"{self.file_system}://{self.path}/", key),
}
def get_path_with_partitions(self, key: str, dataframe: DataFrame) -> List:
"""Get options for AWS S3 from partitioned parquet file.
Options will be a dictionary with the write and read configuration for
Spark to AWS S3.
Args:
key: path to save data into AWS S3 bucket.
dataframe: spark dataframe containing data from a feature set.
Returns:
A list of string for file-system backed data sources.
"""
path_list = []
dataframe_values = extract_partition_values(
dataframe, partition_columns=["year", "month", "day"]
)
for row in dataframe_values:
path_list.append(
f"{self.file_system}://{self.path}/{key}/year={row['year']}/"
f"month={row['month']}/day={row['day']}"
)
return path_list
def translate(self, schema: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Translate feature set spark schema to the corresponding database."""
pass
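# A short usage sketch (bucket name hypothetical): the setters above default
# to "overwrite" / "parquet" / "s3a", so get_options composes a full s3a://
# path for the given key.
if __name__ == "__main__":
    config = MetastoreConfig(path="my-bucket")
    print(config.get_options("my_feature_set"))
    # -> {'mode': 'overwrite', 'format_': 'parquet',
    #     'path': 's3a://my-bucket/my_feature_set'}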
| 29.024793
| 82
| 0.611902
| 419
| 3,512
| 4.988067
| 0.252983
| 0.057416
| 0.026316
| 0.030622
| 0.227751
| 0.156938
| 0.156938
| 0.108134
| 0.108134
| 0.108134
| 0
| 0.003199
| 0.28787
| 3,512
| 120
| 83
| 29.266667
| 0.832467
| 0.299829
| 0
| 0.065574
| 0
| 0
| 0.087168
| 0.066814
| 0
| 0
| 0
| 0
| 0
| 1
| 0.196721
| false
| 0.016393
| 0.098361
| 0
| 0.409836
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3b384657bc7cd2ab9ee0a1d8b09ee80039ad894
| 2,401
|
py
|
Python
|
examples/2-objects.py
|
johanngan/special_relativity
|
cd372c7460d2c0d4040c81bc1bd0090086dba735
|
[
"MIT"
] | 4
|
2020-08-19T04:56:40.000Z
|
2022-02-07T22:09:45.000Z
|
examples/2-objects.py
|
johanngan/special_relativity
|
cd372c7460d2c0d4040c81bc1bd0090086dba735
|
[
"MIT"
] | null | null | null |
examples/2-objects.py
|
johanngan/special_relativity
|
cd372c7460d2c0d4040c81bc1bd0090086dba735
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
sys.path.append('..')
import specrel.geom as geom
import specrel.spacetime.physical as phy
import specrel.visualize as vis
# Shared parameters
include_grid = True
include_legend = True
tlim = (0, 2)
xlim = (-2, 2)
# A stationary point object
stationary = phy.MovingObject(0, draw_options={'label': '$v = 0$'})
## Alternate:
# direction = (1, 0)
# point = (0, 0)
# stationary = geom.Line(direction, point, draw_options={'label': '$v = 0$'})
title='Stationary object'
p = vis.stplot(stationary, title=title, tlim=tlim, xlim=xlim,
grid=include_grid, legend=include_legend)
p.save('2-objects_stationary_point.png')
p.show()
# A stationary point object, animated
anim = vis.stanimate(stationary, title=title, tlim=tlim, xlim=xlim,
grid=include_grid, legend=include_legend)
anim.save('2-objects_stationary_point_anim.mp4')
anim.show()
# A stationary point object, animated with worldline
anim = vis.stanimate_with_worldline(stationary, title=title,
tlim=tlim, xlim=xlim, grid=include_grid, legend=include_legend,
legend_loc='upper right')
anim.save('2-objects_stationary_point_anim_worldline.mp4')
anim.show()
# A bunch of moving point objects, animated
moving = phy.MovingObject(0, velocity=1/2,
draw_options={'color': 'red', 'label': '$v = c/2$'})
light = phy.MovingObject(0, velocity=1,
draw_options={'color': 'gold', 'label': '$v = c$'})
ftl = phy.MovingObject(0, velocity=3/2,
draw_options={'color': 'cyan', 'label': '$v = 3c/2$'})
objects = geom.Collection([stationary, moving, light, ftl])
title = 'Various objects'
anim = vis.stanimate_with_worldline(objects, title=title,
current_time_color='magenta', tlim=tlim, xlim=xlim, grid=include_grid,
legend=include_legend, legend_loc='upper left')
anim.save('2-objects_moving_points.mp4')
anim.show()
# A moving meterstick
meterstick = phy.MovingObject(-1/2, length=1, velocity=1/2,
draw_options={'label': 'Meterstick'})
# # Alternate:
# direction = (1, 1/2)
# left = geom.Line(direction, (0, -1/2))
# right = geom.Line(direction, (0, 1/2))
# meterstick = geom.Ribbon(left, right, draw_options={'label': 'Meterstick'})
title = 'Moving meterstick ($v = c/2$)'
anim = vis.stanimate_with_worldline(meterstick, title=title,
tlim=tlim, xlim=xlim, grid=include_grid, legend=include_legend,
legend_loc='upper left')
anim.save('2-objects_moving_meterstick.mp4')
anim.show()
| 34.797101
| 77
| 0.7197
| 345
| 2,401
| 4.884058
| 0.226087
| 0.045697
| 0.035608
| 0.047478
| 0.489614
| 0.351335
| 0.28724
| 0.245697
| 0.245697
| 0.245697
| 0
| 0.022243
| 0.11995
| 2,401
| 68
| 78
| 35.308824
| 0.775201
| 0.217409
| 0
| 0.177778
| 0
| 0
| 0.195056
| 0.090274
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.088889
| 0
| 0.088889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3b4e8143896f099b74b0a3738681f49e357493f
| 4,049
|
py
|
Python
|
tests/sentry/auth/test_helper.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/auth/test_helper.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/auth/test_helper.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from six.moves.urllib.parse import urlencode
from django.test import RequestFactory
from django.contrib.auth.models import AnonymousUser
from sentry.auth.helper import handle_new_user
from sentry.models import AuthProvider, InviteStatus, OrganizationMember
from sentry.testutils import TestCase
from sentry.utils.compat import mock
class HandleNewUserTest(TestCase):
@mock.patch("sentry.analytics.record")
def test_simple(self, mock_record):
provider = "dummy"
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
auth_provider = AuthProvider.objects.create(
organization=self.organization, provider=provider
)
identity = {"id": "1234", "email": "test@example.com", "name": "Morty"}
auth_identity = handle_new_user(auth_provider, self.organization, request, identity)
user = auth_identity.user
assert user.email == identity["email"]
assert OrganizationMember.objects.filter(organization=self.organization, user=user).exists()
signup_record = [r for r in mock_record.call_args_list if r[0][0] == "user.signup"]
assert signup_record == [
mock.call(
"user.signup", user_id=user.id, source="sso", provider=provider, referrer="in-app"
)
]
def test_associated_existing_member_invite_by_email(self):
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
provider = AuthProvider.objects.create(organization=self.organization, provider="dummy")
identity = {"id": "1234", "email": "test@example.com", "name": "Morty"}
member = OrganizationMember.objects.create(
organization=self.organization, email=identity["email"]
)
auth_identity = handle_new_user(provider, self.organization, request, identity)
assigned_member = OrganizationMember.objects.get(
organization=self.organization, user=auth_identity.user
)
assert assigned_member.id == member.id
def test_associated_existing_member_invite_request(self):
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
provider = AuthProvider.objects.create(organization=self.organization, provider="dummy")
identity = {"id": "1234", "email": "test@example.com", "name": "Morty"}
member = self.create_member(
organization=self.organization,
email=identity["email"],
invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value,
)
auth_identity = handle_new_user(provider, self.organization, request, identity)
assert OrganizationMember.objects.filter(
organization=self.organization,
user=auth_identity.user,
invite_status=InviteStatus.APPROVED.value,
).exists()
assert not OrganizationMember.objects.filter(id=member.id).exists()
def test_associate_pending_invite(self):
provider = AuthProvider.objects.create(organization=self.organization, provider="dummy")
identity = {"id": "1234", "email": "test@example.com", "name": "Morty"}
        # The org member invite should have a non-matching email, but the
# member id and token will match from the cookie, allowing association
member = OrganizationMember.objects.create(
organization=self.organization, email="different.email@example.com", token="abc"
)
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
request.COOKIES["pending-invite"] = urlencode(
{"memberId": member.id, "token": member.token, "url": ""}
)
auth_identity = handle_new_user(provider, self.organization, request, identity)
assigned_member = OrganizationMember.objects.get(
organization=self.organization, user=auth_identity.user
)
assert assigned_member.id == member.id
| 39.31068
| 100
| 0.674241
| 428
| 4,049
| 6.245327
| 0.245327
| 0.089787
| 0.115226
| 0.065095
| 0.615788
| 0.582117
| 0.532361
| 0.526375
| 0.356902
| 0.34119
| 0
| 0.005662
| 0.214868
| 4,049
| 102
| 101
| 39.696078
| 0.835168
| 0.032601
| 0
| 0.378378
| 0
| 0
| 0.085079
| 0.012775
| 0
| 0
| 0
| 0
| 0.094595
| 1
| 0.054054
| false
| 0
| 0.108108
| 0
| 0.175676
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3b57d8c1a4088165ce4f67e6fb27850615f9653
| 4,583
|
py
|
Python
|
density_model_torch_custom.py
|
piotrwinkler/breast_density_classifier
|
4d47dd98bb0a839cea8b9aef242f5af5db84f06f
|
[
"BSD-2-Clause"
] | null | null | null |
density_model_torch_custom.py
|
piotrwinkler/breast_density_classifier
|
4d47dd98bb0a839cea8b9aef242f5af5db84f06f
|
[
"BSD-2-Clause"
] | null | null | null |
density_model_torch_custom.py
|
piotrwinkler/breast_density_classifier
|
4d47dd98bb0a839cea8b9aef242f5af5db84f06f
|
[
"BSD-2-Clause"
] | null | null | null |
import argparse
import glob
import os
import numpy as np
import torch
from sklearn.metrics import accuracy_score
import models_torch as models
import utils
EXPERIMENT_DATA_DIR = "/tmp/mgr"
def inference(parameters, verbose=True) -> int:
# resolve device
device = torch.device(
"cuda:{}".format(parameters["gpu_number"]) if parameters["device_type"] == "gpu"
else "cpu"
)
# load input images
datum_l_cc = utils.load_images(parameters['image_path'], 'L-CC')
datum_r_cc = utils.load_images(parameters['image_path'], 'R-CC')
datum_l_mlo = utils.load_images(parameters['image_path'], 'L-MLO')
datum_r_mlo = utils.load_images(parameters['image_path'], 'R-MLO')
# construct models and prepare data
if parameters["model_type"] == 'cnn':
model = models.BaselineBreastModel(device, nodropout_probability=1.0, gaussian_noise_std=0.0).to(device)
model.load_state_dict(torch.load(parameters["model_path"]))
x = {
"L-CC": torch.Tensor(datum_l_cc).permute(0, 3, 1, 2).to(device),
"L-MLO": torch.Tensor(datum_l_mlo).permute(0, 3, 1, 2).to(device),
"R-CC": torch.Tensor(datum_r_cc).permute(0, 3, 1, 2).to(device),
"R-MLO": torch.Tensor(datum_r_mlo).permute(0, 3, 1, 2).to(device),
}
elif parameters["model_type"] == 'histogram':
model = models.BaselineHistogramModel(num_bins=parameters["bins_histogram"]).to(device)
model.load_state_dict(torch.load(parameters["model_path"]))
x = torch.Tensor(utils.histogram_features_generator([
datum_l_cc, datum_r_cc, datum_l_mlo, datum_r_mlo
], parameters)).to(device)
else:
raise RuntimeError(parameters["model_type"])
# run prediction
with torch.no_grad():
prediction_density = model(x).cpu().numpy()
if verbose:
# nicely prints out the predictions
print('Density prediction:\n'
'\tAlmost entirely fatty (0):\t\t\t' + str(prediction_density[0, 0]) + '\n'
'\tScattered areas of fibroglandular density (1):\t' + str(prediction_density[0, 1]) + '\n'
'\tHeterogeneously dense (2):\t\t\t' + str(prediction_density[0, 2]) + '\n'
'\tExtremely dense (3):\t\t\t\t' + str(prediction_density[0, 3]) + '\n')
return np.argmax(prediction_density[0])+1 # return density in scope 1 to 4
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run Inference')
parser.add_argument('model_type')
parser.add_argument('--bins-histogram', default=50)
parser.add_argument('--model-path', default=None)
parser.add_argument('--device-type', default="cpu")
# parser.add_argument('--image-path', default="images/")
args = parser.parse_args()
parameters_ = {
"model_type": args.model_type,
"bins_histogram": args.bins_histogram,
"model_path": args.model_path,
"device_type": args.device_type,
# "image_path": args.image_path,
}
if parameters_["model_path"] is None:
if args.model_type == "histogram":
parameters_["model_path"] = "saved_models/BreastDensity_BaselineHistogramModel/model.p"
if args.model_type == "cnn":
parameters_["model_path"] = "saved_models/BreastDensity_BaselineBreastModel/model.p"
predicted_values = []
real_values = []
predicted_values_two_classes = []
real_values_two_classes = []
two_classes_mapping = {1: 0, 2: 0, 3: 1, 4: 1}
    for data_dir in glob.glob(f"{EXPERIMENT_DATA_DIR}/*/"):
        parameters_["image_path"] = data_dir
        predicted_density = inference(parameters_)
        with open(os.path.join(data_dir, "density.txt")) as file:
real_density = int(file.read())
print(f"Predicted density: {predicted_density}")
print(f"Real density: {real_density}\n")
print(f"Predicted density (2 cls): {two_classes_mapping[predicted_density]}")
print(f"Real density (2 cls): {two_classes_mapping[real_density]}\n")
predicted_values.append(predicted_density)
real_values.append(real_density)
predicted_values_two_classes.append(two_classes_mapping[predicted_density])
real_values_two_classes.append(two_classes_mapping[real_density])
print(f"Total accuracy: {accuracy_score(real_values, predicted_values)}")
print(f"Total accuracy two classes: {accuracy_score(real_values_two_classes, predicted_values_two_classes)}")
"""
python density_model_torch_custom.py histogram
python density_model_torch_custom.py cnn
"""
| 37.565574
| 113
| 0.669212
| 598
| 4,583
| 4.867893
| 0.235786
| 0.041223
| 0.032978
| 0.034352
| 0.300584
| 0.261422
| 0.171075
| 0.068018
| 0.037788
| 0.037788
| 0
| 0.013005
| 0.194632
| 4,583
| 121
| 114
| 37.876033
| 0.775671
| 0.050622
| 0
| 0.02439
| 0
| 0
| 0.246056
| 0.072993
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012195
| false
| 0
| 0.097561
| 0
| 0.121951
| 0.085366
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3b664d11a53af7fe489af747c1768858a1613a2
| 4,878
|
py
|
Python
|
esmvaltool/diag_scripts/ensclus/ens_anom.py
|
yifatdzigan/ESMValTool
|
83320b0e0b24ddde965599961bb80428e180a731
|
[
"Apache-2.0"
] | 148
|
2017-02-07T13:16:03.000Z
|
2022-03-26T02:21:56.000Z
|
esmvaltool/diag_scripts/ensclus/ens_anom.py
|
yifatdzigan/ESMValTool
|
83320b0e0b24ddde965599961bb80428e180a731
|
[
"Apache-2.0"
] | 2,026
|
2017-02-03T12:57:13.000Z
|
2022-03-31T15:11:51.000Z
|
esmvaltool/diag_scripts/ensclus/ens_anom.py
|
yifatdzigan/ESMValTool
|
83320b0e0b24ddde965599961bb80428e180a731
|
[
"Apache-2.0"
] | 113
|
2017-01-27T13:10:19.000Z
|
2022-02-03T13:42:11.000Z
|
"""Computation of ensemble anomalies based on a desired value."""
import os
import numpy as np
from scipy import stats
# User-defined packages
from read_netcdf import read_iris, save_n_2d_fields
from sel_season_area import sel_area, sel_season
def ens_anom(filenames, dir_output, name_outputs, varname, numens, season,
area, extreme):
"""Ensemble anomalies.
Computation of the ensemble anomalies based on the desired value
from the input variable (it can be the percentile, mean, maximum, standard
deviation or trend)
OUTPUT: NetCDF files of ensemble mean of climatology, selected value and
anomaly maps.
"""
print('The name of the output files will be <variable>_{0}.txt'
.format(name_outputs))
print('Number of ensemble members: {0}'.format(numens))
outfiles = []
# Reading the netCDF file of 3Dfield, for all the ensemble members
var_ens = []
for ens in range(numens):
ifile = filenames[ens]
# print('ENSEMBLE MEMBER %s' %ens)
var, varunits, lat, lon, dates, _ = read_iris(ifile)
            # Conversion from kg m-2 s-1 to mm/day
if varunits == 'kg m-2 s-1':
var = var * 86400 # there are 86400 seconds in a day
varunits = 'mm/day'
# Selecting a season (DJF,DJFM,NDJFM,JJA)
var_season, _ = sel_season(var, dates, season)
# Selecting only [latS-latN, lonW-lonE] box region
var_area, lat_area, lon_area = sel_area(lat, lon, var_season, area)
var_ens.append(var_area)
if varunits == 'kg m-2 s-1':
print('\nPrecipitation rate units were converted from kg m-2 s-1 '
'to mm/day')
print('The variable is {0} ({1})'.format(varname, varunits))
print('Original var shape: (time x lat x lon)={0}'.format(var.shape))
print('var shape after selecting season {0} and area {1}: '
'(time x lat x lon)={2}'.format(season, area, var_area.shape))
if extreme == 'mean':
# Compute the time mean over the entire period, for each ens member
varextreme_ens = [np.nanmean(var_ens[i], axis=0)
for i in range(numens)]
elif len(extreme.split("_")) == 2:
# Compute the chosen percentile over the period, for each ens member
quant = int(extreme.partition("th")[0])
varextreme_ens = [np.nanpercentile(var_ens[i], quant, axis=0)
for i in range(numens)]
elif extreme == 'maximum':
# Compute the maximum value over the period, for each ensemble member
varextreme_ens = [np.nanmax(var_ens[i], axis=0) for i in range(numens)]
elif extreme == 'std':
# Compute the standard deviation over the period, for each ens member
varextreme_ens = [np.nanstd(var_ens[i], axis=0) for i in range(numens)]
elif extreme == 'trend':
# Compute the linear trend over the period, for each ensemble member
trendmap = np.empty((var_ens[0].shape[1], var_ens[0].shape[2]))
trendmap_ens = []
for i in range(numens):
for jla in range(var_ens[0].shape[1]):
for jlo in range(var_ens[0].shape[2]):
slope, _, _, _, _ = \
stats.linregress(range(var_ens[0].shape[0]),
var_ens[i][:, jla, jlo])
trendmap[jla, jlo] = slope
trendmap_ens.append(trendmap.copy())
varextreme_ens = trendmap_ens
varextreme_ens_np = np.array(varextreme_ens)
print('Anomalies are computed with respect to the {0}'.format(extreme))
# Compute and save the anomalies with respect to the ensemble
ens_anomalies = varextreme_ens_np - np.nanmean(varextreme_ens_np, axis=0)
varsave = 'ens_anomalies'
ofile = os.path.join(dir_output, 'ens_anomalies_{0}.nc'
.format(name_outputs))
# print(ofile)
print('ens_anomalies shape: (numens x lat x lon)={0}'
.format(ens_anomalies.shape))
save_n_2d_fields(lat_area, lon_area, ens_anomalies, varsave,
varunits, ofile)
outfiles.append(ofile)
# Compute and save the climatology
vartimemean_ens = [np.mean(var_ens[i], axis=0) for i in range(numens)]
ens_climatologies = np.array(vartimemean_ens)
varsave = 'ens_climatologies'
ofile = os.path.join(dir_output, 'ens_climatologies_{0}.nc'
.format(name_outputs))
save_n_2d_fields(lat_area, lon_area, ens_climatologies, varsave,
varunits, ofile)
outfiles.append(ofile)
ens_extreme = varextreme_ens_np
varsave = 'ens_extreme'
ofile = os.path.join(dir_output, 'ens_extreme_{0}.nc'.format(name_outputs))
save_n_2d_fields(lat_area, lon_area, ens_extreme, varsave,
varunits, ofile)
outfiles.append(ofile)
return outfiles
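# A sketch of the `extreme` naming convention the branches above rely on:
# "mean", "maximum", "std", "trend", or a percentile spelled like
# "75th_percentile" (two "_"-separated parts, with the number read up to "th").
if __name__ == '__main__':
    extreme = "75th_percentile"
    assert len(extreme.split("_")) == 2
    assert int(extreme.partition("th")[0]) == 75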
| 40.65
| 79
| 0.630381
| 673
| 4,878
| 4.419019
| 0.239227
| 0.026227
| 0.04035
| 0.022192
| 0.306994
| 0.274042
| 0.211836
| 0.13887
| 0.113988
| 0.081372
| 0
| 0.015059
| 0.264863
| 4,878
| 119
| 80
| 40.991597
| 0.814278
| 0.215047
| 0
| 0.168831
| 0
| 0
| 0.141572
| 0.006351
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012987
| false
| 0
| 0.064935
| 0
| 0.090909
| 0.103896
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3b714ec9b000678e3e81df98484d9da903f0406
| 24,074
|
py
|
Python
|
pytition/petition/models.py
|
Te-k/Pytition
|
16ebce01b491b72ed387709d9b705f7cb0d5476f
|
[
"BSD-3-Clause"
] | null | null | null |
pytition/petition/models.py
|
Te-k/Pytition
|
16ebce01b491b72ed387709d9b705f7cb0d5476f
|
[
"BSD-3-Clause"
] | null | null | null |
pytition/petition/models.py
|
Te-k/Pytition
|
16ebce01b491b72ed387709d9b705f7cb0d5476f
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from django.utils.html import mark_safe, strip_tags
from django.utils.text import slugify
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.core.exceptions import ValidationError
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.conf import settings
from django.contrib.auth.hashers import get_hasher
from django.db import transaction
from django.urls import reverse
from django.db.models import Q
from tinymce import models as tinymce_models
from colorfield.fields import ColorField
import html
class Petition(models.Model):
NO = "no gradient"
RIGHT = "to right"
BOTTOM = "to bottom"
BOTTOM_RIGHT = "to bottom right"
BOTTOM_LEFT = "to bottom left"
LINEAR_GRADIENT_CHOICES = (
(NO, "no gradient"),
(RIGHT, "to right"),
(BOTTOM, "to bottom"),
(BOTTOM_RIGHT, "to bottom right"),
(BOTTOM_LEFT, "to bottom left")
)
MAIL = "MAIL"
POST = "POST"
GET = "GET"
NEWSLETTER_SUBSCRIBE_METHOD_CHOICES = (
(MAIL, "MAIL"),
(POST, "POST"),
(GET, "GET")
)
title = models.TextField(verbose_name=ugettext_lazy("Title"))
text = tinymce_models.HTMLField(blank=True)
side_text = tinymce_models.HTMLField(blank=True)
target = models.IntegerField(default=500)
linear_gradient_direction = models.CharField(choices=LINEAR_GRADIENT_CHOICES, max_length=15, default=NO, blank=True)
gradient_from = ColorField(blank=True)
gradient_to = ColorField(blank=True)
bgcolor = ColorField(blank=True)
footer_text = tinymce_models.HTMLField(blank=True)
footer_links = tinymce_models.HTMLField(blank=True)
twitter_description = models.CharField(max_length=200, blank=True)
twitter_image = models.CharField(max_length=500, blank=True)
has_newsletter = models.BooleanField(default=False)
newsletter_subscribe_http_data = models.TextField(blank=True)
newsletter_subscribe_http_mailfield = models.CharField(max_length=100, blank=True)
newsletter_subscribe_http_url = models.CharField(max_length=1000, blank=True)
newsletter_subscribe_mail_subject = models.CharField(max_length=1000, blank=True)
newsletter_subscribe_mail_from = models.CharField(max_length=500, blank=True)
newsletter_subscribe_mail_to = models.CharField(max_length=500, blank=True)
newsletter_subscribe_method = models.CharField(choices=NEWSLETTER_SUBSCRIBE_METHOD_CHOICES, max_length=4,
default=MAIL)
newsletter_subscribe_mail_smtp_host = models.CharField(max_length=100, default='localhost', blank=True)
newsletter_subscribe_mail_smtp_port = models.IntegerField(default=25, blank=True)
newsletter_subscribe_mail_smtp_user = models.CharField(max_length=200, blank=True)
newsletter_subscribe_mail_smtp_password = models.CharField(max_length=200, blank=True)
newsletter_subscribe_mail_smtp_tls = models.BooleanField(default=False)
newsletter_subscribe_mail_smtp_starttls = models.BooleanField(default=False)
org_twitter_handle = models.CharField(max_length=20, blank=True)
published = models.BooleanField(default=False)
newsletter_text = models.CharField(max_length=1000, blank=True)
sign_form_footer = models.TextField(blank=True)
confirmation_email_sender = models.CharField(max_length=100, blank=True)
confirmation_email_smtp_host = models.CharField(max_length=100, default='localhost', blank=True)
confirmation_email_smtp_port = models.IntegerField(default=25, blank=True)
confirmation_email_smtp_user = models.CharField(max_length=200, blank=True)
confirmation_email_smtp_password = models.CharField(max_length=200, blank=True)
confirmation_email_smtp_tls = models.BooleanField(default=False)
confirmation_email_smtp_starttls = models.BooleanField(default=False)
use_custom_email_settings = models.BooleanField(default=False)
salt = models.TextField(blank=True)
slugs = models.ManyToManyField('SlugModel', blank=True, through='SlugOwnership')
def prepopulate_from_template(self, template):
for field in self._meta.fields:
if hasattr(self, field.name) and hasattr(template, field.name):
template_value = getattr(template, field.name)
if template_value is not None and template_value != "":
setattr(self, field.name, template_value)
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
if not self.salt:
hasher = get_hasher()
self.salt = hasher.salt().decode('utf-8')
super().save()
def slugify(self):
if self.slugs.count() == 0:
slugtext = slugify(self.raw_title)
# let's search for slug collisions
filters = {'slugs__slug': slugtext}
if self.organization_set.count() > 0:
org = self.organization_set.first()
filters.update({'organization__name': org.name})
else:
user = self.pytitionuser_set.first()
filters.update({'pytitionuser__user__username': user.user.username})
results = Petition.objects.filter(**filters)
if results.count() > 0:
raise ValueError(_("This slug is already used by another petition from this organization/user"))
slug = SlugModel(slug=slugify(slugtext))
slug.save()
self.slugs.add(slug)
self.save()
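    # Example (hypothetical usage, assuming the petition already has an owner):
    # >>> p = Petition(title="Save the bees")
    # >>> p.save()      # also generates the salt on first save
    # >>> p.slugify()   # attaches a "save-the-bees" SlugModel if it is free
    # >>> p.publish()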
@classmethod
def by_id(cls, id):
try:
return Petition.objects.get(pk=id)
except Petition.DoesNotExist:
return None
def get_signature_number(self, confirmed=None):
signatures = self.signature_set
if confirmed is not None:
signatures = signatures.filter(confirmed=confirmed)
return signatures.count()
def already_signed(self, email):
        signature_number = Signature.objects.filter(petition=self.id)\
            .filter(confirmed=True).filter(email=email).count()
return signature_number > 0
    def confirm_signature(self, conf_hash):
        # get() raises DoesNotExist instead of returning None, so use
        # filter(...).first() to make the None branch below reachable
        signature = Signature.objects.filter(petition=self.id,
                                             confirmation_hash=conf_hash).first()
        if signature:
# Now confirm the signature corresponding to this hash
signature.confirm()
signature.save()
return _("Thank you for confirming your signature!")
else:
return None
def add_slug(self, slugtext):
with transaction.atomic():
slugtext = slugify(slugtext)
slug = SlugModel.objects.create(slug=slugtext)
if self.owner_type == "org":
SlugOwnership.objects.create(slug=slug, petition=self, organization=self.owner)
elif self.owner_type == "user":
SlugOwnership.objects.create(slug=slug, petition=self, user=self.owner)
else:
raise ValueError(_("This petition has no owner, cannot add slug!"))
def del_slug(self, slug):
slug.delete()
def publish(self):
self.published = True
self.save()
def unpublish(self):
self.published = False
self.save()
@property
def owner_type(self):
if self.organization_set.count() > 0:
return "org"
elif self.pytitionuser_set.count() > 0:
return "user"
else:
return "no_owner"
@property
def owner(self):
if self.organization_set.count() > 0:
return self.organization_set.first()
elif self.pytitionuser_set.count() > 0:
return self.pytitionuser_set.first()
else:
return None
@property
def signature_number(self):
return self.get_signature_number(True)
@property
def raw_twitter_description(self):
return html.unescape(mark_safe(strip_tags(self.twitter_description)))
@property
def raw_text(self):
return html.unescape(mark_safe(strip_tags(self.text)))
@property
def raw_title(self):
return html.unescape(mark_safe(strip_tags(self.title).strip()))
def __str__(self):
return self.raw_title
def __repr__(self):
return self.raw_title
@property
def url(self):
slugs = self.slugs.all()
if len(slugs) == 0:
# If there is no slug, ugly url
return reverse('detail', kwargs={'petition_id': self.id})
else:
if self.organization_set.count() > 0:
# This petition is owned by an Organization
org = self.organization_set.first()
return reverse("slug_show_petition",
kwargs={"orgslugname": org.slugname,
"petitionname": slugs[0]})
elif self.pytitionuser_set.count() > 0:
# This petition is owned by a PytitionUser
user = self.pytitionuser_set.first()
return reverse("slug_show_petition",
kwargs={"username": user.user.username,
"petitionname": slugs[0]})
else:
# This is a BUG!
raise ValueError(_("This petition is buggy. Sorry about that!"))
class SlugOwnership(models.Model):
petition = models.ForeignKey(Petition, on_delete=models.CASCADE)
slug = models.ForeignKey('SlugModel', on_delete=models.CASCADE)
user = models.ForeignKey('PytitionUser', on_delete=models.CASCADE, blank=True, null=True, default=None)
organization = models.ForeignKey('Organization', on_delete=models.CASCADE, blank=True, null=True, default=None)
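    # The conditional unique constraints below make a slug name unique per
    # organization (rows with no user) and unique per user (rows with no
    # organization).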
class Meta:
constraints = [
models.UniqueConstraint(fields=['slug', 'organization'], name="unique_slugnameperorg", condition=Q(user=None)),
models.UniqueConstraint(fields=['slug', 'user'], name="unique_slugnameperuser",
condition=Q(organization=None)),
]
class Signature(models.Model):
first_name = models.CharField(max_length=50, verbose_name=ugettext_lazy("First name"))
last_name = models.CharField(max_length=50, verbose_name=ugettext_lazy("Last name"))
phone = models.CharField(max_length=20, blank=True, verbose_name=ugettext_lazy("Phone number"))
email = models.EmailField(verbose_name=ugettext_lazy("Email address"))
confirmation_hash = models.CharField(max_length=128)
confirmed = models.BooleanField(default=False, verbose_name=ugettext_lazy("Confirmed"))
petition = models.ForeignKey(Petition, on_delete=models.CASCADE, verbose_name=ugettext_lazy("Petition"))
subscribed_to_mailinglist = models.BooleanField(default=False, verbose_name=ugettext_lazy("Subscribed to mailing list"))
date = models.DateTimeField(blank=True, auto_now_add=True, verbose_name=ugettext_lazy("Date"))
ipaddress = models.TextField(blank=True, null=True)
def clean(self):
if self.petition.already_signed(self.email):
            if self.petition.signature_set.filter(email=self.email).get(confirmed=True).id != self.id:
raise ValidationError(_("You already signed the petition"))
def save(self, *args, **kwargs):
self.clean()
if self.confirmed:
# invalidating other signatures from same email
Signature.objects.filter(petition=self.petition).filter(email=self.email)\
.exclude(id=self.id).delete()
super().save(*args, **kwargs)
def confirm(self):
self.confirmed = True
def __str__(self):
return html.unescape("[{}:{}] {} {}".format(self.petition.id, "OK" if self.confirmed else "..", self.first_name,
self.last_name))
def __repr__(self):
return html.unescape("[{}:{}] {} {}".format(self.petition.id, "OK" if self.confirmed else "..", self.first_name,
self.last_name))
class PetitionTemplate(models.Model):
NO = "no gradient"
RIGHT = "to right"
BOTTOM = "to bottom"
BOTTOM_RIGHT = "to bottom right"
BOTTOM_LEFT = "to bottom left"
LINEAR_GRADIENT_CHOICES = (
(NO, "no gradient"),
(RIGHT, "to right"),
(BOTTOM, "to bottom"),
(BOTTOM_RIGHT, "to bottom right"),
(BOTTOM_LEFT, "to bottom left")
)
MAIL = "MAIL"
POST = "POST"
GET = "GET"
NEWSLETTER_SUBSCRIBE_METHOD_CHOICES = (
(MAIL, "MAIL"),
(POST, "POST"),
(GET, "GET")
)
name = models.CharField(max_length=50, verbose_name=ugettext_lazy("Name"), db_index=True)
text = tinymce_models.HTMLField(blank=True)
side_text = tinymce_models.HTMLField(blank=True)
target = models.IntegerField(blank=True, null=True)
linear_gradient_direction = models.CharField(choices=LINEAR_GRADIENT_CHOICES, max_length=15, default=NO, blank=True)
gradient_from = ColorField(blank=True)
gradient_to = ColorField(blank=True)
bgcolor = ColorField(blank=True)
footer_text = tinymce_models.HTMLField(blank=True)
footer_links = tinymce_models.HTMLField(blank=True)
twitter_description = models.CharField(max_length=200, blank=True)
twitter_image = models.CharField(max_length=500, blank=True)
has_newsletter = models.BooleanField(default=False)
newsletter_subscribe_http_data = models.TextField(blank=True)
newsletter_subscribe_http_mailfield = models.CharField(max_length=100, blank=True)
newsletter_subscribe_http_url = models.CharField(max_length=1000, blank=True)
newsletter_subscribe_mail_subject = models.CharField(max_length=1000, blank=True)
newsletter_subscribe_mail_from = models.EmailField(max_length=500, blank=True)
newsletter_subscribe_mail_to = models.EmailField(max_length=500, blank=True)
newsletter_subscribe_method = models.CharField(choices=NEWSLETTER_SUBSCRIBE_METHOD_CHOICES, max_length=4,
default=MAIL)
newsletter_subscribe_mail_smtp_host = models.CharField(max_length=100, default='localhost', blank=True)
newsletter_subscribe_mail_smtp_port = models.IntegerField(default=25)
newsletter_subscribe_mail_smtp_user = models.CharField(max_length=200, blank=True)
newsletter_subscribe_mail_smtp_password = models.CharField(max_length=200, blank=True)
newsletter_subscribe_mail_smtp_tls = models.BooleanField(default=False)
newsletter_subscribe_mail_smtp_starttls = models.BooleanField(default=False)
org_twitter_handle = models.CharField(max_length=20, blank=True)
newsletter_text = models.CharField(max_length=1000, blank=True)
sign_form_footer = models.TextField(blank=True)
confirmation_email_sender = models.EmailField(max_length=100, blank=True)
confirmation_email_smtp_host = models.CharField(max_length=100, default='localhost', blank=True)
confirmation_email_smtp_port = models.IntegerField(default=25, blank=True)
confirmation_email_smtp_user = models.CharField(max_length=200, blank=True)
confirmation_email_smtp_password = models.CharField(max_length=200, blank=True)
confirmation_email_smtp_tls = models.BooleanField(default=False)
confirmation_email_smtp_starttls = models.BooleanField(default=False)
use_custom_email_settings = models.BooleanField(default=False)
def __str__(self):
return self.name
def __repr__(self):
return self.name
class Meta:
index_together = ["id", ]
class SlugModel(models.Model):
slug = models.SlugField(max_length=200)
class Meta:
constraints = [
models.UniqueConstraint(fields=['slug'], name='unique_slugname')
]
def __str__(self):
return self.slug
def __repr__(self):
return self.slug
class Organization(models.Model):
name = models.CharField(max_length=200, verbose_name=ugettext_lazy("Name"), unique=True)
petition_templates = models.ManyToManyField(PetitionTemplate, through='TemplateOwnership',
through_fields=['organization', 'template'], blank=True,
verbose_name=ugettext_lazy("Petition templates"))
petitions = models.ManyToManyField(Petition, blank=True, verbose_name=ugettext_lazy("Petitions"))
default_template = models.ForeignKey(PetitionTemplate, blank=True, null=True, related_name='+',
verbose_name=ugettext_lazy("Default petition template"), to_field='id',
on_delete=models.SET_NULL)
slugname = models.SlugField(max_length=200, unique=True)
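    # Deleting the organization first lets the M2M links cascade; the
    # petitions and templates it owned are then removed explicitly.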
def drop(self):
with transaction.atomic():
petitions = list(self.petitions.all())
templates = list(self.petition_templates.all())
self.delete()
for petition in petitions:
petition.delete()
for template in templates:
template.delete()
def add_member(self, member):
member.organizations.add(self)
permission = Permission.objects.create(organization=self)
permission.save()
member.permissions.add(permission)
member.save()
def __str__(self):
return self.name
def __repr__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slugname:
self.slugname = slugify(self.name)
super(Organization, self).save(*args, **kwargs)
@property
def kind(self):
return "org"
@property
def fullname(self):
return self.name
class Permission(models.Model):
organization = models.ForeignKey(Organization, on_delete=models.CASCADE,
verbose_name=ugettext_lazy("Organization related to these permissions"))
can_add_members = models.BooleanField(default=False)
can_remove_members = models.BooleanField(default=False)
can_create_petitions = models.BooleanField(default=False)
can_modify_petitions = models.BooleanField(default=False)
can_delete_petitions = models.BooleanField(default=False)
can_create_templates = models.BooleanField(default=False)
can_modify_templates = models.BooleanField(default=False)
can_delete_templates = models.BooleanField(default=False)
can_view_signatures = models.BooleanField(default=False)
can_modify_signatures = models.BooleanField(default=False)
can_delete_signatures = models.BooleanField(default=False)
can_modify_permissions = models.BooleanField(default=False)
def set_all(self, value):
self.can_add_members = value
self.can_remove_members = value
self.can_create_petitions = value
self.can_modify_petitions = value
self.can_delete_petitions = value
self.can_create_templates = value
self.can_modify_templates = value
self.can_delete_templates = value
self.can_view_signatures = value
self.can_modify_signatures = value
self.can_delete_signatures = value
self.can_modify_permissions = value
self.save()
def __str__(self):
ret = "{orgname} : ".format(orgname=self.organization.name)
if self.user.count() > 0:
ret = ret + "{username}".format(username=self.user.all()[0].name)
else:
ret = ret + "None"
return ret
def __repr__(self):
return self.__str__()
class PytitionUser(models.Model):
petitions = models.ManyToManyField(Petition, blank=True)
organizations = models.ManyToManyField(Organization, related_name="members", blank=True)
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="pytitionuser")
permissions = models.ManyToManyField(Permission, related_name="user", blank=True)
invitations = models.ManyToManyField(Organization, related_name="invited", blank=True)
petition_templates = models.ManyToManyField(PetitionTemplate, blank=True, through='TemplateOwnership',
through_fields=['user', 'template'],
verbose_name=ugettext_lazy("Petition templates"))
default_template = models.ForeignKey(PetitionTemplate, blank=True, null=True, related_name='+',
verbose_name=ugettext_lazy("Default petition template"), to_field='id',
on_delete=models.SET_NULL)
def has_right(self, right, petition=None, org=None):
if petition:
if petition in self.petitions.all():
return True
try:
if not org:
org = Organization.objects.get(petitions=petition, members=self)
permissions = self.permissions.get(organization=org)
return getattr(permissions, right)
            except (Organization.DoesNotExist, Permission.DoesNotExist):
                return False
if org:
try:
permissions = self.permissions.get(organization=org)
return getattr(permissions, right)
            except Permission.DoesNotExist:
                return False
return False
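    # Example (hypothetical objects): gate an action on an org permission.
    # >>> if user.has_right('can_modify_petitions', org=some_org):
    # ...     petition.unpublish()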
def drop(self):
with transaction.atomic():
orgs = list(self.organizations.all())
petitions = list(self.petitions.all())
templates = list(self.petition_templates.all())
self.delete()
for org in orgs:
if org.members.count() == 0:
org.drop()
for petition in petitions:
petition.delete()
for template in templates:
template.delete()
@property
def is_authenticated(self):
return self.user.is_authenticated
@property
def name(self):
return self.username
@property
def username(self):
return self.user.username
@property
def get_full_name(self):
return self.user.get_full_name()
@property
def fullname(self):
return self.get_full_name
@property
def kind(self):
return "user"
def __str__(self):
return self.get_full_name
def __repr__(self):
return self.get_full_name
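# The receivers below keep profiles in sync with the auth model: create a
# PytitionUser with each new auth user, save it alongside, give every new
# Organization a slugname, and delete the auth user when its profile goes.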
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_user_profile(sender, instance, created, **kwargs):
if created:
PytitionUser.objects.create(user=instance)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def save_user_profile(sender, instance, **kwargs):
instance.pytitionuser.save()
@receiver(post_save, sender=Organization)
def save_organization_slugname(sender, instance, **kwargs):
if not instance.slugname:
slugtext = slugify(instance.name)
instance.slugname = slugtext
instance.save()
@receiver(post_delete, sender=PytitionUser)
def post_delete_user(sender, instance, *args, **kwargs):
if instance.user: # just in case user is not specified
instance.user.delete()
class TemplateOwnership(models.Model):
user = models.ForeignKey(PytitionUser, blank=True, null=True, on_delete=models.CASCADE)
organization = models.ForeignKey(Organization, blank=True, null=True, on_delete=models.CASCADE)
template = models.ForeignKey(PetitionTemplate, to_field='id', on_delete=models.CASCADE)
def clean(self):
if self.user is None and self.organization is None:
raise ValidationError(_("The template needs to be owned by a User or an Organization."
"It cannot hang around alone by itself."))
#class Meta:
# unique_together = (("user", "template"), ("organization", "template"))
| 40.734349
| 124
| 0.662748
| 2,715
| 24,074
| 5.673665
| 0.108287
| 0.043236
| 0.040898
| 0.054531
| 0.610556
| 0.560439
| 0.481823
| 0.450597
| 0.407492
| 0.386783
| 0
| 0.008396
| 0.238058
| 24,074
| 590
| 125
| 40.80339
| 0.83138
| 0.015785
| 0
| 0.503106
| 0
| 0
| 0.057805
| 0.002998
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113872
| false
| 0.008282
| 0.033126
| 0.05176
| 0.559006
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3b72847ef50516acce4d8d4114c3432f306c66d
| 4,026
|
py
|
Python
|
bin/socialhistory.py
|
JohnShullTopDev/generating-traning-data-for-healthcare-machine-learningcare-
|
d0ffb26e1b99204a796df905b50c8caf01417f69
|
[
"Apache-2.0"
] | 1
|
2019-11-11T11:21:08.000Z
|
2019-11-11T11:21:08.000Z
|
bin/socialhistory.py
|
JohnShullTopDev/generating-traning-data-for-healthcare-machine-learningcare-
|
d0ffb26e1b99204a796df905b50c8caf01417f69
|
[
"Apache-2.0"
] | null | null | null |
bin/socialhistory.py
|
JohnShullTopDev/generating-traning-data-for-healthcare-machine-learningcare-
|
d0ffb26e1b99204a796df905b50c8caf01417f69
|
[
"Apache-2.0"
] | 1
|
2020-01-28T03:48:14.000Z
|
2020-01-28T03:48:14.000Z
|
import csv
from testdata import SOCIALHISTORY_FILE
from testdata import rndDate
from patient import Patient
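# SNOMED CT smoking-status concept codes mapped to their display strings
# (used below as the Observation valueCodeableConcept).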
SMOKINGCODES = {
'428041000124106': 'Current some day smoker',
'266919005' : 'Never smoker',
'449868002' : 'Current every day smoker',
'266927001' : 'Unknown if ever smoked',
'8517006' : 'Former smoker'
}
class SocialHistory(object):
"""Create instances of SocialHistory; also maintains socialHistory by patient id"""
socialHistories = {} # Dictionary of socialHistory by patient ID
@classmethod
def load(cls):
"""Loads patient SocialHistory"""
# Loop through socialHistories and build patient socialHistory lists:
        histories = csv.reader(open(SOCIALHISTORY_FILE, newline=''), dialect='excel-tab')
header = next(histories)
for history in histories:
cls(dict(zip(header, history))) # Create a socialHistory instance
def __init__(self, p):
self.pid = p['PID']
self.id = p['ID']
self.smokingStatusCode = p['SMOKINGSTATUSCODE']
self.smokingStatusText = SMOKINGCODES[self.smokingStatusCode]
# Append socialHistory to the patient's socialHistory list:
if self.pid in self.__class__.socialHistories:
            raise ValueError("Found >1 socialHistory for a patient")
else:
self.__class__.socialHistories[self.pid] = self
def toJSON(self, prefix=""):
if prefix:
prefix += "-"
patient = Patient.mpi[self.pid]
return {
"request": {
"method": "PUT",
"url": "Observation/" + prefix + "smokingstatus-" + self.id
},
"resource": {
"id": prefix + "smokingstatus-" + self.id,
"resourceType": "Observation",
"status": "final",
"identifier": [
{
"use" : "official",
"system": "http://www.bmc.nl/zorgportal/identifiers/observations",
"value" : prefix + self.id
}
],
"text": {
"status": "generated",
"div": '<div xmlns="http://www.w3.org/1999/xhtml">' +
'Tobacco smoking status: %s</div>'%self.smokingStatusText
},
"performer": [
{
"reference": "Practitioner/" + prefix + "Practitioner-" + patient.gp
}
],
"effectiveDateTime": rndDate(2016).isoformat(),
"code": {
"coding": [
{
"system" : "http://loinc.org",
"code" : "72166-2",
"display": "Tobacco smoking status"
}
],
"text": "Tobacco smoking status"
},
"subject": {
"reference": "Patient/" + prefix + self.pid
},
"category": [
{
"coding": [
{
"system" : "http://hl7.org/fhir/observation-category",
"code" : "social-history",
"display": "Social History"
}
],
"text": "Social History"
}
],
"valueCodeableConcept": {
"coding": [
{
"system" : "http://snomed.info/sct",
"code" : self.smokingStatusCode,
"display": self.smokingStatusText
}
],
"text": self.smokingStatusText
}
}
}
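# Example (hypothetical patient id): build one FHIR transaction entry.
# >>> SocialHistory.load()
# >>> entry = SocialHistory.socialHistories['123'].toJSON(prefix="demo")
# >>> entry['request']['method']
# 'PUT'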
| 35.946429
| 92
| 0.435171
| 282
| 4,026
| 6.163121
| 0.478723
| 0.020138
| 0.034522
| 0.027618
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030096
| 0.455291
| 4,026
| 111
| 93
| 36.27027
| 0.762426
| 0.076006
| 0
| 0.09375
| 0
| 0
| 0.233405
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.041667
| 0
| 0.104167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3b9cafed89d7582e18fd4f82c78858c2882f5b3
| 1,453
|
py
|
Python
|
lib/spack/spack/test/cache_fetch.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360
|
2017-11-06T08:47:01.000Z
|
2022-03-31T14:45:33.000Z
|
lib/spack/spack/test/cache_fetch.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838
|
2017-11-04T07:49:45.000Z
|
2022-03-31T23:38:39.000Z
|
lib/spack/spack/test/cache_fetch.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793
|
2017-11-04T07:45:50.000Z
|
2022-03-30T14:31:53.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import pytest
from llnl.util.filesystem import mkdirp, touch
import spack.config
from spack.fetch_strategy import CacheURLFetchStrategy, NoCacheError
from spack.stage import Stage
@pytest.mark.parametrize('_fetch_method', ['curl', 'urllib'])
def test_fetch_missing_cache(tmpdir, _fetch_method):
"""Ensure raise a missing cache file."""
testpath = str(tmpdir)
with spack.config.override('config:url_fetch_method', _fetch_method):
fetcher = CacheURLFetchStrategy(url='file:///not-a-real-cache-file')
with Stage(fetcher, path=testpath):
with pytest.raises(NoCacheError, match=r'No cache'):
fetcher.fetch()
@pytest.mark.parametrize('_fetch_method', ['curl', 'urllib'])
def test_fetch(tmpdir, _fetch_method):
"""Ensure a fetch after expanding is effectively a no-op."""
testpath = str(tmpdir)
cache = os.path.join(testpath, 'cache.tar.gz')
touch(cache)
url = 'file:///{0}'.format(cache)
with spack.config.override('config:url_fetch_method', _fetch_method):
fetcher = CacheURLFetchStrategy(url=url)
with Stage(fetcher, path=testpath) as stage:
source_path = stage.source_path
mkdirp(source_path)
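            # the source path now exists, so this fetch should be a no-op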
fetcher.fetch()
| 35.439024
| 76
| 0.705437
| 185
| 1,453
| 5.410811
| 0.432432
| 0.087912
| 0.041958
| 0.051948
| 0.333666
| 0.277722
| 0.277722
| 0.277722
| 0.277722
| 0.277722
| 0
| 0.009205
| 0.177564
| 1,453
| 40
| 77
| 36.325
| 0.828452
| 0.192017
| 0
| 0.307692
| 0
| 0
| 0.131034
| 0.064655
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3bac2f51025032288427c9fc39e3497207cc25d
| 2,201
|
py
|
Python
|
temp_range_sql.py
|
hanhanwu/Hanhan-Spark-Python
|
a04c33100742acffa2ad11d1937ea05c44688427
|
[
"MIT"
] | 45
|
2016-03-18T07:57:53.000Z
|
2022-03-20T07:14:15.000Z
|
temp_range_sql.py
|
hanhanwu/Hanhan-Spark-Python
|
a04c33100742acffa2ad11d1937ea05c44688427
|
[
"MIT"
] | null | null | null |
temp_range_sql.py
|
hanhanwu/Hanhan-Spark-Python
|
a04c33100742acffa2ad11d1937ea05c44688427
|
[
"MIT"
] | 16
|
2016-07-07T16:47:46.000Z
|
2020-05-04T17:38:40.000Z
|
__author__ = 'hanhanw'
import sys
from pyspark import SparkConf, SparkContext
from pyspark.sql.context import SQLContext
from pyspark.sql.types import StructType, StructField, StringType, DoubleType
conf = SparkConf().setAppName("temp range sql")
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
assert sc.version >= '1.5.1'
inputs1 = sys.argv[1]
output = sys.argv[2]
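# get_range joins each day's TMAX and TMIN rows per station to compute the
# daily temperature range, then keeps the station(s) hitting each day's
# maximum range.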
def get_range(recordings):
recordings.registerTempTable('Recordings')
dfrange = sqlContext.sql("""
SELECT r1.DateTime, r1.StationID, (r1.DataValue-r2.DataValue) AS Range FROM
(SELECT StationID, DateTime, Observation, DataValue FROM Recordings
WHERE Observation='TMAX') r1
JOIN
(SELECT StationID, DateTime, Observation, DataValue FROM Recordings
WHERE Observation='TMIN') r2
ON (r1.StationID = r2.StationID AND r1.DateTime = r2.DateTime)
""")
dfrange.registerTempTable('RangeTable')
df_maxrange = sqlContext.sql("""
SELECT DateTime, MAX(Range) AS MaxRange FROM RangeTable
GROUP BY DateTime
""")
df_maxrange.registerTempTable('MaxRange')
df_result = sqlContext.sql("""
SELECT t1.DateTime as DateTime, t1.StationID as StationID, t2.MaxRange as MaxRange FROM
RangeTable t1
JOIN MaxRange t2
ON (t1.DateTime = t2.DateTime AND t1.Range = t2.MaxRange)
""")
return df_result
def main():
temp_schema = StructType([
StructField('StationID', StringType(), False),
StructField('DateTime', StringType(), False),
StructField('Observation', StringType(), False),
StructField('DataValue', DoubleType(), False),
StructField('MFlag', StringType(), True),
StructField('QFlag', StringType(), True),
StructField('SFlag', StringType(), True),
StructField('OBSTime', StringType(), True),
])
df = sqlContext.read.format('com.databricks.spark.csv').options(header='false').load(inputs1, schema=temp_schema)
df = df.filter(df.QFlag == '')
dfrange = get_range(df)
result = dfrange.rdd.map(lambda r: str(r.DateTime)+' '+str(r.StationID)+' '+str(r.MaxRange))
outdata = result.sortBy(lambda r: r[0]).coalesce(1)
outdata.saveAsTextFile(output)
if __name__ == "__main__":
main()
| 31.898551
| 117
| 0.698319
| 257
| 2,201
| 5.902724
| 0.342412
| 0.042189
| 0.037574
| 0.044825
| 0.096243
| 0.096243
| 0.096243
| 0.096243
| 0.096243
| 0
| 0
| 0.015292
| 0.168105
| 2,201
| 68
| 118
| 32.367647
| 0.813217
| 0
| 0
| 0.092593
| 0
| 0.018519
| 0.369378
| 0.023171
| 0
| 0
| 0
| 0
| 0.018519
| 1
| 0.037037
| false
| 0
| 0.074074
| 0
| 0.12963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3bca9436abafd191ec47379ebb1db10a4043237
| 11,326
|
py
|
Python
|
desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/drawing/shape.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 3
|
2018-01-29T14:16:02.000Z
|
2019-02-05T21:33:05.000Z
|
desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/drawing/shape.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 4
|
2021-03-11T04:02:00.000Z
|
2022-03-27T08:31:56.000Z
|
desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/drawing/shape.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2
|
2019-12-05T17:24:36.000Z
|
2021-11-22T21:21:32.000Z
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.styles.colors import Color, BLACK, WHITE
from openpyxl.utils.units import (
pixels_to_EMU,
EMU_to_pixels,
short_color,
)
from openpyxl.compat import deprecated
from openpyxl.xml.functions import Element, SubElement, tostring
from openpyxl.xml.constants import (
DRAWING_NS,
SHEET_DRAWING_NS,
CHART_NS,
CHART_DRAWING_NS,
PKG_REL_NS
)
from openpyxl.compat.strings import safe_string
class Shape(object):
""" a drawing inside a chart
    coordinates are specified by the user in the axis units
"""
MARGIN_LEFT = 6 + 13 + 1
MARGIN_BOTTOM = 17 + 11
FONT_WIDTH = 7
FONT_HEIGHT = 8
ROUND_RECT = 'roundRect'
RECT = 'rect'
# other shapes to define :
'''
"line"
"lineInv"
"triangle"
"rtTriangle"
"diamond"
"parallelogram"
"trapezoid"
"nonIsoscelesTrapezoid"
"pentagon"
"hexagon"
"heptagon"
"octagon"
"decagon"
"dodecagon"
"star4"
"star5"
"star6"
"star7"
"star8"
"star10"
"star12"
"star16"
"star24"
"star32"
"roundRect"
"round1Rect"
"round2SameRect"
"round2DiagRect"
"snipRoundRect"
"snip1Rect"
"snip2SameRect"
"snip2DiagRect"
"plaque"
"ellipse"
"teardrop"
"homePlate"
"chevron"
"pieWedge"
"pie"
"blockArc"
"donut"
"noSmoking"
"rightArrow"
"leftArrow"
"upArrow"
"downArrow"
"stripedRightArrow"
"notchedRightArrow"
"bentUpArrow"
"leftRightArrow"
"upDownArrow"
"leftUpArrow"
"leftRightUpArrow"
"quadArrow"
"leftArrowCallout"
"rightArrowCallout"
"upArrowCallout"
"downArrowCallout"
"leftRightArrowCallout"
"upDownArrowCallout"
"quadArrowCallout"
"bentArrow"
"uturnArrow"
"circularArrow"
"leftCircularArrow"
"leftRightCircularArrow"
"curvedRightArrow"
"curvedLeftArrow"
"curvedUpArrow"
"curvedDownArrow"
"swooshArrow"
"cube"
"can"
"lightningBolt"
"heart"
"sun"
"moon"
"smileyFace"
"irregularSeal1"
"irregularSeal2"
"foldedCorner"
"bevel"
"frame"
"halfFrame"
"corner"
"diagStripe"
"chord"
"arc"
"leftBracket"
"rightBracket"
"leftBrace"
"rightBrace"
"bracketPair"
"bracePair"
"straightConnector1"
"bentConnector2"
"bentConnector3"
"bentConnector4"
"bentConnector5"
"curvedConnector2"
"curvedConnector3"
"curvedConnector4"
"curvedConnector5"
"callout1"
"callout2"
"callout3"
"accentCallout1"
"accentCallout2"
"accentCallout3"
"borderCallout1"
"borderCallout2"
"borderCallout3"
"accentBorderCallout1"
"accentBorderCallout2"
"accentBorderCallout3"
"wedgeRectCallout"
"wedgeRoundRectCallout"
"wedgeEllipseCallout"
"cloudCallout"
"cloud"
"ribbon"
"ribbon2"
"ellipseRibbon"
"ellipseRibbon2"
"leftRightRibbon"
"verticalScroll"
"horizontalScroll"
"wave"
"doubleWave"
"plus"
"flowChartProcess"
"flowChartDecision"
"flowChartInputOutput"
"flowChartPredefinedProcess"
"flowChartInternalStorage"
"flowChartDocument"
"flowChartMultidocument"
"flowChartTerminator"
"flowChartPreparation"
"flowChartManualInput"
"flowChartManualOperation"
"flowChartConnector"
"flowChartPunchedCard"
"flowChartPunchedTape"
"flowChartSummingJunction"
"flowChartOr"
"flowChartCollate"
"flowChartSort"
"flowChartExtract"
"flowChartMerge"
"flowChartOfflineStorage"
"flowChartOnlineStorage"
"flowChartMagneticTape"
"flowChartMagneticDisk"
"flowChartMagneticDrum"
"flowChartDisplay"
"flowChartDelay"
"flowChartAlternateProcess"
"flowChartOffpageConnector"
"actionButtonBlank"
"actionButtonHome"
"actionButtonHelp"
"actionButtonInformation"
"actionButtonForwardNext"
"actionButtonBackPrevious"
"actionButtonEnd"
"actionButtonBeginning"
"actionButtonReturn"
"actionButtonDocument"
"actionButtonSound"
"actionButtonMovie"
"gear6"
"gear9"
"funnel"
"mathPlus"
"mathMinus"
"mathMultiply"
"mathDivide"
"mathEqual"
"mathNotEqual"
"cornerTabs"
"squareTabs"
"plaqueTabs"
"chartX"
"chartStar"
"chartPlus"
'''
@deprecated("Chart Drawings need a complete rewrite")
def __init__(self,
chart,
coordinates=((0, 0), (1, 1)),
text=None,
scheme="accent1"):
self.chart = chart
self.coordinates = coordinates # in axis units
self.text = text
self.scheme = scheme
self.style = Shape.RECT
self.border_width = 0
self.border_color = BLACK # "F3B3C5"
self.color = WHITE
self.text_color = BLACK
@property
def border_color(self):
return self._border_color
@border_color.setter
def border_color(self, color):
self._border_color = short_color(color)
@property
def color(self):
return self._color
@color.setter
def color(self, color):
self._color = short_color(color)
@property
def text_color(self):
return self._text_color
@text_color.setter
def text_color(self, color):
self._text_color = short_color(color)
@property
def border_width(self):
return self._border_width
@border_width.setter
def border_width(self, w):
self._border_width = w
@property
def coordinates(self):
"""Return coordindates in axis units"""
return self._coordinates
@coordinates.setter
def coordinates(self, coords):
""" set shape coordinates in percentages (left, top, right, bottom)
"""
# this needs refactoring to reflect changes in charts
self.axis_coordinates = coords
(x1, y1), (x2, y2) = coords # bottom left, top right
drawing_width = pixels_to_EMU(self.chart.drawing.width)
drawing_height = pixels_to_EMU(self.chart.drawing.height)
plot_width = drawing_width * self.chart.width
plot_height = drawing_height * self.chart.height
margin_left = self.chart._get_margin_left() * drawing_width
xunit = plot_width / self.chart.get_x_units()
margin_top = self.chart._get_margin_top() * drawing_height
yunit = self.chart.get_y_units()
x_start = (margin_left + (float(x1) * xunit)) / drawing_width
y_start = ((margin_top
+ plot_height
- (float(y1) * yunit))
/ drawing_height)
x_end = (margin_left + (float(x2) * xunit)) / drawing_width
y_end = ((margin_top
+ plot_height
- (float(y2) * yunit))
/ drawing_height)
# allow user to specify y's in whatever order
# excel expect y_end to be lower
if y_end < y_start:
y_end, y_start = y_start, y_end
self._coordinates = (
self._norm_pct(x_start), self._norm_pct(y_start),
self._norm_pct(x_end), self._norm_pct(y_end)
)
@staticmethod
def _norm_pct(pct):
""" force shapes to appear by truncating too large sizes """
if pct > 1:
return 1
elif pct < 0:
return 0
return pct
class ShapeWriter(object):
""" one file per shape """
def __init__(self, shapes):
self._shapes = shapes
def write(self, shape_id):
root = Element('{%s}userShapes' % CHART_NS)
for shape in self._shapes:
anchor = SubElement(root, '{%s}relSizeAnchor' % CHART_DRAWING_NS)
xstart, ystart, xend, yend = shape.coordinates
_from = SubElement(anchor, '{%s}from' % CHART_DRAWING_NS)
SubElement(_from, '{%s}x' % CHART_DRAWING_NS).text = str(xstart)
SubElement(_from, '{%s}y' % CHART_DRAWING_NS).text = str(ystart)
_to = SubElement(anchor, '{%s}to' % CHART_DRAWING_NS)
SubElement(_to, '{%s}x' % CHART_DRAWING_NS).text = str(xend)
SubElement(_to, '{%s}y' % CHART_DRAWING_NS).text = str(yend)
sp = SubElement(anchor, '{%s}sp' % CHART_DRAWING_NS, {'macro':'', 'textlink':''})
nvspr = SubElement(sp, '{%s}nvSpPr' % CHART_DRAWING_NS)
SubElement(nvspr, '{%s}cNvPr' % CHART_DRAWING_NS, {'id':str(shape_id), 'name':'shape %s' % shape_id})
SubElement(nvspr, '{%s}cNvSpPr' % CHART_DRAWING_NS)
sppr = SubElement(sp, '{%s}spPr' % CHART_DRAWING_NS)
frm = SubElement(sppr, '{%s}xfrm' % DRAWING_NS,)
# no transformation
SubElement(frm, '{%s}off' % DRAWING_NS, {'x':'0', 'y':'0'})
SubElement(frm, '{%s}ext' % DRAWING_NS, {'cx':'0', 'cy':'0'})
prstgeom = SubElement(sppr, '{%s}prstGeom' % DRAWING_NS, {'prst':str(shape.style)})
SubElement(prstgeom, '{%s}avLst' % DRAWING_NS)
fill = SubElement(sppr, '{%s}solidFill' % DRAWING_NS, )
SubElement(fill, '{%s}srgbClr' % DRAWING_NS, {'val':shape.color})
border = SubElement(sppr, '{%s}ln' % DRAWING_NS, {'w':str(shape._border_width)})
sf = SubElement(border, '{%s}solidFill' % DRAWING_NS)
SubElement(sf, '{%s}srgbClr' % DRAWING_NS, {'val':shape.border_color})
self._write_style(sp)
self._write_text(sp, shape)
shape_id += 1
return tostring(root)
def _write_text(self, node, shape):
""" write text in the shape """
tx_body = SubElement(node, '{%s}txBody' % CHART_DRAWING_NS)
SubElement(tx_body, '{%s}bodyPr' % DRAWING_NS, {'vertOverflow':'clip'})
SubElement(tx_body, '{%s}lstStyle' % DRAWING_NS)
p = SubElement(tx_body, '{%s}p' % DRAWING_NS)
if shape.text:
r = SubElement(p, '{%s}r' % DRAWING_NS)
rpr = SubElement(r, '{%s}rPr' % DRAWING_NS, {'lang':'en-US'})
fill = SubElement(rpr, '{%s}solidFill' % DRAWING_NS)
SubElement(fill, '{%s}srgbClr' % DRAWING_NS, {'val':shape.text_color})
SubElement(r, '{%s}t' % DRAWING_NS).text = shape.text
else:
SubElement(p, '{%s}endParaRPr' % DRAWING_NS, {'lang':'en-US'})
def _write_style(self, node):
""" write style theme """
style = SubElement(node, '{%s}style' % CHART_DRAWING_NS)
ln_ref = SubElement(style, '{%s}lnRef' % DRAWING_NS, {'idx':'2'})
scheme_clr = SubElement(ln_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
SubElement(scheme_clr, '{%s}shade' % DRAWING_NS, {'val':'50000'})
fill_ref = SubElement(style, '{%s}fillRef' % DRAWING_NS, {'idx':'1'})
SubElement(fill_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
effect_ref = SubElement(style, '{%s}effectRef' % DRAWING_NS, {'idx':'0'})
SubElement(effect_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
font_ref = SubElement(style, '{%s}fontRef' % DRAWING_NS, {'idx':'minor'})
SubElement(font_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'lt1'})
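# Example (hypothetical chart object): serialize one rectangle to chart XML.
# >>> shape = Shape(chart, coordinates=((0, 0), (2, 10)), text="peak")
# >>> xml_bytes = ShapeWriter([shape]).write(shape_id=1)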
| 27.160671
| 113
| 0.607099
| 1,124
| 11,326
| 5.922598
| 0.349644
| 0.060838
| 0.031546
| 0.014421
| 0.095088
| 0.075259
| 0.045666
| 0.017425
| 0.017425
| 0.017425
| 0
| 0.012571
| 0.269557
| 11,326
| 416
| 114
| 27.225962
| 0.792095
| 0.049002
| 0
| 0.056962
| 0
| 0
| 0.087689
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101266
| false
| 0
| 0.044304
| 0.025316
| 0.253165
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3bd2daadf5e4d9e5163b4a0fc7578b8fb655779
| 3,118
|
py
|
Python
|
scripts/VCF/FILTER/subset_vcf.py
|
elowy01/igsr_analysis
|
ffea4885227c2299f886a4f41e70b6e1f6bb43da
|
[
"Apache-2.0"
] | 3
|
2018-04-20T15:04:34.000Z
|
2022-03-30T06:36:02.000Z
|
scripts/VCF/FILTER/subset_vcf.py
|
elowy01/igsr_analysis
|
ffea4885227c2299f886a4f41e70b6e1f6bb43da
|
[
"Apache-2.0"
] | 7
|
2019-06-06T09:22:20.000Z
|
2021-11-23T17:41:52.000Z
|
scripts/VCF/FILTER/subset_vcf.py
|
elowy01/igsr_analysis
|
ffea4885227c2299f886a4f41e70b6e1f6bb43da
|
[
"Apache-2.0"
] | 5
|
2017-11-02T11:17:35.000Z
|
2021-12-11T19:34:09.000Z
|
from VcfQC import VcfQC
from ReseqTrackDB import File
from ReseqTrackDB import ReseqTrackDB
import argparse
import os
import logging
import datetime
#get command line arguments
parser = argparse.ArgumentParser(description='Script to subset a VCF by excluding the variants within the regions defined by a BED file')
'''
Reseqtrack DB connection parameters
'''
parser.add_argument('--hostname', type=str, required=True, help='Hostname for ReseqTrack DB' )
parser.add_argument('--username', type=str, required=True, help='User for ReseqTrack DB' )
parser.add_argument('--port', type=int, required=True, help='Port number in the ReseqTrack DB' )
parser.add_argument('--pwd', type=str, help='PWD for the ReseqTrack DB' )
parser.add_argument('--db', type=str, required=True, help='DB name in the ReseqTrack DB' )
parser.add_argument('--type', type=str, required=True, help='Type of the new VCF file' )
parser.add_argument('--vcftools_folder', type=str, required=True, help='Folder containing the VCFtools binary' )
parser.add_argument('--bgzip_folder', type=str, required=True, help='Folder containing the bgzip binary')
parser.add_argument('--filename', type=str, required=True, help='Name (without the fullpath) of the VCF file that will be analysed. It assumes that the filename format is for example lc_bams.gatk.xxxx.vcf.gz, where lc_bams is the analysis group and gatk is the method used' )
parser.add_argument('--bed', type=str, required=True, help='BED file containing the coordinates to exclude' )
parser.add_argument('--outsuffix', type=str, required=True, help='Suffix for vcf output file. i.e. no_cms or no_offtarget' )
parser.add_argument('--outdir', type=str, required=True, help='Directory used to put the output files.' )
args = parser.parse_args()
if __name__ == '__main__':
if os.path.isdir(args.outdir) == False:
raise Exception("Output dir does not exist: %s"%args.outdir)
    hostname = args.hostname
    username = args.username
    db = args.db
    port = args.port
    pwd = args.pwd
    reseqdb = ReseqTrackDB(host=hostname, user=username, port=port, pwd=pwd, db=db)
    file = reseqdb.fetch_file_by_filename(args.filename)
    # constructing the out filename
    now = datetime.datetime.now().strftime('%Y%m%d')
    bits = os.path.basename(file.name).split('.')
    outprefix = bits[0] + "." + bits[1] + "." + args.outsuffix + "." + now
    log_filename = "subset_vcf_%s.log" % outprefix
logger = logging.getLogger("subset_vcf")
logger.setLevel(logging.INFO)
# create the logging file handler
fh = logging.FileHandler(log_filename)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add handler to logger object
logger.addHandler(fh)
logger.info("Program started")
vcfQC = VcfQC(vcf=file.path,bgzip_folder=args.bgzip_folder,vcftools_folder=args.vcftools_folder)
vcffile=vcfQC.subset_vcf(bed=args.bed,outprefix=outprefix,outdir=args.outdir,create_index=True)
f=File(path=vcffile,type=args.type,host_id=1,withdrawn=0)
f.store(reseqdb,do_md5=True)
logger.info("Done!.")
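# Example invocation (hypothetical paths and credentials):
# python subset_vcf.py --hostname db.example.org --username reseq --port 4175 \
#     --db reseqtrack --type NO_CMS_VCF --vcftools_folder /opt/vcftools/bin \
#     --bgzip_folder /opt/htslib/bin --filename lc_bams.gatk.20200101.vcf.gz \
#     --bed exclude.bed --outsuffix no_cms --outdir /tmp/vcf_out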
| 41.026316
| 275
| 0.735407
| 453
| 3,118
| 4.966887
| 0.335541
| 0.048
| 0.090667
| 0.084444
| 0.197333
| 0.115556
| 0.072889
| 0.042667
| 0.042667
| 0
| 0
| 0.001854
| 0.135022
| 3,118
| 75
| 276
| 41.573333
| 0.832406
| 0.037524
| 0
| 0
| 0
| 0.021277
| 0.310532
| 0.008466
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.148936
| 0
| 0.148936
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3bef41781bb732a7cb06f991f90aba75666a0ca
| 4,276
|
py
|
Python
|
nova/tests/unit/conductor/tasks/test_migrate.py
|
badock/nova-tidb
|
4c4591f2cd887fdc22828e12f0c297c051bbd912
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/conductor/tasks/test_migrate.py
|
badock/nova-tidb
|
4c4591f2cd887fdc22828e12f0c297c051bbd912
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/conductor/tasks/test_migrate.py
|
badock/nova-tidb
|
4c4591f2cd887fdc22828e12f0c297c051bbd912
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.compute import rpcapi as compute_rpcapi
from nova.conductor.tasks import migrate
from nova import objects
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit.conductor.test_conductor import FakeContext
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
class MigrationTaskTestCase(test.NoDBTestCase):
def setUp(self):
super(MigrationTaskTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
self.flavor = fake_flavor.fake_flavor_obj(self.context)
self.flavor.extra_specs = {'extra_specs': 'fake'}
inst = fake_instance.fake_db_instance(image_ref='image_ref',
instance_type=self.flavor)
inst_object = objects.Instance(
flavor=self.flavor,
numa_topology=None,
pci_requests=None,
system_metadata={'image_hw_disk_bus': 'scsi'})
self.instance = objects.Instance._from_db_object(
self.context, inst_object, inst, [])
self.request_spec = objects.RequestSpec(image=objects.ImageMeta())
self.hosts = [dict(host='host1', nodename=None, limits={})]
self.filter_properties = {'limits': {}, 'retry': {'num_attempts': 1,
'hosts': [['host1', None]]}}
self.reservations = []
self.clean_shutdown = True
def _generate_task(self):
return migrate.MigrationTask(self.context, self.instance, self.flavor,
self.request_spec, self.reservations,
self.clean_shutdown,
compute_rpcapi.ComputeAPI(),
scheduler_client.SchedulerClient())
@mock.patch.object(objects.RequestSpec, 'from_components')
@mock.patch.object(scheduler_utils, 'setup_instance_group')
@mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations')
@mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize')
@mock.patch.object(objects.Quotas, 'from_reservations')
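    # mock.patch decorators apply bottom-up, so the mock arguments below
    # arrive in the reverse order of the decorators above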
def test_execute(self, quotas_mock, prep_resize_mock,
sel_dest_mock, sig_mock, request_spec_from_components):
sel_dest_mock.return_value = self.hosts
task = self._generate_task()
request_spec_from_components.return_value = self.request_spec
legacy_request_spec = self.request_spec.to_legacy_request_spec_dict()
task.execute()
quotas_mock.assert_called_once_with(self.context, self.reservations,
instance=self.instance)
sig_mock.assert_called_once_with(self.context, legacy_request_spec,
self.filter_properties)
task.scheduler_client.select_destinations.assert_called_once_with(
self.context, self.request_spec)
prep_resize_mock.assert_called_once_with(
self.context, self.instance, legacy_request_spec['image'],
self.flavor, self.hosts[0]['host'], self.reservations,
request_spec=legacy_request_spec,
filter_properties=self.filter_properties,
node=self.hosts[0]['nodename'], clean_shutdown=self.clean_shutdown)
self.assertFalse(quotas_mock.return_value.rollback.called)
def test_rollback(self):
task = self._generate_task()
task.quotas = mock.MagicMock()
task.rollback()
task.quotas.rollback.assert_called_once_with()
| 47.511111
| 79
| 0.667212
| 501
| 4,276
| 5.461078
| 0.313373
| 0.052266
| 0.027412
| 0.03655
| 0.118421
| 0.07383
| 0.054094
| 0.028509
| 0
| 0
| 0
| 0.002786
| 0.244621
| 4,276
| 89
| 80
| 48.044944
| 0.844272
| 0.127689
| 0
| 0.028986
| 0
| 0
| 0.051144
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 1
| 0.057971
| false
| 0
| 0.144928
| 0.014493
| 0.231884
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3c289b2ddb7ec4ef9412f5ae94e7553200e0202
| 4,668
|
py
|
Python
|
mojoco trivial/mujocoSim/UR5/simple_example/Mujoco_py_example.py
|
garlicbutter/Jonathan-Tom
|
c1696f0a94da46911b3566a3d4f49791e877373f
|
[
"MIT"
] | 2
|
2021-10-05T04:31:19.000Z
|
2021-10-05T04:31:26.000Z
|
mojoco trivial/mujocoSim/UR5/simple_example/Mujoco_py_example.py
|
garlicbutter/Tom-Jonathan
|
c1696f0a94da46911b3566a3d4f49791e877373f
|
[
"MIT"
] | null | null | null |
mojoco trivial/mujocoSim/UR5/simple_example/Mujoco_py_example.py
|
garlicbutter/Tom-Jonathan
|
c1696f0a94da46911b3566a3d4f49791e877373f
|
[
"MIT"
] | null | null | null |
import numpy as np
import mujoco_py as mj
from mujoco_py_renderer import SimulationError, XMLError, MujocoPyRenderer
from mujoco_py import (MjSim, load_model_from_xml,functions,
load_model_from_path, MjSimState,
ignore_mujoco_warnings,
load_model_from_mjb)
from matplotlib import pyplot as plt
import time
xml = """
<mujoco model="example">
<compiler coordinate="global"/>
<default>
<geom rgba=".8 .6 .4 1"/>
</default>
<asset>
<texture type="skybox" builtin="gradient" rgb1="1 1 1" rgb2=".6 .8 1"
width="256" height="256"/>
</asset>
<worldbody>
<light pos="0 1 1" dir="0 -1 -1" diffuse="1 1 1"/>
<geom name="floor" pos="0 0 0" rgba="0.8 0.9 0.8 1" size="10 10 10" type="plane"/>
<body>
<site name="world" size="0.1" pos="0 0 0" />
<geom name="first_pole" type="capsule" fromto="0 0 0 0 0 0.5" size="0.04"/>
<joint name='a' type="hinge" pos="0 0 0" axis="0 0 1" />
<body name="second_pole">
<inertial pos="0 0 0" mass="0.00000001" diaginertia="1e-008 1e-008 1e-008" />
<geom type="capsule" fromto="0 0 0.5 0.5 0 0.5" size="0.04" name="second_pole"/>
<joint name='b' type="hinge" pos="0 0 0.5" axis="0 1 0"/>
<body name='third_pole'>
<inertial pos="0 0 0" mass="0.00000001" diaginertia="1e-008 1e-008 1e-008" />
<geom type="capsule" fromto="0.5 0 0.5 1 0 0.5" size="0.04" name="third_pole"/>
<joint name='c' type="hinge" pos="0.5 0 0.5" axis="0 1 0"/>
<site name="target" size="0.1" pos="1 0 0.5" />
<body name="mass">
<inertial pos="1 0 0.5" mass="1e-2" diaginertia="1e-008 1e-008 1e-008" />
<geom type="sphere" pos="1 0 0.5" size="0.2" name="mass"/>
</body>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor joint="a"/>
<motor joint="b"/>
<motor joint="c"/>
</actuator>
</mujoco>
"""
model = load_model_from_xml(xml)
sim = MjSim(model)
viewer = MujocoPyRenderer(sim)
sim.reset()
# After reset jacobians are all zeros
sim.forward()
target_jacp = np.zeros(3 * sim.model.nv)
target_jacr = np.zeros(3 * sim.model.nv)
F = np.array([0, 0, -9.81*1e-2, 0, 0, 0]).T
#np.testing.assert_allclose(target_jacp, np.zeros(3 * sim.model.nv))
# After first forward, jacobians are real
#sim.forward()
K_diag=2000
C_diag=100
A_diag=1e-3
K=np.identity(3)*K_diag
C=np.identity(3)*C_diag
A=np.identity(3)*A_diag
#K_diag=0.3
#C_diag=0.05
for i in range(3):
K[i, i]=K_diag
C[i,i]=C_diag
A[i, i] = A_diag
x_intial=sim.data.site_xpos[1]
print(x_intial)
x_desired=np.array([0,1,0.3])
v_intial=sim.data.site_xvelp[1]
v_desired=np.array([0,0,0])
a_desired=np.array([0,0,0])
a_intial=np.array([0,0,0])
dt=sim.model.opt.timestep
#sim.data.get_site_jacp('target', jacp=target_jacp)
# Should be unchanged after steps (zero action)
graph=[]
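# Task-space impedance control: F = K*(x_d - x) + C*(v_d - v) + A*(a_d - a),
# mapped to joint torques via the site Jacobian and the joint-space mass
# matrix H on every step.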
for _ in range(100000):
F[:3]=np.dot(K,x_desired-x_intial)+np.dot(C,v_desired-v_intial)+np.dot(A,a_desired-a_intial)
H = np.zeros(sim.model.nv* sim.model.nv)
functions.mj_fullM(sim.model, H, sim.data.qM)
sim.data.get_site_jacp('target', jacp=target_jacp)
sim.data.get_site_jacr('target', jacr=target_jacr)
J_L = target_jacp.reshape((3, sim.model.nv))
J_A = target_jacr.reshape((3, sim.model.nv))
J = np.concatenate((J_L, J_A), axis=0)
H_L =np.dot(np.linalg.pinv(J_L.T),np.dot(H.reshape(sim.model.nv, sim.model.nv), np.linalg.pinv(J_L)))
H_all=np.dot(np.linalg.pinv(J.T),np.dot(H.reshape(sim.model.nv, sim.model.nv), np.linalg.pinv(J)))
#F_a=np.dot(A,0.3-sim.data.qacc)
#action = np.dot(J_L.T, np.dot(H_L, F[:3]))+sim.data.qfrc_bias
action = sim.data.qfrc_bias+np.dot(H.reshape(3,3),np.dot(J_L.T,F[:3]))
#print(action)
#action = np.dot(J.T, F)
sim.data.ctrl[:] = action
sim.step()
sim.forward()
#print(np.max(action))
#print(sim.data.qacc)
viewer.render()
x_intial = sim.data.site_xpos[1]
a_intial=(v_intial-sim.data.site_xvelp[1])/dt
print(a_intial)
v_intial = sim.data.site_xvelp[1]
normal=np.linalg.norm(x_intial-x_desired)
#print(normal)
if normal<0.1:
print("in")
if x_desired[0]==0:
x_desired = np.array([-1, 0, 0.5])
elif x_desired[0]==1:
x_desired = np.array([0, 1, 0.3])
elif x_desired[0] == -1:
x_desired = np.array([1, 0, 0.5])
graph.append(np.abs(x_intial-x_desired))
# sim.forward()
print("the desired is {} and the intial is{}".format(x_desired,x_intial))
plt.plot(graph)
plt.show()
| 29.923077
| 105
| 0.610111
| 835
| 4,668
| 3.291018
| 0.201198
| 0.028384
| 0.016376
| 0.0131
| 0.356623
| 0.338428
| 0.268559
| 0.212518
| 0.1754
| 0.091703
| 0
| 0.071776
| 0.206084
| 4,668
| 156
| 106
| 29.923077
| 0.669725
| 0.1009
| 0
| 0.127273
| 0
| 0.127273
| 0.402296
| 0.005023
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.054545
| 0
| 0.054545
| 0.036364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3c726cfaf4ab3b53d1df8bd6d6c24aef693e3ab
| 5,066
|
py
|
Python
|
fedml_api/standalone/federated_sgan/fedssgan_api.py
|
arj119/FedML
|
5b7c098659f3e61f9e44583965300d8d0829f7a8
|
[
"Apache-2.0"
] | null | null | null |
fedml_api/standalone/federated_sgan/fedssgan_api.py
|
arj119/FedML
|
5b7c098659f3e61f9e44583965300d8d0829f7a8
|
[
"Apache-2.0"
] | null | null | null |
fedml_api/standalone/federated_sgan/fedssgan_api.py
|
arj119/FedML
|
5b7c098659f3e61f9e44583965300d8d0829f7a8
|
[
"Apache-2.0"
] | null | null | null |
import copy
import logging
import random
from typing import List, Tuple
import numpy as np
import torch
import wandb
from torch.utils.data import ConcatDataset
from fedml_api.standalone.fedavg.my_model_trainer import MyModelTrainer
from fedml_api.standalone.federated_sgan.ac_gan_model_trainer import ACGANModelTrainer
from fedml_api.standalone.federated_sgan.client import FedSSGANClient
from fedml_api.standalone.federated_sgan.model_trainer import FedSSGANModelTrainer
from fedml_api.standalone.utils.HeterogeneousModelBaseTrainerAPI import HeterogeneousModelBaseTrainerAPI
class FedSSGANAPI(HeterogeneousModelBaseTrainerAPI):
def __init__(self, dataset, device, args, adapter_model, client_models: List[Tuple[torch.nn.Module, int]]):
"""
Args:
dataset: Dataset presplit into data loaders
device: Device to run training on
args: Additional args
client_models: List of client models and their frequency participating (assuming a stateful algorithm for simplicity)
"""
super().__init__(dataset, device, args)
self.global_model = MyModelTrainer(adapter_model)
self._setup_clients(self.train_data_local_num_dict, self.train_data_local_dict, self.test_data_local_dict,
client_models)
self._plot_client_training_data_distribution()
def _setup_clients(self, train_data_local_num_dict, train_data_local_dict, test_data_local_dict,
client_models):
logging.info("############setup_clients (START)#############")
c_idx = 0
for local_model, freq in client_models:
for i in range(freq):
model_trainer = ACGANModelTrainer(
copy.deepcopy(self.global_model.model),
copy.deepcopy(local_model)
)
c = FedSSGANClient(c_idx, train_data_local_dict[c_idx], test_data_local_dict[c_idx],
train_data_local_num_dict[c_idx], self.test_global, self.args, self.device,
model_trainer)
c_idx += 1
self.client_list.append(c)
logging.info("############setup_clients (END)#############")
def train(self):
logging.info('\n###############Pre-Training clients#############\n')
for i, c in enumerate(self.client_list):
            logging.info(f'Pre-training client: {i}')
c.pre_train()
logging.info('###############Pre-Training clients (END)###########\n')
unlabelled_synthesised_data = None
w_global = self.global_model.get_model_params()
for round_idx in range(self.args.comm_round):
logging.info("################Communication round : {}".format(round_idx))
w_locals = []
synthesised_data_locals = []
client_synthesised_data_lens = {'round': round_idx}
client: FedSSGANClient
for idx, client in enumerate(self.client_list):
# Update client synthetic datasets
# client.set_synthetic_dataset(unlabelled_synthesised_data)
# Local round
w = client.train(copy.deepcopy(w_global), round_idx)
# self.logger.info("local weights = " + str(w))
w_locals.append((client.get_sample_number(), copy.deepcopy(w)))
# synthetic_data = client.generate_synthetic_dataset()
# if synthetic_data is not None:
# synthesised_data_locals.append(synthetic_data)
# client_synthesised_data_lens[f'Client_{idx}: Synthetic Dataset Size'] = len(synthetic_data)
# else:
# client_synthesised_data_lens[f'Client_{idx}: Synthetic Dataset Size'] = 0
#
# if len(synthesised_data_locals) > 0:
# unlabelled_synthesised_data = ConcatDataset(synthesised_data_locals)
# logging.info(f'\n Synthetic Unlabelled Dataset Size: {len(unlabelled_synthesised_data)}\n')
# client_synthesised_data_lens['Total Synthetic Dataset Size'] = len(unlabelled_synthesised_data)
# else:
# unlabelled_synthesised_data = None
# client_synthesised_data_lens['Total Synthetic Dataset Size'] = 0
# wandb.log(client_synthesised_data_lens)
# update global weights
w_global = self._aggregate(w_locals)
self.global_model.set_model_params(w_global)
# test results
# at last round
if round_idx == self.args.comm_round - 1:
self._local_test_on_all_clients(round_idx)
# per {frequency_of_the_test} round
elif round_idx % self.args.frequency_of_the_test == 0:
if self.args.dataset.startswith("stackoverflow"):
self._local_test_on_validation_set(round_idx)
else:
self._local_test_on_all_clients(round_idx)
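# Example (hypothetical models and args): three clients per architecture.
# >>> api = FedSSGANAPI(dataset, device, args, adapter_model,
# ...                   client_models=[(small_cnn, 3), (large_cnn, 3)])
# >>> api.train()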
| 44.831858
| 129
| 0.627319
| 562
| 5,066
| 5.329181
| 0.238434
| 0.080134
| 0.028047
| 0.050083
| 0.23172
| 0.193656
| 0.116861
| 0.116861
| 0.036728
| 0.036728
| 0
| 0.001907
| 0.275365
| 5,066
| 112
| 130
| 45.232143
| 0.813947
| 0.250888
| 0
| 0.032258
| 0
| 0
| 0.074792
| 0.042238
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048387
| false
| 0
| 0.209677
| 0
| 0.274194
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3c78b4ed55d10de069695bce6f3d899ee02cc99
| 20,932
|
py
|
Python
|
pytorch-word2vec-master/csv.py
|
arjun-sai-krishnan/tamil-morpho-embeddings
|
a33bcb427d635dba3b1857f26ea7ab287e1a44c5
|
[
"MIT"
] | 2
|
2021-04-11T18:25:16.000Z
|
2022-03-16T03:48:52.000Z
|
pytorch-word2vec-master/csv.py
|
arjun-sai-krishnan/tamil-morpho-embeddings
|
a33bcb427d635dba3b1857f26ea7ab287e1a44c5
|
[
"MIT"
] | null | null | null |
pytorch-word2vec-master/csv.py
|
arjun-sai-krishnan/tamil-morpho-embeddings
|
a33bcb427d635dba3b1857f26ea7ab287e1a44c5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
from collections import Counter
import pdb
import pickle
import re
import sys
import time
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import torch.multiprocessing as mp
import data_producer
from multiprocessing import set_start_method
parser = argparse.ArgumentParser()
parser.add_argument("--train", type=str, default="", help="training file")
parser.add_argument("--vocab", type=str, default="", help="vocab pickle file")
parser.add_argument("--save", type=str, default="csv.pth.tar", help="saved model filename")
parser.add_argument("--size", type=int, default=300, help="word embedding dimension")
parser.add_argument("--window", type=int, default=5, help="context window size")
parser.add_argument("--sample", type=float, default=1e-5, help="subsample threshold")
parser.add_argument("--negative", type=int, default=10, help="number of negative samples")
parser.add_argument("--delta", type=float, default=0.15, help="create new sense for a type if similarity lower than this value.")
parser.add_argument("--min_count", type=int, default=5, help="minimum frequency of a word")
parser.add_argument("--processes", type=int, default=4, help="number of processes")
parser.add_argument("--num_workers", type=int, default=6, help="number of workers for data processsing")
parser.add_argument("--iter", type=int, default=3, help="number of iterations")
parser.add_argument("--lr", type=float, default=-1.0, help="initial learning rate")
parser.add_argument("--batch_size", type=int, default=100, help="(max) batch size")
parser.add_argument("--cuda", action='store_true', default=False, help="enable cuda")
parser.add_argument("--multi_proto", action='store_true', default=False, help="True: multi-prototype, False:single-prototype")
MAX_SENT_LEN = 1000
# Build the vocabulary.
def file_split(f, delim=' \t\n', bufsize=1024):
prev = ''
while True:
s = f.read(bufsize)
if not s:
break
tokens = re.split('['+delim+']{1,}', s)
if len(tokens) > 1:
yield prev + tokens[0]
prev = tokens[-1]
for x in tokens[1:-1]:
yield x
else:
prev += s
if prev:
yield prev
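# Minimal usage sketch for file_split (illustrative, not part of the original
# script): it works on any object with a .read() method, so io.StringIO can
# stand in for a real training file.
#
#   import io
#   list(file_split(io.StringIO("the quick\tbrown\nfox")))
#   # -> ['the', 'quick', 'brown', 'fox']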
def build_vocab(args):
vocab = Counter()
word_count = 0
for word in file_split(open(args.train)):
vocab[word] += 1
word_count += 1
if word_count % 10000 == 0:
sys.stdout.write('%d\r' % len(vocab))
freq = {k:v for k,v in vocab.items() if v >= args.min_count}
word_count = sum([freq[k] for k in freq])
word_list = sorted(freq, key=freq.get, reverse=True)
word2idx = {}
for i,w in enumerate(word_list):
word2idx[w] = i
print("Vocab size: %ld" % len(word2idx))
print("Words in train file: %ld" % word_count)
vars(args)['vocab_size'] = len(word2idx)
vars(args)['train_words'] = word_count
return word2idx, word_list, freq
class CSV(nn.Module):
def __init__(self, args):
super(CSV, self).__init__()
self.global_embs = nn.Embedding(args.vocab_size+1, args.size, padding_idx=args.vocab_size, sparse=True)
self.sense_embs = nn.Embedding(args.vocab_size*5, args.size, sparse=True)
self.ctx_weight = torch.nn.Parameter(torch.ones(2*args.window, args.size))
self.word2sense = [ [i] for i in range(args.vocab_size) ]
'''
word2sense = np.zeros((args.vocab_size, 5), dtype='int32')
for i in range(args.vocab_size):
word2sense[i, 0] = i
self.word2sense = torch.nn.Parameter(torch.from_numpy(word2sense).int())
self.word_sense_cnts = torch.nn.Parameter(torch.ones((args.vocab_size,)).int())
'''
self.global_embs.weight.data.uniform_(-0.5/args.size, 0.5/args.size)
self.sense_embs.weight.data.uniform_(-0.5/args.size, 0.5/args.size)
self.n_senses = args.vocab_size
self.sense_capacity = args.vocab_size*5
self.batch_size = args.batch_size
self.size = args.size
self.window = args.window
self.negative = args.negative
self.pad_idx = args.vocab_size
def get_context_feats(self, ctx_type_indices):
ctx_type_embs = self.global_embs(ctx_type_indices)
return torch.sum(ctx_type_embs * self.ctx_weight, 1).cpu().data.numpy()
def get_possible_sense_embs(self, type_indices, cuda=True):
sense_indices = []
sense2idx = {}
for type_id in type_indices:
for s_id in self.word2sense[type_id]:
if s_id not in sense2idx:
sense2idx[s_id] = len(sense_indices)
sense_indices.append( s_id )
sense_indices = np.array(sense_indices)
if cuda:
sense_embs = self.sense_embs(Variable(torch.LongTensor(sense_indices).cuda()))
return sense2idx, sense_embs.cpu().data.numpy()
else:
sense_embs = self.sense_embs(Variable(torch.LongTensor(sense_indices)))
return sense2idx, sense_embs.data.numpy()
def forward(self, data):
ctx_type_indices = data[:, 0:2*self.window]
pos_sense_idx = data[:, 2*self.window+1]
neg_sense_indices = data[:, 2*self.window+2:2*self.window+2+self.negative]
neg_mask = data[:, 2*self.window+2+self.negative:].float()
ctx_type_embs = self.global_embs(ctx_type_indices)
pos_sense_embs = self.sense_embs(pos_sense_idx)
neg_sense_embs = self.sense_embs(neg_sense_indices)
ctx_feats = torch.sum(ctx_type_embs * self.ctx_weight, 1, keepdim=True)
# Neg Log Likelihood
pos_ips = torch.sum(ctx_feats[:,0,:] * pos_sense_embs, 1)
pos_loss = torch.sum( -F.logsigmoid(torch.clamp(pos_ips,max=10,min=-10)))
neg_ips = torch.bmm(neg_sense_embs, ctx_feats.permute(0,2,1))[:,:,0]
neg_loss = torch.sum( -F.logsigmoid(torch.clamp(-neg_ips,max=10,min=-10)) * neg_mask )
return pos_loss + neg_loss
# Initialize model.
def init_net(args):
if args.lr == -1.0:
vars(args)['lr'] = 0.05
return CSV(args)
def save_model(filename, model, args, word2idx):
torch.save({
'word2idx':word2idx,
'args':args,
'word2sense': model.word2sense,  # load_model() below reads this key
'n_senses': model.n_senses,
'params': model.state_dict()
}, filename)
def load_model(filename):
checkpoint = torch.load(filename)
word2idx = checkpoint['word2idx']
args = checkpoint['args']
model = CSV(args)
if args.cuda:
model.cuda()
model.global_embs.weight.data = checkpoint['params']['global_embs.weight']
model.sense_embs.weight.data = checkpoint['params']['sense_embs.weight']
model.ctx_weight.data = checkpoint['params']['ctx_weight']
model.word2sense = checkpoint['word2sense']
#model.word2sense.data = checkpoint['params']['word2sense']
#model.word_sense_cnts.data = checkpoint['params']['word_sense_cnts']
model.n_senses = checkpoint['n_senses']
return model, word2idx
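# Hedged save/load roundtrip sketch (filename is illustrative; assumes the
# checkpoint stores 'word2sense', which load_model() reads back):
#
#   save_model('csv.pth.tar', model, args, word2idx)
#   model, word2idx = load_model('csv.pth.tar')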
# Training
def train_process_sent_producer(p_id, data_queue, word_count_actual, word_list, word2idx, freq, args):
n_proc = 1 if args.stage == 2 else args.processes
N = 1 if args.stage == 2 else args.iter
neg = 0 if args.stage == 2 else args.negative
if args.negative > 0:
table_ptr_val = data_producer.init_unigram_table(word_list, freq, args.train_words)
train_file = open(args.train)
file_pos = args.file_size * p_id // n_proc
train_file.seek(file_pos, 0)
while True:
try:
train_file.read(1)
except UnicodeDecodeError:
file_pos -= 1
train_file.seek(file_pos, 0)
else:
train_file.seek(file_pos, 0)
break
batch_count = 0
batch_placeholder = np.zeros((args.batch_size, 2*args.window+2+2*neg), 'int64')
for it in range(N):
train_file.seek(file_pos, 0)
last_word_cnt = 0
word_cnt = 0
sentence = []
prev = ''
eof = False
while True:
if eof or train_file.tell() > file_pos + args.file_size / n_proc:
break
while True:
s = train_file.read(1)
if not s:
eof = True
break
elif s == ' ' or s == '\t':
if prev in word2idx:
sentence.append(prev)
prev = ''
if len(sentence) >= MAX_SENT_LEN:
break
elif s == '\n':
if prev in word2idx:
sentence.append(prev)
prev = ''
break
else:
prev += s
if len(sentence) > 0:
# subsampling
sent_id = []
if args.sample != 0:
sent_len = len(sentence)
i = 0
while i < sent_len:
word = sentence[i]
f = freq[word] / args.train_words
pb = (np.sqrt(f / args.sample) + 1) * args.sample / f
if pb > np.random.random_sample():
sent_id.append( word2idx[word] )
i += 1
if len(sent_id) < 2:
word_cnt += len(sentence)
sentence.clear()
continue
next_random = (2**24) * np.random.randint(0, 2**24) + np.random.randint(0, 2**24)
chunk = data_producer.cbow_producer(sent_id, len(sent_id), table_ptr_val, args.window,
neg, args.vocab_size, args.batch_size, next_random)
chunk_pos = 0
while chunk_pos < chunk.shape[0]:
remain_space = args.batch_size - batch_count
remain_chunk = chunk.shape[0] - chunk_pos
if remain_chunk < remain_space:
take_from_chunk = remain_chunk
else:
take_from_chunk = remain_space
batch_placeholder[batch_count:batch_count+take_from_chunk, :] = chunk[chunk_pos:chunk_pos+take_from_chunk, :]
batch_count += take_from_chunk
if batch_count == args.batch_size:
data_queue.put(batch_placeholder)
batch_count = 0
chunk_pos += take_from_chunk
word_cnt += len(sentence)
if word_cnt - last_word_cnt > 10000:
with word_count_actual.get_lock():
word_count_actual.value += word_cnt - last_word_cnt
last_word_cnt = word_cnt
sentence.clear()
with word_count_actual.get_lock():
word_count_actual.value += word_cnt - last_word_cnt
print(p_id, it, file_pos, train_file.tell(), args.file_size)
if batch_count > 0:
data_queue.put(batch_placeholder[:batch_count,:])
data_queue.put(None)
print(p_id, file_pos, train_file.tell(), args.file_size)
def train_process(p_id, word_count_actual, word2idx, word_list, freq, args, model):
data_queue = mp.SimpleQueue()
lr = args.lr
#optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
optimizer = optim.Adagrad(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
t = mp.Process(target=train_process_sent_producer, args=(p_id, data_queue, word_count_actual, word_list, word2idx, freq, args))
t.start()
#n_iter = 1 if args.stage == 2 else args.iter
n_iter = args.iter
# get from data_queue and feed to model
prev_word_cnt = 0
while True:
chunk = data_queue.get()
if chunk is None:
break
else:
# lr anneal & output
if word_count_actual.value - prev_word_cnt > 10000:
#if args.lr_anneal:
# lr = args.lr * (1 - word_count_actual.value / (n_iter * args.train_words))
# if lr < 0.0001 * args.lr:
# lr = 0.0001 * args.lr
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
#sys.stdout.write("\rAlpha: %0.8f, Progess: %0.2f, Words/sec: %f, word_cnt: %d" % (lr, word_count_actual.value / (n_iter * args.train_words) * 100, word_count_actual.value / (time.monotonic() - args.t_start), word_count_actual.value))
sys.stdout.write("\rProgess: %0.2f, Words/sec: %f, word_cnt: %d" % (word_count_actual.value / (n_iter * args.train_words) * 100, word_count_actual.value / (time.monotonic() - args.t_start), word_count_actual.value))
sys.stdout.flush()
prev_word_cnt = word_count_actual.value
if args.stage == 1:
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
optimizer.zero_grad()
loss = model(data)
loss.backward()
optimizer.step()
model.global_embs.weight.data[args.vocab_size].fill_(0)
elif args.stage == 3:
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
#type_ids = chunk[:, 2*args.window+1:2*args.window+2+2*args.negative]
type_ids = chunk[:, 2*args.window+1:2*args.window+2+args.negative]
type_ids = np.reshape(type_ids, (type_ids.shape[0] * type_ids.shape[1]))
sense2idx, sense_embs = model.get_possible_sense_embs(type_ids.tolist())
# get type_idx from chunk, and do sense selection here.
context_feats = model.get_context_feats(data[:, :2*args.window])
chunk = data_producer.select_sense(chunk, context_feats, sense2idx, sense_embs,
model.word2sense, chunk.shape[0], args.size, args.window, args.negative)
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
optimizer.zero_grad()
loss = model(data)
loss.backward()
optimizer.step()
model.global_embs.weight.data[args.vocab_size].fill_(0)
t.join()
def train_process_stage2(p_id, word_count_actual, word2idx, word_list, freq, args, model):
data_queue = mp.SimpleQueue()
sense_embs = model.sense_embs.weight.data.numpy()
counter_list = np.zeros((model.sense_capacity), dtype='float32')
t = mp.Process(target=train_process_sent_producer, args=(p_id, data_queue, word_count_actual, word_list, word2idx, freq, args))
t.start()
n_iter = 1
# get from data_queue and feed to model
prev_word_cnt = 0
while True:
chunk = data_queue.get()
if chunk is None:
break
else:
if word_count_actual.value - prev_word_cnt > 10000:
sys.stdout.write("\rProgess: %0.2f, Words/sec: %f, word_cnt: %d" % (word_count_actual.value / (n_iter * args.train_words) * 100, word_count_actual.value / (time.monotonic() - args.t_start), word_count_actual.value))
sys.stdout.flush()
prev_word_cnt = word_count_actual.value
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
context_feats = model.get_context_feats(data[:, :2*args.window])
# update sense_embs
create_cnt = data_producer.create_n_update_sense(chunk[:, 2*args.window+1], context_feats, sense_embs, model.word2sense, counter_list, chunk.shape[0], args.size, args.delta, model.n_senses)
model.n_senses += create_cnt
#if model.n_senses + args.batch_size > model.sense_capacity:
# new_capacity = model.sense_capacity * 3 // 2
# counter_list = np.concatenate( (counter_list, np.ones((new_capacity - model.sense_capacity),dtype='float32')), axis=0)
# zero = np.zeros((new_capacity - model.sense_capacity, args.size), 'float32')
# sense_embs = np.concatenate((sense_embs, zero), 0)
# model.sense_capacity = new_capacity
# print("\nexapnded sense_embs: %d" % model.n_senses)
t.join()
sense_embs[:model.n_senses, :] = sense_embs[:model.n_senses, :] / counter_list[:model.n_senses, None]
if __name__ == '__main__':
set_start_method('forkserver')
args = parser.parse_args()
print("Starting training using file %s" % args.train)
train_file = open(args.train)
train_file.seek(0, 2)
vars(args)['file_size'] = train_file.tell()
word_count_actual = mp.Value('L', 0)
if args.vocab == '':
word2idx, word_list, freq = build_vocab(args)
else:
with open(args.vocab, 'rb') as f:
word2idx, word_list, freq, pos2idx, dep2id = pickle.load(f)
word_count = sum([freq[k] for k in freq])
vars(args)['vocab_size'] = len(word2idx)
vars(args)['train_words'] = word_count
print("Vocab size: %ld" % len(word2idx))
print("Words in train file: %ld" % word_count)
model = init_net(args)
model.share_memory()
if args.cuda:
model.cuda()
# stage 1, learn robust context representation.
vars(args)['stage'] = 1
print("Stage 1")
vars(args)['lr_anneal'] = True
vars(args)['t_start'] = time.monotonic()
processes = []
for p_id in range(args.processes):
p = mp.Process(target=train_process, args=(p_id, word_count_actual, word2idx, word_list, freq, args, model))
p.start()
processes.append(p)
for p in processes:
p.join()
del processes
print("\nStage 1, ", time.monotonic() - args.t_start, " secs ", word_count_actual.value)
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage1.pth.tar'
save_model(filename, model, args, word2idx)
if args.multi_proto:
# stage 2, create new senses in a non-parametric way.
# Freeze model parameters except sense_embs, and use only 1 process to prevent race conditions
old_batch_size = vars(args)['batch_size']
model.global_embs.requires_grad = False
model.ctx_weight.requires_grad = False
model.sense_embs = model.sense_embs.cpu()
vars(args)['stage'] = 2
vars(args)['batch_size'] = 5000
print("\nStage 2")
word_count_actual.value = 0
vars(args)['t_start'] = time.monotonic()
train_process_stage2(0, word_count_actual, word2idx, word_list, freq, args, model)
if args.cuda:
model.cuda()
print("\nStage 2, ", time.monotonic() - args.t_start, " secs")
print("Current # of senses: %d" % model.n_senses)
# pdb.set_trace()  # leftover debugger breakpoint; halts training between stages if enabled
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage2.pth.tar'
save_model(filename, model, args, word2idx)
# stage 3, no more sense creation.
vars(args)['lr'] = args.lr * 0.01
vars(args)['batch_size'] = old_batch_size
model.global_embs.requires_grad = True
model.ctx_weight.requires_grad = True
vars(args)['stage'] = 3
print("\nBegin stage 3")
word_count_actual.value = 0
vars(args)['t_start'] = time.monotonic()
processes = []
for p_id in range(args.processes):
p = mp.Process(target=train_process, args=(p_id, word_count_actual, word2idx, word_list, freq, args, model))
p.start()
processes.append(p)
for p in processes:
p.join()
print("\nStage 3, ", time.monotonic() - args.t_start, " secs")
# save model
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage3.pth.tar'
save_model(filename, model, args, word2idx)
print("")
| 40.487427
| 250
| 0.591821
| 2,703
| 20,932
| 4.376619
| 0.128746
| 0.02967
| 0.038039
| 0.032122
| 0.471344
| 0.391801
| 0.352663
| 0.332798
| 0.295773
| 0.265596
| 0
| 0.01997
| 0.289509
| 20,932
| 516
| 251
| 40.565891
| 0.775484
| 0.089002
| 0
| 0.375321
| 0
| 0
| 0.066652
| 0.001184
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030848
| false
| 0
| 0.041131
| 0
| 0.092545
| 0.03856
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3c8721ad82d9b0c4f4bbb5e4ea027824401f22d
| 339
|
py
|
Python
|
Ogrenciler/Varol/buyuksayi.py
|
ProEgitim/Python-Dersleri-BEM
|
b25e9fdb1fa3026925a46b2fcbcba348726b775c
|
[
"MIT"
] | 1
|
2021-04-18T17:35:22.000Z
|
2021-04-18T17:35:22.000Z
|
Ogrenciler/Varol/buyuksayi.py
|
waroi/Python-Dersleri-BEM
|
b25e9fdb1fa3026925a46b2fcbcba348726b775c
|
[
"MIT"
] | null | null | null |
Ogrenciler/Varol/buyuksayi.py
|
waroi/Python-Dersleri-BEM
|
b25e9fdb1fa3026925a46b2fcbcba348726b775c
|
[
"MIT"
] | 2
|
2021-04-18T18:22:26.000Z
|
2021-04-24T17:16:19.000Z
|
sayi1 = int(input("1. Sayı: "))
sayi2 = int(input("2. Sayı: "))
sayi3 = int(input("3. Sayı: "))
sayi4 = int(input("4. Sayı: "))
sayi5 = int(input("5. Sayı: "))
sayilar = []
sayilar.append(sayi1)
sayilar.append(sayi2)
sayilar.append(sayi3)
sayilar.append(sayi4)
sayilar.append(sayi5)
sayilar.sort()
print("En büyük sayimiz..",sayilar[-1])
| 21.1875
| 39
| 0.663717
| 49
| 339
| 4.591837
| 0.408163
| 0.177778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05298
| 0.109145
| 339
| 15
| 40
| 22.6
| 0.692053
| 0
| 0
| 0
| 0
| 0
| 0.186944
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3c959da81854ccd184aefdeb715f7df8413b8b8
| 8,899
|
py
|
Python
|
baselines/deepq/build_graph_mfec.py
|
MouseHu/emdqn
|
ba907e959f21dd0b5a17117accccae9c82a79a3b
|
[
"MIT"
] | null | null | null |
baselines/deepq/build_graph_mfec.py
|
MouseHu/emdqn
|
ba907e959f21dd0b5a17117accccae9c82a79a3b
|
[
"MIT"
] | null | null | null |
baselines/deepq/build_graph_mfec.py
|
MouseHu/emdqn
|
ba907e959f21dd0b5a17117accccae9c82a79a3b
|
[
"MIT"
] | 1
|
2021-04-26T13:55:47.000Z
|
2021-04-26T13:55:47.000Z
|
"""Deep Q learning graph
The functions in this file are used to create the following functions:
======= act ========
Function to choose an action given an observation
Parameters
----------
observation: object
Observation that can be fed into the output of make_obs_ph
stochastic: bool
if set to False all the actions are always deterministic (default False)
update_eps_ph: float
update epsilon to a new value; if negative, no update happens
(default: no update)
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= train =======
Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:
td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))
loss = huber_loss[td_error]
Parameters
----------
obs_t: object
a batch of observations
action: np.array
actions that were selected upon seeing obs_t.
dtype must be int32 and shape must be (batch_size,)
reward: np.array
immediate reward attained after executing those actions
dtype must be float32 and shape must be (batch_size,)
obs_tp1: object
observations that followed obs_t
done: np.array
1 if obs_t was the last observation in the episode and 0 otherwise
obs_tp1 gets ignored, but must be of the valid shape.
dtype must be float32 and shape must be (batch_size,)
weight: np.array
importance weights for every element of the batch (gradient is multiplied
by the importance weight) dtype must be float32 and shape must be (batch_size,)
Returns
-------
td_error: np.array
a list of differences between Q(s,a) and the target in Bellman's equation.
dtype is float32 and shape is (batch_size,)
======= update_target ========
copy the parameters from optimized Q function to the target Q function.
In Q learning we actually optimize the following error:
Q(s,a) - (r + gamma * max_a' Q'(s', a'))
Where Q' is lagging behind Q to stabilize the learning. For example for Atari
Q' is set to Q once every 10000 training steps.
"""
import tensorflow as tf
import baselines.common.tf_util as U
import numpy as np
def build_act_mf(make_obs_ph, q_func, z_noise, num_actions, scope="deepq", reuse=None):
with tf.variable_scope(scope, reuse=reuse):
observations_ph = U.ensure_tf_input(make_obs_ph("observation"))
q, q_deterministic, v_mean, v_logvar, z_mean, z_logvar, recon_obs = q_func(observations_ph.get(), z_noise,
num_actions,
scope="q_func",
reuse=tf.AUTO_REUSE)
act = U.function(inputs=[observations_ph,z_noise],
outputs=[z_mean, z_logvar])
return act
def build_train_mf(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0, scope="mfec",
alpha=1.0, beta=1.0, theta=1.0, latent_dim=32, ib=True, reuse=None):
"""Creates the train function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: tf.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select an action given an observation.
See the top of the file for details.
train: (object, np.array, np.array, object, np.array, np.array) -> np.array
optimize the error in Bellman's equation.
See the top of the file for details.
update_target: () -> ()
copy the parameters from optimized Q function to the target Q function.
See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values.
"""
act_noise = tf.placeholder(tf.float32, [None, latent_dim], name="act_noise")
act_f = build_act_mf(make_obs_ph, q_func, act_noise, num_actions, scope=scope, reuse=reuse)
with tf.variable_scope(scope, reuse=reuse):
# set up placeholders
# EMDQN
obs_vae_input = U.ensure_tf_input(make_obs_ph("obs_vae"))
z_noise_vae = tf.placeholder(tf.float32, [None, latent_dim], name="z_noise_vae")
inputs = [obs_vae_input,z_noise_vae]
if ib:
qec_input = tf.placeholder(tf.float32, [None], name='qec')
inputs.append(qec_input)
outputs = []
q_vae, q_deterministic_vae, v_mean_vae, v_logvar_vae, z_mean_vae, z_logvar_vae, recon_obs = q_func(obs_vae_input.get(),
z_noise_vae, num_actions,
scope="q_func",
reuse=True)
q_func_vars = U.scope_vars(U.absolute_scope_name("q_func"))
encoder_loss = -1 + z_mean_vae ** 2 + tf.exp(z_logvar_vae) - z_logvar_vae
total_loss = tf.reduce_mean(beta * encoder_loss)
decoder_loss = tf.keras.losses.binary_crossentropy(tf.reshape(recon_obs, [-1]), tf.reshape(
tf.dtypes.cast(obs_vae_input._placeholder, tf.float32), [-1]))
print("here", z_mean_vae.shape, z_logvar_vae.shape, encoder_loss.shape, decoder_loss.shape)
vae_loss = beta * encoder_loss + theta * decoder_loss
outputs.append(encoder_loss)
outputs.append(decoder_loss)
outputs.append(vae_loss)
total_loss += tf.reduce_mean(theta * decoder_loss)
if ib:
ib_loss = (v_mean_vae - tf.stop_gradient(tf.expand_dims(qec_input, 1))) ** 2 / tf.exp(
v_logvar_vae) + v_logvar_vae
print("here2", v_mean_vae.shape, tf.expand_dims(qec_input, 1).shape, v_logvar_vae.shape, ib_loss.shape)
total_ib_loss = alpha * ib_loss + beta * encoder_loss
outputs.append(total_ib_loss)
total_loss += tf.reduce_mean(alpha * ib_loss)
if grad_norm_clipping is not None:
optimize_expr = U.minimize_and_clip(optimizer,
total_loss,
var_list=q_func_vars,
clip_val=grad_norm_clipping)
else:
optimize_expr = optimizer.minimize(total_loss, var_list=q_func_vars)
# Create callable functions
# EMDQN
total_loss_summary = tf.summary.scalar("total loss", total_loss)
z_var_summary = tf.summary.scalar("z_var", tf.reduce_mean(tf.exp(z_logvar_vae)))
encoder_loss_summary = tf.summary.scalar("encoder loss", tf.reduce_mean(encoder_loss))
decoder_loss_summary = tf.summary.scalar("decoder loss", tf.reduce_mean(decoder_loss))
summaries = [total_loss_summary, z_var_summary, encoder_loss_summary, decoder_loss_summary]
if ib:
ib_loss_summary = tf.summary.scalar("ib loss", tf.reduce_mean(ib_loss))
total_ib_loss_summary = tf.summary.scalar("total ib loss", tf.reduce_mean(total_ib_loss))
summaries.append(ib_loss_summary)
summaries.append(total_ib_loss_summary)
summary = tf.summary.merge(summaries)
outputs.append(summary)
train = U.function(
inputs=inputs,
outputs=[total_loss,summary],
updates=[optimize_expr]
)
return act_f, train
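# Hedged usage sketch (TF1-style; `obs_shape`, `env` and `my_q_func` are
# illustrative names, and my_q_func must follow the q_func contract in the
# docstring above):
#
#   act, train = build_train_mf(
#       make_obs_ph=lambda name: U.BatchInput(obs_shape, name=name),
#       q_func=my_q_func,
#       num_actions=env.action_space.n,
#       optimizer=tf.train.AdamOptimizer(learning_rate=1e-4),
#       latent_dim=32, ib=True)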
| 42.37619
| 127
| 0.618047
| 1,213
| 8,899
| 4.329761
| 0.222589
| 0.015994
| 0.018279
| 0.021325
| 0.246002
| 0.201828
| 0.129665
| 0.098439
| 0.058454
| 0.058454
| 0
| 0.008969
| 0.298348
| 8,899
| 209
| 128
| 42.578947
| 0.832159
| 0.480616
| 0
| 0.097222
| 0
| 0
| 0.029527
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.041667
| 0
| 0.097222
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3cadf1c1469dc28d63f965c32ff3b98b7eb9d52
| 8,719
|
py
|
Python
|
src/salgan_dhf1k/train_bce.py
|
juanjo3ns/SalGAN2
|
ac52af743b94961cdb44c5d89774b72fc8acfd3e
|
[
"MIT"
] | null | null | null |
src/salgan_dhf1k/train_bce.py
|
juanjo3ns/SalGAN2
|
ac52af743b94961cdb44c5d89774b72fc8acfd3e
|
[
"MIT"
] | null | null | null |
src/salgan_dhf1k/train_bce.py
|
juanjo3ns/SalGAN2
|
ac52af743b94961cdb44c5d89774b72fc8acfd3e
|
[
"MIT"
] | null | null | null |
import os
from dataloader.datasetDHF1K import DHF1K
from torch.utils.data import DataLoader
from utils.salgan_utils import save_model, get_lr_optimizer
from utils.sendTelegram import send
from utils.printer import param_print
from utils.salgan_generator import create_model, add_bn
from evaluation.fast_evaluation import compute_metrics
import numpy as np
import torch
from torch.nn import AvgPool2d
from torch.nn.modules.loss import BCELoss
import torch.backends.cudnn as cudnn
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
from time import time
from IPython import embed
from tensorboard_logger import configure, log_value, log_histogram
TRAIN = 'train'
VAL = 'val'
TEST = 'test'
def add_layer_weights(vgg_weights):
# Mean of RGB weights of first layer with size [64,1,3,3]
layer1 = vgg_weights['0.weight']
mean_rgb = layer1.mean(dim=1,keepdim=True)
vgg_weights['0.weight'] = torch.cat([layer1.cuda(),mean_rgb.cuda()],1)
# We could do this easily by accessing the weights through model[0].weight and changing dimension 1, but since we
# already have the 4th channel we would be averaging over all of the channels, initializing it in the wrong way.
return vgg_weights
def train_eval(mode, model, optimizer, dataloader):
if mode == TRAIN:
N = len(ds_train)/batch_size
model.train()
else:
N = len(ds_validate)/batch_size
model.eval()
total_loss = []
#iterate epoch...
for i, X in enumerate(dataloader[mode]):
inputs = X[0].cuda()
# normalize saliency map values to [0, 1]
gt_maps = X[1].cuda()/255
# embed()  # leftover IPython debug hook; would drop into a shell on every batch if enabled
predictions = model.forward(inputs).squeeze()
# reduce size for loss
reduce_size = AvgPool2d((4,4))
pred_ = reduce_size(predictions)
gt_maps_ = reduce_size(gt_maps)
pred_ = pred_.view(pred_.size()[0], -1)
gt_maps_ = gt_maps_.view(gt_maps_.size()[0], -1)
loss = bce_loss(pred_, gt_maps_)
# make actual step update
if mode==TRAIN:
# compute gradients
loss.backward()
# step optimizer
optimizer.step()
# reset grads for next step
optimizer.zero_grad()
print("\t{}/{} loss:{}".format(i, int(N), loss.item()), end="\r")
total_loss.append(loss.item())
total_loss=np.mean(total_loss)
return total_loss
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--path_out", default='sal_dhf1k_adamdepthcoordaugm2_frombestsaldepth',
type=str,
help="""set output path for the trained model""")
parser.add_argument("--batch_size", default=12,
type=int,
help="""Set batch size""")
parser.add_argument("--n_epochs", default=10, type=int,
help="""Set total number of epochs""")
parser.add_argument("--depth", default=False, type=bool,
help="""Enable 4th channel with depth""")
parser.add_argument("--augment", default=False, type=bool,
help="""Enable data augmentation""")
parser.add_argument("--coord", default=False, type=bool,
help="""Enable coordconv""")
parser.add_argument("--flow", default=False, type=bool,
help="""Enable opticalflow""")
parser.add_argument("--lr", type=float, default=0.00001,
help="""Learning rate for training""")
parser.add_argument("--patience", type=int, default=3,
help="""Patience for learning rate scheduler (default 10)""")
args = parser.parse_args()
# set output path ==========================================================
path_out = '../trained_models/batch12_/' + args.path_out
if not os.path.exists(path_out):
# create output path
os.makedirs(path_out)
# create output for models
path_models = os.path.join(path_out, 'models')
if not os.path.exists(path_models):
os.makedirs(path_models)
# tensorboard
configure("{}".format(path_out), flush_secs=5)
# data =====================================================================
batch_size = args.batch_size
n_epochs = args.n_epochs
lr = args.lr
DEPTH = args.depth
AUGMENT = args.augment
COORD = args.coord
FLOW = args.flow
# Datasets for DHF1K
ds_train = DHF1K(mode=TRAIN, transformation=True, depth=DEPTH, d_augm=AUGMENT, coord=COORD)
ds_validate = DHF1K(mode=VAL, transformation=False, depth=DEPTH, d_augm=False, coord=COORD)
# Dataloaders
dataloader = {
TRAIN: DataLoader(ds_train, batch_size=batch_size,
shuffle=True, num_workers=2),
VAL: DataLoader(ds_validate, batch_size=batch_size,
shuffle=False, num_workers=2)
}
# POSSIBILITY OF CHOOSING GPU
torch.cuda.set_device(1)
# MODEL INITIALIZATION
print("Init model...")
vgg_weights = torch.load('../trained_models/salgan_baseline.pt')['state_dict']
model = create_model(3)
# if DEPTH and COORD:
# model = create_model(6)
# for i in range(0,3):
# vgg_weights = add_layer_weights(vgg_weights)
# elif DEPTH:
# model = create_model(4)
# add_layer_weights(vgg_weights)
# elif COORD:
# model = create_model(5)
# for i in range(0,2):
# vgg_weights = add_layer_weights(vgg_weights)
# else: model = create_model(3)
# Instead of adding manually the layer of new weights, we could use strict=False
model.load_state_dict(vgg_weights)
# Add batch normalization to current model if needed
model = add_bn(model)
model.train()
model.cuda()
cudnn.benchmark = True
# NOT WORKING UNMOUNTED DISK
# If we have the two GPU's available we are going to use both
# if torch.cuda.device_count() > 1:
# print("Using ", torch.cuda.device_count(), "GPUs!")
# model = torch.nn.DataParallel(model)
# LOSS FUNCTION
bce_loss = BCELoss()
# FINE-TUNE WHOLE NETWORK OR JUST DECODER => uncomment / or different lr for each part
# decoder_parameters = []
# base_params = []
# for i, (a, p) in enumerate(model.named_parameters()):
# embed()
# if i>25:
# # print(i, a, p.shape)
# decoder_parameters.append(p)
# else:
# base_params.append(p)
# If you wanna train just the decoder put this
# p.requires_grad = False
# ADAM OPTIMIZER
optimizer = Adam(model.parameters(),
lr = lr,
weight_decay=0.000001)
# STOCHASTIC GRADIENT DESCENT OPTIMIZER
# optimizer = SGD(model.parameters(),
# lr = 0.00001,
# momentum=0.9,
# weight_decay=0.00001,
# nesterov=True)
# NUMBER OF TOTAL PARAMETERS
# pytorch_total_params = sum(p.numel() for p in model.parameters())
# NUMBER OF TRAINABLE PARAMETERS
trainable_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Trainable parameters: ", trainable_parameters)
send("Trainable parameters: " + str(trainable_parameters))
send("Experiment: " + args.path_out)
# PRINT TABLE OF PARAMETERS
param_print([path_out,"",DEPTH,AUGMENT,COORD,FLOW,batch_size,lr,n_epochs, trainable_parameters])
# set learning rate scheduler
# ReduceLROnPlateau(
# optimizer,
# mode (str): 'min' reduces the lr when the metric stops improving, 'max' does the opposite,
# factor (float): multiplicative factor by which the lr is reduced,
# patience (int): number of epochs without improvement after which the lr is reduced,
# verbose (bool),
# )
# scheduler = ReduceLROnPlateau(optimizer,
# 'min',
# patience=args.patience,
# verbose=True)
scheduler = StepLR(optimizer, step_size=3, gamma=0.1)
best_loss=9999999
# main loop training =======================================================
for id_epoch in range(n_epochs):
for mode in [VAL, TRAIN]:
# select dataloader
data_iterator = dataloader[mode]
#
# # saliency metrics
# if mode ==VAL:
# print("Evaluating metrics....")
# # only do 100 images from validation
# metrics = compute_metrics(model, 100, DEPTH, COORD)
#
# # log metric values
# for metric in metrics.keys():
# log_value("Metrics/{}".format(metric),
# metrics[metric], id_epoch)
#
# # get epoch loss
# print("--> {} epoch {}".format(mode, id_epoch))
epoch_loss = train_eval(mode, model, optimizer, dataloader)
lr = list(get_lr_optimizer(optimizer))[0]
print("-----------")
print("Done! {} epoch {} loss {} lr {}".format(mode, id_epoch, epoch_loss, lr))
send("{} epoch {}/{} loss {}".format(mode, id_epoch, n_epochs, epoch_loss))
print("\n")
# record loss
log_value("loss/{}".format(mode), epoch_loss, id_epoch)
log_value("lr/{}".format(mode), lr, id_epoch)
# for v in model.state_dict():
# log_histogram("Layer {}".format(v), model.state_dict()[v], id_epoch)
if (id_epoch%2)==0:
save_model(model, optimizer, id_epoch, path_out, name_model='{:03d}'.format(id_epoch))
# store model if val loss improves
if mode==VAL:
if best_loss > epoch_loss:
# update loss
best_loss = epoch_loss
save_model(model, optimizer, id_epoch, path_out, name_model='best')
# scheduler.step(epoch_loss)
scheduler.step()
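# Illustrative invocation (flags mirror the argparse definitions above;
# the experiment name is a placeholder):
#   python train_bce.py --path_out my_experiment --batch_size 12 \
#       --n_epochs 10 --lr 0.00001 --patience 3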
| 31.139286
| 112
| 0.686661
| 1,222
| 8,719
| 4.738953
| 0.270049
| 0.014505
| 0.02642
| 0.012433
| 0.110862
| 0.093766
| 0.038335
| 0.026248
| 0.015887
| 0.015887
| 0
| 0.015247
| 0.165042
| 8,719
| 279
| 113
| 31.250896
| 0.78022
| 0.354972
| 0
| 0.029197
| 0
| 0
| 0.117913
| 0.019743
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014599
| false
| 0
| 0.138686
| 0
| 0.167883
| 0.058394
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3cc11867421204e587bf63f6a7dd58a6716ea01
| 2,030
|
py
|
Python
|
infapy/v3/agentService.py
|
infapy/infapy
|
0cb11310130be70ce1b647aa5ede929c1eb9b2ce
|
[
"Apache-2.0"
] | null | null | null |
infapy/v3/agentService.py
|
infapy/infapy
|
0cb11310130be70ce1b647aa5ede929c1eb9b2ce
|
[
"Apache-2.0"
] | null | null | null |
infapy/v3/agentService.py
|
infapy/infapy
|
0cb11310130be70ce1b647aa5ede929c1eb9b2ce
|
[
"Apache-2.0"
] | 1
|
2021-09-23T10:31:56.000Z
|
2021-09-23T10:31:56.000Z
|
# Copyright (c) 2021-Present (Prashanth Pradeep)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests as re
import infapy
from infapy.exceptions import InvalidDetailsProvided
class AgentService():
def __init__(self,v3,v3BaseURL,v3SessionID):
self._v3 = v3
self._v3BaseURL = v3BaseURL
self._v3SessionID = v3SessionID
def updateAgentService(self,serviceName, serviceAction, agentId):
url=self._v3BaseURL + "/public/core/v3/agent/service"
headers = {'Content-Type': "application/json", 'Accept': "application/json","INFA-SESSION-ID":self._v3SessionID}
body = {
'serviceName':serviceName,
'serviceAction':serviceAction,
'agentId':agentId}
infapy.log.info("agentService API URL - " + url)
infapy.log.info("API Headers: " + str(headers))
infapy.log.info("Body: " + str(body))
try:
response = re.post(url=url, json=body, headers=headers)
data = response.json()
infapy.log.debug(str(data))
try:
if ("error" in data):
infapy.log.error("Please validate the details passed")
infapy.log.error(str(data))
raise InvalidDetailsProvided
except Exception as e:
infapy.log.exception(e)
raise
except Exception as e:
infapy.log.exception(e)
raise
infapy.log.info(data["message"])
return data
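# Hedged usage sketch (values are illustrative; v3, v3BaseURL and
# v3SessionID come from an authenticated infapy handler, not shown here):
#
#   agent_service = AgentService(v3, v3BaseURL, v3SessionID)
#   result = agent_service.updateAgentService(
#       serviceName="Data Integration Server",
#       serviceAction="stop",
#       agentId="<your-agent-id>")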
| 38.301887
| 120
| 0.634975
| 237
| 2,030
| 5.400844
| 0.50211
| 0.063281
| 0.040625
| 0.025
| 0.065625
| 0.065625
| 0.065625
| 0.065625
| 0.065625
| 0
| 0
| 0.013495
| 0.269951
| 2,030
| 53
| 121
| 38.301887
| 0.850202
| 0.279803
| 0
| 0.228571
| 0
| 0
| 0.146694
| 0.019972
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0.028571
| 0.085714
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3cd937793e2d0c588285b6a5f1e77f851ebcc85
| 5,703
|
py
|
Python
|
home_application/views.py
|
pengwow/test-demo
|
9d5c460b534d93d84f39ae24db82aa101027d199
|
[
"Apache-2.0"
] | null | null | null |
home_application/views.py
|
pengwow/test-demo
|
9d5c460b534d93d84f39ae24db82aa101027d199
|
[
"Apache-2.0"
] | 4
|
2020-02-12T01:47:04.000Z
|
2021-06-10T21:34:36.000Z
|
home_application/views.py
|
pengwow/test-demo
|
9d5c460b534d93d84f39ae24db82aa101027d199
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from common.mymako import render_mako_context, render_json
from blueking.component.shortcuts import get_client_by_request
from django.views.decorators.csrf import csrf_exempt
from models import TEST, HostDisk, ScriptExecInfo
import json
import base64
def home(request):
"""
Homepage
"""
# yewu = [
# {'id': 1, "name": u"业务1"},
# {'id': 2, "name": u"业务2"},
# {'id': 3, "name": u"业务3"},
# ]
# Get the APP info from the environment config, and the current user from the request
client = get_client_by_request(request)
kwargs = {}
result = client.cc.search_business(kwargs)
print(result)
yewu = result['data']['info']
return render_mako_context(request, '/home_application/home.html',
{
"yewu": yewu,
"AAA": u"业务列表"
})
def submit_template(request):
"""
Homepage
"""
print(request.body)
return render_json({"1111111": "dddddddddd"})
def dev_guide(request):
"""
Development guide
"""
return render_mako_context(request, '/home_application/dev_guide.html')
def contactus(request):
"""
Contact us
"""
return render_mako_context(request, '/home_application/contact.html')
def tijiao(request):
data = json.loads(request.body)
print(type(data))
sss = TEST(**data)
sss.save()
return render_json({"DATA": "AAAAAAAA"})
def host_disk(request):
host_list = HostDisk.objects.all()
re_list = list()
for item in host_list:
temp_dict = dict()
temp_dict['os'] = item.os
temp_dict['host_ip'] = item.host_ip
temp_dict['host_name'] = item.host_name
temp_dict['host_path'] = item.host_path
temp_dict['create_time'] = item.create_time
re_list.append(temp_dict)
print(re_list)
return render_mako_context(request,
'/home_application/host_disk.html',
{'host_all': re_list}
)
def host_tijiao(request):
data = request.body
print(type(data))
data = json.loads(data)
host = HostDisk(**data)
host.save()
return render_json({"status": "OK"})
def host_script(request):
# Query the job logs by job instance id
data = ScriptExecInfo.objects.all()
client = get_client_by_request(request)
script_all = list()
for item in data:
temp_dict = dict()
kwargs = {}
kwargs['bk_biz_id'] = item.bk_biz_id
kwargs['job_instance_id'] = item.job_instance_id
result = client.job.get_job_instance_log(kwargs)
log_content = result['data'][0]['step_results'][0]['ip_logs'][0]['log_content']
temp_dict['host_ip'] = item.host_ip
temp_dict['log_content'] = log_content
temp_dict['script_content'] = item.script_content
temp_dict['create_time'] = item.create_time
script_all.append(temp_dict)
return render_mako_context(request,
'/home_application/host_script.html',
{'script_all': script_all},
)
def script_tijiao(request):
try:
print(request.user.username)
except Exception as e:
print(str(e))
data = json.loads(request.body)
client = get_client_by_request(request)
kwargs = {}
result = client.cc.search_business(kwargs)
bk_biz_id = result['data']['info'][0]['bk_biz_id']
script_content = base64.b64encode(data['script_content'])
kwargs = dict()
kwargs['bk_biz_id'] = bk_biz_id
kwargs['script_content'] = script_content
kwargs["account"] = "root"
kwargs['ip_list'] = [{'bk_cloud_id': 0, "ip": data['host_ip']}]
result = client.job.fast_execute_script(kwargs)
script_dict = dict()
script_dict["host_ip"] = data['host_ip']
script_dict["script_content"] = data['script_content']
script_dict["job_instance_id"] = result['data']['job_instance_id']
script_dict['bk_biz_id'] = bk_biz_id
scriptexecinfo = ScriptExecInfo(**script_dict)
scriptexecinfo.save()
return render_json({"status": "OK"})
# #################### Other
def other(request):
return render_mako_context(request, '/home_application/other.html')
@csrf_exempt  # Note: this decorator is required here
def upload_file(request):
# A list of files is received, so we need to iterate over it
files = request.FILES
for item in files:
_file = files.get(item)
print(_file.name)
print(_file.size)
with open('./' + str(_file.name), 'wb') as fd:
fd.write(_file.file.read())
return render_json({"status": "OK"})
def download_file(request):
"""
File download
:param request:
:return: file response
"""
from django.http import FileResponse
# Read the requested filename
file_name = request.GET.get('filename')
fd = open('./' + file_name, 'rb')
response = FileResponse(fd)
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = 'attachment;filename="%s"' % file_name
return response
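# Illustrative requests (URL prefixes depend on the project's urls.py,
# which is not part of this file):
#   curl -F "file=@report.txt" "http://<host>/upload_file/"
#   curl -O -J "http://<host>/download_file/?filename=report.txt"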
| 30.015789
| 115
| 0.627389
| 705
| 5,703
| 4.859574
| 0.303546
| 0.030356
| 0.016346
| 0.04028
| 0.227087
| 0.202277
| 0.159953
| 0.088733
| 0.060128
| 0.041448
| 0
| 0.007204
| 0.245485
| 5,703
| 189
| 116
| 30.174603
| 0.788984
| 0.162195
| 0
| 0.194915
| 0
| 0
| 0.146907
| 0.049613
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101695
| false
| 0
| 0.059322
| 0.008475
| 0.262712
| 0.076271
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3cdf292bfc1d114fbf7d5d60cd7d8fcf12221e7
| 455
|
py
|
Python
|
Chapter 6/09 - The built-in multiprocessing module/basic_multiprocessing.py
|
moseskim/Expert-Python-Programming-Fourth-Edition
|
5160f974deb2365597b7be9cc032f24bfa13471a
|
[
"MIT"
] | null | null | null |
Chapter 6/09 - The built-in multiprocessing module/basic_multiprocessing.py
|
moseskim/Expert-Python-Programming-Fourth-Edition
|
5160f974deb2365597b7be9cc032f24bfa13471a
|
[
"MIT"
] | null | null | null |
Chapter 6/09 - The built-in multiprocessing module/basic_multiprocessing.py
|
moseskim/Expert-Python-Programming-Fourth-Edition
|
5160f974deb2365597b7be9cc032f24bfa13471a
|
[
"MIT"
] | null | null | null |
"""
"멀티프로세싱"절 예시
`multiprocessing` 모듈을 이용해 새로운 프로세스들을
생성하는 방법을 설명한다.
"""
from multiprocessing import Process
import os
def work(identifier):
print(f'Hey, I am the process ' f'{identifier}, pid: {os.getpid()}')
def main():
processes = [Process(target=work, args=(number,)) for number in range(5)]
for process in processes:
process.start()
while processes:
processes.pop().join()
if __name__ == "__main__":
main()
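# Illustrative output (pids and ordering vary between runs):
#   Hey, I am the process 0, pid: 9304
#   Hey, I am the process 1, pid: 9305
#   ...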
| 18.2
| 77
| 0.650549
| 60
| 455
| 4.8
| 0.683333
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002778
| 0.208791
| 455
| 24
| 78
| 18.958333
| 0.797222
| 0.140659
| 0
| 0
| 0
| 0
| 0.16188
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.333333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3ce427e7608fff21718948d99c9396b801b2425
| 670
|
py
|
Python
|
sweeper/cloud/localhost/manager.py
|
dominoFire/sweeper
|
26c5497b81c8d0c50671f8ab75c1cf5c4c8191c9
|
[
"MIT"
] | null | null | null |
sweeper/cloud/localhost/manager.py
|
dominoFire/sweeper
|
26c5497b81c8d0c50671f8ab75c1cf5c4c8191c9
|
[
"MIT"
] | null | null | null |
sweeper/cloud/localhost/manager.py
|
dominoFire/sweeper
|
26c5497b81c8d0c50671f8ab75c1cf5c4c8191c9
|
[
"MIT"
] | null | null | null |
__author__ = '@dominofire'
import os
from sweeper.cloud import resource_config_combinations
from sweeper.cloud.localhost import resource_config_factory as config_factory
from sweeper.resource import Resource
def possible_configs(num):
configs = config_factory.list_configs()
combs = resource_config_combinations(num, configs)
return combs
def create_resource(name, config_object):
res = Resource(config_object, name, 'localhost', None, None)
return res
def mount_distributed_file_system(name, vm_resources):
vm_first = vm_resources[0]
vm_first.execute_command('mkdir ./fileshare')
return os.path.join(os.getcwd(), 'fileshare')
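# Hedged usage sketch (assumes resource_config_factory defines at least one
# localhost configuration; each combination returned by possible_configs is
# assumed to be a sequence of config objects):
#
#   combs = possible_configs(2)   # all config combinations for two resources
#   res = create_resource('vm-0', combs[0][0])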
| 23.928571
| 77
| 0.773134
| 86
| 670
| 5.732558
| 0.465116
| 0.11359
| 0.064909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001742
| 0.143284
| 670
| 27
| 78
| 24.814815
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.068657
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0.25
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3ced405166d997be98745f69fe1f51cd0fcd9c9
| 3,193
|
py
|
Python
|
tfx/orchestration/experimental/core/service_jobs_test.py
|
BACtaki/tfx
|
29db845200beccbb0ffa1e1e1a091e314a3a470f
|
[
"Apache-2.0"
] | 1,813
|
2019-02-04T17:17:30.000Z
|
2022-03-29T13:39:30.000Z
|
tfx/orchestration/experimental/core/service_jobs_test.py
|
BACtaki/tfx
|
29db845200beccbb0ffa1e1e1a091e314a3a470f
|
[
"Apache-2.0"
] | 2,710
|
2019-02-14T00:41:00.000Z
|
2022-03-31T07:23:00.000Z
|
tfx/orchestration/experimental/core/service_jobs_test.py
|
BACtaki/tfx
|
29db845200beccbb0ffa1e1e1a091e314a3a470f
|
[
"Apache-2.0"
] | 731
|
2019-02-04T17:59:18.000Z
|
2022-03-31T06:45:51.000Z
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.service_jobs."""
from absl.testing.absltest import mock
import tensorflow as tf
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import test_utils
class ExceptionHandlingServiceJobManagerWrapperTest(test_utils.TfxTest):
def setUp(self):
super().setUp()
self._mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
self._mock_service_job_manager.ensure_node_services.return_value = (
service_jobs.ServiceStatus.SUCCESS)
self._mock_service_job_manager.stop_node_services.return_value = True
self._mock_service_job_manager.is_pure_service_node.return_value = True
self._mock_service_job_manager.is_mixed_service_node.return_value = False
self._wrapper = service_jobs.ExceptionHandlingServiceJobManagerWrapper(
self._mock_service_job_manager)
def test_calls_forwarded_to_underlying_instance(self):
self.assertEqual(service_jobs.ServiceStatus.SUCCESS,
self._wrapper.ensure_node_services(mock.Mock(), 'node1'))
self.assertTrue(self._wrapper.stop_node_services(mock.Mock(), 'node2'))
self.assertTrue(self._wrapper.is_pure_service_node(mock.Mock(), 'node3'))
self.assertFalse(self._wrapper.is_mixed_service_node(mock.Mock(), 'node4'))
self._mock_service_job_manager.ensure_node_services.assert_called_once_with(
mock.ANY, 'node1')
self._mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'node2')
self._mock_service_job_manager.is_pure_service_node.assert_called_once_with(
mock.ANY, 'node3')
self._mock_service_job_manager.is_mixed_service_node.assert_called_once_with(
mock.ANY, 'node4')
def test_ensure_node_services_exception_handling(self):
self._mock_service_job_manager.ensure_node_services.side_effect = RuntimeError(
'test error')
self.assertEqual(service_jobs.ServiceStatus.FAILED,
self._wrapper.ensure_node_services(mock.Mock(), 'node1'))
self._mock_service_job_manager.ensure_node_services.assert_called_once_with(
mock.ANY, 'node1')
def test_stop_node_services_exception_handling(self):
self._mock_service_job_manager.stop_node_services.side_effect = RuntimeError(
'test error')
self.assertFalse(self._wrapper.stop_node_services(mock.Mock(), 'node2'))
self._mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'node2')
if __name__ == '__main__':
tf.test.main()
| 46.275362
| 83
| 0.777639
| 427
| 3,193
| 5.435597
| 0.311475
| 0.048255
| 0.090478
| 0.108574
| 0.549763
| 0.479966
| 0.442051
| 0.442051
| 0.337785
| 0.17751
| 0
| 0.007249
| 0.135922
| 3,193
| 68
| 84
| 46.955882
| 0.833998
| 0.197307
| 0
| 0.266667
| 0
| 0
| 0.034578
| 0
| 0
| 0
| 0
| 0
| 0.266667
| 1
| 0.088889
| false
| 0
| 0.088889
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d03f04854e2e542f97a3c9c4b2caeaa5e05045
| 17,041
|
py
|
Python
|
dragonn/models.py
|
kundajelab/dragonn
|
431e7c6b94a82972ac0fc3ef76d76e9ce8ba67fc
|
[
"MIT"
] | 251
|
2016-06-20T20:18:27.000Z
|
2022-03-03T23:31:38.000Z
|
dragonn/models.py
|
kundajelab/dragonn
|
431e7c6b94a82972ac0fc3ef76d76e9ce8ba67fc
|
[
"MIT"
] | 39
|
2016-07-01T20:40:59.000Z
|
2022-02-09T23:30:24.000Z
|
dragonn/models.py
|
kundajelab/dragonn
|
431e7c6b94a82972ac0fc3ef76d76e9ce8ba67fc
|
[
"MIT"
] | 89
|
2016-06-09T17:59:21.000Z
|
2021-12-20T03:00:09.000Z
|
from __future__ import absolute_import, division, print_function
import matplotlib
import numpy as np
import os
import subprocess
import sys
import tempfile
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from abc import abstractmethod, ABCMeta
from dragonn.metrics import ClassificationResult
from sklearn.svm import SVC as scikit_SVC
from sklearn.tree import DecisionTreeClassifier as scikit_DecisionTree
from sklearn.ensemble import RandomForestClassifier
from keras.models import load_model
from dragonn.runtime_metrics import *
from dragonn.custom_losses import *
import warnings
warnings.filterwarnings('ignore')
def load_dragonn_model(model_string):
custom_objects={"recall":recall,
"sensitivity":recall,
"specificity":specificity,
"fpr":fpr,
"fnr":fnr,
"fdr":fdr,
"precision":precision,
"f1":f1,
"spearman_corr":spearman_corr,
"ambig_binary_crossentropy":ambig_binary_crossentropy,
"ambig_mean_squared_error":ambig_mean_squared_error}
model=load_model(model_string,custom_objects=custom_objects)
return model
class Model(object):
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, **hyperparameters):
pass
@abstractmethod
def train(self, X, y, validation_data):
pass
@abstractmethod
def predict(self, X):
pass
def test(self, X, y):
return ClassificationResult(y, self.predict(X))
def score(self, X, y, metric):
return self.test(X, y)[metric]
class SequenceDNN(Model):
"""
Sequence DNN models.
Parameters
----------
seq_length : int, optional
length of input sequence.
keras_model : instance of keras.models.Sequential, optional
seq_length or keras_model must be specified.
num_tasks : int, optional
number of tasks. Default: 1.
num_filters : list[int] | tuple[int]
number of convolutional filters in each layer. Default: (15,).
conv_width : list[int] | tuple[int]
width of each layer's convolutional filters. Default: (15,).
pool_width : int
width of max pooling after the last layer. Default: 35.
L1 : float
strength of L1 penalty.
dropout : float
dropout probability in every convolutional layer. Default: 0.
verbose: int
Verbosity level during training. Valid values: 0, 1, 2.
Returns
-------
Compiled DNN model.
"""
def __init__(self, seq_length=None, keras_model=None,
use_RNN=False, num_tasks=1,
num_filters=(15, 15, 15), conv_width=(15, 15, 15),
pool_width=35, GRU_size=35, TDD_size=15,
L1=0, dropout=0.0, num_epochs=100, verbose=1):
from keras.models import Sequential
from keras.layers.core import (
Activation, Dense, Dropout, Flatten,
Permute, Reshape, TimeDistributedDense  # TimeDistributedDense is needed by the use_RNN branch below
)
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.recurrent import GRU
from keras.regularizers import l1
self.num_tasks = num_tasks
self.num_epochs = num_epochs
self.verbose = verbose
self.train_metrics = []
self.valid_metrics = []
if keras_model is not None and seq_length is None:
self.model = keras_model
self.num_tasks = keras_model.layers[-1].output_shape[-1]
elif seq_length is not None and keras_model is None:
self.model = Sequential()
assert len(num_filters) == len(conv_width)
for i, (nb_filter, nb_col) in enumerate(zip(num_filters, conv_width)):
conv_height = 4 if i == 0 else 1
self.model.add(Convolution2D(
nb_filter=nb_filter, nb_row=conv_height,
nb_col=nb_col, activation='linear',
init='he_normal', input_shape=(1, 4, seq_length),
W_regularizer=l1(L1), b_regularizer=l1(L1)))
self.model.add(Activation('relu'))
self.model.add(Dropout(dropout))
self.model.add(MaxPooling2D(pool_size=(1, pool_width)))
if use_RNN:
num_max_pool_outputs = self.model.layers[-1].output_shape[-1]
self.model.add(Reshape((num_filters[-1], num_max_pool_outputs)))
self.model.add(Permute((2, 1)))
self.model.add(GRU(GRU_size, return_sequences=True))
self.model.add(TimeDistributedDense(TDD_size, activation='relu'))
self.model.add(Flatten())
self.model.add(Dense(output_dim=self.num_tasks))
self.model.add(Activation('sigmoid'))
self.model.compile(optimizer='adam', loss='binary_crossentropy')
else:
raise ValueError("Exactly one of seq_length or keras_model must be specified!")
def train(self, X, y, validation_data, early_stopping_metric='Loss',
early_stopping_patience=5, save_best_model_to_prefix=None):
if y.dtype != bool:
assert set(np.unique(y)) == {0, 1}
y = y.astype(bool)
multitask = y.shape[1] > 1
if not multitask:
num_positives = y.sum()
num_sequences = len(y)
num_negatives = num_sequences - num_positives
if self.verbose >= 1:
print('Training model (* indicates new best result)...')
X_valid, y_valid = validation_data
early_stopping_wait = 0
best_metric = np.inf if early_stopping_metric == 'Loss' else -np.inf
for epoch in range(1, self.num_epochs + 1):
self.model.fit(X, y, batch_size=128, nb_epoch=1,
class_weight={True: num_sequences / num_positives,
False: num_sequences / num_negatives}
if not multitask else None, verbose=self.verbose >= 2)
epoch_train_metrics = self.test(X, y)
epoch_valid_metrics = self.test(X_valid, y_valid)
self.train_metrics.append(epoch_train_metrics)
self.valid_metrics.append(epoch_valid_metrics)
if self.verbose >= 1:
print('Epoch {}:'.format(epoch))
print('Train {}'.format(epoch_train_metrics))
print('Valid {}'.format(epoch_valid_metrics), end='')
current_metric = epoch_valid_metrics[early_stopping_metric].mean()
if (early_stopping_metric == 'Loss') == (current_metric <= best_metric):
if self.verbose >= 1:
print(' *')
best_metric = current_metric
best_epoch = epoch
early_stopping_wait = 0
if save_best_model_to_prefix is not None:
self.save(save_best_model_to_prefix)
else:
if self.verbose >= 1:
print()
if early_stopping_wait >= early_stopping_patience:
break
early_stopping_wait += 1
if self.verbose >= 1:
print('Finished training after {} epochs.'.format(epoch))
if save_best_model_to_prefix is not None:
print("The best model's architecture and weights (from epoch {0}) "
'were saved to {1}.arch.json and {1}.weights.h5'.format(
best_epoch, save_best_model_to_prefix))
def predict(self, X):
return self.model.predict(X, batch_size=128, verbose=False)
def get_sequence_filters(self):
"""
Returns 3D array of 2D sequence filters.
"""
return self.model.layers[0].get_weights()[0].squeeze(axis=1)
@staticmethod
def _plot_scores(X, output_directory, peak_width, score_func, score_name):
from dragonn.plot import plot_bases_on_ax
scores = score_func(X).squeeze(axis=2) # (num_task, num_samples, num_bases, sequence_length)
try:
os.makedirs(output_directory)
except OSError:
pass
num_tasks = len(scores)
for task_index, task_scores in enumerate(scores):
for sequence_index, sequence_scores in enumerate(task_scores):
# sequence_scores is num_bases x sequence_length
basewise_max_sequence_scores = sequence_scores.max(axis=0)
plt.clf()
figure, (top_axis, bottom_axis) = plt.subplots(2)
top_axis.plot(range(1, len(basewise_max_sequence_scores) + 1),
basewise_max_sequence_scores)
top_axis.set_title('{} scores (motif highlighted)'.format(score_name))
peak_position = basewise_max_sequence_scores.argmax()
top_axis.axvspan(peak_position - peak_width, peak_position + peak_width,
color='grey', alpha=0.1)
peak_sequence_scores = sequence_scores[:, peak_position - peak_width :
peak_position + peak_width].T
# Set non-max letter_heights to zero
letter_heights = np.zeros_like(peak_sequence_scores)
letter_heights[np.arange(len(letter_heights)),
peak_sequence_scores.argmax(axis=1)] = \
basewise_max_sequence_scores[peak_position - peak_width :
peak_position + peak_width]
plot_bases_on_ax(letter_heights, bottom_axis)
bottom_axis.set_xticklabels(tuple(map(
str, np.arange(peak_position - peak_width, peak_position + peak_width + 1))))
bottom_axis.tick_params(axis='x', labelsize='small')
plt.xlabel('Position')
plt.ylabel('Score')
plt.savefig(os.path.join(output_directory, 'sequence_{}{}'.format(
sequence_index, '_task_{}'.format(task_index) if num_tasks > 1 else '')))
plt.close()
def plot_deeplift(self, X, output_directory, peak_width=10):
self._plot_scores(X, output_directory, peak_width,
score_func=self.deeplift, score_name='DeepLift')
def plot_in_silico_mutagenesis(self, X, output_directory, peak_width=10):
self._plot_scores(X, output_directory, peak_width,
score_func=self.in_silico_mutagenesis, score_name='ISM')
def plot_architecture(self, output_file):
from dragonn.visualize_util import plot as plot_keras_model
plot_keras_model(self.model, output_file, show_shape=True)
def save(self, save_best_model_to_prefix):
arch_fname = save_best_model_to_prefix + '.arch.json'
weights_fname = save_best_model_to_prefix + '.weights.h5'
with open(arch_fname, 'w') as arch_file:
    arch_file.write(self.model.to_json())
self.model.save_weights(weights_fname, overwrite=True)
@staticmethod
def load(model_hdf5_fname=None, arch_fname=None, weights_fname=None):
if model_hdf5_fname is not None:
from keras.models import load_model
sequence_dnn = SequenceDNN(keras_model=load_model(model_hdf5_fname))
else:
from keras.models import model_from_json
model_json_string = open(arch_fname).read()
sequence_dnn = SequenceDNN(keras_model=model_from_json(model_json_string))
if weights_fname is not None:
sequence_dnn.model.load_weights(weights_fname)
return sequence_dnn
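# Hedged usage sketch (not from the original source): a plausible train /
# save / load round trip, assuming one-hot input of shape
# (num_samples, 1, 4, seq_length); X_train, y_train, X_valid, y_valid are
# hypothetical arrays and the constructor arguments mirror the attributes
# used above.
#
#   dnn = SequenceDNN(seq_length=500, num_filters=(15,), conv_width=(15,))
#   dnn.train(X_train, y_train, validation_data=(X_valid, y_valid),
#             save_best_model_to_prefix='best_model')
#   dnn2 = SequenceDNN.load(arch_fname='best_model.arch.json',
#                           weights_fname='best_model.weights.h5')
#   predictions = dnn2.predict(X_valid)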
class MotifScoreRNN(Model):
def __init__(self, input_shape, gru_size=10, tdd_size=4):
from keras.models import Sequential
from keras.layers.core import (
Activation, Dense, Flatten, TimeDistributedDense
)
from keras.layers.recurrent import GRU
self.model = Sequential()
self.model.add(GRU(gru_size, return_sequences=True,
input_shape=input_shape))
if tdd_size is not None:
self.model.add(TimeDistributedDense(tdd_size))
self.model.add(Flatten())
self.model.add(Dense(1))
self.model.add(Activation('sigmoid'))
print('Compiling model...')
self.model.compile(optimizer='adam', loss='binary_crossentropy')
def train(self, X, y, validation_data):
from keras.callbacks import EarlyStopping
print('Training model...')
multitask = y.shape[1] > 1
if not multitask:
num_positives = y.sum()
num_sequences = len(y)
num_negatives = num_sequences - num_positives
self.model.fit(
X, y, batch_size=128, nb_epoch=100,
validation_data=validation_data,
class_weight={True: num_sequences / num_positives,
False: num_sequences / num_negatives}
if not multitask else None,
callbacks=[EarlyStopping(monitor='val_loss', patience=10)],
verbose=True)
def predict(self, X):
return self.model.predict(X, batch_size=128, verbose=False)
class gkmSVM(Model):
def __init__(self, prefix='./gkmSVM', word_length=11, mismatches=3, C=1,
threads=1, cache_memory=100, verbosity=4):
self.word_length = word_length
self.mismatches = mismatches
self.C = C
self.threads = threads
self.prefix = '_'.join(map(str, (prefix, word_length, mismatches, C)))
options_list = zip(
['-l', '-d', '-c', '-T', '-m', '-v'],
map(str, (word_length, mismatches, C, threads, cache_memory, verbosity)))
self.options = ' '.join([' '.join(option) for option in options_list])
@property
def model_file(self):
model_fname = '{}.model.txt'.format(self.prefix)
return model_fname if os.path.isfile(model_fname) else None
@staticmethod
def encode_sequence_into_fasta_file(sequence_iterator, ofname):
"""writes sequences into fasta file
"""
with open(ofname, "w") as wf:
for i, seq in enumerate(sequence_iterator):
print('>{}'.format(i), file=wf)
print(seq, file=wf)
def train(self, X, y, validation_data=None):
"""
Trains gkm-svm, saves model file.
"""
y = y.squeeze()
pos_sequence = X[y]
neg_sequence = X[~y]
pos_fname = "%s.pos_seq.fa" % self.prefix
neg_fname = "%s.neg_seq.fa" % self.prefix
# create temporary fasta files
self.encode_sequence_into_fasta_file(pos_sequence, pos_fname)
self.encode_sequence_into_fasta_file(neg_sequence, neg_fname)
# run command
command = ' '.join(
('gkmtrain', self.options, pos_fname, neg_fname, self.prefix))
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
process.wait() # wait for it to finish
# remove fasta files
os.system("rm %s" % pos_fname)
os.system("rm %s" % neg_fname)
def predict(self, X):
if self.model_file is None:
raise RuntimeError("GkmSvm hasn't been trained!")
# write test fasta file
test_fname = "%s.test.fa" % self.prefix
self.encode_sequence_into_fasta_file(X, test_fname)
# test gkmsvm
temp_ofp = tempfile.NamedTemporaryFile()
threads_option = '-T %s' % (str(self.threads))
command = ' '.join(['gkmpredict',
test_fname,
self.model_file,
temp_ofp.name,
threads_option])
process = subprocess.Popen(command, shell=True)
process.wait() # wait for it to finish
os.system("rm %s" % test_fname) # remove fasta file
# get classification results
temp_ofp.seek(0)
y = np.array([line.split()[-1] for line in temp_ofp], dtype=float)
temp_ofp.close()
return np.expand_dims(y, 1)
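# Hedged sanity check of the option-string assembly above (pure Python, no
# gkm binaries invoked); the values mirror the constructor defaults:
#   svm = gkmSVM()
#   svm.options == '-l 11 -d 3 -c 1 -T 1 -m 100 -v 4'
#   svm.prefix == './gkmSVM_11_3_1'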
class SVC(Model):
def __init__(self):
self.classifier = scikit_SVC(probability=True, kernel='linear')
def train(self, X, y, validation_data=None):
self.classifier.fit(X, y)
def predict(self, X):
return self.classifier.predict_proba(X)[:, 1:]
class DecisionTree(Model):
def __init__(self):
self.classifier = scikit_DecisionTree()
def train(self, X, y, validation_data=None):
self.classifier.fit(X, y)
def predict(self, X):
predictions = np.asarray(self.classifier.predict_proba(X))[..., 1]
if len(predictions.shape) == 2: # multitask
predictions = predictions.T
else: # single-task
predictions = np.expand_dims(predictions, 1)
return predictions
class RandomForest(DecisionTree):
def __init__(self):
self.classifier = RandomForestClassifier(n_estimators=100)
| 41.261501
| 101
| 0.607007
| 2,039
| 17,041
| 4.835213
| 0.192251
| 0.030125
| 0.019475
| 0.012172
| 0.311086
| 0.252865
| 0.19292
| 0.177097
| 0.140379
| 0.113602
| 0
| 0.012492
| 0.295347
| 17,041
| 412
| 102
| 41.36165
| 0.808544
| 0.072707
| 0
| 0.232919
| 0
| 0
| 0.050096
| 0.003135
| 0
| 0
| 0
| 0
| 0.006211
| 1
| 0.096273
| false
| 0.012422
| 0.093168
| 0.015528
| 0.248447
| 0.040373
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d07703df62a187a4037e7b46931b65c218c987
| 3,921
|
py
|
Python
|
dgt/inference/forward_inference.py
|
fractalego/dgt
|
6781b9445d93c4a1680ab3d5636803c81062cc67
|
[
"MIT"
] | 3
|
2021-07-26T02:07:15.000Z
|
2021-12-21T22:36:15.000Z
|
dgt/inference/forward_inference.py
|
fractalego/dgt
|
6781b9445d93c4a1680ab3d5636803c81062cc67
|
[
"MIT"
] | null | null | null |
dgt/inference/forward_inference.py
|
fractalego/dgt
|
6781b9445d93c4a1680ab3d5636803c81062cc67
|
[
"MIT"
] | null | null | null |
import logging
import random
from dgt.graph.graph_matcher import GraphWeightedMatch
from dgt.utils import graph_iterations
_logger = logging.getLogger(__name__)
def find_weight_between(s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
return s[start:end]
except ValueError:
return 1  # default weight when the delimiters are absent
def clean_between(s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
new_s = s[:start - 1] + s[end + 1:]
return new_s
except ValueError:
return s
def eliminate_spaces(line):
line = line.replace(' ', '')
line = line.replace('\t', '')
line = line.replace('\n', '')
return line
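# Hedged examples for the string helpers above; the inputs are illustrative.
assert find_weight_between('a|0.5|b', '|', '|') == '0.5'
assert find_weight_between('no markers here', '|', '|') == 1  # default weight
assert clean_between('a|0.5|b', '|', '|') == 'ab'
assert eliminate_spaces(' a\tb\nc ') == 'abc'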
class UniqueNamesModifier:
def apply(self, g):
from ..auxiliary import get_random_name
substitution_dict = {}
for v in g.vs:
random_name = get_random_name()
old_name = v['name']
new_name = old_name + random_name
v['name'] = new_name
substitution_dict[old_name] = new_name
try:
for v in g.vs:
referring_name = v['refers_to']
if referring_name:
v['refers_to'] = substitution_dict[referring_name]
except Exception as e:
_logger.warning("Exception while substituting refers_to ID: " + str(e))
for e in g.es:
e['name'] += get_random_name()
class BaseForwardInference:
def compute(self):
return None
class ForwardInference(BaseForwardInference):
_unique = UniqueNamesModifier()
def __init__(self, data, knowledge, permutation_shift, max_depth=1):
self.permutations = permutation_shift
self.data = data
self.knowledge = knowledge
self._max_depth = max_depth
self.permutation_shift = permutation_shift
def __apply_clause_to_graph(self, rule, data, i):
drs = data.copy()
drs.visit(self._unique)
w = 1
iterations = graph_iterations(drs._g)
if not iterations:
return drs, 0
drs._g = iterations[self.permutations[i] % len(iterations)]
if not rule.gradient:
weighted_match = GraphWeightedMatch(rule.get_hypothesis(), self.knowledge._metric,
self.knowledge._relations_metric)
w = drs.visit(weighted_match)
is_match = drs.visit(rule)
if not is_match:
return drs, 0
return drs, w
def _compute_step(self, data_tuple, i):
"""
Applies all the rules to a drs
:return: all the variants of the drs after a rule match, as triples (<NEW_DRS>, <WEIGHT>, <RULE_PATH>)
"""
data = data_tuple[0]
prior_w = data_tuple[1]
clauses = self.knowledge.ask_rule(data)
results = []
for clause_tuple in clauses:
rule = clause_tuple[0]
rule_weight = rule.weight
prior_rules = list(data_tuple[2])
if rule in prior_rules: # A rule can be used only once per path
continue
drs, w = self.__apply_clause_to_graph(rule, data, i)
if w > 0:
prior_rules.append(rule)
prior_rules.append(drs)
results.append((drs, prior_w * w * rule_weight, prior_rules))
return results
def compute(self):
results = []
to_process = [(self.data, 1, [self.data])]
for i in range(self._max_depth):
new_results = []
for data_tuple in to_process:
new_results += self._compute_step(data_tuple, i)
if not new_results:
break
to_process = sorted(new_results, key=lambda x: -x[1])
results += to_process
results = sorted(results, key=lambda x: -x[1])
return results
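# Hedged usage sketch (illustrative only): `data` and `knowledge` stand for
# objects satisfying the interfaces used above, not real instances.
#
#   inference = ForwardInference(data, knowledge,
#                                permutation_shift=[0, 0], max_depth=2)
#   for drs, weight, rule_path in inference.compute():
#       print(weight)  # variants arrive sorted by descending weight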
| 30.632813
| 95
| 0.579444
| 479
| 3,921
| 4.524008
| 0.263048
| 0.024919
| 0.020766
| 0.01569
| 0.118136
| 0.074758
| 0.057222
| 0.057222
| 0.057222
| 0.057222
| 0
| 0.005693
| 0.327978
| 3,921
| 127
| 96
| 30.874016
| 0.816698
| 0.040041
| 0
| 0.188119
| 0
| 0
| 0.0209
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089109
| false
| 0
| 0.049505
| 0.009901
| 0.287129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d0a689ffb0010c1b8ab3fafb0b2e1dd2c2562d
| 1,528
|
py
|
Python
|
serverPythonClient/client.py
|
ikekilinc/dnnSuperBinoculars
|
b0fc584b1d449961bdbab37cf9d72c0b466f197f
|
[
"MIT"
] | null | null | null |
serverPythonClient/client.py
|
ikekilinc/dnnSuperBinoculars
|
b0fc584b1d449961bdbab37cf9d72c0b466f197f
|
[
"MIT"
] | null | null | null |
serverPythonClient/client.py
|
ikekilinc/dnnSuperBinoculars
|
b0fc584b1d449961bdbab37cf9d72c0b466f197f
|
[
"MIT"
] | null | null | null |
import argparse
import cv2
import common
# from .utils.cropAtCenter import cropImageCenter
# from cropAtCenter import cropImageCenter
from gabriel_client.websocket_client import WebsocketClient
from gabriel_client.opencv_adapter import OpencvAdapter
DEFAULT_SERVER_HOST = '128.2.212.50'
DEFAULT_ZOOM_FACTOR = 10
def preprocess(frame):
# return frame
print(type(frame), frame.shape)
width, height = frame.shape[1], frame.shape[0]
left = int(width/2 * (1 - 1/DEFAULT_ZOOM_FACTOR))
top = int(height/2 * (1 - 1/DEFAULT_ZOOM_FACTOR))
right = int(width/2 * (1 + 1/DEFAULT_ZOOM_FACTOR))
bottom = int(height/2 * (1 + 1/DEFAULT_ZOOM_FACTOR))
cropped_frame = frame[top:bottom, left:right]
return cropped_frame
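# Hedged sanity check of the center-crop arithmetic above; the synthetic
# frame is illustrative and numpy is assumed available alongside cv2.
def _check_preprocess():
    import numpy as np
    frame = np.zeros((500, 1000, 3), dtype=np.uint8)  # height 500, width 1000
    # central tenth in each dimension: rows 225:275, columns 450:550
    assert preprocess(frame).shape == (50, 100, 3)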
def produce_extras():
return None
def consume_frame(frame, _):
cv2.imshow('Image from server', frame)
cv2.waitKey(1)
def main():
common.configure_logging()
parser = argparse.ArgumentParser()
parser.add_argument(
'source_name', nargs='?', default=common.DEFAULT_SOURCE_NAME)
parser.add_argument('server_host', nargs='?', default=DEFAULT_SERVER_HOST)
args = parser.parse_args()
capture = cv2.VideoCapture(0)
opencv_adapter = OpencvAdapter(
preprocess, produce_extras, consume_frame, capture, args.source_name)
client = WebsocketClient(
args.server_host, common.WEBSOCKET_PORT,
opencv_adapter.get_producer_wrappers(), opencv_adapter.consumer)
client.launch()
if __name__ == '__main__':
main()
| 25.898305
| 78
| 0.719241
| 193
| 1,528
| 5.440415
| 0.362694
| 0.052381
| 0.080952
| 0.038095
| 0.108571
| 0.108571
| 0.108571
| 0.108571
| 0
| 0
| 0
| 0.024564
| 0.174084
| 1,528
| 58
| 79
| 26.344828
| 0.807448
| 0.066099
| 0
| 0
| 0
| 0
| 0.042867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.135135
| 0.027027
| 0.297297
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d10c9654ae4266e8db0dc3b63e312a5537bc75
| 719
|
py
|
Python
|
src/DeepCard.API/batch.py
|
SharsDela/BankCardRecognize
|
ce80589bc5a5afaba2b97b1ccab35354fb99b548
|
[
"Apache-2.0"
] | 7
|
2019-09-01T13:36:52.000Z
|
2021-05-20T19:38:40.000Z
|
src/DeepCard.API/batch.py
|
SharsDela/BankCardRecognize
|
ce80589bc5a5afaba2b97b1ccab35354fb99b548
|
[
"Apache-2.0"
] | 1
|
2019-09-01T13:37:50.000Z
|
2020-09-18T10:35:20.000Z
|
src/DeepCard.API/batch.py
|
SharsDela/BankCardRecognize
|
ce80589bc5a5afaba2b97b1ccab35354fb99b548
|
[
"Apache-2.0"
] | 2
|
2020-02-03T01:57:36.000Z
|
2020-03-05T11:19:14.000Z
|
from api import get_result
import os
import shutil
from glob import glob
from PIL import Image
if __name__ == '__main__':
image_files = glob('./test_images/*.*')
result_dir = './test_results'
if os.path.exists(result_dir):
shutil.rmtree(result_dir)
os.mkdir(result_dir)
txt_file = os.path.join(result_dir, 'result.txt')
txt_f = open(txt_file, 'w')
for image_file in sorted(image_files):
if ".gitkeep" in image_files:
continue
print("Finded file", image_file, end=" ")
result = get_result(Image.open(image_file))
print(":", result)
txt_f.write(image_file.split('/')[-1].split('.')[0] + ':' + result + '\n')
txt_f.close()
| 28.76
| 82
| 0.623088
| 100
| 719
| 4.19
| 0.42
| 0.107399
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003604
| 0.228095
| 719
| 25
| 83
| 28.76
| 0.751351
| 0
| 0
| 0
| 0
| 0
| 0.105556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.238095
| 0
| 0.238095
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d15d48b5db9739108b6ecc4d1923cf6d0d654b
| 4,106
|
py
|
Python
|
CIM14/ENTSOE/Equipment/Core/Curve.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 58
|
2015-04-22T10:41:03.000Z
|
2022-03-29T16:04:34.000Z
|
CIM14/ENTSOE/Equipment/Core/Curve.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 12
|
2015-08-26T03:57:23.000Z
|
2020-12-11T20:14:42.000Z
|
CIM14/ENTSOE/Equipment/Core/Curve.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 35
|
2015-01-10T12:21:03.000Z
|
2020-09-09T08:18:16.000Z
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Equipment.Core.IdentifiedObject import IdentifiedObject
class Curve(IdentifiedObject):
"""A multi-purpose curve or functional relationship between an independent variable (X-axis) and dependent (Y-axis) variables.
"""
def __init__(self, y1Unit="A", curveStyle="straightLineYValues", xUnit="A", CurveDatas=None, *args, **kw_args):
"""Initialises a new 'Curve' instance.
@param y1Unit: The Y1-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
@param curveStyle: The style or shape of the curve. Values are: "straightLineYValues", "rampYValue", "constantYValue", "formula"
@param xUnit: The X-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
@param CurveDatas: The point data values that define a curve
"""
#: The Y1-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
self.y1Unit = y1Unit
#: The style or shape of the curve. Values are: "straightLineYValues", "rampYValue", "constantYValue", "formula"
self.curveStyle = curveStyle
#: The X-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
self.xUnit = xUnit
self._CurveDatas = []
self.CurveDatas = [] if CurveDatas is None else CurveDatas
super(Curve, self).__init__(*args, **kw_args)
_attrs = ["y1Unit", "curveStyle", "xUnit"]
_attr_types = {"y1Unit": str, "curveStyle": str, "xUnit": str}
_defaults = {"y1Unit": "A", "curveStyle": "straightLineYValues", "xUnit": "A"}
_enums = {"y1Unit": "UnitSymbol", "curveStyle": "CurveStyle", "xUnit": "UnitSymbol"}
_refs = ["CurveDatas"]
_many_refs = ["CurveDatas"]
def getCurveDatas(self):
"""The point data values that define a curve
"""
return self._CurveDatas
def setCurveDatas(self, value):
for x in self._CurveDatas:
x.Curve = None
for y in value:
y._Curve = self
self._CurveDatas = value
CurveDatas = property(getCurveDatas, setCurveDatas)
def addCurveDatas(self, *CurveDatas):
for obj in CurveDatas:
obj.Curve = self
def removeCurveDatas(self, *CurveDatas):
for obj in CurveDatas:
obj.Curve = None
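# Hedged illustration of the two-way linkage maintained above; `FakeDatum`
# is a stand-in with a plain `Curve` attribute, not the real CIM CurveData.
#
#   datum = FakeDatum()
#   curve.addCurveDatas(datum)     # sets datum.Curve = curve
#   curve.removeCurveDatas(datum)  # resets datum.Curve = None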
| 52.641026
| 277
| 0.614467
| 574
| 4,106
| 4.355401
| 0.329268
| 0.0352
| 0.0176
| 0.0288
| 0.3312
| 0.3312
| 0.2976
| 0.2976
| 0.2384
| 0.2384
| 0
| 0.011057
| 0.207014
| 4,106
| 77
| 278
| 53.324675
| 0.756757
| 0.640039
| 0
| 0.066667
| 0
| 0
| 0.125804
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.033333
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d2324b7f134c8871f8f82a96cc6abc0a30b3ea
| 2,432
|
py
|
Python
|
fluent/syntax/errors.py
|
unclenachoduh/python-fluent
|
1d15bdc94a37ecb488a80aefcdd37b8cb5535f73
|
[
"Apache-2.0"
] | null | null | null |
fluent/syntax/errors.py
|
unclenachoduh/python-fluent
|
1d15bdc94a37ecb488a80aefcdd37b8cb5535f73
|
[
"Apache-2.0"
] | null | null | null |
fluent/syntax/errors.py
|
unclenachoduh/python-fluent
|
1d15bdc94a37ecb488a80aefcdd37b8cb5535f73
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
class ParseError(Exception):
def __init__(self, code, *args):
self.code = code
self.args = args
self.message = get_error_message(code, args)
def get_error_message(code, args):
if code == 'E0001':
return 'Generic error'
if code == 'E0002':
return 'Expected an entry start'
if code == 'E0003':
return 'Expected token: "{}"'.format(args[0])
if code == 'E0004':
return 'Expected a character from range: "{}"'.format(args[0])
if code == 'E0005':
msg = 'Expected message "{}" to have a value or attributes'
return msg.format(args[0])
if code == 'E0006':
msg = 'Expected term "{}" to have a value'
return msg.format(args[0])
if code == 'E0007':
return 'Keyword cannot end with a whitespace'
if code == 'E0008':
return 'The callee has to be a simple, upper-case identifier'
if code == 'E0009':
return 'The key has to be a simple identifier'
if code == 'E0010':
return 'Expected one of the variants to be marked as default (*)'
if code == 'E0011':
return 'Expected at least one variant after "->"'
if code == 'E0012':
return 'Expected value'
if code == 'E0013':
return 'Expected variant key'
if code == 'E0014':
return 'Expected literal'
if code == 'E0015':
return 'Only one variant can be marked as default (*)'
if code == 'E0016':
return 'Message references cannot be used as selectors'
if code == 'E0017':
return 'Variants cannot be used as selectors'
if code == 'E0018':
return 'Attributes of messages cannot be used as selectors'
if code == 'E0019':
return 'Attributes of terms cannot be used as placeables'
if code == 'E0020':
return 'Unterminated string expression'
if code == 'E0021':
return 'Positional arguments must not follow named arguments'
if code == 'E0022':
return 'Named arguments must be unique'
if code == 'E0023':
return 'VariantLists are only allowed inside of other VariantLists.'
if code == 'E0024':
return 'Cannot access variants of a message.'
if code == 'E0025':
return 'Unknown escape sequence: {}'.format(args[0])
if code == 'E0026':
return 'Invalid Unicode escape sequence: {}'.format(args[0])
return code
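# Hedged usage examples for the error machinery above; the code/argument
# pairs are illustrative.
assert ParseError('E0003', '}').message == 'Expected token: "}"'
assert get_error_message('E9999', ()) == 'E9999'  # unknown codes fall through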
| 36.298507
| 76
| 0.60773
| 307
| 2,432
| 4.771987
| 0.374593
| 0.106485
| 0.045051
| 0.044369
| 0.238225
| 0.12628
| 0.094881
| 0
| 0
| 0
| 0
| 0.063356
| 0.279605
| 2,432
| 66
| 77
| 36.848485
| 0.772831
| 0
| 0
| 0.032258
| 0
| 0
| 0.441612
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.016129
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d2e1e0e46f7f6e0817c75f138edaf65c103137
| 14,084
|
py
|
Python
|
twitterinfrastructure/CH-Data-Public.py
|
jacob-heglund/socialsensing-jh
|
fd6d2d749f40fee46bee749ff868212bf117a747
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
twitterinfrastructure/CH-Data-Public.py
|
jacob-heglund/socialsensing-jh
|
fd6d2d749f40fee46bee749ff868212bf117a747
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
twitterinfrastructure/CH-Data-Public.py
|
jacob-heglund/socialsensing-jh
|
fd6d2d749f40fee46bee749ff868212bf117a747
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
'''
Created on Mar 22, 2018
Edited on Jan 11, 2019
@author: npvance2
@author: curtisd2
Variables that will need to be edited/personalized:
monitorID in Variables() (line 27)
projectStartDate in Variables() (line 28)
projectEndDate in Variables() (line 29)
authToken in getAuthToken() (line 49)
consumer_key in twitterAPI() (line 62)
consumer_secret in twitterAPI() (line 63)
access_token in twitterAPI() (line 64)
access_secret in twitterAPI() (line 65)
'''
from datetime import date, timedelta
import urllib.request
import json
import csv
import tweepy
from tweepy import OAuthHandler
def Variables():
monitorID = "9926183772" # The numerical ID for your Crimson Hexagon monitor
startDate = "yyyy-mm-dd" # Date must be in yyyy-mm-dd format
endDate = "yyyy-mm-dd" # Date must be in yyyy-mm-dd format
variableMap = {}
variableMap['monitorID'] = monitorID
variableMap['startDate'] = startDate
variableMap['endDate'] = endDate
return variableMap
def getURL(): #provides URL for Crimson API
urlStart = "https://api.crimsonhexagon.com/api"
return urlStart
###########
#
# You'll need to generate your own Crimson API key/token from here:
# https://apidocs.crimsonhexagon.com/reference
#
###########
def getAuthToken(): #provides auth token needed to access Crimson API
authToken = ''
authToken = "&auth="+authToken
return authToken
###########
#
# You'll need to add your own Twitter API keys here.
# Instructions on generating API keys: https://developer.twitter.com/en/docs/basics/authentication/guides/access-tokens.html
# API reference guide: https://developer.twitter.com/en/docs/api-reference-index.html
#
###########
def twitterAPI(): #Provides access keys for Twitter API
    consumer_key = ''
    consumer_secret = ''
    access_token = ''
    access_secret = ''
if (consumer_key == '') or (consumer_secret =='') or (access_token =='') or (access_secret ==''):
print("Not all Twitter keys have been entered, please add them to the script and try again")
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
return api
def getTwitterURL(): #provides URL for Twitter api
urlStart = "https://api.twitter.com/1.1/statuses/lookup.json?id="
return urlStart
def DatePull(startdate, enddate):
listArray = []
startdate = date(int(startdate[0:4]), int(startdate[5:7]), int(startdate[8:10]))
enddate = date(int(enddate[0:4]), int(enddate[5:7]), int(enddate[8:10]))
while startdate <= enddate:
listArray.append(str(startdate))
startdate += timedelta(days=1)
return listArray
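# Hedged check of the date-range helper above: both endpoints are inclusive.
assert DatePull('2018-03-22', '2018-03-24') == ['2018-03-22', '2018-03-23', '2018-03-24']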
def main():
monitorID = Variables()['monitorID']
projectStartDate = Variables()['startDate']
projectEndDate = Variables()['endDate']
fPath = "Monitor-"+monitorID+'-from-'+projectStartDate+'-to-'+projectEndDate+'.csv'
lineArray = DatePull(projectStartDate, projectEndDate)
print("------------------------------")
print("MonitorID is "+monitorID)
print(lineArray[0],lineArray[-1])
with open(fPath, 'w', newline = '', encoding = 'utf-8') as f:
writer = csv.writer(f)
header = ["PostType","PostDate","PostTime","URL","TweetID","Contents","RetweetCount","FavoriteCount","Location","Language","Sentiment","NeutralScore","PositiveScore","NegativeScore","Followers","Friends","Author","AuthorGender","AuthorTweets"]
writer.writerow(header)
for i in range(len(lineArray)-1):
print(lineArray[i])
startDate = lineArray[i]
endDate = lineArray[i+1]
dates = "&start="+startDate+"&end="+endDate #Combines start and end date into format needed for API call
urlStart = getURL() #Gets URL
authToken = getAuthToken() #Gets auth token
endpoint = "/monitor/posts?id="; #endpoint needed for this query
extendLimit = "&extendLimit=true" #extends call number from 500 to 10,000
fullContents = "&fullContents=true" #Brings back full contents for Blog and Tumblr posts which are usually truncated around search keywords. This can occasionally disrupt CSV formatting.
urlData = urlStart+endpoint+monitorID+authToken+dates+extendLimit+fullContents #Combines all API calls parts into full URL
webURL = urllib.request.urlopen(urlData)
if (webURL.getcode() == 200):
with open(fPath, 'a', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
data = webURL.read().decode('utf8')
theJSON = json.loads(data)
postDates = [] #These initialize the attributes of the final output
postTimes = []
urls = []
contents = []
authors = []
authorGenders = []
locations = []
languages = []
postTypes = []
sentiments = []
neutralScore = []
positiveScore = []
negativeScore = []
tweetIDs = []
followers = []
friends = []
retweetCounts = []
favoritesCount = []
statusesCount = []
tweetCount = 0
tempTweetIDs = []
api = twitterAPI()
c = 0
for i in theJSON["posts"]:
postDates.append("")
postTimes.append("")
if ('date' in i): #identifies date posted
tempDate = str(i["date"])
dateTime = tempDate.split("T")
postDates[c] = dateTime[0]
postTimes[c] = dateTime[1]
urls.append(i["url"])
contents.append("")
if ('contents' in i): #identifies post contents
contents[c] = i["contents"].replace(",","").replace("\n"," ") #replaces commas and new lines to facilitate CSV formatting, this occasionally missed new lines in some blog posts which I'm working to fix
authors.append("")
if ('author' in i): #identifies author
authors[c] = i["author"].replace(",","")
authorGenders.append("")
if ('authorGender' in i): #identifies author gender
authorGenders[c] = i["authorGender"]
locations.append("")
if ('location' in i): #identifies location
locations[c] = i["location"].replace(",","")
languages.append("")
if ('language' in i): #identifies language specified in the author's profile
languages[c] = i["language"]
postTypes.append(i["type"]) #identifies the type of post, i.e. Twitter, Tumblr, Blog
tweetIDs.append("")
followers.append("")
friends.append("")
retweetCounts.append("")
favoritesCount.append("")
statusesCount.append("")
if postTypes[c] == "Twitter": #if the post type is Twitter it goes through more processing
tweetCount = tweetCount + 1 #counts number of tweets
tweetSplit = urls[c].split("status/") #splits URL to get tweetID
tweetIDs[c] = tweetSplit[1]
tempTweetIDs.append(tweetIDs[c])
if tweetCount == 100: #the max number of TweetIDs in one API call is 100 so a call is run every 100 tweets identified
tweepys = api.statuses_lookup(id_=tempTweetIDs) #call to Twitter API
for tweet in tweepys:
tempID = tweet.id_str #finds tweetsID
postMatch = 0
for idMatch in tweetIDs:
if idMatch==tempID: #matches tweetID in Twitter API call to tweetID stored from Crimson API
tempDate = str(tweet.created_at).replace(" "," ") #These all fill the matching Crimson attributes to those found in the Twitter API
dateTime = tempDate.split(" ")
postDates[postMatch] = dateTime[0]
postTimes[postMatch] = dateTime[1]
contents[postMatch] = tweet.text.replace(",","")
authors[postMatch] = tweet.author.screen_name
followers[postMatch] = str(tweet.author.followers_count)
friends[postMatch] = str(tweet.author.friends_count)
retweetCounts[postMatch] = str(tweet.retweet_count)
favoritesCount[postMatch] = str(tweet.favorite_count)
statusesCount[postMatch] = str(tweet.author.statuses_count)
postMatch = postMatch + 1
tweetCount = 0 #clears tweet count for a new 100
tempTweetIDs = [] #clears tweetIDs for next call
sentiments.append("")
neutralScore.append("")
positiveScore.append("")
negativeScore.append("")
if ('categoryScores' in i): #finds sentiment value and matching attribute
for l in i["categoryScores"]:
catName = l["categoryName"]
if catName == "Basic Neutral":
neutralScore[c] = l["score"]
elif catName =="Basic Positive":
positiveScore[c] = l["score"]
elif catName == "Basic Negative":
negativeScore[c] = l["score"]
if neutralScore[c] > positiveScore[c] and neutralScore[c] > negativeScore[c]:
sentiments[c] = "Basic Neutral"
if positiveScore[c] > neutralScore[c] and positiveScore[c] > negativeScore[c]:
sentiments[c] = "Basic Positive"
if negativeScore[c] > positiveScore[c] and negativeScore[c] > neutralScore[c]:
sentiments[c] = "Basic Negative"
c = c + 1
if len(tempTweetIDs) != 0: #after loop the Twitter API call must run one more time to clean up all the tweets since the last 100
try:
tweepys = api.statuses_lookup(id_=tempTweetIDs)
for tweet in tweepys:
tempID = tweet.id_str
postMatch = 0
for idMatch in tweetIDs:
if idMatch==tempID:
tempDate = str(tweet.created_at).replace(" "," ")
dateTime = tempDate.split(" ")
postDates[postMatch] = dateTime[0]
postTimes[postMatch] = dateTime[1]
contents[postMatch] = tweet.text.replace(",","")
authors[postMatch] = tweet.author.screen_name
followers[postMatch] = str(tweet.author.followers_count)
friends[postMatch] = str(tweet.author.friends_count)
retweetCounts[postMatch] = str(tweet.retweet_count)
favoritesCount[postMatch] = str(tweet.favorite_count)
statusesCount[postMatch] = str(tweet.author.statuses_count)
postMatch = postMatch + 1
tweetCount = 0
except Exception:
print("Tweepy error: skipping cleanup")
pC = 0
for pDate in postDates: #iterates through the word lists and prints matching posts to CSV
csvRow=[postTypes[pC], pDate, postTimes[pC], urls[pC], str(tweetIDs[pC]), contents[pC].replace("\n"," "), retweetCounts[pC], favoritesCount[pC], locations[pC], languages[pC], sentiments[pC], str(neutralScore[pC]), str(positiveScore[pC]), str(negativeScore[pC]), followers[pC], friends[pC], authors[pC], authorGenders[pC], statusesCount[pC]]
writer.writerow(csvRow)
pC = pC + 1
else:
print("Server Error, No Data" + str(webURL.getcode())) #displays error if Crimson URL fails
if __name__ == '__main__':
main()
| 47.103679
| 360
| 0.510934
| 1,266
| 14,084
| 5.64218
| 0.274092
| 0.01344
| 0.0238
| 0.01932
| 0.196556
| 0.196556
| 0.153437
| 0.153437
| 0.144197
| 0.131597
| 0
| 0.017683
| 0.389662
| 14,084
| 298
| 361
| 47.261745
| 0.813285
| 0.183187
| 0
| 0.218447
| 0
| 0
| 0.094766
| 0.017533
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033981
| false
| 0
| 0.029126
| 0
| 0.092233
| 0.033981
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d3652391aca6bc7ecc488069329c58736eb71f
| 1,286
|
py
|
Python
|
roles/slurm/files/startnode.py
|
danhnguyen48/slurm-elastic-computing
|
0793cf23677169a6d9dceea0793118bc00c0913e
|
[
"MIT"
] | null | null | null |
roles/slurm/files/startnode.py
|
danhnguyen48/slurm-elastic-computing
|
0793cf23677169a6d9dceea0793118bc00c0913e
|
[
"MIT"
] | null | null | null |
roles/slurm/files/startnode.py
|
danhnguyen48/slurm-elastic-computing
|
0793cf23677169a6d9dceea0793118bc00c0913e
|
[
"MIT"
] | null | null | null |
#! /opt/cloud_sdk/bin/python
import asyncio
import logging
import subprocess
import sys
import citc_cloud
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
log.critical("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
async def main() -> None:
nodespace = citc_cloud.get_nodespace()
keys_file = "/home/slurm/opc_authorized_keys"
with open(keys_file) as kf:
ssh_keys = kf.read()
hosts = subprocess.run(["scontrol", "show", "hostnames", sys.argv[1]], stdout=subprocess.PIPE).stdout.decode().split()
await asyncio.gather(*(
citc_cloud.start_node(log, host, nodespace, ssh_keys)
for host in hosts
))
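# For reference: `scontrol show hostnames` expands a Slurm hostlist
# expression into one name per line, e.g. (node names illustrative)
#   $ scontrol show hostnames 'vm-[1-3]'
#   vm-1
#   vm-2
#   vm-3
# so `hosts` above becomes ['vm-1', 'vm-2', 'vm-3'].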
sys.excepthook = handle_exception
if __name__ == "__main__":
log = logging.getLogger("startnode")
log.setLevel(logging.INFO)
handler = logging.FileHandler('/var/log/slurm/elastic.log')
formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main())
finally:
loop.close()
| 26.244898
| 122
| 0.694401
| 162
| 1,286
| 5.259259
| 0.530864
| 0.032864
| 0.035211
| 0.052817
| 0.09507
| 0.09507
| 0
| 0
| 0
| 0
| 0
| 0.003795
| 0.180404
| 1,286
| 48
| 123
| 26.791667
| 0.804554
| 0.020995
| 0
| 0
| 0
| 0
| 0.130366
| 0.04531
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.151515
| 0
| 0.212121
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d474d2b653dcd5a9578ce3979ff7a04e191213
| 2,300
|
py
|
Python
|
tests/pyre/components/component_class_registration_model.py
|
BryanRiel/pyre
|
179359634a7091979cced427b6133dd0ec4726ea
|
[
"BSD-3-Clause"
] | null | null | null |
tests/pyre/components/component_class_registration_model.py
|
BryanRiel/pyre
|
179359634a7091979cced427b6133dd0ec4726ea
|
[
"BSD-3-Clause"
] | null | null | null |
tests/pyre/components/component_class_registration_model.py
|
BryanRiel/pyre
|
179359634a7091979cced427b6133dd0ec4726ea
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Verify that component registration interacts correctly with the pyre configurator model
"""
# access
# print(" -- importing pyre")
import pyre
# print(" -- done")
def declare():
# declare a protocol
class protocol(pyre.protocol):
"""a protocol"""
# properties
p1 = pyre.properties.str()
p2 = pyre.properties.str()
# behavior
@pyre.provides
def do(self):
"""behave"""
# declare a component
class component(pyre.component, family="test", implements=protocol):
"""a component"""
# traits
p1 = pyre.properties.str(default="p1")
p2 = pyre.properties.str(default="p2")
@pyre.export
def do(self):
"""behave"""
return "component"
return component
def test():
# and the model
model = pyre.executive.nameserver
# model.dump(pattern='test')
# print(" -- making some configuration changes")
# add an assignment
model['test.p1'] = 'step 1'
# an alias
model.alias(alias='p1', target='test.p1')
# and a reference to the alias
model['ref'] = '{p1}'
# check that they point to the same slot
assert model.retrieve(name='p1') == model.retrieve(name='test.p1')
# save the nodes
ref = model.retrieve(name='ref')
step_0 = model.retrieve(name='test.p1')
# now declare the component and its protocol
# print(" -- declaring components")
component = declare()
# print(" -- done")
# model.dump(pattern='')
assert component.p1 == 'step 1'
assert component.p2 == 'p2'
# check that the model is as we expect
# model.dump()
assert model['test.p1'] == component.p1
assert model['test.p2'] == component.p2
# how about the alias and the reference?
assert model['ref'] == component.p1
assert model['p1'] == component.p1
# make a late registration to what is now the component trait
model['test.p2'] = 'step 2'
# model.dump(pattern='test')
# and check
assert component.p1 == 'step 1'
assert component.p2 == 'step 2'
return
# main
if __name__ == "__main__":
test()
# end of file
| 22.772277
| 87
| 0.59913
| 283
| 2,300
| 4.837456
| 0.367491
| 0.021914
| 0.049671
| 0.027757
| 0.090577
| 0.056976
| 0.056976
| 0.056976
| 0
| 0
| 0
| 0.02472
| 0.261304
| 2,300
| 100
| 88
| 23
| 0.781048
| 0.37913
| 0
| 0.114286
| 0
| 0
| 0.091508
| 0
| 0
| 0
| 0
| 0
| 0.257143
| 1
| 0.114286
| false
| 0
| 0.028571
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d571f1fc3a63903055bc9efe42eada3f2c5310
| 3,699
|
py
|
Python
|
apps/ignite/views.py
|
Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803
|
4e374b4d52dfb9039ebe543e7f27682189022307
|
[
"BSD-3-Clause"
] | 2
|
2015-04-06T15:20:29.000Z
|
2016-12-30T12:25:11.000Z
|
apps/ignite/views.py
|
Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803
|
4e374b4d52dfb9039ebe543e7f27682189022307
|
[
"BSD-3-Clause"
] | 2
|
2019-02-17T17:38:02.000Z
|
2019-03-28T03:49:16.000Z
|
apps/ignite/views.py
|
Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803
|
4e374b4d52dfb9039ebe543e7f27682189022307
|
[
"BSD-3-Clause"
] | 1
|
2019-03-28T03:49:18.000Z
|
2019-03-28T03:49:18.000Z
|
from django.shortcuts import get_object_or_404
import jingo
import waffle
from django.contrib.auth.models import User
from challenges.models import Submission, Category
from projects.models import Project
from blogs.models import BlogEntry
from events.models import Event
def splash(request, project, slug, template_name='ignite/splash.html'):
"""Show an individual project challenge."""
project = get_object_or_404(Project, slug=project)
challenge = get_object_or_404(project.challenge_set, slug=slug)
num_blogs = 3
# have we announced the winners yet - switch template
if waffle.switch_is_active('announce_winners'):
template_name = 'ignite/homepage-winners.html'
num_blogs = 5
blogs = BlogEntry.objects.filter(
page='splash'
).order_by("-updated",)[:num_blogs]
# if the dev challenge is open we want to only show dev entries
if request.development.is_open:
entries = (Submission.objects.visible()
.filter(phase__challenge=challenge)
.filter(phase__name="Development")
.order_by("?"))
num_entries = len(entries)
entries_from = 'apps'
if num_entries < 5:
entries = (Submission.objects.visible()
.filter(phase__challenge=challenge)
.filter(phase__name="Ideation")
.order_by("?"))
entries_from = 'ideas'
else:
entries = (Submission.objects.visible()
.filter(phase__challenge=challenge)
.filter(phase__name="Ideation")
.order_by("?"))
entries_from = 'ideas'
event_list = Event.objects.get_featured()[:5]
return jingo.render(request, template_name, {
'challenge': challenge,
'project': project,
'phases': list(enumerate(challenge.phases.all(), start=1)),
'entries': entries[:5],
'categories': Category.objects.all(),
'blogs': blogs,
'event_list': event_list,
'entries_from': entries_from,
})
def about(request, project, slug, template_name='ignite/about.html'):
if waffle.switch_is_active('announce_winners'):
template_name = 'ignite/about-winners.html'
return jingo.render(request, template_name)
def judges(request, project, slug, template_name='challenges/all_judges.html'):
""" List all judges we have in the system """
profiles = []
for judge in User.objects.filter(groups__name='Judges'):
profile = judge.get_profile()
# we only want to show featured profiles
if profile.featured:
profiles.append(profile)
return jingo.render(request, 'ignite/judges.html', {
'profiles': profiles
})
def terms(request, project, slug, template_name='static/terms_conditions.html'):
return jingo.render(request, template_name, {})
def terms_development(request, project, slug, template_name='static/terms_conditions_development.html'):
return jingo.render(request, template_name, {})
def fail(request, template_name='404.html'):
return jingo.render(request, template_name, {}, status=404)
def app_fail(request, template_name='500.html'):
return jingo.render(request, template_name, {}, status=500)
def action_unavailable_response(request, message=None,
template_name="action_unavailable.html"):
"""Generic page for unavailable actions"""
context = {'message': message}
return jingo.render(request, template_name, context, status=403)
| 36.99
| 104
| 0.638821
| 412
| 3,699
| 5.550971
| 0.262136
| 0.0892
| 0.07477
| 0.083953
| 0.40927
| 0.377787
| 0.314823
| 0.314823
| 0.17359
| 0.17359
| 0
| 0.010842
| 0.25196
| 3,699
| 99
| 105
| 37.363636
| 0.815685
| 0.072182
| 0
| 0.260274
| 0
| 0
| 0.121887
| 0.04981
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109589
| false
| 0
| 0.109589
| 0.054795
| 0.328767
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d6b9ef2efd18b552dbe05895fafd84b7430c25
| 17,209
|
py
|
Python
|
bdlb/diabetic_retinopathy_diagnosis/benchmark.py
|
Sairam954/bdl-benchmarks
|
6fbc855ca51403ad8f64b6be30ed92f6118c6cae
|
[
"Apache-2.0"
] | 666
|
2019-06-14T17:14:05.000Z
|
2022-03-24T10:48:47.000Z
|
bdlb/diabetic_retinopathy_diagnosis/benchmark.py
|
Sairam954/bdl-benchmarks
|
6fbc855ca51403ad8f64b6be30ed92f6118c6cae
|
[
"Apache-2.0"
] | 12
|
2019-06-26T16:54:14.000Z
|
2020-08-18T13:16:01.000Z
|
bdlb/diabetic_retinopathy_diagnosis/benchmark.py
|
Sairam954/bdl-benchmarks
|
6fbc855ca51403ad8f64b6be30ed92f6118c6cae
|
[
"Apache-2.0"
] | 97
|
2019-06-14T20:30:39.000Z
|
2022-02-05T08:33:49.000Z
|
# Copyright 2019 BDL Benchmarks Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Diabetic retinopathy diagnosis BDL Benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Sequence
from typing import Text
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
import tensorflow as tf
from absl import logging
from ..core import transforms
from ..core.benchmark import Benchmark
from ..core.benchmark import BenchmarkInfo
from ..core.benchmark import DataSplits
from ..core.constants import DATA_DIR
from ..core.levels import Level
tfk = tf.keras
_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR = os.path.join(
DATA_DIR, "downloads", "manual", "diabetic_retinopathy_diagnosis")
class DiabeticRetinopathyDiagnosisBecnhmark(Benchmark):
"""Diabetic retinopathy diagnosis benchmark class."""
def __init__(
self,
level: Union[Text, Level],
batch_size: int = 64,
data_dir: Optional[Text] = None,
download_and_prepare: bool = False,
):
"""Constructs a benchmark object.
Args:
level: `Level` or `str, downstream task level.
batch_size: (optional) `int`, number of datapoints
per mini-batch.
data_dir: (optional) `str`, path to parent data directory.
download_and_prepare: (optional) `bool`, if the data is not available
it downloads and preprocesses it.
"""
self.__level = level if isinstance(level, Level) else Level.from_str(level)
try:
self.__ds = self.load(level=level,
batch_size=batch_size,
data_dir=data_dir or DATA_DIR)
except AssertionError:
if not download_and_prepare:
raise
else:
logging.info(
"Data not found, `DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()`"
" is now running...")
self.download_and_prepare()
@classmethod
def evaluate(
cls,
estimator: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
dataset: tf.data.Dataset,
output_dir: Optional[Text] = None,
name: Optional[Text] = None,
) -> Dict[Text, float]:
"""Evaluates an `estimator` on the `mode` benchmark dataset.
Args:
estimator: `lambda x: mu_x, uncertainty_x`, an uncertainty estimation
function, which returns `mean_x` and predictive `uncertainty_x`.
dataset: `tf.data.Dataset`, on which dataset to performance evaluation.
output_dir: (optional) `str`, directory to save figures.
name: (optional) `str`, the name of the method.
"""
import inspect
import tqdm
import tensorflow_datasets as tfds
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# Containers used for caching performance evaluation
y_true = list()
y_pred = list()
y_uncertainty = list()
# Convert to NumPy iterator if necessary
ds = dataset if inspect.isgenerator(dataset) else tfds.as_numpy(dataset)
for x, y in tqdm.tqdm(ds):
# Sample from probabilistic model
mean, uncertainty = estimator(x)
# Cache predictions
y_true.append(y)
y_pred.append(mean)
y_uncertainty.append(uncertainty)
# Use vectorized NumPy containers
y_true = np.concatenate(y_true).flatten()
y_pred = np.concatenate(y_pred).flatten()
y_uncertainty = np.concatenate(y_uncertainty).flatten()
fractions = np.asarray([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
# Metrics for evaluation
metrics = zip(["accuracy", "auc"], cls.metrics())
return {
metric: cls._evaluate_metric(
y_true,
y_pred,
y_uncertainty,
fractions,
lambda y_true, y_pred: metric_fn(y_true, y_pred).numpy(),
name,
) for (metric, metric_fn) in metrics
}
@staticmethod
def _evaluate_metric(
y_true: np.ndarray,
y_pred: np.ndarray,
y_uncertainty: np.ndarray,
fractions: Sequence[float],
metric_fn: Callable[[np.ndarray, np.ndarray], float],
name=None,
) -> pd.DataFrame:
"""Evaluate model predictive distribution on `metric_fn` at data retain
`fractions`.
Args:
y_true: `numpy.ndarray`, the ground truth labels, with shape [N].
y_pred: `numpy.ndarray`, the model predictions, with shape [N].
y_uncertainty: `numpy.ndarray`, the model uncertainties,
with shape [N].
fractions: `iterable`, the percentages of data to retain for
calculating `metric_fn`.
metric_fn: `lambda(y_true, y_pred) -> float`, a metric
function that provides a score given ground truths
and predictions.
name: (optional) `str`, the name of the method.
Returns:
A `pandas.DataFrame` with columns ["retained_data", "mean", "std"],
that summarizes the scores at different data retained fractions.
"""
N = y_true.shape[0]
# Sorts indexes by ascending uncertainty
I_uncertainties = np.argsort(y_uncertainty)
# Score containers
mean = np.empty_like(fractions)
# TODO(filangel): do bootstrap sampling and estimate standard error
std = np.zeros_like(fractions)
for i, frac in enumerate(fractions):
# Keep only the %-frac of lowest uncertainties
I = np.zeros(N, dtype=bool)
I[I_uncertainties[:int(N * frac)]] = True
mean[i] = metric_fn(y_true[I], y_pred[I])
# Store
df = pd.DataFrame(dict(retained_data=fractions, mean=mean, std=std))
df.name = name
return df
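# Hedged worked example of the retain-fraction selection above: with N = 4,
# uncertainties [0.9, 0.1, 0.5, 0.3] and frac = 0.5, argsort keeps the two
# most certain indices {1, 3}, and the metric is evaluated on them only.
#
#   I = np.zeros(4, dtype=bool)
#   I[np.argsort([0.9, 0.1, 0.5, 0.3])[:2]] = True
#   # I == array([False,  True, False,  True])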
@property
def datasets(self) -> tf.data.Dataset:
"""Pointer to the processed datasets."""
return self.__ds
@property
def info(self) -> BenchmarkInfo:
"""Text description of the benchmark."""
return BenchmarkInfo(description="", urls="", setup="", citation="")
@property
def level(self) -> Level:
"""The downstream task level."""
return self.__level
@staticmethod
def loss() -> tfk.losses.Loss:
"""Loss used for training binary classifiers."""
return tfk.losses.BinaryCrossentropy()
@staticmethod
def metrics() -> tfk.metrics.Metric:
"""Evaluation metrics used for monitoring training."""
return [tfk.metrics.BinaryAccuracy(), tfk.metrics.AUC()]
@staticmethod
def class_weight() -> Sequence[float]:
"""Class weights used for rebalancing the dataset, by skewing the `loss`
accordingly."""
return [1.0, 4.0]
@classmethod
def load(
cls,
level: Union[Text, Level] = "realworld",
batch_size: int = 64,
data_dir: Optional[Text] = None,
as_numpy: bool = False,
) -> DataSplits:
"""Loads the datasets for the benchmark.
Args:
level: `Level` or `str, downstream task level.
batch_size: (optional) `int`, number of datapoints
per mini-batch.
data_dir: (optional) `str`, path to parent data directory.
as_numpy: (optional) `bool`, if True returns python generators
with `numpy.ndarray` outputs.
Returns:
A namedtuple with properties:
* train: `tf.data.Dataset`, train dataset.
* validation: `tf.data.Dataset`, validation dataset.
* test: `tf.data.Dataset`, test dataset.
"""
import tensorflow_datasets as tfds
from .tfds_adapter import DiabeticRetinopathyDiagnosis
# Fetch datasets
try:
ds_train, ds_validation, ds_test = DiabeticRetinopathyDiagnosis(
data_dir=data_dir or DATA_DIR,
config=level).as_dataset(split=["train", "validation", "test"],
shuffle_files=True,
batch_size=batch_size)
except AssertionError as ae:
raise AssertionError(
str(ae) +
" Run DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()"
" first and then retry.")
# Parse task level
level = level if isinstance(level, Level) else Level.from_str(level)
# Dataset transformations
transforms_train, transforms_eval = cls._preprocessors()
# Apply transformations
ds_train = ds_train.map(transforms_train,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_validation = ds_validation.map(
transforms_eval, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.map(transforms_eval,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Prefetches datasets to memory
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
ds_validation = ds_validation.prefetch(tf.data.experimental.AUTOTUNE)
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
if as_numpy:
# Convert to NumPy iterators
ds_train = tfds.as_numpy(ds_train)
ds_validation = tfds.as_numpy(ds_validation)
ds_test = tfds.as_numpy(ds_test)
return DataSplits(ds_train, ds_validation, ds_test)
@classmethod
def download_and_prepare(cls, levels=None) -> None:
"""Downloads dataset from Kaggle, extracts zip files and processes it using
`tensorflow_datasets`.
Args:
levels: (optional) `iterable` of `str`, specifies which
levels from {'medium', 'realworld'} to prepare,
if None it prepares all the levels.
Raises:
OSError: if `~/.kaggle/kaggle.json` is not set up.
"""
# Disable GPU for data download, extraction and preparation
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
cls._download()
# cls._extract()
#cls._prepare(levels)
@staticmethod
def _download() -> None:
"""Downloads data from Kaggle using `tensorflow_datasets`.
Raises:
OSError: if `~/.kaggle/kaggle.json` is not set up.
"""
import subprocess as sp
import tensorflow_datasets as tfds
# Append `/home/$USER/.local/bin` to path
os.environ["PATH"] += ":/home/{}/.local/bin/".format(os.environ["USER"])
# Download all files from Kaggle
drd = tfds.download.kaggle.KaggleCompetitionDownloader(
"diabetic-retinopathy-detection")
try:
for dfile in drd.competition_files:
drd.download_file(dfile,
output_dir=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
except sp.CalledProcessError as cpe:
raise OSError(
str(cpe) + "." +
" Make sure you have ~/.kaggle/kaggle.json setup, fetched from the Kaggle website"
" https://www.kaggle.com/<username>/account -> 'Create New API Key'."
" Also accept the dataset license by going to"
" https://www.kaggle.com/c/diabetic-retinopathy-detection/rules"
" and look for the button 'I Understand and Accept' (make sure when reloading the"
" page that the button does not pop up again).")
@staticmethod
def _extract() -> None:
"""Extracts zip files downloaded from Kaggle."""
import glob
import tqdm
import zipfile
import tempfile
# Extract train and test original images
for split in ["train", "test"]:
# Extract "<split>.zip.00*"" files to "<split>"
with tempfile.NamedTemporaryFile() as tmp:
# Concatenate "<split>.zip.00*" to "<split>.zip"
for fname in tqdm.tqdm(
sorted(
glob.glob(
os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR,
"{split}.zip.00*".format(split=split))))):
# Unzip "<split>.zip" to "<split>"
with open(fname, "rb") as ztmp:
tmp.write(ztmp.read())
with zipfile.ZipFile(tmp) as zfile:
for image in tqdm.tqdm(iterable=zfile.namelist(),
total=len(zfile.namelist())):
zfile.extract(member=image,
path=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
# Delete "<split>.zip.00*" files
for splitzip in os.listdir(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR):
if "{split}.zip.00".format(split=split) in splitzip:
os.remove(
os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR, splitzip))
# Extract "sample.zip", "trainLabels.csv.zip"
for fname in ["sample", "trainLabels.csv"]:
zfname = os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR,
"{fname}.zip".format(fname=fname))
with zipfile.ZipFile(zfname) as zfile:
zfile.extractall(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
os.remove(zfname)
@staticmethod
def _prepare(levels=None) -> None:
"""Generates the TFRecord objects for medium and realworld experiments."""
import multiprocessing
from absl import logging
from .tfds_adapter import DiabeticRetinopathyDiagnosis
# Handle each level individually
for level in levels or ["medium", "realworld"]:
dtask = DiabeticRetinopathyDiagnosis(data_dir=DATA_DIR, config=level)
logging.debug("=== Preparing TFRecords for {} ===".format(level))
dtask.download_and_prepare()
@classmethod
def _preprocessors(cls) -> Tuple[transforms.Transform, transforms.Transform]:
"""Applies transformations to the raw data."""
import tensorflow_datasets as tfds
# Transformation hyperparameters
mean = np.asarray([0.42606387, 0.29752496, 0.21309826])
stddev = np.asarray([0.27662534, 0.20280295, 0.1687619])
class Parse(transforms.Transform):
"""Parses datapoints from raw `tf.data.Dataset`."""
def __call__(self, x, y=None):
"""Returns `as_supervised` tuple."""
return x["image"], x["label"]
class CastX(transforms.Transform):
"""Casts image to `dtype`."""
def __init__(self, dtype):
"""Constructs a type caster."""
self.dtype = dtype
def __call__(self, x, y):
"""Returns casted image (to `dtype`) and its (unchanged) label as
tuple."""
return tf.cast(x, self.dtype), y
class To01X(transforms.Transform):
"""Rescales image to [min, max]=[0, 1]."""
def __call__(self, x, y):
"""Returns rescaled image and its (unchanged) label as tuple."""
return x / 255.0, y
# Get augmentation schemes
[augmentation_config,
no_augmentation_config] = cls._ImageDataGenerator_config()
# Transformations for train dataset
transforms_train = transforms.Compose([
Parse(),
CastX(tf.float32),
To01X(),
transforms.Normalize(mean, stddev),
# TODO(filangel): handle batch with ImageDataGenerator
# transforms.RandomAugment(**augmentation_config),
])
# Transformations for validation/test dataset
transforms_eval = transforms.Compose([
Parse(),
CastX(tf.float32),
To01X(),
transforms.Normalize(mean, stddev),
# TODO(filangel): handle batch with ImageDataGenerator
# transforms.RandomAugment(**no_augmentation_config),
])
return transforms_train, transforms_eval
@staticmethod
def _ImageDataGenerator_config():
"""Returns the configs for the
`tensorflow.keras.preprocessing.image.ImageDataGenerator`, used for the
random augmentation of the dataset, following the implementation of
https://github.com/chleibig/disease-detection/blob/f3401b26aa9b832ff77afe93
e3faa342f7d088e5/scripts/inspect_data_augmentation.py."""
augmentation_config = dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=180.0,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.,
zoom_range=0.10,
channel_shift_range=0.,
fill_mode="constant",
cval=0.,
horizontal_flip=True,
vertical_flip=True,
data_format="channels_last",
)
no_augmentation_config = dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0.0,
width_shift_range=0.0,
height_shift_range=0.0,
shear_range=0.,
zoom_range=0.0,
channel_shift_range=0.,
fill_mode="nearest",
cval=0.,
horizontal_flip=False,
vertical_flip=False,
data_format="channels_last",
)
return augmentation_config, no_augmentation_config
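# Illustrative usage sketch (added; not part of the original file). The
# `benchmark_cls` parameter is a hypothetical stand-in for the class that
# owns the staticmethods above; the random arrays are fake image data.
def _demo_augmentation(benchmark_cls):
    import numpy as np
    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    augmentation_config, _ = benchmark_cls._ImageDataGenerator_config()
    datagen = ImageDataGenerator(**augmentation_config)
    x = np.random.rand(8, 256, 256, 3).astype("float32")  # fake image batch
    y = np.random.randint(0, 2, size=(8,))
    return next(datagen.flow(x, y, batch_size=4))  # one augmented batch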
| 34.625755
| 92
| 0.658609
| 2,048
| 17,209
| 5.381836
| 0.249512
| 0.013972
| 0.027944
| 0.023226
| 0.24814
| 0.191163
| 0.155416
| 0.145255
| 0.111958
| 0.105244
| 0
| 0.012101
| 0.236504
| 17,209
| 496
| 93
| 34.695565
| 0.826775
| 0.316404
| 0
| 0.232082
| 0
| 0
| 0.079319
| 0.019764
| 0
| 0
| 0
| 0.004032
| 0.010239
| 1
| 0.068259
| false
| 0
| 0.139932
| 0
| 0.269625
| 0.003413
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d75ce424bf88d4d06c99b804df0f846b952cac
| 1,873
|
py
|
Python
|
vivisect/storage/mpfile.py
|
vEpiphyte/vivisect
|
14947a53c6781175f0aa83d49cc16c524a2e23a3
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
vivisect/storage/mpfile.py
|
vEpiphyte/vivisect
|
14947a53c6781175f0aa83d49cc16c524a2e23a3
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
vivisect/storage/mpfile.py
|
vEpiphyte/vivisect
|
14947a53c6781175f0aa83d49cc16c524a2e23a3
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import base64
import logging
import msgpack
logger = logging.getLogger(__name__)
loadargs = {'use_list': False, 'raw': False}
if msgpack.version < (1, 0, 0):
loadargs['encoding'] = 'utf-8'
else:
loadargs['strict_map_key'] = False
VSIG = b'MSGVIV'.ljust(8, b'\x00')
def vivEventsAppendFile(filename, events):
with open(filename, 'ab') as f:
for event in events:
if event[0] == 20:
mape = base64.b64encode(event[1][3])
event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
msgpack.pack(event, f, use_bin_type=False)
def saveWorkspaceChanges(vw, filename):
events = vw.exportWorkspaceChanges()
vivEventsAppendFile(filename, events)
def vivEventsToFile(filename, events):
with open(filename, 'wb') as f:
msgpack.pack(VSIG, f, use_bin_type=False)
for event in events:
if event[0] == 20:
mape = base64.b64encode(event[1][3])
event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
msgpack.pack(event, f, use_bin_type=False)
def saveWorkspace(vw, filename):
events = vw.exportWorkspace()
vivEventsToFile(filename, events)
def vivEventsFromFile(filename):
events = []
with open(filename, 'rb') as f:
unpacker = msgpack.Unpacker(f, **loadargs)
siggy = next(unpacker)
if siggy.encode('utf-8') != VSIG:
logger.warning('Invalid file signature of %s', str(siggy))
return
for event in unpacker:
if event[0] == 20:
mape = base64.b64decode(event[1][3])
event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
events.append(event)
return events
def loadWorkspace(vw, filename):
events = vivEventsFromFile(filename)
vw.importWorkspace(events)
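# Round-trip sketch (added for illustration, not from the original module).
# Event code 20 is the event type whose 4th payload element is raw bytes;
# the helpers above transparently base64-wrap that payload on disk.
def _demo_roundtrip(path='/tmp/demo.viv'):
    demo = [(1, ('name', 0x1000)),
            (20, (0x1000, 5, 'map', b'\x90' * 4))]  # toy events
    vivEventsToFile(path, demo)
    return vivEventsFromFile(path)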
| 28.815385
| 81
| 0.599573
| 233
| 1,873
| 4.763949
| 0.313305
| 0.064865
| 0.037838
| 0.059459
| 0.373874
| 0.278378
| 0.26036
| 0.26036
| 0.26036
| 0.26036
| 0
| 0.041667
| 0.256807
| 1,873
| 64
| 82
| 29.265625
| 0.755747
| 0
| 0
| 0.25
| 0
| 0
| 0.04645
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.083333
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d771361889efe007b26f62c7cd92ffc6f656a2
| 3,832
|
py
|
Python
|
pytest_pgsql/plugin.py
|
mathiasose/pytest-pgsql
|
5e076db146699c3b683b49e4a31323c4c23054de
|
[
"BSD-3-Clause"
] | null | null | null |
pytest_pgsql/plugin.py
|
mathiasose/pytest-pgsql
|
5e076db146699c3b683b49e4a31323c4c23054de
|
[
"BSD-3-Clause"
] | null | null | null |
pytest_pgsql/plugin.py
|
mathiasose/pytest-pgsql
|
5e076db146699c3b683b49e4a31323c4c23054de
|
[
"BSD-3-Clause"
] | null | null | null |
"""This forms the core of the pytest plugin."""
import pytest
import testing.postgresql
from pytest_pgsql import database
from pytest_pgsql import ext
def pytest_addoption(parser):
"""Add configuration options for pytest_pgsql."""
parser.addoption(
'--pg-extensions', action='store', default='',
help="A comma-separated list of PostgreSQL extensions to install at "
"the beginning of the session for use by all tests. Example: "
"--pg-extensions=uuid-ossp,pg_tgrm,pgcrypto")
parser.addoption(
'--pg-work-mem', type=int, default=32,
help='Set the value of the `work_mem` setting, in megabytes. '
'`pytest_pgsql` defaults to 32. Adjusting this up or down can '
'help performance; see the Postgres documentation for more details.')
parser.addoption(
'--pg-conf-opt', action='append',
help='Add a key=value line that will be appended to postgresql.conf')
@pytest.fixture(scope='session')
def database_uri(request):
"""A fixture giving the connection URI of the session-wide test database."""
# Note: due to the nature of the variable configs, the command line options
# must be tested manually.
work_mem = request.config.getoption('--pg-work-mem')
if work_mem < 0: # pragma: no cover
pytest.exit('ERROR: --pg-work-mem value must be >= 0. Got: %d' % work_mem)
return
elif work_mem == 0: # pragma: no cover
# Disable memory tweak and use the server default.
work_mem_setting = ''
else:
# User wants to change the working memory setting.
work_mem_setting = '-c work_mem=%dMB ' % work_mem
conf_opts = request.config.getoption('--pg-conf-opt')
if conf_opts:
conf_opts_string = ' -c ' + ' -c '.join(conf_opts)
else:
conf_opts_string = ''
# pylint: disable=bad-continuation,deprecated-method
with testing.postgresql.Postgresql(
postgres_args='-c TimeZone=UTC '
'-c fsync=off '
'-c synchronous_commit=off '
'-c full_page_writes=off '
+ work_mem_setting +
'-c checkpoint_timeout=30min '
'-c bgwriter_delay=10000ms'
+ conf_opts_string) as pgdb:
yield pgdb.url()
#: A SQLAlchemy engine shared by the transacted and non-transacted database fixtures.
#:
#: .. seealso:: `pytest_pgsql.ext.create_engine_fixture`
# pylint: disable=invalid-name
pg_engine = ext.create_engine_fixture('pg_engine', scope='session')
# pylint: enable=invalid-name
@pytest.fixture(scope='session')
def database_snapshot(pg_engine):
"""Create one database snapshot for the session.
The database will be restored to this state after each test.
.. note ::
This is an implementation detail and should not be used directly except
by derived fixtures.
"""
return database.create_database_snapshot(pg_engine)
# pylint: disable=invalid-name
#: Create a test database instance and cleans up after each test finishes.
#:
#: You should prefer the `transacted_postgresql_db` fixture unless your test
#: cannot be run in a single transaction. The `transacted_postgresql_db` fixture
#: leads to faster tests since it doesn't tear down the entire database between
#: each test.
postgresql_db = \
database.PostgreSQLTestDB.create_fixture('postgresql_db')
#: Create a test database instance that rolls back the current transaction after
#: each test finishes, verifying its integrity before returning.
#:
#: Read the warning in the main documentation page before using this fixture.
transacted_postgresql_db = \
database.TransactedPostgreSQLTestDB.create_fixture('transacted_postgresql_db')
# pylint: enable=invalid-name
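# Usage sketch (added): in a test module, the session-wide `database_uri`
# fixture defined above can be consumed directly. Assumes SQLAlchemy.
#
#     import sqlalchemy as sa
#
#     def test_can_connect(database_uri):
#         engine = sa.create_engine(database_uri)
#         with engine.connect() as conn:
#             assert conn.execute(sa.text('SELECT 1')).scalar() == 1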
| 35.813084
| 85
| 0.675626
| 497
| 3,832
| 5.090543
| 0.420523
| 0.035968
| 0.022134
| 0.016601
| 0.0917
| 0.045059
| 0
| 0
| 0
| 0
| 0
| 0.004749
| 0.230689
| 3,832
| 106
| 86
| 36.150943
| 0.85346
| 0.378914
| 0
| 0.137255
| 0
| 0
| 0.326998
| 0.057883
| 0.019608
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.078431
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d816c8c07445ebc9580d3703129a46fcf2cc64
| 737
|
py
|
Python
|
power_data_to_sat_passes/date_utils.py
|
abrahamneben/orbcomm_beam_mapping
|
71b3e7d6e4214db0a6f4e68ebeeb7d7f846f5004
|
[
"MIT"
] | 1
|
2019-04-10T02:50:19.000Z
|
2019-04-10T02:50:19.000Z
|
power_data_to_sat_passes/date_utils.py
|
abrahamneben/orbcomm_beam_mapping
|
71b3e7d6e4214db0a6f4e68ebeeb7d7f846f5004
|
[
"MIT"
] | null | null | null |
power_data_to_sat_passes/date_utils.py
|
abrahamneben/orbcomm_beam_mapping
|
71b3e7d6e4214db0a6f4e68ebeeb7d7f846f5004
|
[
"MIT"
] | null | null | null |
# written by abraham on aug 24
def dyear2date(dyear):
year = int(dyear)
month_lengths = [31,28,31,30,31,30,31,31,30,31,30,31]
days_before_months = [0,31,59,90,120,151,181,212,243,273,304,334]
days_into_year_f = (dyear-year)*365
days_into_year_i = int(days_into_year_f)
for i in range(12):
# <= on the lower bound so exact month boundaries still assign a month
if days_before_months[i] <= days_into_year_f < (days_before_months[i]+month_lengths[i]):
month = i+1
break
date = days_into_year_i - days_before_months[month-1]
hours_f = (days_into_year_f-days_into_year_i)*24
hours_i = int(hours_f)
minutes_f = (hours_f-hours_i)*60
minutes_i = int(minutes_f)
seconds_i = int((minutes_f-minutes_i)*60)
return "%02d/%02d/%d %02d:%02d:%02d" % (month,date,year,hours_i,minutes_i,seconds_i)
| 27.296296
| 89
| 0.72863
| 144
| 737
| 3.416667
| 0.340278
| 0.113821
| 0.170732
| 0.105691
| 0.109756
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125581
| 0.12483
| 737
| 26
| 90
| 28.346154
| 0.637209
| 0.037992
| 0
| 0
| 0
| 0
| 0.03819
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d8391391013bac7dd77afd2eebf78925078f05
| 752
|
py
|
Python
|
app/base/count_lines.py
|
sourcery-ai-bot/personal-expenses-accounting
|
55e76744a06fd502d119f57427cd7a0bfaf68fe1
|
[
"MIT"
] | 5
|
2020-02-21T16:26:21.000Z
|
2021-08-05T09:34:28.000Z
|
app/base/count_lines.py
|
sourcery-ai-bot/personal-expenses-accounting
|
55e76744a06fd502d119f57427cd7a0bfaf68fe1
|
[
"MIT"
] | 11
|
2020-06-26T09:05:04.000Z
|
2022-01-24T20:35:07.000Z
|
app/base/count_lines.py
|
sourcery-ai-bot/personal-expenses-accounting
|
55e76744a06fd502d119f57427cd7a0bfaf68fe1
|
[
"MIT"
] | 1
|
2021-06-25T09:42:08.000Z
|
2021-06-25T09:42:08.000Z
|
import glob
from os import walk
exclude_folders = [
'node_modules',
'ios',
'android',
'__pycache__'
]
exclude_files = [
'json',
'txt',
'traineddata',
'lstmf',
'yml',
'md',
'log',
'env',
'gitignore',
'dockerignore'
]
# get all files in directory
dirr = '/home/viktor/Documents/personal-expenses-accounting/app/services/web_service/'
folders = glob.glob(dirr + '/**/', recursive=True)
# only app related directories
directories = []
for folder in folders:
current_folder = folder.split('/')[-2]
if current_folder not in exclude_folders:
files = glob.glob(folder + '*')
print(files)
directories.append(folder)
# num_lines = sum(1 for line in open('myfile.txt'))
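# A sketch (added) completing the commented-out idea above: count lines in
# the collected folders, skipping the excluded extensions.
total_lines = 0
for folder in directories:
    for path in glob.glob(folder + '*'):
        if path.rsplit('.', 1)[-1] in exclude_files:
            continue
        try:
            with open(path) as f:
                total_lines += sum(1 for _ in f)
        except (IsADirectoryError, UnicodeDecodeError, PermissionError):
            continue  # skip directories and unreadable/binary files
print('total lines:', total_lines)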
| 19.282051
| 86
| 0.625
| 87
| 752
| 5.264368
| 0.678161
| 0.061135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003448
| 0.228723
| 752
| 38
| 87
| 19.789474
| 0.786207
| 0.139628
| 0
| 0
| 0
| 0
| 0.265941
| 0.119751
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.068966
| 0
| 0.068966
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d89936d8b1b9966571e7248379800a7bb8190c
| 17,617
|
py
|
Python
|
charmhelpers/contrib/charmsupport/nrpe.py
|
nobuto-m/charm-helpers
|
4cffc05ace43234d34b040cccdde3460f68cb673
|
[
"Apache-2.0"
] | null | null | null |
charmhelpers/contrib/charmsupport/nrpe.py
|
nobuto-m/charm-helpers
|
4cffc05ace43234d34b040cccdde3460f68cb673
|
[
"Apache-2.0"
] | 1
|
2019-09-04T12:17:17.000Z
|
2019-09-04T12:17:17.000Z
|
charmhelpers/contrib/charmsupport/nrpe.py
|
nobuto-m/charm-helpers
|
4cffc05ace43234d34b040cccdde3460f68cb673
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compatibility with the nrpe-external-master charm"""
# Copyright 2012 Canonical Ltd.
#
# Authors:
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
import subprocess
import pwd
import grp
import os
import glob
import shutil
import re
import shlex
import yaml
from charmhelpers.core.hookenv import (
config,
hook_name,
local_unit,
log,
relation_get,
relation_ids,
relation_set,
relations_of_type,
)
from charmhelpers.core.host import service
from charmhelpers.core import host
# This module adds compatibility with the nrpe-external-master and plain nrpe
# subordinate charms. To use it in your charm:
#
# 1. Update metadata.yaml
#
# provides:
# (...)
# nrpe-external-master:
# interface: nrpe-external-master
# scope: container
#
# and/or
#
# provides:
# (...)
# local-monitors:
# interface: local-monitors
# scope: container
#
# 2. Add the following to config.yaml
#
# nagios_context:
# default: "juju"
# type: string
# description: |
# Used by the nrpe subordinate charms.
# A string that will be prepended to instance name to set the host name
# in nagios. So for instance the hostname would be something like:
# juju-myservice-0
# If you're running multiple environments with the same services in them
# this allows you to differentiate between them.
# nagios_servicegroups:
# default: ""
# type: string
# description: |
# A comma-separated list of nagios servicegroups.
# If left empty, the nagios_context will be used as the servicegroup
#
# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
#
# 4. Update your hooks.py with something like this:
#
# from charmsupport.nrpe import NRPE
# (...)
# def update_nrpe_config():
# nrpe_compat = NRPE()
# nrpe_compat.add_check(
# shortname = "myservice",
# description = "Check MyService",
# check_cmd = "check_http -w 2 -c 10 http://localhost"
# )
# nrpe_compat.add_check(
# "myservice_other",
# "Check for widget failures",
# check_cmd = "/srv/myapp/scripts/widget_check"
# )
# nrpe_compat.write()
#
# def config_changed():
# (...)
# update_nrpe_config()
#
# def nrpe_external_master_relation_changed():
# update_nrpe_config()
#
# def local_monitors_relation_changed():
# update_nrpe_config()
#
# 4.a If your charm is a subordinate charm set primary=False
#
# from charmsupport.nrpe import NRPE
# (...)
# def update_nrpe_config():
# nrpe_compat = NRPE(primary=False)
#
# 5. ln -s hooks.py nrpe-external-master-relation-changed
# ln -s hooks.py local-monitors-relation-changed
class CheckException(Exception):
pass
class Check(object):
shortname_re = '[A-Za-z0-9-_.@]+$'
service_template = ("""
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {{
use active-service
host_name {nagios_hostname}
service_description {nagios_hostname}[{shortname}] """
"""{description}
check_command check_nrpe!{command}
servicegroups {nagios_servicegroup}
}}
""")
def __init__(self, shortname, description, check_cmd):
super(Check, self).__init__()
# XXX: could be better to calculate this from the service name
if not re.match(self.shortname_re, shortname):
raise CheckException("shortname must match {}".format(
Check.shortname_re))
self.shortname = shortname
self.command = "check_{}".format(shortname)
# Note: a set of invalid characters is defined by the
# Nagios server config
# The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
self.description = description
self.check_cmd = self._locate_cmd(check_cmd)
def _get_check_filename(self):
return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
def _get_service_filename(self, hostname):
return os.path.join(NRPE.nagios_exportdir,
'service__{}_{}.cfg'.format(hostname, self.command))
def _locate_cmd(self, check_cmd):
search_path = (
'/usr/lib/nagios/plugins',
'/usr/local/lib/nagios/plugins',
)
parts = shlex.split(check_cmd)
for path in search_path:
if os.path.exists(os.path.join(path, parts[0])):
command = os.path.join(path, parts[0])
if len(parts) > 1:
command += " " + " ".join(parts[1:])
return command
log('Check command not found: {}'.format(parts[0]))
return ''
def _remove_service_files(self):
if not os.path.exists(NRPE.nagios_exportdir):
return
for f in os.listdir(NRPE.nagios_exportdir):
if f.endswith('_{}.cfg'.format(self.command)):
os.remove(os.path.join(NRPE.nagios_exportdir, f))
def remove(self, hostname):
nrpe_check_file = self._get_check_filename()
if os.path.exists(nrpe_check_file):
os.remove(nrpe_check_file)
self._remove_service_files()
def write(self, nagios_context, hostname, nagios_servicegroups):
nrpe_check_file = self._get_check_filename()
with open(nrpe_check_file, 'w') as nrpe_check_config:
nrpe_check_config.write("# check {}\n".format(self.shortname))
if nagios_servicegroups:
nrpe_check_config.write(
"# The following header was added automatically by juju\n")
nrpe_check_config.write(
"# Modifying it will affect nagios monitoring and alerting\n")
nrpe_check_config.write(
"# servicegroups: {}\n".format(nagios_servicegroups))
nrpe_check_config.write("command[{}]={}\n".format(
self.command, self.check_cmd))
if not os.path.exists(NRPE.nagios_exportdir):
log('Not writing service config as {} is not accessible'.format(
NRPE.nagios_exportdir))
else:
self.write_service_config(nagios_context, hostname,
nagios_servicegroups)
def write_service_config(self, nagios_context, hostname,
nagios_servicegroups):
self._remove_service_files()
templ_vars = {
'nagios_hostname': hostname,
'nagios_servicegroup': nagios_servicegroups,
'description': self.description,
'shortname': self.shortname,
'command': self.command,
}
nrpe_service_text = Check.service_template.format(**templ_vars)
nrpe_service_file = self._get_service_filename(hostname)
with open(nrpe_service_file, 'w') as nrpe_service_config:
nrpe_service_config.write(str(nrpe_service_text))
def run(self):
subprocess.call(self.check_cmd)
class NRPE(object):
nagios_logdir = '/var/log/nagios'
nagios_exportdir = '/var/lib/nagios/export'
nrpe_confdir = '/etc/nagios/nrpe.d'
homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server
def __init__(self, hostname=None, primary=True):
super(NRPE, self).__init__()
self.config = config()
self.primary = primary
self.nagios_context = self.config['nagios_context']
if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
self.nagios_servicegroups = self.config['nagios_servicegroups']
else:
self.nagios_servicegroups = self.nagios_context
self.unit_name = local_unit().replace('/', '-')
if hostname:
self.hostname = hostname
else:
nagios_hostname = get_nagios_hostname()
if nagios_hostname:
self.hostname = nagios_hostname
else:
self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
self.checks = []
# Iff in an nrpe-external-master relation hook, set primary status
relation = relation_ids('nrpe-external-master')
if relation:
log("Setting charm primary status {}".format(primary))
for rid in relation:
relation_set(relation_id=rid, relation_settings={'primary': self.primary})
self.remove_check_queue = set()
def add_check(self, *args, **kwargs):
shortname = None
if kwargs.get('shortname') is None:
if len(args) > 0:
shortname = args[0]
else:
shortname = kwargs['shortname']
self.checks.append(Check(*args, **kwargs))
try:
self.remove_check_queue.remove(shortname)
except KeyError:
pass
def remove_check(self, *args, **kwargs):
if kwargs.get('shortname') is None:
raise ValueError('shortname of check must be specified')
# Use sensible defaults if they're not specified - these are not
# actually used during removal, but they're required for constructing
# the Check object; check_disk is chosen because it's part of the
# nagios-plugins-basic package.
if kwargs.get('check_cmd') is None:
kwargs['check_cmd'] = 'check_disk'
if kwargs.get('description') is None:
kwargs['description'] = ''
check = Check(*args, **kwargs)
check.remove(self.hostname)
self.remove_check_queue.add(kwargs['shortname'])
def write(self):
try:
nagios_uid = pwd.getpwnam('nagios').pw_uid
nagios_gid = grp.getgrnam('nagios').gr_gid
except Exception:
log("Nagios user not set up, nrpe checks not updated")
return
if not os.path.exists(NRPE.nagios_logdir):
os.mkdir(NRPE.nagios_logdir)
os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
nrpe_monitors = {}
monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
for nrpecheck in self.checks:
nrpecheck.write(self.nagios_context, self.hostname,
self.nagios_servicegroups)
nrpe_monitors[nrpecheck.shortname] = {
"command": nrpecheck.command,
}
# update-status hooks are configured to fire every 5 minutes by
# default. When nagios-nrpe-server is restarted, the nagios server
# reports checks failing causing unnecessary alerts. Let's not restart
# on update-status hooks.
if not hook_name() == 'update-status':
service('restart', 'nagios-nrpe-server')
monitor_ids = relation_ids("local-monitors") + \
relation_ids("nrpe-external-master")
for rid in monitor_ids:
reldata = relation_get(unit=local_unit(), rid=rid)
if 'monitors' in reldata:
# update the existing set of monitors with the new data
old_monitors = yaml.safe_load(reldata['monitors'])
old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
# remove keys that are in the remove_check_queue
old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
if k not in self.remove_check_queue}
# update/add nrpe_monitors
old_nrpe_monitors.update(nrpe_monitors)
old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
# write back to the relation
relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
else:
# write a brand new set of monitors, as no existing ones.
relation_set(relation_id=rid, monitors=yaml.dump(monitors))
self.remove_check_queue.clear()
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
"""
Query relation with nrpe subordinate, return the nagios_host_context
:param str relation_name: Name of relation nrpe sub joined to
"""
for rel in relations_of_type(relation_name):
if 'nagios_host_context' in rel:
return rel['nagios_host_context']
def get_nagios_hostname(relation_name='nrpe-external-master'):
"""
Query relation with nrpe subordinate, return the nagios_hostname
:param str relation_name: Name of relation nrpe sub joined to
"""
for rel in relations_of_type(relation_name):
if 'nagios_hostname' in rel:
return rel['nagios_hostname']
def get_nagios_unit_name(relation_name='nrpe-external-master'):
"""
Return the nagios unit name prepended with host_context if needed
:param str relation_name: Name of relation nrpe sub joined to
"""
host_context = get_nagios_hostcontext(relation_name)
if host_context:
unit = "%s:%s" % (host_context, local_unit())
else:
unit = local_unit()
return unit
def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
"""
Add checks for each service in list
:param NRPE nrpe: NRPE object to add check to
:param list services: List of services to check
:param str unit_name: Unit name to use in check description
:param bool immediate_check: For sysv init, run the service check immediately
"""
for svc in services:
# Don't add a check for these services from neutron-gateway
if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
    continue  # a bare `next` here was a no-op; `continue` skips as intended
upstart_init = '/etc/init/%s.conf' % svc
sysv_init = '/etc/init.d/%s' % svc
if host.init_is_systemd():
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_systemd.py %s' % svc
)
elif os.path.exists(upstart_init):
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_upstart_job %s' % svc
)
elif os.path.exists(sysv_init):
cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
croncmd = (
'/usr/local/lib/nagios/plugins/check_exit_status.pl '
'-e -s /etc/init.d/%s status' % svc
)
cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
f = open(cronpath, 'w')
f.write(cron_file)
f.close()
nrpe.add_check(
shortname=svc,
description='service check {%s}' % unit_name,
check_cmd='check_status_file.py -f %s' % checkpath,
)
# if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
# (LP: #1670223).
if immediate_check and os.path.isdir(nrpe.homedir):
f = open(checkpath, 'w')
subprocess.call(
croncmd.split(),
stdout=f,
stderr=subprocess.STDOUT
)
f.close()
os.chmod(checkpath, 0o644)
def copy_nrpe_checks(nrpe_files_dir=None):
"""
Copy the nrpe checks into place
"""
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
if nrpe_files_dir is None:
# determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
for segment in ['.', 'hooks']:
nrpe_files_dir = os.path.abspath(os.path.join(
os.getenv('CHARM_DIR'),
segment,
'charmhelpers',
'contrib',
'openstack',
'files'))
if os.path.isdir(nrpe_files_dir):
break
else:
raise RuntimeError("Couldn't find charmhelpers directory")
if not os.path.exists(NAGIOS_PLUGINS):
os.makedirs(NAGIOS_PLUGINS)
for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
if os.path.isfile(fname):
shutil.copy2(fname,
os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
def add_haproxy_checks(nrpe, unit_name):
"""
Add checks for each service in list
:param NRPE nrpe: NRPE object to add check to
:param str unit_name: Unit name to use in check description
"""
nrpe.add_check(
shortname='haproxy_servers',
description='Check HAProxy {%s}' % unit_name,
check_cmd='check_haproxy.sh')
nrpe.add_check(
shortname='haproxy_queue',
description='Check HAProxy queue depth {%s}' % unit_name,
check_cmd='check_haproxy_queue_depth.sh')
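# Condensed, runnable form of the hook recipe documented at the top of this
# module (added as a sketch; `myservice` and its check command are illustrative):
def example_update_nrpe_config():
    hostname = get_nagios_hostname()
    unit = get_nagios_unit_name()
    nrpe_compat = NRPE(hostname=hostname)
    nrpe_compat.add_check(
        shortname='myservice',
        description='process check {%s}' % unit,
        check_cmd='check_procs -c 1:1 -C myservice',
    )
    nrpe_compat.write()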
| 36.174538
| 90
| 0.605097
| 2,077
| 17,617
| 4.959557
| 0.198844
| 0.012232
| 0.022716
| 0.009708
| 0.238423
| 0.187263
| 0.132414
| 0.09999
| 0.085234
| 0.085234
| 0
| 0.00391
| 0.288698
| 17,617
| 486
| 91
| 36.248971
| 0.818131
| 0.273656
| 0
| 0.144876
| 0
| 0
| 0.162476
| 0.030061
| 0.003534
| 0
| 0
| 0
| 0
| 1
| 0.067138
| false
| 0.007067
| 0.042403
| 0.007067
| 0.173145
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d9e6ada4265efd73113dc71c68649cc06c25fa
| 13,250
|
py
|
Python
|
venv/Lib/site-packages/proglog/proglog.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | 83
|
2017-08-14T02:20:38.000Z
|
2022-03-01T20:32:03.000Z
|
venv/lib/python3.7/site-packages/proglog/proglog.py
|
haideraltahan/CropMe
|
75a111b9d3b2c50c6f2a9a36d21432053f02284d
|
[
"MIT"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
venv/lib/python3.7/site-packages/proglog/proglog.py
|
haideraltahan/CropMe
|
75a111b9d3b2c50c6f2a9a36d21432053f02284d
|
[
"MIT"
] | 6
|
2018-10-23T08:12:26.000Z
|
2021-02-14T13:53:13.000Z
|
"""Implements the generic progress logger class, and the ProgressBar class.
"""
from tqdm import tqdm, tqdm_notebook
from collections import OrderedDict
import time
SETTINGS = {
'notebook': False
}
def notebook(turn='on'):
SETTINGS['notebook'] = True if (turn == 'on') else False
def troncate_string(s, max_length=25):
return s if (len(s) < max_length) else (s[:max_length] + "...")
class ProgressLogger:
"""Generic class for progress loggers.
A progress logger contains a "state" dictionnary.
Parameters
----------
init_state
Dictionary representing the initial state.
"""
def __init__(self, init_state=None):
self.state = {}
self.stored = {}
self.logs = []
self.log_indent = 0
if init_state is not None:
self.state.update(init_state)
def log(self, message):
self.logs.append((' ' * self.log_indent) + message)
def dump_logs(self, filepath=None):
if filepath is not None:
with open(filepath, 'a') as f:
f.write("\n".join(self.logs))
else:
return "\n".join(self.logs)
def callback(self, **kw):
"""Execute something after the state has been updated by the given
state elements.
This default callback does nothing, overwrite it by subclassing
"""
pass
def store(self, **kw):
"""Store objects in the logger and trigger ``self.store_callback``.
This works exactly like ``logger()``, but the latter is meant for simple
data objects (text, numbers) that will be sent over the network or
written to a file. The ``store`` method expects rather large objects
which are not necessarily serializable, and which will be used e.g. to draw
plots on the fly.
"""
self.stored.update(kw)
self.store_callback(**kw)
def store_callback(self, **kw):
"""Execute something after the store has been updated by the given
state elements.
This default callback does nothing, overwrite it by subclassing
"""
pass
def iter(self, **kw):
"""Iterate through a list while updating the state.
Examples
--------
>>> for username in logger.iter(user=['tom', 'tim', 'lea']):
>>> # At every loop, logger.state['user'] is updated
>>> print (username)
"""
for field, iterable in kw.items():
for it in iterable:
self(**{field: it})
yield it
def __call__(self, **kw):
self.state.update(kw)
self.callback(**kw)
class ProgressBarLogger(ProgressLogger):
"""Generic class for progress loggers.
A progress logger contains a "state" dictionnary
Parameters
----------
init_state
Initial state of the logger
bars
Either None (will be initialized with no bar) or a list/tuple of bar
names (``['main', 'sub']``) which will be initialized with index -1 and
no total, or a dictionary (possibly ordered) of bars, of the form
``{bar_1: {title: 'bar1', index: 2, total:23}, bar_2: {...}}``
ignored_bars
Either None (newly met bars will be added) or a list of blacklisted bar
names, or ``'all_others'`` to signify that all bar names not already in
``self.bars`` will be ignored.
"""
bar_indent = 2
def __init__(self, init_state=None, bars=None, ignored_bars=None,
logged_bars='all', min_time_interval=0, ignore_bars_under=0):
ProgressLogger.__init__(self, init_state)
if bars is None:
bars = OrderedDict()
elif isinstance(bars, (list, tuple)):
bars = OrderedDict([
(b, dict(title=b, index=-1, total=None, message=None,
indent=0))
for b in bars
])
if isinstance(ignored_bars, (list, tuple)):
ignored_bars = set(ignored_bars)
self.ignored_bars = ignored_bars
self.logged_bars = logged_bars
self.state['bars'] = bars
self.min_time_interval = min_time_interval
self.ignore_bars_under = ignore_bars_under
@property
def bars(self):
"""Return ``self.state['bars'].``"""
return self.state['bars']
def bar_is_ignored(self, bar):
if self.ignored_bars is None:
return False
elif self.ignored_bars == 'all_others':
return (bar not in self.bars)
else:
return bar in self.ignored_bars
def bar_is_logged(self, bar):
if (not self.logged_bars):
return False
elif self.logged_bars == 'all':
return True
else:
return bar in self.logged_bars
def iterable_is_too_short(self, iterable):
length = len(iterable) if hasattr(iterable, '__len__') else None
return (length is not None) and (length < self.ignore_bars_under)
def iter_bar(self, bar_prefix='', **kw):
"""Iterate through a list while updating a state bar.
Examples
--------
>>> for username in logger.iter_bar(user=['tom', 'tim', 'lea']):
>>> # At every loop, logger.state['bars']['user'] is updated
>>> # to {index: i, total: 3, title:'user'}
>>> print (username)
"""
if 'bar_message' in kw:
bar_message = kw.pop('bar_message')
else:
bar_message = None
bar, iterable = kw.popitem()
if self.bar_is_ignored(bar) or self.iterable_is_too_short(iterable):
return iterable
bar = bar_prefix + bar
if hasattr(iterable, '__len__'):
self(**{bar + '__total': len(iterable)})
def new_iterable():
last_time = time.time()
i = 0 # necessary in case the iterator is empty
for i, it in enumerate(iterable):
now_time = time.time()
if (i == 0) or (now_time - last_time > self.min_time_interval):
if bar_message is not None:
self(**{bar + '__message': bar_message(it)})
self(**{bar + '__index': i})
last_time = now_time
yield it
if self.bars[bar]['index'] != i:
self(**{bar + '__index': i})
self(**{bar + '__index': i + 1})
return new_iterable()
def bars_callback(self, bar, attr, value, old_value=None):
"""Execute a custom action after the progress bars are updated.
Parameters
----------
bar
Name/ID of the bar to be modified.
attr
Attribute of the bar attribute to be modified
value
New value of the attribute
old_value
Previous value of this bar's attribute.
This default callback does nothing, overwrite it by subclassing.
"""
pass
def __call__(self, **kw):
items = sorted(kw.items(), key=lambda kv: not kv[0].endswith('total'))
for key, value in items:
if '__' in key:
bar, attr = key.split('__')
if self.bar_is_ignored(bar):
continue
kw.pop(key)
if bar not in self.bars:
self.bars[bar] = dict(title=bar, index=-1,
total=None, message=None)
old_value = self.bars[bar][attr]
if self.bar_is_logged(bar):
new_bar = (attr == 'index') and (value < old_value)
if (attr == 'total') or (new_bar):
self.bars[bar]['indent'] = self.log_indent
else:
self.log_indent = self.bars[bar]['indent']
self.log("[%s] %s: %s" % (bar, attr, value))
self.log_indent += self.bar_indent
self.bars[bar][attr] = value
self.bars_callback(bar, attr, value, old_value)
self.state.update(kw)
self.callback(**kw)
class TqdmProgressBarLogger(ProgressBarLogger):
"""Tqdm-powered progress bar for console or Notebooks.
Parameters
----------
init_state
Initial state of the logger
bars
Either None (will be initialized with no bar) or a list/tuple of bar
names (``['main', 'sub']``) which will be initialized with index -1 and
no total, or a dictionary (possibly ordered) of bars, of the form
``{bar_1: {title: 'bar1', index: 2, total:23}, bar_2: {...}}``
ignored_bars
Either None (newly met bars will be added) or a list of blacklisted bar
names, or ``'all_others'`` to signify that all bar names not already in
``self.bars`` will be ignored.
leave_bars
If True, finished bars remain displayed (tqdm's ``leave`` option).
notebook
True will make the bars look nice (HTML) in the jupyter notebook. It is
advised to leave it to 'default', as the default can be set globally from
inside a notebook with ``import proglog; proglog.notebook()``.
print_messages
If True, every ``logger(message='something')`` will print a message in
the console / notebook
"""
def __init__(self, init_state=None, bars=None, leave_bars=False,
ignored_bars=None, logged_bars='all', notebook='default',
print_messages=True, min_time_interval=0,
ignore_bars_under=0):
ProgressBarLogger.__init__(self, init_state=init_state, bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
ignore_bars_under=ignore_bars_under,
min_time_interval=min_time_interval)
self.leave_bars = leave_bars
self.tqdm_bars = OrderedDict([
(bar, None)
for bar in self.bars
])
if notebook == 'default':
notebook = SETTINGS['notebook']
self.notebook = notebook
self.print_messages = print_messages
self.tqdm = (tqdm_notebook if self.notebook else tqdm)
def new_tqdm_bar(self, bar):
"""Create a new tqdm bar, possibly replacing an existing one."""
if (bar in self.tqdm_bars) and (self.tqdm_bars[bar] is not None):
self.close_tqdm_bar(bar)
infos = self.bars[bar]
self.tqdm_bars[bar] = self.tqdm(
total=infos['total'],
desc=infos['title'],
postfix=dict(now=troncate_string(str(infos['message']))),
leave=self.leave_bars
)
def close_tqdm_bar(self, bar):
"""Close and erase the tqdm bar"""
self.tqdm_bars[bar].close()
if not self.notebook:
self.tqdm_bars[bar] = None
def bars_callback(self, bar, attr, value, old_value):
if (bar not in self.tqdm_bars) or (self.tqdm_bars[bar] is None):
self.new_tqdm_bar(bar)
if attr == 'index':
if value >= old_value:
total = self.bars[bar]['total']
if total and (value >= total):
self.close_tqdm_bar(bar)
else:
self.tqdm_bars[bar].update(value - old_value)
else:
self.new_tqdm_bar(bar)
self.tqdm_bars[bar].update(value + 1)
elif attr == 'message':
self.tqdm_bars[bar].set_postfix(now=troncate_string(str(value)))
self.tqdm_bars[bar].update(0)
def callback(self, **kw):
if self.print_messages and ('message' in kw) and kw['message']:
if self.notebook:
print(kw['message'])
else:
self.tqdm.write(kw['message'])
class RqWorkerProgressLogger:
def __init__(self, job):
self.job = job
if 'progress_data' not in self.job.meta:
self.job.meta['progress_data'] = {}
self.job.save()
def callback(self, **kw):
self.job.meta['progress_data'] = self.state
self.job.save()
class RqWorkerBarLogger(RqWorkerProgressLogger, ProgressBarLogger):
def __init__(self, job, init_state=None, bars=None, ignored_bars=(),
logged_bars='all', min_time_interval=0):
RqWorkerProgressLogger.__init__(self, job)
ProgressBarLogger.__init__(self, init_state=init_state, bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
min_time_interval=min_time_interval)
class MuteProgressBarLogger(ProgressBarLogger):
def bar_is_ignored(self, bar):
return True
def default_bar_logger(logger, bars=None, ignored_bars=None, logged_bars='all',
min_time_interval=0, ignore_bars_under=0):
if logger == 'bar':
return TqdmProgressBarLogger(
bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
min_time_interval=min_time_interval,
ignore_bars_under=ignore_bars_under
)
elif logger is None:
return MuteProgressBarLogger()
else:
return logger
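# Minimal usage sketch (added): `default_bar_logger('bar')` returns a
# tqdm-backed logger; `iter_bar` wraps an iterable and drives the bar.
if __name__ == '__main__':
    logger = default_bar_logger('bar')
    for _chunk in logger.iter_bar(chunk=range(100)):
        time.sleep(0.01)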
| 33.80102
| 79
| 0.568981
| 1,615
| 13,250
| 4.496594
| 0.15418
| 0.030295
| 0.026852
| 0.01859
| 0.411595
| 0.374552
| 0.306252
| 0.263839
| 0.241807
| 0.221427
| 0
| 0.003911
| 0.324528
| 13,250
| 391
| 80
| 33.887468
| 0.807486
| 0.260075
| 0
| 0.219731
| 0
| 0
| 0.033269
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130045
| false
| 0.013453
| 0.013453
| 0.008969
| 0.246637
| 0.017937
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3d9f04c9618b248a5e94c0c7319362fccd10a9f
| 665
|
py
|
Python
|
gdsfactory/tests/test_component_from_yaml_bezier.py
|
jorgepadilla19/gdsfactory
|
68e1c18257a75d4418279851baea417c8899a165
|
[
"MIT"
] | 42
|
2020-05-25T09:33:45.000Z
|
2022-03-29T03:41:19.000Z
|
gdsfactory/tests/test_component_from_yaml_bezier.py
|
jorgepadilla19/gdsfactory
|
68e1c18257a75d4418279851baea417c8899a165
|
[
"MIT"
] | 133
|
2020-05-28T18:29:04.000Z
|
2022-03-31T22:21:42.000Z
|
gdsfactory/tests/test_component_from_yaml_bezier.py
|
jorgepadilla19/gdsfactory
|
68e1c18257a75d4418279851baea417c8899a165
|
[
"MIT"
] | 17
|
2020-06-30T07:07:50.000Z
|
2022-03-17T15:45:27.000Z
|
import gdsfactory as gf
from gdsfactory.component import Component
yaml = """
name:
test_component_yaml_without_cell
instances:
mmi:
component: mmi1x2
bend:
component: bend_s
connections:
bend,o1: mmi,o2
"""
def test_component_from_yaml_without_cell() -> Component:
"""bezier does not have cell"""
c = gf.read.from_yaml(yaml)
assert c.name == "test_component_yaml_without_cell", c.name
assert len(c.get_dependencies()) == 2, len(c.get_dependencies())
assert len(c.ports) == 0, len(c.ports)
return c
if __name__ == "__main__":
c = test_component_from_yaml_without_cell()
print(c.name)
c.show()
| 20.151515
| 68
| 0.682707
| 93
| 665
| 4.55914
| 0.408602
| 0.122642
| 0.141509
| 0.099057
| 0.301887
| 0.301887
| 0
| 0
| 0
| 0
| 0
| 0.011299
| 0.201504
| 665
| 32
| 69
| 20.78125
| 0.787194
| 0.037594
| 0
| 0
| 0
| 0
| 0.312303
| 0.100946
| 0
| 0
| 0
| 0
| 0.130435
| 1
| 0.043478
| false
| 0
| 0.086957
| 0
| 0.173913
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3db4ad6c588be26e30297068925d6bff9a900d1
| 5,616
|
py
|
Python
|
Tests/Methods/Machine/test_Magnet_Type_11_meth.py
|
Superomeg4/pyleecan
|
2b695b5f39e77475a07aa0ea89489fb0a9659337
|
[
"Apache-2.0"
] | 2
|
2020-06-29T13:48:37.000Z
|
2021-06-15T07:34:05.000Z
|
Tests/Methods/Machine/test_Magnet_Type_11_meth.py
|
Superomeg4/pyleecan
|
2b695b5f39e77475a07aa0ea89489fb0a9659337
|
[
"Apache-2.0"
] | null | null | null |
Tests/Methods/Machine/test_Magnet_Type_11_meth.py
|
Superomeg4/pyleecan
|
2b695b5f39e77475a07aa0ea89489fb0a9659337
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@date Created on Thu Dec 18 13:56:33 2014
@copyright (C) 2014-2015 EOMYS ENGINEERING.
@author pierre_b
"""
from unittest import TestCase
from ddt import ddt, data
from pyleecan.Classes.Arc1 import Arc1
from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.MagnetType11 import MagnetType11
from pyleecan.Classes.LamSlotMag import LamSlotMag
from pyleecan.Classes.SlotMPolar import SlotMPolar
from numpy import pi, exp, angle, array
from pyleecan.Methods.Machine.Magnet.comp_surface import comp_surface
Mag11_test = list()
# Internal Slot surface
lam = LamSlotMag(is_internal=True, Rext=0.5)
lam.slot = SlotMPolar(H0=0, W0=pi / 4, Zs=4)
lam.slot.magnet = [MagnetType11(Hmag=1, Wmag=pi / 4)]
Mag11_test.append({"test_obj": lam, "S_exp": 0.78539616, "Ao": pi / 4, "H_exp": 1})
# Internal Slot inset
lam = LamSlotMag(is_internal=True, Rext=0.5)
lam.slot = SlotMPolar(H0=40e-3, W0=pi / 4, Zs=4)
lam.slot.magnet = [MagnetType11(Hmag=20e-3, Wmag=pi / 4)]
Mag11_test.append({"test_obj": lam, "S_exp": 7.3827e-3, "Ao": pi / 4, "H_exp": 20e-3})
# Outward Slot inset
lam = LamSlotMag(is_internal=False, Rext=0.1325)
lam.slot = SlotMPolar(H0=5e-3, W0=pi / 10, Zs=8)
lam.slot.magnet = [MagnetType11(Hmag=8e-3, Wmag=pi / 12)]
Mag11_test.append({"test_obj": lam, "S_exp": 2.09439e-6, "Ao": pi / 12, "H_exp": 8e-3})
# For AlmostEqual
DELTA = 1e-4
@ddt
class test_Magnet_Type_11_meth(TestCase):
"""unittest for MagnetType11 methods
"""
@data(*Mag11_test)
def test_comp_surface(self, test_dict):
"""Check that the computation of the surface is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_surface()
a = result
b = test_dict["S_exp"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
# Compare numerical and analytical results
b = comp_surface(test_obj.slot.magnet[0])
msg = "Analytical: " + str(a) + " Numerical " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
@data(*Mag11_test)
def test_comp_height(self, test_dict):
"""Check that the computation of the height is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_height()
a = result
b = test_dict["H_exp"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
@data(*Mag11_test)
def test_comp_angle_op(self, test_dict):
"""Check that the computation of the opening angle is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_angle_opening()
a = result
b = test_dict["Ao"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
def test_build_geometry_out(self):
"""check that curve_list is correct (outwards magnet)"""
lam = LamSlotMag(
Rint=40e-3,
Rext=90e-3,
is_internal=False,
is_stator=False,
L1=0.45,
Nrvd=1,
Wrvd=0.05,
)
magnet = [MagnetType11(Wmag=pi / 10, Hmag=0.2)]
lam.slot = SlotMPolar(Zs=8, W0=pi / 10, H0=0.2, magnet=magnet)
test_obj = lam.slot.magnet[0]
Z1 = (40e-3 + 0.2) * exp(-1j * pi / 10 / 2)
Z2 = (40e-3 + 0.2) * exp(1j * pi / 10 / 2)
Z = abs(Z1)
Z3 = (Z - 0.2) * exp(1j * angle(Z1))
Z4 = (Z - 0.2) * exp(1j * angle(Z2))
# Creation of curve
curve_list = list()
curve_list.append(Segment(Z1, Z3))
curve_list.append(Arc1(Z3, Z4, abs(Z3)))
curve_list.append(Segment(Z4, Z2))
curve_list.append(Arc1(Z2, Z1, -abs(Z2)))
surface = test_obj.build_geometry()
result = surface[0].get_lines()
for i in range(0, len(result)):
a = result[i].begin
b = curve_list[i].begin
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
a = result[i].end
b = curve_list[i].end
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
def test_build_geometry_in(self):
"""check that curve_list is correct (inwards magnet)"""
lam = LamSlotMag(
Rint=40e-1,
Rext=90e-1,
is_internal=True,
is_stator=False,
L1=0.45,
Nrvd=1,
Wrvd=0.05,
)
magnet = [MagnetType11(Wmag=pi / 10, Hmag=0.2)]
lam.slot = SlotMPolar(Zs=8, W0=pi / 10, H0=0.2, magnet=magnet)
test_obj = lam.slot.magnet[0]
Z1 = (90e-1 - 0.2) * exp(-1j * pi / 10 / 2)
Z2 = (90e-1 - 0.2) * exp(1j * pi / 10 / 2)
Z = abs(Z1)
Z3 = (Z + 0.2) * exp(1j * angle(Z1))
Z4 = (Z + 0.2) * exp(1j * angle(Z2))
# Creation of curve
curve_list = list()
curve_list.append(Segment(Z1, Z3))
curve_list.append(Arc1(Z3, Z4, abs(Z3)))
curve_list.append(Segment(Z4, Z2))
curve_list.append(Arc1(Z2, Z1, -abs(Z2)))
surface = test_obj.build_geometry()
result = surface[0].get_lines()
for i in range(0, len(result)):
a = result[i].begin
b = curve_list[i].begin
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
a = result[i].end
b = curve_list[i].end
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
| 33.035294
| 87
| 0.579238
| 821
| 5,616
| 3.848965
| 0.177832
| 0.037658
| 0.055696
| 0.058228
| 0.704747
| 0.652532
| 0.631962
| 0.612342
| 0.601899
| 0.551266
| 0
| 0.069008
| 0.274929
| 5,616
| 169
| 88
| 33.230769
| 0.707024
| 0.108974
| 0
| 0.551724
| 0
| 0
| 0.034295
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 1
| 0.043103
| false
| 0
| 0.077586
| 0
| 0.12931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3db7f4c59462c81c92a9534466aa08adc11bb16
| 4,600
|
py
|
Python
|
tomo_encoders/tasks/void_mapping.py
|
arshadzahangirchowdhury/TomoEncoders
|
9c2b15fd515d864079f198546821faee5d78df17
|
[
"BSD-3-Clause"
] | null | null | null |
tomo_encoders/tasks/void_mapping.py
|
arshadzahangirchowdhury/TomoEncoders
|
9c2b15fd515d864079f198546821faee5d78df17
|
[
"BSD-3-Clause"
] | null | null | null |
tomo_encoders/tasks/void_mapping.py
|
arshadzahangirchowdhury/TomoEncoders
|
9c2b15fd515d864079f198546821faee5d78df17
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
from operator import mod
from tomo_encoders.misc.voxel_processing import modified_autocontrast, TimerGPU
from tomo_encoders.reconstruction.recon import recon_patches_3d
import cupy as cp
import numpy as np
from skimage.filters import threshold_otsu
from tomo_encoders import Grid
def get_values_cyl_mask(vol, mask_fac):
vol_shape = vol.shape
assert vol_shape[1] == vol_shape[2], "must be a tomographic volume where shape y = shape x"
shape_yx = vol_shape[1]
shape_z = vol_shape[0]
rad = int(mask_fac*shape_yx/2)
pts = cp.arange(-int(shape_yx//2), int(cp.ceil(shape_yx//2)))
yy, xx = cp.meshgrid(pts, pts, indexing = 'ij')
circ = (cp.sqrt(yy**2 + xx**2) < rad).astype(cp.uint8) # inside is positive
circ = circ[cp.newaxis, ...]
cyl = cp.repeat(circ, shape_z, axis = 0)
return vol[cyl > 0]
def cylindrical_mask(out_vol, mask_fac, mask_val = 0):
vol_shape = out_vol.shape
assert vol_shape[1] == vol_shape[2], "must be a tomographic volume where shape y = shape x"
shape_yx = vol_shape[1]
shape_z = vol_shape[0]
rad = int(mask_fac*shape_yx/2)
pts = cp.arange(-int(shape_yx//2), int(cp.ceil(shape_yx//2)))
yy, xx = cp.meshgrid(pts, pts, indexing = 'ij')
circ = (cp.sqrt(yy**2 + xx**2) < rad).astype(cp.uint8) # inside is positive
circ = circ[cp.newaxis, ...]
cyl = cp.repeat(circ, shape_z, axis = 0)
out_vol[cyl == 0] = mask_val
return
def segment_otsu(vol, s = 0.05):
'''segment volume with otsu'''
timer = TimerGPU()
timer.tic()
tmp_values = vol[::4,::4,::4].get()
# rec_min_max = modified_autocontrast(tmp_values, s = s, normalize_sampling_factor=1)
thresh = cp.float32(threshold_otsu(tmp_values.reshape(-1)))
vol = (vol < thresh).astype(cp.uint8)
timer.toc("otsu thresholding")
return vol
def edge_map(Y):
'''
this algorithm was inspired by: https://github.com/tomochallenge/tomochallenge_utils/blob/master/foam_phantom_utils.py
'''
msk = cp.zeros_like(Y)
tmp = Y[:-1]!=Y[1:]
msk[:-1][tmp] = 1
msk[1:][tmp] = 1
tmp = Y[:,:-1]!=Y[:,1:]
msk[:,:-1][tmp] = 1
msk[:,1:][tmp] = 1
tmp = Y[:,:,:-1]!=Y[:,:,1:]
msk[:,:,:-1][tmp] = 1
msk[:,:,1:][tmp] = 1
return msk > 0
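# Shape intuition for `edge_map` (added note): voxels on *both* sides of a
# label change are flagged, so a two-slab volume lights up two planes:
#   Y = cp.zeros((4, 4, 4), dtype=cp.uint8); Y[2:] = 1
#   edge_map(Y).sum() == 2 * 4 * 4   # planes z=1 and z=2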
def guess_surface(V_bin, b, wd):
# find patches on surface
wdb = int(wd//b)
p3d = Grid(V_bin.shape, width = wdb)
x = p3d.extract(V_bin)
is_surf = (np.std(x, axis = (1,2,3)) > 0.0)
is_ones = (np.sum(x, axis = (1,2,3))/(wdb**3) == 1)
is_zeros = (np.sum(x, axis = (1,2,3))/(wdb**3) == 0)
p3d = p3d.rescale(b)
p3d_surf = p3d.filter_by_condition(is_surf)
p3d_ones = p3d.filter_by_condition(is_ones)
p3d_zeros = p3d.filter_by_condition(is_zeros)
eff = len(p3d_surf)*(wd**3)/np.prod(p3d_surf.vol_shape)
print(f"\tSTAT: r value: {eff*100.0:.2f}")
return p3d_surf, p3d_ones, p3d_zeros
def process_patches(projs, theta, center, fe, p_surf, min_max, TIMEIT = False):
# SCHEME 1: integrate reconstruction and segmention (segments data on gpu itself)
# st_proc = cp.cuda.Event(); end_proc = cp.cuda.Event(); st_proc.record()
# x_surf, p_surf = recon_patches_3d(projs, theta, center, p_surf, \
# apply_fbp = True, segmenter = fe, \
# segmenter_batch_size = 256)
# end_proc.record(); end_proc.synchronize(); t_surf = cp.cuda.get_elapsed_time(st_proc,end_proc)
# SCHEME 2: reconstruct and segment separately (copies rec data from gpu to cpu)
st_rec = cp.cuda.Event(); end_rec = cp.cuda.Event(); st_rec.record()
x_surf, p_surf = recon_patches_3d(projs, theta, center, p_surf, \
apply_fbp =True)
end_rec.record(); end_rec.synchronize(); t_rec = cp.cuda.get_elapsed_time(st_rec,end_rec)
st_seg = cp.cuda.Event(); end_seg = cp.cuda.Event(); st_seg.record()
x_surf = np.clip(x_surf, *min_max)
x_surf = fe.predict_patches("segmenter", x_surf[...,np.newaxis], 256, None, min_max = min_max)[...,0]
end_seg.record(); end_seg.synchronize(); t_seg = cp.cuda.get_elapsed_time(st_seg,end_seg)
print(f'\tTIME: local reconstruction - {t_rec/1000.0:.2f} secs')
print(f'\tTIME: local segmentation - {t_seg/1000.0:.2f} secs')
print(f'\tSTAT: total patches in neighborhood: {len(p_surf)}')
if TIMEIT:
return x_surf, p_surf, t_rec, t_seg
else:
return x_surf, p_surf
| 35.384615
| 122
| 0.62087
| 728
| 4,600
| 3.721154
| 0.273352
| 0.038391
| 0.017719
| 0.017719
| 0.370986
| 0.331857
| 0.294943
| 0.294943
| 0.294943
| 0.282392
| 0
| 0.032773
| 0.223913
| 4,600
| 129
| 123
| 35.658915
| 0.72605
| 0.186739
| 0
| 0.219512
| 0
| 0
| 0.087591
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 1
| 0.073171
| false
| 0
| 0.085366
| 0
| 0.243902
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3dd7fa87a5a13e38a56d66d0de7938491e30d3e
| 793
|
py
|
Python
|
TuShare/view/sh_margins.py
|
lwh2015/TuShare
|
f244e05e5cf208e18e6237d3b81f71f0d3c1394a
|
[
"MIT"
] | 1
|
2018-09-26T08:34:02.000Z
|
2018-09-26T08:34:02.000Z
|
TuShare/view/sh_margins.py
|
lwh2015/TuShare
|
f244e05e5cf208e18e6237d3b81f71f0d3c1394a
|
[
"MIT"
] | null | null | null |
TuShare/view/sh_margins.py
|
lwh2015/TuShare
|
f244e05e5cf208e18e6237d3b81f71f0d3c1394a
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
import json
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import tushare as ts
from .publiceClass import DateEncoder
@csrf_exempt
def sh_margins(request):
try:
start = request.POST.get('start', '')  # optional
end = request.POST.get('end', '')  # optional
data = ts.sh_margins(start,end)
res = {'columns':[
'信用交易日期',  # margin trading date
'本日融资余额(元)',  # financing (margin buying) balance today, CNY
'本日融资买入额(元)',  # financing purchases today, CNY
'本日融券余量',  # securities-lending (short) volume outstanding today
'本日融券余量金额(元)',  # securities-lending balance today, CNY
'本日融券卖出量',  # securities-lending sales today
'本日融资融券余额(元)'  # total margin balance today, CNY
],'data':json.loads(json.dumps(data.values,cls=DateEncoder))}
except Exception as exc:
    return HttpResponse(str(exc))  # returning the exception class was a bug
else:
return HttpResponse(json.dumps(res),content_type="application/json")
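# Illustrative call (added; the URL route is hypothetical and depends on the
# project's urls.py):
#   from django.test import Client
#   resp = Client().post('/sh_margins/', {'start': '2018-01-01', 'end': '2018-01-31'})
#   resp.json()['columns']  # the margin-trading column headers above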
| 26.433333
| 76
| 0.596469
| 88
| 793
| 5.318182
| 0.568182
| 0.042735
| 0.059829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001712
| 0.263556
| 793
| 29
| 77
| 27.344828
| 0.799658
| 0.031526
| 0
| 0
| 0
| 0
| 0.124509
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.208333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3de1f30f9f2a9d6efbf703fb8df76e65a62d871
| 1,181
|
py
|
Python
|
intermediate/classes/camera.py
|
robertob45/learning-python
|
7407f7d9e513792150eb2b65ebc644b5f8632c56
|
[
"MIT"
] | null | null | null |
intermediate/classes/camera.py
|
robertob45/learning-python
|
7407f7d9e513792150eb2b65ebc644b5f8632c56
|
[
"MIT"
] | null | null | null |
intermediate/classes/camera.py
|
robertob45/learning-python
|
7407f7d9e513792150eb2b65ebc644b5f8632c56
|
[
"MIT"
] | null | null | null |
class Camera:
"""docstring for ."""
def __init__(self, brand, sensor, lens, battery):
self.brand = brand
self.sensor = sensor
self.lens = lens
self.battery = battery
def __str__(self):
return self.brand + ' ' + self.sensor + ' ' + self.lens + ' ' + self.battery
def focus(self):
print('Focusing using', self.lens, '...')
print('')
def frame(self):
print('Move until your subject is in the desired position')
print('.')
print('.')
print('.')
def flash(self, flash_use):
if flash_use == 's':
print('Shooting with flash...')
else:
print('Shooting without flash...')
print('')
def format(self, save_format):
if save_format == 'jpg':
print('Saving in: ' + save_format)
elif save_format == 'raw':
print('Saving in: ' + save_format)
else:
print('No valid format to save')
def take_picture(self, save_format, flash_use):
print('Say cheese!')
self.focus()
self.frame()
self.flash(flash_use)
self.format(save_format)
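# Usage sketch (added): the argument values are illustrative.
if __name__ == '__main__':
    cam = Camera('Canon', 'full-frame', '50mm f/1.8', 'LP-E6')
    cam.take_picture(save_format='raw', flash_use='s')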
| 27.465116
| 84
| 0.531753
| 132
| 1,181
| 4.606061
| 0.363636
| 0.115132
| 0.049342
| 0.055921
| 0.075658
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.329382
| 1,181
| 42
| 85
| 28.119048
| 0.767677
| 0.012701
| 0
| 0.257143
| 0
| 0
| 0.157759
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0.028571
| 0.257143
| 0.371429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3de3ec0c21d41a610e2d90e04c28f83ca0ba4c2
| 7,332
|
py
|
Python
|
dbaas/tsuru/tests/test_service_add.py
|
didindinn/database-as-a-service
|
747de31ff8546f7874ddd654af860e130afd17a0
|
[
"BSD-3-Clause"
] | null | null | null |
dbaas/tsuru/tests/test_service_add.py
|
didindinn/database-as-a-service
|
747de31ff8546f7874ddd654af860e130afd17a0
|
[
"BSD-3-Clause"
] | null | null | null |
dbaas/tsuru/tests/test_service_add.py
|
didindinn/database-as-a-service
|
747de31ff8546f7874ddd654af860e130afd17a0
|
[
"BSD-3-Clause"
] | null | null | null |
from mock import patch, MagicMock
from django.contrib.auth.models import User
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.utils.datastructures import MultiValueDictKeyError
from account.models import Role, Team, Organization
from physical.tests.factory import EnvironmentFactory, PlanFactory
from physical.models import Plan
class ValidationTestCase(TestCase):
"""HTTP test cases for the tsuru Service Add. This class focuses on
validations of POST
"""
USERNAME = "fake_user"
PASSWORD = "123456"
def setUp(self):
self.role = Role.objects.get_or_create(name="fake_role")[0]
self.organization = Organization.objects.get_or_create(
name='fake_organization'
)[0]
self.team = Team.objects.get_or_create(
name="fake_team", role=self.role,
organization=self.organization)[0]
self.superuser = User.objects.create_superuser(
self.USERNAME,
email="{}@admin.com".format(self.USERNAME),
password=self.PASSWORD
)
self.team.users.add(self.superuser)
self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.env = 'dev'
self.environment = EnvironmentFactory.create(name=self.env)
self.url = reverse('tsuru:service-add', args=(self.env,))
self.name = 'fake_database'
self.user = '{}@admin.com'.format(self.USERNAME)
self.description = 'fake desc'
self.plan = PlanFactory(name='fake_plan', provider=Plan.CLOUDSTACK)
self.plan.environments.add(self.environment)
self.plan_name = 'fake-plan-dev'
def tearDown(self):
self.client.logout()
def _assert_resp(self, resp, msg):
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.content, msg)
def test_name_not_in_payload(self):
with self.assertRaises(MultiValueDictKeyError):
self.client.post(self.url, {})
def test_user_not_in_payload(self):
with self.assertRaises(MultiValueDictKeyError):
self.client.post(
self.url,
{'name': self.name}
)
def test_team_not_in_payload(self):
with self.assertRaises(MultiValueDictKeyError):
self.client.post(
self.url,
{'name': self.name, 'user': self.user}
)
def test_description_fail(self):
resp = self.client.post(
self.url,
{'name': self.name, 'user': self.user, 'team': self.team}
)
self._assert_resp(resp, '"A description must be provided."')
def test_name_fail(self):
resp = self.client.post(
self.url,
{
'name': '99invalid-name',
'user': self.user,
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"Your database name must match /^[a-z][a-z0-9_]+$/ ."'
)
@patch('tsuru.views.Database.objects.get', new=MagicMock())
def test_database_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"There is already a database called fake_database in dev."'
)
@patch(
'tsuru.views.database_name_evironment_constraint',
new=MagicMock(return_value=True)
)
def test_already_exist_database_with_name(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"fake_database already exists in env dev!"'
)
def test_user_not_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': 'another_user@not_found.com',
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"User does not exist."'
)
def test_team_not_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': 'another_user@not_found.com',
'description': self.description,
'team': 'team_not_found'
}
)
self._assert_resp(
resp,
'"User does not exist."'
)
def test_env_not_found(self):
self.url = self.url.replace(
'/{}/'.format(self.env),
'/env_not_found/'
)
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name
}
)
self._assert_resp(
resp,
'"Environment does not exist."'
)
@patch(
'tsuru.views.Team.count_databases_in_use',
new=MagicMock(return_value=99)
)
def test_allocation_limit(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name
}
)
self._assert_resp(
resp,
('"The database alocation limit of 2 has been exceeded for the '
'selected team: fake_team"')
)
def test_plan_not_on_payload(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name
}
)
self._assert_resp(
resp,
'"Plan was not found"'
)
def test_plan_not_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name,
'plan': 'not found'
}
)
self._assert_resp(
resp,
'"Plan was not found"'
)
@patch('notification.tasks.TaskRegister.create_task', new=MagicMock())
@patch('notification.tasks.create_database_with_retry')
def test_call_database_create(self, create_database_mock):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name,
'plan': self.plan_name
}
)
self.assertTrue(create_database_mock.called)
self.assertEqual(resp.status_code, 201)
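For reference, a payload that passes every validation exercised above; all values come from the fakes defined in setUp, with 'team' and 'plan' given as names, as in test_call_database_create:

payload = {
    'name': 'fake_database',
    'user': 'fake_user@admin.com',
    'description': 'fake desc',
    'team': 'fake_team',
    'plan': 'fake-plan-dev',
}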
| 30.17284
| 76
| 0.524686
| 740
| 7,332
| 5.048649
| 0.187838
| 0.038544
| 0.052463
| 0.067452
| 0.492773
| 0.466542
| 0.422109
| 0.414079
| 0.401231
| 0.390257
| 0
| 0.004723
| 0.364703
| 7,332
| 242
| 77
| 30.297521
| 0.797338
| 0.011457
| 0
| 0.419355
| 0
| 0
| 0.152006
| 0.035685
| 0
| 0
| 0
| 0
| 0.082949
| 1
| 0.078341
| false
| 0.013825
| 0.036866
| 0
| 0.129032
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3e06ae8cd6e0aabca5915c1a17ae312a2a03a30
| 734
|
py
|
Python
|
gryphon/data/template_scaffolding/template/setup.py
|
ow-gryphon/gryphon
|
0b34f2f61a50af46b9d1ec1d3c15d53cf4055dd5
|
[
"MIT"
] | null | null | null |
gryphon/data/template_scaffolding/template/setup.py
|
ow-gryphon/gryphon
|
0b34f2f61a50af46b9d1ec1d3c15d53cf4055dd5
|
[
"MIT"
] | 1
|
2022-03-08T14:54:26.000Z
|
2022-03-08T15:02:52.000Z
|
gryphon/data/template_scaffolding/template/setup.py
|
ow-gryphon/gryphon
|
0b34f2f61a50af46b9d1ec1d3c15d53cf4055dd5
|
[
"MIT"
] | null | null | null |
import json
import setuptools
with open("template/README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as fr:
requirements = fr.read().strip().split('\n')
with open('metadata.json') as fr:
metadata = json.load(fr)
setuptools.setup(
name="", # Name of the repository
version="0.0.1",
author=metadata.get("author", ""),
author_email=metadata.get("author_email", ""),
description=metadata.get("description", ""),
long_description=long_description,
long_description_content_type="text/markdown",
url="", # Repository URL or externally maintained page
packages=setuptools.find_packages(),
python_requires='>=3.6',
install_requires=requirements,
)
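A sketch of a metadata.json this script would accept; only the keys are taken from the metadata.get() calls above, and the values are hypothetical:

example_metadata = {
    "author": "Jane Doe",
    "author_email": "jane@example.com",
    "description": "Short project description",
}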
| 28.230769
| 59
| 0.688011
| 91
| 734
| 5.428571
| 0.538462
| 0.121457
| 0.157895
| 0.121457
| 0.11336
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008091
| 0.158038
| 734
| 25
| 60
| 29.36
| 0.791262
| 0.091281
| 0
| 0
| 0
| 0
| 0.153614
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.095238
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3e0ad9312af3accd64fc327daefc5bf89405ae4
| 6,558
|
py
|
Python
|
train_base3.py
|
Mhaiyang/iccv
|
04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb
|
[
"MIT"
] | 2
|
2019-01-10T03:44:03.000Z
|
2019-05-24T08:50:14.000Z
|
train_base3.py
|
Mhaiyang/iccv
|
04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb
|
[
"MIT"
] | null | null | null |
train_base3.py
|
Mhaiyang/iccv
|
04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb
|
[
"MIT"
] | null | null | null |
"""
@Time : 201/21/19 10:41
@Author : TaylorMei
@Email : mhy845879017@gmail.com
@Project : iccv
@File : train_base3.py
@Function:
"""
import datetime
import os
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision import transforms
from tensorboardX import SummaryWriter
from tqdm import tqdm
import joint_transforms
from config import msd_training_root
from config import backbone_path
from dataset import ImageFolder
from misc import AvgMeter, check_mkdir
from model.base3 import BASE3
import loss as L
cudnn.benchmark = True
device_ids = [2]
ckpt_path = './ckpt'
exp_name = 'BASE3'
args = {
'epoch_num': 100,
'train_batch_size': 14,
'last_epoch': 0,
'lr': 5e-3,
'lr_decay': 0.9,
'weight_decay': 5e-4,
'momentum': 0.9,
'snapshot': '',
'scale': 384,
'save_point': [60, 80, 90],
'add_graph': True,
'poly_train': True,
'optimizer': 'SGD'
}
# Path.
check_mkdir(ckpt_path)
check_mkdir(os.path.join(ckpt_path, exp_name))
vis_path = os.path.join(ckpt_path, exp_name, 'log')
check_mkdir(vis_path)
log_path = os.path.join(ckpt_path, exp_name, str(datetime.datetime.now()) + '.txt')
writer = SummaryWriter(log_dir=vis_path, comment=exp_name)
# Transform Data.
joint_transform = joint_transforms.Compose([
joint_transforms.RandomRotate(),
joint_transforms.Resize((args['scale'], args['scale']))
])
img_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # maybe can be optimized.
])
target_transform = transforms.ToTensor()
# Prepare Data Set.
train_set = ImageFolder(msd_training_root, joint_transform, img_transform, target_transform)
print("Train set: {}".format(train_set.__len__()))
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=0, shuffle=True)
def main():
print(args)
print(exp_name)
net = BASE3(backbone_path).cuda(device_ids[0]).train()
if args['add_graph']:
writer.add_graph(net, input_to_model=torch.rand(
args['train_batch_size'], 3, args['scale'], args['scale']).cuda(device_ids[0]))
if args['optimizer'] == 'Adam':
print("Adam")
optimizer = optim.Adam([
{'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
'lr': 2 * args['lr']},
{'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
'lr': 1 * args['lr'], 'weight_decay': args['weight_decay']}
])
else:
print("SGD")
optimizer = optim.SGD([
{'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
'lr': 2 * args['lr']},
{'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
'lr': 1 * args['lr'], 'weight_decay': args['weight_decay']}
], momentum=args['momentum'])
if len(args['snapshot']) > 0:
print('Training Resumes From \'%s\'' % args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
net = nn.DataParallel(net, device_ids=device_ids)
print("Using {} GPU(s) to Train.".format(len(device_ids)))
open(log_path, 'w').write(str(args) + '\n\n')
train(net, optimizer)
writer.close()
def train(net, optimizer):
curr_iter = 1
for epoch in range(args['last_epoch'] + 1, args['last_epoch'] + 1 + args['epoch_num']):
loss_4_record, loss_3_record, loss_2_record, loss_1_record, \
loss_f_record, loss_record = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter()
train_iterator = tqdm(train_loader, total=len(train_loader))
for data in train_iterator:
if args['poly_train']:
base_lr = args['lr'] * (1 - float(curr_iter) / (args['epoch_num'] * len(train_loader))) ** args[
'lr_decay']
optimizer.param_groups[0]['lr'] = 2 * base_lr
optimizer.param_groups[1]['lr'] = 1 * base_lr
inputs, labels = data
batch_size = inputs.size(0)
inputs = Variable(inputs).cuda(device_ids[0])
labels = Variable(labels).cuda(device_ids[0])
optimizer.zero_grad()
predict_4, predict_3, predict_2, predict_1, predict_f = net(inputs)
loss_4 = L.lovasz_hinge(predict_4, labels)
loss_3 = L.lovasz_hinge(predict_3, labels)
loss_2 = L.lovasz_hinge(predict_2, labels)
loss_1 = L.lovasz_hinge(predict_1, labels)
loss_f = L.lovasz_hinge(predict_f, labels)
loss = loss_4 + loss_3 + loss_2 + loss_1 + loss_f
loss.backward()
optimizer.step()
loss_record.update(loss.data, batch_size)
loss_4_record.update(loss_4.data, batch_size)
loss_3_record.update(loss_3.data, batch_size)
loss_2_record.update(loss_2.data, batch_size)
loss_1_record.update(loss_1.data, batch_size)
loss_f_record.update(loss_f.data, batch_size)
if curr_iter % 50 == 0:
writer.add_scalar('loss', loss, curr_iter)
writer.add_scalar('loss_4', loss_4, curr_iter)
writer.add_scalar('loss_3', loss_3, curr_iter)
writer.add_scalar('loss_2', loss_2, curr_iter)
writer.add_scalar('loss_1', loss_1, curr_iter)
writer.add_scalar('loss_f', loss_f, curr_iter)
log = '[%3d], [%6d], [%.6f], [%.5f], [L4: %.5f], [L3: %.5f], [L2: %.5f], [L1: %.5f], [Lf: %.5f]' % \
(epoch, curr_iter, base_lr, loss_record.avg, loss_4_record.avg, loss_3_record.avg, loss_2_record.avg,
loss_1_record.avg, loss_f_record.avg)
train_iterator.set_description(log)
open(log_path, 'a').write(log + '\n')
curr_iter += 1
if epoch in args['save_point']:
net.cpu()
torch.save(net.module.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % epoch))
net.cuda(device_ids[0])
if epoch >= args['epoch_num']:
net.cpu()
torch.save(net.module.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % epoch))
print("Optimization Have Done!")
return
if __name__ == '__main__':
main()
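The "poly" learning-rate schedule applied inside train() can be isolated as below; a minimal sketch with illustrative numbers, using the default lr and lr_decay from args:

def poly_lr(base_lr, curr_iter, total_iter, power=0.9):
    # decays from base_lr toward 0 over total_iter iterations
    return base_lr * (1 - float(curr_iter) / total_iter) ** power

print(poly_lr(5e-3, 0, 100))   # 0.005 at the first iteration
print(poly_lr(5e-3, 50, 100))  # ~0.00268 halfway through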
| 34.15625
| 119
| 0.617261
| 894
| 6,558
| 4.281879
| 0.223714
| 0.025862
| 0.023772
| 0.021944
| 0.207158
| 0.1779
| 0.142633
| 0.129572
| 0.11442
| 0.11442
| 0
| 0.030975
| 0.236962
| 6,558
| 191
| 120
| 34.335079
| 0.734013
| 0.03004
| 0
| 0.105634
| 0
| 0.007042
| 0.106135
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014085
| false
| 0
| 0.126761
| 0
| 0.147887
| 0.056338
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3e11b8d66ab1bd3a621bca6d89f7a077e4198d7
| 3,584
|
py
|
Python
|
teacher/views.py
|
itteamforslp/safelife_project
|
53af23dec0d19acf7227a43a16d7aedad443e90d
|
[
"MIT"
] | null | null | null |
teacher/views.py
|
itteamforslp/safelife_project
|
53af23dec0d19acf7227a43a16d7aedad443e90d
|
[
"MIT"
] | 4
|
2021-04-08T20:11:37.000Z
|
2021-09-22T19:37:57.000Z
|
safelife/safelife_project/teacher/views.py
|
CSUS-Scrumbags/safelife
|
2de7f83f637fae930b1176af796f4cc6f0519c86
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.template import loader
from django.db import connection
from django.http import HttpResponseRedirect
import datetime
from django.http import JsonResponse
from administrator.models import Course, CourseTeacher, CourseStudent, Student
from django.core.exceptions import PermissionDenied
def teacher_only(function):
#"""Limit view to teacher only."""
def _inner(request, *args, **kwargs):
if not request.user.is_staff == False | request.user.is_superuser:
raise PermissionDenied
return function(request, *args, **kwargs)
return _inner
@login_required(login_url = '/users')
@teacher_only
def home(request):
current_user = request.user.id
teacher_current_courses = Course.objects.select_related().raw('SELECT * '
'FROM course_teachers as CT, courses as C '
'WHERE CT.teachers_id = %s AND C.course_id = CT.course_id AND C.is_complete = 0 ', [current_user])
currentdate = datetime.datetime.today().strftime('%Y-%m-%d')
with connection.cursor() as cursor:
cursor.execute('SELECT CL.course_id, CL.date '
'FROM classes as CL, course_teachers as CT '
'WHERE CT.teachers_id = %s AND CL.date >= %s '
'AND CT.course_id = CL.course_id '
'GROUP BY CL.course_id ', [current_user, currentdate])
next_class_date = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('SELECT CS.course_id, COUNT(CS.students_id) '
'FROM course_teachers as CT, course_students as CS '
'WHERE CT.teachers_id = %s AND CT.course_id = CS.course_id '
'GROUP BY CS.course_id ', [current_user])
teacher_student_count = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('SELECT C.course_id, C.notes '
'FROM course_teachers as CT, courses as C '
'WHERE CT.teachers_id = %s AND C.course_id = CT.course_id '
'GROUP BY CT.course_id ', [current_user])
teacher_course_notes = cursor.fetchall()
template = loader.get_template('teacher/dashboard.html')
context = {
'teacher_current_courses': teacher_current_courses,
'teacher_student_count': teacher_student_count,
'next_class_date': next_class_date,
'teacher_course_notes': teacher_course_notes
}
# Render the template to the user
return HttpResponse(template.render(context, request))
@csrf_exempt
def update_course_notes(request):
# Get the student name that was passed from the web page
courseNotes = request.POST.get('courseNotes')
courseId = request.POST.get('courseId')
# Create a cursor to execute raw SQL queries.
with connection.cursor() as cursor:
cursor.execute('UPDATE courses '
'SET notes = %s '
'WHERE course_id = %s', [courseNotes, courseId])
# Render the response to the user (the original ends without an explicit
# return; an empty HttpResponse is assumed here so the view stays valid)
return HttpResponse('')
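A minimal sketch of the parameterized raw-SQL pattern this view uses throughout (the helper name is hypothetical; the table and column names are taken from the queries above):

from django.db import connection

def count_courses_for_teacher(teacher_id):
    with connection.cursor() as cursor:
        cursor.execute('SELECT COUNT(*) FROM course_teachers '
                       'WHERE teachers_id = %s', [teacher_id])
        return cursor.fetchone()[0]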
| 44.8
| 154
| 0.595145
| 402
| 3,584
| 5.131841
| 0.278607
| 0.058168
| 0.024237
| 0.034901
| 0.239457
| 0.191953
| 0.171595
| 0.151721
| 0.128938
| 0.128938
| 0
| 0.000412
| 0.323382
| 3,584
| 79
| 155
| 45.367089
| 0.850309
| 0.054688
| 0
| 0.1
| 0
| 0.016667
| 0.237363
| 0.025717
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.183333
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3e2215b6ec560d3033ce187558d53690b59cd03
| 33,955
|
py
|
Python
|
pywikibot/site/_datasite.py
|
xqt/pwb
|
9a4fe27138f32952e533256195849d05855df0b0
|
[
"MIT"
] | null | null | null |
pywikibot/site/_datasite.py
|
xqt/pwb
|
9a4fe27138f32952e533256195849d05855df0b0
|
[
"MIT"
] | 1
|
2021-12-08T16:29:41.000Z
|
2021-12-08T16:29:41.000Z
|
pywikibot/site/_datasite.py
|
xqt/pwb
|
9a4fe27138f32952e533256195849d05855df0b0
|
[
"MIT"
] | 2
|
2022-01-04T04:10:38.000Z
|
2022-01-04T04:18:18.000Z
|
"""Objects representing API interface to Wikibase site."""
#
# (C) Pywikibot team, 2012-2022
#
# Distributed under the terms of the MIT license.
#
import datetime
import json
import uuid
from contextlib import suppress
from typing import Optional
from warnings import warn
import pywikibot
from pywikibot.data import api
from pywikibot.exceptions import (
APIError,
EntityTypeUnknownError,
IsRedirectPageError,
NoPageError,
NoWikibaseEntityError,
)
from pywikibot.site._apisite import APISite
from pywikibot.site._decorators import need_extension, need_right, need_version
from pywikibot.tools import itergroup, merge_unique_dicts, remove_last_args
__all__ = ('DataSite', )
class DataSite(APISite):
"""Wikibase data capable site."""
def __init__(self, *args, **kwargs) -> None:
"""Initializer."""
super().__init__(*args, **kwargs)
self._item_namespace = None
self._property_namespace = None
self._type_to_class = {
'item': pywikibot.ItemPage,
'property': pywikibot.PropertyPage,
'mediainfo': pywikibot.MediaInfo,
'lexeme': pywikibot.LexemePage,
'form': pywikibot.LexemeForm,
'sense': pywikibot.LexemeSense,
}
def _cache_entity_namespaces(self) -> None:
"""Find namespaces for each known wikibase entity type."""
self._entity_namespaces = {}
for entity_type in self._type_to_class:
for namespace in self.namespaces.values():
if not hasattr(namespace, 'defaultcontentmodel'):
continue
content_model = namespace.defaultcontentmodel
if content_model == ('wikibase-' + entity_type):
self._entity_namespaces[entity_type] = namespace
break
def get_namespace_for_entity_type(self, entity_type):
"""
Return namespace for given entity type.
:return: corresponding namespace
:rtype: Namespace
"""
if not hasattr(self, '_entity_namespaces'):
self._cache_entity_namespaces()
if entity_type in self._entity_namespaces:
return self._entity_namespaces[entity_type]
raise EntityTypeUnknownError(
'{!r} does not support entity type "{}" '
"or it doesn't have its own namespace"
.format(self, entity_type))
@property
def item_namespace(self):
"""
Return namespace for items.
:return: item namespace
:rtype: Namespace
"""
if self._item_namespace is None:
self._item_namespace = self.get_namespace_for_entity_type('item')
return self._item_namespace
@property
def property_namespace(self):
"""
Return namespace for properties.
:return: property namespace
:rtype: Namespace
"""
if self._property_namespace is None:
self._property_namespace = self.get_namespace_for_entity_type(
'property')
return self._property_namespace
def get_entity_for_entity_id(self, entity_id):
"""
Return a new instance for given entity id.
:raises pywikibot.exceptions.NoWikibaseEntityError: there is no entity
with the id
:return: a WikibaseEntity subclass
:rtype: WikibaseEntity
"""
for cls in self._type_to_class.values():
if cls.is_valid_id(entity_id):
return cls(self, entity_id)
entity = pywikibot.page.WikibaseEntity(self, entity_id)
raise NoWikibaseEntityError(entity)
@property
@need_version('1.28-wmf.3')
def sparql_endpoint(self):
"""
Return the sparql endpoint url, if any has been set.
:return: sparql endpoint url
:rtype: str|None
"""
return self.siteinfo['general'].get('wikibase-sparql')
@property
@need_version('1.28-wmf.23')
def concept_base_uri(self):
"""
Return the base uri for concepts/entities.
:return: concept base uri
:rtype: str
"""
return self.siteinfo['general']['wikibase-conceptbaseuri']
def geo_shape_repository(self):
"""Return Site object for the geo-shapes repository e.g. commons."""
url = self.siteinfo['general'].get('wikibase-geoshapestoragebaseurl')
if url:
return pywikibot.Site(url=url, user=self.username())
return None
def tabular_data_repository(self):
"""Return Site object for the tabular-datas repository e.g. commons."""
url = self.siteinfo['general'].get(
'wikibase-tabulardatastoragebaseurl')
if url:
return pywikibot.Site(url=url, user=self.username())
return None
def loadcontent(self, identification, *props):
"""
Fetch the current content of a Wikibase item.
This is called loadcontent since
wbgetentities does not support fetching old
revisions. Eventually this will get replaced by
an actual loadrevisions.
:param identification: Parameters used to identify the page(s)
:type identification: dict
:param props: the optional properties to fetch.
"""
params = merge_unique_dicts(identification, action='wbgetentities',
# TODO: When props is empty it results in
# an empty string ('&props=') but it should
# result in a missing entry.
props=props if props else False)
req = self.simple_request(**params)
data = req.submit()
if 'success' not in data:
raise APIError(data['errors'], '')
return data['entities']
def preload_entities(self, pagelist, groupsize: int = 50):
"""
Yield subclasses of WikibaseEntity's with content prefilled.
Note that pages will be iterated in a different order
than in the underlying pagelist.
:param pagelist: an iterable that yields either WikibaseEntity objects,
or Page objects linked to an ItemPage.
:param groupsize: how many pages to query at a time
"""
if not hasattr(self, '_entity_namespaces'):
self._cache_entity_namespaces()
for sublist in itergroup(pagelist, groupsize):
req = {'ids': [], 'titles': [], 'sites': []}
for p in sublist:
if isinstance(p, pywikibot.page.WikibaseEntity):
ident = p._defined_by()
for key in ident:
req[key].append(ident[key])
else:
if p.site == self and p.namespace() in (
self._entity_namespaces.values()):
req['ids'].append(p.title(with_ns=False))
else:
assert p.site.has_data_repository, \
'Site must have a data repository'
req['sites'].append(p.site.dbName())
req['titles'].append(p._link._text)
req = self.simple_request(action='wbgetentities', **req)
data = req.submit()
for entity in data['entities']:
if 'missing' in data['entities'][entity]:
continue
cls = self._type_to_class[data['entities'][entity]['type']]
page = cls(self, entity)
# No api call is made because item._content is given
page._content = data['entities'][entity]
with suppress(IsRedirectPageError):
page.get() # cannot provide get_redirect=True (T145971)
yield page
def getPropertyType(self, prop):
"""
Obtain the type of a property.
This is used specifically because we can cache
the value for a much longer time (near infinite).
"""
params = {'action': 'wbgetentities', 'ids': prop.getID(),
'props': 'datatype'}
expiry = datetime.timedelta(days=365 * 100)
# Store it for 100 years
req = self._request(expiry=expiry, parameters=params)
data = req.submit()
# the IDs returned from the API can be upper or lowercase, depending
# on the version. See bug T55894 for more information.
try:
dtype = data['entities'][prop.getID()]['datatype']
except KeyError:
dtype = data['entities'][prop.getID().lower()]['datatype']
return dtype
@need_right('edit')
def editEntity(self, entity, data, bot: bool = True, **kwargs):
"""
Edit entity.
Note: This method is unable to create entities other than 'item'
if dict with API parameters was passed to 'entity' parameter.
:param entity: Page to edit, or dict with API parameters
to use for entity identification
:type entity: WikibaseEntity or dict
:param data: data updates
:type data: dict
:param bot: Whether to mark the edit as a bot edit
:return: New entity data
:rtype: dict
"""
# this changes the reference to a new object
data = dict(data)
if isinstance(entity, pywikibot.page.WikibaseEntity):
params = entity._defined_by(singular=True)
if 'id' in params and params['id'] == '-1':
del params['id']
if not params:
params['new'] = entity.entity_type
data_for_new_entity = entity.get_data_for_new_entity()
data.update(data_for_new_entity)
else:
if 'id' in entity and entity['id'] == '-1':
del entity['id']
params = dict(entity)
if not params: # If no identification was provided
params['new'] = 'item'
params['action'] = 'wbeditentity'
if bot:
params['bot'] = 1
if 'baserevid' in kwargs and kwargs['baserevid']:
params['baserevid'] = kwargs['baserevid']
params['token'] = self.tokens['edit']
for arg in kwargs:
if arg in ['clear', 'summary']:
params[arg] = kwargs[arg]
elif arg != 'baserevid':
warn('Unknown wbeditentity parameter {} ignored'.format(arg),
UserWarning, 2)
params['data'] = json.dumps(data)
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
def addClaim(self, entity, claim, bot: bool = True, summary=None) -> None:
"""
Add a claim.
:param entity: Entity to modify
:type entity: WikibaseEntity
:param claim: Claim to be added
:type claim: pywikibot.Claim
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
claim.snak = entity.getID() + '$' + str(uuid.uuid4())
params = {'action': 'wbsetclaim',
'claim': json.dumps(claim.toJSON()),
'baserevid': entity.latest_revision_id,
'summary': summary,
'token': self.tokens['edit'],
'bot': bot,
}
req = self.simple_request(**params)
data = req.submit()
# Update the item
if claim.getID() in entity.claims:
entity.claims[claim.getID()].append(claim)
else:
entity.claims[claim.getID()] = [claim]
entity.latest_revision_id = data['pageinfo']['lastrevid']
@need_right('edit')
def changeClaimTarget(self, claim, snaktype: str = 'value',
bot: bool = True, summary=None):
"""
Set the claim target to the value of the provided claim target.
:param claim: The source of the claim target value
:type claim: pywikibot.Claim
:param snaktype: An optional snaktype ('value', 'novalue' or
'somevalue'). Default: 'value'
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
if claim.isReference or claim.isQualifier:
raise NotImplementedError
if not claim.snak:
# We need to already have the snak value
raise NoPageError(claim)
params = {'action': 'wbsetclaimvalue', 'claim': claim.snak,
'snaktype': snaktype, 'summary': summary, 'bot': bot,
'token': self.tokens['edit']}
if snaktype == 'value':
params['value'] = json.dumps(claim._formatValue())
params['baserevid'] = claim.on_item.latest_revision_id
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
def save_claim(self, claim, summary=None, bot: bool = True):
"""
Save the whole claim to the wikibase site.
:param claim: The claim to save
:type claim: pywikibot.Claim
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
if claim.isReference or claim.isQualifier:
raise NotImplementedError
if not claim.snak:
# We need to already have the snak value
raise NoPageError(claim)
params = {'action': 'wbsetclaim',
'claim': json.dumps(claim.toJSON()),
'token': self.tokens['edit'],
'baserevid': claim.on_item.latest_revision_id,
'summary': summary,
'bot': bot,
}
req = self.simple_request(**params)
data = req.submit()
claim.on_item.latest_revision_id = data['pageinfo']['lastrevid']
return data
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def editSource(self, claim, source,
new: bool = False,
bot: bool = True,
summary: Optional[str] = None):
"""Create/Edit a source.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to add the source to
:type claim: pywikibot.Claim
:param source: A Claim object to be used as a source
:type source: pywikibot.Claim
:param new: Whether to create a new one if the "source" already exists
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
if claim.isReference or claim.isQualifier:
raise ValueError('The claim cannot have a source.')
params = {'action': 'wbsetreference', 'statement': claim.snak,
'baserevid': claim.on_item.latest_revision_id,
'summary': summary, 'bot': bot, 'token': self.tokens['edit']}
# build up the snak
if isinstance(source, list):
sources = source
else:
sources = [source]
snak = {}
for sourceclaim in sources:
datavalue = sourceclaim._formatDataValue()
valuesnaks = snak.get(sourceclaim.getID(), [])
valuesnaks.append({
'snaktype': 'value',
'property': sourceclaim.getID(),
'datavalue': datavalue,
})
snak[sourceclaim.getID()] = valuesnaks
# set the hash if the source should be changed.
# if present, all claims of one source have the same hash
if not new and hasattr(sourceclaim, 'hash'):
params['reference'] = sourceclaim.hash
params['snaks'] = json.dumps(snak)
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def editQualifier(self, claim, qualifier,
new: bool = False,
bot: bool = True,
summary: Optional[str] = None):
"""Create/Edit a qualifier.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to add the qualifier to
:type claim: pywikibot.Claim
:param qualifier: A Claim object to be used as a qualifier
:type qualifier: pywikibot.Claim
:param new: Whether to create a new one if the "qualifier"
already exists
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
if claim.isReference or claim.isQualifier:
raise ValueError('The claim cannot have a qualifier.')
params = {'action': 'wbsetqualifier', 'claim': claim.snak,
'baserevid': claim.on_item.latest_revision_id,
'summary': summary, 'bot': bot}
if (not new and hasattr(qualifier, 'hash')
and qualifier.hash is not None):
params['snakhash'] = qualifier.hash
params['token'] = self.tokens['edit']
# build up the snak
if qualifier.getSnakType() == 'value':
params['value'] = json.dumps(qualifier._formatValue())
params['snaktype'] = qualifier.getSnakType()
params['property'] = qualifier.getID()
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def removeClaims(self, claims,
bot: bool = True,
summary: Optional[str] = None):
"""Remove claims.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claims: Claims to be removed
:type claims: List[pywikibot.Claim]
:param bot: Whether to mark the edit as a bot edit
:type bot: bool
:param summary: Edit summary
:type summary: str
"""
# Check on_item for all additional claims
items = {claim.on_item for claim in claims if claim.on_item}
assert len(items) == 1
baserevid = items.pop().latest_revision_id
params = {
'action': 'wbremoveclaims', 'baserevid': baserevid,
'summary': summary,
'bot': bot,
'claim': '|'.join(claim.snak for claim in claims),
'token': self.tokens['edit'],
}
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def removeSources(self, claim, sources,
bot: bool = True,
summary: Optional[str] = None):
"""Remove sources.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to remove the sources from
:type claim: pywikibot.Claim
:param sources: A list of Claim objects that are sources
:type sources: list
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
params = {
'action': 'wbremovereferences',
'baserevid': claim.on_item.latest_revision_id,
'summary': summary, 'bot': bot,
'statement': claim.snak,
'references': '|'.join(source.hash for source in sources),
'token': self.tokens['edit'],
}
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def remove_qualifiers(self, claim, qualifiers,
bot: bool = True,
summary: Optional[str] = None):
"""Remove qualifiers.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to remove the qualifier from
:type claim: pywikibot.Claim
:param qualifiers: Claim objects currently used as a qualifiers
:type qualifiers: List[pywikibot.Claim]
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
params = {
'action': 'wbremovequalifiers',
'claim': claim.snak,
'baserevid': claim.on_item.latest_revision_id,
'summary': summary,
'bot': bot,
'qualifiers': [qualifier.hash for qualifier in qualifiers],
'token': self.tokens['edit']
}
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
def linkTitles(self, page1, page2, bot: bool = True):
"""
Link two pages together.
:param page1: First page to link
:type page1: pywikibot.Page
:param page2: Second page to link
:type page2: pywikibot.Page
:param bot: Whether to mark the edit as a bot edit
:return: dict API output
:rtype: dict
"""
params = {
'action': 'wblinktitles',
'tosite': page1.site.dbName(),
'totitle': page1.title(),
'fromsite': page2.site.dbName(),
'fromtitle': page2.title(),
'token': self.tokens['edit']
}
if bot:
params['bot'] = 1
req = self.simple_request(**params)
return req.submit()
@need_right('item-merge')
def mergeItems(self, from_item, to_item, ignore_conflicts=None,
summary=None, bot: bool = True):
"""
Merge two items together.
:param from_item: Item to merge from
:type from_item: pywikibot.ItemPage
:param to_item: Item to merge into
:type to_item: pywikibot.ItemPage
:param ignore_conflicts: Which type of conflicts
('description', 'sitelink', and 'statement')
should be ignored
:type ignore_conflicts: list of str
:param summary: Edit summary
:type summary: str
:param bot: Whether to mark the edit as a bot edit
:return: dict API output
:rtype: dict
"""
params = {
'action': 'wbmergeitems',
'fromid': from_item.getID(),
'toid': to_item.getID(),
'ignoreconflicts': ignore_conflicts,
'token': self.tokens['edit'],
'summary': summary,
}
if bot:
params['bot'] = 1
req = self.simple_request(**params)
return req.submit()
@need_right('item-merge')
@need_extension('WikibaseLexeme')
def mergeLexemes(self, from_lexeme, to_lexeme, summary=None, *,
bot: bool = True) -> dict:
"""
Merge two lexemes together.
:param from_lexeme: Lexeme to merge from
:type from_lexeme: pywikibot.LexemePage
:param to_lexeme: Lexeme to merge into
:type to_lexeme: pywikibot.LexemePage
:param summary: Edit summary
:type summary: str
:keyword bot: Whether to mark the edit as a bot edit
:return: dict API output
"""
params = {
'action': 'wblmergelexemes',
'source': from_lexeme.getID(),
'target': to_lexeme.getID(),
'token': self.tokens['edit'],
'summary': summary,
}
if bot:
params['bot'] = 1
req = self.simple_request(**params)
data = req.submit()
return data
@need_right('item-redirect')
def set_redirect_target(self, from_item, to_item, bot: bool = True):
"""
Make a redirect to another item.
:param to_item: title of target item.
:type to_item: pywikibot.ItemPage
:param from_item: Title of the item to be redirected.
:type from_item: pywikibot.ItemPage
:param bot: Whether to mark the edit as a bot edit
"""
params = {
'action': 'wbcreateredirect',
'from': from_item.getID(),
'to': to_item.getID(),
'token': self.tokens['edit'],
'bot': bot,
}
req = self.simple_request(**params)
return req.submit()
def search_entities(self, search: str, language: str,
total: Optional[int] = None, **kwargs):
"""
Search for pages or properties that contain the given text.
:param search: Text to find.
:param language: Language to search in.
:param total: Maximum number of pages to retrieve in total, or
None in case of no limit.
:return: 'search' list from API output.
:rtype: Generator
"""
lang_codes = self._paraminfo.parameter('wbsearchentities',
'language')['type']
if language not in lang_codes:
raise ValueError('Data site used does not support provided '
'language.')
if 'site' in kwargs:
if kwargs['site'].sitename != self.sitename:
raise ValueError('The site given in the kwargs is different.')
warn('search_entities should not get a site via kwargs.',
UserWarning, 2)
del kwargs['site']
parameters = dict(search=search, language=language, **kwargs)
gen = self._generator(api.APIGenerator,
type_arg='wbsearchentities',
data_name='search',
total=total, parameters=parameters)
return gen
@need_right('edit')
def _wbset_action(self, itemdef, action: str, action_data,
**kwargs) -> dict:
"""
Execute wbset{action} on a Wikibase entity.
Supported actions are:
wbsetaliases, wbsetdescription, wbsetlabel and wbsetsitelink
:param itemdef: Entity to modify or create
:type itemdef: str, WikibaseEntity or Page connected to such item
:param action: wbset{action} to perform:
'wbsetaliases', 'wbsetdescription', 'wbsetlabel', 'wbsetsitelink'
:param action_data: data to be used in API request, see API help
:type action_data: SiteLink or dict
wbsetaliases:
dict shall have the following structure:
{'language': value (str),
'add': list of language codes (str),
'remove': list of language codes (str),
'set' list of language codes (str)
}
'add' and 'remove' are alternative to 'set'
wbsetdescription and wbsetlabel:
dict shall have keys 'language', 'value'
wbsetsitelink:
dict shall have keys 'linksite', 'linktitle' and
optionally 'badges'
:keyword bot: Whether to mark the edit as a bot edit, default is True
:type bot: bool
:keyword tags: Change tags to apply with the edit
:type tags: list of str
:return: query result
:raises AssertionError, TypeError
"""
def format_sitelink(sitelink):
"""Convert SiteLink to a dict accepted by wbsetsitelink API."""
if isinstance(sitelink, pywikibot.page.SiteLink):
_dict = {
'linksite': sitelink._sitekey,
'linktitle': sitelink._rawtitle,
'badges': '|'.join([b.title() for b in sitelink.badges]),
}
else:
_dict = sitelink
return _dict
def prepare_data(action, data):
"""Prepare data as expected by API."""
if action == 'wbsetaliases':
res = data
keys = set(res)
assert keys < {'language', 'add', 'remove', 'set'}
assert 'language' in keys
assert ({'add', 'remove', 'set'} & keys)
assert ({'add', 'set'} >= keys)
assert ({'remove', 'set'} >= keys)
elif action in ('wbsetlabel', 'wbsetdescription'):
res = data
keys = set(res)
assert keys == {'language', 'value'}
elif action == 'wbsetsitelink':
res = format_sitelink(data)
keys = set(res)
assert keys >= {'linksite'}
assert keys <= {'linksite', 'linktitle', 'badges'}
else:
raise ValueError('Something has gone wrong ...')
return res
# Supported actions
assert action in ('wbsetaliases', 'wbsetdescription',
'wbsetlabel', 'wbsetsitelink'), \
'action {} not supported.'.format(action)
# prefer ID over (site, title)
if isinstance(itemdef, str):
itemdef = self.get_entity_for_entity_id(itemdef)
elif isinstance(itemdef, pywikibot.Page):
itemdef = pywikibot.ItemPage.fromPage(itemdef, lazy_load=True)
elif not isinstance(itemdef, pywikibot.page.WikibaseEntity):
raise TypeError('itemdef shall be str, WikibaseEntity or Page')
params = itemdef._defined_by(singular=True)
# TODO: support 'new'
baserevid = kwargs.pop(
'baserevid',
itemdef.latest_revision_id if 'id' in params else 0
)
params.update(
{'baserevid': baserevid,
'action': action,
'token': self.tokens['edit'],
'bot': kwargs.pop('bot', True),
})
params.update(prepare_data(action, action_data))
for arg in kwargs:
if arg in ['summary', 'tags']:
params[arg] = kwargs[arg]
else:
warn('Unknown parameter {} for action {}, ignored'
.format(arg, action), UserWarning, 2)
req = self.simple_request(**params)
data = req.submit()
return data
def wbsetaliases(self, itemdef, aliases, **kwargs):
"""
Set aliases for a single Wikibase entity.
See self._wbset_action() for parameters
"""
return self._wbset_action(itemdef, 'wbsetaliases', aliases, **kwargs)
def wbsetdescription(self, itemdef, description, **kwargs):
"""
Set description for a single Wikibase entity.
See self._wbset_action()
"""
return self._wbset_action(itemdef, 'wbsetdescription', description,
**kwargs)
def wbsetlabel(self, itemdef, label, **kwargs):
"""
Set label for a single Wikibase entity.
See self._wbset_action() for parameters
"""
return self._wbset_action(itemdef, 'wbsetlabel', label, **kwargs)
def wbsetsitelink(self, itemdef, sitelink, **kwargs):
"""
Set, remove or modify a sitelink on a Wikibase item.
See self._wbset_action() for parameters
"""
return self._wbset_action(itemdef, 'wbsetsitelink', sitelink, **kwargs)
@need_right('edit')
@need_extension('WikibaseLexeme')
def add_form(self, lexeme, form, *, bot: bool = True,
baserevid=None) -> dict:
"""
Add a form.
:param lexeme: Lexeme to modify
:type lexeme: pywikibot.LexemePage
:param form: Form to be added
:type form: pywikibot.LexemeForm
:keyword bot: Whether to mark the edit as a bot edit
:keyword baserevid: Base revision id override, used to detect
conflicts.
:type baserevid: long
"""
params = {
'action': 'wbladdform',
'lexemeId': lexeme.getID(),
'data': json.dumps(form.toJSON()),
'bot': bot,
'token': self.tokens['edit'],
}
if baserevid:
params['baserevid'] = baserevid
req = self.simple_request(**params)
data = req.submit()
return data
@need_right('edit')
@need_extension('WikibaseLexeme')
def remove_form(self, form, *, bot: bool = True, baserevid=None) -> dict:
"""
Remove a form.
:param form: Form to be removed
:type form: pywikibot.LexemeForm
:keyword bot: Whether to mark the edit as a bot edit
:keyword baserevid: Base revision id override, used to detect
conflicts.
:type baserevid: long
"""
params = {
'action': 'wblremoveform',
'id': form.getID(),
'bot': bot,
'token': self.tokens['edit'],
}
if baserevid:
params['baserevid'] = baserevid
req = self.simple_request(**params)
data = req.submit()
return data
@need_right('edit')
@need_extension('WikibaseLexeme')
def edit_form_elements(self, form, data, *, bot: bool = True,
baserevid=None) -> dict:
"""
Edit lexeme form elements.
:param form: Form
:type form: pywikibot.LexemeForm
:param data: data updates
:type data: dict
:keyword bot: Whether to mark the edit as a bot edit
:keyword baserevid: Base revision id override, used to detect
conflicts.
:type baserevid: long
:return: New form data
"""
params = {
'action': 'wbleditformelements',
'formId': form.getID(),
'data': json.dumps(data),
'bot': bot,
'token': self.tokens['edit'],
}
if baserevid:
params['baserevid'] = baserevid
req = self.simple_request(**params)
data = req.submit()
return data
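A hedged usage sketch of DataSite via pywikibot (assumes a configured account and network access; the printed fields follow the wbsearchentities API):

import pywikibot

site = pywikibot.Site('wikidata', 'wikidata')
repo = site.data_repository()  # a DataSite instance
for hit in repo.search_entities('Douglas Adams', 'en', total=3):
    print(hit['id'], hit.get('label'))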
| 36.160809
| 79
| 0.560389
| 3,604
| 33,955
| 5.194229
| 0.1404
| 0.007479
| 0.013194
| 0.020299
| 0.390224
| 0.340865
| 0.313408
| 0.286165
| 0.26437
| 0.255235
| 0
| 0.003829
| 0.338536
| 33,955
| 938
| 80
| 36.19936
| 0.829653
| 0.280754
| 0
| 0.380392
| 0
| 0
| 0.117756
| 0.003975
| 0
| 0
| 0
| 0.002132
| 0.021569
| 1
| 0.072549
| false
| 0
| 0.023529
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3e3eb5e33cc147796a90e6e65542a513c75576b
| 1,210
|
py
|
Python
|
app.py
|
MisaelVillaverde/fourier-calculator
|
fd50cd292e333c1a9d75e93962a0aaa0985ecef9
|
[
"MIT"
] | null | null | null |
app.py
|
MisaelVillaverde/fourier-calculator
|
fd50cd292e333c1a9d75e93962a0aaa0985ecef9
|
[
"MIT"
] | 1
|
2021-11-07T04:40:13.000Z
|
2021-11-07T04:40:13.000Z
|
app.py
|
MisaelVillaverde/fourier-calculator
|
fd50cd292e333c1a9d75e93962a0aaa0985ecef9
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask import render_template, request
from flask import jsonify
import requests
import json
app = Flask(__name__)
@app.route("/symbo",methods=['POST'])
def symbo():
#import pdb; pdb.set_trace()
session = requests.session()
token = session.get("https://es.symbolab.com/solver/step-by-step/x%5E%7B2%7D?or=input").cookies.get_dict()["sy2.pub.token"]
query = request.json["expression"]
#response = json.loads(session.get(f"https://es.symbolab.com/pub_api/steps?subscribed=true&origin=input&language=es&query=%5Cint+tcos%5Cleft(nt%5Cright)dt+&referer=https%3A%2F%2Fes.symbolab.com%2Fsolver%2Fstep-by-step%2F%255Cint_%257B%2520%257Dtcos%255Cleft(nt%255Cright)dt%2520%3For%3Dinput&plotRequest=PlotOptional&page=step-by-step",headers={
response = json.loads(session.get(f"https://es.symbolab.com/pub_api/steps?subscribed=true&origin=input&language=es&query={query}",headers={
"x-requested-with":"XMLHttpRequest",
"authorization":f"Bearer {token}"
}).content)
return {
"dym":response["dym"],
"solutions":response["solutions"]
}
@app.route('/')
def hello():
return render_template('index.html')
app.run(debug=True)
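A small client sketch for the /symbo endpoint above, assuming the Flask server is running on its default port:

import requests

resp = requests.post('http://127.0.0.1:5000/symbo',
                     json={'expression': 'x^{2}'})
print(resp.json()['solutions'])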
| 41.724138
| 349
| 0.717355
| 170
| 1,210
| 5.041176
| 0.517647
| 0.051342
| 0.052509
| 0.063011
| 0.226371
| 0.226371
| 0.226371
| 0.226371
| 0.226371
| 0.226371
| 0
| 0.036178
| 0.109091
| 1,210
| 29
| 350
| 41.724138
| 0.758813
| 0.306612
| 0
| 0
| 0
| 0.086957
| 0.335322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.217391
| 0.043478
| 0.391304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3e55a939b6d954bcaed4fd506083967468d2eb3
| 1,584
|
py
|
Python
|
my_code/Chapter_2.py
|
kalona/Spark-The-Definitive-Guide
|
0b495c4710b2030aa59d5a7f4053ee0a8345d0d8
|
[
"Apache-2.0"
] | 2
|
2022-01-02T14:24:29.000Z
|
2022-01-02T15:54:47.000Z
|
my_code/Chapter_2.py
|
kalona/Spark-The-Definitive-Guide
|
0b495c4710b2030aa59d5a7f4053ee0a8345d0d8
|
[
"Apache-2.0"
] | null | null | null |
my_code/Chapter_2.py
|
kalona/Spark-The-Definitive-Guide
|
0b495c4710b2030aa59d5a7f4053ee0a8345d0d8
|
[
"Apache-2.0"
] | null | null | null |
from pyspark.sql import SparkSession
# spark = SparkSession.builder.master("local[*]").getOrCreate()
spark = SparkSession.builder.getOrCreate()
file_path = "C:\home_work\local_github\Spark-The-Definitive-Guide\data\/flight-data\csv\/2015-summary.csv"
# COMMAND ----------
# COMMAND ----------
flightData2015 = spark\
.read\
.option("inferSchema", "true")\
.option("header", "true")\
.csv("./data/flight-data/csv/2015-summary.csv")
# COMMAND ----------
flightData2015.createOrReplaceTempView("flight_data_2015")
# COMMAND ----------
sqlWay = spark.sql("""
SELECT DEST_COUNTRY_NAME, count(1)
FROM flight_data_2015
GROUP BY DEST_COUNTRY_NAME
""")
dataFrameWay = flightData2015\
.groupBy("DEST_COUNTRY_NAME")\
.count()
sqlWay.explain()
dataFrameWay.explain()
# COMMAND ----------
from pyspark.sql.functions import max, col
#
flightData2015.select(max(col("count"))).show(1)
# COMMAND ----------
maxSql = spark.sql("""
SELECT DEST_COUNTRY_NAME, sum(count) as destination_total
FROM flight_data_2015
GROUP BY DEST_COUNTRY_NAME
ORDER BY sum(count) DESC
LIMIT 5
""")
maxSql.show()
# COMMAND ----------
from pyspark.sql.functions import desc
flightData2015\
.groupBy("DEST_COUNTRY_NAME")\
.sum("count")\
.withColumnRenamed("sum(count)", "destination_total")\
.sort(desc("destination_total"))\
.limit(5)\
.show()
# COMMAND ----------
flightData2015\
.groupBy("DEST_COUNTRY_NAME")\
.sum("count")\
.withColumnRenamed("sum(count)", "destination_total")\
.sort(desc("destination_total"))\
.limit(5)\
.explain()
# COMMAND ----------
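Building on flightData2015 above, a sketch of materializing the top-5 result as Python objects instead of printing it:

from pyspark.sql.functions import desc

top5 = flightData2015\
    .groupBy("DEST_COUNTRY_NAME")\
    .sum("count")\
    .withColumnRenamed("sum(count)", "destination_total")\
    .sort(desc("destination_total"))\
    .limit(5)\
    .collect()  # a list of Row objects
for row in top5:
    print(row['DEST_COUNTRY_NAME'], row['destination_total'])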
| 18.418605
| 106
| 0.680556
| 181
| 1,584
| 5.801105
| 0.325967
| 0.073333
| 0.1
| 0.091429
| 0.533333
| 0.491429
| 0.367619
| 0.367619
| 0.295238
| 0.219048
| 0
| 0.035
| 0.116162
| 1,584
| 85
| 107
| 18.635294
| 0.715
| 0.146465
| 0
| 0.431818
| 0
| 0.022727
| 0.41194
| 0.097761
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.068182
| 0
| 0.068182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3e58f9e7062eea97241b4b05b8e709ab53b50c3
| 7,508
|
py
|
Python
|
tests/test_intake_postgres.py
|
ContinuumIO/intake-postgres
|
fda7f7b2b6255544ea7ffd365a4ac8b2655fd226
|
[
"BSD-2-Clause"
] | 2
|
2018-11-26T00:14:10.000Z
|
2018-12-21T01:52:44.000Z
|
tests/test_intake_postgres.py
|
ContinuumIO/intake-postgres
|
fda7f7b2b6255544ea7ffd365a4ac8b2655fd226
|
[
"BSD-2-Clause"
] | 1
|
2018-12-20T08:41:05.000Z
|
2018-12-21T15:00:08.000Z
|
tests/test_intake_postgres.py
|
ContinuumIO/intake-postgres
|
fda7f7b2b6255544ea7ffd365a4ac8b2655fd226
|
[
"BSD-2-Clause"
] | 3
|
2018-12-19T08:34:14.000Z
|
2019-01-24T07:58:32.000Z
|
import os
import pickle
import pytest
import pandas as pd
from shapely import wkt
from intake_postgres import PostgresSource
from intake import open_catalog
from .util import verify_datasource_interface
TEST_DATA_DIR = 'tests'
TEST_DATA = [
('sample1', 'sample1.csv'),
('sample2_1', 'sample2_1.csv'),
('sample2_2', 'sample2_2.csv'),
]
TEST_GIS_DATA = [
('points', 'sample_points.psql'),
('multipoints', 'sample_multipoints.psql'),
('lines', 'sample_lines.psql'),
('multilines', 'sample_multilines.psql'),
('polygons', 'sample_polygons.psql'),
('multipolygons', 'sample_multipolygons.psql'),
# ('triangles', 'sample_triangles.psql'),
]
TEST_TEMPLATE_DATA = [
'jinja2_params_with_env',
]
@pytest.fixture(scope='module')
def engine():
"""Start docker container for PostgreSQL database, yield a tuple (engine,
metadata), and cleanup connection afterward."""
from .util import start_postgres, stop_postgres
from sqlalchemy import create_engine
stop_postgres(let_fail=True)
local_port = start_postgres()
uri = 'postgresql://postgres@localhost:{}/postgres'.format(local_port)
engine = create_engine(uri)
for table_name, csv_fname in TEST_DATA:
csv_fpath = os.path.join(TEST_DATA_DIR, csv_fname)
df = pd.read_csv(csv_fpath)
df.to_sql(table_name, engine, index=False)
for table_name, psql_fname in TEST_GIS_DATA:
psql_fpath = os.path.join(TEST_DATA_DIR, psql_fname)
with engine.connect() as conn:
with open(psql_fpath, 'r') as fp:
cmds = fp.read().strip().split(';')
for cmd in cmds:
if cmd.strip():
conn.execute(' '.join(cmd.split()))
try:
yield engine
finally:
stop_postgres()
@pytest.mark.parametrize('table_name,_', TEST_DATA)
def test_open(engine, table_name, _):
d = PostgresSource(str(engine.url), 'select * from '+table_name)
assert d.container == 'dataframe'
assert d.description is None
verify_datasource_interface(d)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_discover(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
info = source.discover()
dt = {k: str(v) for k, v in expected_df.dtypes.to_dict().items()}
assert info['dtype'] == dt
assert info['shape'] == (None, 3)
assert info['npartitions'] == 1
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_read(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
df = source.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_discover_after_read(engine, table_name, csv_fpath):
"""Assert that after reading the dataframe, discover() shows more accurate
information.
"""
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
info = source.discover()
dt = {k: str(v) for k, v in expected_df.dtypes.to_dict().items()}
assert info['dtype'] == dt
assert info['shape'] == (None, 3)
assert info['npartitions'] == 1
df = source.read()
assert expected_df.equals(df)
info = source.discover()
assert info['dtype'] == dt
assert info['shape'] == (4, 3)
assert info['npartitions'] == 1
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_close(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
source.close()
# Can reopen after close
df = source.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_pickle(engine, table_name, csv_fpath):
source = PostgresSource(str(engine.url), 'select * from '+table_name)
pickled_source = pickle.dumps(source)
source_clone = pickle.loads(pickled_source)
expected_df = source.read()
df = source_clone.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,_1', TEST_DATA)
def test_catalog(engine, table_name, _1):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = table_name.rsplit('_idx', 1)[0]
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
def test_catalog_join(engine):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = 'sample2'
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
@pytest.mark.parametrize('table_name,_1', TEST_GIS_DATA)
def test_postgis_data(engine, table_name, _1):
from sqlalchemy import MetaData
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = table_name
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
meta = MetaData()
meta.reflect(bind=engine)
col_exprs = ['ST_AsText({0}) as {0}'.format(col.name)
for col in meta.tables[table_name].columns]
_query = pgsrc._sql_expr.replace('*', ', '.join(col_exprs))
expected_df = pd.read_sql_query(_query, engine).applymap(
lambda geom: str(wkt.loads(geom))
)
df = pgsrc.read().applymap(lambda geom: str(wkt.loads(geom)))
assert expected_df.equals(df)
pgsrc.close()
@pytest.mark.parametrize('ds_name', TEST_TEMPLATE_DATA)
def test_jinja2(engine, ds_name):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
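A sketch of using the plugin under test directly, outside the fixtures (the connection URI and port are assumptions):

from intake_postgres import PostgresSource

src = PostgresSource('postgresql://postgres@localhost:5432/postgres',
                     'select * from sample1')
print(src.discover()['dtype'])  # column name -> dtype string
df = src.read()                 # full result as a pandas DataFrame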
| 31.546218
| 78
| 0.678876
| 1,010
| 7,508
| 4.816832
| 0.167327
| 0.051799
| 0.041932
| 0.028777
| 0.633916
| 0.619938
| 0.608633
| 0.573279
| 0.556629
| 0.556629
| 0
| 0.006494
| 0.179542
| 7,508
| 237
| 79
| 31.679325
| 0.783279
| 0.035162
| 0
| 0.508571
| 0
| 0
| 0.134506
| 0.018701
| 0
| 0
| 0
| 0
| 0.205714
| 1
| 0.062857
| false
| 0
| 0.062857
| 0
| 0.125714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3e6e2cb9c18b7306bf960a8fcbaf212c1159394
| 351
|
py
|
Python
|
Module_3/testImage.py
|
dks1018/CoffeeShopCoding
|
13ac1700673c86c601eb2758570920620a956e4c
|
[
"ADSL"
] | null | null | null |
Module_3/testImage.py
|
dks1018/CoffeeShopCoding
|
13ac1700673c86c601eb2758570920620a956e4c
|
[
"ADSL"
] | null | null | null |
Module_3/testImage.py
|
dks1018/CoffeeShopCoding
|
13ac1700673c86c601eb2758570920620a956e4c
|
[
"ADSL"
] | null | null | null |
# file = open('C:\\Users\\dks10\\OneDrive\\Desktop\\Projects\\Code\\Python\\PythonCrypto\\Module_3\\eye.png', 'rb')

# Read the image as raw bytes.
with open('encrypt_eye.png', 'rb') as file:
    image = bytearray(file.read())

# XOR every byte with the key. The same loop also decrypts, because
# (value ^ key) ^ key == value.
key = 48
for index, value in enumerate(image):
    image[index] = value ^ key

# Write the transformed bytes out as a new file.
with open('2eye.png', 'wb') as file:
    file.write(image)
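# A standalone round-trip sketch (not part of the script above) showing why
# a single-byte XOR is its own inverse: applying it twice with the same key
# restores the original bytes. The sample data and key value are illustrative.
original = bytearray(b'example bytes')
key = 48
encrypted = bytearray(b ^ key for b in original)
decrypted = bytearray(b ^ key for b in encrypted)
assert decrypted == original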
| 21.9375
| 115
| 0.675214
| 52
| 351
| 4.519231
| 0.596154
| 0.102128
| 0.068085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019293
| 0.11396
| 351
| 16
| 116
| 21.9375
| 0.736334
| 0.321937
| 0
| 0.2
| 0
| 0
| 0.113924
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
a3e8a92c23b5ddc471c49e37f3c8dc3fb274d2ab | 1,702 | py | Python
| ledfxcontroller/effects/temporal.py | Aircoookie/LedFx | 95628fc237497dd89aaf30fdbf88f780f3330166 | ["MIT"] | 17 | 2018-08-31T05:51:09.000Z | 2022-02-12T15:41:33.000Z
| ledfxcontroller/effects/temporal.py | Aircoookie/LedFx | 95628fc237497dd89aaf30fdbf88f780f3330166 | ["MIT"] | null | null | null
| ledfxcontroller/effects/temporal.py | Aircoookie/LedFx | 95628fc237497dd89aaf30fdbf88f780f3330166 | ["MIT"] | 5 | 2019-07-15T22:12:45.000Z | 2022-02-05T10:50:44.000Z |
import time
import logging
from ledfxcontroller.effects import Effect
from threading import Thread
import voluptuous as vol

_LOGGER = logging.getLogger(__name__)
DEFAULT_RATE = 1.0 / 60.0


@Effect.no_registration
class TemporalEffect(Effect):
    _thread_active = False
    _thread = None

    CONFIG_SCHEMA = vol.Schema({
        vol.Required('speed', default=1.0): float
    })

    def thread_function(self):
        while self._thread_active:
            startTime = time.time()

            # Treat the return value of the effect loop as a speed modifier
            # so that effects that are naturally faster or slower can have
            # a consistent feel.
            sleepInterval = self.effect_loop()
            if sleepInterval is None:
                sleepInterval = 1.0
            sleepInterval = sleepInterval * DEFAULT_RATE

            # Calculate the time to sleep, accounting for potentially heavy
            # frame assembly operations.
            timeToSleep = (sleepInterval / self._config['speed']) - (time.time() - startTime)
            if timeToSleep > 0:
                time.sleep(timeToSleep)

    def effect_loop(self):
        """
        Triggered periodically based on the effect speed and
        any additional effect modifiers.
        """
        pass

    def activate(self, pixel_count):
        super().activate(pixel_count)

        self._thread_active = True
        self._thread = Thread(target=self.thread_function)
        self._thread.start()

    def deactivate(self):
        if self._thread_active:
            self._thread_active = False
            self._thread.join()
            self._thread = None

        super().deactivate()
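# A hedged sketch of how a concrete effect might subclass TemporalEffect.
# The class name, NAME attribute, and the idea of toggling state per tick are
# illustrative assumptions, not part of the file above; only the effect_loop
# contract (return value acts as a per-tick speed modifier) comes from it.
class BlinkEffect(TemporalEffect):
    NAME = "blink"  # hypothetical registration name
    _on = False

    def effect_loop(self):
        # Toggle once per tick. Returning 1.0 keeps the default rate;
        # 2.0 would run at half speed, 0.5 at double speed.
        self._on = not self._on
        return 1.0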
| 28.847458
| 93
| 0.621622
| 186
| 1,702
| 5.521505
| 0.473118
| 0.087634
| 0.062317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00846
| 0.305523
| 1,702
| 58
| 94
| 29.344828
| 0.860406
| 0.185076
| 0
| 0
| 0
| 0
| 0.007402
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0.027027
| 0.135135
| 0
| 0.351351
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
a3ec4aae5421f3c1473f18af462a1b949c04b4de | 1,796 | py | Python
| utils.py | LuChang-CS/sherbet | d1061aca108eab8e0ccbd2202460e25261fdf1d5 | ["Apache-2.0"] | 2 | 2022-01-26T05:38:04.000Z | 2022-03-20T08:54:18.000Z
| utils.py | LuChang-CS/sherbet | d1061aca108eab8e0ccbd2202460e25261fdf1d5 | ["Apache-2.0"] | null | null | null
| utils.py | LuChang-CS/sherbet | d1061aca108eab8e0ccbd2202460e25261fdf1d5 | ["Apache-2.0"] | null | null | null |
import numpy as np


class DataGenerator:
    def __init__(self, inputs, shuffle=True, batch_size=32):
        assert len(inputs) > 0
        self.inputs = inputs
        self.idx = np.arange(len(inputs[0]))
        self.shuffle = shuffle
        self.batch_size = batch_size
        self.on_epoch_end()

    def data_length(self):
        return len(self.idx)

    def __len__(self):
        n = self.data_length()
        len_ = n // self.batch_size
        return len_ if n % self.batch_size == 0 else len_ + 1

    def __getitem__(self, index):
        # Slice the (possibly shuffled) index array into one batch and
        # apply it to every input array in parallel.
        start = index * self.batch_size
        end = start + self.batch_size
        index = self.idx[start:end]
        data = []
        for x in self.inputs:
            data.append(x[index])
        return data

    def on_epoch_end(self):
        if self.shuffle:
            np.random.shuffle(self.idx)

    def set_batch_size(self, batch_size):
        self.batch_size = batch_size


def lr_decay(total_epoch, init_lr, split_val):
    # Build a per-epoch learning-rate table, then return a schedule
    # function that looks the rate up by epoch index.
    lr_map = [init_lr] * total_epoch
    if len(split_val) > 0:
        assert split_val[0][0] > 1
        assert split_val[-1][0] <= total_epoch
        current_split_index = 0
        current_lr = init_lr
        next_epoch, next_lr = split_val[current_split_index]
        for i in range(total_epoch):
            if i < next_epoch - 1:
                lr_map[i] = current_lr
            else:
                current_lr = next_lr
                lr_map[i] = current_lr
                current_split_index += 1
                if current_split_index >= len(split_val):
                    next_epoch = total_epoch + 1
                else:
                    next_epoch, next_lr = split_val[current_split_index]

    def lr_schedule_fn(epoch, lr):
        return lr_map[epoch]

    return lr_schedule_fn
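# A minimal usage sketch for the two helpers above; the array shapes and the
# epoch/learning-rate numbers are illustrative assumptions.
x = np.arange(100).reshape(50, 2)
y = np.arange(50)
gen = DataGenerator([x, y], shuffle=True, batch_size=16)
for i in range(len(gen)):           # 4 batches: 16 + 16 + 16 + 2 samples
    batch_x, batch_y = gen[i]
gen.on_epoch_end()                  # reshuffle between epochs

# split_val holds (epoch, lr) pairs with 1-based epochs: here the rate drops
# to 1e-4 at epoch 10 (index 9) and to 1e-5 at epoch 20 (index 19).
schedule = lr_decay(total_epoch=30, init_lr=1e-3,
                    split_val=[(10, 1e-4), (20, 1e-5)])
assert schedule(0, None) == 1e-3
assert schedule(9, None) == 1e-4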
| 28.967742
| 72
| 0.577394
| 244
| 1,796
| 3.930328
| 0.209016
| 0.103233
| 0.094891
| 0.029197
| 0.183525
| 0.08342
| 0.08342
| 0.08342
| 0.08342
| 0
| 0
| 0.013389
| 0.334633
| 1,796
| 61
| 73
| 29.442623
| 0.789121
| 0
| 0
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06
| 1
| 0.16
| false
| 0
| 0.02
| 0.04
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
a3ec779913e7a7957725c231bcea5cdaa55dcfbf | 810 | py | Python
| Version1_STI.py | sudhanshu55/Speech_to_Image | 7a047725b3167cfcb7a68004b3c35b2ece75fde4 | ["MIT"] | null | null | null
| Version1_STI.py | sudhanshu55/Speech_to_Image | 7a047725b3167cfcb7a68004b3c35b2ece75fde4 | ["MIT"] | null | null | null
| Version1_STI.py | sudhanshu55/Speech_to_Image | 7a047725b3167cfcb7a68004b3c35b2ece75fde4 | ["MIT"] | null | null | null |
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import speech_recognition as sr
from google_images_download import google_images_download

response = google_images_download.googleimagesdownload()

# Capture a phrase from the microphone and transcribe it. recognize_google
# already returns a str in Python 3; encoding it to bytes would break
# word_tokenize below.
r = sr.Recognizer()
with sr.Microphone() as source:
    print("Say something!")
    audio = r.listen(source)
data = r.recognize_google(audio)
print(data)

# Drop English stopwords so only the content words drive the image search.
stopWords = set(stopwords.words('english'))
words = word_tokenize(data)
wordsFiltered = []
for w in words:
    if w not in stopWords:
        wordsFiltered.append(w)

into_string = ' '.join(wordsFiltered)  # join, rather than str(list), so the keywords read naturally
print(into_string)

arguments = {"keywords": into_string, "limit": 2, "print_urls": True}  # creating the download arguments
response.download(arguments)  # passing the arguments to the function
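# One-time NLTK data setup assumed by the script above: word_tokenize needs
# the 'punkt' tokenizer models and stopwords.words('english') needs the
# 'stopwords' corpus; neither ships with the nltk package itself.
import nltk
nltk.download('punkt')
nltk.download('stopwords')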
| 32.4
| 94
| 0.769136
| 109
| 810
| 5.577982
| 0.53211
| 0.059211
| 0.098684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002849
| 0.133333
| 810
| 25
| 95
| 32.4
| 0.863248
| 0.077778
| 0
| 0
| 0
| 0
| 0.065684
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.227273
| 0
| 0.227273
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |